diff --git a/cameratransform/google.png b/cameratransform/google.png deleted file mode 100644 index b714561..0000000 Binary files a/cameratransform/google.png and /dev/null differ diff --git a/cameratransform/topview.py b/cameratransform/topview.py deleted file mode 100644 index cbd4e73..0000000 --- a/cameratransform/topview.py +++ /dev/null @@ -1,58 +0,0 @@ -import matplotlib.pyplot as plt -import matplotlib.image as mpimg -import numpy as np - -image_path = "cameratransform/google.png" - -from scipy.ndimage import map_coordinates - -def simulate_top_view(image_path, inclination=0, rotation=0): - # Charger l'image - img = mpimg.imread(image_path) - - # Dimensions de l'image - height, width, _ = img.shape - - # Créer une grille d'indices pour les pixels de l'image - y, x = np.indices((height, width)) - - # Convertir les coordonnées x, y en coordonnées polaires - r = np.sqrt((x - width / 2) ** 2 + (y - height / 2) ** 2) - theta = np.arctan2(y - height / 2, x - width / 2) - - # Ajuster l'inclinaison et la rotation - r_adjusted = r * np.cos(np.deg2rad(inclination)) - theta_adjusted = theta + np.deg2rad(rotation) - - # Convertir les coordonnées polaires ajustées en coordonnées cartésiennes - x_adjusted = width / 2 + r_adjusted * np.cos(theta_adjusted) - y_adjusted = height / 2 + r_adjusted * np.sin(theta_adjusted) - - # Interpolation bilinéaire pour obtenir les nouvelles valeurs de pixel - coordinates = np.vstack((y_adjusted.flatten(), x_adjusted.flatten())) - simulated_img = np.zeros_like(img) - for c in range(3): # Canal de couleur (R, G, B) - simulated_img[:, :, c] = map_coordinates(img[:, :, c], coordinates, order=1).reshape(img.shape[:2]) - - return simulated_img - -# Chemin vers votre image - -# Paramètres de simulation (inclinaison et rotation en degrés) -inclination_degrees = 30 -rotation_degrees = 45 - -# Simulation de la vue de dessus avec les paramètres donnés -simulated_image = simulate_top_view(image_path, inclination=inclination_degrees, 
rotation=rotation_degrees) - -# Afficher l'image originale et l'image simulée côte à côte -fig, axes = plt.subplots(1, 2, figsize=(10, 5)) -axes[0].imshow(mpimg.imread(image_path)) -axes[0].set_title("Image originale") -axes[0].axis("off") - -axes[1].imshow(simulated_image) -axes[1].set_title("Vue de dessus simulée") -axes[1].axis("off") - -plt.show() \ No newline at end of file diff --git a/cams/new/extract + calibrated.py b/cams/new/extract + calibrated.py new file mode 100644 index 0000000..fffdd9c --- /dev/null +++ b/cams/new/extract + calibrated.py @@ -0,0 +1,110 @@ +import pandas as pd +import numpy as np +import cv2 +import matplotlib.pyplot as plt +from scipy.optimize import least_squares +from imageio import imread +import cameratransform as ct + +cap = cv2.VideoCapture('cams/new/cut2.mp4') +folder_path = "track/expgood/labels/" +name = 'cut2' +fps = 780 + +allfiles = [] +for i in range(1, fps+1): + allfiles.append(folder_path + name + '_' + str(i) + '.txt') + +# Set the desired dimensions for displaying the video +display_width = 1280 +display_height = 720 + +display_width = 1920 +display_height = 1080 + +width = 1920 +height = 1080 + +frame_nb = 0 + +bleu = (255, 0, 0) +vert = (0, 255, 0) + +# # Cam part +# img = cv2.imread("track/Sylvain/stage_Noham/stage_Noham/image_vide_pts.png") +# nh,nw,_ = img.shape + +res = np.array([ 3.99594676, 3.53413555, 4.55 , 16.41739973, 74.96395791, 49.11271189, 2.79384615]) +image_size = (width,height) +cam = ct.Camera(ct.RectilinearProjection(focallength_mm=res[0], sensor=(res[1],res[2]), image=image_size), + ct.SpatialOrientation(elevation_m=res[3], tilt_deg=res[4], heading_deg = res[5], roll_deg = res[6] ) ) + + +if not cap.isOpened(): + print("Error opening video stream or file") + +while cap.isOpened(): + ret, frame = cap.read() + if ret: + df = pd.read_csv(allfiles[frame_nb], header=None, sep=' ') + ind_px_ground_pts = [] + for index, row in df.iterrows(): + class_id, center_x, center_y, bbox_width, bbox_height, 
object_id = row + + center_x = int(center_x * width) + center_y = int(center_y * height) + bbox_width = int(bbox_width * width) + bbox_height = int(bbox_height * height) + + top_left_x = int(center_x - bbox_width / 2) + top_left_y = int(center_y - bbox_height / 2) + bottom_right_x = int(center_x + bbox_width / 2) + bottom_right_y = int(center_y + bbox_height / 2) + + # (19;112) à (636;714) et (86;86) à (1087;715) + if (((112-714)/(19-636)) * top_left_x + 112 - ((112-714)/(19-636)) *19 > top_left_y ) and (((86-715)/(86-1097)) * bottom_right_x + 112 - ((86-715)/(86-1097)) *86 < bottom_right_y ): + + label = f'Class: {int(class_id)}, Object ID: {int(object_id)}' + cv2.putText(frame, label, (top_left_x, top_left_y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, vert, 1) + + # obetnir le centre du rectangle + center_x = (top_left_x + bottom_right_x) // 2 + center_y = (top_left_y + bottom_right_y) // 2 + cv2.circle(frame, (center_x, center_y), 5, vert, -1) + + ind_px_ground_pts += [center_x, center_y] + + + else : + pass + ind_px_ground_pts = np.array(ind_px_ground_pts) + + px_ground_pts = np.vstack([ind_px_ground_pts[1],ind_px_ground_pts[0]]).T + print(px_ground_pts) + + space_pts = [] + for pt in px_ground_pts: + space_pts.append(cam.spaceFromImage(pt)) + space_pts = np.array(space_pts) + + # resized_frame = cv2.resize(frame, (display_width, display_height)) + # cv2.imshow('Frame', resized_frame) + # plt.figure() + +####################### + plt.scatter(space_pts[:,0], space_pts[:,1], color="red", s=2) + plt.plot([28.569, 51.681],[26.665, 89.904], color='blue', linestyle='-', linewidth=1) + # plt.axis("equal") + plt.xlim([0, 100]) + plt.ylim([0, 150]) + plt.draw() + plt.pause(0.0000000000001) + plt.clf() +###################### + + if cv2.waitKey(25) & 0xFF == ord('q'):break + frame_nb = frame_nb + 1 + else:break + +cap.release() +cv2.destroyAllWindows() \ No newline at end of file diff --git a/track/Sylvain/ImmersionTemplate.zip b/track/Sylvain/ImmersionTemplate.zip new file 
mode 100644 index 0000000..9b3320c Binary files /dev/null and b/track/Sylvain/ImmersionTemplate.zip differ diff --git a/track/Sylvain/ImmersionTemplate/ImmersionTemplate/logo-ups.png b/track/Sylvain/ImmersionTemplate/ImmersionTemplate/logo-ups.png new file mode 100644 index 0000000..60a6005 Binary files /dev/null and b/track/Sylvain/ImmersionTemplate/ImmersionTemplate/logo-ups.png differ diff --git a/track/Sylvain/ImmersionTemplate/ImmersionTemplate/rapport.pdf b/track/Sylvain/ImmersionTemplate/ImmersionTemplate/rapport.pdf new file mode 100644 index 0000000..09279dc Binary files /dev/null and b/track/Sylvain/ImmersionTemplate/ImmersionTemplate/rapport.pdf differ diff --git a/track/Sylvain/ImmersionTemplate/ImmersionTemplate/rapport.tex b/track/Sylvain/ImmersionTemplate/ImmersionTemplate/rapport.tex new file mode 100644 index 0000000..e6acdda --- /dev/null +++ b/track/Sylvain/ImmersionTemplate/ImmersionTemplate/rapport.tex @@ -0,0 +1,182 @@ +\documentclass[a4paper]{article} +\usepackage[margin=25mm]{geometry} +\usepackage{amsmath} +\usepackage{amsfonts} +\usepackage{amssymb} +\usepackage{graphicx} +\pagenumbering{gobble} +\usepackage{verbatim} +\usepackage[utf8]{inputenc} +\usepackage[french,english]{babel} +\usepackage{tikz} +\usepackage{xcolor} + +\newtheorem{theorem}{Th\'eor\`eme}[subsection] +\newtheorem{proposition}{Proposition}[subsection] +\newtheorem{definition}{D\'efinition}[subsection] + +\newtheorem{lemma}{Lemme}[subsection] +\newtheorem{model}{Mod\`ele}[subsection] +\newtheorem{algorithm}{Algorithme}[subsection] +\newtheorem{problem}{Probl\`eme}[subsection] +\newtheorem{remark}{Remarque}[subsection] + +%\newcommand{\Id}{\mathbf{Id}} +%\newcommand{\ie}{$i. e.\ $} +%\newcommand{\eg}{$e. 
g.\ $} +%\newcommand{\st}{ such that } +%\newcommand{\Div}{\mbox{div }} +%\newcommand{\Curl}{\mbox{curl }} + +% Keywords command +\providecommand{\keywords}[1] +{ + \small + \textbf{\textit{Keywords---}} #1 +} + +\title{Titre du rapport} + +\author{Premier Auteur$^{1}$, Second Auteur$^{2}$ \\ + \small $^{1}$L3 LDD Informatique, Mathématiques, Université Paris-Saclay, 91405 Orsay, France \\ + \small $^{2}$L3 LDD Mathématiques, Physique, Université Paris-Saclay, 91405 Orsay, France \\ +} + +\date{} % Comment this line to show today's date + + +\makeindex + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\begin{document} +\selectlanguage{french} + +\maketitle + +\begin{tikzpicture}[overlay,yshift=5cm, xshift=13.4cm] + \pgftext{\includegraphics[width=90pt]{logo-ups.png}} +\end{tikzpicture} + +\begin{abstract} +{\color{blue}Résumé en français...} +Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vestibulum pretium libero non odio tincidunt semper. Vivamus sollicitudin egestas mattis. Sed vitae risus vel ex tincidunt molestie nec vel leo. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Maecenas quis massa tincidunt, faucibus magna non, fringilla sapien. In ullamcorper justo a scelerisque egestas. Ut maximus, elit a rutrum viverra, lectus sapien varius est, vel tempor neque mi et augue. Fusce ornare venenatis nunc nec feugiat. Proin a enim mauris. Mauris dignissim vulputate erat, vitae cursus risus elementum at. Cras luctus pharetra congue. Aliquam id est dictum, finibus ligula sed, tempus arcu. +\end{abstract} +\hspace{10pt} + +\selectlanguage{english} +\begin{abstract} +{\color{blue}Abstract in English... } +Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vestibulum pretium libero non odio tincidunt semper. Vivamus sollicitudin egestas mattis. Sed vitae risus vel ex tincidunt molestie nec vel leo. 
Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Maecenas quis massa tincidunt, faucibus magna non, fringilla sapien. In ullamcorper justo a scelerisque egestas. Ut maximus, elit a rutrum viverra, lectus sapien varius est, vel tempor neque mi et augue. Fusce ornare venenatis nunc nec feugiat. Proin a enim mauris. Mauris dignissim vulputate erat, vitae cursus risus elementum at. Cras luctus pharetra congue. Aliquam id est dictum, finibus ligula sed, tempus arcu. +\end{abstract} +\selectlanguage{french} + + +%TC:ignore +\keywords{mot clé; mot clé; mot clé} + +\clearpage + +\section{Introduction} + +Aenean tellus orci, accumsan $i$ nec neque at, vestibulum eleifend elit \cite{helbing09,SchadCA09} ({\color{blue}bien cité dans le texte de l'article toute référence présente dans la bibliographie}) Sed luctus enim dui, in fermentum $j$ dui pharetra at. Fusce vel nisl et diam feugiat porttitor et at libero. Maecenas scelerisque varius mauris non euismod. Nulla eget cursus leo. Integer interdum lacus vel ligula maximus, at feugiat orci porttitor. Suspendisse egestas, lorem a \index{elementum} lobortis, tellus mauris hendrerit nunc, sed vestibulum mi velit quis risus. Mauris gravida mi et ullamcorper blandit. Aenean lacinia, quam id tempus interdum, massa orci rhoncus turpis, eu finibus nisi lectus id sem. Vivamus ut mauris sed diam porta viverra sit amet quis risus (\cite{Zuriguel09}). + +Nam id ornare dolor. Nulla metus enim, venenatis vel dui ac, accumsan vehicula est. Suspendisse luctus eros et velit eleifend, nec finibus ante rutrum. Interdum et malesuada fames ac ante ipsum primis {\em systemic} in faucibus. Vivamus tempor lorem turpis, nec venenatis turpis venenatis nec. Integer hendrerit at mi nec aliquet. Vestibulum auctor arcu scelerisque lacus rhoncus ornare. Vivamus convallis libero nulla, vitae ullamcorper mauris venenatis nec. Donec elementum ligula non tortor \index{pellentesque} finibus. 
+ +Vestibulum mauris odio, scelerisque ut nisi ut, tincidunt maximus eros. Fusce tempor ex non mi commodo consectetur. Sed sit amet massa id elit commodo bibendum. Nunc id neque tempus erat tempus dictum. Fusce mi leo, hendrerit in egestas sed, faucibus vel ex. In hac habitasse platea dictumst. Vivamus eget odio arcu. Ut finibus et lacus ac interdum. Donec consectetur dolor neque, vel condimentum nunc varius nec. Mauris sapien dolor, aliquam nec vulputate at, fermentum vel nulla. Orci varius natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Nam posuere vulputate vestibulum. + +\section{Première section} + +Integer iaculis vitae nisi mollis congue. Cras sed facilisis tortor. Aliquam quis neque ipsum. Proin et accumsan arcu. Donec sit amet nibh lacus. Vestibulum mattis arcu sed ante \index{vestibulum} condimentum. Nunc auctor ligula vel velit finibus imperdiet. Cras consequat ipsum quis rhoncus consequat. Etiam luctus purus turpis, quis tempor massa posuere non. Donec vitae $\Phi$ ex in ligula ultricies feugiat. Sed urna sem, rutrum at tempus vel, mollis vel magna. Etiam ex est, pulvinar et risus at, facilisis efficitur turpis. Etiam egestas est a erat elementum, vitae porta lectus finibus. Donec ac consequat sapien. Aenean sed eros a est blandit dictum.\\ +{\color{blue}Equation numérotée pouvant être citée (\ref{eq:eq1}) : } +\begin{equation} +\label{eq:eq1} +( a + b )^2 = a^2 + b^2 + 2 a b. +\end{equation} + +\noindent{\color{blue}Système d'équations : } +\begin{eqnarray} +\label{eq:eq2} +( a + b )^2 &=& a^2 + b^2 + 2 a b,\\ +( a - b )^2 &=& a^2 + b^2 - 2 a b. +\end{eqnarray} + + + +Quisque in dui porttitor, finibus lacus quis, pretium dui. Nullam vitae augue ligula. Nulla vel nisl tincidunt, ullamcorper enim nec, sollicitudin justo. Praesent vitae ex elit. Sed placerat velit a lectus fringilla, in tempor lorem efficitur. 
Maecenas mattis $n = 1,\dots,m_i$, tellus ipsum, a laoreet quam aliquam eu. Donec eu interdum lectus. Morbi suscipit nibh (\ref{eq:eq1})sed enim interdum, eget aliquam odio ullamcorper. Sed at mauris maximus, mollis mi ut, dapibus mauris. Morbi efficitur ultricies massa, et vulputate est pellentesque nec $\alpha_i^n$. Curabitur rutrum ullamcorper efficitur. Curabitur vestibulum consequat orci quis dapibus. Ut a ullamcorper tellus. Proin fermentum malesuada dui ac mollis. Mauris volutpat finibus lacus et placerat. \\ +{\color{blue}Equation non numérotée : } +\begin{equation*} +( a - b )^2 = a^2 + b^2 - 2 a b. +\end{equation*} + +\section{Seconde section} + +\subsection{Première sous-section} + +Curabitur nulla libero, viverra at tempus vitae, ornare ac metus. Nullam sed imperdiet erat, a vestibulum arcu. Sed non nisi cursus, sagittis libero in, pellentesque est. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Interdum et malesuada fames ac ante ipsum primis in faucibus. Sed congue turpis ligula, et tristique neque scelerisque sit amet. Vivamus neque est, pharetra eu libero at, tincidunt feugiat augue. + +\begin{definition} +On appelle... +\end{definition} + + +\subsection{Seconde sous-section} + +\begin{theorem} +Soit une fonction $\Phi$... +\end{theorem} + +\begin{lemma} +Soit $x \in \mathbf{R}$,... +\end{lemma} + +\begin{remark} +On remarque que... +\end{remark} + +Morbi mollis sapien nisi, non fringilla felis placerat vitae. Donec ac enim justo. Cras placerat purus vel ex volutpat, eget placerat lorem fermentum. Duis quam risus, eleifend quis iaculis eu, efficitur at nisl. Pellentesque pharetra dui nisi, sit amet sodales mi hendrerit nec. Nullam et gravida lorem, ut faucibus dolor. Mauris bibendum pulvinar tortor, eget consequat nulla luctus eget. 
+ +\begin{figure}[t] + \begin{center} + \includegraphics[width=0.295\linewidth]{terre.png} + + \end{center} + \caption{La Terre} +\label{fig:fig1} +\end{figure} + +{\color{blue}Bien penser à citer et à commenter toutes les figures du texte : (Figure \ref{fig:fig1})} +Quisque in dui porttitor, finibus lacus quis, pretium dui. Nullam vitae augue ligula. Nulla vel nisl tincidunt, ullamcorper enim nec, sollicitudin justo. Praesent vitae ex elit. Sed placerat velit a lectus fringilla, in tempor lorem efficitur. Maecenas mattis tellus ipsum, a laoreet quam aliquam eu. Donec eu interdum lectus. Morbi suscipit nibh sed enim interdum, eget aliquam odio ullamcorper. Sed at + +\section{Conclusion} + +Curabitur nulla libero, viverra at tempus vitae, ornare ac metus. Nullam sed imperdiet erat, a vestibulum arcu. Sed non nisi cursus, sagittis libero in, pellentesque est. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Interdum et malesuada fames ac ante ipsum primis in faucibus. Sed congue turpis ligula, et tristique neque scelerisque sit amet. Vivamus neque est, pharetra eu libero at, tincidunt feugiat augue. + +\section*{Remerciements} + +Les auteurs de ce document remercient vivement... + + +\begin{thebibliography}{99} + +\bibitem{helbing09} +D. Helbing, +A. Johansson, +Pedestrian, Crowd and Evacuation Dynamics, +\emph{Encyclopedia of Complexity and Systems Science}, +pp. 6476--6495, Springer New York. + +\bibitem{SchadCA09} +A. Schadschneider, A. Seyfried, Empirical results for pedestrian dynamics and their implications for cellular automata models, +in``Pedestrian Behavior'', Ed.: H. Timmermans, Emerald, p. 27 (2009). + +\bibitem{Zuriguel09} +I. Zuriguel, J. Olivares, J.M. Pastor, C. Mart\'in-G\'omez, L.M. Ferrer, J.J. Ramos, A. Garcimart\'in, +Effect of obstacle position in the flow of sheep through a narrow door, +\emph{Phys. Rev. E}, 94. 
+ +\end{thebibliography} + + + + +\end{document} diff --git a/track/Sylvain/ImmersionTemplate/ImmersionTemplate/terre.png b/track/Sylvain/ImmersionTemplate/ImmersionTemplate/terre.png new file mode 100644 index 0000000..0b6a71e Binary files /dev/null and b/track/Sylvain/ImmersionTemplate/ImmersionTemplate/terre.png differ diff --git a/track/Sylvain/stage_Noham.zip b/track/Sylvain/stage_Noham.zip new file mode 100644 index 0000000..80190dd Binary files /dev/null and b/track/Sylvain/stage_Noham.zip differ diff --git a/track/Sylvain/stage_Noham/stage_Noham/HomographyCalibration2/HomoGraphie/.idea/HomoGraphie.iml b/track/Sylvain/stage_Noham/stage_Noham/HomographyCalibration2/HomoGraphie/.idea/HomoGraphie.iml new file mode 100644 index 0000000..6711606 --- /dev/null +++ b/track/Sylvain/stage_Noham/stage_Noham/HomographyCalibration2/HomoGraphie/.idea/HomoGraphie.iml @@ -0,0 +1,11 @@ + + + + + + + + + + \ No newline at end of file diff --git a/track/Sylvain/stage_Noham/stage_Noham/HomographyCalibration2/HomoGraphie/.idea/misc.xml b/track/Sylvain/stage_Noham/stage_Noham/HomographyCalibration2/HomoGraphie/.idea/misc.xml new file mode 100644 index 0000000..65531ca --- /dev/null +++ b/track/Sylvain/stage_Noham/stage_Noham/HomographyCalibration2/HomoGraphie/.idea/misc.xml @@ -0,0 +1,4 @@ + + + + \ No newline at end of file diff --git a/track/Sylvain/stage_Noham/stage_Noham/HomographyCalibration2/HomoGraphie/.idea/modules.xml b/track/Sylvain/stage_Noham/stage_Noham/HomographyCalibration2/HomoGraphie/.idea/modules.xml new file mode 100644 index 0000000..eeab053 --- /dev/null +++ b/track/Sylvain/stage_Noham/stage_Noham/HomographyCalibration2/HomoGraphie/.idea/modules.xml @@ -0,0 +1,8 @@ + + + + + + + + \ No newline at end of file diff --git a/track/Sylvain/stage_Noham/stage_Noham/HomographyCalibration2/HomoGraphie/.idea/workspace.xml b/track/Sylvain/stage_Noham/stage_Noham/HomographyCalibration2/HomoGraphie/.idea/workspace.xml new file mode 100644 index 0000000..398935b --- 
/dev/null +++ b/track/Sylvain/stage_Noham/stage_Noham/HomographyCalibration2/HomoGraphie/.idea/workspace.xml @@ -0,0 +1,289 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + pts_dst + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 1563365442088 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/track/Sylvain/stage_Noham/stage_Noham/HomographyCalibration2/testCalibration/calibration.py b/track/Sylvain/stage_Noham/stage_Noham/HomographyCalibration2/testCalibration/calibration.py new file mode 100644 index 0000000..5ec8189 --- /dev/null +++ b/track/Sylvain/stage_Noham/stage_Noham/HomographyCalibration2/testCalibration/calibration.py @@ -0,0 +1,49 @@ +import numpy as np +import cv2 +import glob + +# termination criteria +criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001) + +# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0) +objp = np.zeros((6*9,3), np.float32) +objp[:,:2] = np.mgrid[0:9,0:6].T.reshape(-1,2) + +# Arrays to store object points and image points from all the images. +objpoints = [] # 3d point in real world space +imgpoints = [] # 2d points in image plane. 
+ +images = glob.glob('*.png') + +fname="img_001659.png" +img = cv2.imread(fname) +gray = cv2.cvtColor(img,cv2.COLOR_RGB2GRAY) + +# Find the chess board corners +ret, corners = cv2.findChessboardCorners(gray, (9,6),None) + +# If found, add object points, image points (after refining them) +if ret == True: + objpoints.append(objp) + corners2 = cv2.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria) + imgpoints.append(corners2) + + # Draw and display the corners + img = cv2.drawChessboardCorners(img, (9,6), corners2,ret) + cv2.imshow('img',img) + cv2.waitKey(500) + print(objpoints) + print(imgpoints) + ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1],None,None) + + img = cv2.imread('img_000259.png') + h, w = img.shape[:2] + newcameramtx, roi=cv2.getOptimalNewCameraMatrix(mtx,dist,(w,h),1,(w,h)) + # undistort + dst = cv2.undistort(img, mtx, dist, None, newcameramtx) + + # crop the image + x,y,w,h = roi + dst = dst[y:y+h, x:x+w] + cv2.imwrite('calibresult.png',dst) + cv2.destroyAllWindows() diff --git a/track/Sylvain/stage_Noham/stage_Noham/HomographyCalibration2/testCalibration/calibresult.png b/track/Sylvain/stage_Noham/stage_Noham/HomographyCalibration2/testCalibration/calibresult.png new file mode 100644 index 0000000..8266706 Binary files /dev/null and b/track/Sylvain/stage_Noham/stage_Noham/HomographyCalibration2/testCalibration/calibresult.png differ diff --git a/track/Sylvain/stage_Noham/stage_Noham/HomographyCalibration2/testCalibration/calibresult2.png b/track/Sylvain/stage_Noham/stage_Noham/HomographyCalibration2/testCalibration/calibresult2.png new file mode 100644 index 0000000..ee388eb Binary files /dev/null and b/track/Sylvain/stage_Noham/stage_Noham/HomographyCalibration2/testCalibration/calibresult2.png differ diff --git a/track/Sylvain/stage_Noham/stage_Noham/HomographyCalibration2/testCalibration/img_000236.png b/track/Sylvain/stage_Noham/stage_Noham/HomographyCalibration2/testCalibration/img_000236.png new 
file mode 100644 index 0000000..fae03ad Binary files /dev/null and b/track/Sylvain/stage_Noham/stage_Noham/HomographyCalibration2/testCalibration/img_000236.png differ diff --git a/track/Sylvain/stage_Noham/stage_Noham/HomographyCalibration2/testCalibration/img_000238.png b/track/Sylvain/stage_Noham/stage_Noham/HomographyCalibration2/testCalibration/img_000238.png new file mode 100644 index 0000000..a7b43d8 Binary files /dev/null and b/track/Sylvain/stage_Noham/stage_Noham/HomographyCalibration2/testCalibration/img_000238.png differ diff --git a/track/Sylvain/stage_Noham/stage_Noham/HomographyCalibration2/testCalibration/img_000259.png b/track/Sylvain/stage_Noham/stage_Noham/HomographyCalibration2/testCalibration/img_000259.png new file mode 100644 index 0000000..0c36524 Binary files /dev/null and b/track/Sylvain/stage_Noham/stage_Noham/HomographyCalibration2/testCalibration/img_000259.png differ diff --git a/track/Sylvain/stage_Noham/stage_Noham/HomographyCalibration2/testCalibration/img_001659.png b/track/Sylvain/stage_Noham/stage_Noham/HomographyCalibration2/testCalibration/img_001659.png new file mode 100644 index 0000000..df7339d Binary files /dev/null and b/track/Sylvain/stage_Noham/stage_Noham/HomographyCalibration2/testCalibration/img_001659.png differ diff --git a/track/Sylvain/stage_Noham/stage_Noham/HomographyCalibration2/testCalibration/img_001670.png b/track/Sylvain/stage_Noham/stage_Noham/HomographyCalibration2/testCalibration/img_001670.png new file mode 100644 index 0000000..8eb789b Binary files /dev/null and b/track/Sylvain/stage_Noham/stage_Noham/HomographyCalibration2/testCalibration/img_001670.png differ diff --git a/track/Sylvain/stage_Noham/stage_Noham/HomographyCalibration2/testCalibration/img_837.png b/track/Sylvain/stage_Noham/stage_Noham/HomographyCalibration2/testCalibration/img_837.png new file mode 100644 index 0000000..c0faa1f Binary files /dev/null and 
b/track/Sylvain/stage_Noham/stage_Noham/HomographyCalibration2/testCalibration/img_837.png differ diff --git a/track/Sylvain/stage_Noham/stage_Noham/frame.png b/track/Sylvain/stage_Noham/stage_Noham/frame.png new file mode 100644 index 0000000..55a165d Binary files /dev/null and b/track/Sylvain/stage_Noham/stage_Noham/frame.png differ diff --git a/track/Sylvain/stage_Noham/stage_Noham/frame_ground_pts.png b/track/Sylvain/stage_Noham/stage_Noham/frame_ground_pts.png new file mode 100644 index 0000000..a055423 Binary files /dev/null and b/track/Sylvain/stage_Noham/stage_Noham/frame_ground_pts.png differ diff --git a/track/Sylvain/stage_Noham/stage_Noham/gmap.png b/track/Sylvain/stage_Noham/stage_Noham/gmap.png new file mode 100644 index 0000000..7bc2e06 Binary files /dev/null and b/track/Sylvain/stage_Noham/stage_Noham/gmap.png differ diff --git a/track/Sylvain/stage_Noham/stage_Noham/image_vide_pts.png b/track/Sylvain/stage_Noham/stage_Noham/image_vide_pts.png new file mode 100644 index 0000000..0afcfee Binary files /dev/null and b/track/Sylvain/stage_Noham/stage_Noham/image_vide_pts.png differ diff --git a/track/Sylvain/stage_Noham/stage_Noham/image_vide_pts_labels.png b/track/Sylvain/stage_Noham/stage_Noham/image_vide_pts_labels.png new file mode 100644 index 0000000..8698dd1 Binary files /dev/null and b/track/Sylvain/stage_Noham/stage_Noham/image_vide_pts_labels.png differ diff --git a/track/Sylvain/stage_Noham/stage_Noham/projection_sol.py b/track/Sylvain/stage_Noham/stage_Noham/projection_sol.py new file mode 100644 index 0000000..3f4175d --- /dev/null +++ b/track/Sylvain/stage_Noham/stage_Noham/projection_sol.py @@ -0,0 +1,203 @@ +import cv2 +import numpy as np +import argparse +import time +import os +import matplotlib +import matplotlib.pyplot as plt +from matplotlib.patches import Ellipse, Circle, Rectangle, Polygon, Arrow +from matplotlib.lines import Line2D +from matplotlib.collections import EllipseCollection, LineCollection +import sys +from 
scipy.optimize import least_squares +from scipy.spatial import cKDTree +from imageio import imread +from matplotlib.offsetbox import TextArea, DrawingArea, OffsetImage, AnnotationBbox +from shapely.geometry import Point +import geopandas as gpd +import cartopy +import cartopy.crs as ccrs +import cameratransform as ct +import geodatasets + +img = cv2.imread("frame_ground_pts.png") +nh,nw,_ = img.shape +## img : b g r + +#mask = (img[:,:,0]==0)*(img[:,:,1]==0)*(img[:,:,2]==255) +#ind_px_ground_pts = np.where(mask) +#px_ground_pts = np.vstack([ind_px_ground_pts[1],ind_px_ground_pts[0]]).T + +tab = np.array([ + [215, 257, 41.94076496048223, -85.00154950929712], + [286, 310, 41.94073282540695, -85.00133550964574], + [532, 496, 41.94066182925292, -85.00090550095656], + [359, 462, 41.94064090405561, -85.00098487171928], + [391, 489, 41.94063193611181, -85.00093564175253], + [471, 428, 41.940688733067965, -85.00101802659485], + [536, 433, 41.94069994298754, -85.00099391395807], + [242, 528, 41.94058410705547, -85.00092057135889], + [636, 486, 41.9406894803946, -85.00089243994931], + [243, 552, 41.940578130109905, -85.00088875077661], + [279, 548, 41.9405866094118, -85.00088875077661], + [316, 543, 41.940595088712534, -85.00088539801524], + [347, 540, 41.94060157288293, -85.0008820452539], + [382, 536, 41.94061055096393, -85.00088070414937], + [414, 532, 41.94061803269714, -85.00087869249255], + [447, 526, 41.940624516865206, -85.00087668083574], + [479, 526, 41.94063449250709, -85.00087198696986], + [1131, 495, 41.94081056233172, -85.00078883849507], + [1286, 561, 41.94079011232066, -85.00068557344535], + [1429, 652, 41.94075918790118, -85.00059236667968], + [1410, 702, 41.94072826346671, -85.00056554458887], + [1389, 734, 41.940709309773645, -85.00054811022981], + [1233, 754, 41.940672399934165, -85.00055816851388], + [1078, 778, 41.940637485201485, -85.0005668856934], + [945, 803, 41.94060755827253, -85.00057359121611], + [1164, 695, 41.94068486947693, 
-85.00060242496377], + [892, 556, 41.94070432195875, -85.00075866364288], + [964, 521, 41.940746219591766, -85.00078816794279], + [1637, 895, 41.9406788840967, -85.00044752738918], + [1354, 998, 41.940611049748284, -85.00044819794145], + [1206, 976, 41.94059758262643, -85.00047233782321], + [846, 906, 41.94056915202646, -85.00054341636391], + [944, 999, 41.94056216907017, -85.00049848936177], + [486, 925, 41.94051977253203, -85.00057962618882], + [339, 838, 41.94052027131499, -85.00064332865455], + [98, 826, 41.94048934676464, -85.00068356179082], + [34, 672, 41.94050630539088, -85.00079621457232], + [164, 538, 41.94056466298198, -85.00091758453335], + [131, 593, 41.94054570924032, -85.00086528145621], + [150, 619, 41.94053872628142, -85.00083108329041], + [172, 660, 41.94053074575605, -85.00078883849733], + [1341, 694, 41.940719285401634, -85.00057560287796], + [1313, 701, 41.94071230246175, -85.00057627343024], + [1281, 706, 41.940704321958115, -85.00057828508704], + [1249, 708, 41.94069534389031, -85.00058230840068], + [265, 379, 41.94066890846167, -85.00116770052296], + [287, 399, 41.94066092795256, -85.00111807965492]]) +px_ground_pts = tab[:,:2].astype(int) + +# lat long +# [216, 258] [144,172] [41.94076551439789, -85.00155042979091] 65663.889 45130.928 MGRS / UTMREF (WGS84) +# [1287, 561] [857,372] [41.9407914510061, -85.00068541739631] +# [1354, 999] [905,668] [41.94061008025459, -85.00044835086464] +# [35, 673] [25,449] [41.94050633514897, -85.0007970380291] +#ind_pts = [0, 35, 46, 91] +#px_ground_pts = px_ground_pts[ind_pts,:] + +#real_ground_pts = np.array([[41.94076551439789, -85.00155042979091], [41.9407914510061, -85.00068541739631], [41.94061008025459, -85.00044835086464], [41.94050633514897, -85.0007970380291] ]) +#real_ground_pts = tab[:,2:] + +usa = gpd.read_file(geodatasets.get_path('geoda.natregimes')) +print("usa.crs =",usa.crs) + +ax = usa.plot() +ax.set_title("WGS84 (lat/lon)"); +# Reproject to Albers contiguous USA +usa = 
usa.to_crs("ESRI:102003") +ax = usa.plot() +ax.set_title("NAD 1983 Albers contiguous USA"); + +geometry = gpd.points_from_xy(tab[:,3], tab[:,2]) # long, lat +gts = gpd.GeoDataFrame({"lat": tab[:,2], "lon": tab[:,3]}, geometry=geometry, crs="EPSG:4326") +gts = gts.to_crs("ESRI:102003") +X = gts["geometry"].x +Y = gts["geometry"].y +gts["X"] = X +gts["Y"] = Y +print("gts =",gts.head(10)) + +real_ground_pts = gts[["X","Y"]].values + +fig, ax = plt.subplots() +usa.plot(ax=ax) +ax.scatter(X,Y,color="red") +ax.set_title("pts coord") +ax.set_xlim([903530.0, 903660.0]) +ax.set_ylim([549816.0, 549865.0]) + +img_pts = img.copy() +for i,pt in enumerate(px_ground_pts): + img_pts = cv2.circle(img_pts, pt, 1, (0,0,255), 1) + txt = str(i)+": "+str(pt) + img_pts = cv2.putText(img_pts, txt, pt, cv2.FONT_HERSHEY_SIMPLEX, + 0.3, (0,255,0), 1, cv2.LINE_AA) + + +## parametres caméra pour initialiser la minimisation de la "cost" fonction +f = 3.2 # en mm +sensor_size = (6.17, 4.55) # en mm +image_size = (nw,nh) # en px +elevation = 10 # en m +angle = 45 # inclinaison de la caméra. (0° : caméra orientée vers le bas, 90° : caméra orientée parallèlement au sol, 180° : caméra orientée vers le haut) +heading_deg = 45 # la direction dans laquelle la caméra regarde. (0° : la caméra est orientée « nord », 90° : est, 180° : sud, 270° : ouest) +roll_deg = 0 # rotation de l'image. 
(0°: camera image is not rotated (landscape format), 90°: camera image is in portrait format, 180°: camera is in upside down landscape format) + + +#px_ground_pts = [ [], [] ] +#ground_pts = [ [], [] ] + + + + +## Find camera parameters: [focal,sensorx,sensory,elevation,angle] +def fct_cost(param): + #print("cost param : ",param) + f,sx,sy,e,a,b,c = param + camloc = ct.Camera( + ct.RectilinearProjection( + focallength_mm=f, + sensor=(sx,sy), + image=image_size + ), + ct.SpatialOrientation( + elevation_m=e, + tilt_deg=a, + heading_deg=b, + roll_deg=c + ) + ) + pts = [] + for pt in px_ground_pts: + gpt = camloc.spaceFromImage(pt) + pts.append(gpt) + pts = np.array(pts) + #print(pts) + #print(np.linalg.norm( real_ground_pts-pts[:,:2], axis=1 )**2) + return np.linalg.norm( real_ground_pts-pts[:,:2], axis=1 ) + +param = [f, sensor_size[0], sensor_size[1], elevation, angle, heading_deg , roll_deg] +#cost = fct_cost(param) +#print("cost =",cost) + +res = least_squares(fct_cost, param) +print(res) + +# initialize the camera +cam = ct.Camera(ct.RectilinearProjection(focallength_mm=res.x[0], + sensor=(res.x[1],res.x[2]), + image=image_size), + ct.SpatialOrientation(elevation_m=res.x[3], + tilt_deg=res.x[4], + heading_deg = res.x[5], + roll_deg = res.x[6] ) + ) + +test_ground_pts = [] +for pt in px_ground_pts: + gpt = cam.spaceFromImage(pt) + test_ground_pts.append(gpt) +test_ground_pts = np.array(test_ground_pts) +print("test_ground_pts =",test_ground_pts) + +plt.figure() +plt.plot(test_ground_pts[:,0], test_ground_pts[:,1],linewidth=0, marker="o") + + +cv2.imshow("pts", img_pts) +cv2.waitKey(0) +cv2.destroyAllWindows() + +plt.show() +sys.exit() diff --git a/track/Sylvain/stage_Noham/stage_Noham/projection_sol2.py b/track/Sylvain/stage_Noham/stage_Noham/projection_sol2.py new file mode 100644 index 0000000..46029aa --- /dev/null +++ b/track/Sylvain/stage_Noham/stage_Noham/projection_sol2.py @@ -0,0 +1,156 @@ +import cv2 +import numpy as np +import argparse +import time 
import os
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse, Circle, Rectangle, Polygon, Arrow
from matplotlib.lines import Line2D
from matplotlib.collections import EllipseCollection, LineCollection
import sys
from scipy.optimize import least_squares
from scipy.spatial import cKDTree
from imageio import imread
from matplotlib.offsetbox import TextArea, DrawingArea, OffsetImage, AnnotationBbox
from shapely.geometry import Point
import geopandas as gpd
import cartopy
import cartopy.crs as ccrs
import cameratransform as ct
import geodatasets

img = cv2.imread("track/Sylvain/stage_Noham/stage_Noham/image_vide_pts.png")
nh, nw, _ = img.shape
# OpenCV channel order is b, g, r.

# Pure-red pixels mark the primary ground points, pure-blue pixels a
# second set of reference points.
mask = (img[:, :, 0] == 0) * (img[:, :, 1] == 0) * (img[:, :, 2] == 255)
ind_px_ground_pts = np.where(mask)
px_ground_pts = np.vstack([ind_px_ground_pts[1], ind_px_ground_pts[0]]).T  # (x, y)

mask2 = (img[:, :, 0] == 255) * (img[:, :, 1] == 0) * (img[:, :, 2] == 0)
ind_px_ground_pts2 = np.where(mask2)
px_ground_pts2 = np.vstack([ind_px_ground_pts2[1], ind_px_ground_pts2[0]]).T

# Annotate every primary point with its index for manual identification.
img_pts = img.copy()
for i, pt in enumerate(px_ground_pts):
    img_pts = cv2.circle(img_pts, pt, 1, (0, 0, 255), 1)
    img_pts = cv2.putText(img_pts, str(i) + ": " + str(pt), pt,
                          cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 255, 0), 1, cv2.LINE_AA)

# [index_a, index_b, measured ground distance in metres]
distances = np.array([
    [0, 8, 37.1], [1, 7, 10.0], [2, 4, 6.8], [2, 5, 28.3], [2, 10, 17.7],
    [2, 12, 19.5], [3, 11, 20.4], [4, 7, 3.8], [5, 9, 9.1], [5, 13, 12.7],
    [6, 11, 11.9], [9, 10, 7.0], [9, 13, 9.2], [9, 15, 16.3], [10, 12, 5.3],
    [11, 16, 13.6], [14, 20, 16.1], [16, 20, 9.7], [17, 23, 18.4],
    [17, 25, 16.0], [18, 19, 11.6], [19, 20, 16.0], [19, 24, 8.6],
    [22, 23, 6.0], [22, 25, 3.8], [23, 24, 12.2],
])
for dd in distances:
    img_pts = cv2.line(img_pts, px_ground_pts[int(dd[0]), :],
                       px_ground_pts[int(dd[1]), :], (255, 255, 0), 2)

# cv2.imwrite("image_vide_pts_labels.png", img_pts)
# cv2.imshow("pts", img_pts)
# cv2.waitKey(0)
# cv2.destroyAllWindows()

# Initial camera parameters seeding the cost-function minimisation.
f = 3.2                      # focal length, mm
sensor_size = (6.17, 4.55)   # sensor size, mm
image_size = (nw, nh)        # image size, px
elevation = 10               # camera height above ground, m
angle = 45                   # tilt: 0° down, 90° horizontal, 180° up
heading_deg = 45             # 0° north, 90° east, 180° south, 270° west
roll_deg = 0                 # 0° landscape, 90° portrait, 180° upside-down


def fct_cost(param):
    """Residuals: projected pairwise distances minus measured distances (m)."""
    focal, sx, sy, elev, tilt, heading, roll = param
    camloc = ct.Camera(
        ct.RectilinearProjection(focallength_mm=focal, sensor=(sx, sy), image=image_size),
        ct.SpatialOrientation(elevation_m=elev, tilt_deg=tilt,
                              heading_deg=heading, roll_deg=roll),
    )
    pts = np.array([camloc.spaceFromImage(px) for px in px_ground_pts])
    return np.array([np.linalg.norm(pts[int(a)] - pts[int(b)]) - d
                     for a, b, d in distances])


param = [f, sensor_size[0], sensor_size[1], elevation, angle, heading_deg, roll_deg]
res = least_squares(fct_cost, param)
print(res)

# Rebuild the camera from the fitted parameters.
cam = ct.Camera(
    ct.RectilinearProjection(focallength_mm=res.x[0], sensor=(res.x[1], res.x[2]),
                             image=image_size),
    ct.SpatialOrientation(elevation_m=res.x[3], tilt_deg=res.x[4],
                          heading_deg=res.x[5], roll_deg=res.x[6]),
)

space_pts = np.array([cam.spaceFromImage(px) for px in px_ground_pts])
space_pts2 = np.array([cam.spaceFromImage(px) for px in px_ground_pts2])
# print("space_pts2 =", space_pts2)

plt.figure()
plt.scatter(space_pts[:, 0], space_pts[:, 1], color="red", s=2)
# plt.scatter(space_pts2[:, 0], space_pts2[:, 1], color="blue", s=1)
plt.plot([28.569, 51.681], [26.665, 89.904], color='blue', linestyle='-', linewidth=1)
for dd in distances:
    plt.plot([space_pts[int(dd[0]), 0], space_pts[int(dd[1]), 0]],
             [space_pts[int(dd[0]), 1], space_pts[int(dd[1]), 1]], color="green")
plt.axis("equal")

plt.show()

# --- track/Sylvain/stage_Noham/stage_Noham/test_camera.py ---
import cameratransform as ct
import matplotlib.pyplot as plt

im = plt.imread("gmap.png")
nh, nw, _ = im.shape

# Intrinsic camera parameters.
f = 6.2                      # focal length, mm
sensor_size = (6.17, 4.55)   # sensor size, mm
image_size = (nw, nh)        # image size, px

cam = ct.Camera(ct.RectilinearProjection(focallength_mm=f,
                                         sensor=sensor_size,
                                         image=image_size),
                ct.SpatialOrientation(elevation_m=10,
                                      tilt_deg=45))

# Render a bird's-eye view of the image.
top_im = cam.getTopViewOfImage(im, [-150, 150, 50, 300], scaling=0.5, do_plot=True)
plt.xlabel("x position in m")
plt.ylabel("y position in m")

plt.show()

# --- track/calibrated.py ---
import cv2
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import least_squares
from imageio import imread
import cameratransform as ct

img = cv2.imread("track/Sylvain/stage_Noham/stage_Noham/image_vide_pts.png")
nh, nw, _ = img.shape

# Parameter vector previously fitted elsewhere:
# [focal_mm, sensor_x_mm, sensor_y_mm, elevation_m, tilt_deg, heading_deg, roll_deg]
res = np.array([3.99594676, 3.53413555, 4.55, 16.41739973, 74.96395791, 49.11271189, 2.79384615])
image_size = (nw, nh)
cam = ct.Camera(ct.RectilinearProjection(focallength_mm=res[0], sensor=(res[1], res[2]), image=image_size),
                ct.SpatialOrientation(elevation_m=res[3], tilt_deg=res[4], heading_deg=res[5], roll_deg=res[6]))

# Red pixels in the reference image mark the ground points.
mask = (img[:, :, 0] == 0) * (img[:, :, 1] == 0) * (img[:, :, 2] == 255)
ind_px_ground_pts = np.where(mask)
print('ind_px_ground_pts: ', ind_px_ground_pts)
# np.where returns (rows, cols); stack as (x, y) pixel coordinates.
px_ground_pts = np.vstack([ind_px_ground_pts[1], ind_px_ground_pts[0]]).T
print(px_ground_pts)

# Project every pixel point onto the ground plane with the calibrated camera.
space_pts = []
for pt in px_ground_pts:
    space_pts.append(cam.spaceFromImage(pt))
space_pts = np.array(space_pts)

plt.figure()
plt.scatter(space_pts[:, 0], space_pts[:, 1], color="red", s=2)
plt.axis("equal")

plt.draw()
plt.pause(1)

# --- track/extract + calibrated.py ---
import pandas as pd
import numpy as np
import cv2
import matplotlib.pyplot as plt
from scipy.optimize import least_squares
from imageio import imread
import cameratransform as ct

cap = cv2.VideoCapture('cams/new/cut2.mp4')
folder_path = "track/expgood/labels/"
name = 'cut2'
fps = 780  # number of per-frame label files to read

# One YOLO-style label file per frame: <name>_<frame>.txt
allfiles = []
for i in range(1, fps + 1):
    allfiles.append(folder_path + name + '_' + str(i) + '.txt')

# Display size. (The original assigned 1280x720 first and immediately
# overwrote it with 1920x1080; the dead assignments are removed.)
display_width = 1920
display_height = 1080

width = 1920
height = 1080

frame_nb = 0

bleu = (255, 0, 0)
vert = (0, 255, 0)

# Calibrated camera parameters fitted elsewhere:
# [focal_mm, sensor_x_mm, sensor_y_mm, elevation_m, tilt_deg, heading_deg, roll_deg]
res = np.array([3.99594676, 3.53413555, 4.55, 16.41739973, 74.96395791, 49.11271189, 2.79384615])
image_size = (width, height)
cam = ct.Camera(ct.RectilinearProjection(focallength_mm=res[0], sensor=(res[1], res[2]), image=image_size),
                ct.SpatialOrientation(elevation_m=res[3], tilt_deg=res[4], heading_deg=res[5], roll_deg=res[6]))

if not cap.isOpened():
    print("Error opening video stream or file")

while cap.isOpened():
    ret, frame = cap.read()
    if ret:
        df = pd.read_csv(allfiles[frame_nb], header=None, sep=' ')
        ground_px = []  # (x, y) pixel centre of each accepted detection
        for index, row in df.iterrows():
            class_id, center_x, center_y, bbox_width, bbox_height, object_id = row

            # Convert normalised YOLO coordinates to pixels.
            center_x = int(center_x * width)
            center_y = int(center_y * height)
            bbox_width = int(bbox_width * width)
            bbox_height = int(bbox_height * height)

            top_left_x = int(center_x - bbox_width / 2)
            top_left_y = int(center_y - bbox_height / 2)
            bottom_right_x = int(center_x + bbox_width / 2)
            bottom_right_y = int(center_y + bbox_height / 2)

            # Keep boxes between the two hand-picked image lines
            # (19;112)-(636;714) and (86;86)-(1087;715).
            # NOTE(review): the second slope uses 1097 where the comment
            # says 1087 — kept as-is to preserve behaviour; verify.
            if (((112 - 714) / (19 - 636)) * top_left_x + 112 - ((112 - 714) / (19 - 636)) * 19 > top_left_y) and (((86 - 715) / (86 - 1097)) * bottom_right_x + 112 - ((86 - 715) / (86 - 1097)) * 86 < bottom_right_y):

                label = f'Class: {int(class_id)}, Object ID: {int(object_id)}'
                cv2.putText(frame, label, (top_left_x, top_left_y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, vert, 1)

                # Centre of the bounding box.
                center_x = (top_left_x + bottom_right_x) // 2
                center_y = (top_left_y + bottom_right_y) // 2
                cv2.circle(frame, (center_x, center_y), 5, vert, -1)

                # BUG FIX: the original flattened all coordinates into one
                # list and then did np.vstack([lst[1], lst[0]]).T, which
                # kept only the FIRST detection — with x and y swapped.
                # Collect (x, y) pairs instead, one per detection.
                ground_px.append((center_x, center_y))
            else:
                pass
        print('ind_px_ground_pts: ', len(ground_px))

        px_ground_pts = np.array(ground_px)

        # Project every accepted detection onto the ground plane.
        space_pts = []
        for pt in px_ground_pts:
            space_pts.append(cam.spaceFromImage(pt))
        space_pts = np.array(space_pts)

        # resized_frame = cv2.resize(frame, (display_width, display_height))
        # cv2.imshow('Frame', resized_frame)

#######################
        # Guard against frames with no accepted detection (the original
        # crashed on space_pts[:, 0] for an empty array).
        if len(space_pts):
            plt.scatter(space_pts[:, 0], space_pts[:, 1], color="red", s=2)
        plt.plot([28.569, 51.681], [26.665, 89.904], color='blue', linestyle='-', linewidth=1)
        # plt.axis("equal")
plt.xlim([0, 100]) + plt.ylim([0, 150]) + plt.draw() + plt.pause(0.0000000000001) + plt.clf() +###################### + + if cv2.waitKey(25) & 0xFF == ord('q'):break + frame_nb = frame_nb + 1 + else:break + +cap.release() +cv2.destroyAllWindows() \ No newline at end of file diff --git a/track/extract.py b/track/extract.py index 944b1cb..3f9f6ac 100644 --- a/track/extract.py +++ b/track/extract.py @@ -1,8 +1,6 @@ -import os import pandas as pd import numpy as np import cv2 -import time cap = cv2.VideoCapture('cams/new/cut2.mp4') folder_path = "track/expgood/labels/" @@ -39,8 +37,6 @@ while cap.isOpened(): for index, row in df.iterrows(): class_id, center_x, center_y, bbox_width, bbox_height, object_id = row - - center_x = int(center_x * width) center_y = int(center_y * height) bbox_width = int(bbox_width * width) @@ -53,15 +49,25 @@ while cap.isOpened(): # (19;112) à (636;714) et (86;86) à (1087;715) if (((112-714)/(19-636)) * top_left_x + 112 - ((112-714)/(19-636)) *19 > top_left_y ) and (((86-715)/(86-1097)) * bottom_right_x + 112 - ((86-715)/(86-1097)) *86 < bottom_right_y ): - cv2.rectangle(frame, (top_left_x, top_left_y), (bottom_right_x, bottom_right_y), vert, 2) + + # cv2.rectangle(frame, (top_left_x, top_left_y), (bottom_right_x, bottom_right_y), vert, 2) label = f'Class: {int(class_id)}, Object ID: {int(object_id)}' cv2.putText(frame, label, (top_left_x, top_left_y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, vert, 1) - else : - cv2.rectangle(frame, (top_left_x, top_left_y), (bottom_right_x, bottom_right_y), bleu, 2) - label = f'Class: {int(class_id)}, Object ID: {int(object_id)}' - cv2.putText(frame, label, (top_left_x, top_left_y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, bleu, 1) + # obetnir le centre du rectangle + center_x = (top_left_x + bottom_right_x) // 2 + center_y = (top_left_y + bottom_right_y) // 2 + cv2.circle(frame, (center_x, center_y), 5, vert, -1) + + + + else : + pass + # cv2.rectangle(frame, (top_left_x, top_left_y), (bottom_right_x, bottom_right_y), 
bleu, 2) + + # label = f'Class: {int(class_id)}, Object ID: {int(object_id)}' + # cv2.putText(frame, label, (top_left_x, top_left_y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, bleu, 1) resized_frame = cv2.resize(frame, (display_width, display_height)) diff --git a/track/extract_V2.py b/track/extract_V2.py new file mode 100644 index 0000000..bbe1a62 --- /dev/null +++ b/track/extract_V2.py @@ -0,0 +1,155 @@ +import cv2 +import numpy as np +import argparse +import time +import os +import matplotlib +import matplotlib.pyplot as plt +from matplotlib.patches import Ellipse, Circle, Rectangle, Polygon, Arrow +from matplotlib.lines import Line2D +from matplotlib.collections import EllipseCollection, LineCollection +import sys +from scipy.optimize import least_squares +from scipy.spatial import cKDTree +from imageio import imread +from matplotlib.offsetbox import TextArea, DrawingArea, OffsetImage, AnnotationBbox +from shapely.geometry import Point +import geopandas as gpd +import cartopy +import cartopy.crs as ccrs +import cameratransform as ct +import geodatasets + +img = cv2.imread("track/Sylvain/stage_Noham/stage_Noham/image_vide_pts.png") +nh,nw,_ = img.shape +## img : b g r + +mask = (img[:,:,0]==0)*(img[:,:,1]==0)*(img[:,:,2]==255) +ind_px_ground_pts = np.where(mask) +px_ground_pts = np.vstack([ind_px_ground_pts[1],ind_px_ground_pts[0]]).T + +mask2 = (img[:,:,0]==255)*(img[:,:,1]==0)*(img[:,:,2]==0) +ind_px_ground_pts2 = np.where(mask2) +px_ground_pts2 = np.vstack([ind_px_ground_pts2[1],ind_px_ground_pts2[0]]).T + + +img_pts = img.copy() +for i,pt in enumerate(px_ground_pts): + img_pts = cv2.circle(img_pts, pt, 1, (0,0,255), 1) + txt = str(i)+": "+str(pt) + img_pts = cv2.putText(img_pts, txt, pt, cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0,255,0), 1, cv2.LINE_AA) + +distances = np.array([ + [ 0, 8, 37.1], + [ 1, 7, 10.0], + [ 2, 4, 6.8], + [ 2, 5, 28.3], + [ 2, 10, 17.7], + [ 2, 12, 19.5], + [ 3, 11, 20.4], + [ 4, 7, 3.8], + [ 5, 9, 9.1], + [ 5, 13, 12.7], + [ 6, 11, 11.9], + [ 9, 
10, 7.0], + [ 9, 13, 9.2], + [ 9, 15, 16.3], + [10, 12, 5.3], + [11, 16, 13.6], + [14, 20, 16.1], + [16, 20, 9.7], + [17, 23, 18.4], + [17, 25, 16.0], + [18, 19, 11.6], + [19, 20, 16.0], + [19, 24, 8.6], + [22, 23, 6.0], + [22, 25, 3.8], + [23, 24, 12.2] +]) +for i,dd in enumerate(distances): + pt1 = px_ground_pts[int(dd[0]),:] + pt2 = px_ground_pts[int(dd[1]),:] + img_pts = cv2.line(img_pts, pt1, pt2, (255,255,0), 2) + +# cv2.imwrite("image_vide_pts_labels.png",img_pts) +cv2.imshow("pts", img_pts) +cv2.waitKey(0) +cv2.destroyAllWindows() + + +## parametres caméra pour initialiser la minimisation de la "cost" fonction +f = 3.2 # en mm +sensor_size = (6.17, 4.55) # en mm +image_size = (nw,nh) # en px +elevation = 10 # en m +angle = 45 # inclinaison de la caméra. (0° : caméra orientée vers le bas, 90° : caméra orientée parallèlement au sol, 180° : caméra orientée vers le haut) +heading_deg = 45 # la direction dans laquelle la caméra regarde. (0° : la caméra est orientée « nord », 90° : est, 180° : sud, 270° : ouest) +roll_deg = 0 # rotation de l'image. 
(0°: camera image is not rotated (landscape format), 90°: camera image is in portrait format, 180°: camera is in upside down landscape format) + +## Find camera parameters: [focal,sensorx,sensory,elevation,angle] +def fct_cost(param): + #print("cost param : ",param) + f,sx,sy,e,a,b,c = param + camloc = ct.Camera( + ct.RectilinearProjection( + focallength_mm=f, + sensor=(sx,sy), + image=image_size + ), + ct.SpatialOrientation( + elevation_m=e, + tilt_deg=a, + heading_deg=b, + roll_deg=c + ) + ) + pts = [] + for pt in px_ground_pts: + gpt = camloc.spaceFromImage(pt) + pts.append(gpt) + pts = np.array(pts) + cost = [] + for dd in distances: + cost.append( np.linalg.norm( pts[int(dd[0]),:]-pts[int(dd[1]),:])-dd[2] ) + + return np.array(cost) + +param = [f, sensor_size[0], sensor_size[1], elevation, angle, heading_deg , roll_deg] +#cost = fct_cost(param) +#print("cost =",cost) + +res = least_squares(fct_cost, param) +print(res) + + +# initialize the camera +cam = ct.Camera(ct.RectilinearProjection(focallength_mm=res.x[0], + sensor=(res.x[1],res.x[2]), + image=image_size), + ct.SpatialOrientation(elevation_m=res.x[3], + tilt_deg=res.x[4], + heading_deg = res.x[5], + roll_deg = res.x[6] ) + ) + + +space_pts = [] +for pt in px_ground_pts: + space_pts.append(cam.spaceFromImage(pt)) +space_pts = np.array(space_pts) + +space_pts2 = [] +for pt in px_ground_pts2: + space_pts2.append(cam.spaceFromImage(pt)) +space_pts2 = np.array(space_pts2) +#print("space_pts2 =", space_pts2) + +plt.figure() +plt.scatter(space_pts[:,0], space_pts[:,1], color="red", s=2) +plt.scatter(space_pts2[:,0], space_pts2[:,1], color="blue", s=1) +for dd in distances: + plt.plot( [space_pts[int(dd[0]),0], space_pts[int(dd[1]),0]], [space_pts[int(dd[0]),1], space_pts[int(dd[1]),1]], color="green" ) +plt.axis("equal") + +plt.show() \ No newline at end of file