test setup
This commit is contained in:
parent
9a48d44095
commit
5c9313c4ca
96 yolov7-setup/compute_yolov3_anchors.py Normal file
@@ -0,0 +1,96 @@
import json
import os, sys
import math
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns; sns.set()  # for plot styling
from PIL import Image

imgsize = int(input("Image size for train and run? "))
print("=> imgsize =", imgsize)
# imgsize = 256

BBDlabeldict = {"runner": 0}

datasetpath = '/Users/noham/Documents/GitHub/Stage-2024/test/yolov7-tracker/data/dataset2fps/'
images_path = datasetpath + "images"
labels_path = datasetpath + "labels"

# Collect the width/height (in pixels) of every annotated box in the dataset.
filenames = sorted(os.listdir(images_path))
print("Number of images =", len(filenames))
w, h = [], []
for i, file in enumerate(filenames):
    # print(file)
    try:
        im = Image.open(images_path + "/" + file)
        img_width = im.width
        img_height = im.height
        # print(img_width, img_height)
        fo = open(images_path.replace("images", "labels") + "/" + file.replace("jpg", "txt"), "r+")
        lines = fo.readlines()
        for line in lines:
            # YOLO label line: class x_center_norm y_center_norm width_norm height_norm
            a = np.array(line.split(" ")).astype(float)
            w.append(a[3] * img_width)
            h.append(a[4] * img_height)
            if (a[3] * img_width < 0.001) or (a[4] * img_height < 0.001):
                print("!! WARNING: box too small in file", file,
                      "=> check and delete that line in the txt label file !!")
    except Exception:
        # skip files without a readable image or a matching label file
        pass

w = np.asarray(w)  # + 0.001*(np.random.random((len(w),)) - 0.5)
h = np.asarray(h)  # + 0.001*(np.random.random((len(w),)) - 0.5)
print("h => min =", h.min(), " max =", h.max())
print("w => min =", w.min(), " max =", w.max())
print("Number of annotated objects =", len(h))

x = [w, h]
x = np.asarray(x)
x = x.transpose()

########################################## K-Means
##########################################

from sklearn.cluster import KMeans
kmeans3 = KMeans(n_clusters=9)
kmeans3.fit(x)
y_kmeans3 = kmeans3.predict(x)

##########################################
centers3 = kmeans3.cluster_centers_

# Average box size of each of the 9 clusters = anchor candidates.
yolo_anchor_average = []
for ind in range(9):
    yolo_anchor_average.append(np.mean(x[y_kmeans3 == ind], axis=0))

yolo_anchor_average = np.array(yolo_anchor_average)

plt.scatter(x[:, 0], x[:, 1], c=y_kmeans3, s=2, cmap='viridis')
plt.scatter(yolo_anchor_average[:, 0], yolo_anchor_average[:, 1], c='red', s=50)
# Rescale the anchors from the 1920x1056 reference resolution to the training image size.
yoloV3anchors = yolo_anchor_average
yoloV3anchors[:, 0] = yolo_anchor_average[:, 0] / 1920 * imgsize
yoloV3anchors[:, 1] = yolo_anchor_average[:, 1] / 1056 * imgsize
yoloV3anchors = np.rint(yoloV3anchors)
fig, ax = plt.subplots()
for ind in range(9):
    rectangle = plt.Rectangle((0.5 * imgsize - yoloV3anchors[ind, 0] / 2,
                               0.5 * imgsize - yoloV3anchors[ind, 1] / 2),
                              yoloV3anchors[ind, 0], yoloV3anchors[ind, 1],
                              fc='b', edgecolor='b', fill=None)
    ax.add_patch(rectangle)
ax.set_aspect(1.0)
plt.axis([0, imgsize, 0, imgsize])
plt.savefig(datasetpath + "boites.png", dpi=150)
plt.show()

yoloV3anchors.sort(axis=0)
print("Your custom anchor boxes are {}".format(yoloV3anchors))

F = open(datasetpath + "YOLOV_BDD_Anchors_" + str(imgsize) + ".txt", "w")
F.write("{}".format(yoloV3anchors))
F.close()

print("Anchor boxes for the yaml file:")
s_anchors = yoloV3anchors[yoloV3anchors[:, 0].argsort()]
anchor_lists = [s_anchors[i:i+3].tolist() for i in range(0, len(s_anchors), 3)]
# anchor_lists = [[[14.0, 37.0], [15.0, 48.0], [19.0, 53.0]], [[21.0, 70.0], [24.0, 88.0], [31.0, 112.0]], [[34.0, 136.0], [35.0, 155.0], [76.0, 161.0]]]
p = ['] # P3/8', '] # P4/16', '] # P5/32']
out = "anchors:"
for l in anchor_lists:
    out += '\n' + " - [" + ", ".join([f"{int(i[0])},{int(i[1])}" for i in l]) + p[anchor_lists.index(l)]
print(out)
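# A minimal sketch of what the printed block looks like for the example
# anchor_lists commented above (values illustrative, not computed here);
# this is the block to paste into the model cfg yaml:
#
# anchors:
#  - [14,37, 15,48, 19,53] # P3/8
#  - [21,70, 24,88, 31,112] # P4/16
#  - [34,136, 35,155, 76,161] # P5/32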
46 yolov7-setup/config_dataset1_2024_06_19.yaml Normal file
@@ -0,0 +1,46 @@
# Config file of the dataset

DATASET_ROOT: '/home/sylvain.faure/RN/athle/yolov7-tracker/dataset1_2024_06_19/'
SPLIT: train
CATEGORY_NAMES:
  - 'coureur'

CATEGORY_DICT:
  0: 'coureur'

CERTAIN_SEQS:
  -

IGNORE_SEQS:  # Seqs you want to ignore
  -

YAML_DICT: './data/files_dataset1_2024_06_19.yaml'  # NOTE: ONLY for the yolo v5 model loader (func DetectMultiBackend)


# Not used:
#
TRACK_EVAL:  # If you use TrackEval to evaluate, use these configs
  'DISPLAY_LESS_PROGRESS': False
  'GT_FOLDER': '/data/wujiapeng/datasets/VisDrone2019/VisDrone2019/VisDrone2019-MOT-test-dev/annotations'
  'TRACKERS_FOLDER': './tracker/results'
  'SKIP_SPLIT_FOL': True
  'TRACKER_SUB_FOLDER': ''
  'SEQ_INFO':
    'uav0000009_03358_v': 219
    'uav0000073_00600_v': 328
    'uav0000073_04464_v': 312
    'uav0000077_00720_v': 780
    'uav0000088_00290_v': 296
    'uav0000119_02301_v': 179
    'uav0000120_04775_v': 1000
    'uav0000161_00000_v': 308
    'uav0000188_00000_v': 260
    'uav0000201_00000_v': 677
    'uav0000249_00001_v': 360
    'uav0000249_02688_v': 244
    'uav0000297_00000_v': 146
    'uav0000297_02761_v': 373
    'uav0000306_00230_v': 420
    'uav0000355_00001_v': 468
    'uav0000370_00001_v': 265
  'GT_LOC_FORMAT': '{gt_folder}/{seq}.txt'
63 yolov7-setup/info_SF.txt Normal file
@@ -0,0 +1,63 @@
Repository:
git clone https://github.com/JackWoo0831/Yolov7-tracker

mv Yolov7-tracker yolov7-tracker
cd yolov7-tracker
git checkout v2 # change to v2 branch !!

conda create -n yolov7 python=3.9 pytorch=1.12 ## older versions, as indicated in the readme, otherwise numpy.distutils is missing...
conda activate yolov7

(if needed: conda remove --name yolov7 --all)

(yolov7) 14:37:21 sylvain.faure@cinaps ~/RN/athle/yolov7-tracker $ pip3 install numpy scipy matplotlib cython pandas cuda-python
(yolov7) 15:02:25 sylvain.faure@cinaps ~/RN/athle/yolov7-tracker $ pip3 install -r requirements.txt

(yolov7) 15:02:25 sylvain.faure@cinaps ~/RN/athle/yolov7-tracker $ pip3 install ultralytics==8.0.94

#################
## TRAIN
#################

## Training with a first dataset: ~/RN/athle/yolov7-tracker/dataset1_2024_06_19/
Two configuration files created:
tracker/config_files/dataset1_2024_06_19.yaml
data/files_dataset1_2024_06_19.yaml

yolov7 on node20 of the cluster:

## To see the CUDA version of the node:
(yolov7) sylvain.faure@node20:~$ nvidia-smi
=> cuda 11.4 on node20

See the pytorch installation notes to find the right versions: https://pytorch.org/get-started/previous-versions/
pip3 install torch==2.2.2 torchvision==0.17.2 torchaudio==2.2.2 --index-url https://download.pytorch.org/whl/cu118
pip3 install filterpy

## Quick test to check that CUDA is available:
>>> import torch
>>> torch.cuda.is_available()
True

## Launching the training:

(yolov7) sylvain.faure@node20:~/RN/athle/yolov7-tracker$ python3 train.py --dataset dataset1_2024_06_19__ --workers 1 --device 0 --batch-size 4 --data data/dataset1_2024_06_19/dataset.yaml --img 1280 720 --cfg cfg/training/yolov7x_dataset1_2024_06_19.yaml --weights '' --name yolov7x-dataset1_2024_06_19 --hyp data/hyp.scratch.custom.yaml


If you get the error: _pickle.UnpicklingError: STACK_GLOBAL requires str
Delete the .cache files, e.g. liste_images.cache:
(yolov7) sylvain.faure@node20:~/RN/athle/yolov7-tracker$ rm data/dataset1_2024_06_19/liste_images.cache

If you get an np.int error => replace it with np.int64

If you get a cuda/cpu error in loss.py:
you have to replace the line in the file yolo7/utils/loss.py
"from_which_layer.append((torch.ones(size=(len(b),)) * i)"
with "from_which_layer.append((torch.ones(size=(len(b),)) * i).to('cuda'))",
and add the new line "fg_mask_inboxes = fg_mask_inboxes.to(torch.device('cuda'))"
after "fg_mask_inboxes = matching_matrix.sum(0) > 0.0";
this pattern appears 3 times in the file, so apply the change in all 3 places.
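
A rough sketch of what each of the 3 patched spots should look like afterwards
(exact surrounding code depends on the yolov7-tracker version):

  # before:
  from_which_layer.append(torch.ones(size=(len(b),)) * i)
  ...
  fg_mask_inboxes = matching_matrix.sum(0) > 0.0

  # after:
  from_which_layer.append((torch.ones(size=(len(b),)) * i).to('cuda'))
  ...
  fg_mask_inboxes = matching_matrix.sum(0) > 0.0
  fg_mask_inboxes = fg_mask_inboxes.to(torch.device('cuda'))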

ssh cinaps
73 yolov7-setup/instructions.md Normal file
@@ -0,0 +1,73 @@
https://github.com/JackWoo0831/Yolov7-tracker
https://github.com/microsoft/VoTT?tab=readme-ov-file#build-and-run-from-source

# Yolov7-tracker
git clone https://github.com/JackWoo0831/Yolov7-tracker.git
mv Yolov7-tracker yolov7-tracker
cd yolov7-tracker
git checkout v2 # change to v2 branch !!

# Python: 3.9, Pytorch: 1.12
conda create -n yolov7 python=3.9 pytorch=1.12
conda activate yolov7
pip3 install numpy scipy matplotlib cython pandas cuda-python
## ERROR: Could not find a version that satisfies the requirement cuda-python (from versions: none) Requires-Python >=3.10
pip3 install -r requirements.txt
pip3 install ultralytics==8.0.94

# Setup cluster
nvidia-smi
=> cuda 11.4 on node20

See the pytorch installation notes to find the right versions: https://pytorch.org/get-started/previous-versions/
pip3 install torch==2.2.2 torchvision==0.17.2 torchaudio==2.2.2 --index-url https://download.pytorch.org/whl/cu118
pip3 install filterpy

>>> import torch
>>> torch.cuda.is_available()
True

# Setup macos
conda install pytorch==2.3.0 torchvision==0.18.0 torchaudio==2.3.0 -c pytorch

# Config train
- create the config files tracker/config_files/dataset1_2024_06_19.yaml and data/files_dataset1_2024_06_19.yaml
- export the annotations from VoTT as json
- convert to YOLO format using vott2yolov.py (data/dataset1_2024_06_19/vott2yolov.py) -> data/dataset1_2024_06_19/labels/dataset1/ + data/dataset1_2024_06_19/images/dataset1/ (expected output formats are sketched after this list)
- edit data/dataset1_2024_06_19/dataset.yaml
- get the box sizes from data/dataset1_2024_06_19/YOLOV_BDD_Anchors_1280.txt -> cfg/training/yolov7x_dataset1_2024_06_19.yaml
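
A minimal sketch of the expected outputs, following the template that vott2yolov.py writes (numbers and class name are illustrative):

each labels/.../image_000000000000.txt line has the form
class_id x_center_norm y_center_norm width_norm height_norm
for example: 0 0.513 0.462 0.026 0.074

dataset.yaml, pointing at the image list produced by the conversion:
train: data/dataset1_2024_06_19/liste_images.txt
val: data/dataset1_2024_06_19/liste_images.txt
nc: 1
names: ['coureur']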

structure:
dataset_name
  |---images
     |---train
        |---sequence_name1
           |---000001.jpg
           |---000002.jpg ...
     |---val ...
     |---test ...


# Launch train
python3 train.py --dataset dataset1_2024_06_19__ --workers 1 --device 0 --batch-size 4 --data data/dataset1_2024_06_19/dataset.yaml --img 1280 720 --cfg cfg/training/yolov7x_dataset1_2024_06_19.yaml --weights '' --name yolov7x-dataset1_2024_06_19 --hyp data/hyp.scratch.custom.yaml

If you get the error: _pickle.UnpicklingError: STACK_GLOBAL requires str
Delete the .cache files, e.g. liste_images.cache...

If you get an np.int error => replace it with np.int64

If you get a cuda/cpu error in loss.py:
you have to replace the line in the file yolo7/utils/loss.py
"from_which_layer.append((torch.ones(size=(len(b),)) * i)"
with "from_which_layer.append((torch.ones(size=(len(b),)) * i).to('cuda'))",
and add the new line "fg_mask_inboxes = fg_mask_inboxes.to(torch.device('cuda'))"
after "fg_mask_inboxes = matching_matrix.sum(0) > 0.0";
this pattern appears 3 times in the file, so apply the change in all 3 places.

<!-- https://wandb.ai/noham- -->

<!-- /Users/noham/Documents/GitHub/Stage/2024 -->
<!-- /Users/noham/Documents/GitHub/Stage-2024 -->
75 yolov7-setup/vott2yolov.py Normal file
@@ -0,0 +1,75 @@
import numpy as np
import json, sys, os, shutil

## Click Export in VoTT, then:

# prefix = "../exports_VOTT/CH01-20230614-083436-091514-Zone2/vott-json-export/"
# vottjson_filename = "Poulets-export.json"
# cc = 1000000

prefix = "/Users/noham/Documents/GitHub/Stage-2024/yolov7-tracker/"
print('Working folder: ', prefix)
jsonname = 'vott-json-export'
jsonfile = 'track-2fps-export.json'
json_path = prefix + jsonname + '/' + jsonfile
print('Working json file: ', json_path)
jsonfolder = prefix + jsonname
print('Working json folder: ', jsonfolder)

outputname = 'dataset2fps'
print('Output name: ', outputname)
output = '/Users/noham/Documents/GitHub/Stage-2024/test/yolov7-tracker/data/' + outputname
print('Output folder: ', output)
cc = 0

label = 'runner'

for folder in [output, output + "/images", output + "/labels"]:
    if not os.path.exists(folder):
        os.makedirs(folder)

with open(json_path, "r") as read_file:
    data = json.load(read_file)

imglist = []
for a in data["assets"]:
    asset = data["assets"][a]["asset"]
    regions = data["assets"][a]["regions"]
    img_name = asset["name"]
    width = asset["size"]["width"]
    height = asset["size"]["height"]
    # print("\nimage name : ", img_name, " width =", width, " height =", height)
    shutil.copyfile(jsonfolder + "/" + img_name, output + "/images/image_" + str(cc).zfill(12) + ".jpg")
    imglist.append(output + "/images/image_" + str(cc).zfill(12) + ".jpg")
    f = open(output + "/labels/image_" + str(cc).zfill(12) + ".txt", "w")
    # f = open(prefix + "/" + img_name.replace(".jpg", ".txt"), "w")
    for region in regions:
        boundingBox = region["boundingBox"]
        points = region["points"]
        # print(" tags =", region["tags"], " boundingBox =", boundingBox, " points =", points)
        # ## Labels:
        # According to the doc, each line must be:
        # Label_ID_1 X_CENTER_NORM Y_CENTER_NORM WIDTH_NORM HEIGHT_NORM
        # The label_id is the index number in the classes.names file. The id of the first label will be 0 and an increasing integer after that. Note all the position attributes in the label file are not absolute but normalised.
        # X_CENTER_NORM = X_CENTER_ABS/IMAGE_WIDTH      (box centre, x)
        # Y_CENTER_NORM = Y_CENTER_ABS/IMAGE_HEIGHT     (box centre, y)
        # WIDTH_NORM = WIDTH_OF_LABEL_ABS/IMAGE_WIDTH   (box width)
        # HEIGHT_NORM = HEIGHT_OF_LABEL_ABS/IMAGE_HEIGHT (box height)
        # in a frame whose origin is at the top left, with X from left to right and Y from top to bottom
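        # A worked example with hypothetical numbers (not taken from a real export):
        # for a 1920x1080 image and a VoTT box with left=100, top=200, width=50, height=80:
        #   X_CENTER_NORM = (100 + 50/2) / 1920 = 0.0651
        #   Y_CENTER_NORM = (200 + 80/2) / 1080 = 0.2222
        #   WIDTH_NORM    = 50 / 1920           = 0.0260
        #   HEIGHT_NORM   = 80 / 1080           = 0.0741
        # which gives the label line: "0 0.0651 0.2222 0.0260 0.0741"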
        Label_ID = "0"
        X_CENTER_NORM = (boundingBox["left"] + boundingBox["width"]/2) / width
        Y_CENTER_NORM = (boundingBox["top"] + boundingBox["height"]/2) / height
        WIDTH_NORM = boundingBox["width"] / width
        HEIGHT_NORM = boundingBox["height"] / height
        f.write(Label_ID + " " + str(X_CENTER_NORM) + " " + str(Y_CENTER_NORM) + " " + str(WIDTH_NORM) + " " + str(HEIGHT_NORM) + "\n")  # 0 = chicken if there is only one object class...
    f.close()
    cc += 1
    # print(" written file :", prefix + "/" + img_name.replace(".jpg", ".txt"))
with open(output + "/liste_images.txt", "w") as file:
    for item in imglist:
        file.write(item + "\n")
with open(output + "/dataset.yaml", "w") as file:
    file.write(f"""train: data/{outputname}/liste_images.txt
val: data/{outputname}/liste_images.txt
nc: 1
names: ['{label}']""")