int64 fix

√(noham)² 2024-07-18 02:11:29 +02:00
parent 4a429638b7
commit be9f76b759
3 changed files with 14 additions and 10 deletions
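Background for this commit: `np.int` was only a deprecated alias for the builtin `int`; NumPy 1.20 deprecated it and NumPy 1.24 removed it, so every `.astype(np.int)` call touched below raises an AttributeError on current NumPy. A minimal repro and the fix applied throughout (standalone sketch, not taken from the diff):

import numpy as np

x = np.arange(4) / 2
# On NumPy >= 1.24 the next line raises:
#   AttributeError: module 'numpy' has no attribute 'int'
# x = x.astype(np.int)
x = x.astype(np.int64)  # explicit fixed-width dtype, valid on all NumPy versions
print(x)  # [0 0 1 1]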

View File

@@ -429,7 +429,7 @@ class LoadImagesAndLabels(Dataset):  # for training/testing
                 x[:, 0] = 0
 
         n = len(shapes)  # number of images
-        bi = np.floor(np.arange(n) / batch_size).astype(np.int)  # batch index
+        bi = np.floor(np.arange(n) / batch_size).astype(np.int64)  # batch index
         nb = bi[-1] + 1  # number of batches
         self.batch = bi  # batch index of image
         self.n = n
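For reference, the changed line assigns each image index to a batch; a small sketch of the computation, with illustrative values:

import numpy as np

n, batch_size = 10, 4
bi = np.floor(np.arange(n) / batch_size).astype(np.int64)  # batch index per image
nb = bi[-1] + 1  # number of batches
print(bi)  # [0 0 0 0 1 1 1 1 2 2]
print(nb)  # 3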
@@ -457,7 +457,7 @@ class LoadImagesAndLabels(Dataset):  # for training/testing
                 elif mini > 1:
                     shapes[i] = [1, 1 / mini]
 
-            self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride
+            self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int64) * stride
 
         # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
         self.imgs = [None] * n
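The second changed line rounds each batch's target shape up to a multiple of the model stride for rectangular training; a sketch with illustrative values:

import numpy as np

img_size, stride, pad = 640, 32, 0.5
shapes = np.array([[1.0, 0.75],   # batch of wide images: height 0.75 of width
                   [1.0, 1.0]])   # batch of square images
batch_shapes = np.ceil(shapes * img_size / stride + pad).astype(np.int64) * stride
print(batch_shapes)  # [[672 512]
                     #  [672 672]]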
@@ -699,7 +699,8 @@ class LoadImagesAndLabelsCustom(LoadImagesAndLabels):
         self.stride = stride
         self.path = path
 
-        PREFIX = '/data/wujiapeng/datasets/'
+        ## SF ## PREFIX = '/data/wujiapeng/datasets/'
+        PREFIX = "data/"
         path = Path(path)
         assert path.is_file(), 'wrong format for VisDrone'
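This hunk also repoints the dataset root from an absolute server path to a repo-relative data/ directory. How PREFIX is consumed is not visible in the diff; the joining below is an assumption for illustration only:

from pathlib import Path

PREFIX = "data/"  # repo-relative root replacing '/data/wujiapeng/datasets/'
# Hypothetical: entries from the VisDrone split file resolved against PREFIX
entry = "VisDrone2019-DET-train/images/0000001.jpg"  # made-up example entry
print(Path(PREFIX) / entry)  # data/VisDrone2019-DET-train/images/0000001.jpg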
@@ -741,7 +742,7 @@ class LoadImagesAndLabelsCustom(LoadImagesAndLabels):
                 x[:, 0] = 0
 
         n = len(shapes)  # number of images
-        bi = np.floor(np.arange(n) / batch_size).astype(np.int)  # batch index
+        bi = np.floor(np.arange(n) / batch_size).astype(np.int64)  # batch index
         nb = bi[-1] + 1  # number of batches
         self.batch = bi  # batch index of image
         self.n = n
@@ -769,7 +770,7 @@ class LoadImagesAndLabelsCustom(LoadImagesAndLabels):
                 elif mini > 1:
                     shapes[i] = [1, 1 / mini]
 
-            self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride
+            self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int64) * stride
 
         # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
         self.imgs = [None] * n
@@ -1331,7 +1332,7 @@ def pastein(image, labels, sample_labels, sample_images, sample_masks):
             r_image = cv2.resize(sample_images[sel_ind], (r_w, r_h))
             temp_crop = image[ymin:ymin+r_h, xmin:xmin+r_w]
             m_ind = r_mask > 0
-            if m_ind.astype(np.int).sum() > 60:
+            if m_ind.astype(np.int64).sum() > 60:
                 temp_crop[m_ind] = r_image[m_ind]
                 #print(sample_labels[sel_ind])
                 #print(sample_images[sel_ind].shape)
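The guard here counts the object pixels of the resized paste mask and skips pastes that are too small; a sketch with illustrative sizes:

import numpy as np

r_mask = np.zeros((16, 16), dtype=np.uint8)
r_mask[3:13, 3:13] = 255               # a 10x10 patch of "object" pixels
m_ind = r_mask > 0                     # boolean mask of object pixels
print(m_ind.astype(np.int64).sum())    # 100 > 60, so the paste would proceed
# Note: m_ind.sum() counts True values directly, without the cast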
@@ -1414,7 +1415,7 @@ def extract_boxes(path='../coco/'):  # from utils.datasets import *; extract_boxes()
                     b = x[1:] * [w, h, w, h]  # box
                     # b[2:] = b[2:].max()  # rectangle to square
                     b[2:] = b[2:] * 1.2 + 3  # pad
-                    b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int)
+                    b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int64)
 
                     b[[0, 2]] = np.clip(b[[0, 2]], 0, w)  # clip boxes outside of image
                     b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
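For reference, the box math above converts a normalized xywh label to padded pixel xyxy coordinates; a self-contained sketch, with xywh2xyxy re-implemented here to match its usual definition:

import numpy as np

def xywh2xyxy(x):
    # [x_center, y_center, w, h] -> [x1, y1, x2, y2]
    y = np.copy(x)
    y[:, 0] = x[:, 0] - x[:, 2] / 2
    y[:, 1] = x[:, 1] - x[:, 3] / 2
    y[:, 2] = x[:, 0] + x[:, 2] / 2
    y[:, 3] = x[:, 1] + x[:, 3] / 2
    return y

w, h = 640, 480
x = np.array([0, 0.5, 0.5, 0.25, 0.25])      # [class x y w h], normalized
b = x[1:] * [w, h, w, h]                     # box in pixels
b[2:] = b[2:] * 1.2 + 3                      # pad
b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int64)
b[[0, 2]] = np.clip(b[[0, 2]], 0, w)         # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
print(b)  # [222 166 417 313]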

View File

@@ -219,7 +219,7 @@ def labels_to_class_weights(labels, nc=80):
         return torch.Tensor()
 
     labels = np.concatenate(labels, 0)  # labels.shape = (866643, 5) for COCO
-    classes = labels[:, 0].astype(np.int)  # labels = [class xywh]
+    classes = labels[:, 0].astype(np.int64)  # labels = [class xywh]
     weights = np.bincount(classes, minlength=nc)  # occurrences per class
 
     # Prepend gridpoint count (for uCE training)
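The cast matters here because np.bincount rejects float input with a TypeError, so the class column must be integer before counting; a sketch:

import numpy as np

labels = np.array([[0., .5, .5, .1, .1],
                   [2., .3, .3, .2, .2],
                   [2., .7, .7, .2, .2]])   # rows of [class x y w h]
classes = labels[:, 0].astype(np.int64)     # bincount requires integers
print(np.bincount(classes, minlength=5))    # [1 0 2 0 0]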
@@ -234,7 +234,7 @@ def labels_to_class_weights(labels, nc=80):
 
 def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
     # Produces image weights based on class_weights and image contents
-    class_counts = np.array([np.bincount(x[:, 0].astype(np.int), minlength=nc) for x in labels])
+    class_counts = np.array([np.bincount(x[:, 0].astype(np.int64), minlength=nc) for x in labels])
     image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1)
     # index = random.choices(range(n), weights=image_weights, k=1)  # weight image sample
     return image_weights
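Same pattern one level up: per-image class counts weighted by class_weights yield one sampling weight per image; a sketch with illustrative numbers:

import numpy as np

nc = 3
class_weights = np.array([1.0, 5.0, 2.0])   # rarer classes weighted higher
labels = [np.array([[0., 0, 0, 0, 0], [1., 0, 0, 0, 0]]),  # image 0: classes 0 and 1
          np.array([[2., 0, 0, 0, 0]])]                    # image 1: class 2
class_counts = np.array([np.bincount(x[:, 0].astype(np.int64), minlength=nc)
                         for x in labels])
image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1)
print(image_weights)  # [6. 2.] -> image 0 is sampled more often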

View File

@@ -54,7 +54,10 @@ dataset_name
 # Launch train
 python3 train.py --dataset dataset1_2024_06_19__ --workers 1 --device 0 --batch-size 4 --data data/dataset1_2024_06_19/dataset.yaml --img 1280 720 --cfg cfg/training/yolov7x_dataset1_2024_06_19.yaml --weights '' --name yolov7x-dataset1_2024_06_19 --hyp data/hyp.scratch.custom.yaml
-python3 train.py --dataset dataset2fps_20240718_ --workers 1 --device 0 --batch-size 4 --data data/dataset2fps/dataset.yaml --img 1280 720 --cfg cfg/training/yolov7x_dataset2fps.yaml --weights '' --name yolov7x-dataset2fps_20240718 --hyp data/hyp.scratch.custom.yaml
+<!-- python3 train.py --dataset dataset2fps_20240718_ --workers 1 --device 0 --batch-size 4 --data data/dataset2fps/dataset.yaml --img 1280 720 --cfg cfg/training/yolov7x_dataset2fps.yaml --weights '' --name yolov7x-dataset2fps_20240718 --hyp data/hyp.scratch.custom.yaml -->
+python3 train.py --dataset dataset2fps_20240718_ --epochs 20 --workers 1 --device cpu --batch-size 4 --data data/dataset2fps/dataset.yaml --img 1280 720 --cfg cfg/training/yolov7x_dataset2fps.yaml --weights '' --name yolov7x-dataset2fps_20240718 --hyp data/hyp.scratch.custom.yaml
+python3 train.py --dataset dataset2fps_20240718_ --epochs 20 --workers 1 --device cpu --batch-size 4 --data data/dataset2fps/dataset.yaml --img 1280 720 --cfg cfg/training/yolov7x_dataset2fps.yaml --weights '' --name yolov7x-dataset2fps_20240718 --hyp data/hyp.scratch.custom.yaml --upload_dataset
 
 If you get the error: _pickle.UnpicklingError: STACK_GLOBAL requires str
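That unpickling error usually points at a stale labels cache written by a different Python/NumPy version; a common remedy (an assumption here, not part of this commit) is deleting the *.cache files so the loader regenerates them:

from pathlib import Path

# Hypothetical cleanup: remove stale label caches under data/ so they are rebuilt
for cache in Path("data").rglob("*.cache"):
    print(f"removing {cache}")
    cache.unlink()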