diff --git a/utils/datasets.py b/utils/datasets.py
index e98a60c4fb59..f172de233614 100755
--- a/utils/datasets.py
+++ b/utils/datasets.py
@@ -56,10 +56,10 @@ def exif_size(img):
 
 
 def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False,
-                      rank=-1, world_size=1, workers=8, data_type="train", image_weights=False, quad=False, prefix=''):
+                      rank=-1, world_size=1, workers=8, image_weights=False, quad=False, prefix=''):
     # Make sure only the first process in DDP process the dataset first, and the following others can use the cache
     with torch_distributed_zero_first(rank):
-        dataset = LoadImagesAndLabels(path, imgsz, batch_size, data_type=data_type,
+        dataset = LoadImagesAndLabels(path, imgsz, batch_size,
                                       augment=augment,  # augment images
                                       hyp=hyp,  # augmentation hyperparameters
                                       rect=rect,  # rectangular training
@@ -337,7 +337,7 @@ def img2label_paths(img_paths):
 
 
 class LoadImagesAndLabels(Dataset):  # for training/testing
-    def __init__(self, path, img_size=640, batch_size=16, data_type="train", augment=False, hyp=None, rect=False, image_weights=False,
+    def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
                  cache_images=False, single_cls=False, stride=32, pad=0.0, prefix=''):
         self.img_size = img_size
         self.augment = augment
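
For reference, a minimal sketch of a call site after this change: data_type is no longer passed to create_dataloader. The argument values below are placeholders, not taken from the diff, and the (dataloader, dataset) return value is assumed from the upstream YOLOv5 utility this file follows.

    from utils.datasets import create_dataloader

    # hyp and opt stand in for the hyperparameter dict and the argparse namespace
    # used elsewhere in the training script (placeholders, not defined in this diff).
    dataloader, dataset = create_dataloader('data/train', imgsz=640, batch_size=16, stride=32, opt=opt,
                                            hyp=hyp, augment=True, rect=False, rank=-1,
                                            workers=8, image_weights=False, quad=False, prefix='train: ')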