From bf449699fcb84e2929e82170eae261db817ae354 Mon Sep 17 00:00:00 2001 From: Junji Hashimoto Date: Sun, 18 Jul 2021 22:24:36 +0900 Subject: [PATCH 01/16] Add cache-on-disk and cache-directory to cache images on disk --- train.py | 8 +++++--- utils/datasets.py | 49 ++++++++++++++++++++++++++++++++--------------- 2 files changed, 39 insertions(+), 18 deletions(-) diff --git a/train.py b/train.py index 1c48fa49f0f7..6fc9bca051b9 100644 --- a/train.py +++ b/train.py @@ -195,11 +195,11 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # SyncBatchNorm if opt.sync_bn and cuda and RANK != -1: model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device) - LOGGER.info('Using SyncBatchNorm()') + logging.info('Using SyncBatchNorm()') # Trainloader train_loader, dataset = create_dataloader(train_path, imgsz, batch_size // WORLD_SIZE, gs, single_cls, - hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=RANK, + hyp=hyp, augment=True, cache=opt.cache_images, cache_on_disk=opt.cache_on_disk, cache_directory=opt.cache_directory, rect=opt.rect, rank=RANK, workers=workers, image_weights=opt.image_weights, quad=opt.quad, prefix=colorstr('train: ')) mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class @@ -209,7 +209,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Process 0 if RANK in [-1, 0]: val_loader = create_dataloader(val_path, imgsz, batch_size // WORLD_SIZE * 2, gs, single_cls, - hyp=hyp, cache=opt.cache_images and not noval, rect=True, rank=-1, + hyp=hyp, cache=opt.cache_images and not noval, cache_on_disk=opt.cache_on_disk, cache_directory=opt.cache_directory, rect=True, rank=-1, workers=workers, pad=0.5, prefix=colorstr('val: '))[0] @@ -440,6 +440,8 @@ def parse_opt(known=False): parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations') parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') parser.add_argument('--cache-images', action='store_true', help='cache images for faster training') + parser.add_argument('--cache-on-disk', action='store_true', help='cache images on disk, and use the --cache-directory option together') + parser.add_argument('--cache-directory', type=str, default='', help='A directory for cache, and it is not available by default') parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training') parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%') diff --git a/utils/datasets.py b/utils/datasets.py index d3edafa99bd0..1124a7e928b9 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -46,7 +46,6 @@ def get_hash(paths): h.update(''.join(paths).encode()) # hash paths return h.hexdigest() # return hash - def exif_size(img): # Returns exif-corrected PIL size s = img.size # (width, height) @@ -88,7 +87,7 @@ def exif_transpose(image): return image -def create_dataloader(path, imgsz, batch_size, stride, single_cls=False, hyp=None, augment=False, cache=False, pad=0.0, +def create_dataloader(path, imgsz, batch_size, stride, single_cls=False, hyp=None, augment=False, cache=False, cache_on_disk=False, cache_directory="", pad=0.0, rect=False, rank=-1, workers=8, image_weights=False, quad=False, prefix=''): # Make sure only the first process in DDP process the dataset first, and the following others can use the cache with torch_distributed_zero_first(rank): @@ -97,6 +96,8 @@ def create_dataloader(path, imgsz, batch_size, stride, single_cls=False, hyp=Non hyp=hyp, # augmentation hyperparameters rect=rect, # rectangular training cache_images=cache, + cache_on_disk=cache_on_disk, + cache_directory=cache_directory, single_cls=single_cls, stride=int(stride), pad=pad, @@ -361,7 +362,7 @@ def img2label_paths(img_paths): class LoadImagesAndLabels(Dataset): # for training/testing def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False, - cache_images=False, single_cls=False, stride=32, pad=0.0, prefix=''): + cache_images=False, cache_on_disk=False, cache_directory="", single_cls=False, stride=32, pad=0.0, prefix=''): self.img_size = img_size self.augment = augment self.hyp = hyp @@ -372,6 +373,8 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r self.stride = stride self.path = path self.albumentations = Albumentations() if augment else None + self.cache_on_disk = cache_on_disk + self.cache_directory = cache_directory try: f = [] # image files @@ -397,6 +400,11 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r # Check cache self.label_files = img2label_paths(self.img_files) # labels cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') + if cache_directory: + cache_dir = Path(cache_directory) + if not cache_dir.is_dir(): + cache_dir.mkdir(parents=True) + cache_path = Path(cache_directory + "/label_files_"+hashlib.md5(self.label_files[0].encode()).hexdigest()).with_suffix('.cache') try: cache, exists = np.load(cache_path, allow_pickle=True).item(), True # load dict assert cache['version'] == 0.4 and cache['hash'] == get_hash(self.label_files + self.img_files) @@ -462,8 +470,12 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r results = ThreadPool(NUM_THREADS).imap(lambda x: load_image(*x), zip(repeat(self), range(n))) pbar = tqdm(enumerate(results), total=n) for i, x in pbar: - self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # img, hw_original, hw_resized = load_image(self, i) - gb += self.imgs[i].nbytes + if cache_on_disk: + img, self.img_hw0[i], self.img_hw[i] = x # img, hw_original, hw_resized = load_image(self, i) + np.save(self.cache_directory+"/"+str(i)+".npy",img) + else: + self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # img, hw_original, hw_resized = load_image(self, i) + gb += self.imgs[i].nbytes pbar.desc = f'{prefix}Caching images ({gb 
/ 1E9:.1f}GB)' pbar.close() @@ -622,15 +634,19 @@ def load_image(self, index): # loads 1 image from dataset, returns img, original hw, resized hw img = self.imgs[index] if img is None: # not cached - path = self.img_files[index] - img = cv2.imread(path) # BGR - assert img is not None, 'Image Not Found ' + path - h0, w0 = img.shape[:2] # orig hw - r = self.img_size / max(h0, w0) # ratio - if r != 1: # if sizes are not equal - img = cv2.resize(img, (int(w0 * r), int(h0 * r)), - interpolation=cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR) - return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized + if self.img_hw[index] is None: + path = self.img_files[index] + img = cv2.imread(path) # BGR + assert img is not None, 'Image Not Found ' + path + h0, w0 = img.shape[:2] # orig hw + r = self.img_size / max(h0, w0) # ratio + if r != 1: # if sizes are not equal + img = cv2.resize(img, (int(w0 * r), int(h0 * r)), + interpolation=cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR) + return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized + else: + img = np.load(self.cache_directory+"/"+str(index)+'.npy') + return img, self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized else: return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized @@ -923,7 +939,10 @@ def unzip(path): x = [] dataset = LoadImagesAndLabels(data[split], augment=False, rect=True) # load dataset if split == 'train': - cache_path = Path(dataset.label_files[0]).parent.with_suffix('.cache') # *.cache path + if cache_directory: + cache_path = Path(cache_directory + "/label_files_"+hashlib.md5(self.dataset.label_files[0].encode()).hexdigest()).with_suffix('.cache') + else: + cache_path = Path(dataset.label_files[0]).parent.with_suffix('.cache') # *.cache path for label in tqdm(dataset.labels, total=dataset.n, desc='Statistics'): x.append(np.bincount(label[:, 0].astype(int), minlength=nc)) x = np.array(x) # shape(128x80) From 1beadbed89fee7a5e3f499c5430ed01587d006dd Mon Sep 17 00:00:00 2001 From: Junji Hashimoto Date: Mon, 19 Jul 2021 18:31:23 +0900 Subject: [PATCH 02/16] Fix load_image with cache_on_disk --- utils/datasets.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index 1124a7e928b9..4b642e6f2449 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -634,7 +634,10 @@ def load_image(self, index): # loads 1 image from dataset, returns img, original hw, resized hw img = self.imgs[index] if img is None: # not cached - if self.img_hw[index] is None: + if self.cache_on_disk: + img = np.load(self.cache_directory+"/"+str(index)+'.npy') + return img, self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized + else: path = self.img_files[index] img = cv2.imread(path) # BGR assert img is not None, 'Image Not Found ' + path @@ -644,9 +647,6 @@ def load_image(self, index): img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR) return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized - else: - img = np.load(self.cache_directory+"/"+str(index)+'.npy') - return img, self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized else: return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized From c635721429a069506e5453799a631a12683f9a50 Mon Sep 17 00:00:00 2001 From: Junji Hashimoto Date: Tue, 20 Jul 2021 01:12:40 +0900 Subject: 
[PATCH 03/16] Add no_cache flag for load_image --- utils/datasets.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index 4b642e6f2449..627c2b783ec7 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -467,7 +467,7 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r if cache_images: gb = 0 # Gigabytes of cached images self.img_hw0, self.img_hw = [None] * n, [None] * n - results = ThreadPool(NUM_THREADS).imap(lambda x: load_image(*x), zip(repeat(self), range(n))) + results = ThreadPool(NUM_THREADS).imap(lambda x: load_image(*x, no_cache=True), zip(repeat(self), range(n))) pbar = tqdm(enumerate(results), total=n) for i, x in pbar: if cache_on_disk: @@ -630,11 +630,11 @@ def collate_fn4(batch): # Ancillary functions -------------------------------------------------------------------------------------------------- -def load_image(self, index): +def load_image(self, index, no_cache=False): # loads 1 image from dataset, returns img, original hw, resized hw img = self.imgs[index] if img is None: # not cached - if self.cache_on_disk: + if no_cache == False and self.cache_on_disk: img = np.load(self.cache_directory+"/"+str(index)+'.npy') return img, self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized else: From 8cda940bfb181ec5438dbacdc1478268c932ab19 Mon Sep 17 00:00:00 2001 From: Junji Hashimoto Date: Tue, 20 Jul 2021 01:14:45 +0900 Subject: [PATCH 04/16] Revert the parts('logging' and a new line) that do not need to be modified --- train.py | 2 +- utils/datasets.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/train.py b/train.py index 6fc9bca051b9..01382f57bcdd 100644 --- a/train.py +++ b/train.py @@ -195,7 +195,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # SyncBatchNorm if opt.sync_bn and cuda and RANK != -1: model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device) - logging.info('Using SyncBatchNorm()') + LOGGER.info('Using SyncBatchNorm()') # Trainloader train_loader, dataset = create_dataloader(train_path, imgsz, batch_size // WORLD_SIZE, gs, single_cls, diff --git a/utils/datasets.py b/utils/datasets.py index 627c2b783ec7..c7887733d1a3 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -46,6 +46,7 @@ def get_hash(paths): h.update(''.join(paths).encode()) # hash paths return h.hexdigest() # return hash + def exif_size(img): # Returns exif-corrected PIL size s = img.size # (width, height) From c956a5315a9c78e81aa04f4fdbb16c1556218310 Mon Sep 17 00:00:00 2001 From: Junji Hashimoto Date: Tue, 20 Jul 2021 01:52:05 +0900 Subject: [PATCH 05/16] Add the assertion for shapes of cached images --- utils/datasets.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/utils/datasets.py b/utils/datasets.py index c7887733d1a3..45a0e57a4a31 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -474,6 +474,10 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r if cache_on_disk: img, self.img_hw0[i], self.img_hw[i] = x # img, hw_original, hw_resized = load_image(self, i) np.save(self.cache_directory+"/"+str(i)+".npy",img) + timg, thw0, thw = load_image(self, i) + assert (img.shape == timg.shape), f'{img.shape} should the same as {timg.shape}.' + assert (self.img_hw0[i] == thw0), f'{self.img_hw0[i]} should the same as {thw0}.' + assert (self.img_hw[i] == thw), f'{self.img_hw[i]} should the same as {thw}.' 
else: self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # img, hw_original, hw_resized = load_image(self, i) gb += self.imgs[i].nbytes From 5589476e0c12466f48e9f8a8a993b13eeb7f6e4e Mon Sep 17 00:00:00 2001 From: Junji Hashimoto Date: Wed, 21 Jul 2021 19:21:36 +0900 Subject: [PATCH 06/16] Add a suffix string for cached images --- utils/datasets.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index 45a0e57a4a31..89be48dee528 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -46,6 +46,8 @@ def get_hash(paths): h.update(''.join(paths).encode()) # hash paths return h.hexdigest() # return hash +def str2md5(str_arg): + return hashlib.md5(str_arg.encode()).hexdigest() def exif_size(img): # Returns exif-corrected PIL size @@ -473,7 +475,8 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r for i, x in pbar: if cache_on_disk: img, self.img_hw0[i], self.img_hw[i] = x # img, hw_original, hw_resized = load_image(self, i) - np.save(self.cache_directory+"/"+str(i)+".npy",img) + parent_path = Path(self.img_files[i]).parent + np.save(self.cache_directory+"/"+str2md5(str(parent_path))+"_"+str(i)+".npy",img) timg, thw0, thw = load_image(self, i) assert (img.shape == timg.shape), f'{img.shape} should the same as {timg.shape}.' assert (self.img_hw0[i] == thw0), f'{self.img_hw0[i]} should the same as {thw0}.' @@ -640,7 +643,8 @@ def load_image(self, index, no_cache=False): img = self.imgs[index] if img is None: # not cached if no_cache == False and self.cache_on_disk: - img = np.load(self.cache_directory+"/"+str(index)+'.npy') + parent_path = Path(self.img_files[index]).parent + img = np.load(self.cache_directory+"/"+str2md5(str(parent_path)) + "_" + str(index)+".npy") return img, self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized else: path = self.img_files[index] From 5d4b2ae67bc3393ac166d1d015880a85586f54d8 Mon Sep 17 00:00:00 2001 From: Junji Hashimoto Date: Sat, 24 Jul 2021 15:53:32 +0900 Subject: [PATCH 07/16] Fix boundary-error of letterbox for load_mosaic --- utils/datasets.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/utils/datasets.py b/utils/datasets.py index 89be48dee528..13a2221153d1 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -686,6 +686,13 @@ def load_mosaic(self, index): x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h) x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h) + x2b_t = min(x2b, w) + y2b_t = min(y2b, h) + x2a = x2a - (x2b - x2b_t) + y2a = y2a - (y2b - y2b_t) + x2b = x2b_t + y2b = y2b_t + img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] padw = x1a - x1b padh = y1a - y1b From 29298691da1923cdd30c20d9b6a116230ceec938 Mon Sep 17 00:00:00 2001 From: Junji Hashimoto Date: Sat, 24 Jul 2021 23:20:20 +0900 Subject: [PATCH 08/16] Add prefix as cache-key of cache-on-disk --- utils/datasets.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index 13a2221153d1..057521c019d2 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -378,6 +378,8 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r self.albumentations = Albumentations() if augment else None self.cache_on_disk = cache_on_disk self.cache_directory = cache_directory + # Use self.prefix as cache-key for on-disk-cache + self.prefix = prefix try: f = [] # image files @@ -476,11 +478,7 @@ def __init__(self, path, img_size=640, 
batch_size=16, augment=False, hyp=None, r if cache_on_disk: img, self.img_hw0[i], self.img_hw[i] = x # img, hw_original, hw_resized = load_image(self, i) parent_path = Path(self.img_files[i]).parent - np.save(self.cache_directory+"/"+str2md5(str(parent_path))+"_"+str(i)+".npy",img) - timg, thw0, thw = load_image(self, i) - assert (img.shape == timg.shape), f'{img.shape} should the same as {timg.shape}.' - assert (self.img_hw0[i] == thw0), f'{self.img_hw0[i]} should the same as {thw0}.' - assert (self.img_hw[i] == thw), f'{self.img_hw[i]} should the same as {thw}.' + np.save(self.cache_directory+"/"+str2md5(prefix+str(parent_path))+"_"+str(i)+".npy",img) else: self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # img, hw_original, hw_resized = load_image(self, i) gb += self.imgs[i].nbytes @@ -644,7 +642,7 @@ def load_image(self, index, no_cache=False): if img is None: # not cached if no_cache == False and self.cache_on_disk: parent_path = Path(self.img_files[index]).parent - img = np.load(self.cache_directory+"/"+str2md5(str(parent_path)) + "_" + str(index)+".npy") + img = np.load(self.cache_directory+"/"+str2md5(self.prefix+str(parent_path)) + "_" + str(index)+".npy") return img, self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized else: path = self.img_files[index] From f036037ff64194bf5dbcd80c205b32dc56b12627 Mon Sep 17 00:00:00 2001 From: Junji Hashimoto Date: Mon, 2 Aug 2021 22:35:26 +0900 Subject: [PATCH 09/16] Update cache-function on disk --- train.py | 8 +++---- utils/datasets.py | 59 ++++++++++++++++++++--------------------------- 2 files changed, 28 insertions(+), 39 deletions(-) diff --git a/train.py b/train.py index 01382f57bcdd..0d4e2ca6c719 100644 --- a/train.py +++ b/train.py @@ -199,7 +199,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Trainloader train_loader, dataset = create_dataloader(train_path, imgsz, batch_size // WORLD_SIZE, gs, single_cls, - hyp=hyp, augment=True, cache=opt.cache_images, cache_on_disk=opt.cache_on_disk, cache_directory=opt.cache_directory, rect=opt.rect, rank=RANK, + hyp=hyp, augment=True, cache_device=opt.cache, rect=opt.rect, rank=RANK, workers=workers, image_weights=opt.image_weights, quad=opt.quad, prefix=colorstr('train: ')) mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class @@ -209,7 +209,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Process 0 if RANK in [-1, 0]: val_loader = create_dataloader(val_path, imgsz, batch_size // WORLD_SIZE * 2, gs, single_cls, - hyp=hyp, cache=opt.cache_images and not noval, cache_on_disk=opt.cache_on_disk, cache_directory=opt.cache_directory, rect=True, rank=-1, + hyp=hyp, cache_device= ('' if noval else opt.cache), rect=True, rank=-1, workers=workers, pad=0.5, prefix=colorstr('val: '))[0] @@ -439,9 +439,7 @@ def parse_opt(known=False): parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check') parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations') parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') - parser.add_argument('--cache-images', action='store_true', help='cache images for faster training') - parser.add_argument('--cache-on-disk', action='store_true', help='cache images on disk, and use the --cache-directory option together') - parser.add_argument('--cache-directory', type=str, default='', help='A directory for cache, and it is not available by default') + parser.add_argument('--cache', type=str, default='', help='Select a device 
for cache. In default, cache is not used. When you use cache, set ram or disk') parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training') parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%') diff --git a/utils/datasets.py b/utils/datasets.py index 057521c019d2..2c073d66a745 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -21,6 +21,8 @@ from PIL import Image, ExifTags from torch.utils.data import Dataset from tqdm import tqdm +import psutil +import re from utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective from utils.general import check_requirements, check_file, check_dataset, xywh2xyxy, xywhn2xyxy, xyxy2xywhn, \ @@ -46,9 +48,6 @@ def get_hash(paths): h.update(''.join(paths).encode()) # hash paths return h.hexdigest() # return hash -def str2md5(str_arg): - return hashlib.md5(str_arg.encode()).hexdigest() - def exif_size(img): # Returns exif-corrected PIL size s = img.size # (width, height) @@ -90,7 +89,7 @@ def exif_transpose(image): return image -def create_dataloader(path, imgsz, batch_size, stride, single_cls=False, hyp=None, augment=False, cache=False, cache_on_disk=False, cache_directory="", pad=0.0, +def create_dataloader(path, imgsz, batch_size, stride, single_cls=False, hyp=None, augment=False, cache_device='', pad=0.0, rect=False, rank=-1, workers=8, image_weights=False, quad=False, prefix=''): # Make sure only the first process in DDP process the dataset first, and the following others can use the cache with torch_distributed_zero_first(rank): @@ -98,9 +97,7 @@ def create_dataloader(path, imgsz, batch_size, stride, single_cls=False, hyp=Non augment=augment, # augment images hyp=hyp, # augmentation hyperparameters rect=rect, # rectangular training - cache_images=cache, - cache_on_disk=cache_on_disk, - cache_directory=cache_directory, + cache_device=cache_device, single_cls=single_cls, stride=int(stride), pad=pad, @@ -365,7 +362,7 @@ def img2label_paths(img_paths): class LoadImagesAndLabels(Dataset): # for training/testing def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False, - cache_images=False, cache_on_disk=False, cache_directory="", single_cls=False, stride=32, pad=0.0, prefix=''): + cache_device='', single_cls=False, stride=32, pad=0.0, prefix=''): self.img_size = img_size self.augment = augment self.hyp = hyp @@ -376,10 +373,12 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r self.stride = stride self.path = path self.albumentations = Albumentations() if augment else None - self.cache_on_disk = cache_on_disk - self.cache_directory = cache_directory + self.cache_device = cache_device # Use self.prefix as cache-key for on-disk-cache - self.prefix = prefix + self.prefix = re.sub(r'\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]', "", prefix).replace(' ','_') + + if cache_device != '' and cache_device != 'ram' and cache_device != 'disk': + raise Exception(f'{cache_device} is set in cache_device. 
It should be ram or disk.') try: f = [] # image files @@ -405,11 +404,10 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r # Check cache self.label_files = img2label_paths(self.img_files) # labels cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') - if cache_directory: - cache_dir = Path(cache_directory) + if cache_device == "disk": + cache_dir = Path(self.img_files[0]).parent / "images_npy" if not cache_dir.is_dir(): cache_dir.mkdir(parents=True) - cache_path = Path(cache_directory + "/label_files_"+hashlib.md5(self.label_files[0].encode()).hexdigest()).with_suffix('.cache') try: cache, exists = np.load(cache_path, allow_pickle=True).item(), True # load dict assert cache['version'] == 0.4 and cache['hash'] == get_hash(self.label_files + self.img_files) @@ -469,20 +467,23 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM) self.imgs = [None] * n - if cache_images: + if cache_device != '': gb = 0 # Gigabytes of cached images self.img_hw0, self.img_hw = [None] * n, [None] * n results = ThreadPool(NUM_THREADS).imap(lambda x: load_image(*x, no_cache=True), zip(repeat(self), range(n))) pbar = tqdm(enumerate(results), total=n) - for i, x in pbar: - if cache_on_disk: + if cache_device == "disk": + parent_path = Path(self.img_files[0]).parent / "images_npy" + disk = psutil.disk_usage(parent_path) + for i, x in pbar: img, self.img_hw0[i], self.img_hw[i] = x # img, hw_original, hw_resized = load_image(self, i) - parent_path = Path(self.img_files[i]).parent - np.save(self.cache_directory+"/"+str2md5(prefix+str(parent_path))+"_"+str(i)+".npy",img) - else: + np.save(parent_path / (self.prefix + str(i)+".npy"),img) + pbar.desc = f'{prefix}Disk usage for cache({disk.used / 1E9:.1f}GB / {disk.total / 1E9:.1f}GB = {disk.percent}%)' + else: + for i, x in pbar: self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # img, hw_original, hw_resized = load_image(self, i) gb += self.imgs[i].nbytes - pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB)' + pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB)' pbar.close() def cache_labels(self, path=Path('./labels.cache'), prefix=''): @@ -640,9 +641,9 @@ def load_image(self, index, no_cache=False): # loads 1 image from dataset, returns img, original hw, resized hw img = self.imgs[index] if img is None: # not cached - if no_cache == False and self.cache_on_disk: + if no_cache == False and self.cache_device == "disk": parent_path = Path(self.img_files[index]).parent - img = np.load(self.cache_directory+"/"+str2md5(self.prefix+str(parent_path)) + "_" + str(index)+".npy") + img = np.load(parent_path / "images_npy" / (self.prefix + str(index) + ".npy")) return img, self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized else: path = self.img_files[index] @@ -684,13 +685,6 @@ def load_mosaic(self, index): x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h) x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h) - x2b_t = min(x2b, w) - y2b_t = min(y2b, h) - x2a = x2a - (x2b - x2b_t) - y2a = y2a - (y2b - y2b_t) - x2b = x2b_t - y2b = y2b_t - img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] padw = x1a - x1b padh = y1a - y1b @@ -953,10 +947,7 @@ def unzip(path): x = [] dataset = LoadImagesAndLabels(data[split], augment=False, rect=True) # load dataset if split == 'train': - if cache_directory: - cache_path = 
Path(cache_directory + "/label_files_"+hashlib.md5(self.dataset.label_files[0].encode()).hexdigest()).with_suffix('.cache') - else: - cache_path = Path(dataset.label_files[0]).parent.with_suffix('.cache') # *.cache path + cache_path = Path(dataset.label_files[0]).parent.with_suffix('.cache') # *.cache path for label in tqdm(dataset.labels, total=dataset.n, desc='Statistics'): x.append(np.bincount(label[:, 0].astype(int), minlength=nc)) x = np.array(x) # shape(128x80) From 73ea5a6209a132d7534370988615dee92ae18705 Mon Sep 17 00:00:00 2001 From: Junji Hashimoto Date: Mon, 2 Aug 2021 22:47:32 +0900 Subject: [PATCH 10/16] Add psutil in requirements.txt --- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index f1629eafc65a..c328b6d23b90 100755 --- a/requirements.txt +++ b/requirements.txt @@ -10,6 +10,7 @@ scipy>=1.4.1 torch>=1.7.0 torchvision>=0.8.1 tqdm>=4.41.0 +psutil # logging ------------------------------------- tensorboard>=2.4.1 From d543b4264ccdfae2c162339d3c1c72f77f1ba565 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 2 Aug 2021 16:53:01 +0200 Subject: [PATCH 11/16] Update train.py --- train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/train.py b/train.py index 0d4e2ca6c719..ef7646538fa3 100644 --- a/train.py +++ b/train.py @@ -439,7 +439,7 @@ def parse_opt(known=False): parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check') parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations') parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') - parser.add_argument('--cache', type=str, default='', help='Select a device for cache. In default, cache is not used. When you use cache, set ram or disk') + parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"') parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training') parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%') From 0419df1ed93a9767190df5b9abfaa4eba8c2bcd7 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 2 Aug 2021 17:10:13 +0200 Subject: [PATCH 12/16] Cleanup1 --- train.py | 4 ++-- utils/datasets.py | 22 +++++++++------------- 2 files changed, 11 insertions(+), 15 deletions(-) diff --git a/train.py b/train.py index 713a4d67ca68..a9e5558042a5 100644 --- a/train.py +++ b/train.py @@ -201,7 +201,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Trainloader train_loader, dataset = create_dataloader(train_path, imgsz, batch_size // WORLD_SIZE, gs, single_cls, - hyp=hyp, augment=True, cache_device=opt.cache, rect=opt.rect, rank=RANK, + hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=RANK, workers=workers, image_weights=opt.image_weights, quad=opt.quad, prefix=colorstr('train: ')) mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class @@ -211,7 +211,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Process 0 if RANK in [-1, 0]: val_loader = create_dataloader(val_path, imgsz, batch_size // WORLD_SIZE * 2, gs, single_cls, - hyp=hyp, cache_device= ('' if noval else opt.cache), rect=True, rank=-1, + hyp=hyp, cache=opt.cache_images and not noval, rect=True, rank=-1, workers=workers, pad=0.5, prefix=colorstr('val: '))[0] diff --git a/utils/datasets.py b/utils/datasets.py index afd1d2bb2d5e..b3a089f817aa 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -89,7 +89,7 @@ def exif_transpose(image): return image -def create_dataloader(path, imgsz, batch_size, stride, single_cls=False, hyp=None, augment=False, cache_device='', pad=0.0, +def create_dataloader(path, imgsz, batch_size, stride, single_cls=False, hyp=None, augment=False, cache=False, pad=0.0, rect=False, rank=-1, workers=8, image_weights=False, quad=False, prefix=''): # Make sure only the first process in DDP process the dataset first, and the following others can use the cache with torch_distributed_zero_first(rank): @@ -97,7 +97,7 @@ def create_dataloader(path, imgsz, batch_size, stride, single_cls=False, hyp=Non augment=augment, # augment images hyp=hyp, # augmentation hyperparameters rect=rect, # rectangular training - cache_device=cache_device, + cache_images=cache, single_cls=single_cls, stride=int(stride), pad=pad, @@ -362,7 +362,7 @@ def img2label_paths(img_paths): class LoadImagesAndLabels(Dataset): # for training/testing def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False, - cache_device='', single_cls=False, stride=32, pad=0.0, prefix=''): + cache_images=False, single_cls=False, stride=32, pad=0.0, prefix=''): self.img_size = img_size self.augment = augment self.hyp = hyp @@ -373,12 +373,8 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r self.stride = stride self.path = path self.albumentations = Albumentations() if augment else None - self.cache_device = cache_device - # Use self.prefix as cache-key for on-disk-cache - self.prefix = re.sub(r'\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]', "", prefix).replace(' ','_') - - if cache_device != '' and cache_device != 'ram' and cache_device != 'disk': - raise Exception(f'{cache_device} is set in cache_device. 
It should be ram or disk.') + self.cache_images = cache_images + self.prefix = prefix try: f = [] # image files @@ -404,7 +400,7 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r # Check cache self.label_files = img2label_paths(self.img_files) # labels cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') - if cache_device == "disk": + if cache_images == "disk": cache_dir = Path(self.img_files[0]).parent / "images_npy" if not cache_dir.is_dir(): cache_dir.mkdir(parents=True) @@ -467,12 +463,12 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM) self.imgs = [None] * n - if cache_device != '': + if cache_images: gb = 0 # Gigabytes of cached images self.img_hw0, self.img_hw = [None] * n, [None] * n results = ThreadPool(NUM_THREADS).imap(lambda x: load_image(*x, no_cache=True), zip(repeat(self), range(n))) pbar = tqdm(enumerate(results), total=n) - if cache_device == "disk": + if cache_images == "disk": parent_path = Path(self.img_files[0]).parent / "images_npy" disk = psutil.disk_usage(parent_path) for i, x in pbar: @@ -641,7 +637,7 @@ def load_image(self, index, no_cache=False): # loads 1 image from dataset, returns img, original hw, resized hw img = self.imgs[index] if img is None: # not cached - if no_cache == False and self.cache_device == "disk": + if no_cache == False and self.cache_images == "disk": parent_path = Path(self.img_files[index]).parent img = np.load(parent_path / "images_npy" / (self.prefix + str(index) + ".npy")) return img, self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized From fa7e4ceec17f890c15fd25eec51583dc92b71b2c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 2 Aug 2021 18:07:53 +0200 Subject: [PATCH 13/16] Cleanup2 --- requirements.txt | 1 - train.py | 4 +-- utils/datasets.py | 73 +++++++++++++++++++++-------------------------- 3 files changed, 35 insertions(+), 43 deletions(-) diff --git a/requirements.txt b/requirements.txt index c328b6d23b90..f1629eafc65a 100755 --- a/requirements.txt +++ b/requirements.txt @@ -10,7 +10,6 @@ scipy>=1.4.1 torch>=1.7.0 torchvision>=0.8.1 tqdm>=4.41.0 -psutil # logging ------------------------------------- tensorboard>=2.4.1 diff --git a/train.py b/train.py index a9e5558042a5..d0b86d7ec8a9 100644 --- a/train.py +++ b/train.py @@ -201,7 +201,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Trainloader train_loader, dataset = create_dataloader(train_path, imgsz, batch_size // WORLD_SIZE, gs, single_cls, - hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=RANK, + hyp=hyp, augment=True, cache=opt.cache, rect=opt.rect, rank=RANK, workers=workers, image_weights=opt.image_weights, quad=opt.quad, prefix=colorstr('train: ')) mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class @@ -211,7 +211,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Process 0 if RANK in [-1, 0]: val_loader = create_dataloader(val_path, imgsz, batch_size // WORLD_SIZE * 2, gs, single_cls, - hyp=hyp, cache=opt.cache_images and not noval, rect=True, rank=-1, + hyp=hyp, cache=None if noval else opt.cache, rect=True, rank=-1, workers=workers, pad=0.5, prefix=colorstr('val: '))[0] diff --git a/utils/datasets.py b/utils/datasets.py index b3a089f817aa..8218afded92c 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -21,8 +21,6 @@ from PIL import Image, ExifTags from torch.utils.data import 
Dataset from tqdm import tqdm -import psutil -import re from utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective from utils.general import check_requirements, check_file, check_dataset, xywh2xyxy, xywhn2xyxy, xyxy2xywhn, \ @@ -48,6 +46,7 @@ def get_hash(paths): h.update(''.join(paths).encode()) # hash paths return h.hexdigest() # return hash + def exif_size(img): # Returns exif-corrected PIL size s = img.size # (width, height) @@ -373,8 +372,6 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r self.stride = stride self.path = path self.albumentations = Albumentations() if augment else None - self.cache_images = cache_images - self.prefix = prefix try: f = [] # image files @@ -400,10 +397,6 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r # Check cache self.label_files = img2label_paths(self.img_files) # labels cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') - if cache_images == "disk": - cache_dir = Path(self.img_files[0]).parent / "images_npy" - if not cache_dir.is_dir(): - cache_dir.mkdir(parents=True) try: cache, exists = np.load(cache_path, allow_pickle=True).item(), True # load dict assert cache['version'] == 0.4 and cache['hash'] == get_hash(self.label_files + self.img_files) @@ -462,24 +455,25 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM) - self.imgs = [None] * n + self.imgs, self.img_npy = [None] * n, [None] * n if cache_images: + if cache_images == 'disk': + self.im_cache_dir = Path(Path(self.img_files[0]).parent.as_posix() + '_npy') + self.img_npy = [self.im_cache_dir / Path(f).with_suffix('.npy').name for f in self.img_files] + self.im_cache_dir.mkdir(parents=True, exist_ok=True) gb = 0 # Gigabytes of cached images self.img_hw0, self.img_hw = [None] * n, [None] * n - results = ThreadPool(NUM_THREADS).imap(lambda x: load_image(*x, no_cache=True), zip(repeat(self), range(n))) + results = ThreadPool(NUM_THREADS).imap(lambda x: load_image(*x), zip(repeat(self), range(n))) pbar = tqdm(enumerate(results), total=n) - if cache_images == "disk": - parent_path = Path(self.img_files[0]).parent / "images_npy" - disk = psutil.disk_usage(parent_path) - for i, x in pbar: - img, self.img_hw0[i], self.img_hw[i] = x # img, hw_original, hw_resized = load_image(self, i) - np.save(parent_path / (self.prefix + str(i)+".npy"),img) - pbar.desc = f'{prefix}Disk usage for cache({disk.used / 1E9:.1f}GB / {disk.total / 1E9:.1f}GB = {disk.percent}%)' - else: - for i, x in pbar: - self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # img, hw_original, hw_resized = load_image(self, i) + for i, x in pbar: + im, self.img_hw0[i], self.img_hw[i] = x # img, hw_original, hw_resized = load_image(self, i) + if cache_images == 'disk': + np.save(self.img_npy[i].as_posix(), im) + gb += self.img_npy[i].stat().st_size + else: + self.imgs[i] = im gb += self.imgs[i].nbytes - pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB)' + pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB {cache_images})' pbar.close() def cache_labels(self, path=Path('./labels.cache'), prefix=''): @@ -633,26 +627,25 @@ def collate_fn4(batch): # Ancillary functions 
-------------------------------------------------------------------------------------------------- -def load_image(self, index, no_cache=False): - # loads 1 image from dataset, returns img, original hw, resized hw - img = self.imgs[index] - if img is None: # not cached - if no_cache == False and self.cache_images == "disk": - parent_path = Path(self.img_files[index]).parent - img = np.load(parent_path / "images_npy" / (self.prefix + str(index) + ".npy")) - return img, self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized - else: - path = self.img_files[index] - img = cv2.imread(path) # BGR - assert img is not None, 'Image Not Found ' + path - h0, w0 = img.shape[:2] # orig hw - r = self.img_size / max(h0, w0) # ratio - if r != 1: # if sizes are not equal - img = cv2.resize(img, (int(w0 * r), int(h0 * r)), - interpolation=cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR) - return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized +def load_image(self, i): + # loads 1 image from dataset index 'i', returns im, original hw, resized hw + im = self.imgs[i] + if im is None: # not cached in ram + npy = self.img_npy[i] + if npy and npy.exists(): # load npy + im = np.load(npy) + else: # read image + path = self.img_files[i] + im = cv2.imread(path) # BGR + assert im is not None, 'Image Not Found ' + path + h0, w0 = im.shape[:2] # orig hw + r = self.img_size / max(h0, w0) # ratio + if r != 1: # if sizes are not equal + im = cv2.resize(im, (int(w0 * r), int(h0 * r)), + interpolation=cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR) + return im, (h0, w0), im.shape[:2] # im, hw_original, hw_resized else: - return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized + return self.imgs[i], self.img_hw0[i], self.img_hw[i] # im, hw_original, hw_resized def load_mosaic(self, index): From 7c51880b29d3b8feb23cecf040d796d26cb428ac Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 2 Aug 2021 18:17:12 +0200 Subject: [PATCH 14/16] Skip existing npy --- utils/datasets.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index 8218afded92c..1c780cdbac4b 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -466,12 +466,12 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r results = ThreadPool(NUM_THREADS).imap(lambda x: load_image(*x), zip(repeat(self), range(n))) pbar = tqdm(enumerate(results), total=n) for i, x in pbar: - im, self.img_hw0[i], self.img_hw[i] = x # img, hw_original, hw_resized = load_image(self, i) if cache_images == 'disk': - np.save(self.img_npy[i].as_posix(), im) + if not self.img_npy[i].exists(): + np.save(self.img_npy[i].as_posix(), x[0]) gb += self.img_npy[i].stat().st_size else: - self.imgs[i] = im + self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # im, hw_orig, hw_resized = load_image(self, i) gb += self.imgs[i].nbytes pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB {cache_images})' pbar.close() From a5bc8d88b820824fe3bf84782ae653580132fd74 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 2 Aug 2021 18:29:53 +0200 Subject: [PATCH 15/16] Include re-space --- train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/train.py b/train.py index d0b86d7ec8a9..34bd8e73c290 100644 --- a/train.py +++ b/train.py @@ -389,7 +389,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # end epoch 
---------------------------------------------------------------------------------------------------- # end training ----------------------------------------------------------------------------------------------------- if RANK in [-1, 0]: - LOGGER.info(f'{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.\n') + LOGGER.info(f'\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.') if not evolve: if is_coco: # COCO dataset for m in [last, best] if best.exists() else [last]: # speed, mAP tests From dd5eef2e0d54aa634e8a3db093d31bae3cd09476 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 2 Aug 2021 18:35:21 +0200 Subject: [PATCH 16/16] Export return character fix --- export.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/export.py b/export.py index 83e293b72e73..cec85958b4a9 100644 --- a/export.py +++ b/export.py @@ -156,8 +156,8 @@ def run(weights='./yolov5s.pt', # weights path # Finish print(f'\nExport complete ({time.time() - t:.2f}s)' - f"Results saved to {colorstr('bold', file.parent.resolve())}\n" - f'Visualize with https://netron.app') + f"\nResults saved to {colorstr('bold', file.parent.resolve())}" + f'\nVisualize with https://netron.app') def parse_opt():
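Usage sketch for the caching interface that the series above converges on (PATCH 11-14). On the CLI side, per the final help text, `python train.py --cache` caches decoded images in RAM (nargs='?', const='ram') and `python train.py --cache disk` saves them as .npy files instead. The Python call below is illustrative only: the dataset path, image size, and batch size are placeholder values, not taken from the patches.

    from utils.datasets import create_dataloader

    # Placeholder arguments for illustration; adjust to your own dataset.
    loader, dataset = create_dataloader(
        'coco128/images/train2017',   # assumed image directory
        imgsz=640, batch_size=16, stride=32,
        cache='disk',                 # 'ram' keeps images in self.imgs; 'disk' writes *.npy
        prefix='train: ')

    # With cache='disk', resized images are written once into a sibling
    # 'coco128/images/train2017_npy/' directory (existing .npy files are skipped),
    # and load_image() reloads them from there on subsequent epochs.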