From 73a92dc1b6cb04c8f56a0f458d9aaaacf26402c7 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 24 May 2021 12:42:36 +0200 Subject: [PATCH 001/757] Explicit `git clone` master (#3311) --- utils/aws/userdata.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/aws/userdata.sh b/utils/aws/userdata.sh index 890606b76a06..5846fedb16f9 100644 --- a/utils/aws/userdata.sh +++ b/utils/aws/userdata.sh @@ -7,7 +7,7 @@ cd home/ubuntu if [ ! -d yolov5 ]; then echo "Running first-time script." # install dependencies, download COCO, pull Docker - git clone https://github.com/ultralytics/yolov5 && sudo chmod -R 777 yolov5 + git clone https://github.com/ultralytics/yolov5 -b master && sudo chmod -R 777 yolov5 cd yolov5 bash data/scripts/get_coco.sh && echo "Data done." & sudo docker pull ultralytics/yolov5:latest && echo "Docker done." & From 61ea23c3fe9b86e476cc1c79a12c03ebb3636254 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 24 May 2021 13:23:09 +0200 Subject: [PATCH 002/757] Implement `@torch.no_grad()` decorator (#3312) * `@torch.no_grad()` decorator * Update detect.py --- detect.py | 12 ++++++------ test.py | 32 ++++++++++++++++---------------- 2 files changed, 22 insertions(+), 22 deletions(-) diff --git a/detect.py b/detect.py index 732fec698006..c6b76d981541 100644 --- a/detect.py +++ b/detect.py @@ -14,6 +14,7 @@ from utils.torch_utils import select_device, load_classifier, time_synchronized +@torch.no_grad() def detect(opt): source, weights, view_img, save_txt, imgsz = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size save_img = not opt.nosave and not source.endswith('.txt') # save inference images @@ -175,10 +176,9 @@ def detect(opt): print(opt) check_requirements(exclude=('tensorboard', 'pycocotools', 'thop')) - with torch.no_grad(): - if opt.update: # update all models (to fix SourceChangeWarning) - for opt.weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']: - detect(opt=opt) - strip_optimizer(opt.weights) - else: + if opt.update: # update all models (to fix SourceChangeWarning) + for opt.weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']: detect(opt=opt) + strip_optimizer(opt.weights) + else: + detect(opt=opt) diff --git a/test.py b/test.py index f8936d3b4f9d..0716c5d8b93c 100644 --- a/test.py +++ b/test.py @@ -18,6 +18,7 @@ from utils.torch_utils import select_device, time_synchronized +@torch.no_grad() def test(data, weights=None, batch_size=32, @@ -105,22 +106,21 @@ def test(data, targets = targets.to(device) nb, _, height, width = img.shape # batch size, channels, height, width - with torch.no_grad(): - # Run model - t = time_synchronized() - out, train_out = model(img, augment=augment) # inference and training outputs - t0 += time_synchronized() - t - - # Compute loss - if compute_loss: - loss += compute_loss([x.float() for x in train_out], targets)[1][:3] # box, obj, cls - - # Run NMS - targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels - lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling - t = time_synchronized() - out = non_max_suppression(out, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls) - t1 += time_synchronized() - t + # Run model + t = time_synchronized() + out, train_out = model(img, augment=augment) # inference and training outputs + t0 += time_synchronized() - t + + # Compute loss + if compute_loss: + loss += compute_loss([x.float() for x in train_out], targets)[1][:3] # box, obj, 
cls + + # Run NMS + targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels + lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling + t = time_synchronized() + out = non_max_suppression(out, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls) + t1 += time_synchronized() - t # Statistics per image for si, pred in enumerate(out): From 407dc5008e47b1aad5ce69f0c91b4f1ec321dd7f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 24 May 2021 17:17:32 +0200 Subject: [PATCH 003/757] Update README.md (#3320) --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index b25c6fca983c..a638657b313b 100755 --- a/README.md +++ b/README.md @@ -162,9 +162,9 @@ Ultralytics is a U.S.-based particle physics and AI startup with over 6 years of - **Edge AI** integrated into custom iOS and Android apps for realtime **30 FPS video inference.** - **Custom data training**, hyperparameter evolution, and model exportation to any destination. -For business inquiries and professional support requests please visit us at https://www.ultralytics.com. +For business inquiries and professional support requests please visit us at https://ultralytics.com. ## Contact -**Issues should be raised directly in the repository.** For business inquiries or professional support requests please visit https://www.ultralytics.com or email Glenn Jocher at glenn.jocher@ultralytics.com. +**Issues should be raised directly in the repository.** For business inquiries or professional support requests please visit https://ultralytics.com or email Glenn Jocher at glenn.jocher@ultralytics.com. From aad99b63d6ac63278021a66f8a096e5232ed24b0 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 25 May 2021 11:45:24 +0200 Subject: [PATCH 004/757] TensorBoard DP/DDP graph fix (#3325) --- train.py | 6 +++--- utils/torch_utils.py | 6 ++++++ 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/train.py b/train.py index c8d617fc228f..3e8d5075aef1 100644 --- a/train.py +++ b/train.py @@ -32,7 +32,7 @@ from utils.google_utils import attempt_download from utils.loss import ComputeLoss from utils.plots import plot_images, plot_labels, plot_results, plot_evolution -from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, is_parallel +from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, de_parallel from utils.wandb_logging.wandb_utils import WandbLogger, check_wandb_resume logger = logging.getLogger(__name__) @@ -331,7 +331,7 @@ def train(hyp, opt, device, tb_writer=None): f = save_dir / f'train_batch{ni}.jpg' # filename Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start() if tb_writer: - tb_writer.add_graph(torch.jit.trace(model, imgs, strict=False), []) # add model graph + tb_writer.add_graph(torch.jit.trace(de_parallel(model), imgs, strict=False), []) # model graph # tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch) elif plots and ni == 10 and wandb_logger.wandb: wandb_logger.log({"Mosaics": [wandb_logger.wandb.Image(str(x), caption=x.name) for x in @@ -390,7 +390,7 @@ def train(hyp, opt, device, tb_writer=None): ckpt = {'epoch': epoch, 'best_fitness': best_fitness, 'training_results': results_file.read_text(), - 'model': deepcopy(model.module if is_parallel(model) else model).half(), + 'model': deepcopy(de_parallel(model)).half(), 'ema': deepcopy(ema.ema).half(), 
'updates': ema.updates, 'optimizer': optimizer.state_dict(), diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 5074fa95ae4b..aa54c3cf561e 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -134,9 +134,15 @@ def profile(x, ops, n=100, device=None): def is_parallel(model): + # Returns True if model is of type DP or DDP return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel) +def de_parallel(model): + # De-parallelize a model: returns single-GPU model if model is of type DP or DDP + return model.module if is_parallel(model) else model + + def intersect_dicts(da, db, exclude=()): # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape} From 1f8d716ec943d9265cd33422d29560716e8b483c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 26 May 2021 12:06:08 +0200 Subject: [PATCH 005/757] yolo.py header (#3347) --- models/yolo.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/models/yolo.py b/models/yolo.py index 06b80032d3d3..2844cd0410e0 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -1,4 +1,8 @@ -# YOLOv5 YOLO-specific modules +"""YOLOv5-specific modules + +Usage: + $ python path/to/models/yolo.py --cfg yolov5s.yaml +""" import argparse import logging From c6b5bfca8592c3426ce0b5f65e559c45d42ff378 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 26 May 2021 14:26:52 +0200 Subject: [PATCH 006/757] Updated cache v0.2 with `hashlib` (#3350) * Update cache v0.2 to include parent hash Possible fix for https://github.com/ultralytics/yolov5/issues/3349 * Update datasets.py --- utils/datasets.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index 36416b14e138..882c7764c4ab 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -1,6 +1,7 @@ # Dataset utils and dataloaders import glob +import hashlib import logging import math import os @@ -36,9 +37,12 @@ break -def get_hash(files): - # Returns a single hash value of a list of files - return sum(os.path.getsize(f) for f in files if os.path.isfile(f)) +def get_hash(paths): + # Returns a single hash value of a list of paths (files or dirs) + size = sum(os.path.getsize(p) for p in paths if os.path.exists(p)) # sizes + h = hashlib.md5(str(size).encode()) # hash sizes + h.update(''.join(paths).encode()) # hash paths + return h.hexdigest() # return hash def exif_size(img): @@ -383,7 +387,7 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') # cached labels if cache_path.is_file(): cache, exists = torch.load(cache_path), True # load - if cache['hash'] != get_hash(self.label_files + self.img_files) or 'version' not in cache: # changed + if cache['hash'] != get_hash(self.label_files + self.img_files): # changed cache, exists = self.cache_labels(cache_path, prefix), False # re-cache else: cache, exists = self.cache_labels(cache_path, prefix), False # cache @@ -501,9 +505,9 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): x['hash'] = get_hash(self.label_files + self.img_files) x['results'] = nf, nm, ne, nc, i + 1 - x['version'] = 0.1 # cache version + x['version'] = 0.2 # cache version try: - torch.save(x, path) # save for next time + torch.save(x, path) # save cache for next time logging.info(f'{prefix}New 
cache created: {path}') except Exception as e: logging.info(f'{prefix}WARNING: Cache directory {path.parent} is not writeable: {e}') # path not writeable From 2435bfe8968cd80f3caa5ba46f4ec0fe3ad0aa2b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 26 May 2021 15:51:49 +0200 Subject: [PATCH 007/757] Add URL download to check_file() (#3330) * Add URL file download to check_file() * cleanup * pathlib bug fix --- utils/general.py | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/utils/general.py b/utils/general.py index 9a882715f0ad..006e64859f32 100755 --- a/utils/general.py +++ b/utils/general.py @@ -173,12 +173,19 @@ def check_imshow(): def check_file(file): - # Search for file if not found - if Path(file).is_file() or file == '': + # Search/download file (if necessary) and return path + file = str(file) # convert to str() + if Path(file).is_file() or file == '': # exists return file - else: + elif file.startswith(('http://', 'https://')): # download + url, file = file, Path(file).name + print(f'Downloading {url} to {file}...') + torch.hub.download_url_to_file(url, file) + assert Path(file).exists() and Path(file).stat().st_size > 0, f'File download failed: {url}' # check + return file + else: # search files = glob.glob('./**/' + file, recursive=True) # find file - assert len(files), f'File Not Found: {file}' # assert file was found + assert len(files), f'File not found: {file}' # assert file was found assert len(files) == 1, f"Multiple files match '{file}', specify exact path: {files}" # assert unique return files[0] # return file From ef4d53818d720f4c1d742125fbb48eafe481fd21 Mon Sep 17 00:00:00 2001 From: WangChaofeng Date: Thu, 27 May 2021 20:10:14 +0800 Subject: [PATCH 008/757] ONNX export in .train() mode fix (#3362) --- models/export.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/models/export.py b/models/export.py index 65721f65d888..0d1147938e37 100644 --- a/models/export.py +++ b/models/export.py @@ -97,6 +97,8 @@ print(f'{prefix} starting export with onnx {onnx.__version__}...') f = opt.weights.replace('.pt', '.onnx') # filename torch.onnx.export(model, img, f, verbose=False, opset_version=opt.opset_version, input_names=['images'], + training=torch.onnx.TrainingMode.TRAINING if opt.train else torch.onnx.TrainingMode.EVAL, + do_constant_folding=not opt.train, dynamic_axes={'images': {0: 'batch', 2: 'height', 3: 'width'}, # size(1,3,640,640) 'output': {0: 'batch', 2: 'y', 3: 'x'}} if opt.dynamic else None) From 4d4a2b05208ec82d11d43767a6e8df2c35de85ea Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 27 May 2021 14:31:26 +0200 Subject: [PATCH 009/757] Ignore blank lines in `*.txt` labels (#3366) Fix for https://github.com/ultralytics/yolov5/issues/958#issuecomment-849512083 --- utils/datasets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/datasets.py b/utils/datasets.py index 882c7764c4ab..7dd181400da5 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -474,7 +474,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): if os.path.isfile(lb_file): nf += 1 # label found with open(lb_file, 'r') as f: - l = [x.split() for x in f.read().strip().splitlines()] + l = [x.split() for x in f.read().strip().splitlines() if len(x)] if any([len(x) > 8 for x in l]): # is segment classes = np.array([x[0] for x in l], dtype=np.float32) segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in l] # (cls, xy1...) 
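Note on the label-parsing change above: the added `if len(x)` guard drops the empty strings that blank or doubled newlines produce, so they never reach `x.split()` or the downstream `np.array` conversion. A minimal standalone sketch of the same filtering, using hypothetical label-file contents rather than anything from the repository:

```python
import numpy as np

label_text = "0 0.5 0.5 0.2 0.3\n\n1 0.1 0.2 0.05 0.05\n"  # hypothetical *.txt labels with a blank line

# keep only non-empty lines, then split each row into [class, x, y, w, h] fields
l = [x.split() for x in label_text.strip().splitlines() if len(x)]
l = np.array(l, dtype=np.float32)
print(l.shape)  # (2, 5) -- the blank line contributes no row
```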
From bb131238aabf94fe619199bc2bee81be70989776 Mon Sep 17 00:00:00 2001 From: Piotr Skalski Date: Thu, 27 May 2021 17:01:36 +0200 Subject: [PATCH 010/757] update ci-testing.yml (#3322) * update ci-testing.yml * update greetings.yml * bring back os matrix --- .github/workflows/ci-testing.yml | 6 ++---- .github/workflows/greetings.yml | 2 +- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index df508474a955..bb8b173cdb31 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -2,12 +2,10 @@ name: CI CPU testing on: # https://help.github.com/en/actions/reference/events-that-trigger-workflows push: - branches: [ master ] + branches: [ master, develop ] pull_request: # The branches below must be a subset of the branches above - branches: [ master ] - schedule: - - cron: '0 0 * * *' # Runs at 00:00 UTC every day + branches: [ master, develop ] jobs: cpu-tests: diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index ee472297107e..4e502fe9af7b 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -16,7 +16,7 @@ jobs: git remote add upstream https://github.com/ultralytics/yolov5.git git fetch upstream git checkout feature # <----- replace 'feature' with local branch name - git rebase upstream/master + git rebase upstream/develop git push -u origin -f ``` - ✅ Verify all Continuous Integration (CI) **checks are passing**. From 3fea06838468a669d189c4498f999be2d4b3c0ce Mon Sep 17 00:00:00 2001 From: Piotr Skalski Date: Thu, 27 May 2021 17:01:36 +0200 Subject: [PATCH 011/757] update ci-testing.yml (#3322) * update ci-testing.yml * update greetings.yml * bring back os matrix --- .github/workflows/ci-testing.yml | 6 ++---- .github/workflows/greetings.yml | 2 +- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index df508474a955..bb8b173cdb31 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -2,12 +2,10 @@ name: CI CPU testing on: # https://help.github.com/en/actions/reference/events-that-trigger-workflows push: - branches: [ master ] + branches: [ master, develop ] pull_request: # The branches below must be a subset of the branches above - branches: [ master ] - schedule: - - cron: '0 0 * * *' # Runs at 00:00 UTC every day + branches: [ master, develop ] jobs: cpu-tests: diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index ee472297107e..4e502fe9af7b 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -16,7 +16,7 @@ jobs: git remote add upstream https://github.com/ultralytics/yolov5.git git fetch upstream git checkout feature # <----- replace 'feature' with local branch name - git rebase upstream/master + git rebase upstream/develop git push -u origin -f ``` - ✅ Verify all Continuous Integration (CI) **checks are passing**. From ba6f3f974bfc4a2968964dbe5eedea73c9f5efcb Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 28 May 2021 15:18:44 +0200 Subject: [PATCH 012/757] Enable direct `--weights URL` definition (#3373) * Enable direct `--weights URL` definition @KalenMike this PR will enable direct --weights URL definition. 
Example use case: ``` python train.py --weights https://storage.googleapis.com/bucket/dir/model.pt ``` * cleanup * bug fixes * weights = attempt_download(weights) * Update experimental.py * Update hubconf.py * return bug fix * comment mirror * min_bytes --- hubconf.py | 3 +-- models/experimental.py | 3 +-- train.py | 2 +- utils/google_utils.py | 53 ++++++++++++++++++++++++++---------------- 4 files changed, 36 insertions(+), 25 deletions(-) diff --git a/hubconf.py b/hubconf.py index f74e70c85a65..40bbb1ed0826 100644 --- a/hubconf.py +++ b/hubconf.py @@ -41,8 +41,7 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo cfg = list((Path(__file__).parent / 'models').rglob(f'{name}.yaml'))[0] # model.yaml path model = Model(cfg, channels, classes) # create model if pretrained: - attempt_download(fname) # download if not found locally - ckpt = torch.load(fname, map_location=torch.device('cpu')) # load + ckpt = torch.load(attempt_download(fname), map_location=torch.device('cpu')) # load msd = model.state_dict() # model state_dict csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 csd = {k: v for k, v in csd.items() if msd[k].shape == v.shape} # filter diff --git a/models/experimental.py b/models/experimental.py index afa787907104..d316b18373c3 100644 --- a/models/experimental.py +++ b/models/experimental.py @@ -116,8 +116,7 @@ def attempt_load(weights, map_location=None, inplace=True): # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a model = Ensemble() for w in weights if isinstance(weights, list) else [weights]: - attempt_download(w) - ckpt = torch.load(w, map_location=map_location) # load + ckpt = torch.load(attempt_download(w), map_location=map_location) # load model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval()) # FP32 model # Compatibility updates diff --git a/train.py b/train.py index 3e8d5075aef1..b74cdb28be66 100644 --- a/train.py +++ b/train.py @@ -83,7 +83,7 @@ def train(hyp, opt, device, tb_writer=None): pretrained = weights.endswith('.pt') if pretrained: with torch_distributed_zero_first(rank): - attempt_download(weights) # download if not found locally + weights = attempt_download(weights) # download if not found locally ckpt = torch.load(weights, map_location=device) # load checkpoint model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create exclude = ['anchor'] if (opt.cfg or hyp.get('anchors')) and not opt.resume else [] # exclude keys diff --git a/utils/google_utils.py b/utils/google_utils.py index 63d3e5b212f3..ac5c54dba97f 100644 --- a/utils/google_utils.py +++ b/utils/google_utils.py @@ -16,11 +16,37 @@ def gsutil_getsize(url=''): return eval(s.split(' ')[0]) if len(s) else 0 # bytes +def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''): + # Attempts to download file from url or url2, checks and removes incomplete downloads < min_bytes + file = Path(file) + try: # GitHub + print(f'Downloading {url} to {file}...') + torch.hub.download_url_to_file(url, str(file)) + assert file.exists() and file.stat().st_size > min_bytes # check + except Exception as e: # GCP + file.unlink(missing_ok=True) # remove partial downloads + print(f'Download error: {e}\nRe-attempting {url2 or url} to {file}...') + os.system(f"curl -L '{url2 or url}' -o '{file}' --retry 3 -C -") # curl download, retry and resume on fail + finally: + if not file.exists() or file.stat().st_size < min_bytes: # check + 
file.unlink(missing_ok=True) # remove partial downloads + print(f'ERROR: Download failure: {error_msg or url}') + print('') + + def attempt_download(file, repo='ultralytics/yolov5'): # Attempt file download if does not exist file = Path(str(file).strip().replace("'", '')) if not file.exists(): + # URL specified + name = file.name + if str(file).startswith(('http:/', 'https:/')): # download + url = str(file).replace(':/', '://') # Pathlib turns :// -> :/ + safe_download(file=name, url=url, min_bytes=1E5) + return name + + # GitHub assets file.parent.mkdir(parents=True, exist_ok=True) # make parent dir (if required) try: response = requests.get(f'https://api.github.com/repos/{repo}/releases/latest').json() # github api @@ -34,27 +60,14 @@ def attempt_download(file, repo='ultralytics/yolov5'): except: tag = 'v5.0' # current release - name = file.name if name in assets: - msg = f'{file} missing, try downloading from https://github.com/{repo}/releases/' - redundant = False # second download option - try: # GitHub - url = f'https://github.com/{repo}/releases/download/{tag}/{name}' - print(f'Downloading {url} to {file}...') - torch.hub.download_url_to_file(url, file) - assert file.exists() and file.stat().st_size > 1E6 # check - except Exception as e: # GCP - print(f'Download error: {e}') - assert redundant, 'No secondary mirror' - url = f'https://storage.googleapis.com/{repo}/ckpt/{name}' - print(f'Downloading {url} to {file}...') - os.system(f"curl -L '{url}' -o '{file}' --retry 3 -C -") # curl download, retry and resume on fail - finally: - if not file.exists() or file.stat().st_size < 1E6: # check - file.unlink(missing_ok=True) # remove partial downloads - print(f'ERROR: Download failure: {msg}') - print('') - return + safe_download(file, + url=f'https://github.com/{repo}/releases/download/{tag}/{name}', + # url2=f'https://storage.googleapis.com/{repo}/ckpt/{name}', # backup url (optional) + min_bytes=1E5, + error_msg=f'{file} missing, try downloading from https://github.com/{repo}/releases/') + + return str(file) def gdrive_download(id='16TiPfZj7htmTyhntwcZyEEAejOUxuT6m', file='tmp.zip'): From 57f773b0ae2a2d477c1f85d07d37827f3ba82c6e Mon Sep 17 00:00:00 2001 From: Peretz Cohen Date: Sat, 29 May 2021 11:49:24 -0700 Subject: [PATCH 013/757] Update tutorial.ipynb (#3368) add Open in Kaggle badge --- tutorial.ipynb | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 3954feadfcb2..1bc9a8cda032 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -517,7 +517,8 @@ "colab_type": "text" }, "source": [ - "\"Open" + "\"Open", + "\"Kaggle\"" ] }, { @@ -1260,4 +1261,4 @@ "outputs": [] } ] -} \ No newline at end of file +} From 21a9607e00f1365b21d8c4bd81bdbf5fc0efea24 Mon Sep 17 00:00:00 2001 From: tudoulei <34886368+tudoulei@users.noreply.github.com> Date: Sun, 30 May 2021 03:12:01 +0800 Subject: [PATCH 014/757] `cv2.imread(img, -1)` for IMREAD_UNCHANGED (#3379) * Update datasets.py * comment Co-authored-by: Glenn Jocher --- utils/datasets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/datasets.py b/utils/datasets.py index 7dd181400da5..331df8ffd047 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -181,7 +181,7 @@ def __next__(self): else: # Read image self.count += 1 - img0 = cv2.imread(path) # BGR + img0 = cv2.imread(path, -1) # BGR (-1 is IMREAD_UNCHANGED) assert img0 is not None, 'Image Not Found ' + path print(f'image {self.count}/{self.nf} {path}: ', end='') From 4b52e19a61a39870fc4234da8906daa495def792 Mon 
Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 29 May 2021 22:49:34 +0200 Subject: [PATCH 015/757] COCO evolution fix (#3388) * COCO evolution fix * cleanup * update print * print fix --- train.py | 58 +++++++++++++++++++++++++++----------------------------- 1 file changed, 28 insertions(+), 30 deletions(-) diff --git a/train.py b/train.py index b74cdb28be66..1041ec30c257 100644 --- a/train.py +++ b/train.py @@ -62,7 +62,6 @@ def train(hyp, opt, device, tb_writer=None): init_seeds(2 + rank) with open(opt.data) as f: data_dict = yaml.safe_load(f) # data dict - is_coco = opt.data.endswith('coco.yaml') # Logging- Doing this before checking the dataset. Might update data_dict loggers = {'wandb': None} # loggers dict @@ -78,6 +77,7 @@ def train(hyp, opt, device, tb_writer=None): nc = 1 if opt.single_cls else int(data_dict['nc']) # number of classes names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data) # check + is_coco = opt.data.endswith('coco.yaml') and nc == 80 # COCO dataset # Model pretrained = weights.endswith('.pt') @@ -358,6 +358,7 @@ def train(hyp, opt, device, tb_writer=None): single_cls=opt.single_cls, dataloader=testloader, save_dir=save_dir, + save_json=is_coco and final_epoch, verbose=nc < 50 and final_epoch, plots=plots and final_epoch, wandb_logger=wandb_logger, @@ -409,41 +410,38 @@ def train(hyp, opt, device, tb_writer=None): # end epoch ---------------------------------------------------------------------------------------------------- # end training if rank in [-1, 0]: - # Plots + logger.info(f'{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.\n') if plots: plot_results(save_dir=save_dir) # save as results.png if wandb_logger.wandb: files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]] wandb_logger.log({"Results": [wandb_logger.wandb.Image(str(save_dir / f), caption=f) for f in files if (save_dir / f).exists()]}) - # Test best.pt - logger.info('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600)) - if opt.data.endswith('coco.yaml') and nc == 80: # if COCO - for m in [last, best] if best.exists() else [last]: # speed, mAP tests - results, _, _ = test.test(opt.data, - batch_size=batch_size * 2, - imgsz=imgsz_test, - conf_thres=0.001, - iou_thres=0.7, - model=attempt_load(m, device).half(), - single_cls=opt.single_cls, - dataloader=testloader, - save_dir=save_dir, - save_json=True, - plots=False, - is_coco=is_coco) - - # Strip optimizers - final = best if best.exists() else last # final model - for f in last, best: - if f.exists(): - strip_optimizer(f) # strip optimizers - if opt.bucket: - os.system(f'gsutil cp {final} gs://{opt.bucket}/weights') # upload - if wandb_logger.wandb and not opt.evolve: # Log the stripped model - wandb_logger.wandb.log_artifact(str(final), type='model', - name='run_' + wandb_logger.wandb_run.id + '_model', - aliases=['latest', 'best', 'stripped']) + + if not opt.evolve: + if is_coco: # COCO dataset + for m in [last, best] if best.exists() else [last]: # speed, mAP tests + results, _, _ = test.test(opt.data, + batch_size=batch_size * 2, + imgsz=imgsz_test, + conf_thres=0.001, + iou_thres=0.7, + model=attempt_load(m, device).half(), + single_cls=opt.single_cls, + dataloader=testloader, + save_dir=save_dir, + save_json=True, + plots=False, + is_coco=is_coco) + + # Strip optimizers + 
for f in last, best: + if f.exists(): + strip_optimizer(f) # strip optimizers + if wandb_logger.wandb: # Log the stripped model + wandb_logger.wandb.log_artifact(str(best if best.exists() else last), type='model', + name='run_' + wandb_logger.wandb_run.id + '_model', + aliases=['latest', 'best', 'stripped']) wandb_logger.finish_run() else: dist.destroy_process_group() From d833ab3d2529626d4cc4c6ae28ce7858b9ca738f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 30 May 2021 20:52:42 +0200 Subject: [PATCH 016/757] Create `is_pip()` function (#3391) Returns `True` if file is part of pip package. Useful for contextual behavior modification. ```python def is_pip(): # Is file in a pip package? return 'site-packages' in Path(__file__).absolute().parts ``` --- utils/general.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/utils/general.py b/utils/general.py index 006e64859f32..1f805c56af29 100755 --- a/utils/general.py +++ b/utils/general.py @@ -53,12 +53,12 @@ def get_latest_run(search_dir='.'): def is_docker(): - # Is environment a Docker container + # Is environment a Docker container? return Path('/workspace').exists() # or Path('/.dockerenv').exists() def is_colab(): - # Is environment a Google Colab instance + # Is environment a Google Colab instance? try: import google.colab return True @@ -66,6 +66,11 @@ def is_colab(): return False +def is_pip(): + # Is file in a pip package? + return 'site-packages' in Path(__file__).absolute().parts + + def emojis(str=''): # Return platform-dependent emoji-safe version of string return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str From fdbe527dc02f6f8891a1fd0baa3c5638ed5f53a4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 31 May 2021 10:39:00 +0200 Subject: [PATCH 017/757] Revert "`cv2.imread(img, -1)` for IMREAD_UNCHANGED (#3379)" (#3395) This reverts commit 21a9607e00f1365b21d8c4bd81bdbf5fc0efea24. --- utils/datasets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/datasets.py b/utils/datasets.py index 331df8ffd047..7dd181400da5 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -181,7 +181,7 @@ def __next__(self): else: # Read image self.count += 1 - img0 = cv2.imread(path, -1) # BGR (-1 is IMREAD_UNCHANGED) + img0 = cv2.imread(path) # BGR assert img0 is not None, 'Image Not Found ' + path print(f'image {self.count}/{self.nf} {path}: ', end='') From 3cb9ad4fc49872cf21ea529277708f1707649cbb Mon Sep 17 00:00:00 2001 From: chocosaj Date: Thu, 3 Jun 2021 18:31:51 +0800 Subject: [PATCH 018/757] Update FLOPs description (#3422) * Update README.md * Changing FLOPS to FLOPs. Co-authored-by: BuildTools --- README.md | 4 ++-- models/yolo.py | 6 +++--- requirements.txt | 2 +- tutorial.ipynb | 6 +++--- utils/torch_utils.py | 12 ++++++------ 5 files changed, 15 insertions(+), 15 deletions(-) diff --git a/README.md b/README.md index a638657b313b..1601efdee3b7 100755 --- a/README.md +++ b/README.md @@ -30,7 +30,7 @@ This repository represents Ultralytics open-source research into future object d [assets]: https://github.com/ultralytics/yolov5/releases -Model |size
<br>(pixels) |mAPval<br>0.5:0.95 |mAPtest<br>0.5:0.95 |mAPval<br>0.5 |Speed<br>V100 (ms) | |params<br>(M) |FLOPS<br>640 (B) +Model |size<br>(pixels) |mAPval<br>0.5:0.95 |mAPtest<br>0.5:0.95 |mAPval<br>0.5 |Speed<br>V100 (ms) | |params<br>(M) |<br>
640 (B) --- |--- |--- |--- |--- |--- |---|--- |--- [YOLOv5s][assets] |640 |36.7 |36.7 |55.4 |**2.0** | |7.3 |17.0 [YOLOv5m][assets] |640 |44.5 |44.5 |63.1 |2.7 | |21.4 |51.3 @@ -112,7 +112,7 @@ Namespace(agnostic_nms=False, augment=False, classes=None, conf_thres=0.25, devi YOLOv5 v4.0-96-g83dc1b4 torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB) Fusing layers... -Model Summary: 224 layers, 7266973 parameters, 0 gradients, 17.0 GFLOPS +Model Summary: 224 layers, 7266973 parameters, 0 gradients, 17.0 GFLOPs image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, Done. (0.010s) image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 1 tie, Done. (0.011s) Results saved to runs/detect/exp2 diff --git a/models/yolo.py b/models/yolo.py index 2844cd0410e0..1a7be913023c 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -21,7 +21,7 @@ select_device, copy_attr try: - import thop # for FLOPS computation + import thop # for FLOPs computation except ImportError: thop = None @@ -140,13 +140,13 @@ def forward_once(self, x, profile=False): x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers if profile: - o = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPS + o = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPs t = time_synchronized() for _ in range(10): _ = m(x) dt.append((time_synchronized() - t) * 100) if m == self.model[0]: - logger.info(f"{'time (ms)':>10s} {'GFLOPS':>10s} {'params':>10s} {'module'}") + logger.info(f"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s} {'module'}") logger.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f} {m.type}') x = m(x) # run diff --git a/requirements.txt b/requirements.txt index 1c07c651150e..a20fb6ad0ea5 100755 --- a/requirements.txt +++ b/requirements.txt @@ -27,4 +27,4 @@ pandas # extras -------------------------------------- # Cython # for pycocotools https://github.com/cocodataset/cocoapi/issues/172 pycocotools>=2.0 # COCO mAP -thop # FLOPS computation +thop # FLOPs computation diff --git a/tutorial.ipynb b/tutorial.ipynb index 1bc9a8cda032..97b128182d85 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -611,7 +611,7 @@ "YOLOv5 🚀 v5.0-1-g0f395b3 torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", "\n", "Fusing layers... \n", - "Model Summary: 224 layers, 7266973 parameters, 0 gradients, 17.0 GFLOPS\n", + "Model Summary: 224 layers, 7266973 parameters, 0 gradients, 17.0 GFLOPs\n", "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, Done. (0.008s)\n", "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, Done. (0.008s)\n", "Results saved to runs/detect/exp\n", @@ -734,7 +734,7 @@ "100% 168M/168M [00:05<00:00, 32.3MB/s]\n", "\n", "Fusing layers... \n", - "Model Summary: 476 layers, 87730285 parameters, 0 gradients, 218.8 GFLOPS\n", + "Model Summary: 476 layers, 87730285 parameters, 0 gradients, 218.8 GFLOPs\n", "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco/val2017' images and labels... 
4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:01<00:00, 3102.29it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: ../coco/val2017.cache\n", " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:23<00:00, 1.87it/s]\n", @@ -964,7 +964,7 @@ " 22 [-1, 10] 1 0 models.common.Concat [1] \n", " 23 -1 1 1182720 models.common.C3 [512, 512, 1, False] \n", " 24 [17, 20, 23] 1 229245 models.yolo.Detect [80, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n", - "Model Summary: 283 layers, 7276605 parameters, 7276605 gradients, 17.1 GFLOPS\n", + "Model Summary: 283 layers, 7276605 parameters, 7276605 gradients, 17.1 GFLOPs\n", "\n", "Transferred 362/362 items from yolov5s.pt\n", "Scaled weight_decay = 0.0005\n", diff --git a/utils/torch_utils.py b/utils/torch_utils.py index aa54c3cf561e..6a7d07634813 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -18,7 +18,7 @@ import torchvision try: - import thop # for FLOPS computation + import thop # for FLOPs computation except ImportError: thop = None logger = logging.getLogger(__name__) @@ -105,13 +105,13 @@ def profile(x, ops, n=100, device=None): x = x.to(device) x.requires_grad = True print(torch.__version__, device.type, torch.cuda.get_device_properties(0) if device.type == 'cuda' else '') - print(f"\n{'Params':>12s}{'GFLOPS':>12s}{'forward (ms)':>16s}{'backward (ms)':>16s}{'input':>24s}{'output':>24s}") + print(f"\n{'Params':>12s}{'GFLOPs':>12s}{'forward (ms)':>16s}{'backward (ms)':>16s}{'input':>24s}{'output':>24s}") for m in ops if isinstance(ops, list) else [ops]: m = m.to(device) if hasattr(m, 'to') else m # device m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m # type dtf, dtb, t = 0., 0., [0., 0., 0.] 
# dt forward, backward try: - flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # GFLOPS + flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # GFLOPs except: flops = 0 @@ -219,13 +219,13 @@ def model_info(model, verbose=False, img_size=640): print('%5g %40s %9s %12g %20s %10.3g %10.3g' % (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std())) - try: # FLOPS + try: # FLOPs from thop import profile stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32 img = torch.zeros((1, model.yaml.get('ch', 3), stride, stride), device=next(model.parameters()).device) # input - flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2 # stride GFLOPS + flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2 # stride GFLOPs img_size = img_size if isinstance(img_size, list) else [img_size, img_size] # expand if int/float - fs = ', %.1f GFLOPS' % (flops * img_size[0] / stride * img_size[1] / stride) # 640x640 GFLOPS + fs = ', %.1f GFLOPs' % (flops * img_size[0] / stride * img_size[1] / stride) # 640x640 GFLOPs except (ImportError, Exception): fs = '' From f8651c388fa7af3d32a4f7968da6afd4ebb0e533 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 3 Jun 2021 18:44:58 +0200 Subject: [PATCH 019/757] Parse URL authentication (#3424) * Parse URL authentication * urllib.parse.unquote() * improved error handling * improved error handling * remove %3F * update check_file() --- utils/general.py | 4 +++- utils/google_utils.py | 17 ++++++++++------- 2 files changed, 13 insertions(+), 8 deletions(-) diff --git a/utils/general.py b/utils/general.py index 1f805c56af29..546fccd84066 100755 --- a/utils/general.py +++ b/utils/general.py @@ -9,6 +9,7 @@ import re import subprocess import time +import urllib from itertools import repeat from multiprocessing.pool import ThreadPool from pathlib import Path @@ -183,7 +184,8 @@ def check_file(file): if Path(file).is_file() or file == '': # exists return file elif file.startswith(('http://', 'https://')): # download - url, file = file, Path(file).name + url, file = file, Path(urllib.parse.unquote(str(file))).name # url, file (decode '%2F' to '/' etc.) + file = file.split('?')[0] # parse authentication https://url.com/file.txt?auth... 
print(f'Downloading {url} to {file}...') torch.hub.download_url_to_file(url, file) assert Path(file).exists() and Path(file).stat().st_size > 0, f'File download failed: {url}' # check diff --git a/utils/google_utils.py b/utils/google_utils.py index ac5c54dba97f..aefc7de2db2e 100644 --- a/utils/google_utils.py +++ b/utils/google_utils.py @@ -4,6 +4,7 @@ import platform import subprocess import time +import urllib from pathlib import Path import requests @@ -19,30 +20,32 @@ def gsutil_getsize(url=''): def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''): # Attempts to download file from url or url2, checks and removes incomplete downloads < min_bytes file = Path(file) - try: # GitHub + assert_msg = f"Downloaded file '{file}' does not exist or size is < min_bytes={min_bytes}" + try: # url1 print(f'Downloading {url} to {file}...') torch.hub.download_url_to_file(url, str(file)) - assert file.exists() and file.stat().st_size > min_bytes # check - except Exception as e: # GCP + assert file.exists() and file.stat().st_size > min_bytes, assert_msg # check + except Exception as e: # url2 file.unlink(missing_ok=True) # remove partial downloads - print(f'Download error: {e}\nRe-attempting {url2 or url} to {file}...') + print(f'ERROR: {e}\nRe-attempting {url2 or url} to {file}...') os.system(f"curl -L '{url2 or url}' -o '{file}' --retry 3 -C -") # curl download, retry and resume on fail finally: if not file.exists() or file.stat().st_size < min_bytes: # check file.unlink(missing_ok=True) # remove partial downloads - print(f'ERROR: Download failure: {error_msg or url}') + print(f"ERROR: {assert_msg}\n{error_msg}") print('') -def attempt_download(file, repo='ultralytics/yolov5'): +def attempt_download(file, repo='ultralytics/yolov5'): # from utils.google_utils import *; attempt_download() # Attempt file download if does not exist file = Path(str(file).strip().replace("'", '')) if not file.exists(): # URL specified - name = file.name + name = Path(urllib.parse.unquote(str(file))).name # decode '%2F' to '/' etc. if str(file).startswith(('http:/', 'https:/')): # download url = str(file).replace(':/', '://') # Pathlib turns :// -> :/ + name = name.split('?')[0] # parse authentication https://url.com/file.txt?auth... safe_download(file=name, url=url, min_bytes=1E5) return name From af2bc3a1c3414ce75e49f884f828be96be556e97 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 4 Jun 2021 11:46:40 +0200 Subject: [PATCH 020/757] Add FLOPs title to table (#3453) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 1601efdee3b7..cefb82b0e9fd 100755 --- a/README.md +++ b/README.md @@ -30,7 +30,7 @@ This repository represents Ultralytics open-source research into future object d [assets]: https://github.com/ultralytics/yolov5/releases -Model |size
<br>(pixels) |mAPval<br>0.5:0.95 |mAPtest<br>0.5:0.95 |mAPval<br>0.5 |Speed<br>V100 (ms) | |params<br>(M) |<br>640 (B) +Model |size<br>(pixels) |mAPval<br>0.5:0.95 |mAPtest<br>0.5:0.95 |mAPval<br>0.5 |Speed<br>V100 (ms) | |params<br>(M) |FLOPs<br>
640 (B) --- |--- |--- |--- |--- |--- |---|--- |--- [YOLOv5s][assets] |640 |36.7 |36.7 |55.4 |**2.0** | |7.3 |17.0 [YOLOv5m][assets] |640 |44.5 |44.5 |63.1 |2.7 | |21.4 |51.3 From 4aa2959101dc42559104d3c5f5bf734b5c7fd40e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 4 Jun 2021 12:37:41 +0200 Subject: [PATCH 021/757] Suppress jit trace warning + graph once (#3454) * Suppress jit trace warning + graph once Suppress harmless jit trace warning on TensorBoard add_graph call. Also fix multiple add_graph() calls bug, now only on batch 0. * Update train.py --- train.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/train.py b/train.py index 1041ec30c257..093a6197ff06 100644 --- a/train.py +++ b/train.py @@ -4,6 +4,7 @@ import os import random import time +import warnings from copy import deepcopy from pathlib import Path from threading import Thread @@ -323,18 +324,19 @@ def train(hyp, opt, device, tb_writer=None): mloss = (mloss * i + loss_items) / (i + 1) # update mean losses mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0) # (GB) s = ('%10s' * 2 + '%10.4g' * 6) % ( - '%g/%g' % (epoch, epochs - 1), mem, *mloss, targets.shape[0], imgs.shape[-1]) + f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1]) pbar.set_description(s) # Plot if plots and ni < 3: f = save_dir / f'train_batch{ni}.jpg' # filename Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start() - if tb_writer: - tb_writer.add_graph(torch.jit.trace(de_parallel(model), imgs, strict=False), []) # model graph - # tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch) + if tb_writer and ni == 0: + with warnings.catch_warnings(): + warnings.simplefilter('ignore') # suppress jit trace warning + tb_writer.add_graph(torch.jit.trace(de_parallel(model), imgs, strict=False), []) # graph elif plots and ni == 10 and wandb_logger.wandb: - wandb_logger.log({"Mosaics": [wandb_logger.wandb.Image(str(x), caption=x.name) for x in + wandb_logger.log({'Mosaics': [wandb_logger.wandb.Image(str(x), caption=x.name) for x in save_dir.glob('train*.jpg') if x.exists()]}) # end batch ------------------------------------------------------------------------------------------------ From 8e3b4a0bf3be599ef7316059130547a1837a7030 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 4 Jun 2021 12:47:53 +0200 Subject: [PATCH 022/757] Update MixUp augmentation `alpha=beta=32.0` (#3455) Per VOC empirical results https://github.com/ultralytics/yolov5/issues/3380#issuecomment-853001307 by @developer0hye --- utils/datasets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/datasets.py b/utils/datasets.py index 7dd181400da5..350fa53cc443 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -535,7 +535,7 @@ def __getitem__(self, index): # MixUp https://arxiv.org/pdf/1710.09412.pdf if random.random() < hyp['mixup']: img2, labels2 = load_mosaic(self, random.randint(0, self.n - 1)) - r = np.random.beta(8.0, 8.0) # mixup ratio, alpha=beta=8.0 + r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0 img = (img * r + img2 * (1 - r)).astype(np.uint8) labels = np.concatenate((labels, labels2), 0) From d40481acc5f73a06fa5ced5fd2cfa8fce73a744d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 4 Jun 2021 15:23:33 +0200 Subject: [PATCH 023/757] Add `timeout()` class (#3460) * Add `timeout()` class * rearrange order --- utils/general.py | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff 
--git a/utils/general.py b/utils/general.py index 546fccd84066..591fc8474339 100755 --- a/utils/general.py +++ b/utils/general.py @@ -1,5 +1,6 @@ # YOLOv5 general utils +import contextlib import glob import logging import math @@ -7,6 +8,7 @@ import platform import random import re +import signal import subprocess import time import urllib @@ -34,6 +36,26 @@ os.environ['NUMEXPR_MAX_THREADS'] = str(min(os.cpu_count(), 8)) # NumExpr max threads +class timeout(contextlib.ContextDecorator): + # Usage: @timeout(seconds) decorator or 'with timeout(seconds):' context manager + def __init__(self, seconds, *, timeout_message="", suppress_timeout_errors=True): + self.seconds = int(seconds) + self.timeout_message = timeout_message + self.suppress = bool(suppress_timeout_errors) + + def _timeout_handler(self, signum, frame): + raise TimeoutError(self.timeout_message) + + def __enter__(self): + signal.signal(signal.SIGALRM, self._timeout_handler) # Set handler for SIGALRM + signal.alarm(self.seconds) # start countdown for SIGALRM to be raised + + def __exit__(self, exc_type, exc_val, exc_tb): + signal.alarm(0) # Cancel SIGALRM if it's scheduled + if self.suppress and exc_type is TimeoutError: # Suppress TimeoutError + return True + + def set_logging(rank=-1, verbose=True): logging.basicConfig( format="%(message)s", @@ -86,7 +108,7 @@ def check_online(): # Check internet connectivity import socket try: - socket.create_connection(("1.1.1.1", 443), 5) # check host accesability + socket.create_connection(("1.1.1.1", 443), 5) # check host accessibility return True except OSError: return False From c37f072ba73a7b0286b041936a1ebf3d86beafa2 Mon Sep 17 00:00:00 2001 From: Yonghye Kwon Date: Sat, 5 Jun 2021 03:02:20 +0900 Subject: [PATCH 024/757] Faster HSV augmentation (#3462) remove datatype conversion process that can be skipped --- utils/datasets.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index 350fa53cc443..b6e43b94cfe9 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -655,12 +655,12 @@ def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5): hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV)) dtype = img.dtype # uint8 - x = np.arange(0, 256, dtype=np.int16) + x = np.arange(0, 256, dtype=r.dtype) lut_hue = ((x * r[0]) % 180).astype(dtype) lut_sat = np.clip(x * r[1], 0, 255).astype(dtype) lut_val = np.clip(x * r[2], 0, 255).astype(dtype) - img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype) + img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))) cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed From 563ea9475a580b959bcddbb280261c41d80fd798 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 4 Jun 2021 21:17:36 +0200 Subject: [PATCH 025/757] Add `check_git_status()` 5 second timeout (#3464) * Add check_git_status() 5 second timeout This should prevent the SSH Git bug that we were discussing @KalenMike * cleanup * replace timeout with check_output built-in timeout --- utils/general.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/utils/general.py b/utils/general.py index 591fc8474339..d9ee432dcae3 100755 --- a/utils/general.py +++ b/utils/general.py @@ -9,12 +9,12 @@ import random import re import signal -import subprocess import time import urllib from itertools import repeat from multiprocessing.pool import ThreadPool from pathlib import Path +from subprocess import 
check_output import cv2 import numpy as np @@ -38,9 +38,9 @@ class timeout(contextlib.ContextDecorator): # Usage: @timeout(seconds) decorator or 'with timeout(seconds):' context manager - def __init__(self, seconds, *, timeout_message="", suppress_timeout_errors=True): + def __init__(self, seconds, *, timeout_msg='', suppress_timeout_errors=True): self.seconds = int(seconds) - self.timeout_message = timeout_message + self.timeout_message = timeout_msg self.suppress = bool(suppress_timeout_errors) def _timeout_handler(self, signum, frame): @@ -114,7 +114,7 @@ def check_online(): return False -def check_git_status(): +def check_git_status(err_msg=', for updates see https://github.com/ultralytics/yolov5'): # Recommend 'git pull' if code is out of date print(colorstr('github: '), end='') try: @@ -123,9 +123,9 @@ def check_git_status(): assert check_online(), 'skipping check (offline)' cmd = 'git fetch && git config --get remote.origin.url' - url = subprocess.check_output(cmd, shell=True).decode().strip().rstrip('.git') # github repo url - branch = subprocess.check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out - n = int(subprocess.check_output(f'git rev-list {branch}..origin/master --count', shell=True)) # commits behind + url = check_output(cmd, shell=True, timeout=5).decode().strip().rstrip('.git') # git fetch + branch = check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out + n = int(check_output(f'git rev-list {branch}..origin/master --count', shell=True)) # commits behind if n > 0: s = f"⚠️ WARNING: code is out of date by {n} commit{'s' * (n > 1)}. " \ f"Use 'git pull' to update or 'git clone {url}' to download latest." @@ -133,7 +133,7 @@ def check_git_status(): s = f'up to date with {url} ✅' print(emojis(s)) # emoji-safe except Exception as e: - print(e) + print(f'{e}{err_msg}') def check_python(minimum='3.7.0', required=True): @@ -166,7 +166,7 @@ def check_requirements(requirements='requirements.txt', exclude=()): n += 1 print(f"{prefix} {r} not found and is required by YOLOv5, attempting auto-update...") try: - print(subprocess.check_output(f"pip install '{r}'", shell=True).decode()) + print(check_output(f"pip install '{r}'", shell=True).decode()) except Exception as e: print(f'{prefix} {e}') From 317f2ccc9d4a16661cc102660cab54084421b516 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 4 Jun 2021 21:34:36 +0200 Subject: [PATCH 026/757] Improved `check_requirements()` offline-handling (#3466) Improve robustness of `check_requirements()` function to offline environments (do not attempt pip installs when offline). 
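A minimal sketch of the resulting guard, reusing the repository's `check_online()` connectivity probe; `try_install` is a hypothetical stand-in for the per-requirement loop body, not the actual function name:

```python
import socket
from subprocess import check_output

def check_online():
    # Check internet connectivity by probing a well-known host
    try:
        socket.create_connection(("1.1.1.1", 443), 5)
        return True
    except OSError:
        return False

def try_install(r):
    # Attempt the pip auto-update only when online; otherwise skip with a message
    try:
        assert check_online(), f"'pip install {r}' skipped (offline)"
        print(check_output(f"pip install '{r}'", shell=True).decode())
        return True  # counted toward the updated-package tally only on success
    except Exception as e:
        print(e)
        return False
```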
--- utils/general.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index d9ee432dcae3..a12b0aafba0e 100755 --- a/utils/general.py +++ b/utils/general.py @@ -163,10 +163,11 @@ def check_requirements(requirements='requirements.txt', exclude=()): try: pkg.require(r) except Exception as e: # DistributionNotFound or VersionConflict if requirements not met - n += 1 print(f"{prefix} {r} not found and is required by YOLOv5, attempting auto-update...") try: + assert check_online(), f"'pip install {r}' skipped (offline)" print(check_output(f"pip install '{r}'", shell=True).decode()) + n += 1 except Exception as e: print(f'{prefix} {e}') From 044daafd9da4a14331a3885711695592a0317b39 Mon Sep 17 00:00:00 2001 From: Sam_S Date: Sat, 5 Jun 2021 00:01:58 +0400 Subject: [PATCH 027/757] Add `output_names` argument for ONNX export with dynamic axes (#3456) * Add output names & dynamic axes for onnx export Add output_names and dynamic_axes names for all outputs in torch.onnx.export. The first four outputs of the model will have names output0, output1, output2, output3 * use first output only + cleanup Co-authored-by: Samridha Shrestha Co-authored-by: Glenn Jocher --- models/export.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/models/export.py b/models/export.py index 0d1147938e37..2db6a7699953 100644 --- a/models/export.py +++ b/models/export.py @@ -96,11 +96,14 @@ print(f'{prefix} starting export with onnx {onnx.__version__}...') f = opt.weights.replace('.pt', '.onnx') # filename - torch.onnx.export(model, img, f, verbose=False, opset_version=opt.opset_version, input_names=['images'], + torch.onnx.export(model, img, f, verbose=False, opset_version=opt.opset_version, training=torch.onnx.TrainingMode.TRAINING if opt.train else torch.onnx.TrainingMode.EVAL, do_constant_folding=not opt.train, - dynamic_axes={'images': {0: 'batch', 2: 'height', 3: 'width'}, # size(1,3,640,640) - 'output': {0: 'batch', 2: 'y', 3: 'x'}} if opt.dynamic else None) + input_names=['images'], + output_names=['output'], + dynamic_axes={'images': {0: 'batch', 2: 'height', 3: 'width'}, # shape(1,3,640,640) + 'output': {0: 'batch', 1: 'anchors'} # shape(1,25200,85) + } if opt.dynamic else None) # Checks model_onnx = onnx.load(f) # load onnx model From b31229ae897f6a93438882b6d2f45607a86c9640 Mon Sep 17 00:00:00 2001 From: edificewang <609552430@qq.com> Date: Sat, 5 Jun 2021 04:28:34 +0800 Subject: [PATCH 028/757] Revert FP16 `test.py` and `detect.py` inference to FP32 default (#3423) * fixed inference bug ,while use half precision * replace --use-half with --half * replace space and PEP8 in detect.py * PEP8 detect.py * update --half help comment * Update test.py * revert space Co-authored-by: Glenn Jocher --- detect.py | 3 ++- test.py | 2 ++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/detect.py b/detect.py index c6b76d981541..aba87687e666 100644 --- a/detect.py +++ b/detect.py @@ -28,7 +28,7 @@ def detect(opt): # Initialize set_logging() device = select_device(opt.device) - half = device.type != 'cpu' # half precision only supported on CUDA + half = opt.half and device.type != 'cpu' # half precision only supported on CUDA # Load model model = attempt_load(weights, map_location=device) # load FP32 model @@ -172,6 +172,7 @@ def detect(opt): parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)') parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels') 
parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences') + parser.add_argument('--half', type=bool, default=False, help='use FP16 half-precision inference') opt = parser.parse_args() print(opt) check_requirements(exclude=('tensorboard', 'pycocotools', 'thop')) diff --git a/test.py b/test.py index 0716c5d8b93c..113316ff0b8b 100644 --- a/test.py +++ b/test.py @@ -306,6 +306,7 @@ def test(data, parser.add_argument('--project', default='runs/test', help='save to project/name') parser.add_argument('--name', default='exp', help='save to project/name') parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--half', type=bool, default=False, help='use FP16 half-precision inference') opt = parser.parse_args() opt.save_json |= opt.data.endswith('coco.yaml') opt.data = check_file(opt.data) # check file @@ -326,6 +327,7 @@ def test(data, save_txt=opt.save_txt | opt.save_hybrid, save_hybrid=opt.save_hybrid, save_conf=opt.save_conf, + half_precision=opt.half, opt=opt ) From 739451da5a5d28e03f745175361f310bafd99707 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 4 Jun 2021 23:45:24 +0200 Subject: [PATCH 029/757] Add additional links/resources to stale.yml message (#3467) * Update stale.yml * cleanup * Update stale.yml * reformat --- .github/workflows/stale.yml | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 0a094e237b34..2332cf5d53db 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -10,8 +10,26 @@ jobs: - uses: actions/stale@v3 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - stale-issue-message: 'This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.' - stale-pr-message: 'This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.' + stale-issue-message: | + 👋 Hello @${{ github.actor }}, this issue has been automatically marked as stale because it has not had recent activity. Please note it will be closed if no further activity occurs. + + Access additional [YOLOv5](https://ultralytics.com/yolov5) 🚀 resources: + - **Wiki** – https://github.com/ultralytics/yolov5/wiki + - **Tutorials** – https://github.com/ultralytics/yolov5#tutorials + - **Docs** – https://docs.ultralytics.com + + Access additional [Ultralytics](https://ultralytics.com) ⚡ resources: + - **Ultralytics HUB** – https://hub.ultralytics.com + - **Vision API** – https://ultralytics.com/yolov5 + - **About Us** – https://ultralytics.com/about + - **Join Our Team** – https://ultralytics.com/work + - **Contact Us** – https://ultralytics.com/contact + + Feel free to inform us of any other **issues** you discover or **feature requests** that come to mind in the future. Pull Requests (PRs) are also always welcomed! + + Thank you for your contributions to YOLOv5 🚀 and Vision AI ⭐! + + stale-pr-message: 'This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions YOLOv5 🚀 and Vision AI ⭐.' 
days-before-stale: 30 days-before-close: 5 exempt-issue-labels: 'documentation,tutorial' From 3597d280eeee5cd2049999f7b1a5640cf0e1c89a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 4 Jun 2021 23:49:43 +0200 Subject: [PATCH 030/757] Update stale.yml HUB URL (#3468) --- .github/workflows/stale.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 2332cf5d53db..ec24517fd659 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -19,7 +19,7 @@ jobs: - **Docs** – https://docs.ultralytics.com Access additional [Ultralytics](https://ultralytics.com) ⚡ resources: - - **Ultralytics HUB** – https://hub.ultralytics.com + - **Ultralytics HUB** – https://ultralytics.com/pricing - **Vision API** – https://ultralytics.com/yolov5 - **About Us** – https://ultralytics.com/about - **Join Our Team** – https://ultralytics.com/work From cf4f95bc5f5ee2027e5819e5ec7c3f9ae822d433 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 6 Jun 2021 18:06:24 +0200 Subject: [PATCH 031/757] Stale `github.actor` bug fix (#3483) --- .github/workflows/stale.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index ec24517fd659..a81e4007cffb 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -11,7 +11,7 @@ jobs: with: repo-token: ${{ secrets.GITHUB_TOKEN }} stale-issue-message: | - 👋 Hello @${{ github.actor }}, this issue has been automatically marked as stale because it has not had recent activity. Please note it will be closed if no further activity occurs. + 👋 Hello, this issue has been automatically marked as stale because it has not had recent activity. Please note it will be closed if no further activity occurs. Access additional [YOLOv5](https://ultralytics.com/yolov5) 🚀 resources: - **Wiki** – https://github.com/ultralytics/yolov5/wiki From a1c3572bc9e0db60f9978dcf047435a703f58a93 Mon Sep 17 00:00:00 2001 From: Yonghye Kwon Date: Mon, 7 Jun 2021 01:39:36 +0900 Subject: [PATCH 032/757] Explicit `model.eval()` call `if opt.train=False` (#3475) * call model.eval() when opt.train is False call model.eval() when opt.train is False * single-line if statement * cleanup Co-authored-by: Glenn Jocher --- models/export.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/models/export.py b/models/export.py index 2db6a7699953..6e3e1207f659 100644 --- a/models/export.py +++ b/models/export.py @@ -58,8 +58,7 @@ # Update model if opt.half: img, model = img.half(), model.half() # to FP16 - if opt.train: - model.train() # training mode (no grid construction in Detect layer) + model.train() if opt.train else model.eval() # training mode = no Detect() layer grid construction for k, m in model.named_modules(): m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility if isinstance(m, models.common.Conv): # assign export-friendly activations From 90b7895d652c3bd3d361b2d6e9aee900fd67f5f7 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 7 Jun 2021 10:03:47 +0200 Subject: [PATCH 033/757] check_requirements() exclude `opencv-python` (#3495) Fix for 3rd party or contrib versions of installed OpenCV as in https://github.com/ultralytics/yolov5/issues/3494. 
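The diff below adds 'opencv-python' to the exclude tuple of check_requirements; excluded packages are skipped entirely, so an already-installed opencv-python-headless or opencv-contrib-python build no longer triggers a spurious pip install. A sketch of the resulting call, assuming the YOLOv5 repo root as the working directory:

```python
from pathlib import Path

from utils.general import check_requirements  # repo helper patched below

check_requirements(requirements=Path('requirements.txt'),
                   exclude=('tensorboard', 'pycocotools', 'thop', 'opencv-python'))
```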
--- hubconf.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hubconf.py b/hubconf.py index 40bbb1ed0826..bedbee18f87f 100644 --- a/hubconf.py +++ b/hubconf.py @@ -30,7 +30,8 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo from utils.google_utils import attempt_download from utils.torch_utils import select_device - check_requirements(Path(__file__).parent / 'requirements.txt', exclude=('tensorboard', 'pycocotools', 'thop')) + check_requirements(requirements=Path(__file__).parent / 'requirements.txt', + exclude=('tensorboard', 'pycocotools', 'thop', 'opencv-python')) set_logging(verbose=verbose) fname = Path(name).with_suffix('.pt') # checkpoint filename From 3f03acb3dba3b4b3a4674fa9bdd6e73fbcbfae6c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 7 Jun 2021 15:38:51 +0200 Subject: [PATCH 034/757] check_requirements() exclude `opencv-python` (#3507) Duplicate of #3495 merged to `develop`. This PR will be merged to `master`. Fixes https://github.com/ultralytics/yolov5/issues/3494. --- hubconf.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hubconf.py b/hubconf.py index f74e70c85a65..a52aae9fd1b7 100644 --- a/hubconf.py +++ b/hubconf.py @@ -30,7 +30,8 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo from utils.google_utils import attempt_download from utils.torch_utils import select_device - check_requirements(Path(__file__).parent / 'requirements.txt', exclude=('tensorboard', 'pycocotools', 'thop')) + check_requirements(requirements=Path(__file__).parent / 'requirements.txt', + exclude=('tensorboard', 'pycocotools', 'thop', 'opencv-python')) set_logging(verbose=verbose) fname = Path(name).with_suffix('.pt') # checkpoint filename From 8d1ddc93c717c0708f9478636b7647a774e07521 Mon Sep 17 00:00:00 2001 From: Yonghye Kwon Date: Tue, 8 Jun 2021 01:56:41 +0900 Subject: [PATCH 035/757] Earlier `assert` for cpu and half option (#3508) * early assert for cpu and half option early assert for cpu and half option * Modified comment Modified comment --- models/export.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/models/export.py b/models/export.py index 6e3e1207f659..c03770178829 100644 --- a/models/export.py +++ b/models/export.py @@ -44,15 +44,13 @@ # Load PyTorch model device = select_device(opt.device) + assert not (opt.device.lower() == 'cpu' and opt.half), '--half only compatible with GPU export, i.e. use --device 0' model = attempt_load(opt.weights, map_location=device) # load FP32 model labels = model.names - # Checks + # Input gs = int(max(model.stride)) # grid size (max stride) opt.img_size = [check_img_size(x, gs) for x in opt.img_size] # verify img_size are gs-multiples - assert not (opt.device.lower() == 'cpu' and opt.half), '--half only compatible with GPU export, i.e. 
use --device 0' - - # Input img = torch.zeros(opt.batch_size, 3, *opt.img_size).to(device) # image size(1,3,320,192) iDetection # Update model From eede7dc48c8abd1b1ba7cae657e556a505e80549 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 7 Jun 2021 22:52:16 +0200 Subject: [PATCH 036/757] Update tutorial.ipynb (#3510) --- tutorial.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 97b128182d85..4e760b13bb41 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -530,7 +530,7 @@ "\n", "\n", "This is the **official YOLOv5 🚀 notebook** authored by **Ultralytics**, and is freely available for redistribution under the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/). \n", - "For more information please visit https://github.com/ultralytics/yolov5 and https://www.ultralytics.com. Thank you!" + "For more information please visit https://github.com/ultralytics/yolov5 and https://ultralytics.com. Thank you!" ] }, { From d986145b9a57b3c055e8cdea6b40cb979ebfe2e7 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 7 Jun 2021 23:21:34 +0200 Subject: [PATCH 037/757] Reduce test.py results spacing (#3511) --- test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test.py b/test.py index 113316ff0b8b..12141f71c2c1 100644 --- a/test.py +++ b/test.py @@ -95,7 +95,7 @@ def test(data, confusion_matrix = ConfusionMatrix(nc=nc) names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)} coco91class = coco80_to_coco91_class() - s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95') + s = ('%20s' + '%11s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95') p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0. loss = torch.zeros(3, device=device) jdict, stats, ap, ap_class, wandb_images = [], [], [], [], [] @@ -228,7 +228,7 @@ def test(data, nt = torch.zeros(1) # Print results - pf = '%20s' + '%12i' * 2 + '%12.3g' * 4 # print format + pf = '%20s' + '%11i' * 2 + '%11.3g' * 4 # print format print(pf % ('all', seen, nt.sum(), mp, mr, map50, map)) # Print results per class From abb2a96e91340df74b2526d925f2ecba24973dec Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 8 Jun 2021 00:39:02 +0200 Subject: [PATCH 038/757] Update README.md (#3512) * Update README.md Minor modifications * 850 width --- README.md | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index cefb82b0e9fd..3a785cc85003 100755 --- a/README.md +++ b/README.md @@ -1,5 +1,5 @@ - +   CI CPU testing @@ -30,19 +30,19 @@ This repository represents Ultralytics open-source research into future object d [assets]: https://github.com/ultralytics/yolov5/releases -Model |size
(pixels) |mAPval<br>0.5:0.95 |mAPtest<br>0.5:0.95 |mAPval<br>0.5 |Speed<br>V100 (ms) | |params<br>(M) |FLOPs<br>640 (B)
---- |--- |--- |--- |--- |--- |---|--- |---
-[YOLOv5s][assets] |640 |36.7 |36.7 |55.4 |**2.0** | |7.3 |17.0
-[YOLOv5m][assets] |640 |44.5 |44.5 |63.1 |2.7 | |21.4 |51.3
-[YOLOv5l][assets] |640 |48.2 |48.2 |66.9 |3.8 | |47.0 |115.4
-[YOLOv5x][assets] |640 |**50.4** |**50.4** |**68.8** |6.1 | |87.7 |218.8
+|Model |size<br>(pixels) |mAPval<br>0.5:0.95 |mAPtest<br>0.5:0.95 |mAPval<br>0.5 |Speed<br>V100 (ms) | |params<br>(M) |FLOPs<br>640 (B)
+|--- |--- |--- |--- |--- |--- |---|--- |---
+|[YOLOv5s][assets] |640 |36.7 |36.7 |55.4 |**2.0** | |7.3 |17.0
+|[YOLOv5m][assets] |640 |44.5 |44.5 |63.1 |2.7 | |21.4 |51.3
+|[YOLOv5l][assets] |640 |48.2 |48.2 |66.9 |3.8 | |47.0 |115.4
+|[YOLOv5x][assets] |640 |**50.4** |**50.4** |**68.8** |6.1 | |87.7 |218.8
| | | | | | || |
-[YOLOv5s6][assets] |1280 |43.3 |43.3 |61.9 |**4.3** | |12.7 |17.4
-[YOLOv5m6][assets] |1280 |50.5 |50.5 |68.7 |8.4 | |35.9 |52.4
-[YOLOv5l6][assets] |1280 |53.4 |53.4 |71.1 |12.3 | |77.2 |117.7
-[YOLOv5x6][assets] |1280 |**54.4** |**54.4** |**72.0** |22.4 | |141.8 |222.9
+|[YOLOv5s6][assets] |1280 |43.3 |43.3 |61.9 |**4.3** | |12.7 |17.4
+|[YOLOv5m6][assets] |1280 |50.5 |50.5 |68.7 |8.4 | |35.9 |52.4
+|[YOLOv5l6][assets] |1280 |53.4 |53.4 |71.1 |12.3 | |77.2 |117.7
+|[YOLOv5x6][assets] |1280 |**54.4** |**54.4** |**72.0** |22.4 | |141.8 |222.9
| | | | | | || |
-[YOLOv5x6][assets] TTA |1280 |**55.0** |**55.0** |**72.0** |70.8 | |- |-
+|[YOLOv5x6][assets] TTA |1280 |**55.0** |**55.0** |**72.0** |70.8 | |- |-
Table Notes (click to expand) From c058a61e3bb0e2ea4e862ee790afe709d86ca3d2 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 8 Jun 2021 10:11:06 +0200 Subject: [PATCH 039/757] Update greetings.yml revert greeting change as PRs will now merge to master. --- .github/workflows/greetings.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index 4e502fe9af7b..ee472297107e 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -16,7 +16,7 @@ jobs: git remote add upstream https://github.com/ultralytics/yolov5.git git fetch upstream git checkout feature # <----- replace 'feature' with local branch name - git rebase upstream/develop + git rebase upstream/master git push -u origin -f ``` - ✅ Verify all Continuous Integration (CI) **checks are passing**. From f3c3d2ce5d85ba77336a9d0a87c6a446732cdda6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 8 Jun 2021 10:22:10 +0200 Subject: [PATCH 040/757] Merge `develop` branch into `master` (#3518) * update ci-testing.yml (#3322) * update ci-testing.yml * update greetings.yml * bring back os matrix * update ci-testing.yml (#3322) * update ci-testing.yml * update greetings.yml * bring back os matrix * Enable direct `--weights URL` definition (#3373) * Enable direct `--weights URL` definition @KalenMike this PR will enable direct --weights URL definition. Example use case: ``` python train.py --weights https://storage.googleapis.com/bucket/dir/model.pt ``` * cleanup * bug fixes * weights = attempt_download(weights) * Update experimental.py * Update hubconf.py * return bug fix * comment mirror * min_bytes * Update tutorial.ipynb (#3368) add Open in Kaggle badge * `cv2.imread(img, -1)` for IMREAD_UNCHANGED (#3379) * Update datasets.py * comment Co-authored-by: Glenn Jocher * COCO evolution fix (#3388) * COCO evolution fix * cleanup * update print * print fix * Create `is_pip()` function (#3391) Returns `True` if file is part of pip package. Useful for contextual behavior modification. ```python def is_pip(): # Is file in a pip package? return 'site-packages' in Path(__file__).absolute().parts ``` * Revert "`cv2.imread(img, -1)` for IMREAD_UNCHANGED (#3379)" (#3395) This reverts commit 21a9607e00f1365b21d8c4bd81bdbf5fc0efea24. * Update FLOPs description (#3422) * Update README.md * Changing FLOPS to FLOPs. Co-authored-by: BuildTools * Parse URL authentication (#3424) * Parse URL authentication * urllib.parse.unquote() * improved error handling * improved error handling * remove %3F * update check_file() * Add FLOPs title to table (#3453) * Suppress jit trace warning + graph once (#3454) * Suppress jit trace warning + graph once Suppress harmless jit trace warning on TensorBoard add_graph call. Also fix multiple add_graph() calls bug, now only on batch 0. 
* Update train.py * Update MixUp augmentation `alpha=beta=32.0` (#3455) Per VOC empirical results https://github.com/ultralytics/yolov5/issues/3380#issuecomment-853001307 by @developer0hye * Add `timeout()` class (#3460) * Add `timeout()` class * rearrange order * Faster HSV augmentation (#3462) remove datatype conversion process that can be skipped * Add `check_git_status()` 5 second timeout (#3464) * Add check_git_status() 5 second timeout This should prevent the SSH Git bug that we were discussing @KalenMike * cleanup * replace timeout with check_output built-in timeout * Improved `check_requirements()` offline-handling (#3466) Improve robustness of `check_requirements()` function to offline environments (do not attempt pip installs when offline). * Add `output_names` argument for ONNX export with dynamic axes (#3456) * Add output names & dynamic axes for onnx export Add output_names and dynamic_axes names for all outputs in torch.onnx.export. The first four outputs of the model will have names output0, output1, output2, output3 * use first output only + cleanup Co-authored-by: Samridha Shrestha Co-authored-by: Glenn Jocher * Revert FP16 `test.py` and `detect.py` inference to FP32 default (#3423) * fixed inference bug ,while use half precision * replace --use-half with --half * replace space and PEP8 in detect.py * PEP8 detect.py * update --half help comment * Update test.py * revert space Co-authored-by: Glenn Jocher * Add additional links/resources to stale.yml message (#3467) * Update stale.yml * cleanup * Update stale.yml * reformat * Update stale.yml HUB URL (#3468) * Stale `github.actor` bug fix (#3483) * Explicit `model.eval()` call `if opt.train=False` (#3475) * call model.eval() when opt.train is False call model.eval() when opt.train is False * single-line if statement * cleanup Co-authored-by: Glenn Jocher * check_requirements() exclude `opencv-python` (#3495) Fix for 3rd party or contrib versions of installed OpenCV as in https://github.com/ultralytics/yolov5/issues/3494. * Earlier `assert` for cpu and half option (#3508) * early assert for cpu and half option early assert for cpu and half option * Modified comment Modified comment * Update tutorial.ipynb (#3510) * Reduce test.py results spacing (#3511) * Update README.md (#3512) * Update README.md Minor modifications * 850 width * Update greetings.yml revert greeting change as PRs will now merge to master. 
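Among the squashed changes listed above, the jit-trace item is easy to miss: TensorBoard's add_graph call runs torch.jit.trace, which raises a harmless TracerWarning on models with data-dependent control flow, and the graph was previously re-traced on every batch. A minimal sketch of the suppressed, trace-once pattern; log_graph and its arguments are illustrative names, not repo API:

```python
import warnings

import torch

def log_graph(tb_writer, model, imgs, ni):
    if tb_writer and ni == 0:  # trace the model graph once, on the first batch
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')  # silence the harmless TracerWarning
            tb_writer.add_graph(torch.jit.trace(model, imgs, strict=False), [])
```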
Co-authored-by: Piotr Skalski Co-authored-by: SkalskiP Co-authored-by: Peretz Cohen Co-authored-by: tudoulei <34886368+tudoulei@users.noreply.github.com> Co-authored-by: chocosaj Co-authored-by: BuildTools Co-authored-by: Yonghye Kwon Co-authored-by: Sam_S Co-authored-by: Samridha Shrestha Co-authored-by: edificewang <609552430@qq.com> --- .github/workflows/ci-testing.yml | 6 +-- .github/workflows/stale.yml | 22 +++++++++- README.md | 26 ++++++------ detect.py | 3 +- hubconf.py | 3 +- models/experimental.py | 3 +- models/export.py | 18 ++++---- models/yolo.py | 6 +-- requirements.txt | 2 +- test.py | 6 ++- train.py | 72 ++++++++++++++++---------------- tutorial.ipynb | 13 +++--- utils/datasets.py | 6 +-- utils/general.py | 54 ++++++++++++++++++------ utils/google_utils.py | 58 +++++++++++++++---------- utils/torch_utils.py | 12 +++--- 16 files changed, 187 insertions(+), 123 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index df508474a955..bb8b173cdb31 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -2,12 +2,10 @@ name: CI CPU testing on: # https://help.github.com/en/actions/reference/events-that-trigger-workflows push: - branches: [ master ] + branches: [ master, develop ] pull_request: # The branches below must be a subset of the branches above - branches: [ master ] - schedule: - - cron: '0 0 * * *' # Runs at 00:00 UTC every day + branches: [ master, develop ] jobs: cpu-tests: diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 0a094e237b34..a81e4007cffb 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -10,8 +10,26 @@ jobs: - uses: actions/stale@v3 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - stale-issue-message: 'This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.' - stale-pr-message: 'This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.' + stale-issue-message: | + 👋 Hello, this issue has been automatically marked as stale because it has not had recent activity. Please note it will be closed if no further activity occurs. + + Access additional [YOLOv5](https://ultralytics.com/yolov5) 🚀 resources: + - **Wiki** – https://github.com/ultralytics/yolov5/wiki + - **Tutorials** – https://github.com/ultralytics/yolov5#tutorials + - **Docs** – https://docs.ultralytics.com + + Access additional [Ultralytics](https://ultralytics.com) ⚡ resources: + - **Ultralytics HUB** – https://ultralytics.com/pricing + - **Vision API** – https://ultralytics.com/yolov5 + - **About Us** – https://ultralytics.com/about + - **Join Our Team** – https://ultralytics.com/work + - **Contact Us** – https://ultralytics.com/contact + + Feel free to inform us of any other **issues** you discover or **feature requests** that come to mind in the future. Pull Requests (PRs) are also always welcomed! + + Thank you for your contributions to YOLOv5 🚀 and Vision AI ⭐! + + stale-pr-message: 'This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions YOLOv5 🚀 and Vision AI ⭐.' 
days-before-stale: 30 days-before-close: 5 exempt-issue-labels: 'documentation,tutorial' diff --git a/README.md b/README.md index a638657b313b..3a785cc85003 100755 --- a/README.md +++ b/README.md @@ -1,5 +1,5 @@ - +   CI CPU testing @@ -30,19 +30,19 @@ This repository represents Ultralytics open-source research into future object d [assets]: https://github.com/ultralytics/yolov5/releases -Model |size
(pixels) |mAPval<br>0.5:0.95 |mAPtest<br>0.5:0.95 |mAPval<br>0.5 |Speed<br>V100 (ms) | |params<br>(M) |FLOPS<br>640 (B)
---- |--- |--- |--- |--- |--- |---|--- |---
-[YOLOv5s][assets] |640 |36.7 |36.7 |55.4 |**2.0** | |7.3 |17.0
-[YOLOv5m][assets] |640 |44.5 |44.5 |63.1 |2.7 | |21.4 |51.3
-[YOLOv5l][assets] |640 |48.2 |48.2 |66.9 |3.8 | |47.0 |115.4
-[YOLOv5x][assets] |640 |**50.4** |**50.4** |**68.8** |6.1 | |87.7 |218.8
+|Model |size<br>(pixels) |mAPval<br>0.5:0.95 |mAPtest<br>0.5:0.95 |mAPval<br>0.5 |Speed<br>V100 (ms) | |params<br>(M) |FLOPs<br>640 (B)
+|--- |--- |--- |--- |--- |--- |---|--- |---
+|[YOLOv5s][assets] |640 |36.7 |36.7 |55.4 |**2.0** | |7.3 |17.0
+|[YOLOv5m][assets] |640 |44.5 |44.5 |63.1 |2.7 | |21.4 |51.3
+|[YOLOv5l][assets] |640 |48.2 |48.2 |66.9 |3.8 | |47.0 |115.4
+|[YOLOv5x][assets] |640 |**50.4** |**50.4** |**68.8** |6.1 | |87.7 |218.8
| | | | | | || |
-[YOLOv5s6][assets] |1280 |43.3 |43.3 |61.9 |**4.3** | |12.7 |17.4
-[YOLOv5m6][assets] |1280 |50.5 |50.5 |68.7 |8.4 | |35.9 |52.4
-[YOLOv5l6][assets] |1280 |53.4 |53.4 |71.1 |12.3 | |77.2 |117.7
-[YOLOv5x6][assets] |1280 |**54.4** |**54.4** |**72.0** |22.4 | |141.8 |222.9
+|[YOLOv5s6][assets] |1280 |43.3 |43.3 |61.9 |**4.3** | |12.7 |17.4
+|[YOLOv5m6][assets] |1280 |50.5 |50.5 |68.7 |8.4 | |35.9 |52.4
+|[YOLOv5l6][assets] |1280 |53.4 |53.4 |71.1 |12.3 | |77.2 |117.7
+|[YOLOv5x6][assets] |1280 |**54.4** |**54.4** |**72.0** |22.4 | |141.8 |222.9
| | | | | | || |
-[YOLOv5x6][assets] TTA |1280 |**55.0** |**55.0** |**72.0** |70.8 | |- |-
+|[YOLOv5x6][assets] TTA |1280 |**55.0** |**55.0** |**72.0** |70.8 | |- |-
Table Notes (click to expand) @@ -112,7 +112,7 @@ Namespace(agnostic_nms=False, augment=False, classes=None, conf_thres=0.25, devi YOLOv5 v4.0-96-g83dc1b4 torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB) Fusing layers... -Model Summary: 224 layers, 7266973 parameters, 0 gradients, 17.0 GFLOPS +Model Summary: 224 layers, 7266973 parameters, 0 gradients, 17.0 GFLOPs image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, Done. (0.010s) image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 1 tie, Done. (0.011s) Results saved to runs/detect/exp2 diff --git a/detect.py b/detect.py index c6b76d981541..aba87687e666 100644 --- a/detect.py +++ b/detect.py @@ -28,7 +28,7 @@ def detect(opt): # Initialize set_logging() device = select_device(opt.device) - half = device.type != 'cpu' # half precision only supported on CUDA + half = opt.half and device.type != 'cpu' # half precision only supported on CUDA # Load model model = attempt_load(weights, map_location=device) # load FP32 model @@ -172,6 +172,7 @@ def detect(opt): parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)') parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels') parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences') + parser.add_argument('--half', type=bool, default=False, help='use FP16 half-precision inference') opt = parser.parse_args() print(opt) check_requirements(exclude=('tensorboard', 'pycocotools', 'thop')) diff --git a/hubconf.py b/hubconf.py index a52aae9fd1b7..bedbee18f87f 100644 --- a/hubconf.py +++ b/hubconf.py @@ -42,8 +42,7 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo cfg = list((Path(__file__).parent / 'models').rglob(f'{name}.yaml'))[0] # model.yaml path model = Model(cfg, channels, classes) # create model if pretrained: - attempt_download(fname) # download if not found locally - ckpt = torch.load(fname, map_location=torch.device('cpu')) # load + ckpt = torch.load(attempt_download(fname), map_location=torch.device('cpu')) # load msd = model.state_dict() # model state_dict csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 csd = {k: v for k, v in csd.items() if msd[k].shape == v.shape} # filter diff --git a/models/experimental.py b/models/experimental.py index afa787907104..d316b18373c3 100644 --- a/models/experimental.py +++ b/models/experimental.py @@ -116,8 +116,7 @@ def attempt_load(weights, map_location=None, inplace=True): # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a model = Ensemble() for w in weights if isinstance(weights, list) else [weights]: - attempt_download(w) - ckpt = torch.load(w, map_location=map_location) # load + ckpt = torch.load(attempt_download(w), map_location=map_location) # load model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval()) # FP32 model # Compatibility updates diff --git a/models/export.py b/models/export.py index 0d1147938e37..c03770178829 100644 --- a/models/export.py +++ b/models/export.py @@ -44,22 +44,19 @@ # Load PyTorch model device = select_device(opt.device) + assert not (opt.device.lower() == 'cpu' and opt.half), '--half only compatible with GPU export, i.e. 
use --device 0' model = attempt_load(opt.weights, map_location=device) # load FP32 model labels = model.names - # Checks + # Input gs = int(max(model.stride)) # grid size (max stride) opt.img_size = [check_img_size(x, gs) for x in opt.img_size] # verify img_size are gs-multiples - assert not (opt.device.lower() == 'cpu' and opt.half), '--half only compatible with GPU export, i.e. use --device 0' - - # Input img = torch.zeros(opt.batch_size, 3, *opt.img_size).to(device) # image size(1,3,320,192) iDetection # Update model if opt.half: img, model = img.half(), model.half() # to FP16 - if opt.train: - model.train() # training mode (no grid construction in Detect layer) + model.train() if opt.train else model.eval() # training mode = no Detect() layer grid construction for k, m in model.named_modules(): m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility if isinstance(m, models.common.Conv): # assign export-friendly activations @@ -96,11 +93,14 @@ print(f'{prefix} starting export with onnx {onnx.__version__}...') f = opt.weights.replace('.pt', '.onnx') # filename - torch.onnx.export(model, img, f, verbose=False, opset_version=opt.opset_version, input_names=['images'], + torch.onnx.export(model, img, f, verbose=False, opset_version=opt.opset_version, training=torch.onnx.TrainingMode.TRAINING if opt.train else torch.onnx.TrainingMode.EVAL, do_constant_folding=not opt.train, - dynamic_axes={'images': {0: 'batch', 2: 'height', 3: 'width'}, # size(1,3,640,640) - 'output': {0: 'batch', 2: 'y', 3: 'x'}} if opt.dynamic else None) + input_names=['images'], + output_names=['output'], + dynamic_axes={'images': {0: 'batch', 2: 'height', 3: 'width'}, # shape(1,3,640,640) + 'output': {0: 'batch', 1: 'anchors'} # shape(1,25200,85) + } if opt.dynamic else None) # Checks model_onnx = onnx.load(f) # load onnx model diff --git a/models/yolo.py b/models/yolo.py index 2844cd0410e0..1a7be913023c 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -21,7 +21,7 @@ select_device, copy_attr try: - import thop # for FLOPS computation + import thop # for FLOPs computation except ImportError: thop = None @@ -140,13 +140,13 @@ def forward_once(self, x, profile=False): x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers if profile: - o = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPS + o = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPs t = time_synchronized() for _ in range(10): _ = m(x) dt.append((time_synchronized() - t) * 100) if m == self.model[0]: - logger.info(f"{'time (ms)':>10s} {'GFLOPS':>10s} {'params':>10s} {'module'}") + logger.info(f"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s} {'module'}") logger.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f} {m.type}') x = m(x) # run diff --git a/requirements.txt b/requirements.txt index 1c07c651150e..a20fb6ad0ea5 100755 --- a/requirements.txt +++ b/requirements.txt @@ -27,4 +27,4 @@ pandas # extras -------------------------------------- # Cython # for pycocotools https://github.com/cocodataset/cocoapi/issues/172 pycocotools>=2.0 # COCO mAP -thop # FLOPS computation +thop # FLOPs computation diff --git a/test.py b/test.py index 0716c5d8b93c..12141f71c2c1 100644 --- a/test.py +++ b/test.py @@ -95,7 +95,7 @@ def test(data, confusion_matrix = ConfusionMatrix(nc=nc) names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)} coco91class = coco80_to_coco91_class() - s = ('%20s' + '%12s' * 6) % ('Class', 
'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95') + s = ('%20s' + '%11s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95') p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0. loss = torch.zeros(3, device=device) jdict, stats, ap, ap_class, wandb_images = [], [], [], [], [] @@ -228,7 +228,7 @@ def test(data, nt = torch.zeros(1) # Print results - pf = '%20s' + '%12i' * 2 + '%12.3g' * 4 # print format + pf = '%20s' + '%11i' * 2 + '%11.3g' * 4 # print format print(pf % ('all', seen, nt.sum(), mp, mr, map50, map)) # Print results per class @@ -306,6 +306,7 @@ def test(data, parser.add_argument('--project', default='runs/test', help='save to project/name') parser.add_argument('--name', default='exp', help='save to project/name') parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--half', type=bool, default=False, help='use FP16 half-precision inference') opt = parser.parse_args() opt.save_json |= opt.data.endswith('coco.yaml') opt.data = check_file(opt.data) # check file @@ -326,6 +327,7 @@ def test(data, save_txt=opt.save_txt | opt.save_hybrid, save_hybrid=opt.save_hybrid, save_conf=opt.save_conf, + half_precision=opt.half, opt=opt ) diff --git a/train.py b/train.py index 3e8d5075aef1..093a6197ff06 100644 --- a/train.py +++ b/train.py @@ -4,6 +4,7 @@ import os import random import time +import warnings from copy import deepcopy from pathlib import Path from threading import Thread @@ -62,7 +63,6 @@ def train(hyp, opt, device, tb_writer=None): init_seeds(2 + rank) with open(opt.data) as f: data_dict = yaml.safe_load(f) # data dict - is_coco = opt.data.endswith('coco.yaml') # Logging- Doing this before checking the dataset. Might update data_dict loggers = {'wandb': None} # loggers dict @@ -78,12 +78,13 @@ def train(hyp, opt, device, tb_writer=None): nc = 1 if opt.single_cls else int(data_dict['nc']) # number of classes names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data) # check + is_coco = opt.data.endswith('coco.yaml') and nc == 80 # COCO dataset # Model pretrained = weights.endswith('.pt') if pretrained: with torch_distributed_zero_first(rank): - attempt_download(weights) # download if not found locally + weights = attempt_download(weights) # download if not found locally ckpt = torch.load(weights, map_location=device) # load checkpoint model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create exclude = ['anchor'] if (opt.cfg or hyp.get('anchors')) and not opt.resume else [] # exclude keys @@ -323,18 +324,19 @@ def train(hyp, opt, device, tb_writer=None): mloss = (mloss * i + loss_items) / (i + 1) # update mean losses mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0) # (GB) s = ('%10s' * 2 + '%10.4g' * 6) % ( - '%g/%g' % (epoch, epochs - 1), mem, *mloss, targets.shape[0], imgs.shape[-1]) + f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1]) pbar.set_description(s) # Plot if plots and ni < 3: f = save_dir / f'train_batch{ni}.jpg' # filename Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start() - if tb_writer: - tb_writer.add_graph(torch.jit.trace(de_parallel(model), imgs, strict=False), []) # model graph - # tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch) + if tb_writer and 
ni == 0: + with warnings.catch_warnings(): + warnings.simplefilter('ignore') # suppress jit trace warning + tb_writer.add_graph(torch.jit.trace(de_parallel(model), imgs, strict=False), []) # graph elif plots and ni == 10 and wandb_logger.wandb: - wandb_logger.log({"Mosaics": [wandb_logger.wandb.Image(str(x), caption=x.name) for x in + wandb_logger.log({'Mosaics': [wandb_logger.wandb.Image(str(x), caption=x.name) for x in save_dir.glob('train*.jpg') if x.exists()]}) # end batch ------------------------------------------------------------------------------------------------ @@ -358,6 +360,7 @@ def train(hyp, opt, device, tb_writer=None): single_cls=opt.single_cls, dataloader=testloader, save_dir=save_dir, + save_json=is_coco and final_epoch, verbose=nc < 50 and final_epoch, plots=plots and final_epoch, wandb_logger=wandb_logger, @@ -409,41 +412,38 @@ def train(hyp, opt, device, tb_writer=None): # end epoch ---------------------------------------------------------------------------------------------------- # end training if rank in [-1, 0]: - # Plots + logger.info(f'{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.\n') if plots: plot_results(save_dir=save_dir) # save as results.png if wandb_logger.wandb: files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]] wandb_logger.log({"Results": [wandb_logger.wandb.Image(str(save_dir / f), caption=f) for f in files if (save_dir / f).exists()]}) - # Test best.pt - logger.info('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600)) - if opt.data.endswith('coco.yaml') and nc == 80: # if COCO - for m in [last, best] if best.exists() else [last]: # speed, mAP tests - results, _, _ = test.test(opt.data, - batch_size=batch_size * 2, - imgsz=imgsz_test, - conf_thres=0.001, - iou_thres=0.7, - model=attempt_load(m, device).half(), - single_cls=opt.single_cls, - dataloader=testloader, - save_dir=save_dir, - save_json=True, - plots=False, - is_coco=is_coco) - - # Strip optimizers - final = best if best.exists() else last # final model - for f in last, best: - if f.exists(): - strip_optimizer(f) # strip optimizers - if opt.bucket: - os.system(f'gsutil cp {final} gs://{opt.bucket}/weights') # upload - if wandb_logger.wandb and not opt.evolve: # Log the stripped model - wandb_logger.wandb.log_artifact(str(final), type='model', - name='run_' + wandb_logger.wandb_run.id + '_model', - aliases=['latest', 'best', 'stripped']) + + if not opt.evolve: + if is_coco: # COCO dataset + for m in [last, best] if best.exists() else [last]: # speed, mAP tests + results, _, _ = test.test(opt.data, + batch_size=batch_size * 2, + imgsz=imgsz_test, + conf_thres=0.001, + iou_thres=0.7, + model=attempt_load(m, device).half(), + single_cls=opt.single_cls, + dataloader=testloader, + save_dir=save_dir, + save_json=True, + plots=False, + is_coco=is_coco) + + # Strip optimizers + for f in last, best: + if f.exists(): + strip_optimizer(f) # strip optimizers + if wandb_logger.wandb: # Log the stripped model + wandb_logger.wandb.log_artifact(str(best if best.exists() else last), type='model', + name='run_' + wandb_logger.wandb_run.id + '_model', + aliases=['latest', 'best', 'stripped']) wandb_logger.finish_run() else: dist.destroy_process_group() diff --git a/tutorial.ipynb b/tutorial.ipynb index 3954feadfcb2..4e760b13bb41 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -517,7 +517,8 @@ "colab_type": "text" }, "source": [ - "\"Open" + "\"Open", + "\"Kaggle\"" ] }, { @@ 
-529,7 +530,7 @@ "\n", "\n", "This is the **official YOLOv5 🚀 notebook** authored by **Ultralytics**, and is freely available for redistribution under the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/). \n", - "For more information please visit https://github.com/ultralytics/yolov5 and https://www.ultralytics.com. Thank you!" + "For more information please visit https://github.com/ultralytics/yolov5 and https://ultralytics.com. Thank you!" ] }, { @@ -610,7 +611,7 @@ "YOLOv5 🚀 v5.0-1-g0f395b3 torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", "\n", "Fusing layers... \n", - "Model Summary: 224 layers, 7266973 parameters, 0 gradients, 17.0 GFLOPS\n", + "Model Summary: 224 layers, 7266973 parameters, 0 gradients, 17.0 GFLOPs\n", "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, Done. (0.008s)\n", "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, Done. (0.008s)\n", "Results saved to runs/detect/exp\n", @@ -733,7 +734,7 @@ "100% 168M/168M [00:05<00:00, 32.3MB/s]\n", "\n", "Fusing layers... \n", - "Model Summary: 476 layers, 87730285 parameters, 0 gradients, 218.8 GFLOPS\n", + "Model Summary: 476 layers, 87730285 parameters, 0 gradients, 218.8 GFLOPs\n", "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco/val2017' images and labels... 4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:01<00:00, 3102.29it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: ../coco/val2017.cache\n", " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:23<00:00, 1.87it/s]\n", @@ -963,7 +964,7 @@ " 22 [-1, 10] 1 0 models.common.Concat [1] \n", " 23 -1 1 1182720 models.common.C3 [512, 512, 1, False] \n", " 24 [17, 20, 23] 1 229245 models.yolo.Detect [80, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n", - "Model Summary: 283 layers, 7276605 parameters, 7276605 gradients, 17.1 GFLOPS\n", + "Model Summary: 283 layers, 7276605 parameters, 7276605 gradients, 17.1 GFLOPs\n", "\n", "Transferred 362/362 items from yolov5s.pt\n", "Scaled weight_decay = 0.0005\n", @@ -1260,4 +1261,4 @@ "outputs": [] } ] -} \ No newline at end of file +} diff --git a/utils/datasets.py b/utils/datasets.py index 7dd181400da5..b6e43b94cfe9 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -535,7 +535,7 @@ def __getitem__(self, index): # MixUp https://arxiv.org/pdf/1710.09412.pdf if random.random() < hyp['mixup']: img2, labels2 = load_mosaic(self, random.randint(0, self.n - 1)) - r = np.random.beta(8.0, 8.0) # mixup ratio, alpha=beta=8.0 + r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0 img = (img * r + img2 * (1 - r)).astype(np.uint8) labels = np.concatenate((labels, labels2), 0) @@ -655,12 +655,12 @@ def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5): hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV)) dtype = img.dtype # uint8 - x = np.arange(0, 256, dtype=np.int16) + x = np.arange(0, 256, dtype=r.dtype) lut_hue = ((x * r[0]) % 180).astype(dtype) lut_sat = np.clip(x * r[1], 0, 255).astype(dtype) lut_val = np.clip(x * r[2], 0, 255).astype(dtype) - img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype) + img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))) cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed diff --git a/utils/general.py b/utils/general.py index 006e64859f32..a12b0aafba0e 100755 --- a/utils/general.py +++ b/utils/general.py @@ 
-1,5 +1,6 @@ # YOLOv5 general utils +import contextlib import glob import logging import math @@ -7,11 +8,13 @@ import platform import random import re -import subprocess +import signal import time +import urllib from itertools import repeat from multiprocessing.pool import ThreadPool from pathlib import Path +from subprocess import check_output import cv2 import numpy as np @@ -33,6 +36,26 @@ os.environ['NUMEXPR_MAX_THREADS'] = str(min(os.cpu_count(), 8)) # NumExpr max threads +class timeout(contextlib.ContextDecorator): + # Usage: @timeout(seconds) decorator or 'with timeout(seconds):' context manager + def __init__(self, seconds, *, timeout_msg='', suppress_timeout_errors=True): + self.seconds = int(seconds) + self.timeout_message = timeout_msg + self.suppress = bool(suppress_timeout_errors) + + def _timeout_handler(self, signum, frame): + raise TimeoutError(self.timeout_message) + + def __enter__(self): + signal.signal(signal.SIGALRM, self._timeout_handler) # Set handler for SIGALRM + signal.alarm(self.seconds) # start countdown for SIGALRM to be raised + + def __exit__(self, exc_type, exc_val, exc_tb): + signal.alarm(0) # Cancel SIGALRM if it's scheduled + if self.suppress and exc_type is TimeoutError: # Suppress TimeoutError + return True + + def set_logging(rank=-1, verbose=True): logging.basicConfig( format="%(message)s", @@ -53,12 +76,12 @@ def get_latest_run(search_dir='.'): def is_docker(): - # Is environment a Docker container + # Is environment a Docker container? return Path('/workspace').exists() # or Path('/.dockerenv').exists() def is_colab(): - # Is environment a Google Colab instance + # Is environment a Google Colab instance? try: import google.colab return True @@ -66,6 +89,11 @@ def is_colab(): return False +def is_pip(): + # Is file in a pip package? + return 'site-packages' in Path(__file__).absolute().parts + + def emojis(str=''): # Return platform-dependent emoji-safe version of string return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str @@ -80,13 +108,13 @@ def check_online(): # Check internet connectivity import socket try: - socket.create_connection(("1.1.1.1", 443), 5) # check host accesability + socket.create_connection(("1.1.1.1", 443), 5) # check host accessibility return True except OSError: return False -def check_git_status(): +def check_git_status(err_msg=', for updates see https://github.com/ultralytics/yolov5'): # Recommend 'git pull' if code is out of date print(colorstr('github: '), end='') try: @@ -95,9 +123,9 @@ def check_git_status(): assert check_online(), 'skipping check (offline)' cmd = 'git fetch && git config --get remote.origin.url' - url = subprocess.check_output(cmd, shell=True).decode().strip().rstrip('.git') # github repo url - branch = subprocess.check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out - n = int(subprocess.check_output(f'git rev-list {branch}..origin/master --count', shell=True)) # commits behind + url = check_output(cmd, shell=True, timeout=5).decode().strip().rstrip('.git') # git fetch + branch = check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out + n = int(check_output(f'git rev-list {branch}..origin/master --count', shell=True)) # commits behind if n > 0: s = f"⚠️ WARNING: code is out of date by {n} commit{'s' * (n > 1)}. " \ f"Use 'git pull' to update or 'git clone {url}' to download latest." 
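The timeout class added at the top of this utils/general.py diff doubles as a decorator because it extends contextlib.ContextDecorator. A usage sketch, with the caveat that the underlying signal.SIGALRM mechanism is Unix-only:

```python
import time

from utils.general import timeout  # the class added above

# Context-manager form: sleep is cut off after 2 s, and the TimeoutError is
# swallowed because suppress_timeout_errors defaults to True
with timeout(2, timeout_msg='slept too long'):
    time.sleep(10)

# Decorator form; here the TimeoutError propagates to the caller
@timeout(2, suppress_timeout_errors=False)
def slow_op():
    time.sleep(10)
```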
@@ -105,7 +133,7 @@ def check_git_status(): s = f'up to date with {url} ✅' print(emojis(s)) # emoji-safe except Exception as e: - print(e) + print(f'{e}{err_msg}') def check_python(minimum='3.7.0', required=True): @@ -135,10 +163,11 @@ def check_requirements(requirements='requirements.txt', exclude=()): try: pkg.require(r) except Exception as e: # DistributionNotFound or VersionConflict if requirements not met - n += 1 print(f"{prefix} {r} not found and is required by YOLOv5, attempting auto-update...") try: - print(subprocess.check_output(f"pip install '{r}'", shell=True).decode()) + assert check_online(), f"'pip install {r}' skipped (offline)" + print(check_output(f"pip install '{r}'", shell=True).decode()) + n += 1 except Exception as e: print(f'{prefix} {e}') @@ -178,7 +207,8 @@ def check_file(file): if Path(file).is_file() or file == '': # exists return file elif file.startswith(('http://', 'https://')): # download - url, file = file, Path(file).name + url, file = file, Path(urllib.parse.unquote(str(file))).name # url, file (decode '%2F' to '/' etc.) + file = file.split('?')[0] # parse authentication https://url.com/file.txt?auth... print(f'Downloading {url} to {file}...') torch.hub.download_url_to_file(url, file) assert Path(file).exists() and Path(file).stat().st_size > 0, f'File download failed: {url}' # check diff --git a/utils/google_utils.py b/utils/google_utils.py index 63d3e5b212f3..aefc7de2db2e 100644 --- a/utils/google_utils.py +++ b/utils/google_utils.py @@ -4,6 +4,7 @@ import platform import subprocess import time +import urllib from pathlib import Path import requests @@ -16,11 +17,39 @@ def gsutil_getsize(url=''): return eval(s.split(' ')[0]) if len(s) else 0 # bytes -def attempt_download(file, repo='ultralytics/yolov5'): +def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''): + # Attempts to download file from url or url2, checks and removes incomplete downloads < min_bytes + file = Path(file) + assert_msg = f"Downloaded file '{file}' does not exist or size is < min_bytes={min_bytes}" + try: # url1 + print(f'Downloading {url} to {file}...') + torch.hub.download_url_to_file(url, str(file)) + assert file.exists() and file.stat().st_size > min_bytes, assert_msg # check + except Exception as e: # url2 + file.unlink(missing_ok=True) # remove partial downloads + print(f'ERROR: {e}\nRe-attempting {url2 or url} to {file}...') + os.system(f"curl -L '{url2 or url}' -o '{file}' --retry 3 -C -") # curl download, retry and resume on fail + finally: + if not file.exists() or file.stat().st_size < min_bytes: # check + file.unlink(missing_ok=True) # remove partial downloads + print(f"ERROR: {assert_msg}\n{error_msg}") + print('') + + +def attempt_download(file, repo='ultralytics/yolov5'): # from utils.google_utils import *; attempt_download() # Attempt file download if does not exist file = Path(str(file).strip().replace("'", '')) if not file.exists(): + # URL specified + name = Path(urllib.parse.unquote(str(file))).name # decode '%2F' to '/' etc. + if str(file).startswith(('http:/', 'https:/')): # download + url = str(file).replace(':/', '://') # Pathlib turns :// -> :/ + name = name.split('?')[0] # parse authentication https://url.com/file.txt?auth... 
+ safe_download(file=name, url=url, min_bytes=1E5) + return name + + # GitHub assets file.parent.mkdir(parents=True, exist_ok=True) # make parent dir (if required) try: response = requests.get(f'https://api.github.com/repos/{repo}/releases/latest').json() # github api @@ -34,27 +63,14 @@ def attempt_download(file, repo='ultralytics/yolov5'): except: tag = 'v5.0' # current release - name = file.name if name in assets: - msg = f'{file} missing, try downloading from https://github.com/{repo}/releases/' - redundant = False # second download option - try: # GitHub - url = f'https://github.com/{repo}/releases/download/{tag}/{name}' - print(f'Downloading {url} to {file}...') - torch.hub.download_url_to_file(url, file) - assert file.exists() and file.stat().st_size > 1E6 # check - except Exception as e: # GCP - print(f'Download error: {e}') - assert redundant, 'No secondary mirror' - url = f'https://storage.googleapis.com/{repo}/ckpt/{name}' - print(f'Downloading {url} to {file}...') - os.system(f"curl -L '{url}' -o '{file}' --retry 3 -C -") # curl download, retry and resume on fail - finally: - if not file.exists() or file.stat().st_size < 1E6: # check - file.unlink(missing_ok=True) # remove partial downloads - print(f'ERROR: Download failure: {msg}') - print('') - return + safe_download(file, + url=f'https://github.com/{repo}/releases/download/{tag}/{name}', + # url2=f'https://storage.googleapis.com/{repo}/ckpt/{name}', # backup url (optional) + min_bytes=1E5, + error_msg=f'{file} missing, try downloading from https://github.com/{repo}/releases/') + + return str(file) def gdrive_download(id='16TiPfZj7htmTyhntwcZyEEAejOUxuT6m', file='tmp.zip'): diff --git a/utils/torch_utils.py b/utils/torch_utils.py index aa54c3cf561e..6a7d07634813 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -18,7 +18,7 @@ import torchvision try: - import thop # for FLOPS computation + import thop # for FLOPs computation except ImportError: thop = None logger = logging.getLogger(__name__) @@ -105,13 +105,13 @@ def profile(x, ops, n=100, device=None): x = x.to(device) x.requires_grad = True print(torch.__version__, device.type, torch.cuda.get_device_properties(0) if device.type == 'cuda' else '') - print(f"\n{'Params':>12s}{'GFLOPS':>12s}{'forward (ms)':>16s}{'backward (ms)':>16s}{'input':>24s}{'output':>24s}") + print(f"\n{'Params':>12s}{'GFLOPs':>12s}{'forward (ms)':>16s}{'backward (ms)':>16s}{'input':>24s}{'output':>24s}") for m in ops if isinstance(ops, list) else [ops]: m = m.to(device) if hasattr(m, 'to') else m # device m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m # type dtf, dtb, t = 0., 0., [0., 0., 0.] 
# dt forward, backward try: - flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # GFLOPS + flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # GFLOPs except: flops = 0 @@ -219,13 +219,13 @@ def model_info(model, verbose=False, img_size=640): print('%5g %40s %9s %12g %20s %10.3g %10.3g' % (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std())) - try: # FLOPS + try: # FLOPs from thop import profile stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32 img = torch.zeros((1, model.yaml.get('ch', 3), stride, stride), device=next(model.parameters()).device) # input - flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2 # stride GFLOPS + flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2 # stride GFLOPs img_size = img_size if isinstance(img_size, list) else [img_size, img_size] # expand if int/float - fs = ', %.1f GFLOPS' % (flops * img_size[0] / stride * img_size[1] / stride) # 640x640 GFLOPS + fs = ', %.1f GFLOPs' % (flops * img_size[0] / stride * img_size[1] / stride) # 640x640 GFLOPs except (ImportError, Exception): fs = '' From 28bff22df8e45e60e37fbf3af2c508a9536a73c7 Mon Sep 17 00:00:00 2001 From: Dean Mark <2552482+deanmark@users.noreply.github.com> Date: Tue, 8 Jun 2021 19:00:21 +0300 Subject: [PATCH 041/757] Use multi-threading in cache_labels (#3505) * Use multi threading in cache_labels * PEP8 reformat * Add num_threads * changed ThreadPool.imap_unordered to Pool.imap_unordered * Remove inplace additions * Update datasets.py refactor initial desc Co-authored-by: Glenn Jocher --- utils/datasets.py | 99 +++++++++++++++++++++++++++-------------------- 1 file changed, 56 insertions(+), 43 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index b6e43b94cfe9..bda435776629 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -9,7 +9,7 @@ import shutil import time from itertools import repeat -from multiprocessing.pool import ThreadPool +from multiprocessing.pool import ThreadPool, Pool from pathlib import Path from threading import Thread @@ -29,6 +29,7 @@ help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo'] # acceptable image suffixes vid_formats = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes +num_threads = min(8, os.cpu_count()) # number of multiprocessing threads logger = logging.getLogger(__name__) # Get orientation exif tag @@ -447,7 +448,7 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r if cache_images: gb = 0 # Gigabytes of cached images self.img_hw0, self.img_hw = [None] * n, [None] * n - results = ThreadPool(8).imap(lambda x: load_image(*x), zip(repeat(self), range(n))) # 8 threads + results = ThreadPool(num_threads).imap(lambda x: load_image(*x), zip(repeat(self), range(n))) pbar = tqdm(enumerate(results), total=n) for i, x in pbar: self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # img, hw_original, hw_resized = load_image(self, i) @@ -458,53 +459,24 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r def cache_labels(self, path=Path('./labels.cache'), prefix=''): # Cache dataset labels, check images and read shapes x = {} # dict - nm, nf, ne, nc = 0, 0, 0, 0 # number missing, found, empty, duplicate - pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files)) - for i, (im_file, lb_file) in 
enumerate(pbar): - try: - # verify images - im = Image.open(im_file) - im.verify() # PIL verify - shape = exif_size(im) # image size - segments = [] # instance segments - assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels' - assert im.format.lower() in img_formats, f'invalid image format {im.format}' - - # verify labels - if os.path.isfile(lb_file): - nf += 1 # label found - with open(lb_file, 'r') as f: - l = [x.split() for x in f.read().strip().splitlines() if len(x)] - if any([len(x) > 8 for x in l]): # is segment - classes = np.array([x[0] for x in l], dtype=np.float32) - segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in l] # (cls, xy1...) - l = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh) - l = np.array(l, dtype=np.float32) - if len(l): - assert l.shape[1] == 5, 'labels require 5 columns each' - assert (l >= 0).all(), 'negative labels' - assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels' - assert np.unique(l, axis=0).shape[0] == l.shape[0], 'duplicate labels' - else: - ne += 1 # label empty - l = np.zeros((0, 5), dtype=np.float32) - else: - nm += 1 # label missing - l = np.zeros((0, 5), dtype=np.float32) - x[im_file] = [l, shape, segments] - except Exception as e: - nc += 1 - logging.info(f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}') - - pbar.desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels... " \ - f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted" + nm, nf, ne, nc = 0, 0, 0, 0 # number missing, found, empty, corrupt + desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels..." + with Pool(num_threads) as pool: + pbar = tqdm(pool.imap_unordered(verify_image_label, + zip(self.img_files, self.label_files, repeat(prefix))), + desc=desc, total=len(self.img_files)) + for im_file, l, shape, segments, nm_f, nf_f, ne_f, nc_f in pbar: + if im_file: + x[im_file] = [l, shape, segments] + nm, nf, ne, nc = nm + nm_f, nf + nf_f, ne + ne_f, nc + nc_f + pbar.desc = f"{desc}{nf} found, {nm} missing, {ne} empty, {nc} corrupted" pbar.close() if nf == 0: logging.info(f'{prefix}WARNING: No labels found in {path}. 
See {help_url}') x['hash'] = get_hash(self.label_files + self.img_files) - x['results'] = nf, nm, ne, nc, i + 1 + x['results'] = nf, nm, ne, nc, len(self.img_files) x['version'] = 0.2 # cache version try: torch.save(x, path) # save cache for next time @@ -1069,3 +1041,44 @@ def autosplit(path='../coco128', weights=(0.9, 0.1, 0.0), annotated_only=False): if not annotated_only or Path(img2label_paths([str(img)])[0]).exists(): # check label with open(path / txt[i], 'a') as f: f.write(str(img) + '\n') # add image to txt file + + +def verify_image_label(params): + # Verify one image-label pair + im_file, lb_file, prefix = params + nm, nf, ne, nc = 0, 0, 0, 0 # number missing, found, empty, corrupt + try: + # verify images + im = Image.open(im_file) + im.verify() # PIL verify + shape = exif_size(im) # image size + segments = [] # instance segments + assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels' + assert im.format.lower() in img_formats, f'invalid image format {im.format}' + + # verify labels + if os.path.isfile(lb_file): + nf = 1 # label found + with open(lb_file, 'r') as f: + l = [x.split() for x in f.read().strip().splitlines() if len(x)] + if any([len(x) > 8 for x in l]): # is segment + classes = np.array([x[0] for x in l], dtype=np.float32) + segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in l] # (cls, xy1...) + l = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh) + l = np.array(l, dtype=np.float32) + if len(l): + assert l.shape[1] == 5, 'labels require 5 columns each' + assert (l >= 0).all(), 'negative labels' + assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels' + assert np.unique(l, axis=0).shape[0] == l.shape[0], 'duplicate labels' + else: + ne = 1 # label empty + l = np.zeros((0, 5), dtype=np.float32) + else: + nm = 1 # label missing + l = np.zeros((0, 5), dtype=np.float32) + return im_file, l, shape, segments, nm, nf, ne, nc + except Exception as e: + nc = 1 + logging.info(f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}') + return [None] * 4 + [nm, nf, ne, nc] From 8d52c1c5c58a4c5cf64a6fa718cfb4e5350a2045 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 8 Jun 2021 18:36:40 +0200 Subject: [PATCH 042/757] Update datasets.py (#3531) Minor updates to https://github.com/ultralytics/yolov5/pull/3505, inplace accumulation. --- utils/datasets.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index bda435776629..daaa8d24855e 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -462,19 +462,20 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): nm, nf, ne, nc = 0, 0, 0, 0 # number missing, found, empty, corrupt desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels..." with Pool(num_threads) as pool: - pbar = tqdm(pool.imap_unordered(verify_image_label, - zip(self.img_files, self.label_files, repeat(prefix))), + pbar = tqdm(pool.imap_unordered(verify_image_label, zip(self.img_files, self.label_files, repeat(prefix))), desc=desc, total=len(self.img_files)) for im_file, l, shape, segments, nm_f, nf_f, ne_f, nc_f in pbar: + nm += nm_f + nf += nf_f + ne += ne_f + nc += nc_f if im_file: x[im_file] = [l, shape, segments] - nm, nf, ne, nc = nm + nm_f, nf + nf_f, ne + ne_f, nc + nc_f pbar.desc = f"{desc}{nf} found, {nm} missing, {ne} empty, {nc} corrupted" - pbar.close() + pbar.close() if nf == 0: logging.info(f'{prefix}WARNING: No labels found in {path}. 
See {help_url}') - x['hash'] = get_hash(self.label_files + self.img_files) x['results'] = nf, nm, ne, nc, len(self.img_files) x['version'] = 0.2 # cache version From c6b51f4189efbda055a08709cc35fcf5743379fd Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 8 Jun 2021 18:47:13 +0200 Subject: [PATCH 043/757] Update FP16 `--half` argument for test.py and detect.py (#3532) * Update FP16 `--half` argument for test.py and detect.py * Update detect.py --- detect.py | 2 +- test.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/detect.py b/detect.py index aba87687e666..537f47dfafab 100644 --- a/detect.py +++ b/detect.py @@ -172,7 +172,7 @@ def detect(opt): parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)') parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels') parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences') - parser.add_argument('--half', type=bool, default=False, help='use FP16 half-precision inference') + parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') opt = parser.parse_args() print(opt) check_requirements(exclude=('tensorboard', 'pycocotools', 'thop')) diff --git a/test.py b/test.py index 12141f71c2c1..6a2a4e47c142 100644 --- a/test.py +++ b/test.py @@ -306,7 +306,7 @@ def test(data, parser.add_argument('--project', default='runs/test', help='save to project/name') parser.add_argument('--name', default='exp', help='save to project/name') parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') - parser.add_argument('--half', type=bool, default=False, help='use FP16 half-precision inference') + parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') opt = parser.parse_args() opt.save_json |= opt.data.endswith('coco.yaml') opt.data = check_file(opt.data) # check file From 78cf4885565302603fd1b211d498160bdf88ad38 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 8 Jun 2021 18:54:33 +0200 Subject: [PATCH 044/757] Created using Colaboratory --- tutorial.ipynb | 259 +++++++++++++++++++++++++++++++------------------ 1 file changed, 164 insertions(+), 95 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 4e760b13bb41..4429c1044cfe 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -16,7 +16,7 @@ "accelerator": "GPU", "widgets": { "application/vnd.jupyter.widget-state+json": { - "8815626359d84416a2f44a95500580a4": { + "cef5e9351ca743bcba5febac0b096a30": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "state": { @@ -28,15 +28,15 @@ "_view_count": null, "_view_module_version": "1.5.0", "box_style": "", - "layout": "IPY_MODEL_3b85609c4ce94a74823f2cfe141ce68e", + "layout": "IPY_MODEL_ec326c52378f4410920c328f221e0514", "_model_module": "@jupyter-widgets/controls", "children": [ - "IPY_MODEL_876609753c2946248890344722963d44", - "IPY_MODEL_8abfdd8778e44b7ca0d29881cb1ada05" + "IPY_MODEL_83000c64a11c4ae8abd6f0ef2f108cef", + "IPY_MODEL_0f7899eb719f4a9c9852426551f97be9" ] } }, - "3b85609c4ce94a74823f2cfe141ce68e": { + "ec326c52378f4410920c328f221e0514": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -87,12 +87,12 @@ "left": null } }, - "876609753c2946248890344722963d44": { + "83000c64a11c4ae8abd6f0ef2f108cef": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "state": { "_view_name": "ProgressView", - "style": 
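
The `--half` fix in PATCH 043 is more than cosmetic: `argparse` applies `type=bool` to the raw argument string, and every non-empty string is truthy, so `--half False` silently enabled FP16. `action='store_true'` gives the intended flag semantics. A standalone illustration of the difference (not repository code):

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--broken', type=bool, default=False)  # bool('False') == True
    parser.add_argument('--half', action='store_true')  # present -> True, absent -> False

    print(parser.parse_args(['--broken', 'False']).broken)  # True, surprisingly
    print(parser.parse_args(['--half']).half)  # True
    print(parser.parse_args([]).half)  # False
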
"IPY_MODEL_78c6c3d97c484916b8ee167c63556800", + "style": "IPY_MODEL_886ac5b18b3c4c82bf15ad5055f1e17e", "_dom_classes": [], "description": "100%", "_model_name": "FloatProgressModel", @@ -107,30 +107,30 @@ "min": 0, "description_tooltip": null, "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_9dd0f182db5d45378ceafb855e486eb8" + "layout": "IPY_MODEL_4e67b3c3a49849c7a7ba28b7eec96e7a" } }, - "8abfdd8778e44b7ca0d29881cb1ada05": { + "0f7899eb719f4a9c9852426551f97be9": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "state": { "_view_name": "HTMLView", - "style": "IPY_MODEL_a3dab28b45c247089a3d1b8b09f327de", + "style": "IPY_MODEL_62c3682ff1804571a483d46664533969", "_dom_classes": [], "description": "", "_model_name": "HTMLModel", "placeholder": "​", "_view_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", - "value": " 781M/781M [08:43<00:00, 1.56MB/s]", + "value": " 781M/781M [00:12<00:00, 67.1MB/s]", "_view_count": null, "_view_module_version": "1.5.0", "description_tooltip": null, "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_32451332b7a94ba9aacddeaa6ac94d50" + "layout": "IPY_MODEL_599dda3b608b432393760b2ca4ae7c7d" } }, - "78c6c3d97c484916b8ee167c63556800": { + "886ac5b18b3c4c82bf15ad5055f1e17e": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "state": { @@ -145,7 +145,7 @@ "_model_module": "@jupyter-widgets/controls" } }, - "9dd0f182db5d45378ceafb855e486eb8": { + "4e67b3c3a49849c7a7ba28b7eec96e7a": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -196,7 +196,7 @@ "left": null } }, - "a3dab28b45c247089a3d1b8b09f327de": { + "62c3682ff1804571a483d46664533969": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "state": { @@ -210,7 +210,7 @@ "_model_module": "@jupyter-widgets/controls" } }, - "32451332b7a94ba9aacddeaa6ac94d50": { + "599dda3b608b432393760b2ca4ae7c7d": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -261,7 +261,7 @@ "left": null } }, - "0fffa335322b41658508e06aed0acbf0": { + "217ca488c82a4b7a80318b70887a556e": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "state": { @@ -273,15 +273,15 @@ "_view_count": null, "_view_module_version": "1.5.0", "box_style": "", - "layout": "IPY_MODEL_a354c6f80ce347e5a3ef64af87c0eccb", + "layout": "IPY_MODEL_4e63af16f1084ca98a6fa5a282f2a81e", "_model_module": "@jupyter-widgets/controls", "children": [ - "IPY_MODEL_85823e71fea54c39bd11e2e972348836", - "IPY_MODEL_fb11acd663fa4e71b041d67310d045fd" + "IPY_MODEL_49f4b3c7f6ff42b4b9132a8550e12186", + "IPY_MODEL_8ec9e1a4883245daaf029458ee09721f" ] } }, - "a354c6f80ce347e5a3ef64af87c0eccb": { + "4e63af16f1084ca98a6fa5a282f2a81e": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -332,12 +332,12 @@ "left": null } }, - "85823e71fea54c39bd11e2e972348836": { + "49f4b3c7f6ff42b4b9132a8550e12186": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "state": { "_view_name": "ProgressView", - "style": "IPY_MODEL_8a919053b780449aae5523658ad611fa", + "style": "IPY_MODEL_9d3e775ee11e4cf4b587b64fbc3cc6f7", "_dom_classes": [], "description": "100%", "_model_name": "FloatProgressModel", @@ -352,30 +352,30 @@ "min": 0, "description_tooltip": null, "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_5bae9393a58b44f7b69fb04816f94f6f" + "layout": 
"IPY_MODEL_70f68a9a51ac46e6ab7e51fb4fc6bda3" } }, - "fb11acd663fa4e71b041d67310d045fd": { + "8ec9e1a4883245daaf029458ee09721f": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "state": { "_view_name": "HTMLView", - "style": "IPY_MODEL_d26c6d16c7f24030ab2da5285bf198ee", + "style": "IPY_MODEL_fdb8ab377c114bc3b862ba76eb93cef7", "_dom_classes": [], "description": "", "_model_name": "HTMLModel", "placeholder": "​", "_view_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", - "value": " 21.1M/21.1M [00:02<00:00, 9.36MB/s]", + "value": " 21.1M/21.1M [00:36<00:00, 605kB/s]", "_view_count": null, "_view_module_version": "1.5.0", "description_tooltip": null, "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_f7767886b2364c8d9efdc79e175ad8eb" + "layout": "IPY_MODEL_cd267c153c244621a1f50706d2ddc897" } }, - "8a919053b780449aae5523658ad611fa": { + "9d3e775ee11e4cf4b587b64fbc3cc6f7": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "state": { @@ -390,7 +390,7 @@ "_model_module": "@jupyter-widgets/controls" } }, - "5bae9393a58b44f7b69fb04816f94f6f": { + "70f68a9a51ac46e6ab7e51fb4fc6bda3": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -441,7 +441,7 @@ "left": null } }, - "d26c6d16c7f24030ab2da5285bf198ee": { + "fdb8ab377c114bc3b862ba76eb93cef7": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "state": { @@ -455,7 +455,7 @@ "_model_module": "@jupyter-widgets/controls" } }, - "f7767886b2364c8d9efdc79e175ad8eb": { + "cd267c153c244621a1f50706d2ddc897": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -517,8 +517,7 @@ "colab_type": "text" }, "source": [ - "\"Open", - "\"Kaggle\"" + "\"Open" ] }, { @@ -551,7 +550,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "9b022435-4197-41fc-abea-81f86ce857d0" + "outputId": "0cabe440-e06c-48b9-9180-4b4ea1790ff5" }, "source": [ "!git clone https://github.com/ultralytics/yolov5 # clone repo\n", @@ -564,7 +563,7 @@ "clear_output()\n", "print(f\"Setup complete. 
Using torch {torch.__version__} ({torch.cuda.get_device_properties(0).name if torch.cuda.is_available() else 'CPU'})\")" ], - "execution_count": null, + "execution_count": 1, "outputs": [ { "output_type": "stream", @@ -663,32 +662,32 @@ "id": "WQPtK1QYVaD_", "colab": { "base_uri": "https://localhost:8080/", - "height": 65, + "height": 66, "referenced_widgets": [ - "8815626359d84416a2f44a95500580a4", - "3b85609c4ce94a74823f2cfe141ce68e", - "876609753c2946248890344722963d44", - "8abfdd8778e44b7ca0d29881cb1ada05", - "78c6c3d97c484916b8ee167c63556800", - "9dd0f182db5d45378ceafb855e486eb8", - "a3dab28b45c247089a3d1b8b09f327de", - "32451332b7a94ba9aacddeaa6ac94d50" + "cef5e9351ca743bcba5febac0b096a30", + "ec326c52378f4410920c328f221e0514", + "83000c64a11c4ae8abd6f0ef2f108cef", + "0f7899eb719f4a9c9852426551f97be9", + "886ac5b18b3c4c82bf15ad5055f1e17e", + "4e67b3c3a49849c7a7ba28b7eec96e7a", + "62c3682ff1804571a483d46664533969", + "599dda3b608b432393760b2ca4ae7c7d" ] }, - "outputId": "81521192-cf67-4a47-a4cc-434cb0ebc363" + "outputId": "56b6402a-81d5-41d0-a3c8-8889db1fca6c" }, "source": [ "# Download COCO val2017\n", "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017val.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../ && rm tmp.zip" ], - "execution_count": null, + "execution_count": 2, "outputs": [ { "output_type": "display_data", "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "8815626359d84416a2f44a95500580a4", + "model_id": "cef5e9351ca743bcba5febac0b096a30", "version_minor": 0, "version_major": 2 }, @@ -716,45 +715,45 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "2340b131-9943-4cd6-fd3a-8272aeb0774f" + "outputId": "a5d41761-f1a0-41fe-d0bb-4cceebd7c4a6" }, "source": [ "# Run YOLOv5x on COCO val2017\n", - "!python test.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65" + "!python test.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65 --half" ], - "execution_count": null, + "execution_count": 3, "outputs": [ { "output_type": "stream", "text": [ - "Namespace(augment=False, batch_size=32, conf_thres=0.001, data='./data/coco.yaml', device='', exist_ok=False, img_size=640, iou_thres=0.65, name='exp', project='runs/test', save_conf=False, save_hybrid=False, save_json=True, save_txt=False, single_cls=False, task='val', verbose=False, weights=['yolov5x.pt'])\n", - "YOLOv5 🚀 v5.0-1-g0f395b3 torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", + "Namespace(augment=False, batch_size=32, conf_thres=0.001, data='./data/coco.yaml', device='', exist_ok=False, half=True, img_size=640, iou_thres=0.65, name='exp', project='runs/test', save_conf=False, save_hybrid=False, save_json=True, save_txt=False, single_cls=False, task='val', verbose=False, weights=['yolov5x.pt'])\n", + "YOLOv5 🚀 v5.0-157-gc6b51f4 torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", "\n", "Downloading https://github.com/ultralytics/yolov5/releases/download/v5.0/yolov5x.pt to yolov5x.pt...\n", - "100% 168M/168M [00:05<00:00, 32.3MB/s]\n", + "100% 168M/168M [00:01<00:00, 156MB/s]\n", "\n", "Fusing layers... \n", - "Model Summary: 476 layers, 87730285 parameters, 0 gradients, 218.8 GFLOPs\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco/val2017' images and labels... 
4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:01<00:00, 3102.29it/s]\n", + "Model Summary: 476 layers, 87730285 parameters, 0 gradients\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:01<00:00, 3008.87it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: ../coco/val2017.cache\n", - " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:23<00:00, 1.87it/s]\n", - " all 5000 36335 0.745 0.627 0.68 0.49\n", - "Speed: 5.3/1.6/6.9 ms inference/NMS/total per 640x640 image at batch-size 32\n", + " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:17<00:00, 2.02it/s]\n", + " all 5000 36335 0.746 0.626 0.68 0.49\n", + "Speed: 5.3/1.5/6.8 ms inference/NMS/total per 640x640 image at batch-size 32\n", "\n", "Evaluating pycocotools mAP... saving runs/test/exp/yolov5x_predictions.json...\n", "loading annotations into memory...\n", - "Done (t=0.48s)\n", + "Done (t=0.44s)\n", "creating index...\n", "index created!\n", "Loading and preparing results...\n", - "DONE (t=5.08s)\n", + "DONE (t=4.88s)\n", "creating index...\n", "index created!\n", "Running per image evaluation...\n", "Evaluate annotation type *bbox*\n", - "DONE (t=90.51s).\n", + "DONE (t=83.47s).\n", "Accumulating evaluation results...\n", - "DONE (t=15.16s).\n", + "DONE (t=12.96s).\n", " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.504\n", " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.688\n", " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.546\n", @@ -827,32 +826,32 @@ "id": "Knxi2ncxWffW", "colab": { "base_uri": "https://localhost:8080/", - "height": 65, + "height": 66, "referenced_widgets": [ - "0fffa335322b41658508e06aed0acbf0", - "a354c6f80ce347e5a3ef64af87c0eccb", - "85823e71fea54c39bd11e2e972348836", - "fb11acd663fa4e71b041d67310d045fd", - "8a919053b780449aae5523658ad611fa", - "5bae9393a58b44f7b69fb04816f94f6f", - "d26c6d16c7f24030ab2da5285bf198ee", - "f7767886b2364c8d9efdc79e175ad8eb" + "217ca488c82a4b7a80318b70887a556e", + "4e63af16f1084ca98a6fa5a282f2a81e", + "49f4b3c7f6ff42b4b9132a8550e12186", + "8ec9e1a4883245daaf029458ee09721f", + "9d3e775ee11e4cf4b587b64fbc3cc6f7", + "70f68a9a51ac46e6ab7e51fb4fc6bda3", + "fdb8ab377c114bc3b862ba76eb93cef7", + "cd267c153c244621a1f50706d2ddc897" ] }, - "outputId": "b41ac253-9e1b-4c26-d78b-700ea0154f43" + "outputId": "9e4788c2-e1d4-4a13-c3d2-984f5df7ffab" }, "source": [ "# Download COCO128\n", "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../ && rm tmp.zip" ], - "execution_count": null, + "execution_count": 2, "outputs": [ { "output_type": "display_data", "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "0fffa335322b41658508e06aed0acbf0", + "model_id": "217ca488c82a4b7a80318b70887a556e", "version_minor": 0, "version_major": 2 }, @@ -918,23 +917,93 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "e715d09c-5d93-4912-a0df-9da0893f2014" + "outputId": "70004839-0c90-4bc0-c0e5-9a92f3e65b01" }, "source": [ "# Train YOLOv5s on COCO128 for 3 epochs\n", - "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --nosave --cache" + "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache" ], - "execution_count": null, + "execution_count": 4, "outputs": [ { "output_type": "stream", 
"text": [ "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", - "YOLOv5 🚀 v5.0-2-g54d6516 torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", + "YOLOv5 🚀 v5.0-157-gc6b51f4 torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", + "\n", + "Namespace(adam=False, artifact_alias='latest', batch_size=16, bbox_interval=-1, bucket='', cache_images=True, cfg='', data='./data/coco128.yaml', device='', entity=None, epochs=1, evolve=False, exist_ok=False, global_rank=-1, hyp='data/hyp.scratch.yaml', image_weights=False, img_size=[640, 640], label_smoothing=0.0, linear_lr=False, local_rank=-1, multi_scale=False, name='exp', noautoanchor=False, nosave=False, notest=False, project='runs/train', quad=False, rect=False, resume=False, save_dir='runs/train/exp', save_period=-1, single_cls=False, sync_bn=False, total_batch_size=16, upload_dataset=False, weights='yolov5s.pt', workers=8, world_size=1)\n", + "\u001b[34m\u001b[1mtensorboard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n", + "2021-06-08 16:52:25.719745: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcudart.so.11.0\n", + "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.2, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0\n", + "\u001b[34m\u001b[1mwandb: \u001b[0mInstall Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)\n", + "Downloading https://github.com/ultralytics/yolov5/releases/download/v5.0/yolov5s.pt to yolov5s.pt...\n", + "100% 14.1M/14.1M [00:00<00:00, 18.7MB/s]\n", + "\n", + "\n", + " from n params module arguments \n", + " 0 -1 1 3520 models.common.Focus [3, 32, 3] \n", + " 1 -1 1 18560 models.common.Conv [32, 64, 3, 2] \n", + " 2 -1 1 18816 models.common.C3 [64, 64, 1] \n", + " 3 -1 1 73984 models.common.Conv [64, 128, 3, 2] \n", + " 4 -1 1 156928 models.common.C3 [128, 128, 3] \n", + " 5 -1 1 295424 models.common.Conv [128, 256, 3, 2] \n", + " 6 -1 1 625152 models.common.C3 [256, 256, 3] \n", + " 7 -1 1 1180672 models.common.Conv [256, 512, 3, 2] \n", + " 8 -1 1 656896 models.common.SPP [512, 512, [5, 9, 13]] \n", + " 9 -1 1 1182720 models.common.C3 [512, 512, 1, False] \n", + " 10 -1 1 131584 models.common.Conv [512, 256, 1, 1] \n", + " 11 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", + " 12 [-1, 6] 1 0 models.common.Concat [1] \n", + " 13 -1 1 361984 models.common.C3 [512, 256, 1, False] \n", + " 14 -1 1 33024 models.common.Conv [256, 128, 1, 1] \n", + " 15 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", + " 16 [-1, 4] 1 0 models.common.Concat [1] \n", + " 17 -1 1 90880 models.common.C3 [256, 128, 1, False] \n", + " 18 -1 1 147712 models.common.Conv [128, 128, 3, 2] \n", + " 19 [-1, 14] 1 0 models.common.Concat [1] \n", + " 20 -1 1 296448 models.common.C3 [256, 256, 1, False] \n", + " 21 -1 1 590336 models.common.Conv [256, 256, 3, 2] \n", + " 22 [-1, 10] 1 0 models.common.Concat [1] \n", + " 23 -1 1 1182720 models.common.C3 [512, 512, 1, False] \n", + " 24 [17, 20, 23] 1 229245 models.yolo.Detect [80, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n", + 
"Model Summary: 283 layers, 7276605 parameters, 7276605 gradients, 17.1 GFLOPs\n", + "\n", + "Transferred 362/362 items from yolov5s.pt\n", + "\n", + "WARNING: Dataset not found, nonexistent paths: ['/content/coco128/images/train2017']\n", + "Downloading https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip ...\n", + "100% 21.1M/21.1M [00:00<00:00, 68.2MB/s]\n", + "Dataset autodownload success\n", + "\n", + "Scaled weight_decay = 0.0005\n", + "Optimizer groups: 62 .bias, 62 conv.weight, 59 other\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '../coco128/labels/train2017' images and labels...128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 2036.51it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: ../coco128/labels/train2017.cache\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 189.76it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 687414.74it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:01<00:00, 93.37it/s]\n", + "Plotting labels... \n", + "\n", + "\u001b[34m\u001b[1mautoanchor: \u001b[0mAnalyzing anchors... anchors/target = 4.26, Best Possible Recall (BPR) = 0.9946\n", + "Image sizes 640 train, 640 test\n", + "Using 2 dataloader workers\n", + "Logging results to runs/train/exp\n", + "Starting training for 1 epochs...\n", + "\n", + " Epoch gpu_mem box obj cls total labels img_size\n", + " 0/0 10.8G 0.04226 0.06068 0.02005 0.123 158 640: 100% 8/8 [00:05<00:00, 1.35it/s]\n", + " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:06<00:00, 1.53s/it]\n", + " all 128 929 0.633 0.641 0.668 0.439\n", + "1 epochs completed in 0.005 hours.\n", + "\n", + "Optimizer stripped from runs/train/exp/weights/last.pt, 14.8MB\n", + "Optimizer stripped from runs/train/exp/weights/best.pt, 14.8MB\n", + "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", + "YOLOv5 🚀 v5.0-157-gc6b51f4 torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", "\n", - "Namespace(adam=False, artifact_alias='latest', batch_size=16, bbox_interval=-1, bucket='', cache_images=True, cfg='', data='./data/coco128.yaml', device='', entity=None, epochs=3, evolve=False, exist_ok=False, global_rank=-1, hyp='data/hyp.scratch.yaml', image_weights=False, img_size=[640, 640], label_smoothing=0.0, linear_lr=False, local_rank=-1, multi_scale=False, name='exp', noautoanchor=False, nosave=True, notest=False, project='runs/train', quad=False, rect=False, resume=False, save_dir='runs/train/exp', save_period=-1, single_cls=False, sync_bn=False, total_batch_size=16, upload_dataset=False, weights='yolov5s.pt', workers=8, world_size=1)\n", + "Namespace(adam=False, artifact_alias='latest', batch_size=16, bbox_interval=-1, bucket='', cache_images=True, cfg='', data='./data/coco128.yaml', device='', entity=None, epochs=3, evolve=False, exist_ok=False, global_rank=-1, hyp='data/hyp.scratch.yaml', image_weights=False, img_size=[640, 640], label_smoothing=0.0, linear_lr=False, local_rank=-1, multi_scale=False, name='exp', noautoanchor=False, nosave=False, notest=False, project='runs/train', quad=False, rect=False, resume=False, save_dir='runs/train/exp', save_period=-1, single_cls=False, sync_bn=False, total_batch_size=16, upload_dataset=False, weights='yolov5s.pt', workers=8, world_size=1)\n", "\u001b[34m\u001b[1mtensorboard: 
\u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n", - "2021-04-12 10:29:58.539457: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\n", + "2021-06-08 16:53:03.275914: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcudart.so.11.0\n", "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.2, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0\n", "\u001b[34m\u001b[1mwandb: \u001b[0mInstall Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)\n", "\n", @@ -969,10 +1038,10 @@ "Transferred 362/362 items from yolov5s.pt\n", "Scaled weight_decay = 0.0005\n", "Optimizer groups: 62 .bias, 62 conv.weight, 59 other\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '../coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 796544.38it/s]\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 176.73it/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 500812.42it/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 134.10it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '../coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 824686.50it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 201.90it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 23766.92it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:01<00:00, 98.35it/s]\n", "Plotting labels... \n", "\n", "\u001b[34m\u001b[1mautoanchor: \u001b[0mAnalyzing anchors... 
anchors/target = 4.26, Best Possible Recall (BPR) = 0.9946\n", @@ -982,19 +1051,19 @@ "Starting training for 3 epochs...\n", "\n", " Epoch gpu_mem box obj cls total labels img_size\n", - " 0/2 3.29G 0.04368 0.065 0.02127 0.1299 183 640: 100% 8/8 [00:03<00:00, 2.21it/s]\n", - " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:04<00:00, 1.09s/it]\n", - " all 128 929 0.605 0.657 0.666 0.434\n", + " 0/2 10.8G 0.04226 0.06067 0.02005 0.123 158 640: 100% 8/8 [00:05<00:00, 1.41it/s]\n", + " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:04<00:00, 1.21s/it]\n", + " all 128 929 0.633 0.641 0.668 0.439\n", "\n", " Epoch gpu_mem box obj cls total labels img_size\n", - " 1/2 6.65G 0.04556 0.0651 0.01987 0.1305 166 640: 100% 8/8 [00:01<00:00, 5.18it/s]\n", - " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:01<00:00, 2.72it/s]\n", - " all 128 929 0.61 0.66 0.669 0.438\n", + " 1/2 8.29G 0.04571 0.06616 0.01952 0.1314 164 640: 100% 8/8 [00:01<00:00, 5.65it/s]\n", + " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:01<00:00, 3.21it/s]\n", + " all 128 929 0.613 0.659 0.669 0.438\n", "\n", " Epoch gpu_mem box obj cls total labels img_size\n", - " 2/2 6.65G 0.04624 0.06923 0.0196 0.1351 182 640: 100% 8/8 [00:01<00:00, 5.19it/s]\n", - " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:03<00:00, 1.27it/s]\n", - " all 128 929 0.618 0.659 0.671 0.438\n", + " 2/2 8.29G 0.04542 0.0718 0.01861 0.1358 191 640: 100% 8/8 [00:01<00:00, 4.89it/s]\n", + " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:02<00:00, 1.48it/s]\n", + " all 128 929 0.636 0.652 0.67 0.44\n", "3 epochs completed in 0.007 hours.\n", "\n", "Optimizer stripped from runs/train/exp/weights/last.pt, 14.8MB\n", @@ -1261,4 +1330,4 @@ "outputs": [] } ] -} +} \ No newline at end of file From ac8691e20827ec6103c6f521397bb9f699ac8a52 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 8 Jun 2021 19:01:52 +0200 Subject: [PATCH 045/757] Created using Colaboratory --- tutorial.ipynb | 104 ++++++++----------------------------------------- 1 file changed, 17 insertions(+), 87 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 4429c1044cfe..b6d672d10e52 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -917,93 +917,23 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "70004839-0c90-4bc0-c0e5-9a92f3e65b01" + "outputId": "c4dfc591-b6f9-4a60-9149-ee7eff970c90" }, "source": [ "# Train YOLOv5s on COCO128 for 3 epochs\n", "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache" ], - "execution_count": 4, + "execution_count": 9, "outputs": [ { "output_type": "stream", "text": [ "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", - "YOLOv5 🚀 v5.0-157-gc6b51f4 torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", - "\n", - "Namespace(adam=False, artifact_alias='latest', batch_size=16, bbox_interval=-1, bucket='', cache_images=True, cfg='', data='./data/coco128.yaml', device='', entity=None, epochs=1, evolve=False, exist_ok=False, global_rank=-1, hyp='data/hyp.scratch.yaml', image_weights=False, img_size=[640, 640], label_smoothing=0.0, linear_lr=False, local_rank=-1, multi_scale=False, name='exp', noautoanchor=False, nosave=False, notest=False, project='runs/train', quad=False, rect=False, resume=False, save_dir='runs/train/exp', save_period=-1, single_cls=False, sync_bn=False, total_batch_size=16, upload_dataset=False, weights='yolov5s.pt', workers=8, world_size=1)\n", - 
"\u001b[34m\u001b[1mtensorboard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n", - "2021-06-08 16:52:25.719745: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcudart.so.11.0\n", - "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.2, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0\n", - "\u001b[34m\u001b[1mwandb: \u001b[0mInstall Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)\n", - "Downloading https://github.com/ultralytics/yolov5/releases/download/v5.0/yolov5s.pt to yolov5s.pt...\n", - "100% 14.1M/14.1M [00:00<00:00, 18.7MB/s]\n", - "\n", - "\n", - " from n params module arguments \n", - " 0 -1 1 3520 models.common.Focus [3, 32, 3] \n", - " 1 -1 1 18560 models.common.Conv [32, 64, 3, 2] \n", - " 2 -1 1 18816 models.common.C3 [64, 64, 1] \n", - " 3 -1 1 73984 models.common.Conv [64, 128, 3, 2] \n", - " 4 -1 1 156928 models.common.C3 [128, 128, 3] \n", - " 5 -1 1 295424 models.common.Conv [128, 256, 3, 2] \n", - " 6 -1 1 625152 models.common.C3 [256, 256, 3] \n", - " 7 -1 1 1180672 models.common.Conv [256, 512, 3, 2] \n", - " 8 -1 1 656896 models.common.SPP [512, 512, [5, 9, 13]] \n", - " 9 -1 1 1182720 models.common.C3 [512, 512, 1, False] \n", - " 10 -1 1 131584 models.common.Conv [512, 256, 1, 1] \n", - " 11 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", - " 12 [-1, 6] 1 0 models.common.Concat [1] \n", - " 13 -1 1 361984 models.common.C3 [512, 256, 1, False] \n", - " 14 -1 1 33024 models.common.Conv [256, 128, 1, 1] \n", - " 15 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", - " 16 [-1, 4] 1 0 models.common.Concat [1] \n", - " 17 -1 1 90880 models.common.C3 [256, 128, 1, False] \n", - " 18 -1 1 147712 models.common.Conv [128, 128, 3, 2] \n", - " 19 [-1, 14] 1 0 models.common.Concat [1] \n", - " 20 -1 1 296448 models.common.C3 [256, 256, 1, False] \n", - " 21 -1 1 590336 models.common.Conv [256, 256, 3, 2] \n", - " 22 [-1, 10] 1 0 models.common.Concat [1] \n", - " 23 -1 1 1182720 models.common.C3 [512, 512, 1, False] \n", - " 24 [17, 20, 23] 1 229245 models.yolo.Detect [80, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n", - "Model Summary: 283 layers, 7276605 parameters, 7276605 gradients, 17.1 GFLOPs\n", - "\n", - "Transferred 362/362 items from yolov5s.pt\n", - "\n", - "WARNING: Dataset not found, nonexistent paths: ['/content/coco128/images/train2017']\n", - "Downloading https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip ...\n", - "100% 21.1M/21.1M [00:00<00:00, 68.2MB/s]\n", - "Dataset autodownload success\n", - "\n", - "Scaled weight_decay = 0.0005\n", - "Optimizer groups: 62 .bias, 62 conv.weight, 59 other\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '../coco128/labels/train2017' images and labels...128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 2036.51it/s]\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: ../coco128/labels/train2017.cache\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 189.76it/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning 
'../coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 687414.74it/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:01<00:00, 93.37it/s]\n", - "Plotting labels... \n", - "\n", - "\u001b[34m\u001b[1mautoanchor: \u001b[0mAnalyzing anchors... anchors/target = 4.26, Best Possible Recall (BPR) = 0.9946\n", - "Image sizes 640 train, 640 test\n", - "Using 2 dataloader workers\n", - "Logging results to runs/train/exp\n", - "Starting training for 1 epochs...\n", - "\n", - " Epoch gpu_mem box obj cls total labels img_size\n", - " 0/0 10.8G 0.04226 0.06068 0.02005 0.123 158 640: 100% 8/8 [00:05<00:00, 1.35it/s]\n", - " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:06<00:00, 1.53s/it]\n", - " all 128 929 0.633 0.641 0.668 0.439\n", - "1 epochs completed in 0.005 hours.\n", - "\n", - "Optimizer stripped from runs/train/exp/weights/last.pt, 14.8MB\n", - "Optimizer stripped from runs/train/exp/weights/best.pt, 14.8MB\n", - "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", - "YOLOv5 🚀 v5.0-157-gc6b51f4 torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", + "YOLOv5 🚀 v5.0-158-g78cf488 torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", "\n", "Namespace(adam=False, artifact_alias='latest', batch_size=16, bbox_interval=-1, bucket='', cache_images=True, cfg='', data='./data/coco128.yaml', device='', entity=None, epochs=3, evolve=False, exist_ok=False, global_rank=-1, hyp='data/hyp.scratch.yaml', image_weights=False, img_size=[640, 640], label_smoothing=0.0, linear_lr=False, local_rank=-1, multi_scale=False, name='exp', noautoanchor=False, nosave=False, notest=False, project='runs/train', quad=False, rect=False, resume=False, save_dir='runs/train/exp', save_period=-1, single_cls=False, sync_bn=False, total_batch_size=16, upload_dataset=False, weights='yolov5s.pt', workers=8, world_size=1)\n", "\u001b[34m\u001b[1mtensorboard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n", - "2021-06-08 16:53:03.275914: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcudart.so.11.0\n", + "2021-06-08 17:00:55.016221: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcudart.so.11.0\n", "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.2, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0\n", "\u001b[34m\u001b[1mwandb: \u001b[0mInstall Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)\n", "\n", @@ -1038,10 +968,10 @@ "Transferred 362/362 items from yolov5s.pt\n", "Scaled weight_decay = 0.0005\n", "Optimizer groups: 62 .bias, 62 conv.weight, 59 other\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '../coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 824686.50it/s]\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 201.90it/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco128/labels/train2017.cache' images and labels... 
128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 23766.92it/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:01<00:00, 98.35it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '../coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 1503840.09it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 198.74it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 475107.00it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:01<00:00, 98.63it/s]\n", "Plotting labels... \n", "\n", "\u001b[34m\u001b[1mautoanchor: \u001b[0mAnalyzing anchors... anchors/target = 4.26, Best Possible Recall (BPR) = 0.9946\n", @@ -1051,19 +981,19 @@ "Starting training for 3 epochs...\n", "\n", " Epoch gpu_mem box obj cls total labels img_size\n", - " 0/2 10.8G 0.04226 0.06067 0.02005 0.123 158 640: 100% 8/8 [00:05<00:00, 1.41it/s]\n", - " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:04<00:00, 1.21s/it]\n", - " all 128 929 0.633 0.641 0.668 0.439\n", + " 0/2 10.8G 0.04226 0.06067 0.02005 0.123 158 640: 100% 8/8 [00:05<00:00, 1.45it/s]\n", + " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:04<00:00, 1.17s/it]\n", + " all 128 929 0.633 0.641 0.668 0.438\n", "\n", " Epoch gpu_mem box obj cls total labels img_size\n", - " 1/2 8.29G 0.04571 0.06616 0.01952 0.1314 164 640: 100% 8/8 [00:01<00:00, 5.65it/s]\n", - " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:01<00:00, 3.21it/s]\n", - " all 128 929 0.613 0.659 0.669 0.438\n", + " 1/2 6.66G 0.04571 0.06615 0.01952 0.1314 164 640: 100% 8/8 [00:01<00:00, 5.10it/s]\n", + " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:01<00:00, 3.88it/s]\n", + " all 128 929 0.614 0.661 0.67 0.438\n", "\n", " Epoch gpu_mem box obj cls total labels img_size\n", - " 2/2 8.29G 0.04542 0.0718 0.01861 0.1358 191 640: 100% 8/8 [00:01<00:00, 4.89it/s]\n", - " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:02<00:00, 1.48it/s]\n", - " all 128 929 0.636 0.652 0.67 0.44\n", + " 2/2 6.66G 0.04542 0.07179 0.01861 0.1358 191 640: 100% 8/8 [00:01<00:00, 5.40it/s]\n", + " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:02<00:00, 1.43it/s]\n", + " all 128 929 0.636 0.652 0.67 0.439\n", "3 epochs completed in 0.007 hours.\n", "\n", "Optimizer stripped from runs/train/exp/weights/last.pt, 14.8MB\n", From b6fdd2e5e54aa3464b360fe6d9c6f3cb216f3778 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 8 Jun 2021 23:09:45 +0200 Subject: [PATCH 046/757] Create `dataset_stats()` for HUB --- utils/datasets.py | 36 ++++++++++++++++++++++++++++++++++-- 1 file changed, 34 insertions(+), 2 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index daaa8d24855e..7c74d2c01322 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -17,12 +17,13 @@ import numpy as np import torch import torch.nn.functional as F +import yaml from PIL import Image, ExifTags from torch.utils.data import Dataset from tqdm import tqdm -from utils.general import check_requirements, xyxy2xywh, xywh2xyxy, xywhn2xyxy, xyn2xy, segment2box, segments2boxes, \ - resample_segments, clean_str +from utils.general import check_requirements, check_file, check_dataset, xyxy2xywh, xywh2xyxy, xywhn2xyxy, xyn2xy, \ + segment2box, segments2boxes, resample_segments, 
clean_str from utils.torch_utils import torch_distributed_zero_first # Parameters @@ -1083,3 +1084,34 @@ def verify_image_label(params): nc = 1 logging.info(f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}') return [None] * 4 + [nm, nf, ne, nc] + + +def dataset_stats(path='data/coco128.yaml', verbose=False): + """ Return dataset statistics dictionary with images and instances counts per split per class + Usage: from utils.datasets import *; dataset_stats('data/coco128.yaml') + Arguments + path: Path to data.yaml + verbose: Print stats dictionary + """ + path = check_file(Path(path)) + with open(path) as f: + data = yaml.safe_load(f) # data dict + check_dataset(data) # download dataset if missing + + nc = data['nc'] # number of classes + stats = {'nc': nc, 'names': data['names']} # statistics dictionary + for split in 'train', 'val', 'test': + if split not in data: + stats[split] = None # i.e. no test set + continue + x = [] + dataset = LoadImagesAndLabels(data[split], augment=False, rect=True) # load dataset + for label in tqdm(dataset.labels, total=dataset.n, desc='Statistics'): + x.append(np.bincount(label[:, 0].astype(int), minlength=nc)) + x = np.array(x) # shape(128x80) + stats[split] = {'instances': {'total': int(x.sum()), 'per_class': x.sum(0).tolist()}, + 'images': {'total': dataset.n, 'unlabelled': int(np.all(x == 0, 1).sum()), + 'per_class': (x > 0).sum(0).tolist()}} + if verbose: + print(yaml.dump([stats], sort_keys=False, default_flow_style=False)) + return stats From 1b5edb6f8eb14a12f21ed0370e9a0e74085424e3 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 9 Jun 2021 10:56:11 +0200 Subject: [PATCH 047/757] Update `dataset_stats()` for HUB (#3536) * Update `dataset_stats()` for HUB Cleanup of b6fdd2e * autodownload flag * Update general.py * cleanup --- utils/datasets.py | 11 +++++------ utils/general.py | 6 +++--- 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index 7c74d2c01322..108005c8de65 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -1086,18 +1086,17 @@ def verify_image_label(params): return [None] * 4 + [nm, nf, ne, nc] -def dataset_stats(path='data/coco128.yaml', verbose=False): +def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False): """ Return dataset statistics dictionary with images and instances counts per split per class - Usage: from utils.datasets import *; dataset_stats('data/coco128.yaml') + Usage: from utils.datasets import *; dataset_stats('coco128.yaml', verbose=True) Arguments path: Path to data.yaml + autodownload: Attempt to download dataset if not found locally verbose: Print stats dictionary """ - path = check_file(Path(path)) - with open(path) as f: + with open(check_file(Path(path))) as f: data = yaml.safe_load(f) # data dict - check_dataset(data) # download dataset if missing - + check_dataset(data, autodownload) # download dataset if missing nc = data['nc'] # number of classes stats = {'nc': nc, 'names': data['names']} # statistics dictionary for split in 'train', 'val', 'test': diff --git a/utils/general.py b/utils/general.py index a12b0aafba0e..367f30b925f4 100755 --- a/utils/general.py +++ b/utils/general.py @@ -220,14 +220,14 @@ def check_file(file): return files[0] # return file -def check_dataset(dict): +def check_dataset(data, autodownload=True): # Download dataset if not found locally - val, s = dict.get('val'), dict.get('download') + val, s = data.get('val'), data.get('download') if val and len(val): val = [Path(x).resolve() for 
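
`dataset_stats()` is what HUB calls to summarize a dataset from its `data.yaml` alone. A usage sketch against the function as defined above (assumes `coco128.yaml` resolves locally or that `autodownload=True` can fetch it):

    from utils.datasets import dataset_stats

    stats = dataset_stats('coco128.yaml', autodownload=True, verbose=True)
    print(stats['nc'], stats['names'][:3])           # class count and first class names
    print(stats['train']['images']['total'])         # images in the train split
    print(stats['train']['instances']['per_class'])  # label instances per class
    print(stats['test'])                             # None when a split is absent from data.yaml

The per-class image counts come from `(x > 0).sum(0)`, counting images containing at least one instance of each class, while `np.all(x == 0, 1)` flags fully unlabelled images.
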
x in (val if isinstance(val, list) else [val])] # val path if not all(x.exists() for x in val): print('\nWARNING: Dataset not found, nonexistent paths: %s' % [str(x) for x in val if not x.exists()]) - if s and len(s): # download script + if s and len(s) and autodownload: # download script if s.startswith('http') and s.endswith('.zip'): # URL f = Path(s).name # filename print(f'Downloading {s} ...') From ef0b5c9d29192ae1c4a931f9db808114bb486001 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 9 Jun 2021 11:22:21 +0200 Subject: [PATCH 048/757] On-demand `pycocotools` pip install (#3547) --- detect.py | 2 +- hubconf.py | 2 +- requirements.txt | 2 +- test.py | 3 ++- train.py | 2 +- 5 files changed, 6 insertions(+), 5 deletions(-) diff --git a/detect.py b/detect.py index 537f47dfafab..8dbb656ed95f 100644 --- a/detect.py +++ b/detect.py @@ -175,7 +175,7 @@ def detect(opt): parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') opt = parser.parse_args() print(opt) - check_requirements(exclude=('tensorboard', 'pycocotools', 'thop')) + check_requirements(exclude=('tensorboard', 'thop')) if opt.update: # update all models (to fix SourceChangeWarning) for opt.weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']: diff --git a/hubconf.py b/hubconf.py index bedbee18f87f..429e61bbab1b 100644 --- a/hubconf.py +++ b/hubconf.py @@ -31,7 +31,7 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo from utils.torch_utils import select_device check_requirements(requirements=Path(__file__).parent / 'requirements.txt', - exclude=('tensorboard', 'pycocotools', 'thop', 'opencv-python')) + exclude=('tensorboard', 'thop', 'opencv-python')) set_logging(verbose=verbose) fname = Path(name).with_suffix('.pt') # checkpoint filename diff --git a/requirements.txt b/requirements.txt index a20fb6ad0ea5..b413ec01b31c 100755 --- a/requirements.txt +++ b/requirements.txt @@ -26,5 +26,5 @@ pandas # extras -------------------------------------- # Cython # for pycocotools https://github.com/cocodataset/cocoapi/issues/172 -pycocotools>=2.0 # COCO mAP +# pycocotools>=2.0 # COCO mAP thop # FLOPs computation diff --git a/test.py b/test.py index 6a2a4e47c142..515b984bc7be 100644 --- a/test.py +++ b/test.py @@ -260,6 +260,7 @@ def test(data, json.dump(jdict, f) try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb + check_requirements(['pycocotools']) from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval @@ -311,7 +312,7 @@ def test(data, opt.save_json |= opt.data.endswith('coco.yaml') opt.data = check_file(opt.data) # check file print(opt) - check_requirements(exclude=('tensorboard', 'pycocotools', 'thop')) + check_requirements(exclude=('tensorboard', 'thop')) if opt.task in ('train', 'val', 'test'): # run normally test(opt.data, diff --git a/train.py b/train.py index 093a6197ff06..aad8ff258d6e 100644 --- a/train.py +++ b/train.py @@ -495,7 +495,7 @@ def train(hyp, opt, device, tb_writer=None): set_logging(opt.global_rank) if opt.global_rank in [-1, 0]: check_git_status() - check_requirements(exclude=('pycocotools', 'thop')) + check_requirements(exclude=['thop']) # Resume wandb_run = check_wandb_resume(opt) From f8ec71e1c2ca1c01763f754332eff393b24c23d7 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 9 Jun 2021 11:34:15 +0200 Subject: [PATCH 049/757] Update `check_python(minimum=3.6.2)` (#3548) --- utils/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
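
Dropping `pycocotools` from the hard requirements works because `check_requirements(['pycocotools'])` runs immediately before the only imports that need it, so the package is fetched on first COCO evaluation rather than at install time. The general lazy-dependency shape, sketched independently of the repository helper:

    import importlib
    import subprocess
    import sys

    def require(package):
        # Install on demand if the package is not already importable
        try:
            importlib.import_module(package)
        except ImportError:
            subprocess.check_call([sys.executable, '-m', 'pip', 'install', package])

    require('pycocotools')
    from pycocotools.coco import COCO  # safe only after require() above

This trades a slower first evaluation run for a lighter default install, and keeps environments that never run COCO mAP free of the compiled dependency.
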
a/utils/general.py b/utils/general.py index 367f30b925f4..1d61f16d7771 100755 --- a/utils/general.py +++ b/utils/general.py @@ -136,7 +136,7 @@ def check_git_status(err_msg=', for updates see https://github.com/ultralytics/y print(f'{e}{err_msg}') -def check_python(minimum='3.7.0', required=True): +def check_python(minimum='3.6.2', required=True): # Check current python version vs. required python version current = platform.python_version() result = pkg.parse_version(current) >= pkg.parse_version(minimum) From 0cfc5b2c181fd02f5613227aa5ae31e29b99d6b4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 9 Jun 2021 11:49:25 +0200 Subject: [PATCH 050/757] Update README.md (#3550) Add permanent splash URL and update hyperlink from iOS landing page to Ultralytics YOLOv5 landing page at https://ultralytics.com/yolov5 --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 3a785cc85003..08a6eb272117 100755 --- a/README.md +++ b/README.md @@ -1,5 +1,5 @@ - - + +   CI CPU testing From 958ab92dc1a29f41f4c813937fda2bc99e1f147b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 9 Jun 2021 13:14:56 +0200 Subject: [PATCH 051/757] Remove `opt` from `create_dataloader()`` (#3552) --- test.py | 2 +- train.py | 17 +++++++++-------- utils/datasets.py | 6 +++--- 3 files changed, 13 insertions(+), 12 deletions(-) diff --git a/test.py b/test.py index 515b984bc7be..16a31fd17a54 100644 --- a/test.py +++ b/test.py @@ -88,7 +88,7 @@ def test(data, if device.type != 'cpu': model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once task = opt.task if opt.task in ('train', 'val', 'test') else 'val' # path to train/val/test images - dataloader = create_dataloader(data[task], imgsz, batch_size, gs, opt, pad=0.5, rect=True, + dataloader = create_dataloader(data[task], imgsz, batch_size, gs, single_cls, pad=0.5, rect=True, prefix=colorstr(f'{task}: '))[0] seen = 0 diff --git a/train.py b/train.py index aad8ff258d6e..2465a8c22a37 100644 --- a/train.py +++ b/train.py @@ -41,8 +41,9 @@ def train(hyp, opt, device, tb_writer=None): logger.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items())) - save_dir, epochs, batch_size, total_batch_size, weights, rank = \ - Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank + save_dir, epochs, batch_size, total_batch_size, weights, rank, single_cls = \ + Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank, \ + opt.single_cls # Directories wdir = save_dir / 'weights' @@ -75,8 +76,8 @@ def train(hyp, opt, device, tb_writer=None): if wandb_logger.wandb: weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp # WandbLogger might update weights, epochs if resuming - nc = 1 if opt.single_cls else int(data_dict['nc']) # number of classes - names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names + nc = 1 if single_cls else int(data_dict['nc']) # number of classes + names = ['item'] if single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data) # check is_coco = opt.data.endswith('coco.yaml') and nc == 80 # COCO dataset @@ -187,7 +188,7 @@ def train(hyp, opt, device, tb_writer=None): logger.info('Using SyncBatchNorm()') # Trainloader - dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt, + 
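
`check_python()` leans on `pkg_resources.parse_version()` because naive string comparison mis-orders version numbers. A small demonstration (assumes `setuptools`, which provides `pkg_resources`, is installed):

    import platform
    import pkg_resources as pkg

    print('3.10.0' >= '3.6.2')  # False: lexicographic comparison is wrong here
    print(pkg.parse_version('3.10.0') >= pkg.parse_version('3.6.2'))  # True: version-aware
    print(pkg.parse_version(platform.python_version()) >= pkg.parse_version('3.6.2'))
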
dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, single_cls, hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=rank, world_size=opt.world_size, workers=opt.workers, image_weights=opt.image_weights, quad=opt.quad, prefix=colorstr('train: ')) @@ -197,7 +198,7 @@ def train(hyp, opt, device, tb_writer=None): # Process 0 if rank in [-1, 0]: - testloader = create_dataloader(test_path, imgsz_test, batch_size * 2, gs, opt, # testloader + testloader = create_dataloader(test_path, imgsz_test, batch_size * 2, gs, single_cls, hyp=hyp, cache=opt.cache_images and not opt.notest, rect=True, rank=-1, world_size=opt.world_size, workers=opt.workers, pad=0.5, prefix=colorstr('val: '))[0] @@ -357,7 +358,7 @@ def train(hyp, opt, device, tb_writer=None): batch_size=batch_size * 2, imgsz=imgsz_test, model=ema.ema, - single_cls=opt.single_cls, + single_cls=single_cls, dataloader=testloader, save_dir=save_dir, save_json=is_coco and final_epoch, @@ -429,7 +430,7 @@ def train(hyp, opt, device, tb_writer=None): conf_thres=0.001, iou_thres=0.7, model=attempt_load(m, device).half(), - single_cls=opt.single_cls, + single_cls=single_cls, dataloader=testloader, save_dir=save_dir, save_json=True, diff --git a/utils/datasets.py b/utils/datasets.py index 108005c8de65..444b3ff2f60c 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -62,8 +62,8 @@ def exif_size(img): return s -def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False, - rank=-1, world_size=1, workers=8, image_weights=False, quad=False, prefix=''): +def create_dataloader(path, imgsz, batch_size, stride, single_cls=False, hyp=None, augment=False, cache=False, pad=0.0, + rect=False, rank=-1, world_size=1, workers=8, image_weights=False, quad=False, prefix=''): # Make sure only the first process in DDP process the dataset first, and the following others can use the cache with torch_distributed_zero_first(rank): dataset = LoadImagesAndLabels(path, imgsz, batch_size, @@ -71,7 +71,7 @@ def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=Fa hyp=hyp, # augmentation hyperparameters rect=rect, # rectangular training cache_images=cache, - single_cls=opt.single_cls, + single_cls=single_cls, stride=int(stride), pad=pad, image_weights=image_weights, From 63157d214d09ab9c3b4588347dcf3307d85d4410 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 9 Jun 2021 15:09:51 +0200 Subject: [PATCH 052/757] Remove `is_coco` argument from `test()` (#3553) --- test.py | 3 +-- train.py | 6 ++---- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/test.py b/test.py index 16a31fd17a54..b17415431615 100644 --- a/test.py +++ b/test.py @@ -39,7 +39,6 @@ def test(data, wandb_logger=None, compute_loss=None, half_precision=True, - is_coco=False, opt=None): # Initialize/load model and set device training = model is not None @@ -71,10 +70,10 @@ def test(data, # Configure model.eval() if isinstance(data, str): - is_coco = data.endswith('coco.yaml') with open(data) as f: data = yaml.safe_load(f) check_dataset(data) # check + is_coco = data['val'].endswith('coco/val2017.txt') # COCO dataset nc = 1 if single_cls else int(data['nc']) # number of classes iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95 niou = iouv.numel() diff --git a/train.py b/train.py index 2465a8c22a37..43c63b6ff3c1 100644 --- a/train.py +++ b/train.py @@ -365,8 +365,7 @@ def train(hyp, opt, device, tb_writer=None): verbose=nc < 50 and final_epoch, 
plots=plots and final_epoch, wandb_logger=wandb_logger, - compute_loss=compute_loss, - is_coco=is_coco) + compute_loss=compute_loss) # Write with open(results_file, 'a') as f: @@ -434,8 +433,7 @@ def train(hyp, opt, device, tb_writer=None): dataloader=testloader, save_dir=save_dir, save_json=True, - plots=False, - is_coco=is_coco) + plots=False) # Strip optimizers for f in last, best: From 8b5086c21ba227c0257d94ea34cb46124a9c559a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 9 Jun 2021 15:31:47 +0200 Subject: [PATCH 053/757] Multi-GPU default to single device 0 (#3554) * Multi-GPU default to single device 0 * Multi-GPU default to single device 0 * add space --- utils/torch_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 6a7d07634813..b690dbe96700 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -72,11 +72,11 @@ def select_device(device='', batch_size=None): cuda = not cpu and torch.cuda.is_available() if cuda: - devices = device.split(',') if device else range(torch.cuda.device_count()) # i.e. 0,1,6,7 + devices = device.split(',') if device else '0' # range(torch.cuda.device_count()) # i.e. 0,1,6,7 n = len(devices) # device count if n > 1 and batch_size: # check batch_size is divisible by device_count assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}' - space = ' ' * len(s) + space = ' ' * (len(s) + 1) for i, d in enumerate(devices): p = torch.cuda.get_device_properties(i) s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2}MB)\n" # bytes to MB From 5948f20a3d29fa3e0589538650afc17431420e28 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 9 Jun 2021 16:25:17 +0200 Subject: [PATCH 054/757] Update test.py profiling (#3555) * Update test.py profiling * half_precision to half * inplace --- test.py | 30 +++++++++++++++++------------- train.py | 26 +++++++++++++------------- utils/plots.py | 17 +++++++++-------- 3 files changed, 39 insertions(+), 34 deletions(-) diff --git a/test.py b/test.py index b17415431615..4e554cf1fe43 100644 --- a/test.py +++ b/test.py @@ -38,7 +38,7 @@ def test(data, plots=True, wandb_logger=None, compute_loss=None, - half_precision=True, + half=True, opt=None): # Initialize/load model and set device training = model is not None @@ -63,7 +63,7 @@ def test(data, # model = nn.DataParallel(model) # Half - half = device.type != 'cpu' and half_precision # half precision only supported on CUDA + half &= device.type != 'cpu' # half precision only supported on CUDA if half: model.half() @@ -95,20 +95,22 @@ def test(data, names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)} coco91class = coco80_to_coco91_class() s = ('%20s' + '%11s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95') - p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0. + p, r, f1, mp, mr, map50, map, t0, t1, t2 = 0., 0., 0., 0., 0., 0., 0., 0., 0., 0. 
loss = torch.zeros(3, device=device) jdict, stats, ap, ap_class, wandb_images = [], [], [], [], [] for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)): + t_ = time_synchronized() img = img.to(device, non_blocking=True) img = img.half() if half else img.float() # uint8 to fp16/32 img /= 255.0 # 0 - 255 to 0.0 - 1.0 targets = targets.to(device) nb, _, height, width = img.shape # batch size, channels, height, width + t = time_synchronized() + t0 += t - t_ # Run model - t = time_synchronized() out, train_out = model(img, augment=augment) # inference and training outputs - t0 += time_synchronized() - t + t1 += time_synchronized() - t # Compute loss if compute_loss: @@ -119,7 +121,7 @@ def test(data, lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling t = time_synchronized() out = non_max_suppression(out, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls) - t1 += time_synchronized() - t + t2 += time_synchronized() - t # Statistics per image for si, pred in enumerate(out): @@ -236,9 +238,10 @@ def test(data, print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i])) # Print speeds - t = tuple(x / seen * 1E3 for x in (t0, t1, t0 + t1)) + (imgsz, imgsz, batch_size) # tuple + t = tuple(x / seen * 1E3 for x in (t0, t1, t2)) # speeds per image if not training: - print('Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g' % t) + shape = (batch_size, 3, imgsz, imgsz) + print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t) # Plots if plots: @@ -327,24 +330,25 @@ def test(data, save_txt=opt.save_txt | opt.save_hybrid, save_hybrid=opt.save_hybrid, save_conf=opt.save_conf, - half_precision=opt.half, + half=opt.half, opt=opt ) elif opt.task == 'speed': # speed benchmarks - for w in opt.weights: - test(opt.data, w, opt.batch_size, opt.img_size, 0.25, 0.45, save_json=False, plots=False, opt=opt) + for w in opt.weights if isinstance(opt.weights, list) else [opt.weights]: + test(opt.data, w, opt.batch_size, opt.img_size, 0.25, 0.45, save_json=False, plots=False, half=True, + opt=opt) elif opt.task == 'study': # run over a range of settings and save/plot # python test.py --task study --data coco.yaml --iou 0.7 --weights yolov5s.pt yolov5m.pt yolov5l.pt yolov5x.pt x = list(range(256, 1536 + 128, 128)) # x axis (image sizes) - for w in opt.weights: + for w in opt.weights if isinstance(opt.weights, list) else [opt.weights]: f = f'study_{Path(opt.data).stem}_{Path(w).stem}.txt' # filename to save to y = [] # y axis for i in x: # img-size print(f'\nRunning {f} point {i}...') r, _, t = test(opt.data, w, opt.batch_size, i, opt.conf_thres, opt.iou_thres, opt.save_json, - plots=False, opt=opt) + plots=False, half=True, opt=opt) y.append(r + t) # results and times np.savetxt(f, y, fmt='%10.4g') # save os.system('zip -r study.zip study_*.txt') diff --git a/train.py b/train.py index 43c63b6ff3c1..b92936d762b5 100644 --- a/train.py +++ b/train.py @@ -74,7 +74,7 @@ def train(hyp, opt, device, tb_writer=None): loggers['wandb'] = wandb_logger.wandb data_dict = wandb_logger.data_dict if wandb_logger.wandb: - weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp # WandbLogger might update weights, epochs if resuming + weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp # may update weights, epochs if resuming nc = 1 if single_cls else int(data_dict['nc']) # number of classes names = ['item'] if single_cls and len(data_dict['names']) != 1 else 
data_dict['names'] # class names @@ -354,18 +354,18 @@ def train(hyp, opt, device, tb_writer=None): final_epoch = epoch + 1 == epochs if not opt.notest or final_epoch: # Calculate mAP wandb_logger.current_epoch = epoch + 1 - results, maps, times = test.test(data_dict, - batch_size=batch_size * 2, - imgsz=imgsz_test, - model=ema.ema, - single_cls=single_cls, - dataloader=testloader, - save_dir=save_dir, - save_json=is_coco and final_epoch, - verbose=nc < 50 and final_epoch, - plots=plots and final_epoch, - wandb_logger=wandb_logger, - compute_loss=compute_loss) + results, maps, _ = test.test(data_dict, + batch_size=batch_size * 2, + imgsz=imgsz_test, + model=ema.ema, + single_cls=single_cls, + dataloader=testloader, + save_dir=save_dir, + save_json=is_coco and final_epoch, + verbose=nc < 50 and final_epoch, + plots=plots and final_epoch, + wandb_logger=wandb_logger, + compute_loss=compute_loss) # Write with open(results_file, 'a') as f: diff --git a/utils/plots.py b/utils/plots.py index 8313ef210f90..973b9ae19b54 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -3,7 +3,6 @@ import glob import math import os -import random from copy import copy from pathlib import Path @@ -252,21 +251,23 @@ def plot_targets_txt(): # from utils.plots import *; plot_targets_txt() def plot_study_txt(path='', x=None): # from utils.plots import *; plot_study_txt() # Plot study.txt generated by test.py - fig, ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True) - # ax = ax.ravel() + plot2 = False # plot additional results + if plot2: + ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)[1].ravel() fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True) # for f in [Path(path) / f'study_coco_{x}.txt' for x in ['yolov5s6', 'yolov5m6', 'yolov5l6', 'yolov5x6']]: for f in sorted(Path(path).glob('study*.txt')): y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T x = np.arange(y.shape[1]) if x is None else np.array(x) - s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_inference (ms/img)', 't_NMS (ms/img)', 't_total (ms/img)'] - # for i in range(7): - # ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8) - # ax[i].set_title(s[i]) + if plot2: + s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_preprocess (ms/img)', 't_inference (ms/img)', 't_NMS (ms/img)'] + for i in range(7): + ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8) + ax[i].set_title(s[i]) j = y[3].argmax() + 1 - ax2.plot(y[6, 1:j], y[3, 1:j] * 1E2, '.-', linewidth=2, markersize=8, + ax2.plot(y[5, 1:j], y[3, 1:j] * 1E2, '.-', linewidth=2, markersize=8, label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO')) ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5], From 4ef92618700cb1a4fc54de970f05e3126283d0da Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 9 Jun 2021 16:30:01 +0200 Subject: [PATCH 055/757] Remove redundant speed/study `half` argument (#3557) --- test.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/test.py b/test.py index 4e554cf1fe43..971c4b005ca1 100644 --- a/test.py +++ b/test.py @@ -336,8 +336,7 @@ def test(data, elif opt.task == 'speed': # speed benchmarks for w in opt.weights if isinstance(opt.weights, list) else [opt.weights]: - test(opt.data, w, opt.batch_size, opt.img_size, 0.25, 0.45, save_json=False, plots=False, half=True, - opt=opt) + test(opt.data, w, opt.batch_size, opt.img_size, 0.25, 0.45, save_json=False, plots=False, opt=opt) elif opt.task == 'study': # run over a range of settings and save/plot # python test.py 
--task study --data coco.yaml --iou 0.7 --weights yolov5s.pt yolov5m.pt yolov5l.pt yolov5x.pt @@ -348,7 +347,7 @@ def test(data, for i in x: # img-size print(f'\nRunning {f} point {i}...') r, _, t = test(opt.data, w, opt.batch_size, i, opt.conf_thres, opt.iou_thres, opt.save_json, - plots=False, half=True, opt=opt) + plots=False, opt=opt) y.append(r + t) # results and times np.savetxt(f, y, fmt='%10.4g') # save os.system('zip -r study.zip study_*.txt') From c6deb73a895bd09b6110236cf29594211a2a42f9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 9 Jun 2021 21:14:09 +0200 Subject: [PATCH 056/757] Bump pip from 18.1 to 19.2 in /utils/google_app_engine (#3561) Bumps [pip](https://github.com/pypa/pip) from 18.1 to 19.2. - [Release notes](https://github.com/pypa/pip/releases) - [Changelog](https://github.com/pypa/pip/blob/main/NEWS.rst) - [Commits](https://github.com/pypa/pip/compare/18.1...19.2) --- updated-dependencies: - dependency-name: pip dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- utils/google_app_engine/additional_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/google_app_engine/additional_requirements.txt b/utils/google_app_engine/additional_requirements.txt index 5fcc30524a59..2f81c8b40056 100644 --- a/utils/google_app_engine/additional_requirements.txt +++ b/utils/google_app_engine/additional_requirements.txt @@ -1,4 +1,4 @@ # add these requirements in your app on top of the existing ones -pip==18.1 +pip==19.2 Flask==1.0.2 gunicorn==19.9.0 From a9553c04a7d32d5c8f29b0917fbeb6b1ef6cfe5f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 9 Jun 2021 21:36:10 +0200 Subject: [PATCH 057/757] Refactor test.py arguments (#3558) * remove opt from test() * pass kwargs * update comments * revert accidental default change * multiple --img options * add comments --- detect.py | 2 +- test.py | 46 ++++++++++++++++++---------------------------- 2 files changed, 19 insertions(+), 29 deletions(-) diff --git a/detect.py b/detect.py index 8dbb656ed95f..5551824a4110 100644 --- a/detect.py +++ b/detect.py @@ -33,7 +33,7 @@ def detect(opt): # Load model model = attempt_load(weights, map_location=device) # load FP32 model stride = int(model.stride.max()) # model stride - imgsz = check_img_size(imgsz, s=stride) # check img_size + imgsz = check_img_size(imgsz, s=stride) # check image size names = model.module.names if hasattr(model, 'module') else model.names # get class names if half: model.half() # to FP16 diff --git a/test.py b/test.py index 971c4b005ca1..a10f0f88f8e6 100644 --- a/test.py +++ b/test.py @@ -22,9 +22,9 @@ def test(data, weights=None, batch_size=32, - imgsz=640, - conf_thres=0.001, - iou_thres=0.6, # for NMS + imgsz=640, # image size + conf_thres=0.001, # confidence threshold + iou_thres=0.6, # NMS IoU threshold save_json=False, single_cls=False, augment=False, @@ -38,8 +38,12 @@ def test(data, plots=True, wandb_logger=None, compute_loss=None, - half=True, - opt=None): + half=True, # FP16 half-precision inference + project='runs/test', + name='exp', + exist_ok=False, + task='val', + device=''): # Initialize/load model and set device training = model is not None if training: # called by train.py @@ -47,16 +51,16 @@ def test(data, else: # called directly set_logging() - device = select_device(opt.device, batch_size=batch_size) + device = select_device(device, 
batch_size=batch_size) # Directories - save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok) # increment run + save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model model = attempt_load(weights, map_location=device) # load FP32 model gs = max(int(model.stride.max()), 32) # grid size (max stride) - imgsz = check_img_size(imgsz, s=gs) # check img_size + imgsz = check_img_size(imgsz, s=gs) # check image size # Multi-GPU disabled, incompatible with .half() https://github.com/ultralytics/yolov5/issues/99 # if device.type != 'cpu' and torch.cuda.device_count() > 1: @@ -86,7 +90,7 @@ def test(data, if not training: if device.type != 'cpu': model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once - task = opt.task if opt.task in ('train', 'val', 'test') else 'val' # path to train/val/test images + task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images dataloader = create_dataloader(data[task], imgsz, batch_size, gs, single_cls, pad=0.5, rect=True, prefix=colorstr(f'{task}: '))[0] @@ -294,7 +298,7 @@ def test(data, parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)') parser.add_argument('--data', type=str, default='data/coco128.yaml', help='*.data path') parser.add_argument('--batch-size', type=int, default=32, help='size of each image batch') - parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)') + parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)') parser.add_argument('--conf-thres', type=float, default=0.001, help='object confidence threshold') parser.add_argument('--iou-thres', type=float, default=0.6, help='IOU threshold for NMS') parser.add_argument('--task', default='val', help='train, val, test, speed or study') @@ -312,31 +316,17 @@ def test(data, parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') opt = parser.parse_args() opt.save_json |= opt.data.endswith('coco.yaml') + opt.save_txt |= opt.save_hybrid opt.data = check_file(opt.data) # check file print(opt) check_requirements(exclude=('tensorboard', 'thop')) if opt.task in ('train', 'val', 'test'): # run normally - test(opt.data, - opt.weights, - opt.batch_size, - opt.img_size, - opt.conf_thres, - opt.iou_thres, - opt.save_json, - opt.single_cls, - opt.augment, - opt.verbose, - save_txt=opt.save_txt | opt.save_hybrid, - save_hybrid=opt.save_hybrid, - save_conf=opt.save_conf, - half=opt.half, - opt=opt - ) + test(**vars(opt)) elif opt.task == 'speed': # speed benchmarks for w in opt.weights if isinstance(opt.weights, list) else [opt.weights]: - test(opt.data, w, opt.batch_size, opt.img_size, 0.25, 0.45, save_json=False, plots=False, opt=opt) + test(opt.data, w, opt.batch_size, opt.imgsz, 0.25, 0.45, save_json=False, plots=False) elif opt.task == 'study': # run over a range of settings and save/plot # python test.py --task study --data coco.yaml --iou 0.7 --weights yolov5s.pt yolov5m.pt yolov5l.pt yolov5x.pt @@ -347,7 +337,7 @@ def test(data, for i in x: # img-size print(f'\nRunning {f} point {i}...') r, _, t = test(opt.data, w, opt.batch_size, i, opt.conf_thres, opt.iou_thres, opt.save_json, - plots=False, opt=opt) + plots=False) y.append(r + t) # results and times np.savetxt(f, y, fmt='%10.4g') # save os.system('zip -r study.zip 
study_*.txt') From 66cf5c28c1c9c593532b71610c81b7292af2bebd Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 9 Jun 2021 22:19:34 +0200 Subject: [PATCH 058/757] Refactor detect.py arguments (#3559) * Refactor detect.py arguments @SkalskiP @KalenMike * unused ok * comment arguments --- detect.py | 73 +++++++++++++++++++++++++++++++++++-------------------- 1 file changed, 46 insertions(+), 27 deletions(-) diff --git a/detect.py b/detect.py index 5551824a4110..5a13b5303238 100644 --- a/detect.py +++ b/detect.py @@ -15,20 +15,42 @@ @torch.no_grad() -def detect(opt): - source, weights, view_img, save_txt, imgsz = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size - save_img = not opt.nosave and not source.endswith('.txt') # save inference images +def detect(weights='yolov5s.pt', # model.pt path(s) + source='data/images', # file/dir/URL/glob, 0 for webcam + imgsz=640, # inference size (pixels) + conf_thres=0.25, # confidence threshold + iou_thres=0.45, # NMS IOU threshold + max_det=1000, # maximum detections per image + device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu + view_img=False, # show results + save_txt=False, # save results to *.txt + save_conf=False, # save confidences in --save-txt labels + save_crop=False, # save cropped prediction boxes + nosave=False, # do not save images/videos + classes=None, # filter by class: --class 0, or --class 0 2 3 + agnostic_nms=False, # class-agnostic NMS + augment=False, # augmented inference + update=False, # update all models + project='runs/detect', # save results to project/name + name='exp', # save results to project/name + exist_ok=False, # existing project/name ok, do not increment + line_thickness=3, # bounding box thickness (pixels) + hide_labels=False, # hide labels + hide_conf=False, # hide confidences + half=False, # use FP16 half-precision inference + ): + save_img = not nosave and not source.endswith('.txt') # save inference images webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith( ('rtsp://', 'rtmp://', 'http://', 'https://')) # Directories - save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok) # increment run + save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Initialize set_logging() - device = select_device(opt.device) - half = opt.half and device.type != 'cpu' # half precision only supported on CUDA + device = select_device(device) + half &= device.type != 'cpu' # half precision only supported on CUDA # Load model model = attempt_load(weights, map_location=device) # load FP32 model @@ -66,11 +88,10 @@ def detect(opt): # Inference t1 = time_synchronized() - pred = model(img, augment=opt.augment)[0] + pred = model(img, augment=augment)[0] # Apply NMS - pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, opt.classes, opt.agnostic_nms, - max_det=opt.max_det) + pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det) t2 = time_synchronized() # Apply Classifier @@ -89,7 +110,7 @@ def detect(opt): txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # img.txt s += '%gx%g ' % img.shape[2:] # print string gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh - imc = im0.copy() if opt.save_crop else im0 # for opt.save_crop + imc = im0.copy() if save_crop else im0 # for save_crop if len(det): # Rescale boxes from img_size to im0 
size det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round() @@ -103,15 +124,15 @@ def detect(opt): for *xyxy, conf, cls in reversed(det): if save_txt: # Write to file xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh - line = (cls, *xywh, conf) if opt.save_conf else (cls, *xywh) # label format + line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format with open(txt_path + '.txt', 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') - if save_img or opt.save_crop or view_img: # Add bbox to image + if save_img or save_crop or view_img: # Add bbox to image c = int(cls) # integer class - label = None if opt.hide_labels else (names[c] if opt.hide_conf else f'{names[c]} {conf:.2f}') - plot_one_box(xyxy, im0, label=label, color=colors(c, True), line_thickness=opt.line_thickness) - if opt.save_crop: + label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') + plot_one_box(xyxy, im0, label=label, color=colors(c, True), line_thickness=line_thickness) + if save_crop: save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) # Print time (inference + NMS) @@ -145,19 +166,22 @@ def detect(opt): s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' print(f"Results saved to {save_dir}{s}") + if update: + strip_optimizer(weights) # update model (to fix SourceChangeWarning) + print(f'Done. ({time.time() - t0:.3f}s)') if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)') - parser.add_argument('--source', type=str, default='data/images', help='source') # file/folder, 0 for webcam - parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)') - parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold') - parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS') - parser.add_argument('--max-det', type=int, default=1000, help='maximum number of detections per image') + parser.add_argument('--source', type=str, default='data/images', help='file/dir/URL/glob, 0 for webcam') + parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)') + parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold') + parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IOU threshold') + parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image') parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') - parser.add_argument('--view-img', action='store_true', help='display results') + parser.add_argument('--view-img', action='store_true', help='show results') parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes') @@ -177,9 +201,4 @@ def detect(opt): print(opt) check_requirements(exclude=('tensorboard', 'thop')) - if opt.update: # update all models (to fix SourceChangeWarning) - for opt.weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']: - detect(opt=opt) - strip_optimizer(opt.weights) - else: - detect(opt=opt) + detect(**vars(opt)) From 0e5cfdbea756716d5bbdfe6f3b26b2731e2facc4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 9 Jun 2021 22:43:46 +0200 Subject: [PATCH 059/757] Refactor models/export.py arguments (#3564) * Refactor models/export.py arguments * cleanup * cleanup --- models/export.py | 108 +++++++++++++++++++++++++++-------------------- 1 file changed, 63 insertions(+), 45 deletions(-) diff --git a/models/export.py b/models/export.py index c03770178829..6f8799e55593 100644 --- a/models/export.py +++ b/models/export.py @@ -1,4 +1,4 @@ -"""Exports a YOLOv5 *.pt model to TorchScript, ONNX, CoreML formats +"""Export a YOLOv5 *.pt model to TorchScript, ONNX, CoreML formats Usage: $ python path/to/models/export.py --weights yolov5s.pt --img 640 --batch 1 @@ -21,42 +21,39 @@ from utils.general import colorstr, check_img_size, check_requirements, file_size, set_logging from utils.torch_utils import select_device -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--weights', type=str, default='./yolov5s.pt', help='weights path') - parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='image size') # height, width - parser.add_argument('--batch-size', type=int, default=1, help='batch size') - parser.add_argument('--device', default='cpu', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') - parser.add_argument('--include', nargs='+', default=['torchscript', 'onnx', 'coreml'], help='include formats') - parser.add_argument('--half', action='store_true', help='FP16 half-precision export') - parser.add_argument('--inplace', action='store_true', help='set YOLOv5 Detect() inplace=True') - parser.add_argument('--train', action='store_true', help='model.train() mode') - parser.add_argument('--optimize', action='store_true', help='optimize TorchScript for mobile') # TorchScript-only - parser.add_argument('--dynamic', action='store_true', help='dynamic ONNX axes') # ONNX-only - parser.add_argument('--simplify', action='store_true', help='simplify ONNX model') # ONNX-only - parser.add_argument('--opset-version', type=int, default=12, help='ONNX opset version') # ONNX-only - opt = parser.parse_args() - opt.img_size *= 2 if len(opt.img_size) == 1 else 1 # expand - opt.include = [x.lower() for x in opt.include] - print(opt) - set_logging() + +def export(weights='./yolov5s.pt', # weights path + img_size=(640, 640), # image (height, width) + batch_size=1, # batch size + device='cpu', # cuda device, i.e. 
0 or 0,1,2,3 or cpu + include=('torchscript', 'onnx', 'coreml'), # include formats + half=False, # FP16 half-precision export + inplace=False, # set YOLOv5 Detect() inplace=True + train=False, # model.train() mode + optimize=False, # TorchScript: optimize for mobile + dynamic=False, # ONNX: dynamic axes + simplify=False, # ONNX: simplify model + opset_version=12, # ONNX: opset version + ): t = time.time() + include = [x.lower() for x in include] + img_size *= 2 if len(img_size) == 1 else 1 # expand # Load PyTorch model - device = select_device(opt.device) - assert not (opt.device.lower() == 'cpu' and opt.half), '--half only compatible with GPU export, i.e. use --device 0' - model = attempt_load(opt.weights, map_location=device) # load FP32 model + device = select_device(device) + assert not (device.type == 'cpu' and opt.half), '--half only compatible with GPU export, i.e. use --device 0' + model = attempt_load(weights, map_location=device) # load FP32 model labels = model.names # Input gs = int(max(model.stride)) # grid size (max stride) - opt.img_size = [check_img_size(x, gs) for x in opt.img_size] # verify img_size are gs-multiples - img = torch.zeros(opt.batch_size, 3, *opt.img_size).to(device) # image size(1,3,320,192) iDetection + img_size = [check_img_size(x, gs) for x in img_size] # verify img_size are gs-multiples + img = torch.zeros(batch_size, 3, *img_size).to(device) # image size(1,3,320,192) iDetection # Update model - if opt.half: + if half: img, model = img.half(), model.half() # to FP16 - model.train() if opt.train else model.eval() # training mode = no Detect() layer grid construction + model.train() if train else model.eval() # training mode = no Detect() layer grid construction for k, m in model.named_modules(): m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility if isinstance(m, models.common.Conv): # assign export-friendly activations @@ -65,42 +62,42 @@ elif isinstance(m.act, nn.SiLU): m.act = SiLU() elif isinstance(m, models.yolo.Detect): - m.inplace = opt.inplace - m.onnx_dynamic = opt.dynamic + m.inplace = inplace + m.onnx_dynamic = dynamic # m.forward = m.forward_export # assign forward (optional) for _ in range(2): y = model(img) # dry runs - print(f"\n{colorstr('PyTorch:')} starting from {opt.weights} ({file_size(opt.weights):.1f} MB)") + print(f"\n{colorstr('PyTorch:')} starting from {weights} ({file_size(weights):.1f} MB)") # TorchScript export ----------------------------------------------------------------------------------------------- - if 'torchscript' in opt.include or 'coreml' in opt.include: + if 'torchscript' in include or 'coreml' in include: prefix = colorstr('TorchScript:') try: print(f'\n{prefix} starting export with torch {torch.__version__}...') - f = opt.weights.replace('.pt', '.torchscript.pt') # filename + f = weights.replace('.pt', '.torchscript.pt') # filename ts = torch.jit.trace(model, img, strict=False) - (optimize_for_mobile(ts) if opt.optimize else ts).save(f) + (optimize_for_mobile(ts) if optimize else ts).save(f) print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') except Exception as e: print(f'{prefix} export failure: {e}') # ONNX export ------------------------------------------------------------------------------------------------------ - if 'onnx' in opt.include: + if 'onnx' in include: prefix = colorstr('ONNX:') try: import onnx print(f'{prefix} starting export with onnx {onnx.__version__}...') - f = opt.weights.replace('.pt', '.onnx') # filename - torch.onnx.export(model, img, f, 
verbose=False, opset_version=opt.opset_version, - training=torch.onnx.TrainingMode.TRAINING if opt.train else torch.onnx.TrainingMode.EVAL, - do_constant_folding=not opt.train, + f = weights.replace('.pt', '.onnx') # filename + torch.onnx.export(model, img, f, verbose=False, opset_version=opset_version, + training=torch.onnx.TrainingMode.TRAINING if train else torch.onnx.TrainingMode.EVAL, + do_constant_folding=not train, input_names=['images'], output_names=['output'], dynamic_axes={'images': {0: 'batch', 2: 'height', 3: 'width'}, # shape(1,3,640,640) 'output': {0: 'batch', 1: 'anchors'} # shape(1,25200,85) - } if opt.dynamic else None) + } if dynamic else None) # Checks model_onnx = onnx.load(f) # load onnx model @@ -108,7 +105,7 @@ # print(onnx.helper.printable_graph(model_onnx.graph)) # print # Simplify - if opt.simplify: + if simplify: try: check_requirements(['onnx-simplifier']) import onnxsim @@ -116,8 +113,8 @@ print(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...') model_onnx, check = onnxsim.simplify( model_onnx, - dynamic_input_shape=opt.dynamic, - input_shapes={'images': list(img.shape)} if opt.dynamic else None) + dynamic_input_shape=dynamic, + input_shapes={'images': list(img.shape)} if dynamic else None) assert check, 'assert check failed' onnx.save(model_onnx, f) except Exception as e: @@ -127,15 +124,15 @@ print(f'{prefix} export failure: {e}') # CoreML export ---------------------------------------------------------------------------------------------------- - if 'coreml' in opt.include: + if 'coreml' in include: prefix = colorstr('CoreML:') try: import coremltools as ct print(f'{prefix} starting export with coremltools {ct.__version__}...') - assert opt.train, 'CoreML exports should be placed in model.train() mode with `python export.py --train`' + assert train, 'CoreML exports should be placed in model.train() mode with `python export.py --train`' model = ct.convert(ts, inputs=[ct.ImageType('image', shape=img.shape, scale=1 / 255.0, bias=[0, 0, 0])]) - f = opt.weights.replace('.pt', '.mlmodel') # filename + f = weights.replace('.pt', '.mlmodel') # filename model.save(f) print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') except Exception as e: @@ -143,3 +140,24 @@ # Finish print(f'\nExport complete ({time.time() - t:.2f}s). Visualize with https://github.com/lutzroeder/netron.') + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--weights', type=str, default='./yolov5s.pt', help='weights path') + parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='image (height, width)') + parser.add_argument('--batch-size', type=int, default=1, help='batch size') + parser.add_argument('--device', default='cpu', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') + parser.add_argument('--include', nargs='+', default=['torchscript', 'onnx', 'coreml'], help='include formats') + parser.add_argument('--half', action='store_true', help='FP16 half-precision export') + parser.add_argument('--inplace', action='store_true', help='set YOLOv5 Detect() inplace=True') + parser.add_argument('--train', action='store_true', help='model.train() mode') + parser.add_argument('--optimize', action='store_true', help='TorchScript: optimize for mobile') + parser.add_argument('--dynamic', action='store_true', help='ONNX: dynamic axes') + parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model') + parser.add_argument('--opset-version', type=int, default=12, help='ONNX: opset version') + opt = parser.parse_args() + print(opt) + set_logging() + + export(**vars(opt)) From 4695ca8314269c9a9f4b8cf89c7962205f27fdad Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 9 Jun 2021 22:50:27 +0200 Subject: [PATCH 060/757] Refactoring cleanup (#3565) * Refactoring cleanup * Update test.py --- detect.py | 2 +- test.py | 52 +++++++++++++++++++++++++++------------------------- train.py | 2 +- 3 files changed, 29 insertions(+), 27 deletions(-) diff --git a/detect.py b/detect.py index 5a13b5303238..67916c652742 100644 --- a/detect.py +++ b/detect.py @@ -178,7 +178,7 @@ def detect(weights='yolov5s.pt', # model.pt path(s) parser.add_argument('--source', type=str, default='data/images', help='file/dir/URL/glob, 0 for webcam') parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)') parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold') - parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IOU threshold') + parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold') parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image') parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') parser.add_argument('--view-img', action='store_true', help='show results') diff --git a/test.py b/test.py index a10f0f88f8e6..cbc97b420155 100644 --- a/test.py +++ b/test.py @@ -20,30 +20,31 @@ @torch.no_grad() def test(data, - weights=None, - batch_size=32, - imgsz=640, # image size + weights=None, # model.pt path(s) + batch_size=32, # batch size + imgsz=640, # inference size (pixels) conf_thres=0.001, # confidence threshold iou_thres=0.6, # NMS IoU threshold - save_json=False, - single_cls=False, - augment=False, - verbose=False, + task='val', # train, val, test, speed or study + device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu + single_cls=False, # treat as single-class dataset + augment=False, # augmented inference + verbose=False, # verbose output + save_txt=False, # save results to *.txt + save_hybrid=False, # save label+prediction hybrid results to *.txt + save_conf=False, # save confidences in --save-txt labels + save_json=False, # save a cocoapi-compatible JSON results file + project='runs/test', # save to project/name + name='exp', # save to project/name + exist_ok=False, # existing project/name ok, do not increment + half=True, # use FP16 half-precision inference model=None, dataloader=None, - save_dir=Path(''), # for saving images - save_txt=False, # for auto-labelling - save_hybrid=False, # for hybrid auto-labelling - save_conf=False, # save auto-label confidences + save_dir=Path(''), plots=True, wandb_logger=None, compute_loss=None, - half=True, # FP16 half-precision inference - project='runs/test', - name='exp', - exist_ok=False, - task='val', - device=''): + ): # Initialize/load model and set device training = model is not None if training: # called by train.py @@ -155,7 +156,7 @@ def test(data, with open(save_dir / 'labels' / (path.stem + '.txt'), 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') - # W&B logging - Media Panel Plots + # W&B logging - Media Panel plots if len(wandb_images) < log_imgs and wandb_logger.current_epoch > 0: # Check for test operation if wandb_logger.current_epoch % wandb_logger.bbox_interval == 0: box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, @@ -295,12 +296,12 @@ def test(data, if __name__ == '__main__': parser = argparse.ArgumentParser(prog='test.py') + parser.add_argument('--data', type=str, default='data/coco128.yaml', help='dataset.yaml path') parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)') - parser.add_argument('--data', type=str, default='data/coco128.yaml', help='*.data path') - parser.add_argument('--batch-size', type=int, default=32, help='size of each image batch') + parser.add_argument('--batch-size', type=int, default=32, help='batch size') parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)') - parser.add_argument('--conf-thres', type=float, default=0.001, help='object confidence threshold') - parser.add_argument('--iou-thres', type=float, default=0.6, help='IOU threshold for NMS') + parser.add_argument('--conf-thres', type=float, default=0.001, help='confidence threshold') + parser.add_argument('--iou-thres', type=float, default=0.6, help='NMS IoU threshold') parser.add_argument('--task', default='val', help='train, val, test, speed or study') parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset') @@ -326,7 +327,8 @@ def test(data, elif opt.task == 'speed': # speed benchmarks for w in opt.weights if isinstance(opt.weights, list) else [opt.weights]: - test(opt.data, w, opt.batch_size, opt.imgsz, 0.25, 0.45, save_json=False, plots=False) + test(opt.data, weights=w, batch_size=opt.batch_size, imgsz=opt.imgsz, conf_thres=.25, iou_thres=.45, + save_json=False, plots=False) elif opt.task == 'study': # run over a range of settings and save/plot # python test.py --task study --data coco.yaml --iou 0.7 --weights yolov5s.pt yolov5m.pt yolov5l.pt yolov5x.pt @@ -336,8 +338,8 @@ def test(data, y = [] # y axis for i in x: # img-size print(f'\nRunning {f} point {i}...') - r, _, t = test(opt.data, w, opt.batch_size, i, opt.conf_thres, opt.iou_thres, opt.save_json, - plots=False) + r, _, t = test(opt.data, weights=w, batch_size=opt.batch_size, imgsz=i, conf_thres=opt.conf_thres, + iou_thres=opt.iou_thres, save_json=opt.save_json, plots=False) y.append(r + t) # results and times np.savetxt(f, y, fmt='%10.4g') # save os.system('zip -r study.zip study_*.txt') diff --git a/train.py b/train.py index b92936d762b5..505556075af5 100644 --- a/train.py +++ b/train.py @@ -454,7 +454,7 @@ def train(hyp, opt, device, tb_writer=None): parser = argparse.ArgumentParser() parser.add_argument('--weights', type=str, default='yolov5s.pt', help='initial weights path') parser.add_argument('--cfg', type=str, default='', help='model.yaml path') - parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path') + parser.add_argument('--data', type=str, default='data/coco128.yaml', help='dataset.yaml path') parser.add_argument('--hyp', type=str, default='data/hyp.scratch.yaml', help='hyperparameters path') parser.add_argument('--epochs', type=int, default=300) parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs') From 095197bd4a011b867f1bb7118d1735dd84ac5ee6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 10 Jun 2021 13:51:29 +0200 Subject: [PATCH 061/757] Ignore Seaborn plot warnings (#3576) * Ignore Seaborn plot warnings * Update plots.py * Update metrics.py --- utils/metrics.py | 9 ++++++--- utils/plots.py | 8 ++++---- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/utils/metrics.py b/utils/metrics.py index 323c84b6c873..6b61d6d6ef02 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -1,5 +1,6 @@ # Model validation metrics +import warnings from pathlib import Path import matplotlib.pyplot as plt @@ -167,9 +168,11 @@ def plot(self, save_dir='', names=()): fig = plt.figure(figsize=(12, 9), tight_layout=True) sn.set(font_scale=1.0 if self.nc < 50 else 0.8) # for label size labels = (0 < len(names) < 99) and len(names) == self.nc # apply names to ticklabels - sn.heatmap(array, annot=self.nc < 30, annot_kws={"size": 8}, cmap='Blues', fmt='.2f', square=True, - xticklabels=names + ['background FP'] if labels else "auto", - yticklabels=names + ['background FN'] if labels else "auto").set_facecolor((1, 1, 1)) + with warnings.catch_warnings(): + warnings.simplefilter('ignore') # suppress empty matrix RuntimeWarning: All-NaN slice encountered + sn.heatmap(array, annot=self.nc < 30, annot_kws={"size": 8}, cmap='Blues', fmt='.2f', square=True, + xticklabels=names + ['background FP'] if labels else "auto", + yticklabels=names + ['background FN'] if labels else "auto").set_facecolor((1, 1, 1)) fig.axes[0].set_xlabel('True') 
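            # axis labels follow the column-wise normalization above: matrix columns are
            # ground-truth ('True') classes and rows are predictions, so with
            # normalize=True each column sums to ~1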
fig.axes[0].set_ylabel('Predicted') fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250) diff --git a/utils/plots.py b/utils/plots.py index 973b9ae19b54..66a30918190e 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -11,7 +11,7 @@ import matplotlib.pyplot as plt import numpy as np import pandas as pd -import seaborn as sns +import seaborn as sn import torch import yaml from PIL import Image, ImageDraw, ImageFont @@ -291,7 +291,7 @@ def plot_labels(labels, names=(), save_dir=Path(''), loggers=None): x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height']) # seaborn correlogram - sns.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9)) + sn.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9)) plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200) plt.close() @@ -306,8 +306,8 @@ def plot_labels(labels, names=(), save_dir=Path(''), loggers=None): ax[0].set_xticklabels(names, rotation=90, fontsize=10) else: ax[0].set_xlabel('classes') - sns.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9) - sns.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9) + sn.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9) + sn.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9) # rectangles labels[:, 1:3] = 0.5 # center From 53ed872c282fea6d909d2052b25be53c9c05cfb6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 10 Jun 2021 15:35:22 +0200 Subject: [PATCH 062/757] Update export.py, yolo.py `sys.path.append()` (#3579) --- models/export.py | 12 +++++++----- models/yolo.py | 6 ++++-- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/models/export.py b/models/export.py index 6f8799e55593..3c04b07fdc95 100644 --- a/models/export.py +++ b/models/export.py @@ -9,13 +9,15 @@ import time from pathlib import Path -sys.path.append(Path(__file__).parent.parent.absolute().__str__()) # to run '$ python *.py' files in subdirectories - import torch import torch.nn as nn from torch.utils.mobile_optimizer import optimize_for_mobile -import models +FILE = Path(__file__).absolute() +sys.path.append(FILE.parents[1].as_posix()) # add yolov5/ to path + +from models.common import Conv +from models.yolo import Detect from models.experimental import attempt_load from utils.activations import Hardswish, SiLU from utils.general import colorstr, check_img_size, check_requirements, file_size, set_logging @@ -56,12 +58,12 @@ def export(weights='./yolov5s.pt', # weights path model.train() if train else model.eval() # training mode = no Detect() layer grid construction for k, m in model.named_modules(): m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility - if isinstance(m, models.common.Conv): # assign export-friendly activations + if isinstance(m, Conv): # assign export-friendly activations if isinstance(m.act, nn.Hardswish): m.act = Hardswish() elif isinstance(m.act, nn.SiLU): m.act = SiLU() - elif isinstance(m, models.yolo.Detect): + elif isinstance(m, Detect): m.inplace = inplace m.onnx_dynamic = dynamic # m.forward = m.forward_export # assign forward (optional) diff --git a/models/yolo.py b/models/yolo.py index 1a7be913023c..4a2514edd295 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -10,8 +10,8 @@ from copy import deepcopy from pathlib import Path -sys.path.append(Path(__file__).parent.parent.absolute().__str__()) # to run '$ python *.py' files in subdirectories -logger = logging.getLogger(__name__) +FILE = Path(__file__).absolute() 
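+# e.g. with a checkout at the hypothetical path /path/to/yolov5, FILE resolves to
+# /path/to/yolov5/models/yolo.py and FILE.parents[1] to /path/to/yolov5, so the repo
+# root lands on sys.path and 'from models.common import *' below works when this
+# file is run directly as a script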
+sys.path.append(FILE.parents[1].as_posix()) # add yolov5/ to path from models.common import * from models.experimental import * @@ -25,6 +25,8 @@ except ImportError: thop = None +logger = logging.getLogger(__name__) + class Detect(nn.Module): stride = None # strides computed during build From 5c32bd3080c8643aed9c167bb2fc655f502facaf Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 10 Jun 2021 16:01:34 +0200 Subject: [PATCH 063/757] Created using Colaboratory --- tutorial.ipynb | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index b6d672d10e52..48780f94c856 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -523,10 +523,11 @@ { "cell_type": "markdown", "metadata": { - "id": "HvhYZrIZCEyo" + "id": "t6MPjfT5NrKQ" }, "source": [ - "\n", + "\n", + "\n", "\n", "This is the **official YOLOv5 🚀 notebook** authored by **Ultralytics**, and is freely available for redistribution under the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/). \n", "For more information please visit https://github.com/ultralytics/yolov5 and https://ultralytics.com. Thank you!" @@ -563,7 +564,7 @@ "clear_output()\n", "print(f\"Setup complete. Using torch {torch.__version__} ({torch.cuda.get_device_properties(0).name if torch.cuda.is_available() else 'CPU'})\")" ], - "execution_count": 1, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -681,7 +682,7 @@ "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017val.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../ && rm tmp.zip" ], - "execution_count": 2, + "execution_count": null, "outputs": [ { "output_type": "display_data", @@ -721,7 +722,7 @@ "# Run YOLOv5x on COCO val2017\n", "!python test.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65 --half" ], - "execution_count": 3, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -845,7 +846,7 @@ "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../ && rm tmp.zip" ], - "execution_count": 2, + "execution_count": null, "outputs": [ { "output_type": "display_data", @@ -923,7 +924,7 @@ "# Train YOLOv5s on COCO128 for 3 epochs\n", "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache" ], - "execution_count": 9, + "execution_count": null, "outputs": [ { "output_type": "stream", From 46e1fdfbc65c450c7bac9f7f0438a6b542dbe2ec Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 11 Jun 2021 11:23:39 +0200 Subject: [PATCH 064/757] Update stale.yml (#3585) --- .github/workflows/stale.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index a81e4007cffb..d620e540706a 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -19,7 +19,7 @@ jobs: - **Docs** – https://docs.ultralytics.com Access additional [Ultralytics](https://ultralytics.com) ⚡ resources: - - **Ultralytics HUB** – https://ultralytics.com/pricing + - **Ultralytics HUB** – https://ultralytics.com - **Vision API** – https://ultralytics.com/yolov5 - **About Us** – https://ultralytics.com/about - **Join Our Team** – https://ultralytics.com/work From ec2da4a82c92dc594d8d05112cbded1d8576bdd2 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 11 Jun 2021 11:37:08 +0200 Subject: [PATCH 065/757] Add ConfusionMatrix `normalize=True` flag (#3586) --- utils/metrics.py | 
 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/utils/metrics.py b/utils/metrics.py
index 6b61d6d6ef02..09b994414ffc 100644
--- a/utils/metrics.py
+++ b/utils/metrics.py
@@ -158,11 +158,12 @@ def process_batch(self, detections, labels):
     def matrix(self):
         return self.matrix

-    def plot(self, save_dir='', names=()):
+    def plot(self, normalize=True, save_dir='', names=()):
         try:
             import seaborn as sn
-
-            array = self.matrix / (self.matrix.sum(0).reshape(1, self.nc + 1) + 1E-6)  # normalize
+
+            if normalize:
+                array = self.matrix / (self.matrix.sum(0).reshape(1, self.nc + 1) + 1E-6)  # normalize columns
             array[array < 0.005] = np.nan  # don't annotate (would appear as 0.00)

             fig = plt.figure(figsize=(12, 9), tight_layout=True)

From e8c52374035fd2fb5a0b0029eaa5e5705186df17 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Fri, 11 Jun 2021 11:46:05 +0200
Subject: [PATCH 066/757] ConfusionMatrix `normalize=True` fix (#3587)

---
 utils/metrics.py | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/utils/metrics.py b/utils/metrics.py
index 09b994414ffc..8512197956e7 100644
--- a/utils/metrics.py
+++ b/utils/metrics.py
@@ -161,9 +161,8 @@ def matrix(self):
     def plot(self, normalize=True, save_dir='', names=()):
         try:
             import seaborn as sn
-
-            if normalize:
-                array = self.matrix / (self.matrix.sum(0).reshape(1, self.nc + 1) + 1E-6)  # normalize columns
+
+            array = self.matrix / ((self.matrix.sum(0).reshape(1, -1) + 1E-6) if normalize else 1)  # normalize columns
             array[array < 0.005] = np.nan  # don't annotate (would appear as 0.00)

             fig = plt.figure(figsize=(12, 9), tight_layout=True)
@@ -178,7 +177,7 @@ def plot(self, normalize=True, save_dir='', names=()):
             fig.axes[0].set_ylabel('Predicted')
             fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250)
         except Exception as e:
-            pass
+            print(f'WARNING: ConfusionMatrix plot failure: {e}')

     def print(self):
         for i in range(self.nc + 1):

From 4984cf54be4eb88f00ccf33a05f57681b2a770ab Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Fri, 11 Jun 2021 20:24:03 +0200
Subject: [PATCH 067/757] train.py GPU memory fix (#3590)

* train.py GPU memory fix

* ema

* cuda

* cuda

* zeros input

* to device

* batch index 0

---
 train.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/train.py b/train.py
index 505556075af5..142268b273f1 100644
--- a/train.py
+++ b/train.py
@@ -335,7 +335,7 @@ def train(hyp, opt, device, tb_writer=None):
             if tb_writer and ni == 0:
                 with warnings.catch_warnings():
                     warnings.simplefilter('ignore')  # suppress jit trace warning
-                    tb_writer.add_graph(torch.jit.trace(de_parallel(model), imgs, strict=False), [])  # graph
+                    tb_writer.add_graph(torch.jit.trace(de_parallel(model), imgs[0:1], strict=False), [])
             elif plots and ni == 10 and wandb_logger.wandb:
                 wandb_logger.log({'Mosaics': [wandb_logger.wandb.Image(str(x), caption=x.name) for x in
                                               save_dir.glob('train*.jpg') if x.exists()]})

From 53d4fc2e265c469112e86f3ed1dec9817a7c9936 Mon Sep 17 00:00:00 2001
From: Ayush Chaurasia
Date: Sat, 12 Jun 2021 02:48:57 +0530
Subject: [PATCH 068/757] W&B: Allow changes in config variable (#3588)

---
 utils/wandb_logging/wandb_utils.py | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/utils/wandb_logging/wandb_utils.py b/utils/wandb_logging/wandb_utils.py
index 57ce9035a777..9975af63d02c 100644
--- a/utils/wandb_logging/wandb_utils.py
+++ b/utils/wandb_logging/wandb_utils.py
@@ -103,7 +103,11 @@ def __init__(self, opt, name, run_id, data_dict, job_type='Training'):
                 model_artifact_name =
WANDB_ARTIFACT_PREFIX + model_artifact_name
                 assert wandb, 'install wandb to resume wandb runs'
                 # Resume wandb-artifact:// runs here| workaround for not overwriting wandb.config
-                self.wandb_run = wandb.init(id=run_id, project=project, entity=entity, resume='allow')
+                self.wandb_run = wandb.init(id=run_id,
+                                            project=project,
+                                            entity=entity,
+                                            resume='allow',
+                                            allow_val_change=True)
                 opt.resume = model_artifact_name
         elif self.wandb:
             self.wandb_run = wandb.init(config=opt,
@@ -112,7 +116,8 @@ def __init__(self, opt, name, run_id, data_dict, job_type='Training'):
                                         entity=opt.entity,
                                         name=name,
                                         job_type=job_type,
-                                        id=run_id) if not wandb.run else wandb.run
+                                        id=run_id,
+                                        allow_val_change=True) if not wandb.run else wandb.run
         if self.wandb_run:
             if self.job_type == 'Training':
                 if not opt.resume:

From 7a565f130a257aed46a0cac77cca945b489696bf Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Sat, 12 Jun 2021 13:26:41 +0200
Subject: [PATCH 069/757] Update `dataset_stats()` (#3593)

@KalenMike this is a PR to add image filenames and labels to our stats dictionary and to save the dictionary to JSON. Save location is next to the train labels.cache file. The single JSON contains all stats for the entire dataset.

Usage example:
```python
from utils.datasets import *

dataset_stats('coco128.yaml', verbose=True)
```
---
 utils/datasets.py | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/utils/datasets.py b/utils/datasets.py
index 444b3ff2f60c..f18569a7665b 100755
--- a/utils/datasets.py
+++ b/utils/datasets.py
@@ -2,6 +2,7 @@

 import glob
 import hashlib
+import json
 import logging
 import math
 import os
@@ -1105,12 +1106,20 @@ def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False):
             continue
         x = []
         dataset = LoadImagesAndLabels(data[split], augment=False, rect=True)  # load dataset
+        if split == 'train':
+            cache_path = Path(dataset.label_files[0]).parent.with_suffix('.cache')  # *.cache path
         for label in tqdm(dataset.labels, total=dataset.n, desc='Statistics'):
             x.append(np.bincount(label[:, 0].astype(int), minlength=nc))
         x = np.array(x)  # shape(128x80)
-        stats[split] = {'instances': {'total': int(x.sum()), 'per_class': x.sum(0).tolist()},
-                        'images': {'total': dataset.n, 'unlabelled': int(np.all(x == 0, 1).sum()),
-                                   'per_class': (x > 0).sum(0).tolist()}}
+        stats[split] = {'instance_stats': {'total': int(x.sum()), 'per_class': x.sum(0).tolist()},
+                        'image_stats': {'total': dataset.n, 'unlabelled': int(np.all(x == 0, 1).sum()),
+                                        'per_class': (x > 0).sum(0).tolist()},
+                        'labels': {str(Path(k).name): v.tolist() for k, v in zip(dataset.img_files, dataset.labels)}}
+
+    # Save, print and return
+    with open(cache_path.with_suffix('.json'), 'w') as f:
+        json.dump(stats, f)  # save stats *.json
     if verbose:
         print(yaml.dump([stats], sort_keys=False, default_flow_style=False))
+        # print(json.dumps(stats, indent=2, sort_keys=False))
     return stats

From 88b1945241dd0ef491da2ae0ce89f15ab67733e9 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Sat, 12 Jun 2021 15:21:37 +0200
Subject: [PATCH 070/757] Delete __init__.py (#3596)

---
 __init__.py | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 delete mode 100644 __init__.py

diff --git a/__init__.py b/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000

From 31336075609a3fbcb4afe398eba2967b22056bfa Mon Sep 17 00:00:00 2001
From: Kalen Michael
Date: Sun, 13 Jun 2021 02:37:20 +0200
Subject: [PATCH 071/757] Simplify README.md (#3530)

* Update README.md

* added hosted images

* added new logo

* testing image hosting

* 
changed svgs to pngs * removed old header * Update README.md * correct colab image source * splash.jpg * rocket and W&B fix * added contributing template * added social media to top section * increased size of top social media * cleanup and updates * rearrange quickstarts * API cleanup * PyTorch Hub cleanup * Add tutorials * cleanup * update CONTRIBUTING.md * Update README.md * update wandb link * Update README.md * remove tutorials header * update environments and integrations * Comment API image * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * double spaces after section * Update README.md * Update README.md Co-authored-by: Glenn Jocher --- CONTRIBUTING.md | 49 +++++++ README.md | 337 +++++++++++++++++++++++++++++++----------------- 2 files changed, 268 insertions(+), 118 deletions(-) create mode 100644 CONTRIBUTING.md diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 000000000000..acf74448c1fd --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,49 @@ +## Contributing to YOLOv5 🚀 + +We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible, whether it's: + +- Reporting a bug +- Discussing the current state of the code +- Submitting a fix +- Proposing a new feature +- Becoming a maintainer + +YOLOv5 works so well due to our combined community effort, and for every small improvement you contribute you will be helping push the frontiers of what's possible in AI 😃! + + +## Submitting a Pull Request (PR) 🛠️ + +To allow your work to be integrated as seamlessly as possible, we advise you to: +- ✅ Verify your PR is **up-to-date with origin/master.** If your PR is behind origin/master an automatic [GitHub actions](https://github.com/ultralytics/yolov5/blob/master/.github/workflows/rebase.yml) rebase may be attempted by including the /rebase command in a comment body, or by running the following code, replacing 'feature' with the name of your local branch: +```bash +git remote add upstream https://github.com/ultralytics/yolov5.git +git fetch upstream +git checkout feature # <----- replace 'feature' with local branch name +git merge upstream/master +git push -u origin -f +``` +- ✅ Verify all Continuous Integration (CI) **checks are passing**. +- ✅ Reduce changes to the absolute **minimum** required for your bug fix or feature addition. _"It is not daily increase but daily decrease, hack away the unessential. The closer to the source, the less wastage there is."_ -Bruce Lee + + +## Submitting a Bug Report 🐛 + +For us to investigate an issue we would need to be able to reproduce it ourselves first. We've created a few short guidelines below to help users provide what we need in order to get started investigating a possible problem. + +When asking a question, people will be better able to provide help if you provide **code** that they can easily understand and use to **reproduce** the problem. This is referred to by community members as creating a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example). 
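A hypothetical report of this shape is usually enough; the model name and image URL below are placeholders, assuming the standard PyTorch Hub entrypoint shown in the README:

```python
import torch

# minimal reproduction sketch: the smallest model plus one public image that triggers the problem
model = torch.hub.load('ultralytics/yolov5', 'yolov5s')
results = model('https://ultralytics.com/images/zidane.jpg')
results.print()  # include the full console output and traceback with your report
```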
Your code that reproduces the problem should be: + +* ✅ **Minimal** – Use as little code as possible that still produces the same problem +* ✅ **Complete** – Provide **all** parts someone else needs to reproduce your problem in the question itself +* ✅ **Reproducible** – Test the code you're about to provide to make sure it reproduces the problem + +In addition to the above requirements, for [Ultralytics](https://ultralytics.com/) to provide assistance your code should be: + +* ✅ **Current** – Verify that your code is up-to-date with current GitHub [master](https://github.com/ultralytics/yolov5/tree/master), and if necessary `git pull` or `git clone` a new copy to ensure your problem has not already been resolved by previous commits. +* ✅ **Unmodified** – Your problem must be reproducible without any modifications to the codebase in this repository. [Ultralytics](https://ultralytics.com/) does not provide support for custom code ⚠️. + +If you believe your problem meets all of the above criteria, please close this issue and raise a new one using the 🐛 **Bug Report** [template](https://github.com/ultralytics/yolov5/issues/new/choose) and providing a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example) to help us better understand and diagnose your problem. + + +## License + +By contributing, you agree that your contributions will be licensed under the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/) diff --git a/README.md b/README.md index 08a6eb272117..e3caa6d77f14 100755 --- a/README.md +++ b/README.md @@ -1,70 +1,136 @@ +
+

-  - +

+
+
CI CPU testing +Open In Kaggle +
+Open In Colab +Open In Kaggle +Docker Pulls +
+
+ + +
+

+YOLOv5 🚀 is a family of object detection architectures and models pretrained on the COCO dataset, and represents Ultralytics + open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development. +

+ + + +
+ + +##
Documentation
+ +See the [YOLOv5 Docs](https://docs.ultralytics.com) for full documentation on training, testing and deployment. + + +##
Quick Start Examples
+ + +
+ +Install + + +Python >= 3.6.0 required with all [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) dependencies installed: + +```bash +$ git clone https://github.com/ultralytics/yolov5 +$ pip install -r requirements.txt +``` +
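A quick way to confirm a fresh environment satisfies these requirements is a check along the following lines (a minimal sketch, assuming the install above completed):

```python
import sys

import torch

# Minimal environment sanity check (illustrative only)
assert sys.version_info >= (3, 6), 'Python >= 3.6.0 required'
print(f'Python {sys.version.split()[0]}, torch {torch.__version__}')
print(f'CUDA available: {torch.cuda.is_available()}')
```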
-This repository represents Ultralytics open-source research into future object detection methods, and incorporates lessons learned and best practices evolved over thousands of hours of training and evolution on anonymized client datasets. **All code and models are under active development, and are subject to modification or deletion without notice.** Use at your own risk. +
+Inference -

-
- YOLOv5-P5 640 Figure (click to expand) - -

-
-
- Figure Notes (click to expand) - - * GPU Speed measures end-to-end time per image averaged over 5000 COCO val2017 images using a V100 GPU with batch size 32, and includes image preprocessing, PyTorch FP16 inference, postprocessing and NMS. - * EfficientDet data from [google/automl](https://github.com/google/automl) at batch size 8. - * **Reproduce** by `python test.py --task study --data coco.yaml --iou 0.7 --weights yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt` -
+Inference with YOLOv5 and [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36). Models automatically download from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). -- **April 11, 2021**: [v5.0 release](https://github.com/ultralytics/yolov5/releases/tag/v5.0): YOLOv5-P6 1280 models, [AWS](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart), [Supervise.ly](https://github.com/ultralytics/yolov5/issues/2518) and [YouTube](https://github.com/ultralytics/yolov5/pull/2752) integrations. -- **January 5, 2021**: [v4.0 release](https://github.com/ultralytics/yolov5/releases/tag/v4.0): nn.SiLU() activations, [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme) logging, [PyTorch Hub](https://pytorch.org/hub/ultralytics_yolov5/) integration. -- **August 13, 2020**: [v3.0 release](https://github.com/ultralytics/yolov5/releases/tag/v3.0): nn.Hardswish() activations, data autodownload, native AMP. -- **July 23, 2020**: [v2.0 release](https://github.com/ultralytics/yolov5/releases/tag/v2.0): improved model definition, training and mAP. +```python +import torch +# Model +model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # or yolov5m, yolov5x, custom -## Pretrained Checkpoints +# Images +img = 'https://ultralytics.com/images/zidane.jpg' # or file, PIL, OpenCV, numpy, multiple -[assets]: https://github.com/ultralytics/yolov5/releases +# Inference +results = model(img) -|Model |size
(pixels) |mAPval 0.5:0.95 |mAPtest 0.5:0.95 |mAPval 0.5 |Speed V100 (ms) | |params (M) |FLOPs
640 (B) -|--- |--- |--- |--- |--- |--- |---|--- |--- -|[YOLOv5s][assets] |640 |36.7 |36.7 |55.4 |**2.0** | |7.3 |17.0 -|[YOLOv5m][assets] |640 |44.5 |44.5 |63.1 |2.7 | |21.4 |51.3 -|[YOLOv5l][assets] |640 |48.2 |48.2 |66.9 |3.8 | |47.0 |115.4 -|[YOLOv5x][assets] |640 |**50.4** |**50.4** |**68.8** |6.1 | |87.7 |218.8 -| | | | | | || | -|[YOLOv5s6][assets] |1280 |43.3 |43.3 |61.9 |**4.3** | |12.7 |17.4 -|[YOLOv5m6][assets] |1280 |50.5 |50.5 |68.7 |8.4 | |35.9 |52.4 -|[YOLOv5l6][assets] |1280 |53.4 |53.4 |71.1 |12.3 | |77.2 |117.7 -|[YOLOv5x6][assets] |1280 |**54.4** |**54.4** |**72.0** |22.4 | |141.8 |222.9 -| | | | | | || | -|[YOLOv5x6][assets] TTA |1280 |**55.0** |**55.0** |**72.0** |70.8 | |- |- +# Results +results.print() # or .show(), .save(), .crop(), .pandas(), etc. +``` -
- Table Notes (click to expand) - - * APtest denotes COCO [test-dev2017](http://cocodataset.org/#upload) server results, all other AP results denote val2017 accuracy. - * AP values are for single-model single-scale unless otherwise noted. **Reproduce mAP** by `python test.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` - * SpeedGPU averaged over 5000 COCO val2017 images using a GCP [n1-standard-16](https://cloud.google.com/compute/docs/machine-types#n1_standard_machine_types) V100 instance, and includes FP16 inference, postprocessing and NMS. **Reproduce speed** by `python test.py --data coco.yaml --img 640 --conf 0.25 --iou 0.45` - * All checkpoints are trained to 300 epochs with default settings and hyperparameters (no autoaugmentation). - * Test Time Augmentation ([TTA](https://github.com/ultralytics/yolov5/issues/303)) includes reflection and scale augmentation. **Reproduce TTA** by `python test.py --data coco.yaml --img 1536 --iou 0.7 --augment`
-## Requirements -Python 3.8 or later with all [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) dependencies installed, including `torch>=1.7`. To install run: - +
+Inference with detect.py + +`detect.py` runs inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases) and saving results to `runs/detect`. ```bash -$ pip install -r requirements.txt +$ python detect.py --source 0 # webcam + file.jpg # image + file.mp4 # video + path/ # directory + path/*.jpg # glob + 'https://youtu.be/NUsoVlDFqZg' # YouTube video + 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream ``` +
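The same kinds of sources can also be driven from Python through the PyTorch Hub model shown above; for example, a single webcam frame (a sketch, assuming `opencv-python` is installed):

```python
import cv2
import torch

model = torch.hub.load('ultralytics/yolov5', 'yolov5s')
cap = cv2.VideoCapture(0)  # source 0 = default webcam
ok, frame = cap.read()     # frame is a BGR numpy array
if ok:
    results = model(frame[..., ::-1])  # BGR -> RGB before inference
    results.print()
cap.release()
```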
+ +
+Training + +Run commands below to reproduce results on [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh) dataset (dataset auto-downloads on first use). Training times for YOLOv5s/m/l/x are 2/4/6/8 days on a single V100 (multi-GPU times faster). Use the largest `--batch-size` your GPU allows (batch sizes shown for 16 GB devices). +```bash +$ python train.py --data coco.yaml --cfg yolov5s.yaml --weights '' --batch-size 64 + yolov5m 40 + yolov5l 24 + yolov5x 16 +``` + + +
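The batch sizes above assume ~16 GB of GPU memory; scaling them in proportion to available memory is a reasonable starting point (a back-of-envelope sketch, not an official guideline):

```python
import torch

if torch.cuda.is_available():
    gb = torch.cuda.get_device_properties(0).total_memory / 1E9
    print(f'GPU 0 memory: {gb:.1f} GB')
    for name, bs in {'yolov5s': 64, 'yolov5m': 40, 'yolov5l': 24, 'yolov5x': 16}.items():
        print(f'{name}: try --batch-size {max(1, int(bs * gb / 16))}')  # table values scaled from 16 GB
```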
-## Tutorials +
+Tutorials * [Train Custom Data](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data)  🚀 RECOMMENDED * [Tips for Best Training Results](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results)  ☘️ RECOMMENDED @@ -80,91 +146,126 @@ $ pip install -r requirements.txt * [Transfer Learning with Frozen Layers](https://github.com/ultralytics/yolov5/issues/1314)  ⭐ NEW * [TensorRT Deployment](https://github.com/wang-xinyu/tensorrtx) +
-## Environments - -YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled): - -- **Google Colab and Kaggle** notebooks with free GPU: Open In Colab Open In Kaggle -- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart) -- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart) -- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) Docker Pulls - - -## Inference - -`detect.py` runs inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases) and saving results to `runs/detect`. -```bash -$ python detect.py --source 0 # webcam - file.jpg # image - file.mp4 # video - path/ # directory - path/*.jpg # glob - 'https://youtu.be/NUsoVlDFqZg' # YouTube video - 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream -``` - -To run inference on example images in `data/images`: -```bash -$ python detect.py --source data/images --weights yolov5s.pt --conf 0.25 - -Namespace(agnostic_nms=False, augment=False, classes=None, conf_thres=0.25, device='', exist_ok=False, img_size=640, iou_thres=0.45, name='exp', project='runs/detect', save_conf=False, save_txt=False, source='data/images/', update=False, view_img=False, weights=['yolov5s.pt']) -YOLOv5 v4.0-96-g83dc1b4 torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB) - -Fusing layers... -Model Summary: 224 layers, 7266973 parameters, 0 gradients, 17.0 GFLOPs -image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, Done. (0.010s) -image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 1 tie, Done. (0.011s) -Results saved to runs/detect/exp2 -Done. (0.103s) -``` - - -### PyTorch Hub -Inference with YOLOv5 and [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36): -```python -import torch +##
Environments and Integrations
-# Model -model = torch.hub.load('ultralytics/yolov5', 'yolov5s') +Get started in seconds with our verified environments and integrations, including [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme) for automatic YOLOv5 experiment logging. Click each icon below for details. -# Image -img = 'https://ultralytics.com/images/zidane.jpg' + -# Inference -results = model(img) -results.print() # or .show(), .save() -``` +##
Compete and Win
-## Training +We are super excited about our first-ever Ultralytics YOLOv5 🚀 EXPORT Competition with **$10,000** in cash prizes! -Run commands below to reproduce results on [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh) dataset (dataset auto-downloads on first use). Training times for YOLOv5s/m/l/x are 2/4/6/8 days on a single V100 (multi-GPU times faster). Use the largest `--batch-size` your GPU allows (batch sizes shown for 16 GB devices). -```bash -$ python train.py --data coco.yaml --cfg yolov5s.yaml --weights '' --batch-size 64 - yolov5m 40 - yolov5l 24 - yolov5x 16 -``` - +
+ + + +
-## Citation +##
Why YOLOv5
-[![DOI](https://zenodo.org/badge/264818686.svg)](https://zenodo.org/badge/latestdoi/264818686) +

+
+ YOLOv5-P5 640 Figure (click to expand) + +

+
+
+ Figure Notes (click to expand) + + * GPU Speed measures end-to-end time per image averaged over 5000 COCO val2017 images using a V100 GPU with batch size 32, and includes image preprocessing, PyTorch FP16 inference, postprocessing and NMS. + * EfficientDet data from [google/automl](https://github.com/google/automl) at batch size 8. + * **Reproduce** by `python test.py --task study --data coco.yaml --iou 0.7 --weights yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt` +
-## About Us +### Pretrained Checkpoints -Ultralytics is a U.S.-based particle physics and AI startup with over 6 years of expertise supporting government, academic and business clients. We offer a wide range of vision AI services, spanning from simple expert advice up to delivery of fully customized, end-to-end production solutions, including: -- **Cloud-based AI** systems operating on **hundreds of HD video streams in realtime.** -- **Edge AI** integrated into custom iOS and Android apps for realtime **30 FPS video inference.** -- **Custom data training**, hyperparameter evolution, and model exportation to any destination. +[assets]: https://github.com/ultralytics/yolov5/releases -For business inquiries and professional support requests please visit us at https://ultralytics.com. +|Model |size
(pixels) |mAPval 0.5:0.95 |mAPtest 0.5:0.95 |mAPval 0.5 |Speed V100 (ms) | |params (M) |FLOPs
640 (B) +|--- |--- |--- |--- |--- |--- |---|--- |--- +|[YOLOv5s][assets] |640 |36.7 |36.7 |55.4 |**2.0** | |7.3 |17.0 +|[YOLOv5m][assets] |640 |44.5 |44.5 |63.1 |2.7 | |21.4 |51.3 +|[YOLOv5l][assets] |640 |48.2 |48.2 |66.9 |3.8 | |47.0 |115.4 +|[YOLOv5x][assets] |640 |**50.4** |**50.4** |**68.8** |6.1 | |87.7 |218.8 +| | | | | | | | | +|[YOLOv5s6][assets] |1280 |43.3 |43.3 |61.9 |**4.3** | |12.7 |17.4 +|[YOLOv5m6][assets] |1280 |50.5 |50.5 |68.7 |8.4 | |35.9 |52.4 +|[YOLOv5l6][assets] |1280 |53.4 |53.4 |71.1 |12.3 | |77.2 |117.7 +|[YOLOv5x6][assets] |1280 |**54.4** |**54.4** |**72.0** |22.4 | |141.8 |222.9 +| | | | | | | | | +|[YOLOv5x6][assets] TTA |1280 |**55.0** |**55.0** |**72.0** |70.8 | |- |- +
+ Table Notes (click to expand) + + * APtest denotes COCO [test-dev2017](http://cocodataset.org/#upload) server results, all other AP results denote val2017 accuracy. + * AP values are for single-model single-scale unless otherwise noted. **Reproduce mAP** by `python test.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` + * SpeedGPU averaged over 5000 COCO val2017 images using a GCP [n1-standard-16](https://cloud.google.com/compute/docs/machine-types#n1_standard_machine_types) V100 instance, and includes FP16 inference, postprocessing and NMS. **Reproduce speed** by `python test.py --data coco.yaml --img 640 --conf 0.25 --iou 0.45` + * All checkpoints are trained to 300 epochs with default settings and hyperparameters (no autoaugmentation). + * Test Time Augmentation ([TTA](https://github.com/ultralytics/yolov5/issues/303)) includes reflection and scale augmentation. **Reproduce TTA** by `python test.py --data coco.yaml --img 1536 --iou 0.7 --augment` +
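TTA is also available at inference time through the PyTorch Hub interface; a minimal sketch (the `size` and `augment` keyword arguments are assumed from the Hub autoShape API):

```python
import torch

model = torch.hub.load('ultralytics/yolov5', 'yolov5x6')
results = model('https://ultralytics.com/images/zidane.jpg', size=1280, augment=True)  # scale/flip TTA
results.print()
```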
-## Contact -**Issues should be raised directly in the repository.** For business inquiries or professional support requests please visit https://ultralytics.com or email Glenn Jocher at glenn.jocher@ultralytics.com. +##
Contribute
+ +We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible. Please see our [Contributing Guide](CONTRIBUTING.md) to get started. + + +##
Contact
+ +For issues running YOLOv5 please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues). For business or professional support requests please visit +[https://ultralytics.com/contact](https://ultralytics.com/contact). + +
+ + From 6062319ec3415573f280ece31cdb0d5585e032c4 Mon Sep 17 00:00:00 2001 From: masood azhar Date: Mon, 14 Jun 2021 03:28:18 -0700 Subject: [PATCH 072/757] Update datasets.py (#3591) * 'changes-in_dataset' * Update datasets.py Co-authored-by: Glenn Jocher From 239a11c19777d8b5d4e2a69aac2cc83796313fd3 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 14 Jun 2021 13:58:54 +0200 Subject: [PATCH 073/757] Download COCO and VOC by default (#3608) --- utils/aws/userdata.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/aws/userdata.sh b/utils/aws/userdata.sh index 5846fedb16f9..52c0fe33d90f 100644 --- a/utils/aws/userdata.sh +++ b/utils/aws/userdata.sh @@ -9,7 +9,8 @@ if [ ! -d yolov5 ]; then echo "Running first-time script." # install dependencies, download COCO, pull Docker git clone https://github.com/ultralytics/yolov5 -b master && sudo chmod -R 777 yolov5 cd yolov5 - bash data/scripts/get_coco.sh && echo "Data done." & + bash data/scripts/get_coco.sh && echo "COCO done." & + bash data/scripts/get_voc.sh && echo "VOC done." & sudo docker pull ultralytics/yolov5:latest && echo "Docker done." & python -m pip install --upgrade pip && pip install -r requirements.txt && python detect.py && echo "Requirements done." & wait && echo "All tasks done." # finish background tasks From daab682b06f8416319c99bdf25aec56616bf6ac1 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Mon, 14 Jun 2021 22:24:58 +0530 Subject: [PATCH 074/757] Suppress wandb images size mismatch warning (#3611) * supress wandb images size mismatch warning * supress wandb images size mismatch warning * PEP8 reformat and optimize imports Co-authored-by: Glenn Jocher --- utils/wandb_logging/wandb_utils.py | 32 +++++++++++++++++++++++------- 1 file changed, 25 insertions(+), 7 deletions(-) diff --git a/utils/wandb_logging/wandb_utils.py b/utils/wandb_logging/wandb_utils.py index 9975af63d02c..7652f964f2c0 100644 --- a/utils/wandb_logging/wandb_utils.py +++ b/utils/wandb_logging/wandb_utils.py @@ -1,16 +1,16 @@ """Utilities and tools for tracking runs with Weights & Biases.""" -import json +import logging import sys +from contextlib import contextmanager from pathlib import Path -import torch import yaml from tqdm import tqdm sys.path.append(str(Path(__file__).parent.parent.parent)) # add utils/ to path from utils.datasets import LoadImagesAndLabels from utils.datasets import img2label_paths -from utils.general import colorstr, xywh2xyxy, check_dataset, check_file +from utils.general import colorstr, check_dataset, check_file try: import wandb @@ -92,6 +92,7 @@ class WandbLogger(): For more on how this logger is used, see the Weights & Biases documentation: https://docs.wandb.com/guides/integrations/yolov5 """ + def __init__(self, opt, name, run_id, data_dict, job_type='Training'): # Pre-training routine -- self.job_type = job_type @@ -272,7 +273,7 @@ def create_dataset_table(self, dataset, class_to_id, name='dataset'): "box_caption": "%s" % (class_to_id[cls])}) img_classes[cls] = class_to_id[cls] boxes = {"ground_truth": {"box_data": box_data, "class_labels": class_to_id}} # inference-space - table.add_data(si, wandb.Image(paths, classes=class_set, boxes=boxes), json.dumps(img_classes), + table.add_data(si, wandb.Image(paths, classes=class_set, boxes=boxes), list(img_classes.values()), Path(paths).name) artifact.add(table, name) return artifact @@ -306,8 +307,9 @@ def log(self, log_dict): def end_epoch(self, best_result=False): if self.wandb_run: - wandb.log(self.log_dict) - self.log_dict = {} + with 
all_logging_disabled(): + wandb.log(self.log_dict) + self.log_dict = {} if self.result_artifact: train_results = wandb.JoinedTable(self.val_table, self.result_table, "id") self.result_artifact.add(train_results, 'result') @@ -319,5 +321,21 @@ def end_epoch(self, best_result=False): def finish_run(self): if self.wandb_run: if self.log_dict: - wandb.log(self.log_dict) + with all_logging_disabled(): + wandb.log(self.log_dict) wandb.run.finish() + + +@contextmanager +def all_logging_disabled(highest_level=logging.CRITICAL): + """ source - https://gist.github.com/simon-weber/7853144 + A context manager that will prevent any logging messages triggered during the body from being processed. + :param highest_level: the maximum logging level in use. + This would only need to be changed if a custom level greater than CRITICAL is defined. + """ + previous_level = logging.root.manager.disable + logging.disable(highest_level) + try: + yield + finally: + logging.disable(previous_level) From 4c5d9bff80526b1120b2f78ce81bd20ec1a50b4e Mon Sep 17 00:00:00 2001 From: Wei Quan Date: Tue, 15 Jun 2021 05:24:56 -0400 Subject: [PATCH 075/757] Fix incorrect end epoch comment (#3612) --- train.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/train.py b/train.py index 142268b273f1..85bdf1bf9a1f 100644 --- a/train.py +++ b/train.py @@ -341,8 +341,7 @@ def train(hyp, opt, device, tb_writer=None): save_dir.glob('train*.jpg') if x.exists()]}) # end batch ------------------------------------------------------------------------------------------------ - # end epoch ---------------------------------------------------------------------------------------------------- - + # Scheduler lr = [x['lr'] for x in optimizer.param_groups] # for tensorboard scheduler.step() From 7d3686a686478c78beb2b32cf8a35c1a5dbe81b8 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 15 Jun 2021 13:21:04 +0200 Subject: [PATCH 076/757] Update `check_file()` (#3622) * Update `check_file()` * Update datasets.py --- utils/datasets.py | 2 +- utils/general.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index f18569a7665b..0bb657f30414 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -1095,7 +1095,7 @@ def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False): autodownload: Attempt to download dataset if not found locally verbose: Print stats dictionary """ - with open(check_file(Path(path))) as f: + with open(check_file(path)) as f: data = yaml.safe_load(f) # data dict check_dataset(data, autodownload) # download dataset if missing nc = data['nc'] # number of classes diff --git a/utils/general.py b/utils/general.py index 1d61f16d7771..3e3bd6997a7c 100755 --- a/utils/general.py +++ b/utils/general.py @@ -206,9 +206,9 @@ def check_file(file): file = str(file) # convert to str() if Path(file).is_file() or file == '': # exists return file - elif file.startswith(('http://', 'https://')): # download - url, file = file, Path(urllib.parse.unquote(str(file))).name # url, file (decode '%2F' to '/' etc.) - file = file.split('?')[0] # parse authentication https://url.com/file.txt?auth... 
+ elif file.startswith(('http:/', 'https:/')): # download + url = str(Path(file)).replace(':/', '://') # Pathlib turns :// -> :/ + file = Path(urllib.parse.unquote(file)).name.split('?')[0] # '%2F' to '/', split https://url.com/file.txt?auth print(f'Downloading {url} to {file}...') torch.hub.download_url_to_file(url, file) assert Path(file).exists() and Path(file).stat().st_size > 0, f'File download failed: {url}' # check From 30e4c4f09297b67afedf8b2bcd851833ddc9dead Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 15 Jun 2021 15:44:10 +0200 Subject: [PATCH 077/757] Update README.md (#3624) --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index e3caa6d77f14..57188f687cc1 100755 --- a/README.md +++ b/README.md @@ -70,6 +70,7 @@ Python >= 3.6.0 required with all [requirements.txt](https://github.com/ultralyt ```bash $ git clone https://github.com/ultralytics/yolov5 +$ cd yolov5 $ pip install -r requirements.txt ```
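The `check_file()` rewrite above leans on a `pathlib` quirk: stringifying a URL collapses `://` to `:/`, which the code then restores. A standard-library round-trip sketch (the URL is a placeholder):

```python
import urllib.parse
from pathlib import Path

file = 'https://url.com/models%2Fyolov5s.pt?auth=123'
url = str(Path(file)).replace(':/', '://')  # Pathlib turns :// -> :/, so restore it
name = Path(urllib.parse.unquote(file)).name.split('?')[0]  # decode '%2F', drop '?auth...'
print(url)   # https://url.com/models%2Fyolov5s.pt?auth=123
print(name)  # yolov5s.pt
```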
From de56813ba8165fdbcaad2618beea693bd02ea6a5 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 16 Jun 2021 10:40:31 +0200 Subject: [PATCH 078/757] FROM nvcr.io/nvidia/pytorch:21.05-py3 (#3633) --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index b47e5bbff194..be19e3036187 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # Start FROM Nvidia PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch -FROM nvcr.io/nvidia/pytorch:21.03-py3 +FROM nvcr.io/nvidia/pytorch:21.05-py3 # Install linux packages RUN apt update && apt install -y zip htop screen libgl1-mesa-glx From 65f81bfefa7ea1f4fdd019dae9b675b7914e0c21 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 16 Jun 2021 10:49:50 +0200 Subject: [PATCH 079/757] Add `**/*.torchscript.pt` (#3634) --- .dockerignore | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.dockerignore b/.dockerignore index 3c6b6ab02e03..9c9663f006ca 100644 --- a/.dockerignore +++ b/.dockerignore @@ -12,12 +12,12 @@ data/samples/* *.jpg # Neural Network weights ----------------------------------------------------------------------------------------------- -**/*.weights **/*.pt **/*.pth **/*.onnx **/*.mlmodel **/*.torchscript +**/*.torchscript.pt # Below Copied From .gitignore ----------------------------------------------------------------------------------------- From 6c0e1d9fd7fe83a28972d4f35b2111553de0fcb6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 16 Jun 2021 11:12:15 +0200 Subject: [PATCH 080/757] Update `verify_image_label()` (#3635) --- utils/datasets.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index 0bb657f30414..20109e739c02 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -1046,20 +1046,20 @@ def autosplit(path='../coco128', weights=(0.9, 0.1, 0.0), annotated_only=False): f.write(str(img) + '\n') # add image to txt file -def verify_image_label(params): +def verify_image_label(args): # Verify one image-label pair - im_file, lb_file, prefix = params + im_file, lb_file, prefix = args nm, nf, ne, nc = 0, 0, 0, 0 # number missing, found, empty, corrupt try: # verify images im = Image.open(im_file) im.verify() # PIL verify shape = exif_size(im) # image size - segments = [] # instance segments assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels' assert im.format.lower() in img_formats, f'invalid image format {im.format}' # verify labels + segments = [] # instance segments if os.path.isfile(lb_file): nf = 1 # label found with open(lb_file, 'r') as f: @@ -1084,7 +1084,7 @@ def verify_image_label(params): except Exception as e: nc = 1 logging.info(f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}') - return [None] * 4 + [nm, nf, ne, nc] + return [None, None, None, None, nm, nf, ne, nc] def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False): From bb79e13d521c54b20b06555fe79cdff055f28721 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 16 Jun 2021 12:35:33 +0200 Subject: [PATCH 081/757] RUN pip install --no-cache -U torch torchvision (#3637) --- Dockerfile | 1 + 1 file changed, 1 insertion(+) diff --git a/Dockerfile b/Dockerfile index be19e3036187..ecf6d1e3723c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -9,6 +9,7 @@ COPY requirements.txt . 
RUN python -m pip install --upgrade pip RUN pip uninstall -y nvidia-tensorboard nvidia-tensorboard-plugin-dlprof RUN pip install --no-cache -r requirements.txt coremltools onnx gsutil notebook +RUN pip install --no-cache -U torch torchvision # Create working directory RUN mkdir -p /usr/src/app From d808855f7703f12025c0a169136c624397add112 Mon Sep 17 00:00:00 2001 From: xiaowk5516 <59595896+xiaowk5516@users.noreply.github.com> Date: Wed, 16 Jun 2021 19:31:26 +0800 Subject: [PATCH 082/757] Assert non-premature end of JPEG images (#3638) * premature end of JPEG images * PEP8 reformat Co-authored-by: Glenn Jocher --- utils/datasets.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/utils/datasets.py b/utils/datasets.py index 20109e739c02..a1a8fa8f32a9 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -1057,6 +1057,10 @@ def verify_image_label(args): shape = exif_size(im) # image size assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels' assert im.format.lower() in img_formats, f'invalid image format {im.format}' + if im.format.lower() in ('jpg', 'jpeg'): + with open(im_file, 'rb') as f: + f.seek(-2, 2) + assert f.read() == b'\xff\xd9', 'corrupted JPEG' # verify labels segments = [] # instance segments From 3ce0db89b05e62b352befdadc33a148088a33e03 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 16 Jun 2021 19:48:30 +0200 Subject: [PATCH 083/757] Update CONTRIBUTING.md (#3645) * Update CONTRIBUTING.md * Update CONTRIBUTING.md * Update CONTRIBUTING.md * Update CONTRIBUTING.md --- CONTRIBUTING.md | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index acf74448c1fd..09d93b0573ba 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -12,6 +12,25 @@ YOLOv5 works so well due to our combined community effort, and for every small i ## Submitting a Pull Request (PR) 🛠️ +Submitting a PR is easy! This example shows how to submit a PR for updating `requirements.txt` in 4 steps: + +### 1. Select File to Update +Select `requirements.txt` to update by clicking on it in GitHub. +PR_step1 + +### 2. Click 'Edit this file' +Button is in top-right corner. +PR_step2 + +### 3. Make Changes +Change `matplotlib` version from `3.2.2` to `3.3`. +PR_step3 + +### 4. Preview Changes and Submit PR +Click on the **Preview changes** tab to verify your updates. At the bottom of the screen select 'Create a **new branch** for this commit', assign your branch a descriptive name such as `fix/matplotlib_version` and click the green **Propose changes** button. All done, your PR is now submitted to YOLOv5 for review and approval 😃! +PR_step4 + +### PR recommendations To allow your work to be integrated as seamlessly as possible, we advise you to: - ✅ Verify your PR is **up-to-date with origin/master.** If your PR is behind origin/master an automatic [GitHub actions](https://github.com/ultralytics/yolov5/blob/master/.github/workflows/rebase.yml) rebase may be attempted by including the /rebase command in a comment body, or by running the following code, replacing 'feature' with the name of your local branch: @@ -28,7 +47,9 @@ git push -u origin -f ## Submitting a Bug Report 🐛 -For us to investigate an issue we would need to be able to reproduce it ourselves first. We've created a few short guidelines below to help users provide what we need in order to get started investigating a possible problem. +If you spot a problem with YOLOv5 please submit a Bug Report! 
+
+For us to start investigating a possible problem we need to be able to reproduce it ourselves first. We've created a few short guidelines below to help users provide what we need in order to get started.
 
 When asking a question, people will be better able to provide help if you provide **code** that they can easily understand and use to **reproduce** the problem. This is referred to by community members as creating a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example). Your code that reproduces the problem should be:
 

From 6187edcb53eb7982a23c5b0d3f1ab35d5d906ba6 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Wed, 16 Jun 2021 19:57:11 +0200
Subject: [PATCH 084/757] Update CONTRIBUTING.md (#3647)

* Update CONTRIBUTING.md

* Update CONTRIBUTING.md

* Update CONTRIBUTING.md

* Update CONTRIBUTING.md

---
 CONTRIBUTING.md | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 09d93b0573ba..7c0ba3ae9f18 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -16,19 +16,19 @@ Submitting a PR is easy! This example shows how to submit a PR for updating `req
 
 ### 1. Select File to Update
 Select `requirements.txt` to update by clicking on it in GitHub.
-PR_step1
+

PR_step1

### 2. Click 'Edit this file' Button is in top-right corner. -PR_step2 +

PR_step2

### 3. Make Changes Change `matplotlib` version from `3.2.2` to `3.3`. -PR_step3 +

PR_step3

### 4. Preview Changes and Submit PR Click on the **Preview changes** tab to verify your updates. At the bottom of the screen select 'Create a **new branch** for this commit', assign your branch a descriptive name such as `fix/matplotlib_version` and click the green **Propose changes** button. All done, your PR is now submitted to YOLOv5 for review and approval 😃! -PR_step4 +

PR_step4

### PR recommendations From fa29125f1816e87b44763675bc661452868fdced Mon Sep 17 00:00:00 2001 From: Mai Thanh Minh Date: Thu, 17 Jun 2021 03:56:16 +0700 Subject: [PATCH 085/757] `is_coco` list fix (#3646) --- test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test.py b/test.py index cbc97b420155..a38298da54da 100644 --- a/test.py +++ b/test.py @@ -78,7 +78,7 @@ def test(data, with open(data) as f: data = yaml.safe_load(f) check_dataset(data) # check - is_coco = data['val'].endswith('coco/val2017.txt') # COCO dataset + is_coco = type(data['val']) is str and data['val'].endswith('coco/val2017.txt') # COCO dataset nc = 1 if single_cls else int(data['nc']) # number of classes iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95 niou = iouv.numel() From df7706d8026c8277fa47ab04a89238f16c9a19b7 Mon Sep 17 00:00:00 2001 From: SpongeBab <2078825250@qq.com> Date: Thu, 17 Jun 2021 18:37:53 +0800 Subject: [PATCH 086/757] Update README.md (#3650) Be more user-friendly to new users --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 57188f687cc1..ab8f60169947 100755 --- a/README.md +++ b/README.md @@ -130,7 +130,7 @@ $ python train.py --data coco.yaml --cfg yolov5s.yaml --weights '' --batch-size
-
+
Tutorials * [Train Custom Data](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data)  🚀 RECOMMENDED From 9b6dba6207182f5b1bca596a947fc32d4150db2f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 17 Jun 2021 13:59:52 +0200 Subject: [PATCH 087/757] Update `dataset_stats()` to list of dicts (#3657) * Update `dataset_stats()` to list of dicts @KalenMike * Update datasets.py --- utils/datasets.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index a1a8fa8f32a9..bcb8c36e0e64 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -1099,6 +1099,11 @@ def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False): autodownload: Attempt to download dataset if not found locally verbose: Print stats dictionary """ + + def round_labels(labels): + # Update labels to integer class and 6 decimal place floats + return [[int(c), *[round(x, 6) for x in points]] for c, *points in labels] + with open(check_file(path)) as f: data = yaml.safe_load(f) # data dict check_dataset(data, autodownload) # download dataset if missing @@ -1118,12 +1123,13 @@ def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False): stats[split] = {'instance_stats': {'total': int(x.sum()), 'per_class': x.sum(0).tolist()}, 'image_stats': {'total': dataset.n, 'unlabelled': int(np.all(x == 0, 1).sum()), 'per_class': (x > 0).sum(0).tolist()}, - 'labels': {str(Path(k).name): v.tolist() for k, v in zip(dataset.img_files, dataset.labels)}} + 'labels': [{str(Path(k).name): round_labels(v.tolist())} for k, v in + zip(dataset.img_files, dataset.labels)]} # Save, print and return with open(cache_path.with_suffix('.json'), 'w') as f: json.dump(stats, f) # save stats *.json if verbose: - print(yaml.dump([stats], sort_keys=False, default_flow_style=False)) - # print(json.dumps(stats, indent=2, sort_keys=False)) + print(json.dumps(stats, indent=2, sort_keys=False)) + # print(yaml.dump([stats], sort_keys=False, default_flow_style=False)) return stats From 2754adad463e6f097521946f20e601c8370a6728 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 17 Jun 2021 14:44:12 +0200 Subject: [PATCH 088/757] Remove `/weights` directory (#3659) * Remove `/weights` directory * cleanup --- .github/workflows/ci-testing.yml | 10 +++++----- Dockerfile | 3 --- {weights => data/scripts}/download_weights.sh | 0 detect.py | 4 ++-- 4 files changed, 7 insertions(+), 10 deletions(-) rename {weights => data/scripts}/download_weights.sh (100%) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index bb8b173cdb31..36318f6ae562 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -64,15 +64,15 @@ jobs: di=cpu # inference devices # define device # train - python train.py --img 128 --batch 16 --weights weights/${{ matrix.model }}.pt --cfg models/${{ matrix.model }}.yaml --epochs 1 --device $di + python train.py --img 128 --batch 16 --weights ${{ matrix.model }}.pt --cfg ${{ matrix.model }}.yaml --epochs 1 --device $di # detect - python detect.py --weights weights/${{ matrix.model }}.pt --device $di + python detect.py --weights ${{ matrix.model }}.pt --device $di python detect.py --weights runs/train/exp/weights/last.pt --device $di # test - python test.py --img 128 --batch 16 --weights weights/${{ matrix.model }}.pt --device $di + python test.py --img 128 --batch 16 --weights ${{ matrix.model }}.pt --device $di python test.py --img 128 --batch 16 --weights runs/train/exp/weights/last.pt --device $di 
python hubconf.py # hub - python models/yolo.py --cfg models/${{ matrix.model }}.yaml # inspect - python models/export.py --img 128 --batch 1 --weights weights/${{ matrix.model }}.pt # export + python models/yolo.py --cfg ${{ matrix.model }}.yaml # inspect + python models/export.py --img 128 --batch 1 --weights ${{ matrix.model }}.pt # export shell: bash diff --git a/Dockerfile b/Dockerfile index ecf6d1e3723c..d32e3960046b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -46,8 +46,5 @@ ENV HOME=/usr/src/app # Bash into stopped container # id=$(sudo docker ps -qa) && sudo docker start $id && sudo docker exec -it $id bash -# Send weights to GCP -# python -c "from utils.general import *; strip_optimizer('runs/train/exp0_*/weights/best.pt', 'tmp.pt')" && gsutil cp tmp.pt gs://*.pt - # Clean up # docker system prune -a --volumes diff --git a/weights/download_weights.sh b/data/scripts/download_weights.sh similarity index 100% rename from weights/download_weights.sh rename to data/scripts/download_weights.sh diff --git a/detect.py b/detect.py index 67916c652742..7daa87436daa 100644 --- a/detect.py +++ b/detect.py @@ -63,8 +63,8 @@ def detect(weights='yolov5s.pt', # model.pt path(s) # Second-stage classifier classify = False if classify: - modelc = load_classifier(name='resnet101', n=2) # initialize - modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model']).to(device).eval() + modelc = load_classifier(name='resnet50', n=2) # initialize + modelc.load_state_dict(torch.load('resnet50.pt', map_location=device)['model']).to(device).eval() # Set Dataloader vid_path, vid_writer = None, None From ac34834563cfc90f499248fc14b11812da5f14af Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 17 Jun 2021 16:21:10 +0200 Subject: [PATCH 089/757] Update download_weights.sh comment (#3662) --- data/scripts/download_weights.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/scripts/download_weights.sh b/data/scripts/download_weights.sh index 43c8e31d80fd..6a279f1636fc 100755 --- a/data/scripts/download_weights.sh +++ b/data/scripts/download_weights.sh @@ -1,7 +1,7 @@ #!/bin/bash # Download latest models from https://github.com/ultralytics/yolov5/releases # Usage: -# $ bash weights/download_weights.sh +# $ bash path/to/download_weights.sh python - < Date: Thu, 17 Jun 2021 21:32:39 +0200 Subject: [PATCH 090/757] Update train.py (#3667) --- train.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/train.py b/train.py index 85bdf1bf9a1f..27f42c9a9c1d 100644 --- a/train.py +++ b/train.py @@ -22,7 +22,7 @@ from torch.utils.tensorboard import SummaryWriter from tqdm import tqdm -import test # import test.py to get mAP after each epoch +import test # for end-of-epoch mAP from models.experimental import attempt_load from models.yolo import Model from utils.autoanchor import check_anchors @@ -39,7 +39,11 @@ logger = logging.getLogger(__name__) -def train(hyp, opt, device, tb_writer=None): +def train(hyp, + opt, + device, + tb_writer=None + ): logger.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items())) save_dir, epochs, batch_size, total_batch_size, weights, rank, single_cls = \ Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank, \ @@ -341,7 +345,7 @@ def train(hyp, opt, device, tb_writer=None): save_dir.glob('train*.jpg') if x.exists()]}) # end batch ------------------------------------------------------------------------------------------------ - + # Scheduler 
lr = [x['lr'] for x in optimizer.param_groups] # for tensorboard scheduler.step() @@ -404,12 +408,11 @@ def train(hyp, opt, device, tb_writer=None): torch.save(ckpt, best) if wandb_logger.wandb: if ((epoch + 1) % opt.save_period == 0 and not final_epoch) and opt.save_period != -1: - wandb_logger.log_model( - last.parent, opt, epoch, fi, best_model=best_fitness == fi) + wandb_logger.log_model(last.parent, opt, epoch, fi, best_model=best_fitness == fi) del ckpt # end epoch ---------------------------------------------------------------------------------------------------- - # end training + # end training ----------------------------------------------------------------------------------------------------- if rank in [-1, 0]: logger.info(f'{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.\n') if plots: From fa201f968ecb552774c37da01b8ea1ac01f3d261 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 17 Jun 2021 22:03:25 +0200 Subject: [PATCH 091/757] Update `train(hyp, *args)` to accept `hyp` file or dict (#3668) --- train.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/train.py b/train.py index 27f42c9a9c1d..113d084336ad 100644 --- a/train.py +++ b/train.py @@ -39,12 +39,11 @@ logger = logging.getLogger(__name__) -def train(hyp, +def train(hyp, # path/to/hyp.yaml or hyp dictionary opt, device, tb_writer=None ): - logger.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items())) save_dir, epochs, batch_size, total_batch_size, weights, rank, single_cls = \ Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank, \ opt.single_cls @@ -56,6 +55,12 @@ def train(hyp, best = wdir / 'best.pt' results_file = save_dir / 'results.txt' + # Hyperparameters + if isinstance(hyp, str): + with open(hyp) as f: + hyp = yaml.safe_load(f) # load hyps dict + logger.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items())) + # Save run settings with open(save_dir / 'hyp.yaml', 'w') as f: yaml.safe_dump(hyp, f, sort_keys=False) @@ -529,10 +534,6 @@ def train(hyp, assert not opt.image_weights, '--image-weights argument is not compatible with DDP training' opt.batch_size = opt.total_batch_size // opt.world_size - # Hyperparameters - with open(opt.hyp) as f: - hyp = yaml.safe_load(f) # load hyps - # Train logger.info(opt) if not opt.evolve: @@ -541,7 +542,7 @@ def train(hyp, prefix = colorstr('tensorboard: ') logger.info(f"{prefix}Start with 'tensorboard --logdir {opt.project}', view at http://localhost:6006/") tb_writer = SummaryWriter(opt.save_dir) # Tensorboard - train(hyp, opt, device, tb_writer) + train(opt.hyp, opt, device, tb_writer) # Evolve hyperparameters (optional) else: @@ -575,6 +576,8 @@ def train(hyp, 'mosaic': (1, 0.0, 1.0), # image mixup (probability) 'mixup': (1, 0.0, 1.0)} # image mixup (probability) + with open(opt.hyp) as f: + hyp = yaml.safe_load(f) # load hyps dict assert opt.local_rank == -1, 'DDP mode not implemented for --evolve' opt.notest, opt.nosave = True, True # only test/save final epoch # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices From 045d5d86299a4a724fca40faaf0225ded91a68b4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 17 Jun 2021 22:12:42 +0200 Subject: [PATCH 092/757] Update TensorBoard (#3669) --- train.py | 33 +++++++++++++++++---------------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/train.py b/train.py index 113d084336ad..9d71e7056800 100644 --- 
a/train.py +++ b/train.py @@ -42,7 +42,6 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary opt, device, - tb_writer=None ): save_dir, epochs, batch_size, total_batch_size, weights, rank, single_cls = \ Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank, \ @@ -74,9 +73,16 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary with open(opt.data) as f: data_dict = yaml.safe_load(f) # data dict - # Logging- Doing this before checking the dataset. Might update data_dict - loggers = {'wandb': None} # loggers dict + # Loggers + loggers = {'wandb': None, 'tb': None} # loggers dict if rank in [-1, 0]: + # TensorBoard + if not opt.evolve: + prefix = colorstr('tensorboard: ') + logger.info(f"{prefix}Start with 'tensorboard --logdir {opt.project}', view at http://localhost:6006/") + loggers['tb'] = SummaryWriter(opt.save_dir) + + # W&B opt.hyp = hyp # add hyperparameters run_id = torch.load(weights).get('wandb_id') if weights.endswith('.pt') and os.path.isfile(weights) else None wandb_logger = WandbLogger(opt, save_dir.stem, run_id, data_dict) @@ -219,8 +225,8 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # model._initialize_biases(cf.to(device)) if plots: plot_labels(labels, names, save_dir, loggers) - if tb_writer: - tb_writer.add_histogram('classes', c, 0) + if loggers['tb']: + loggers['tb'].add_histogram('classes', c, 0) # TensorBoard # Anchors if not opt.noautoanchor: @@ -341,10 +347,10 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary if plots and ni < 3: f = save_dir / f'train_batch{ni}.jpg' # filename Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start() - if tb_writer and ni == 0: + if loggers['tb'] and ni == 0: # TensorBoard with warnings.catch_warnings(): warnings.simplefilter('ignore') # suppress jit trace warning - tb_writer.add_graph(torch.jit.trace(de_parallel(model), imgs[0:1], strict=False), []) + loggers['tb'].add_graph(torch.jit.trace(de_parallel(model), imgs[0:1], strict=False), []) elif plots and ni == 10 and wandb_logger.wandb: wandb_logger.log({'Mosaics': [wandb_logger.wandb.Image(str(x), caption=x.name) for x in save_dir.glob('train*.jpg') if x.exists()]}) @@ -352,7 +358,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # end batch ------------------------------------------------------------------------------------------------ # Scheduler - lr = [x['lr'] for x in optimizer.param_groups] # for tensorboard + lr = [x['lr'] for x in optimizer.param_groups] # for loggers scheduler.step() # DDP process 0 or single-GPU @@ -385,8 +391,8 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary 'val/box_loss', 'val/obj_loss', 'val/cls_loss', # val loss 'x/lr0', 'x/lr1', 'x/lr2'] # params for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags): - if tb_writer: - tb_writer.add_scalar(tag, x, epoch) # tensorboard + if loggers['tb']: + loggers['tb'].add_scalar(tag, x, epoch) # TensorBoard if wandb_logger.wandb: wandb_logger.log({tag: x}) # W&B @@ -537,12 +543,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Train logger.info(opt) if not opt.evolve: - tb_writer = None # init loggers - if opt.global_rank in [-1, 0]: - prefix = colorstr('tensorboard: ') - logger.info(f"{prefix}Start with 'tensorboard --logdir {opt.project}', view at http://localhost:6006/") - tb_writer = SummaryWriter(opt.save_dir) # Tensorboard - train(opt.hyp, opt, device, tb_writer) + train(opt.hyp, opt, device) # Evolve hyperparameters (optional) else: From 2296f1546fe252d7293b48ffb8e192d1e5f2a85b Mon Sep 17 
00:00:00 2001 From: Glenn Jocher Date: Thu, 17 Jun 2021 23:24:30 +0200 Subject: [PATCH 093/757] Update `WORLD_SIZE` and `RANK` retrieval (#3670) --- train.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/train.py b/train.py index 9d71e7056800..3eb866345d47 100644 --- a/train.py +++ b/train.py @@ -502,8 +502,8 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary opt = parser.parse_args() # Set DDP variables - opt.world_size = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1 - opt.global_rank = int(os.environ['RANK']) if 'RANK' in os.environ else -1 + opt.world_size = int(getattr(os.environ, 'WORLD_SIZE', 1)) + opt.global_rank = int(getattr(os.environ, 'RANK', -1)) set_logging(opt.global_rank) if opt.global_rank in [-1, 0]: check_git_status() From f527704cd32c42bc0bba9cce04601783b8563204 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 18 Jun 2021 10:21:47 +0200 Subject: [PATCH 094/757] Cache v0.3: improved corrupt image/label reporting (#3676) * Cache v0.3: improved corrupt image/label reporting Fix for https://github.com/ultralytics/yolov5/issues/3656#issuecomment-863660899 * cleanup --- utils/datasets.py | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index bcb8c36e0e64..f927abb20f5a 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -390,7 +390,7 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') # cached labels if cache_path.is_file(): cache, exists = torch.load(cache_path), True # load - if cache['hash'] != get_hash(self.label_files + self.img_files): # changed + if cache['hash'] != get_hash(self.label_files + self.img_files) or cache['version'] != 0.3: cache, exists = self.cache_labels(cache_path, prefix), False # re-cache else: cache, exists = self.cache_labels(cache_path, prefix), False # cache @@ -400,11 +400,12 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r if exists: d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted" tqdm(None, desc=prefix + d, total=n, initial=n) # display cache results + if cache['msgs']: + logging.info('\n'.join(cache['msgs'])) # display warnings assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {help_url}' # Read cache - cache.pop('hash') # remove hash - cache.pop('version') # remove version + [cache.pop(k) for k in ('hash', 'version', 'msgs')] # remove items labels, shapes, self.segments = zip(*cache.values()) self.labels = list(labels) self.shapes = np.array(shapes, dtype=np.float64) @@ -461,26 +462,31 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r def cache_labels(self, path=Path('./labels.cache'), prefix=''): # Cache dataset labels, check images and read shapes x = {} # dict - nm, nf, ne, nc = 0, 0, 0, 0 # number missing, found, empty, corrupt + nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupt, messages desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels..." 
with Pool(num_threads) as pool: pbar = tqdm(pool.imap_unordered(verify_image_label, zip(self.img_files, self.label_files, repeat(prefix))), desc=desc, total=len(self.img_files)) - for im_file, l, shape, segments, nm_f, nf_f, ne_f, nc_f in pbar: + for im_file, l, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar: nm += nm_f nf += nf_f ne += ne_f nc += nc_f if im_file: x[im_file] = [l, shape, segments] + if msg: + msgs.append(msg) pbar.desc = f"{desc}{nf} found, {nm} missing, {ne} empty, {nc} corrupted" pbar.close() + if msgs: + logging.info('\n'.join(msgs)) if nf == 0: logging.info(f'{prefix}WARNING: No labels found in {path}. See {help_url}') x['hash'] = get_hash(self.label_files + self.img_files) x['results'] = nf, nm, ne, nc, len(self.img_files) - x['version'] = 0.2 # cache version + x['msgs'] = msgs # warnings + x['version'] = 0.3 # cache version try: torch.save(x, path) # save cache for next time logging.info(f'{prefix}New cache created: {path}') @@ -1084,11 +1090,11 @@ def verify_image_label(args): else: nm = 1 # label missing l = np.zeros((0, 5), dtype=np.float32) - return im_file, l, shape, segments, nm, nf, ne, nc + return im_file, l, shape, segments, nm, nf, ne, nc, '' except Exception as e: nc = 1 - logging.info(f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}') - return [None, None, None, None, nm, nf, ne, nc] + msg = f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}' + return [None, None, None, None, nm, nf, ne, nc, msg] def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False): From 2729761458cdf868af3dad496a0dfcb2fd1d5aa4 Mon Sep 17 00:00:00 2001 From: ZouJiu1 <34758215+ZouJiu1@users.noreply.github.com> Date: Fri, 18 Jun 2021 22:26:52 +0800 Subject: [PATCH 095/757] EMA changes for pre-model's batch_size (#3681) * EMA changes for pre-model's batch_size * Update train.py * Update torch_utils.py Co-authored-by: Glenn Jocher From 463628a4d88d2375d7b4c4556f77a24d44332772 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 18 Jun 2021 17:12:42 +0200 Subject: [PATCH 096/757] Update README.md (#3684) --- README.md | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/README.md b/README.md index ab8f60169947..82d408ef5cde 100755 --- a/README.md +++ b/README.md @@ -62,9 +62,7 @@ See the [YOLOv5 Docs](https://docs.ultralytics.com) for full documentation on tr
- -Install - +Install Python >= 3.6.0 required with all [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) dependencies installed: From 814806c61de06525dc7346334a08a2024272799c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 19 Jun 2021 11:22:09 +0200 Subject: [PATCH 097/757] Update cache check (#3691) Swapped order of operations for faster first per https://github.com/ultralytics/yolov5/commit/f527704cd32c42bc0bba9cce04601783b8563204#r52362419 --- utils/datasets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/datasets.py b/utils/datasets.py index f927abb20f5a..8fce61bb08a2 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -390,7 +390,7 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') # cached labels if cache_path.is_file(): cache, exists = torch.load(cache_path), True # load - if cache['hash'] != get_hash(self.label_files + self.img_files) or cache['version'] != 0.3: + if cache['version'] != 0.3 or cache['hash'] != get_hash(self.label_files + self.img_files): cache, exists = self.cache_labels(cache_path, prefix), False # re-cache else: cache, exists = self.cache_labels(cache_path, prefix), False # cache From bf209f6fe92abc4c0ea66a549f6527c7fe8ec20a Mon Sep 17 00:00:00 2001 From: Mai Thanh Minh Date: Sat, 19 Jun 2021 16:51:21 +0700 Subject: [PATCH 098/757] Skip HSV augmentation when hyperparameters are [0, 0, 0] (#3686) * Create shortcircuit in augment_hsv when hyperparameter are zero * implement faster opt-in Co-authored-by: Glenn Jocher --- utils/datasets.py | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index 8fce61bb08a2..21388db7ff46 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -632,17 +632,18 @@ def load_image(self, index): def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5): - r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains - hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV)) - dtype = img.dtype # uint8 - - x = np.arange(0, 256, dtype=r.dtype) - lut_hue = ((x * r[0]) % 180).astype(dtype) - lut_sat = np.clip(x * r[1], 0, 255).astype(dtype) - lut_val = np.clip(x * r[2], 0, 255).astype(dtype) - - img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))) - cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed + if hgain or sgain or vgain: + r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains + hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV)) + dtype = img.dtype # uint8 + + x = np.arange(0, 256, dtype=r.dtype) + lut_hue = ((x * r[0]) % 180).astype(dtype) + lut_sat = np.clip(x * r[1], 0, 255).astype(dtype) + lut_val = np.clip(x * r[2], 0, 255).astype(dtype) + + img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))) + cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed def hist_equalize(img, clahe=True, bgr=False): From bfb2276b1d32b5360312802fd6065661f3ea6b00 Mon Sep 17 00:00:00 2001 From: lb-desupervised <86119248+lb-desupervised@users.noreply.github.com> Date: Sat, 19 Jun 2021 12:06:59 +0200 Subject: [PATCH 099/757] Slightly modify CLI execution (#3687) * Slightly modify CLI execution This simple change makes it easier to run the primary functions of this repo (train/detect/test) from within Python. 
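For example, a hypothetical sketch of the resulting usage (attribute names follow the `parse_opt` defaults added below):

```python
import detect

opt = detect.parse_opt()    # argparse defaults, no CLI flags required
opt.source = 'data/images'  # override fields programmatically
opt.conf_thres = 0.4
detect.main(opt)
```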
An object which represents `opt` can be constructed and fed to the `main` function of each of these modules, rather than having to call the lower level functions directly, or run the module as a script. * Update export.py Add CLI parsing update for more convenient module usage within Python. Co-authored-by: Lewis Belcher --- detect.py | 12 ++++++++++-- models/export.py | 12 ++++++++++-- test.py | 11 ++++++++++- train.py | 12 +++++++++++- 4 files changed, 41 insertions(+), 6 deletions(-) diff --git a/detect.py b/detect.py index 7daa87436daa..c51c6fa4e0c1 100644 --- a/detect.py +++ b/detect.py @@ -172,7 +172,7 @@ def detect(weights='yolov5s.pt', # model.pt path(s) print(f'Done. ({time.time() - t0:.3f}s)') -if __name__ == '__main__': +def parse_opt(): parser = argparse.ArgumentParser() parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)') parser.add_argument('--source', type=str, default='data/images', help='file/dir/URL/glob, 0 for webcam') @@ -198,7 +198,15 @@ def detect(weights='yolov5s.pt', # model.pt path(s) parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences') parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') opt = parser.parse_args() + return opt + + +def main(opt): print(opt) check_requirements(exclude=('tensorboard', 'thop')) - detect(**vars(opt)) + + +if __name__ == "__main__": + opt = parse_opt() + main(opt) diff --git a/models/export.py b/models/export.py index 3c04b07fdc95..8c491dabddc0 100644 --- a/models/export.py +++ b/models/export.py @@ -144,7 +144,7 @@ def export(weights='./yolov5s.pt', # weights path print(f'\nExport complete ({time.time() - t:.2f}s). Visualize with https://github.com/lutzroeder/netron.') -if __name__ == '__main__': +def parse_opt(): parser = argparse.ArgumentParser() parser.add_argument('--weights', type=str, default='./yolov5s.pt', help='weights path') parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='image (height, width)') @@ -159,7 +159,15 @@ def export(weights='./yolov5s.pt', # weights path parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model') parser.add_argument('--opset-version', type=int, default=12, help='ONNX: opset version') opt = parser.parse_args() + return opt + + +def main(opt): print(opt) set_logging() - export(**vars(opt)) + + +if __name__ == "__main__": + opt = parse_opt() + main(opt) diff --git a/test.py b/test.py index a38298da54da..5ebfb36509ea 100644 --- a/test.py +++ b/test.py @@ -294,7 +294,7 @@ def test(data, return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t -if __name__ == '__main__': +def parse_opt(): parser = argparse.ArgumentParser(prog='test.py') parser.add_argument('--data', type=str, default='data/coco128.yaml', help='dataset.yaml path') parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)') @@ -319,6 +319,10 @@ def test(data, opt.save_json |= opt.data.endswith('coco.yaml') opt.save_txt |= opt.save_hybrid opt.data = check_file(opt.data) # check file + return opt + + +def main(opt): print(opt) check_requirements(exclude=('tensorboard', 'thop')) @@ -344,3 +348,8 @@ def test(data, np.savetxt(f, y, fmt='%10.4g') # save os.system('zip -r study.zip study_*.txt') plot_study_txt(x=x) # plot + + +if __name__ == "__main__": + opt = parse_opt() + main(opt) diff --git a/train.py b/train.py index 3eb866345d47..8056183242a6 100644 --- a/train.py +++ b/train.py @@ -463,7 
+463,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary return results -if __name__ == '__main__': +def parse_opt(): parser = argparse.ArgumentParser() parser.add_argument('--weights', type=str, default='yolov5s.pt', help='initial weights path') parser.add_argument('--cfg', type=str, default='', help='model.yaml path') @@ -504,6 +504,11 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Set DDP variables opt.world_size = int(getattr(os.environ, 'WORLD_SIZE', 1)) opt.global_rank = int(getattr(os.environ, 'RANK', -1)) + return opt + + +def main(opt): + print(opt) set_logging(opt.global_rank) if opt.global_rank in [-1, 0]: check_git_status() @@ -628,3 +633,8 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary plot_evolution(yaml_file) print(f'Hyperparameter evolution complete. Best results saved as: {yaml_file}\n' f'Command to train a new model with these hyperparameters: $ python train.py --hyp {yaml_file}') + + +if __name__ == "__main__": + opt = parse_opt() + main(opt) From 5bab9a28e45f119839c14d91dc93bbdedadaf7de Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 19 Jun 2021 14:21:28 +0200 Subject: [PATCH 100/757] Reformat (#3694) --- .github/ISSUE_TEMPLATE/feature-request.md | 1 + .github/dependabot.yml | 20 ++++---- .github/workflows/ci-testing.yml | 8 +-- .github/workflows/codeql-analysis.yml | 62 +++++++++++------------ .github/workflows/greetings.yml | 14 ++--- .github/workflows/rebase.yml | 2 +- 6 files changed, 54 insertions(+), 53 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/feature-request.md b/.github/ISSUE_TEMPLATE/feature-request.md index 87db3eacbf02..02320771b5f5 100644 --- a/.github/ISSUE_TEMPLATE/feature-request.md +++ b/.github/ISSUE_TEMPLATE/feature-request.md @@ -8,6 +8,7 @@ assignees: '' --- ## 🚀 Feature + ## Motivation diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 9910689197f5..c489a753aa95 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -1,12 +1,12 @@ version: 2 updates: -- package-ecosystem: pip - directory: "/" - schedule: - interval: weekly - time: "04:00" - open-pull-requests-limit: 10 - reviewers: - - glenn-jocher - labels: - - dependencies + - package-ecosystem: pip + directory: "/" + schedule: + interval: weekly + time: "04:00" + open-pull-requests-limit: 10 + reviewers: + - glenn-jocher + labels: + - dependencies diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 36318f6ae562..956199314726 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -1,6 +1,6 @@ name: CI CPU testing -on: # https://help.github.com/en/actions/reference/events-that-trigger-workflows +on: # https://help.github.com/en/actions/reference/events-that-trigger-workflows push: branches: [ master, develop ] pull_request: @@ -14,9 +14,9 @@ jobs: strategy: fail-fast: false matrix: - os: [ubuntu-latest, macos-latest, windows-latest] - python-version: [3.8] - model: ['yolov5s'] # models to test + os: [ ubuntu-latest, macos-latest, windows-latest ] + python-version: [ 3.8 ] + model: [ 'yolov5s' ] # models to test # Timeout: https://stackoverflow.com/a/59076067/4521646 timeout-minutes: 50 diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 1f07888509f8..458465d90eef 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -21,34 +21,34 @@ jobs: # 
https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed steps: - - name: Checkout repository - uses: actions/checkout@v2 - - # Initializes the CodeQL tools for scanning. - - name: Initialize CodeQL - uses: github/codeql-action/init@v1 - with: - languages: ${{ matrix.language }} - # If you wish to specify custom queries, you can do so here or in a config file. - # By default, queries listed here will override any specified in a config file. - # Prefix the list here with "+" to use these queries and those in the config file. - # queries: ./path/to/local/query, your-org/your-repo/queries@main - - # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). - # If this step fails, then you should remove it and run the build manually (see below) - - name: Autobuild - uses: github/codeql-action/autobuild@v1 - - # ℹ️ Command-line programs to run using the OS shell. - # 📚 https://git.io/JvXDl - - # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines - # and modify them (or add more) to build your code if your project - # uses a compiled language - - #- run: | - # make bootstrap - # make release - - - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v1 + - name: Checkout repository + uses: actions/checkout@v2 + + # Initializes the CodeQL tools for scanning. + - name: Initialize CodeQL + uses: github/codeql-action/init@v1 + with: + languages: ${{ matrix.language }} + # If you wish to specify custom queries, you can do so here or in a config file. + # By default, queries listed here will override any specified in a config file. + # Prefix the list here with "+" to use these queries and those in the config file. + # queries: ./path/to/local/query, your-org/your-repo/queries@main + + # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). + # If this step fails, then you should remove it and run the build manually (see below) + - name: Autobuild + uses: github/codeql-action/autobuild@v1 + + # ℹ️ Command-line programs to run using the OS shell. + # 📚 https://git.io/JvXDl + + # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines + # and modify them (or add more) to build your code if your project + # uses a compiled language + + #- run: | + # make bootstrap + # make release + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v1 diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index ee472297107e..bbbe8e676f82 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -1,6 +1,6 @@ name: Greetings -on: [pull_request_target, issues] +on: [ pull_request_target, issues ] jobs: greeting: @@ -39,18 +39,18 @@ jobs: ``` ## Environments - + YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled): - + - **Google Colab and Kaggle** notebooks with free GPU: Open In Colab Open In Kaggle - **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart) - **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart) - **Docker Image**. 
See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) Docker Pulls - - + + ## Status - + ![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg) - + If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([test.py](https://github.com/ultralytics/yolov5/blob/master/test.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/models/export.py)) on MacOS, Windows, and Ubuntu every 24 hours and on every commit. diff --git a/.github/workflows/rebase.yml b/.github/workflows/rebase.yml index e86c57744b84..38e14578216c 100644 --- a/.github/workflows/rebase.yml +++ b/.github/workflows/rebase.yml @@ -3,7 +3,7 @@ name: Automatic Rebase on: issue_comment: - types: [created] + types: [ created ] jobs: rebase: From fad27c004661692d715b31e8830122f93a09347f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 19 Jun 2021 16:30:25 +0200 Subject: [PATCH 101/757] Update DDP for `torch.distributed.run` with `gloo` backend (#3680) * Update DDP for `torch.distributed.run` * Add LOCAL_RANK * remove opt.local_rank * backend="gloo|nccl" * print * print * debug * debug * os.getenv * gloo * gloo * gloo * cleanup * fix getenv * cleanup * cleanup destroy * try nccl * return opt * add --local_rank * add timeout * add init_method * gloo * move destroy * move destroy * move print(opt) under if RANK * destroy only RANK 0 * move destroy inside train() * restore destroy outside train() * update print(opt) * cleanup * nccl * gloo with 60 second timeout * update namespace printing --- detect.py | 6 +- models/export.py | 2 +- test.py | 4 +- train.py | 95 +++++++++++++++--------------- utils/datasets.py | 4 +- utils/torch_utils.py | 5 +- utils/wandb_logging/wandb_utils.py | 6 +- 7 files changed, 61 insertions(+), 61 deletions(-) diff --git a/detect.py b/detect.py index c51c6fa4e0c1..fb2d2702234d 100644 --- a/detect.py +++ b/detect.py @@ -8,8 +8,8 @@ from models.experimental import attempt_load from utils.datasets import LoadStreams, LoadImages -from utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression, apply_classifier, \ - scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path, save_one_box +from utils.general import check_img_size, check_requirements, check_imshow, colorstr, non_max_suppression, \ + apply_classifier, scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path, save_one_box from utils.plots import colors, plot_one_box from utils.torch_utils import select_device, load_classifier, time_synchronized @@ -202,7 +202,7 @@ def parse_opt(): def main(opt): - print(opt) + print(colorstr('detect: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items())) check_requirements(exclude=('tensorboard', 'thop')) detect(**vars(opt)) diff --git a/models/export.py b/models/export.py index 8c491dabddc0..15d6a87ecea6 100644 --- a/models/export.py +++ b/models/export.py @@ -163,8 +163,8 @@ def parse_opt(): def main(opt): - print(opt) set_logging() + print(colorstr('export: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items())) export(**vars(opt)) diff --git a/test.py b/test.py index 5ebfb36509ea..1e82fd2d1611 100644 --- a/test.py +++ b/test.py @@ -51,7 
+51,6 @@ def test(data, device = next(model.parameters()).device # get model device else: # called directly - set_logging() device = select_device(device, batch_size=batch_size) # Directories @@ -323,7 +322,8 @@ def parse_opt(): def main(opt): - print(opt) + set_logging() + print(colorstr('test: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items())) check_requirements(exclude=('tensorboard', 'thop')) if opt.task in ('train', 'val', 'test'): # run normally diff --git a/train.py b/train.py index 8056183242a6..8f206a9401c5 100644 --- a/train.py +++ b/train.py @@ -37,15 +37,17 @@ from utils.wandb_logging.wandb_utils import WandbLogger, check_wandb_resume logger = logging.getLogger(__name__) +LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html +RANK = int(os.getenv('RANK', -1)) +WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) def train(hyp, # path/to/hyp.yaml or hyp dictionary opt, device, ): - save_dir, epochs, batch_size, total_batch_size, weights, rank, single_cls = \ - Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank, \ - opt.single_cls + save_dir, epochs, batch_size, total_batch_size, weights, single_cls = \ + Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.single_cls # Directories wdir = save_dir / 'weights' @@ -69,13 +71,13 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Configure plots = not opt.evolve # create plots cuda = device.type != 'cpu' - init_seeds(2 + rank) + init_seeds(2 + RANK) with open(opt.data) as f: data_dict = yaml.safe_load(f) # data dict # Loggers loggers = {'wandb': None, 'tb': None} # loggers dict - if rank in [-1, 0]: + if RANK in [-1, 0]: # TensorBoard if not opt.evolve: prefix = colorstr('tensorboard: ') @@ -99,7 +101,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Model pretrained = weights.endswith('.pt') if pretrained: - with torch_distributed_zero_first(rank): + with torch_distributed_zero_first(RANK): weights = attempt_download(weights) # download if not found locally ckpt = torch.load(weights, map_location=device) # load checkpoint model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create @@ -110,7 +112,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights)) # report else: model = Model(opt.cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create - with torch_distributed_zero_first(rank): + with torch_distributed_zero_first(RANK): check_dataset(data_dict) # check train_path = data_dict['train'] test_path = data_dict['val'] @@ -158,7 +160,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # plot_lr_scheduler(optimizer, scheduler, epochs) # EMA - ema = ModelEMA(model) if rank in [-1, 0] else None + ema = ModelEMA(model) if RANK in [-1, 0] else None # Resume start_epoch, best_fitness = 0, 0.0 @@ -194,28 +196,28 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size] # verify imgsz are gs-multiples # DP mode - if cuda and rank == -1 and torch.cuda.device_count() > 1: + if cuda and RANK == -1 and torch.cuda.device_count() > 1: model = torch.nn.DataParallel(model) # SyncBatchNorm - if opt.sync_bn and cuda and rank != -1: + if opt.sync_bn and cuda and RANK != -1: model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device) logger.info('Using SyncBatchNorm()') # Trainloader 
dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, single_cls, - hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=rank, - world_size=opt.world_size, workers=opt.workers, + hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=RANK, + workers=opt.workers, image_weights=opt.image_weights, quad=opt.quad, prefix=colorstr('train: ')) mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class nb = len(dataloader) # number of batches assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g' % (mlc, nc, opt.data, nc - 1) # Process 0 - if rank in [-1, 0]: + if RANK in [-1, 0]: testloader = create_dataloader(test_path, imgsz_test, batch_size * 2, gs, single_cls, hyp=hyp, cache=opt.cache_images and not opt.notest, rect=True, rank=-1, - world_size=opt.world_size, workers=opt.workers, + workers=opt.workers, pad=0.5, prefix=colorstr('val: '))[0] if not opt.resume: @@ -234,8 +236,8 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary model.half().float() # pre-reduce anchor precision # DDP mode - if cuda and rank != -1: - model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank, + if cuda and RANK != -1: + model = DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK, # nn.MultiheadAttention incompatibility with DDP https://github.com/pytorch/pytorch/issues/26698 find_unused_parameters=any(isinstance(layer, nn.MultiheadAttention) for layer in model.modules())) @@ -269,15 +271,15 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Update image weights (optional) if opt.image_weights: # Generate indices - if rank in [-1, 0]: + if RANK in [-1, 0]: cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc # class weights iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx # Broadcast if DDP - if rank != -1: - indices = (torch.tensor(dataset.indices) if rank == 0 else torch.zeros(dataset.n)).int() + if RANK != -1: + indices = (torch.tensor(dataset.indices) if RANK == 0 else torch.zeros(dataset.n)).int() dist.broadcast(indices, 0) - if rank != 0: + if RANK != 0: dataset.indices = indices.cpu().numpy() # Update mosaic border @@ -285,11 +287,11 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # dataset.mosaic_border = [b - imgsz, -b] # height, width borders mloss = torch.zeros(4, device=device) # mean losses - if rank != -1: + if RANK != -1: dataloader.sampler.set_epoch(epoch) pbar = enumerate(dataloader) logger.info(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'labels', 'img_size')) - if rank in [-1, 0]: + if RANK in [-1, 0]: pbar = tqdm(pbar, total=nb) # progress bar optimizer.zero_grad() for i, (imgs, targets, paths, _) in pbar: # batch ------------------------------------------------------------- @@ -319,8 +321,8 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary with amp.autocast(enabled=cuda): pred = model(imgs) # forward loss, loss_items = compute_loss(pred, targets.to(device)) # loss scaled by batch_size - if rank != -1: - loss *= opt.world_size # gradient averaged between devices in DDP mode + if RANK != -1: + loss *= WORLD_SIZE # gradient averaged between devices in DDP mode if opt.quad: loss *= 4. 
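A toy illustration of the `loss *= WORLD_SIZE` scaling above (hypothetical numbers; DDP all-reduces gradients with a mean across ranks, so pre-scaling the loss restores the equivalent sum):

    # Hypothetical 4-rank run in which every rank computes the same gradient g.
    WORLD_SIZE = 4
    g = 0.25
    averaged = sum([g] * WORLD_SIZE) / WORLD_SIZE  # what DDP's mean all-reduce yields: 0.25
    restored = averaged * WORLD_SIZE               # pre-scaled loss makes this 1.0, the sum over ranks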
@@ -336,7 +338,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary ema.update(model) # Print - if rank in [-1, 0]: + if RANK in [-1, 0]: mloss = (mloss * i + loss_items) / (i + 1) # update mean losses mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0) # (GB) s = ('%10s' * 2 + '%10.4g' * 6) % ( @@ -362,7 +364,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary scheduler.step() # DDP process 0 or single-GPU - if rank in [-1, 0]: + if RANK in [-1, 0]: # mAP ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride', 'class_weights']) final_epoch = epoch + 1 == epochs @@ -424,7 +426,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # end epoch ---------------------------------------------------------------------------------------------------- # end training ----------------------------------------------------------------------------------------------------- - if rank in [-1, 0]: + if RANK in [-1, 0]: logger.info(f'{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.\n') if plots: plot_results(save_dir=save_dir) # save as results.png @@ -457,8 +459,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary name='run_' + wandb_logger.wandb_run.id + '_model', aliases=['latest', 'best', 'stripped']) wandb_logger.finish_run() - else: - dist.destroy_process_group() + torch.cuda.empty_cache() return results @@ -486,7 +487,6 @@ def parse_opt(): parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class') parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer') parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode') - parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify') parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers') parser.add_argument('--project', default='runs/train', help='save to project/name') parser.add_argument('--entity', default=None, help='W&B entity') @@ -499,18 +499,15 @@ def parse_opt(): parser.add_argument('--bbox_interval', type=int, default=-1, help='Set bounding-box image logging interval for W&B') parser.add_argument('--save_period', type=int, default=-1, help='Log model after every "save_period" epoch') parser.add_argument('--artifact_alias', type=str, default="latest", help='version of dataset artifact to be used') + parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify') opt = parser.parse_args() - - # Set DDP variables - opt.world_size = int(getattr(os.environ, 'WORLD_SIZE', 1)) - opt.global_rank = int(getattr(os.environ, 'RANK', -1)) return opt def main(opt): - print(opt) - set_logging(opt.global_rank) - if opt.global_rank in [-1, 0]: + set_logging(RANK) + if RANK in [-1, 0]: + print(colorstr('train: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items())) check_git_status() check_requirements(exclude=['thop']) @@ -519,11 +516,9 @@ def main(opt): if opt.resume and not wandb_run: # resume an interrupted run ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run() # specified or most recent path assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist' - apriori = opt.global_rank, opt.local_rank with open(Path(ckpt).parent.parent / 'opt.yaml') as f: opt = argparse.Namespace(**yaml.safe_load(f)) # replace - opt.cfg, opt.weights, opt.resume, opt.batch_size, opt.global_rank, opt.local_rank = \ - 
'', ckpt, True, opt.total_batch_size, *apriori # reinstate + opt.cfg, opt.weights, opt.resume, opt.batch_size = '', ckpt, True, opt.total_batch_size # reinstate logger.info('Resuming training from %s' % ckpt) else: # opt.hyp = opt.hyp or ('hyp.finetune.yaml' if opt.weights else 'hyp.scratch.yaml') @@ -536,19 +531,21 @@ def main(opt): # DDP mode opt.total_batch_size = opt.batch_size device = select_device(opt.device, batch_size=opt.batch_size) - if opt.local_rank != -1: - assert torch.cuda.device_count() > opt.local_rank - torch.cuda.set_device(opt.local_rank) - device = torch.device('cuda', opt.local_rank) - dist.init_process_group(backend='nccl', init_method='env://') # distributed backend - assert opt.batch_size % opt.world_size == 0, '--batch-size must be multiple of CUDA device count' + if LOCAL_RANK != -1: + from datetime import timedelta + assert torch.cuda.device_count() > LOCAL_RANK, 'too few GPUS for DDP command' + torch.cuda.set_device(LOCAL_RANK) + device = torch.device('cuda', LOCAL_RANK) + dist.init_process_group(backend="gloo", timeout=timedelta(seconds=60)) + assert opt.batch_size % WORLD_SIZE == 0, '--batch-size must be multiple of CUDA device count' assert not opt.image_weights, '--image-weights argument is not compatible with DDP training' - opt.batch_size = opt.total_batch_size // opt.world_size + opt.batch_size = opt.total_batch_size // WORLD_SIZE # Train - logger.info(opt) if not opt.evolve: train(opt.hyp, opt, device) + if WORLD_SIZE > 1 and RANK == 0: + _ = [print('Destroying process group... ', end=''), dist.destroy_process_group(), print('Done.')] # Evolve hyperparameters (optional) else: @@ -584,7 +581,7 @@ def main(opt): with open(opt.hyp) as f: hyp = yaml.safe_load(f) # load hyps dict - assert opt.local_rank == -1, 'DDP mode not implemented for --evolve' + assert LOCAL_RANK == -1, 'DDP mode not implemented for --evolve' opt.notest, opt.nosave = True, True # only test/save final epoch # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices yaml_file = Path(opt.save_dir) / 'hyp_evolved.yaml' # save best result here diff --git a/utils/datasets.py b/utils/datasets.py index 21388db7ff46..93d6511ac658 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -64,7 +64,7 @@ def exif_size(img): def create_dataloader(path, imgsz, batch_size, stride, single_cls=False, hyp=None, augment=False, cache=False, pad=0.0, - rect=False, rank=-1, world_size=1, workers=8, image_weights=False, quad=False, prefix=''): + rect=False, rank=-1, workers=8, image_weights=False, quad=False, prefix=''): # Make sure only the first process in DDP process the dataset first, and the following others can use the cache with torch_distributed_zero_first(rank): dataset = LoadImagesAndLabels(path, imgsz, batch_size, @@ -79,7 +79,7 @@ def create_dataloader(path, imgsz, batch_size, stride, single_cls=False, hyp=Non prefix=prefix) batch_size = min(batch_size, len(dataset)) - nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers]) # number of workers + nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, workers]) # number of workers sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None loader = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader # Use torch.utils.data.DataLoader() if dataset.properties will update during training else InfiniteDataLoader() diff --git a/utils/torch_utils.py b/utils/torch_utils.py index b690dbe96700..2d5382471e3c 100644 --- a/utils/torch_utils.py +++ 
b/utils/torch_utils.py @@ -13,6 +13,7 @@ import torch import torch.backends.cudnn as cudnn +import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F import torchvision @@ -30,10 +31,10 @@ def torch_distributed_zero_first(local_rank: int): Decorator to make all processes in distributed training wait for each local_master to do something. """ if local_rank not in [-1, 0]: - torch.distributed.barrier() + dist.barrier() yield if local_rank == 0: - torch.distributed.barrier() + dist.barrier() def init_torch_seeds(seed=0): diff --git a/utils/wandb_logging/wandb_utils.py b/utils/wandb_logging/wandb_utils.py index 7652f964f2c0..43b4c3d04e8e 100644 --- a/utils/wandb_logging/wandb_utils.py +++ b/utils/wandb_logging/wandb_utils.py @@ -1,5 +1,6 @@ """Utilities and tools for tracking runs with Weights & Biases.""" import logging +import os import sys from contextlib import contextmanager from pathlib import Path @@ -18,6 +19,7 @@ except ImportError: wandb = None +RANK = int(os.getenv('RANK', -1)) WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' @@ -42,10 +44,10 @@ def get_run_info(run_path): def check_wandb_resume(opt): - process_wandb_config_ddp_mode(opt) if opt.global_rank not in [-1, 0] else None + process_wandb_config_ddp_mode(opt) if RANK not in [-1, 0] else None if isinstance(opt.resume, str): if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): - if opt.global_rank not in [-1, 0]: # For resuming DDP runs + if RANK not in [-1, 0]: # For resuming DDP runs entity, project, run_id, model_artifact_name = get_run_info(opt.resume) api = wandb.Api() artifact = api.artifact(entity + '/' + project + '/' + model_artifact_name + ':latest') From b3e2f4e08d6a16bf153c9b56bbc0001a52dd24e3 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 19 Jun 2021 19:14:59 +0200 Subject: [PATCH 102/757] Eliminate `total_batch_size` variable (#3697) * Eliminate `total_batch_size` variable * cleanup * Update train.py --- train.py | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/train.py b/train.py index 8f206a9401c5..5ad47fe9ea6a 100644 --- a/train.py +++ b/train.py @@ -46,10 +46,11 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary opt, device, ): - save_dir, epochs, batch_size, total_batch_size, weights, single_cls = \ - Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.single_cls + save_dir, epochs, batch_size, weights, single_cls = \ + opt.save_dir, opt.epochs, opt.batch_size, opt.weights, opt.single_cls # Directories + save_dir = Path(save_dir) wdir = save_dir / 'weights' wdir.mkdir(parents=True, exist_ok=True) # make dir last = wdir / 'last.pt' @@ -127,8 +128,8 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Optimizer nbs = 64 # nominal batch size - accumulate = max(round(nbs / total_batch_size), 1) # accumulate loss before optimizing - hyp['weight_decay'] *= total_batch_size * accumulate / nbs # scale weight_decay + accumulate = max(round(nbs / batch_size), 1) # accumulate loss before optimizing + hyp['weight_decay'] *= batch_size * accumulate / nbs # scale weight_decay logger.info(f"Scaled weight_decay = {hyp['weight_decay']}") pg0, pg1, pg2 = [], [], [] # optimizer parameter groups @@ -205,7 +206,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary logger.info('Using SyncBatchNorm()') # Trainloader - dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, single_cls, + dataloader, dataset = create_dataloader(train_path, imgsz, batch_size // WORLD_SIZE, gs, single_cls, hyp=hyp, 
augment=True, cache=opt.cache_images, rect=opt.rect, rank=RANK, workers=opt.workers, image_weights=opt.image_weights, quad=opt.quad, prefix=colorstr('train: ')) @@ -215,7 +216,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Process 0 if RANK in [-1, 0]: - testloader = create_dataloader(test_path, imgsz_test, batch_size * 2, gs, single_cls, + testloader = create_dataloader(test_path, imgsz_test, batch_size // WORLD_SIZE * 2, gs, single_cls, hyp=hyp, cache=opt.cache_images and not opt.notest, rect=True, rank=-1, workers=opt.workers, pad=0.5, prefix=colorstr('val: '))[0] @@ -302,7 +303,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary if ni <= nw: xi = [0, nw] # x interp # model.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou) - accumulate = max(1, np.interp(ni, xi, [1, nbs / total_batch_size]).round()) + accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round()) for j, x in enumerate(optimizer.param_groups): # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0 x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch)]) @@ -371,7 +372,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary if not opt.notest or final_epoch: # Calculate mAP wandb_logger.current_epoch = epoch + 1 results, maps, _ = test.test(data_dict, - batch_size=batch_size * 2, + batch_size=batch_size // WORLD_SIZE * 2, imgsz=imgsz_test, model=ema.ema, single_cls=single_cls, @@ -439,7 +440,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary if is_coco: # COCO dataset for m in [last, best] if best.exists() else [last]: # speed, mAP tests results, _, _ = test.test(opt.data, - batch_size=batch_size * 2, + batch_size=batch_size // WORLD_SIZE * 2, imgsz=imgsz_test, conf_thres=0.001, iou_thres=0.7, @@ -518,7 +519,7 @@ def main(opt): assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist' with open(Path(ckpt).parent.parent / 'opt.yaml') as f: opt = argparse.Namespace(**yaml.safe_load(f)) # replace - opt.cfg, opt.weights, opt.resume, opt.batch_size = '', ckpt, True, opt.total_batch_size # reinstate + opt.cfg, opt.weights, opt.resume = '', ckpt, True # reinstate logger.info('Resuming training from %s' % ckpt) else: # opt.hyp = opt.hyp or ('hyp.finetune.yaml' if opt.weights else 'hyp.scratch.yaml') @@ -529,17 +530,15 @@ def main(opt): opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok | opt.evolve)) # DDP mode - opt.total_batch_size = opt.batch_size device = select_device(opt.device, batch_size=opt.batch_size) if LOCAL_RANK != -1: from datetime import timedelta - assert torch.cuda.device_count() > LOCAL_RANK, 'too few GPUS for DDP command' + assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' torch.cuda.set_device(LOCAL_RANK) device = torch.device('cuda', LOCAL_RANK) dist.init_process_group(backend="gloo", timeout=timedelta(seconds=60)) assert opt.batch_size % WORLD_SIZE == 0, '--batch-size must be multiple of CUDA device count' assert not opt.image_weights, '--image-weights argument is not compatible with DDP training' - opt.batch_size = opt.total_batch_size // WORLD_SIZE # Train if not opt.evolve: From c1af67dcd4372ac230e9dafe6d4c4023b59a3ceb Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 19 Jun 2021 19:50:46 +0200 Subject: [PATCH 103/757] Add torch DP warning (#3698) --- train.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/train.py b/train.py index 5ad47fe9ea6a..68cd7fab574c 100644 --- a/train.py +++ b/train.py 
@@ -198,6 +198,8 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # DP mode if cuda and RANK == -1 and torch.cuda.device_count() > 1: + logging.warning('DP not recommended, instead use torch.distributed.run for best DDP Multi-GPU results.\n' + 'See Multi-GPU Tutorial at https://github.com/ultralytics/yolov5/issues/475 to get started.') model = torch.nn.DataParallel(model) # SyncBatchNorm From fbf41e09134b113f8e79ae01b4eee40d00797b2d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 20 Jun 2021 15:06:58 +0200 Subject: [PATCH 104/757] Add `train.run()` method (#3700) * Update train.py explicit arguments * Update train.py * Add run method --- train.py | 81 +++++++++++++++++++++++++++++++------------------------- 1 file changed, 45 insertions(+), 36 deletions(-) diff --git a/train.py b/train.py index 68cd7fab574c..fbda7320839a 100644 --- a/train.py +++ b/train.py @@ -46,8 +46,9 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary opt, device, ): - save_dir, epochs, batch_size, weights, single_cls = \ - opt.save_dir, opt.epochs, opt.batch_size, opt.weights, opt.single_cls + save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, notest, nosave, workers, = \ + opt.save_dir, opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \ + opt.resume, opt.notest, opt.nosave, opt.workers # Directories save_dir = Path(save_dir) @@ -70,34 +71,34 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary yaml.safe_dump(vars(opt), f, sort_keys=False) # Configure - plots = not opt.evolve # create plots + plots = not evolve # create plots cuda = device.type != 'cpu' init_seeds(2 + RANK) - with open(opt.data) as f: + with open(data) as f: data_dict = yaml.safe_load(f) # data dict # Loggers loggers = {'wandb': None, 'tb': None} # loggers dict if RANK in [-1, 0]: # TensorBoard - if not opt.evolve: + if not evolve: prefix = colorstr('tensorboard: ') logger.info(f"{prefix}Start with 'tensorboard --logdir {opt.project}', view at http://localhost:6006/") - loggers['tb'] = SummaryWriter(opt.save_dir) + loggers['tb'] = SummaryWriter(str(save_dir)) # W&B opt.hyp = hyp # add hyperparameters run_id = torch.load(weights).get('wandb_id') if weights.endswith('.pt') and os.path.isfile(weights) else None wandb_logger = WandbLogger(opt, save_dir.stem, run_id, data_dict) loggers['wandb'] = wandb_logger.wandb - data_dict = wandb_logger.data_dict - if wandb_logger.wandb: + if loggers['wandb']: + data_dict = wandb_logger.data_dict weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp # may update weights, epochs if resuming nc = 1 if single_cls else int(data_dict['nc']) # number of classes names = ['item'] if single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names - assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data) # check - is_coco = opt.data.endswith('coco.yaml') and nc == 80 # COCO dataset + assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, data) # check + is_coco = data.endswith('coco.yaml') and nc == 80 # COCO dataset # Model pretrained = weights.endswith('.pt') @@ -105,14 +106,14 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary with torch_distributed_zero_first(RANK): weights = attempt_download(weights) # download if not found locally ckpt = torch.load(weights, map_location=device) # load checkpoint - model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create - exclude = ['anchor'] if (opt.cfg or 
hyp.get('anchors')) and not opt.resume else [] # exclude keys + model = Model(cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create + exclude = ['anchor'] if (cfg or hyp.get('anchors')) and not resume else [] # exclude keys state_dict = ckpt['model'].float().state_dict() # to FP32 state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude) # intersect model.load_state_dict(state_dict, strict=False) # load logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights)) # report else: - model = Model(opt.cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create + model = Model(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create with torch_distributed_zero_first(RANK): check_dataset(data_dict) # check train_path = data_dict['train'] @@ -182,7 +183,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Epochs start_epoch = ckpt['epoch'] + 1 - if opt.resume: + if resume: assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % (weights, epochs) if epochs < start_epoch: logger.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' % @@ -210,20 +211,20 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Trainloader dataloader, dataset = create_dataloader(train_path, imgsz, batch_size // WORLD_SIZE, gs, single_cls, hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=RANK, - workers=opt.workers, + workers=workers, image_weights=opt.image_weights, quad=opt.quad, prefix=colorstr('train: ')) mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class nb = len(dataloader) # number of batches - assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g' % (mlc, nc, opt.data, nc - 1) + assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g' % (mlc, nc, data, nc - 1) # Process 0 if RANK in [-1, 0]: testloader = create_dataloader(test_path, imgsz_test, batch_size // WORLD_SIZE * 2, gs, single_cls, - hyp=hyp, cache=opt.cache_images and not opt.notest, rect=True, rank=-1, - workers=opt.workers, + hyp=hyp, cache=opt.cache_images and not notest, rect=True, rank=-1, + workers=workers, pad=0.5, prefix=colorstr('val: '))[0] - if not opt.resume: + if not resume: labels = np.concatenate(dataset.labels, 0) c = torch.tensor(labels[:, 0]) # classes # cf = torch.bincount(c.long(), minlength=nc) + 1. 
# frequency @@ -356,8 +357,8 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary with warnings.catch_warnings(): warnings.simplefilter('ignore') # suppress jit trace warning loggers['tb'].add_graph(torch.jit.trace(de_parallel(model), imgs[0:1], strict=False), []) - elif plots and ni == 10 and wandb_logger.wandb: - wandb_logger.log({'Mosaics': [wandb_logger.wandb.Image(str(x), caption=x.name) for x in + elif plots and ni == 10 and loggers['wandb']: + wandb_logger.log({'Mosaics': [loggers['wandb'].Image(str(x), caption=x.name) for x in save_dir.glob('train*.jpg') if x.exists()]}) # end batch ------------------------------------------------------------------------------------------------ @@ -371,7 +372,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # mAP ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride', 'class_weights']) final_epoch = epoch + 1 == epochs - if not opt.notest or final_epoch: # Calculate mAP + if not notest or final_epoch: # Calculate mAP wandb_logger.current_epoch = epoch + 1 results, maps, _ = test.test(data_dict, batch_size=batch_size // WORLD_SIZE * 2, @@ -398,7 +399,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags): if loggers['tb']: loggers['tb'].add_scalar(tag, x, epoch) # TensorBoard - if wandb_logger.wandb: + if loggers['wandb']: wandb_logger.log({tag: x}) # W&B # Update best mAP @@ -408,7 +409,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary wandb_logger.end_epoch(best_result=best_fitness == fi) # Save model - if (not opt.nosave) or (final_epoch and not opt.evolve): # if save + if (not nosave) or (final_epoch and not evolve): # if save ckpt = {'epoch': epoch, 'best_fitness': best_fitness, 'training_results': results_file.read_text(), @@ -416,13 +417,13 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary 'ema': deepcopy(ema.ema).half(), 'updates': ema.updates, 'optimizer': optimizer.state_dict(), - 'wandb_id': wandb_logger.wandb_run.id if wandb_logger.wandb else None} + 'wandb_id': wandb_logger.wandb_run.id if loggers['wandb'] else None} # Save last, best and delete torch.save(ckpt, last) if best_fitness == fi: torch.save(ckpt, best) - if wandb_logger.wandb: + if loggers['wandb']: if ((epoch + 1) % opt.save_period == 0 and not final_epoch) and opt.save_period != -1: wandb_logger.log_model(last.parent, opt, epoch, fi, best_model=best_fitness == fi) del ckpt @@ -433,15 +434,15 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary logger.info(f'{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.\n') if plots: plot_results(save_dir=save_dir) # save as results.png - if wandb_logger.wandb: + if loggers['wandb']: files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]] - wandb_logger.log({"Results": [wandb_logger.wandb.Image(str(save_dir / f), caption=f) for f in files + wandb_logger.log({"Results": [loggers['wandb'].Image(str(save_dir / f), caption=f) for f in files if (save_dir / f).exists()]}) - if not opt.evolve: + if not evolve: if is_coco: # COCO dataset for m in [last, best] if best.exists() else [last]: # speed, mAP tests - results, _, _ = test.test(opt.data, + results, _, _ = test.test(data, batch_size=batch_size // WORLD_SIZE * 2, imgsz=imgsz_test, conf_thres=0.001, @@ -457,17 +458,17 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary for f in last, best: if f.exists(): strip_optimizer(f) # strip optimizers - if wandb_logger.wandb: # Log the stripped model - 
wandb_logger.wandb.log_artifact(str(best if best.exists() else last), type='model', - name='run_' + wandb_logger.wandb_run.id + '_model', - aliases=['latest', 'best', 'stripped']) + if loggers['wandb']: # Log the stripped model + loggers['wandb'].log_artifact(str(best if best.exists() else last), type='model', + name='run_' + wandb_logger.wandb_run.id + '_model', + aliases=['latest', 'best', 'stripped']) wandb_logger.finish_run() torch.cuda.empty_cache() return results -def parse_opt(): +def parse_opt(known=False): parser = argparse.ArgumentParser() parser.add_argument('--weights', type=str, default='yolov5s.pt', help='initial weights path') parser.add_argument('--cfg', type=str, default='', help='model.yaml path') @@ -503,7 +504,7 @@ def parse_opt(): parser.add_argument('--save_period', type=int, default=-1, help='Log model after every "save_period" epoch') parser.add_argument('--artifact_alias', type=str, default="latest", help='version of dataset artifact to be used') parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify') - opt = parser.parse_args() + opt = parser.parse_known_args()[0] if known else parser.parse_args() return opt @@ -633,6 +634,14 @@ def main(opt): f'Command to train a new model with these hyperparameters: $ python train.py --hyp {yaml_file}') +def run(**kwargs): + # Usage: import train; train.run(imgsz=320, weights='yolov5m.pt') + opt = parse_opt(True) + for k, v in kwargs.items(): + setattr(opt, k, v) + main(opt) + + if __name__ == "__main__": opt = parse_opt() main(opt) From e8810a53e83ddb5dd6bf8e871c2ede701007047c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 20 Jun 2021 17:15:42 +0200 Subject: [PATCH 105/757] Update DDP backend `if dist.is_nccl_available()` (#3705) --- train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/train.py b/train.py index fbda7320839a..19bd97faca1f 100644 --- a/train.py +++ b/train.py @@ -539,7 +539,7 @@ def main(opt): assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' torch.cuda.set_device(LOCAL_RANK) device = torch.device('cuda', LOCAL_RANK) - dist.init_process_group(backend="gloo", timeout=timedelta(seconds=60)) + dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo", timeout=timedelta(seconds=60)) assert opt.batch_size % WORLD_SIZE == 0, '--batch-size must be multiple of CUDA device count' assert not opt.image_weights, '--image-weights argument is not compatible with DDP training' From 75c0ff43af18d9d90b32ccfadd6029573b2a502a Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Mon, 21 Jun 2021 17:30:25 +0530 Subject: [PATCH 106/757] [x]W&B: Don't resume transfer learning runs (#3604) * Allow config change * Allow val change in wandb config * Don't resume transfer learning runs * Add entity in log dataset --- train.py | 1 + utils/wandb_logging/log_dataset.py | 2 ++ utils/wandb_logging/wandb_utils.py | 3 +-- 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/train.py b/train.py index 19bd97faca1f..67d835d60691 100644 --- a/train.py +++ b/train.py @@ -89,6 +89,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # W&B opt.hyp = hyp # add hyperparameters run_id = torch.load(weights).get('wandb_id') if weights.endswith('.pt') and os.path.isfile(weights) else None + run_id = run_id if opt.resume else None # start fresh run if transfer learning wandb_logger = WandbLogger(opt, save_dir.stem, run_id, data_dict) loggers['wandb'] = wandb_logger.wandb if loggers['wandb']: diff --git
a/utils/wandb_logging/log_dataset.py b/utils/wandb_logging/log_dataset.py index f45a23011f15..3a9a3d79fe01 100644 --- a/utils/wandb_logging/log_dataset.py +++ b/utils/wandb_logging/log_dataset.py @@ -18,6 +18,8 @@ def create_dataset_artifact(opt): parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path') parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset') parser.add_argument('--project', type=str, default='YOLOv5', help='name of W&B Project') + parser.add_argument('--entity', default=None, help='W&B entity') + opt = parser.parse_args() opt.resume = False # Explicitly disallow resume check for dataset upload job diff --git a/utils/wandb_logging/wandb_utils.py b/utils/wandb_logging/wandb_utils.py index 43b4c3d04e8e..d82633c7e2f6 100644 --- a/utils/wandb_logging/wandb_utils.py +++ b/utils/wandb_logging/wandb_utils.py @@ -126,8 +126,7 @@ def __init__(self, opt, name, run_id, data_dict, job_type='Training'): if not opt.resume: wandb_data_dict = self.check_and_upload_dataset(opt) if opt.upload_dataset else data_dict # Info useful for resuming from artifacts - self.wandb_run.config.opt = vars(opt) - self.wandb_run.config.data_dict = wandb_data_dict + self.wandb_run.config.update({'opt': vars(opt), 'data_dict': data_dict}, allow_val_change=True) self.data_dict = self.setup_training(opt, data_dict) if self.job_type == 'Dataset Creation': self.data_dict = self.check_and_upload_dataset(opt) From 1f69d1259183321205dcd7b6d884e798e8bdcf61 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 21 Jun 2021 17:25:04 +0200 Subject: [PATCH 107/757] Update 4 main ops for paths and .run() (#3715) * Add yolov5/ to path * rename functions to run() * cleanup * rename fix * CI fix * cleanup find models/export.py --- .github/workflows/ci-testing.yml | 2 +- .github/workflows/greetings.yml | 2 +- detect.py | 60 +++++++++++++++----------- models/export.py => export.py | 32 +++++++------- test.py | 72 ++++++++++++++++++-------------- train.py | 58 ++++++++++++++----------- tutorial.ipynb | 4 +- 7 files changed, 130 insertions(+), 100 deletions(-) rename models/export.py => export.py (89%) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 956199314726..20c1d5b026b0 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -74,5 +74,5 @@ jobs: python hubconf.py # hub python models/yolo.py --cfg ${{ matrix.model }}.yaml # inspect - python models/export.py --img 128 --batch 1 --weights ${{ matrix.model }}.pt # export + python export.py --img 128 --batch 1 --weights ${{ matrix.model }}.pt # export shell: bash diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index bbbe8e676f82..fdf1cfae8df5 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -52,5 +52,5 @@ jobs: ![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg) - If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. 
CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([test.py](https://github.com/ultralytics/yolov5/blob/master/test.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/models/export.py)) on MacOS, Windows, and Ubuntu every 24 hours and on every commit. + If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([test.py](https://github.com/ultralytics/yolov5/blob/master/test.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on MacOS, Windows, and Ubuntu every 24 hours and on every commit. diff --git a/detect.py b/detect.py index fb2d2702234d..808f3584c93d 100644 --- a/detect.py +++ b/detect.py @@ -1,4 +1,11 @@ +"""Run inference with a YOLOv5 model on images, videos, directories, streams + +Usage: + $ python path/to/detect.py --source path/to/img.jpg --weights yolov5s.pt --img 640 +""" + import argparse +import sys import time from pathlib import Path @@ -6,6 +13,9 @@ import torch import torch.backends.cudnn as cudnn +FILE = Path(__file__).absolute() +sys.path.append(FILE.parents[0].as_posix()) # add yolov5/ to path + from models.experimental import attempt_load from utils.datasets import LoadStreams, LoadImages from utils.general import check_img_size, check_requirements, check_imshow, colorstr, non_max_suppression, \ @@ -15,30 +25,30 @@ @torch.no_grad() -def detect(weights='yolov5s.pt', # model.pt path(s) - source='data/images', # file/dir/URL/glob, 0 for webcam - imgsz=640, # inference size (pixels) - conf_thres=0.25, # confidence threshold - iou_thres=0.45, # NMS IOU threshold - max_det=1000, # maximum detections per image - device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu - view_img=False, # show results - save_txt=False, # save results to *.txt - save_conf=False, # save confidences in --save-txt labels - save_crop=False, # save cropped prediction boxes - nosave=False, # do not save images/videos - classes=None, # filter by class: --class 0, or --class 0 2 3 - agnostic_nms=False, # class-agnostic NMS - augment=False, # augmented inference - update=False, # update all models - project='runs/detect', # save results to project/name - name='exp', # save results to project/name - exist_ok=False, # existing project/name ok, do not increment - line_thickness=3, # bounding box thickness (pixels) - hide_labels=False, # hide labels - hide_conf=False, # hide confidences - half=False, # use FP16 half-precision inference - ): +def run(weights='yolov5s.pt', # model.pt path(s) + source='data/images', # file/dir/URL/glob, 0 for webcam + imgsz=640, # inference size (pixels) + conf_thres=0.25, # confidence threshold + iou_thres=0.45, # NMS IOU threshold + max_det=1000, # maximum detections per image + device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu + view_img=False, # show results + save_txt=False, # save results to *.txt + save_conf=False, # save confidences in --save-txt labels + save_crop=False, # save cropped prediction boxes + nosave=False, # do not save images/videos + classes=None, # filter by class: --class 0, or --class 0 2 3 + agnostic_nms=False, # class-agnostic NMS + augment=False, # augmented inference + update=False, # update all models + project='runs/detect', # save results to project/name + name='exp', # save results to project/name + exist_ok=False, # existing project/name ok, do not increment + line_thickness=3, # bounding box thickness (pixels) + hide_labels=False, # hide labels + hide_conf=False, # hide confidences + half=False, # use FP16 half-precision inference + ): save_img = not nosave and not source.endswith('.txt') # save inference images webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith( ('rtsp://', 'rtmp://', 'http://', 'https://')) @@ -204,7 +214,7 @@ def parse_opt(): def main(opt): print(colorstr('detect: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items())) check_requirements(exclude=('tensorboard', 'thop')) - detect(**vars(opt)) + run(**vars(opt)) if __name__ == "__main__": diff --git a/models/export.py b/export.py similarity index 89% rename from models/export.py rename to export.py index 15d6a87ecea6..8f4000cdad39 100644 --- a/models/export.py +++ b/export.py @@ -1,7 +1,7 @@ """Export a YOLOv5 *.pt model to TorchScript, ONNX, CoreML formats Usage: - $ python path/to/models/export.py --weights yolov5s.pt --img 640 --batch 1 + $ python path/to/export.py --weights yolov5s.pt --img 640 --batch 1 """ import argparse @@ -14,7 +14,7 @@ from torch.utils.mobile_optimizer import optimize_for_mobile FILE = Path(__file__).absolute() -sys.path.append(FILE.parents[1].as_posix()) # add yolov5/ to path +sys.path.append(FILE.parents[0].as_posix()) # add yolov5/ to path from models.common import Conv from models.yolo import Detect @@ -24,19 +24,19 @@ from utils.torch_utils import select_device -def export(weights='./yolov5s.pt', # weights path - img_size=(640, 640), # image (height, width) - batch_size=1, # batch size - device='cpu', # cuda device, i.e. 0 or 0,1,2,3 or cpu - include=('torchscript', 'onnx', 'coreml'), # include formats - half=False, # FP16 half-precision export - inplace=False, # set YOLOv5 Detect() inplace=True - train=False, # model.train() mode - optimize=False, # TorchScript: optimize for mobile - dynamic=False, # ONNX: dynamic axes - simplify=False, # ONNX: simplify model - opset_version=12, # ONNX: opset version - ): +def run(weights='./yolov5s.pt', # weights path + img_size=(640, 640), # image (height, width) + batch_size=1, # batch size + device='cpu', # cuda device, i.e. 
0 or 0,1,2,3 or cpu + include=('torchscript', 'onnx', 'coreml'), # include formats + half=False, # FP16 half-precision export + inplace=False, # set YOLOv5 Detect() inplace=True + train=False, # model.train() mode + optimize=False, # TorchScript: optimize for mobile + dynamic=False, # ONNX: dynamic axes + simplify=False, # ONNX: simplify model + opset_version=12, # ONNX: opset version + ): t = time.time() include = [x.lower() for x in include] img_size *= 2 if len(img_size) == 1 else 1 # expand @@ -165,7 +165,7 @@ def parse_opt(): def main(opt): set_logging() print(colorstr('export: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items())) - export(**vars(opt)) + run(**vars(opt)) if __name__ == "__main__": diff --git a/test.py b/test.py index 1e82fd2d1611..0e0f01efa531 100644 --- a/test.py +++ b/test.py @@ -1,6 +1,13 @@ +"""Test a trained YOLOv5 model accuracy on a custom dataset + +Usage: + $ python path/to/test.py --data coco128.yaml --weights yolov5s.pt --img 640 +""" + import argparse import json import os +import sys from pathlib import Path from threading import Thread @@ -9,6 +16,9 @@ import yaml from tqdm import tqdm +FILE = Path(__file__).absolute() +sys.path.append(FILE.parents[0].as_posix()) # add yolov5/ to path + from models.experimental import attempt_load from utils.datasets import create_dataloader from utils.general import coco80_to_coco91_class, check_dataset, check_file, check_img_size, check_requirements, \ @@ -19,32 +29,32 @@ @torch.no_grad() -def test(data, - weights=None, # model.pt path(s) - batch_size=32, # batch size - imgsz=640, # inference size (pixels) - conf_thres=0.001, # confidence threshold - iou_thres=0.6, # NMS IoU threshold - task='val', # train, val, test, speed or study - device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu - single_cls=False, # treat as single-class dataset - augment=False, # augmented inference - verbose=False, # verbose output - save_txt=False, # save results to *.txt - save_hybrid=False, # save label+prediction hybrid results to *.txt - save_conf=False, # save confidences in --save-txt labels - save_json=False, # save a cocoapi-compatible JSON results file - project='runs/test', # save to project/name - name='exp', # save to project/name - exist_ok=False, # existing project/name ok, do not increment - half=True, # use FP16 half-precision inference - model=None, - dataloader=None, - save_dir=Path(''), - plots=True, - wandb_logger=None, - compute_loss=None, - ): +def run(data, + weights=None, # model.pt path(s) + batch_size=32, # batch size + imgsz=640, # inference size (pixels) + conf_thres=0.001, # confidence threshold + iou_thres=0.6, # NMS IoU threshold + task='val', # train, val, test, speed or study + device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu + single_cls=False, # treat as single-class dataset + augment=False, # augmented inference + verbose=False, # verbose output + save_txt=False, # save results to *.txt + save_hybrid=False, # save label+prediction hybrid results to *.txt + save_conf=False, # save confidences in --save-txt labels + save_json=False, # save a cocoapi-compatible JSON results file + project='runs/test', # save to project/name + name='exp', # save to project/name + exist_ok=False, # existing project/name ok, do not increment + half=True, # use FP16 half-precision inference + model=None, + dataloader=None, + save_dir=Path(''), + plots=True, + wandb_logger=None, + compute_loss=None, + ): # Initialize/load model and set device training = model is not None if training: # called by train.py @@ -327,12 +337,12 @@ def main(opt): check_requirements(exclude=('tensorboard', 'thop')) if opt.task in ('train', 'val', 'test'): # run normally - test(**vars(opt)) + run(**vars(opt)) elif opt.task == 'speed': # speed benchmarks for w in opt.weights if isinstance(opt.weights, list) else [opt.weights]: - test(opt.data, weights=w, batch_size=opt.batch_size, imgsz=opt.imgsz, conf_thres=.25, iou_thres=.45, - save_json=False, plots=False) + run(opt.data, weights=w, batch_size=opt.batch_size, imgsz=opt.imgsz, conf_thres=.25, iou_thres=.45, + save_json=False, plots=False) elif opt.task == 'study': # run over a range of settings and save/plot # python test.py --task study --data coco.yaml --iou 0.7 --weights yolov5s.pt yolov5m.pt yolov5l.pt yolov5x.pt @@ -342,8 +352,8 @@ def main(opt): y = [] # y axis for i in x: # img-size print(f'\nRunning {f} point {i}...') - r, _, t = test(opt.data, weights=w, batch_size=opt.batch_size, imgsz=i, conf_thres=opt.conf_thres, - iou_thres=opt.iou_thres, save_json=opt.save_json, plots=False) + r, _, t = run(opt.data, weights=w, batch_size=opt.batch_size, imgsz=i, conf_thres=opt.conf_thres, + iou_thres=opt.iou_thres, save_json=opt.save_json, plots=False) y.append(r + t) # results and times np.savetxt(f, y, fmt='%10.4g') # save os.system('zip -r study.zip study_*.txt') diff --git a/train.py b/train.py index 67d835d60691..05542b48bb59 100644 --- a/train.py +++ b/train.py @@ -1,8 +1,15 @@ +"""Train a YOLOv5 model on a custom dataset + +Usage: + $ python path/to/train.py --data coco128.yaml --weights yolov5s.pt --img 640 +""" + import argparse import logging import math import os import random +import sys import time import warnings from copy import deepcopy @@ -22,6 +29,9 @@ from torch.utils.tensorboard import SummaryWriter from tqdm import tqdm +FILE = Path(__file__).absolute() +sys.path.append(FILE.parents[0].as_posix()) # add yolov5/ to path + import test # for end-of-epoch mAP from models.experimental import attempt_load from models.yolo import Model @@ -89,7 +99,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # W&B opt.hyp = hyp # add hyperparameters run_id = torch.load(weights).get('wandb_id') if weights.endswith('.pt') and os.path.isfile(weights) else None - run_id = run_id if opt.resume else None # start fresh run if transfer learning + run_id = run_id if opt.resume else None # start fresh run if transfer learning wandb_logger = WandbLogger(opt, save_dir.stem, run_id, data_dict) loggers['wandb'] = wandb_logger.wandb if loggers['wandb']: @@ -375,18 +385,18 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary final_epoch = epoch + 1 == epochs if not notest or final_epoch: # Calculate mAP wandb_logger.current_epoch = epoch + 1 - results, maps, _ = test.test(data_dict, - 
batch_size=batch_size // WORLD_SIZE * 2, - imgsz=imgsz_test, - model=ema.ema, - single_cls=single_cls, - dataloader=testloader, - save_dir=save_dir, - save_json=is_coco and final_epoch, - verbose=nc < 50 and final_epoch, - plots=plots and final_epoch, - wandb_logger=wandb_logger, - compute_loss=compute_loss) + results, maps, _ = test.run(data_dict, + batch_size=batch_size // WORLD_SIZE * 2, + imgsz=imgsz_test, + model=ema.ema, + single_cls=single_cls, + dataloader=testloader, + save_dir=save_dir, + save_json=is_coco and final_epoch, + verbose=nc < 50 and final_epoch, + plots=plots and final_epoch, + wandb_logger=wandb_logger, + compute_loss=compute_loss) # Write with open(results_file, 'a') as f: @@ -443,17 +453,17 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary if not evolve: if is_coco: # COCO dataset for m in [last, best] if best.exists() else [last]: # speed, mAP tests - results, _, _ = test.test(data, - batch_size=batch_size // WORLD_SIZE * 2, - imgsz=imgsz_test, - conf_thres=0.001, - iou_thres=0.7, - model=attempt_load(m, device).half(), - single_cls=single_cls, - dataloader=testloader, - save_dir=save_dir, - save_json=True, - plots=False) + results, _, _ = test.run(data, + batch_size=batch_size // WORLD_SIZE * 2, + imgsz=imgsz_test, + conf_thres=0.001, + iou_thres=0.7, + model=attempt_load(m, device).half(), + single_cls=single_cls, + dataloader=testloader, + save_dir=save_dir, + save_json=True, + plots=False) # Strip optimizers for f in last, best: diff --git a/tutorial.ipynb b/tutorial.ipynb index 48780f94c856..b45b321b42e4 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -1125,7 +1125,7 @@ "\n", "![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg)\n", "\n", - "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([test.py](https://github.com/ultralytics/yolov5/blob/master/test.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/models/export.py)) on MacOS, Windows, and Ubuntu every 24 hours and on every commit.\n" + "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. 
CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([test.py](https://github.com/ultralytics/yolov5/blob/master/test.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on MacOS, Windows, and Ubuntu every 24 hours and on every commit.\n" ] }, { @@ -1212,7 +1212,7 @@ " done\n", " python hubconf.py # hub\n", " python models/yolo.py --cfg $m.yaml # inspect\n", - " python models/export.py --weights $m.pt --img 640 --batch 1 # export\n", + " python export.py --weights $m.pt --img 640 --batch 1 # export\n", "done" ], "execution_count": null, From b83e1a4adcf77ccafa72b22ade6cb3898ccb0e05 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 21 Jun 2021 22:50:56 +0200 Subject: [PATCH 108/757] Fix `img2label_paths()` order (#3720) * Fix `img2label_paths()` order * fix, 1 --- utils/datasets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/datasets.py b/utils/datasets.py index 93d6511ac658..25a7b2f67355 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -348,7 +348,7 @@ def __len__(self): def img2label_paths(img_paths): # Define label paths as a function of image paths sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings - return ['txt'.join(x.replace(sa, sb, 1).rsplit(x.split('.')[-1], 1)) for x in img_paths] + return [sb.join(x.rsplit(sa, 1)).rsplit('.', 1)[0] + '.txt' for x in img_paths] class LoadImagesAndLabels(Dataset): # for training/testing From 0e2d0d54d76698111a446c2499786a1f8df856af Mon Sep 17 00:00:00 2001 From: fcakyon <34196005+fcakyon@users.noreply.github.com> Date: Tue, 22 Jun 2021 14:33:38 +0300 Subject: [PATCH 109/757] Fix typo (#3729) --- export.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/export.py b/export.py index 8f4000cdad39..b7ff0748ba93 100644 --- a/export.py +++ b/export.py @@ -43,7 +43,7 @@ def run(weights='./yolov5s.pt', # weights path # Load PyTorch model device = select_device(device) - assert not (device.type == 'cpu' and opt.half), '--half only compatible with GPU export, i.e. use --device 0' + assert not (device.type == 'cpu' and half), '--half only compatible with GPU export, i.e. 
use --device 0' model = attempt_load(weights, map_location=device) # load FP32 model labels = model.names From 9ac7d388a99c2344c2e1ddeb495faccf586b7dc3 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 22 Jun 2021 13:50:47 +0200 Subject: [PATCH 110/757] Backwards compatible cache version checks (#3730) --- utils/datasets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/datasets.py b/utils/datasets.py index 25a7b2f67355..abb4a3650bfc 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -390,7 +390,7 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') # cached labels if cache_path.is_file(): cache, exists = torch.load(cache_path), True # load - if cache['version'] != 0.3 or cache['hash'] != get_hash(self.label_files + self.img_files): + if cache.get('version') != 0.3 or cache.get('hash') != get_hash(self.label_files + self.img_files): cache, exists = self.cache_labels(cache_path, prefix), False # re-cache else: cache, exists = self.cache_labels(cache_path, prefix), False # cache From 63060910a68bfde238872d629ab88e2e7bc736e8 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 22 Jun 2021 16:05:38 +0200 Subject: [PATCH 111/757] Update `check_datasets()` for dynamic unzip path (#3732) @KalenMike --- utils/general.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/utils/general.py b/utils/general.py index 3e3bd6997a7c..e39f2ac09ca3 100755 --- a/utils/general.py +++ b/utils/general.py @@ -223,16 +223,17 @@ def check_file(file): def check_dataset(data, autodownload=True): # Download dataset if not found locally val, s = data.get('val'), data.get('download') - if val and len(val): + if val: + root = Path(val).parts[0] + os.sep # unzip directory i.e. 
'../' val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path if not all(x.exists() for x in val): print('\nWARNING: Dataset not found, nonexistent paths: %s' % [str(x) for x in val if not x.exists()]) - if s and len(s) and autodownload: # download script + if s and autodownload: # download script if s.startswith('http') and s.endswith('.zip'): # URL f = Path(s).name # filename print(f'Downloading {s} ...') torch.hub.download_url_to_file(s, f) - r = os.system(f'unzip -q {f} -d ../ && rm {f}') # unzip + r = os.system(f'unzip -q {f} -d {root} && rm {f}') # unzip elif s.startswith('bash '): # bash script print(f'Running {s} ...') r = os.system(s) From fdc22398fa06a96d6c3f0114ca4bc08a246ae67a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 23 Jun 2021 12:49:38 +0200 Subject: [PATCH 112/757] Create `data/hyps` directory (#3747) --- .gitignore | 10 +--------- data/{ => hyps}/hyp.finetune.yaml | 0 data/{ => hyps}/hyp.finetune_objects365.yaml | 0 data/{ => hyps}/hyp.scratch.yaml | 0 train.py | 2 +- tutorial.ipynb | 2 +- 6 files changed, 3 insertions(+), 11 deletions(-) rename data/{ => hyps}/hyp.finetune.yaml (100%) rename data/{ => hyps}/hyp.finetune_objects365.yaml (100%) rename data/{ => hyps}/hyp.scratch.yaml (100%) diff --git a/.gitignore b/.gitignore index 91ce33fb931e..91299e263b86 100755 --- a/.gitignore +++ b/.gitignore @@ -19,26 +19,18 @@ *.avi *.data *.json - *.cfg !cfg/yolov3*.cfg storage.googleapis.com runs/* data/* +!data/hyps/* !data/images/zidane.jpg !data/images/bus.jpg -!data/coco.names -!data/coco_paper.names -!data/coco.data -!data/coco_*.data -!data/coco_*.txt -!data/trainvalno5k.shapes !data/*.sh -pycocotools/* results*.txt -gcp_test*.sh # Datasets ------------------------------------------------------------------------------------------------------------- coco/ diff --git a/data/hyp.finetune.yaml b/data/hyps/hyp.finetune.yaml similarity index 100% rename from data/hyp.finetune.yaml rename to data/hyps/hyp.finetune.yaml diff --git a/data/hyp.finetune_objects365.yaml b/data/hyps/hyp.finetune_objects365.yaml similarity index 100% rename from data/hyp.finetune_objects365.yaml rename to data/hyps/hyp.finetune_objects365.yaml diff --git a/data/hyp.scratch.yaml b/data/hyps/hyp.scratch.yaml similarity index 100% rename from data/hyp.scratch.yaml rename to data/hyps/hyp.scratch.yaml diff --git a/train.py b/train.py index 05542b48bb59..e934441d1182 100644 --- a/train.py +++ b/train.py @@ -484,7 +484,7 @@ def parse_opt(known=False): parser.add_argument('--weights', type=str, default='yolov5s.pt', help='initial weights path') parser.add_argument('--cfg', type=str, default='', help='model.yaml path') parser.add_argument('--data', type=str, default='data/coco128.yaml', help='dataset.yaml path') - parser.add_argument('--hyp', type=str, default='data/hyp.scratch.yaml', help='hyperparameters path') + parser.add_argument('--hyp', type=str, default='data/hyps/hyp.scratch.yaml', help='hyperparameters path') parser.add_argument('--epochs', type=int, default=300) parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs') parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='[train, test] image sizes') diff --git a/tutorial.ipynb b/tutorial.ipynb index b45b321b42e4..bcdbc014dfb4 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -932,7 +932,7 @@ "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", "YOLOv5 🚀 v5.0-158-g78cf488 torch 1.8.1+cu101 CUDA:0 
(Tesla V100-SXM2-16GB, 16160.5MB)\n",
 "\n",
- "Namespace(adam=False, artifact_alias='latest', batch_size=16, bbox_interval=-1, bucket='', cache_images=True, cfg='', data='./data/coco128.yaml', device='', entity=None, epochs=3, evolve=False, exist_ok=False, global_rank=-1, hyp='data/hyp.scratch.yaml', image_weights=False, img_size=[640, 640], label_smoothing=0.0, linear_lr=False, local_rank=-1, multi_scale=False, name='exp', noautoanchor=False, nosave=False, notest=False, project='runs/train', quad=False, rect=False, resume=False, save_dir='runs/train/exp', save_period=-1, single_cls=False, sync_bn=False, total_batch_size=16, upload_dataset=False, weights='yolov5s.pt', workers=8, world_size=1)\n",
+ "Namespace(adam=False, artifact_alias='latest', batch_size=16, bbox_interval=-1, bucket='', cache_images=True, cfg='', data='./data/coco128.yaml', device='', entity=None, epochs=3, evolve=False, exist_ok=False, global_rank=-1, hyp='data/hyps/hyp.scratch.yaml', image_weights=False, img_size=[640, 640], label_smoothing=0.0, linear_lr=False, local_rank=-1, multi_scale=False, name='exp', noautoanchor=False, nosave=False, notest=False, project='runs/train', quad=False, rect=False, resume=False, save_dir='runs/train/exp', save_period=-1, single_cls=False, sync_bn=False, total_batch_size=16, upload_dataset=False, weights='yolov5s.pt', workers=8, world_size=1)\n",
 "\u001b[34m\u001b[1mtensorboard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
 "2021-06-08 17:00:55.016221: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcudart.so.11.0\n",
 "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.2, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0\n",

From ae4261c7749ff644f45c66b79ecb1fff06437052 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Wed, 23 Jun 2021 12:56:22 +0200
Subject: [PATCH 113/757] Force non-zero hyp evolution weights `w` (#3748)

Fix for https://github.com/ultralytics/yolov5/issues/3741

---
 train.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/train.py b/train.py
index e934441d1182..ba84b432f660 100644
--- a/train.py
+++ b/train.py
@@ -608,7 +608,7 @@ def main(opt):
             x = np.loadtxt('evolve.txt', ndmin=2)
             n = min(5, len(x)) # number of previous results to consider
             x = x[np.argsort(-fitness(x))][:n] # top n mutations
-            w = fitness(x) - fitness(x).min() # weights
+            w = fitness(x) - fitness(x).min() + 1E-6 # weights (sum > 0)
             if parent == 'single' or len(x) == 1:
                 # x = x[random.randint(0, n - 1)] # random selection
                 x = x[random.choices(range(n), weights=w)[0]] # weighted selection

From 417a2f425cd553c1f2a41374a6cd2710aa91d5f0 Mon Sep 17 00:00:00 2001
From: Yonghye Kwon
Date: Thu, 24 Jun 2021 22:57:27 +0900
Subject: [PATCH 114/757] Edit comment (#3759)

edit comment

---
 utils/datasets.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/utils/datasets.py b/utils/datasets.py
index abb4a3650bfc..d3714d745b88 100755
--- a/utils/datasets.py
+++ b/utils/datasets.py
@@ -574,7 +574,7 @@ def __getitem__(self, index):
         labels_out[:, 1:] = torch.from_numpy(labels)
 
         # Convert
-        img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
+        img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to 
RGB, to 3 x img_height x img_width img = np.ascontiguousarray(img) return torch.from_numpy(img), labels_out, self.img_files[index], shapes From f79d7479daa5ed2af55159ab621be82fbbb8ef1a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 25 Jun 2021 01:25:03 +0200 Subject: [PATCH 115/757] Add optional dataset.yaml `path` attribute (#3753) * Add optional dataset.yaml `path` attribute @KalenMike * pass locals to python scripts * handle lists * update coco128.yaml * Capitalize first letter * add test key * finalize GlobalWheat2020.yaml * finalize objects365.yaml * finalize SKU-110K.yaml * finalize SKU-110K.yaml * finalize VisDrone.yaml * NoneType fix * update download comment * voc to VOC * update * update VOC.yaml * update VOC.yaml * remove dashes * delete get_voc.sh * force coco and coco128 to ../datasets * Capitalize Argoverse_HD.yaml * Capitalize Objects365.yaml * update Argoverse_HD.yaml * coco segments fix * VOC single-thread * update Argoverse_HD.yaml * update data_dict in test handling * create root --- data/Argoverse_HD.yaml | 66 ++++++++++++ data/GlobalWheat2020.yaml | 55 +++++----- data/{objects365.yaml => Objects365.yaml} | 23 +++-- data/SKU-110K.yaml | 29 +++--- data/VOC.yaml | 79 +++++++++++++++ data/VisDrone.yaml | 23 ++--- data/argoverse_hd.yaml | 21 ---- data/coco.yaml | 46 +++++---- data/coco128.yaml | 27 ++--- data/hyps/hyp.finetune.yaml | 2 +- data/scripts/get_argoverse_hd.sh | 61 ------------ data/scripts/get_voc.sh | 116 ---------------------- data/voc.yaml | 21 ---- test.py | 9 +- train.py | 2 +- tutorial.ipynb | 2 +- utils/general.py | 15 ++- 17 files changed, 268 insertions(+), 329 deletions(-) create mode 100644 data/Argoverse_HD.yaml rename data/{objects365.yaml => Objects365.yaml} (92%) create mode 100644 data/VOC.yaml delete mode 100644 data/argoverse_hd.yaml delete mode 100644 data/scripts/get_argoverse_hd.sh delete mode 100644 data/scripts/get_voc.sh delete mode 100644 data/voc.yaml diff --git a/data/Argoverse_HD.yaml b/data/Argoverse_HD.yaml new file mode 100644 index 000000000000..ad1a52254d74 --- /dev/null +++ b/data/Argoverse_HD.yaml @@ -0,0 +1,66 @@ +# Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ +# Train command: python train.py --data Argoverse_HD.yaml +# Default dataset location is next to YOLOv5: +# /parent +# /datasets/Argoverse +# /yolov5 + + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
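The optional `path` attribute this patch adds turns every `train`/`val`/`test` entry into a path relative to a single dataset root. A minimal sketch of those resolution semantics, mirroring the `check_dataset()` change further down in this same patch (`resolve_splits` is a hypothetical helper name, not part of the repo; run from a yolov5 checkout):

```python
from pathlib import Path

import yaml


def resolve_splits(data):
    # Prepend the optional 'path' root to each split, which may be a str or a list
    root = Path(data.get('path', ''))
    for k in ('train', 'val', 'test'):
        if data.get(k):
            data[k] = str(root / data[k]) if isinstance(data[k], str) else [str(root / x) for x in data[k]]
    return data


with open('data/Argoverse_HD.yaml') as f:
    d = resolve_splits(yaml.safe_load(f))
print(d['train'])  # ../datasets/Argoverse/Argoverse-1.1/images/train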
+path: ../datasets/Argoverse # dataset root dir
+train: Argoverse-1.1/images/train/ # train images (relative to 'path') 39384 images
+val: Argoverse-1.1/images/val/ # val images (relative to 'path') 15062 images
+test: Argoverse-1.1/images/test/ # test images (optional) https://eval.ai/web/challenges/challenge-page/800/overview
+
+# Classes
+nc: 8 # number of classes
+names: [ 'person', 'bicycle', 'car', 'motorcycle', 'bus', 'truck', 'traffic_light', 'stop_sign' ] # class names
+
+
+# Download script/URL (optional) ---------------------------------------------------------------------------------------
+download: |
+  import json
+
+  from tqdm import tqdm
+  from utils.general import download, Path
+
+
+  def argoverse2yolo(set):
+      labels = {}
+      a = json.load(open(set, "rb"))
+      for annot in tqdm(a['annotations'], desc=f"Converting {set} to YOLOv5 format..."):
+          img_id = annot['image_id']
+          img_name = a['images'][img_id]['name']
+          img_label_name = img_name[:-3] + "txt"
+
+          cls = annot['category_id'] # instance class id
+          x_center, y_center, width, height = annot['bbox']
+          x_center = (x_center + width / 2) / 1920.0 # offset and scale
+          y_center = (y_center + height / 2) / 1200.0 # offset and scale
+          width /= 1920.0 # scale
+          height /= 1200.0 # scale
+
+          img_dir = set.parents[2] / 'Argoverse-1.1' / 'labels' / a['seq_dirs'][a['images'][annot['image_id']]['sid']]
+          if not img_dir.exists():
+              img_dir.mkdir(parents=True, exist_ok=True)
+
+          k = str(img_dir / img_label_name)
+          if k not in labels:
+              labels[k] = []
+          labels[k].append(f"{cls} {x_center} {y_center} {width} {height}\n")
+
+      for k in labels:
+          with open(k, "w") as f:
+              f.writelines(labels[k])
+
+
+  # Download
+  dir = Path('../datasets/Argoverse') # dataset root dir
+  urls = ['https://argoverse-hd.s3.us-east-2.amazonaws.com/Argoverse-HD-Full.zip']
+  download(urls, dir=dir, delete=False)
+
+  # Convert
+  annotations_dir = 'Argoverse-HD/annotations/'
+  (dir / 'Argoverse-1.1' / 'tracking').rename(dir / 'Argoverse-1.1' / 'images') # rename 'tracking' to 'images'
+  for d in "train.json", "val.json":
+      argoverse2yolo(dir / annotations_dir / d) # convert Argoverse annotations to YOLO labels
diff --git a/data/GlobalWheat2020.yaml b/data/GlobalWheat2020.yaml
index f45182b43e25..b77534944ed7 100644
--- a/data/GlobalWheat2020.yaml
+++ b/data/GlobalWheat2020.yaml
@@ -1,43 +1,40 @@
 # Global Wheat 2020 dataset http://www.global-wheat.com/
 # Train command: python train.py --data GlobalWheat2020.yaml
 # Default dataset location is next to YOLOv5:
-# /parent_folder
+# /parent
 #   /datasets/GlobalWheat2020
 #   /yolov5
 
-# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/]
-train: # 3422 images
-  - ../datasets/GlobalWheat2020/images/arvalis_1
-  - ../datasets/GlobalWheat2020/images/arvalis_2
-  - ../datasets/GlobalWheat2020/images/arvalis_3
-  - ../datasets/GlobalWheat2020/images/ethz_1
-  - ../datasets/GlobalWheat2020/images/rres_1
-  - ../datasets/GlobalWheat2020/images/inrae_1
-  - ../datasets/GlobalWheat2020/images/usask_1
-
-val: # 748 images (WARNING: train set contains ethz_1)
-  - ../datasets/GlobalWheat2020/images/ethz_1
-
-test: # 1276 images
-  - ../datasets/GlobalWheat2020/images/utokyo_1
-  - ../datasets/GlobalWheat2020/images/utokyo_2
-  - ../datasets/GlobalWheat2020/images/nau_1
-  - ../datasets/GlobalWheat2020/images/uq_1
-
-# number of classes
-nc: 1
-
-# class names
-names: [ 'wheat_head' ]
-
-
-# download command/URL (optional) 
-------------------------------------------------------------------------------------- +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] +path: ../datasets/GlobalWheat2020 # dataset root dir +train: # train images (relative to 'path') 3422 images + - images/arvalis_1 + - images/arvalis_2 + - images/arvalis_3 + - images/ethz_1 + - images/rres_1 + - images/inrae_1 + - images/usask_1 +val: # val images (relative to 'path') 748 images (WARNING: train set contains ethz_1) + - images/ethz_1 +test: # test images (optional) 1276 images + - images/utokyo_1 + - images/utokyo_2 + - images/nau_1 + - images/uq_1 + +# Classes +nc: 1 # number of classes +names: [ 'wheat_head' ] # class names + + +# Download script/URL (optional) --------------------------------------------------------------------------------------- download: | from utils.general import download, Path # Download - dir = Path('../datasets/GlobalWheat2020') # dataset directory + dir = Path(yaml['path']) # dataset root dir urls = ['https://zenodo.org/record/4298502/files/global-wheat-codalab-official.zip', 'https://github.com/ultralytics/yolov5/releases/download/v1.0/GlobalWheat2020_labels.zip'] download(urls, dir=dir) diff --git a/data/objects365.yaml b/data/Objects365.yaml similarity index 92% rename from data/objects365.yaml rename to data/Objects365.yaml index eb99995903cf..e365c82cab08 100644 --- a/data/objects365.yaml +++ b/data/Objects365.yaml @@ -1,18 +1,19 @@ # Objects365 dataset https://www.objects365.org/ -# Train command: python train.py --data objects365.yaml +# Train command: python train.py --data Objects365.yaml # Default dataset location is next to YOLOv5: -# /parent_folder -# /datasets/objects365 +# /parent +# /datasets/Objects365 # /yolov5 -# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/] -train: ../datasets/objects365/images/train # 1742289 images -val: ../datasets/objects365/images/val # 5570 images -# number of classes -nc: 365 +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
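The `dir = Path(yaml['path'])` lines above work because these embedded scripts never run standalone: `check_dataset()` (patched later in this series) `exec`s them with the parsed dataset dict injected under the name `yaml`. A sketch of that dispatch, with the filename chosen only for illustration:

```python
import yaml as pyyaml  # the real PyYAML module, aliased to avoid clashing with the injected name

with open('data/GlobalWheat2020.yaml') as f:
    data = pyyaml.safe_load(f)

s = data.get('download')
if s and not (s.startswith('http') or s.startswith('bash ')):
    exec(s, {'yaml': data})  # the script can now read yaml['path'], yaml['names'], ...
```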
+path: ../datasets/Objects365 # dataset root dir +train: images/train # train images (relative to 'path') 1742289 images +val: images/val # val images (relative to 'path') 5570 images +test: # test images (optional) -# class names +# Classes +nc: 365 # number of classes names: [ 'Person', 'Sneakers', 'Chair', 'Other Shoes', 'Hat', 'Car', 'Lamp', 'Glasses', 'Bottle', 'Desk', 'Cup', 'Street Lights', 'Cabinet/shelf', 'Handbag/Satchel', 'Bracelet', 'Plate', 'Picture/Frame', 'Helmet', 'Book', 'Gloves', 'Storage box', 'Boat', 'Leather Shoes', 'Flower', 'Bench', 'Potted Plant', 'Bowl/Basin', 'Flag', @@ -56,7 +57,7 @@ names: [ 'Person', 'Sneakers', 'Chair', 'Other Shoes', 'Hat', 'Car', 'Lamp', 'Gl 'Chainsaw', 'Eraser', 'Lobster', 'Durian', 'Okra', 'Lipstick', 'Cosmetics Mirror', 'Curling', 'Table Tennis' ] -# download command/URL (optional) -------------------------------------------------------------------------------------- +# Download script/URL (optional) --------------------------------------------------------------------------------------- download: | from pycocotools.coco import COCO from tqdm import tqdm @@ -64,7 +65,7 @@ download: | from utils.general import download, Path # Make Directories - dir = Path('../datasets/objects365') # dataset directory + dir = Path(yaml['path']) # dataset root dir for p in 'images', 'labels': (dir / p).mkdir(parents=True, exist_ok=True) for q in 'train', 'val': diff --git a/data/SKU-110K.yaml b/data/SKU-110K.yaml index a8c1f25b385a..7087bb9c2893 100644 --- a/data/SKU-110K.yaml +++ b/data/SKU-110K.yaml @@ -1,39 +1,38 @@ # SKU-110K retail items dataset https://github.com/eg4000/SKU110K_CVPR19 # Train command: python train.py --data SKU-110K.yaml # Default dataset location is next to YOLOv5: -# /parent_folder +# /parent # /datasets/SKU-110K # /yolov5 -# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/] -train: ../datasets/SKU-110K/train.txt # 8219 images -val: ../datasets/SKU-110K/val.txt # 588 images -test: ../datasets/SKU-110K/test.txt # 2936 images +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
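The Objects365 converter above walks COCO-format annotations with pycocotools; the only box math it needs is pixel top-left `xywh` to normalized center `xywh`. A self-contained check of that conversion (`coco_box_to_yolo` is a hypothetical name and the values are illustrative):

```python
def coco_box_to_yolo(box, img_w, img_h):
    # COCO boxes are [top-left x, top-left y, width, height] in pixels;
    # YOLO labels want [x-center, y-center, width, height] normalized to 0-1
    x, y, w, h = box
    return (x + w / 2) / img_w, (y + h / 2) / img_h, w / img_w, h / img_h


print(coco_box_to_yolo([100, 200, 50, 80], img_w=640, img_h=480))
# -> (0.1953125, 0.5, 0.078125, 0.1666...)
```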
+path: ../datasets/SKU-110K # dataset root dir +train: train.txt # train images (relative to 'path') 8219 images +val: val.txt # val images (relative to 'path') 588 images +test: test.txt # test images (optional) 2936 images -# number of classes -nc: 1 +# Classes +nc: 1 # number of classes +names: [ 'object' ] # class names -# class names -names: [ 'object' ] - -# download command/URL (optional) -------------------------------------------------------------------------------------- +# Download script/URL (optional) --------------------------------------------------------------------------------------- download: | import shutil from tqdm import tqdm from utils.general import np, pd, Path, download, xyxy2xywh # Download - datasets = Path('../datasets') # download directory + dir = Path(yaml['path']) # dataset root dir + parent = Path(dir.parent) # download dir urls = ['http://trax-geometry.s3.amazonaws.com/cvpr_challenge/SKU110K_fixed.tar.gz'] - download(urls, dir=datasets, delete=False) + download(urls, dir=parent, delete=False) # Rename directories - dir = (datasets / 'SKU-110K') if dir.exists(): shutil.rmtree(dir) - (datasets / 'SKU110K_fixed').rename(dir) # rename dir + (parent / 'SKU110K_fixed').rename(dir) # rename dir (dir / 'labels').mkdir(parents=True, exist_ok=True) # create labels dir # Convert labels diff --git a/data/VOC.yaml b/data/VOC.yaml new file mode 100644 index 000000000000..3d878fa67a60 --- /dev/null +++ b/data/VOC.yaml @@ -0,0 +1,79 @@ +# PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC/ +# Train command: python train.py --data VOC.yaml +# Default dataset location is next to YOLOv5: +# /parent +# /datasets/VOC +# /yolov5 + + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] +path: ../datasets/VOC +train: # train images (relative to 'path') 16551 images + - images/train2012 + - images/train2007 + - images/val2012 + - images/val2007 +val: # val images (relative to 'path') 4952 images + - images/test2007 +test: # test images (optional) + - images/test2007 + +# Classes +nc: 20 # number of classes +names: [ 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', + 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor' ] # class names + + +# Download script/URL (optional) --------------------------------------------------------------------------------------- +download: | + import xml.etree.ElementTree as ET + + from tqdm import tqdm + from utils.general import download, Path + + + def convert_label(path, lb_path, year, image_id): + def convert_box(size, box): + dw, dh = 1. / size[0], 1. 
/ size[1] + x, y, w, h = (box[0] + box[1]) / 2.0 - 1, (box[2] + box[3]) / 2.0 - 1, box[1] - box[0], box[3] - box[2] + return x * dw, y * dh, w * dw, h * dh + + in_file = open(path / f'VOC{year}/Annotations/{image_id}.xml') + out_file = open(lb_path, 'w') + tree = ET.parse(in_file) + root = tree.getroot() + size = root.find('size') + w = int(size.find('width').text) + h = int(size.find('height').text) + + for obj in root.iter('object'): + cls = obj.find('name').text + if cls in yaml['names'] and not int(obj.find('difficult').text) == 1: + xmlbox = obj.find('bndbox') + bb = convert_box((w, h), [float(xmlbox.find(x).text) for x in ('xmin', 'xmax', 'ymin', 'ymax')]) + cls_id = yaml['names'].index(cls) # class id + out_file.write(" ".join([str(a) for a in (cls_id, *bb)]) + '\n') + + + # Download + dir = Path(yaml['path']) # dataset root dir + url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/' + urls = [url + 'VOCtrainval_06-Nov-2007.zip', # 446MB, 5012 images + url + 'VOCtest_06-Nov-2007.zip', # 438MB, 4953 images + url + 'VOCtrainval_11-May-2012.zip'] # 1.95GB, 17126 images + download(urls, dir=dir / 'images', delete=False) + + # Convert + path = dir / f'images/VOCdevkit' + for year, image_set in ('2012', 'train'), ('2012', 'val'), ('2007', 'train'), ('2007', 'val'), ('2007', 'test'): + imgs_path = dir / 'images' / f'{image_set}{year}' + lbs_path = dir / 'labels' / f'{image_set}{year}' + imgs_path.mkdir(exist_ok=True, parents=True) + lbs_path.mkdir(exist_ok=True, parents=True) + + image_ids = open(path / f'VOC{year}/ImageSets/Main/{image_set}.txt').read().strip().split() + for id in tqdm(image_ids, desc=f'{image_set}{year}'): + f = path / f'VOC{year}/JPEGImages/{id}.jpg' # old img path + lb_path = (lbs_path / f.name).with_suffix('.txt') # new label path + f.rename(imgs_path / f.name) # move image + convert_label(path, lb_path, year, id) # convert labels to YOLO format diff --git a/data/VisDrone.yaml b/data/VisDrone.yaml index c4603b200132..c1cd38d1e10f 100644 --- a/data/VisDrone.yaml +++ b/data/VisDrone.yaml @@ -1,24 +1,23 @@ # VisDrone2019-DET dataset https://github.com/VisDrone/VisDrone-Dataset # Train command: python train.py --data VisDrone.yaml # Default dataset location is next to YOLOv5: -# /parent_folder -# /VisDrone +# /parent +# /datasets/VisDrone # /yolov5 -# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/] -train: ../VisDrone/VisDrone2019-DET-train/images # 6471 images -val: ../VisDrone/VisDrone2019-DET-val/images # 548 images -test: ../VisDrone/VisDrone2019-DET-test-dev/images # 1610 images +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
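Note that `convert_label()` above feeds `convert_box()` boxes in `(xmin, xmax, ymin, ymax)` order rather than the usual `xyxy`. A quick numeric sanity check of the same function, copied from the VOC.yaml download script:

```python
def convert_box(size, box):  # copied from the VOC.yaml download script above
    dw, dh = 1. / size[0], 1. / size[1]
    x, y, w, h = (box[0] + box[1]) / 2.0 - 1, (box[2] + box[3]) / 2.0 - 1, box[1] - box[0], box[3] - box[2]
    return x * dw, y * dh, w * dw, h * dh


# 640x480 image, box spanning x 100-300 and y 120-360
print(convert_box((640, 480), [100, 300, 120, 360]))
# -> approximately (0.3109, 0.4979, 0.3125, 0.5) as normalized cx, cy, w, h
```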
+path: ../datasets/VisDrone # dataset root dir +train: VisDrone2019-DET-train/images # train images (relative to 'path') 6471 images +val: VisDrone2019-DET-val/images # val images (relative to 'path') 548 images +test: VisDrone2019-DET-test-dev/images # test images (optional) 1610 images -# number of classes -nc: 10 - -# class names +# Classes +nc: 10 # number of classes names: [ 'pedestrian', 'people', 'bicycle', 'car', 'van', 'truck', 'tricycle', 'awning-tricycle', 'bus', 'motor' ] -# download command/URL (optional) -------------------------------------------------------------------------------------- +# Download script/URL (optional) --------------------------------------------------------------------------------------- download: | from utils.general import download, os, Path @@ -49,7 +48,7 @@ download: | # Download - dir = Path('../VisDrone') # dataset directory + dir = Path(yaml['path']) # dataset root dir urls = ['https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-train.zip', 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-val.zip', 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-dev.zip', diff --git a/data/argoverse_hd.yaml b/data/argoverse_hd.yaml deleted file mode 100644 index 0ba314d82ce1..000000000000 --- a/data/argoverse_hd.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ -# Train command: python train.py --data argoverse_hd.yaml -# Default dataset location is next to YOLOv5: -# /parent_folder -# /argoverse -# /yolov5 - - -# download command/URL (optional) -download: bash data/scripts/get_argoverse_hd.sh - -# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/] -train: ../argoverse/Argoverse-1.1/images/train/ # 39384 images -val: ../argoverse/Argoverse-1.1/images/val/ # 15062 iamges -test: ../argoverse/Argoverse-1.1/images/test/ # Submit to: https://eval.ai/web/challenges/challenge-page/800/overview - -# number of classes -nc: 8 - -# class names -names: [ 'person', 'bicycle', 'car', 'motorcycle', 'bus', 'truck', 'traffic_light', 'stop_sign' ] diff --git a/data/coco.yaml b/data/coco.yaml index f818a49ff0fa..c6053c984bc0 100644 --- a/data/coco.yaml +++ b/data/coco.yaml @@ -1,23 +1,19 @@ # COCO 2017 dataset http://cocodataset.org # Train command: python train.py --data coco.yaml # Default dataset location is next to YOLOv5: -# /parent_folder -# /coco +# /parent +# /datasets/coco # /yolov5 -# download command/URL (optional) -download: bash data/scripts/get_coco.sh +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
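The VisDrone converter body is elided from the hunk above, but VisDrone's raw annotations are CSV rows of `left,top,width,height,score,category,truncation,occlusion`. A rough per-row sketch under that format assumption (`visdrone_row_to_yolo` is a hypothetical name; score 0 marks VisDrone's 'ignored regions'):

```python
def visdrone_row_to_yolo(row, img_w, img_h):
    left, top, w, h, score, cat = (int(x) for x in row[:6])
    if score == 0:  # 'ignored regions' carry score 0 and are skipped
        return None
    cx, cy = (left + w / 2) / img_w, (top + h / 2) / img_h
    return f'{cat - 1} {cx:.6f} {cy:.6f} {w / img_w:.6f} {h / img_h:.6f}'  # class ids shift down by one


print(visdrone_row_to_yolo('684,8,273,116,0,0,0,0'.split(','), 1920, 1080))  # None (ignored region)
print(visdrone_row_to_yolo('554,604,34,111,1,4,0,0'.split(','), 1920, 1080))  # '3 0.297396 0.610648 ...'
```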
+path: ../datasets/coco # dataset root dir
+train: train2017.txt # train images (relative to 'path') 118287 images
+val: val2017.txt # val images (relative to 'path') 5000 images
+test: test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794
 
-# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/]
-train: ../coco/train2017.txt # 118287 images
-val: ../coco/val2017.txt # 5000 images
-test: ../coco/test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794
-
-# number of classes
-nc: 80
-
-# class names
+# Classes
+nc: 80 # number of classes
 names: [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
         'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
         'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
@@ -26,10 +22,22 @@ names: [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', '
         'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
         'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
         'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-        'hair drier', 'toothbrush' ]
+        'hair drier', 'toothbrush' ] # class names
+
+
+# Download script/URL (optional)
+download: |
+  from utils.general import download, Path
+
+  # Download labels
+  segments = False # segment or box labels
+  dir = Path(yaml['path']) # dataset root dir
+  url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/'
+  urls = [url + ('coco2017labels-segments.zip' if segments else 'coco2017labels.zip')] # labels
+  download(urls, dir=dir.parent)
 
-# Print classes
-# with open('data/coco.yaml') as f:
-#   d = yaml.safe_load(f) # dict
-#   for i, x in enumerate(d['names']):
-#     print(i, x)
+  # Download data
+  urls = ['http://images.cocodataset.org/zips/train2017.zip', # 19G, 118k images
+          'http://images.cocodataset.org/zips/val2017.zip', # 1G, 5k images
+          'http://images.cocodataset.org/zips/test2017.zip'] # 7G, 41k images (optional)
+  download(urls, dir=dir / 'images', threads=3)
diff --git a/data/coco128.yaml b/data/coco128.yaml
index 83fbc29d3404..e70ad687dd88 100644
--- a/data/coco128.yaml
+++ b/data/coco128.yaml
@@ -1,22 +1,19 @@
 # COCO 2017 dataset http://cocodataset.org - first 128 training images
 # Train command: python train.py --data coco128.yaml
 # Default dataset location is next to YOLOv5:
-# /parent_folder
-# /coco128
+# /parent
+# /datasets/coco128
 # /yolov5
 
-# download command/URL (optional)
-download: https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip
 
+# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
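The COCO script above relies on `download(..., threads=3)` for parallel fetching. A simplified approximation of that helper (the real one lives in `utils/general.py`; only the unzip fragments visible in this series are verbatim, the rest is reconstructed):

```python
import os
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path

import torch


def download(urls, dir='.', unzip=True, delete=True, threads=1):
    # Multi-threaded file download and (optional) unzip, as the dataset scripts use it
    def download_one(url, dir):
        f = dir / Path(url).name  # local target file
        if not f.exists():
            print(f'Downloading {url} to {f}...')
            torch.hub.download_url_to_file(url, str(f), progress=True)
        if unzip and f.suffix in ('.zip', '.gz'):
            print(f'Unzipping {f}...')
            s = f'unzip -qo {f} -d {dir}' if f.suffix == '.zip' else f'tar xfz {f} --directory {f.parent}'
            if delete:
                s += f' && rm {f}'
            os.system(s)

    dir = Path(dir)
    dir.mkdir(parents=True, exist_ok=True)
    if threads > 1:
        pool = ThreadPool(threads)
        pool.imap(lambda x: download_one(*x), zip(urls, repeat(dir)))  # multi-threaded
        pool.close()
        pool.join()
    else:
        for u in [urls] if isinstance(urls, (str, Path)) else urls:
            download_one(u, dir)
```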
+path: ../datasets/coco128 # dataset root dir +train: images/train2017 # train images (relative to 'path') 128 images +val: images/train2017 # val images (relative to 'path') 128 images +test: # test images (optional) -# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/] -train: ../coco128/images/train2017/ # 128 images -val: ../coco128/images/train2017/ # 128 images - -# number of classes -nc: 80 - -# class names +# Classes +nc: 80 # number of classes names: [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', @@ -25,4 +22,8 @@ names: [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', ' 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', - 'hair drier', 'toothbrush' ] + 'hair drier', 'toothbrush' ] # class names + + +# Download script/URL (optional) +download: https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip \ No newline at end of file diff --git a/data/hyps/hyp.finetune.yaml b/data/hyps/hyp.finetune.yaml index 1b84cff95c2c..a77597741356 100644 --- a/data/hyps/hyp.finetune.yaml +++ b/data/hyps/hyp.finetune.yaml @@ -1,5 +1,5 @@ # Hyperparameters for VOC finetuning -# python train.py --batch 64 --weights yolov5m.pt --data voc.yaml --img 512 --epochs 50 +# python train.py --batch 64 --weights yolov5m.pt --data VOC.yaml --img 512 --epochs 50 # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials diff --git a/data/scripts/get_argoverse_hd.sh b/data/scripts/get_argoverse_hd.sh deleted file mode 100644 index 331509914568..000000000000 --- a/data/scripts/get_argoverse_hd.sh +++ /dev/null @@ -1,61 +0,0 @@ -#!/bin/bash -# Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ -# Download command: bash data/scripts/get_argoverse_hd.sh -# Train command: python train.py --data argoverse_hd.yaml -# Default dataset location is next to YOLOv5: -# /parent_folder -# /argoverse -# /yolov5 - -# Download/unzip images -d='../argoverse/' # unzip directory -mkdir $d -url=https://argoverse-hd.s3.us-east-2.amazonaws.com/ -f=Argoverse-HD-Full.zip -curl -L $url$f -o $f && unzip -q $f -d $d && rm $f &# download, unzip, remove in background -wait # finish background tasks - -cd ../argoverse/Argoverse-1.1/ -ln -s tracking images - -cd ../Argoverse-HD/annotations/ - -python3 - "$@" <train.txt -cat 2007_train.txt 2007_val.txt 2007_test.txt 2012_train.txt 2012_val.txt >train.all.txt - -mkdir ../VOC ../VOC/images ../VOC/images/train ../VOC/images/val -mkdir ../VOC/labels ../VOC/labels/train ../VOC/labels/val - -python3 - "$@" < 1: # model = nn.DataParallel(model) + # Data + with open(data) as f: + data = yaml.safe_load(f) + check_dataset(data) # check + # Half half &= device.type != 'cpu' # half precision only supported on CUDA if half: @@ -83,10 +88,6 @@ def run(data, # Configure model.eval() - if isinstance(data, str): - with open(data) as f: - data = yaml.safe_load(f) - check_dataset(data) # check is_coco = 
type(data['val']) is str and data['val'].endswith('coco/val2017.txt') # COCO dataset nc = 1 if single_cls else int(data['nc']) # number of classes iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95 diff --git a/train.py b/train.py index ba84b432f660..6b04e8ff3a6a 100644 --- a/train.py +++ b/train.py @@ -453,7 +453,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary if not evolve: if is_coco: # COCO dataset for m in [last, best] if best.exists() else [last]: # speed, mAP tests - results, _, _ = test.run(data, + results, _, _ = test.run(data_dict, batch_size=batch_size // WORLD_SIZE * 2, imgsz=imgsz_test, conf_thres=0.001, diff --git a/tutorial.ipynb b/tutorial.ipynb index bcdbc014dfb4..d136803659fb 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -1255,7 +1255,7 @@ "source": [ "# VOC\n", "for b, m in zip([64, 48, 32, 16], ['yolov5s', 'yolov5m', 'yolov5l', 'yolov5x']): # zip(batch_size, model)\n", - " !python train.py --batch {b} --weights {m}.pt --data voc.yaml --epochs 50 --cache --img 512 --nosave --hyp hyp.finetune.yaml --project VOC --name {m}" + " !python train.py --batch {b} --weights {m}.pt --data VOC.yaml --epochs 50 --cache --img 512 --nosave --hyp hyp.finetune.yaml --project VOC --name {m}" ], "execution_count": null, "outputs": [] diff --git a/utils/general.py b/utils/general.py index e39f2ac09ca3..555975f07c5d 100755 --- a/utils/general.py +++ b/utils/general.py @@ -222,9 +222,14 @@ def check_file(file): def check_dataset(data, autodownload=True): # Download dataset if not found locally - val, s = data.get('val'), data.get('download') + path = Path(data.get('path', '')) # optional 'path' field + if path: + for k in 'train', 'val', 'test': + if data.get(k): # prepend path + data[k] = str(path / data[k]) if isinstance(data[k], str) else [str(path / x) for x in data[k]] + + train, val, test, s = [data.get(x) for x in ('train', 'val', 'test', 'download')] if val: - root = Path(val).parts[0] + os.sep # unzip directory i.e. '../' val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path if not all(x.exists() for x in val): print('\nWARNING: Dataset not found, nonexistent paths: %s' % [str(x) for x in val if not x.exists()]) @@ -233,12 +238,14 @@ def check_dataset(data, autodownload=True): f = Path(s).name # filename print(f'Downloading {s} ...') torch.hub.download_url_to_file(s, f) + root = path.parent if 'path' in data else '..' # unzip directory i.e. 
'../' + Path(root).mkdir(parents=True, exist_ok=True) # create root r = os.system(f'unzip -q {f} -d {root} && rm {f}') # unzip elif s.startswith('bash '): # bash script print(f'Running {s} ...') r = os.system(s) else: # python script - r = exec(s) # return None + r = exec(s, {'yaml': data}) # return None print('Dataset autodownload %s\n' % ('success' if r in (0, None) else 'failure')) # print result else: raise Exception('Dataset not found.') @@ -258,7 +265,7 @@ def download_one(url, dir): if unzip and f.suffix in ('.zip', '.gz'): print(f'Unzipping {f}...') if f.suffix == '.zip': - s = f'unzip -qo {f} -d {dir} && rm {f}' # unzip -quiet -overwrite + s = f'unzip -qo {f} -d {dir}' # unzip -quiet -overwrite elif f.suffix == '.gz': s = f'tar xfz {f} --directory {f.parent}' # unzip if delete: # delete zip file after unzip From 03281f8c7613ad808d7d356f0195152b2d46ab99 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 25 Jun 2021 01:55:53 +0200 Subject: [PATCH 116/757] COCO annotations JSON fix (#3764) --- test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test.py b/test.py index 31d57221a3d5..643dc441e521 100644 --- a/test.py +++ b/test.py @@ -270,7 +270,7 @@ def run(data, # Save JSON if save_json and len(jdict): w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights - anno_json = '../coco/annotations/instances_val2017.json' # annotations json + anno_json = str(Path(data.get('path', '../coco')) / 'annotations/instances_val2017.json') # annotations json pred_json = str(save_dir / f"{w}_predictions.json") # predictions json print('\nEvaluating pycocotools mAP... saving %s...' % pred_json) with open(pred_json, 'w') as f: From 374957317a5469742b24291caa52dedfd9d31c99 Mon Sep 17 00:00:00 2001 From: Yonghye Kwon Date: Fri, 25 Jun 2021 18:47:46 +0900 Subject: [PATCH 117/757] Add `xyxy2xywhn()` (#3765) * Edit Comments for numpy2torch tensor process Edit Comments for numpy2torch tensor process * add xyxy2xywhn add xyxy2xywhn * add xyxy2xywhn * formatting * pass arguments pass arguments * edit comment for xyxy2xywhn() edit comment for xyxy2xywhn() * cleanup datasets.py Co-authored-by: Glenn Jocher --- utils/datasets.py | 14 ++++++-------- utils/general.py | 10 ++++++++++ 2 files changed, 16 insertions(+), 8 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index d3714d745b88..eac0c7834308 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -23,8 +23,8 @@ from torch.utils.data import Dataset from tqdm import tqdm -from utils.general import check_requirements, check_file, check_dataset, xyxy2xywh, xywh2xyxy, xywhn2xyxy, xyn2xy, \ - segment2box, segments2boxes, resample_segments, clean_str +from utils.general import check_requirements, check_file, check_dataset, xywh2xyxy, xywhn2xyxy, xyxy2xywhn, \ + xyn2xy, segment2box, segments2boxes, resample_segments, clean_str from utils.torch_utils import torch_distributed_zero_first # Parameters @@ -192,7 +192,7 @@ def __next__(self): img = letterbox(img0, self.img_size, stride=self.stride)[0] # Convert - img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 + img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB and HWC to CHW img = np.ascontiguousarray(img) return path, img, img0, self.cap @@ -255,7 +255,7 @@ def __next__(self): img = letterbox(img0, self.img_size, stride=self.stride)[0] # Convert - img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 + img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB and HWC to CHW img = 
np.ascontiguousarray(img) return img_path, img, img0, None @@ -336,7 +336,7 @@ def __next__(self): img = np.stack(img, 0) # Convert - img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416 + img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB and BHWC to BCHW img = np.ascontiguousarray(img) return self.sources, img, img0, None @@ -552,9 +552,7 @@ def __getitem__(self, index): nL = len(labels) # number of labels if nL: - labels[:, 1:5] = xyxy2xywh(labels[:, 1:5]) # convert xyxy to xywh - labels[:, [2, 4]] /= img.shape[0] # normalized height 0-1 - labels[:, [1, 3]] /= img.shape[1] # normalized width 0-1 + labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0]) # xyxy to xywh normalized if self.augment: # flip up-down diff --git a/utils/general.py b/utils/general.py index 555975f07c5d..6a5b42f374e6 100755 --- a/utils/general.py +++ b/utils/general.py @@ -393,6 +393,16 @@ def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0): return y +def xyxy2xywhn(x, w=640, h=640): + # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] normalized where xy1=top-left, xy2=bottom-right + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = ((x[:, 0] + x[:, 2]) / 2) / w # x center + y[:, 1] = ((x[:, 1] + x[:, 3]) / 2) / h # y center + y[:, 2] = (x[:, 2] - x[:, 0]) / w # width + y[:, 3] = (x[:, 3] - x[:, 1]) / h # height + return y + + def xyn2xy(x, w=640, h=640, padw=0, padh=0): # Convert normalized segments into pixel segments, shape (n,2) y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) From f2d97ebb251e689f55879709179248d4acf8e2a3 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 25 Jun 2021 12:52:05 +0200 Subject: [PATCH 118/757] Remove DDP MultiHeadAttention fix (#3768) --- train.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/train.py b/train.py index 6b04e8ff3a6a..d4a1b48b5aa4 100644 --- a/train.py +++ b/train.py @@ -252,9 +252,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # DDP mode if cuda and RANK != -1: - model = DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK, - # nn.MultiheadAttention incompatibility with DDP https://github.com/pytorch/pytorch/issues/26698 - find_unused_parameters=any(isinstance(layer, nn.MultiheadAttention) for layer in model.modules())) + model = DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK) # Model parameters hyp['box'] *= 3. 
/ nl # scale to layers From 09246a5a331c05df230ab41d41f972bfa3f97d4c Mon Sep 17 00:00:00 2001 From: Piotr Skalski Date: Fri, 25 Jun 2021 16:16:18 +0200 Subject: [PATCH 119/757] fix/incorrect_fitness_import (#3770) --- train.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/train.py b/train.py index d4a1b48b5aa4..3390e838803a 100644 --- a/train.py +++ b/train.py @@ -38,13 +38,14 @@ from utils.autoanchor import check_anchors from utils.datasets import create_dataloader from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \ - fitness, strip_optimizer, get_latest_run, check_dataset, check_file, check_git_status, check_img_size, \ + strip_optimizer, get_latest_run, check_dataset, check_file, check_git_status, check_img_size, \ check_requirements, print_mutation, set_logging, one_cycle, colorstr from utils.google_utils import attempt_download from utils.loss import ComputeLoss from utils.plots import plot_images, plot_labels, plot_results, plot_evolution from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, de_parallel from utils.wandb_logging.wandb_utils import WandbLogger, check_wandb_resume +from utils.metrics import fitness logger = logging.getLogger(__name__) LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html From ffb6e11050c1379de120af4e687d9623a0535b41 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Sat, 26 Jun 2021 00:15:33 +0530 Subject: [PATCH 120/757] W&B: Update Tables API and comply with new dataset_check (#3772) * Update tables API and windows path fix * update dataset check --- utils/wandb_logging/wandb_utils.py | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/utils/wandb_logging/wandb_utils.py b/utils/wandb_logging/wandb_utils.py index d82633c7e2f6..f031a819b977 100644 --- a/utils/wandb_logging/wandb_utils.py +++ b/utils/wandb_logging/wandb_utils.py @@ -136,7 +136,6 @@ def __init__(self, opt, name, run_id, data_dict, job_type='Training'): def check_and_upload_dataset(self, opt): assert wandb, 'Install wandb to upload dataset' - check_dataset(self.data_dict) config_path = self.log_dataset_artifact(check_file(opt.data), opt.single_cls, 'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem) @@ -171,9 +170,11 @@ def setup_training(self, opt, data_dict): data_dict['val'] = str(val_path) self.val_table = self.val_artifact.get("val") self.map_val_table_path() + wandb.log({"validation dataset": self.val_table}) + if self.val_artifact is not None: self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") - self.result_table = wandb.Table(["epoch", "id", "prediction", "avg_confidence"]) + self.result_table = wandb.Table(["epoch", "id", "ground truth", "prediction", "avg_confidence"]) if opt.bbox_interval == -1: self.bbox_interval = opt.bbox_interval = (opt.epochs // 10) if opt.epochs > 10 else 1 return data_dict @@ -181,7 +182,7 @@ def setup_training(self, opt, data_dict): def download_dataset_artifact(self, path, alias): if isinstance(path, str) and path.startswith(WANDB_ARTIFACT_PREFIX): artifact_path = Path(remove_prefix(path, WANDB_ARTIFACT_PREFIX) + ":" + alias) - dataset_artifact = wandb.use_artifact(artifact_path.as_posix()) + dataset_artifact = wandb.use_artifact(artifact_path.as_posix().replace("\\","/")) assert dataset_artifact is not None, "'Error: W&B dataset artifact doesn\'t exist'" datadir = dataset_artifact.download() return 
datadir, dataset_artifact @@ -216,6 +217,7 @@ def log_model(self, path, opt, epoch, fitness_score, best_model=False): def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=False): with open(data_file) as f: data = yaml.safe_load(f) # data dict + check_dataset(data) nc, names = (1, ['item']) if single_cls else (int(data['nc']), data['names']) names = {k: v for k, v in enumerate(names)} # to index dictionary self.train_artifact = self.create_dataset_table(LoadImagesAndLabels( @@ -228,6 +230,7 @@ def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config= data['val'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'val') path = data_file if overwrite_config else '_wandb.'.join(data_file.rsplit('.', 1)) # updated data.yaml path data.pop('download', None) + data.pop('path', None) with open(path, 'w') as f: yaml.safe_dump(data, f) @@ -297,6 +300,7 @@ def log_training_progress(self, predn, path, names): id = self.val_table_map[Path(path).name] self.result_table.add_data(self.current_epoch, id, + self.val_table.data[id][1], wandb.Image(self.val_table.data[id][1], boxes=boxes, classes=class_set), total_conf / max(1, len(box_data)) ) @@ -312,11 +316,12 @@ def end_epoch(self, best_result=False): wandb.log(self.log_dict) self.log_dict = {} if self.result_artifact: - train_results = wandb.JoinedTable(self.val_table, self.result_table, "id") - self.result_artifact.add(train_results, 'result') + self.result_artifact.add(self.result_table, 'result') wandb.log_artifact(self.result_artifact, aliases=['latest', 'last', 'epoch ' + str(self.current_epoch), ('best' if best_result else '')]) - self.result_table = wandb.Table(["epoch", "id", "prediction", "avg_confidence"]) + + wandb.log({"evaluation": self.result_table}) + self.result_table = wandb.Table(["epoch", "id", "ground truth", "prediction", "avg_confidence"]) self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") def finish_run(self): From f89941711cc9b59f35f8991e6324a0ee80aad07e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 26 Jun 2021 00:49:05 +0200 Subject: [PATCH 121/757] NGA xView 2018 Dataset Auto-Download (#3775) * update clip_coords for numpy * uncomment * cleanup * Add autosplits * fix * cleanup --- data/xView.yaml | 101 ++++++++++++++++++++++++++++++++++++++++++++++ utils/datasets.py | 21 +++++----- utils/general.py | 18 ++++++--- 3 files changed, 125 insertions(+), 15 deletions(-) create mode 100644 data/xView.yaml diff --git a/data/xView.yaml b/data/xView.yaml new file mode 100644 index 000000000000..5212193a0bf0 --- /dev/null +++ b/data/xView.yaml @@ -0,0 +1,101 @@ +# xView 2018 dataset https://challenge.xviewdataset.org +# ----> NOTE: DOWNLOAD DATA MANUALLY from URL above and unzip to /datasets/xView before running train command below +# Train command: python train.py --data xView.yaml +# Default dataset location is next to YOLOv5: +# /parent +# /datasets/xView +# /yolov5 + + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
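The W&B patch above widens the per-image results table to five columns. A minimal sketch of logging such a table (the column names come straight from the diff; the image path, box coordinates and scores are placeholders, and the `boxes` payload follows W&B's documented bounding-box schema):

```python
import wandb

run = wandb.init(project='yolov5-demo')
table = wandb.Table(["epoch", "id", "ground truth", "prediction", "avg_confidence"])

# One row per validation image: raw image, image with predicted boxes, mean confidence
boxes = {"predictions": {"box_data": [{"position": {"minX": 0.1, "minY": 0.2, "maxX": 0.5, "maxY": 0.8},
                                       "class_id": 0, "box_caption": "person 0.92",
                                       "scores": {"class_score": 0.92}}]}}
table.add_data(1, 0, wandb.Image('bus.jpg'), wandb.Image('bus.jpg', boxes=boxes), 0.92)

run.log({"evaluation": table})
run.finish()
```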
+path: ../datasets/xView # dataset root dir
+train: images/autosplit_train.txt # train images (relative to 'path') 90% of 847 train images
+val: images/autosplit_val.txt # val images (relative to 'path') 10% of 847 train images
+
+# Classes
+nc: 60 # number of classes
+names: [ 'Fixed-wing Aircraft', 'Small Aircraft', 'Cargo Plane', 'Helicopter', 'Passenger Vehicle', 'Small Car', 'Bus',
+        'Pickup Truck', 'Utility Truck', 'Truck', 'Cargo Truck', 'Truck w/Box', 'Truck Tractor', 'Trailer',
+        'Truck w/Flatbed', 'Truck w/Liquid', 'Crane Truck', 'Railway Vehicle', 'Passenger Car', 'Cargo Car',
+        'Flat Car', 'Tank car', 'Locomotive', 'Maritime Vessel', 'Motorboat', 'Sailboat', 'Tugboat', 'Barge',
+        'Fishing Vessel', 'Ferry', 'Yacht', 'Container Ship', 'Oil Tanker', 'Engineering Vehicle', 'Tower crane',
+        'Container Crane', 'Reach Stacker', 'Straddle Carrier', 'Mobile Crane', 'Dump Truck', 'Haul Truck',
+        'Scraper/Tractor', 'Front loader/Bulldozer', 'Excavator', 'Cement Mixer', 'Ground Grader', 'Hut/Tent', 'Shed',
+        'Building', 'Aircraft Hangar', 'Damaged Building', 'Facility', 'Construction Site', 'Vehicle Lot', 'Helipad',
+        'Storage Tank', 'Shipping container lot', 'Shipping Container', 'Pylon', 'Tower' ] # class names
+
+
+# Download script/URL (optional) ---------------------------------------------------------------------------------------
+download: |
+  import json
+  import os
+  from pathlib import Path
+
+  import numpy as np
+  from PIL import Image
+  from tqdm import tqdm
+
+  from utils.datasets import autosplit
+  from utils.general import download, xyxy2xywhn
+
+
+  def convert_labels(fname=Path('xView/xView_train.geojson')):
+      # Convert xView geoJSON labels to YOLO format
+      path = fname.parent
+      with open(fname) as f:
+          print(f'Loading {fname}...')
+          data = json.load(f)
+
+      # Make dirs
+      labels = Path(path / 'labels' / 'train')
+      os.system(f'rm -rf {labels}')
+      labels.mkdir(parents=True, exist_ok=True)
+
+      # xView classes 11-94 to 0-59
+      xview_class2index = [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 1, 2, -1, 3, -1, 4, 5, 6, 7, 8, -1, 9, 10, 11,
+                           12, 13, 14, 15, -1, -1, 16, 17, 18, 19, 20, 21, 22, -1, 23, 24, 25, -1, 26, 27, -1, 28, -1,
+                           29, 30, 31, 32, 33, 34, 35, 36, 37, -1, 38, 39, 40, 41, 42, 43, 44, 45, -1, -1, -1, -1, 46,
+                           47, 48, 49, -1, 50, 51, -1, 52, -1, -1, -1, 53, 54, -1, 55, -1, -1, 56, -1, 57, -1, 58, 59]
+
+      shapes = {}
+      for feature in tqdm(data['features'], desc=f'Converting {fname}'):
+          p = feature['properties']
+          if p['bounds_imcoords']:
+              id = p['image_id']
+              file = path / 'train_images' / id
+              if file.exists(): # 1395.tif missing
+                  try:
+                      box = np.array([int(num) for num in p['bounds_imcoords'].split(",")])
+                      assert box.shape[0] == 4, f'incorrect box shape {box.shape[0]}'
+                      cls = p['type_id']
+                      cls = xview_class2index[int(cls)] # xView class to 0-60
+                      assert 59 >= cls >= 0, f'incorrect class index {cls}'
+
+                      # Write YOLO label
+                      if id not in shapes:
+                          shapes[id] = Image.open(file).size
+                      box = xyxy2xywhn(box[None].astype(np.float), w=shapes[id][0], h=shapes[id][1], clip=True)
+                      with open((labels / id).with_suffix('.txt'), 'a') as f:
+                          f.write(f"{cls} {' '.join(f'{x:.6f}' for x in box[0])}\n") # write label.txt
+                  except Exception as e:
+                      print(f'WARNING: skipping one label for {file}: {e}')
+
+
+  # Download manually from https://challenge.xviewdataset.org
+  dir = Path(yaml['path']) # dataset root dir
+  # urls = ['https://d307kc0mrhucc3.cloudfront.net/train_labels.zip', # train labels
+  #        'https://d307kc0mrhucc3.cloudfront.net/train_images.zip', # 15G, 847 
train images + # 'https://d307kc0mrhucc3.cloudfront.net/val_images.zip'] # 5G, 282 val images (no labels) + # download(urls, dir=dir, delete=False) + + # Convert labels + convert_labels(dir / 'xView_train.geojson') + + # Move images + images = Path(dir / 'images') + images.mkdir(parents=True, exist_ok=True) + Path(dir / 'train_images').rename(dir / 'images' / 'train') + Path(dir / 'val_images').rename(dir / 'images' / 'val') + + # Split + autosplit(dir / 'images' / 'train') diff --git a/utils/datasets.py b/utils/datasets.py index eac0c7834308..4658dc524be0 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -985,7 +985,7 @@ def create_folder(path='./new'): os.makedirs(path) # make new output folder -def flatten_recursive(path='../coco128'): +def flatten_recursive(path='../datasets/coco128'): # Flatten a recursive directory by bringing all files to top level new_path = Path(path + '_flat') create_folder(new_path) @@ -993,7 +993,7 @@ def flatten_recursive(path='../coco128'): shutil.copyfile(file, new_path / Path(file).name) -def extract_boxes(path='../coco128/'): # from utils.datasets import *; extract_boxes('../coco128') +def extract_boxes(path='../datasets/coco128'): # from utils.datasets import *; extract_boxes() # Convert detection dataset into classification dataset, with one directory per class path = Path(path) # images dir @@ -1028,27 +1028,28 @@ def extract_boxes(path='../coco128/'): # from utils.datasets import *; extract_ assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}' -def autosplit(path='../coco128', weights=(0.9, 0.1, 0.0), annotated_only=False): +def autosplit(path='../datasets/coco128/images', weights=(0.9, 0.1, 0.0), annotated_only=False): """ Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files - Usage: from utils.datasets import *; autosplit('../coco128') + Usage: from utils.datasets import *; autosplit() Arguments - path: Path to images directory - weights: Train, val, test weights (list) - annotated_only: Only use images with an annotated txt file + path: Path to images directory + weights: Train, val, test weights (list, tuple) + annotated_only: Only use images with an annotated txt file """ path = Path(path) # images dir files = sum([list(path.rglob(f"*.{img_ext}")) for img_ext in img_formats], []) # image files only n = len(files) # number of files + random.seed(0) # for reproducibility indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files - [(path / x).unlink() for x in txt if (path / x).exists()] # remove existing + [(path.parent / x).unlink(missing_ok=True) for x in txt] # remove existing print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only) for i, img in tqdm(zip(indices, files), total=n): if not annotated_only or Path(img2label_paths([str(img)])[0]).exists(): # check label - with open(path / txt[i], 'a') as f: - f.write(str(img) + '\n') # add image to txt file + with open(path.parent / txt[i], 'a') as f: + f.write('./' + img.relative_to(path.parent).as_posix() + '\n') # add image to txt file def verify_image_label(args): diff --git a/utils/general.py b/utils/general.py index 6a5b42f374e6..83eb95744678 100755 --- a/utils/general.py +++ b/utils/general.py @@ -393,8 +393,10 @@ def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0): return y -def xyxy2xywhn(x, w=640, h=640): +def xyxy2xywhn(x, w=640, h=640, clip=False): # Convert nx4 boxes from 
[x1, y1, x2, y2] to [x, y, w, h] normalized where xy1=top-left, xy2=bottom-right + if clip: + clip_coords(x, (h, w)) # warning: inplace clip y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) y[:, 0] = ((x[:, 0] + x[:, 2]) / 2) / w # x center y[:, 1] = ((x[:, 1] + x[:, 3]) / 2) / h # y center @@ -455,10 +457,16 @@ def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None): def clip_coords(boxes, img_shape): # Clip bounding xyxy bounding boxes to image shape (height, width) - boxes[:, 0].clamp_(0, img_shape[1]) # x1 - boxes[:, 1].clamp_(0, img_shape[0]) # y1 - boxes[:, 2].clamp_(0, img_shape[1]) # x2 - boxes[:, 3].clamp_(0, img_shape[0]) # y2 + if isinstance(boxes, torch.Tensor): + boxes[:, 0].clamp_(0, img_shape[1]) # x1 + boxes[:, 1].clamp_(0, img_shape[0]) # y1 + boxes[:, 2].clamp_(0, img_shape[1]) # x2 + boxes[:, 3].clamp_(0, img_shape[0]) # y2 + else: # np.array + boxes[:, 0].clip(0, img_shape[1], out=boxes[:, 0]) # x1 + boxes[:, 1].clip(0, img_shape[0], out=boxes[:, 1]) # y1 + boxes[:, 2].clip(0, img_shape[1], out=boxes[:, 2]) # x2 + boxes[:, 3].clip(0, img_shape[0], out=boxes[:, 3]) # y2 def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7): From 9dc5d35fce4c768427e790f20d4b425ecad15d08 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 26 Jun 2021 13:05:46 +0200 Subject: [PATCH 122/757] Update README.md fix banner width (#3785) --- README.md | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 82d408ef5cde..c26b3c264771 100755 --- a/README.md +++ b/README.md @@ -178,11 +178,8 @@ Get started in seconds with our verified environments and integrations, includin We are super excited about our first-ever Ultralytics YOLOv5 🚀 EXPORT Competition with **$10,000** in cash prizes! -
## Why YOLOv5
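The next two patches (#3610 and #3786) sort the objectness targets by IoU before writing them into `tobj`, so that when several targets land on the same grid cell and anchor, the highest-IoU score is written last. A standalone sketch of the effect with illustrative values, not repo code; it relies on the practical last-write-wins behavior of duplicate-index assignment, which PyTorch does not formally guarantee:

import torch

tobj = torch.zeros(5)
idx = torch.tensor([2, 2, 3])              # two targets share grid cell 2
score_iou = torch.tensor([0.9, 0.4, 0.7])  # per-target IoU scores

tobj[idx] = score_iou                      # unsorted: cell 2 is left with 0.4
order = score_iou.argsort()                # ascending IoU
tobj[idx[order]] = score_iou[order]        # sorted: 0.9 is written last and wins
print(tobj)                                # tensor([0.0000, 0.0000, 0.9000, 0.7000, 0.0000])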
From 157aa2f88696061348d60a1d5019223a2126e258 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 26 Jun 2021 14:45:53 +0200 Subject: [PATCH 123/757] Objectness IoU Sort (#3610) Co-authored-by: U-LAPTOP-5N89P8V7\banhu --- utils/loss.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/utils/loss.py b/utils/loss.py index 9e78df17fdf3..576c1c79e6f8 100644 --- a/utils/loss.py +++ b/utils/loss.py @@ -133,7 +133,10 @@ def __call__(self, p, targets): # predictions, targets, model lbox += (1.0 - iou).mean() # iou loss # Objectness - tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * iou.detach().clamp(0).type(tobj.dtype) # iou ratio + score_iou = iou.detach().clamp(0).type(tobj.dtype) + sort_id = torch.argsort(score_iou) + b, a, gj, gi, score_iou = b[sort_id], a[sort_id], gj[sort_id], gi[sort_id], score_iou[sort_id] + tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * score_iou # iou ratio # Classification if self.nc > 1: # cls loss (only if multiple classes) From 8035b61682cba7b10be24b0ab35cc0295f14d6cd Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 26 Jun 2021 14:52:18 +0200 Subject: [PATCH 124/757] Update objectness IoU sort (#3786) --- utils/loss.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/utils/loss.py b/utils/loss.py index 576c1c79e6f8..d4c261a5cc97 100644 --- a/utils/loss.py +++ b/utils/loss.py @@ -89,6 +89,7 @@ class ComputeLoss: # Compute losses def __init__(self, model, autobalance=False): super(ComputeLoss, self).__init__() + self.sort_obj_iou = False device = next(model.parameters()).device # get model device h = model.hyp # hyperparameters @@ -134,8 +135,9 @@ def __call__(self, p, targets): # predictions, targets, model # Objectness score_iou = iou.detach().clamp(0).type(tobj.dtype) - sort_id = torch.argsort(score_iou) - b, a, gj, gi, score_iou = b[sort_id], a[sort_id], gj[sort_id], gi[sort_id], score_iou[sort_id] + if self.sort_obj_iou: + sort_id = torch.argsort(score_iou) + b, a, gj, gi, score_iou = b[sort_id], a[sort_id], gj[sort_id], gi[sort_id], score_iou[sort_id] tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * score_iou # iou ratio # Classification From bd581b330bb33136653613e8c97d3478f0beaf1c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 26 Jun 2021 15:13:06 +0200 Subject: [PATCH 125/757] Create hyp.scratch-p6.yaml (#3787) --- data/hyps/hyp.scratch-p6.yaml | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) create mode 100644 data/hyps/hyp.scratch-p6.yaml diff --git a/data/hyps/hyp.scratch-p6.yaml b/data/hyps/hyp.scratch-p6.yaml new file mode 100644 index 000000000000..faf565423968 --- /dev/null +++ b/data/hyps/hyp.scratch-p6.yaml @@ -0,0 +1,33 @@ +# Hyperparameters for COCO training from scratch +# python train.py --batch 32 --cfg yolov5m6.yaml --weights '' --data coco.yaml --img 1280 --epochs 300 +# See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials + + +lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3) +lrf: 0.2 # final OneCycleLR learning rate (lr0 * lrf) +momentum: 0.937 # SGD momentum/Adam beta1 +weight_decay: 0.0005 # optimizer weight decay 5e-4 +warmup_epochs: 3.0 # warmup epochs (fractions ok) +warmup_momentum: 0.8 # warmup initial momentum +warmup_bias_lr: 0.1 # warmup initial bias lr +box: 0.05 # box loss gain +cls: 0.3 # cls loss gain +cls_pw: 1.0 # cls BCELoss positive_weight +obj: 0.7 # obj loss gain (scale with pixels) +obj_pw: 1.0 # obj BCELoss positive_weight +iou_t: 0.20 # IoU training threshold +anchor_t: 4.0 # anchor-multiple 
threshold +# anchors: 3 # anchors per output layer (0 to ignore) +fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5) +hsv_h: 0.015 # image HSV-Hue augmentation (fraction) +hsv_s: 0.7 # image HSV-Saturation augmentation (fraction) +hsv_v: 0.4 # image HSV-Value augmentation (fraction) +degrees: 0.0 # image rotation (+/- deg) +translate: 0.1 # image translation (+/- fraction) +scale: 0.9 # image scale (+/- gain) +shear: 0.0 # image shear (+/- deg) +perspective: 0.0 # image perspective (+/- fraction), range 0-0.001 +flipud: 0.0 # image flip up-down (probability) +fliplr: 0.5 # image flip left-right (probability) +mosaic: 1.0 # image mosaic (probability) +mixup: 0.0 # image mixup (probability) From 8e7f285051ed394acaf561767306237a41f0642d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 26 Jun 2021 15:29:16 +0200 Subject: [PATCH 126/757] Fix datasets for aws and get_coco.sh (#3788) * merge master * Update get_coco.sh --- data/scripts/get_coco.sh | 4 ++-- utils/aws/userdata.sh | 1 - 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/data/scripts/get_coco.sh b/data/scripts/get_coco.sh index caae37504780..bce692c29ae2 100755 --- a/data/scripts/get_coco.sh +++ b/data/scripts/get_coco.sh @@ -8,14 +8,14 @@ # /yolov5 # Download/unzip labels -d='../' # unzip directory +d='../datasets' # unzip directory url=https://github.com/ultralytics/yolov5/releases/download/v1.0/ f='coco2017labels.zip' # or 'coco2017labels-segments.zip', 68 MB echo 'Downloading' $url$f ' ...' curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & # download, unzip, remove in background # Download/unzip images -d='../coco/images' # unzip directory +d='../datasets/coco/images' # unzip directory url=http://images.cocodataset.org/zips/ f1='train2017.zip' # 19G, 118k images f2='val2017.zip' # 1G, 5k images diff --git a/utils/aws/userdata.sh b/utils/aws/userdata.sh index 52c0fe33d90f..5fc1332ac1b0 100644 --- a/utils/aws/userdata.sh +++ b/utils/aws/userdata.sh @@ -10,7 +10,6 @@ if [ ! -d yolov5 ]; then git clone https://github.com/ultralytics/yolov5 -b master && sudo chmod -R 777 yolov5 cd yolov5 bash data/scripts/get_coco.sh && echo "COCO done." & - bash data/scripts/get_voc.sh && echo "VOC done." & sudo docker pull ultralytics/yolov5:latest && echo "Docker done." & python -m pip install --upgrade pip && pip install -r requirements.txt && python detect.py && echo "Requirements done." & wait && echo "All tasks done." # finish background tasks From 92d49fde354126405b866c69b055fb9156115d55 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 26 Jun 2021 15:42:40 +0200 Subject: [PATCH 127/757] Update seeds for single-GPU reproducibility (#3789) For seed=0 on single-GPU. --- train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/train.py b/train.py index 3390e838803a..9ac12b12aacf 100644 --- a/train.py +++ b/train.py @@ -84,7 +84,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Configure plots = not evolve # create plots cuda = device.type != 'cpu' - init_seeds(2 + RANK) + init_seeds(1 + RANK) with open(data) as f: data_dict = yaml.safe_load(f) # data dict From 07166ba38cd8b15a48bad33c8f4455236304eb18 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 26 Jun 2021 16:09:56 +0200 Subject: [PATCH 128/757] Update Usage examples (#3790) --- Dockerfile | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/Dockerfile b/Dockerfile index d32e3960046b..eca690003fbd 100644 --- a/Dockerfile +++ b/Dockerfile @@ -22,17 +22,16 @@ COPY . 
/usr/src/app ENV HOME=/usr/src/app -# --------------------------------------------------- Extras Below --------------------------------------------------- +# Usage Examples ------------------------------------------------------------------------------------------------------- # Build and Push # t=ultralytics/yolov5:latest && sudo docker build -t $t . && sudo docker push $t -# for v in {300..303}; do t=ultralytics/coco:v$v && sudo docker build -t $t . && sudo docker push $t; done # Pull and Run # t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all $t # Pull and Run with local directory access -# t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all -v "$(pwd)"/coco:/usr/src/coco $t +# t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all -v "$(pwd)"/datasets:/usr/src/datasets $t # Kill all # sudo docker kill $(sudo docker ps -q) From 96c87f1711e3baa396ea4ec9ca6d390fd76013bb Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 26 Jun 2021 16:28:06 +0200 Subject: [PATCH 129/757] nvcr.io/nvidia/pytorch:21.06-py3 (#3791) --- Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index eca690003fbd..4754801b5b56 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # Start FROM Nvidia PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch -FROM nvcr.io/nvidia/pytorch:21.05-py3 +FROM nvcr.io/nvidia/pytorch:21.06-py3 # Install linux packages RUN apt update && apt install -y zip htop screen libgl1-mesa-glx @@ -9,7 +9,7 @@ COPY requirements.txt . RUN python -m pip install --upgrade pip RUN pip uninstall -y nvidia-tensorboard nvidia-tensorboard-plugin-dlprof RUN pip install --no-cache -r requirements.txt coremltools onnx gsutil notebook -RUN pip install --no-cache -U torch torchvision +RUN pip install --no-cache torch==1.9.0+cu111 torchvision==0.10.0+cu111 -f https://download.pytorch.org/whl/torch_stable.html # Create working directory RUN mkdir -p /usr/src/app From 5e6886c8605cf9f9687866b6ef766aabbdacf580 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 26 Jun 2021 16:51:17 +0200 Subject: [PATCH 130/757] Update Dockerfile (#3792) --- Dockerfile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 4754801b5b56..a5b9da06a3d5 100644 --- a/Dockerfile +++ b/Dockerfile @@ -9,7 +9,8 @@ COPY requirements.txt . 
RUN python -m pip install --upgrade pip RUN pip uninstall -y nvidia-tensorboard nvidia-tensorboard-plugin-dlprof RUN pip install --no-cache -r requirements.txt coremltools onnx gsutil notebook -RUN pip install --no-cache torch==1.9.0+cu111 torchvision==0.10.0+cu111 -f https://download.pytorch.org/whl/torch_stable.html +RUN pip install --no-cache -U torch torchvision numpy +# RUN pip install --no-cache torch==1.9.0+cu111 torchvision==0.10.0+cu111 -f https://download.pytorch.org/whl/torch_stable.html # Create working directory RUN mkdir -p /usr/src/app From 47543f97b1e5602600529aef48dcdad878cbb73e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 26 Jun 2021 17:12:43 +0200 Subject: [PATCH 131/757] FROM nvcr.io/nvidia/pytorch:21.05-py3 (#3794) --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index a5b9da06a3d5..e22c1106f23d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # Start FROM Nvidia PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch -FROM nvcr.io/nvidia/pytorch:21.06-py3 +FROM nvcr.io/nvidia/pytorch:21.05-py3 # Install linux packages RUN apt update && apt install -y zip htop screen libgl1-mesa-glx From 5e976a274d2458869ae9743a93d5c2f67d6fa79a Mon Sep 17 00:00:00 2001 From: batrlatom Date: Sun, 27 Jun 2021 15:28:50 +0200 Subject: [PATCH 132/757] Fix competition link (#3799) * link to the competition repaired * Update README.md Co-authored-by: Glenn Jocher --- README.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index c26b3c264771..360afd2cd7e6 100755 --- a/README.md +++ b/README.md @@ -178,8 +178,10 @@ Get started in seconds with our verified environments and integrations, includin We are super excited about our first-ever Ultralytics YOLOv5 🚀 EXPORT Competition with **$10,000** in cash prizes! -
## Why YOLOv5
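The warmup fix in the next patch (#3722) swaps the optimizer-step condition `ni % accumulate == 0` for `ni - last_opt_step >= accumulate`. A standalone toy comparison of the two conditions while `accumulate` ramps from 1 to 4 (made-up numbers, not repo code):

import numpy as np

steps_mod, steps_delta, last_opt_step = [], [], -1
for ni in range(20):
    accumulate = max(1, int(round(np.interp(ni, [0, 10], [1, 4]))))  # toy warmup ramp 1 -> 4
    if ni % accumulate == 0:                 # old condition
        steps_mod.append(ni)
    if ni - last_opt_step >= accumulate:     # new condition
        steps_delta.append(ni)
        last_opt_step = ni

print(steps_mod)    # [0, 1, 2, 4, 6, 12, 16] -> a 6-iteration gap appears mid-ramp
print(steps_delta)  # [0, 1, 3, 5, 8, 12, 16] -> gaps grow smoothly with accumulate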
From 3974d725b60308b46298b5f0a4cc4bb2c4b7807b Mon Sep 17 00:00:00 2001
From: yellowdolphin <42343818+yellowdolphin@users.noreply.github.com>
Date: Mon, 28 Jun 2021 12:25:13 +0200
Subject: [PATCH 133/757] Fix warmup `accumulate` (#3722)

* gradient accumulation during warmup in train.py

Context:
`accumulate` is the number of batches/gradients accumulated before calling the next optimizer.step(). During warmup it is ramped up from 1 to the final value nbs / batch_size. Although I have not seen this in other libraries, I like the idea. During warmup, as gradients are large, overly large steps are more of an issue than the gradient noise caused by small steps.

The bug:
The condition for performing the optimizer step is wrong
> if ni % accumulate == 0:
This produces irregular step sizes when `accumulate` is not constant. It becomes relevant when batch_size is small and `accumulate` changes many times during warmup. This demo shows the issue and the proposed solution, using a ">=" condition instead: https://colab.research.google.com/drive/1MA2z2eCXYB_BC5UZqgXueqL_y1Tz_XVq?usp=sharing

Further, I propose not to restrict the number of warmup iterations to >= 1000. If the user changes hyp['warmup_epochs'], this causes unexpected behavior. It also makes evolution unstable if this parameter were to be optimized.

* replace last_opt_step tracking by do_step(ni)

* add docstrings

* move down nw

* Update train.py

* revert math import move

Co-authored-by: Glenn Jocher
---
 train.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/train.py b/train.py
index 9ac12b12aacf..257be065f641 100644
--- a/train.py
+++ b/train.py
@@ -270,6 +270,7 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
     t0 = time.time()
     nw = max(round(hyp['warmup_epochs'] * nb), 1000)  # number of warmup iterations, max(3 epochs, 1k iterations)
     # nw = min(nw, (epochs - start_epoch) / 2 * nb)  # limit warmup to < 1/2 of training
+    last_opt_step = -1
    maps = np.zeros(nc)  # mAP per class
    results = (0, 0, 0, 0, 0, 0, 0)  # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
    scheduler.last_epoch = start_epoch - 1  # do not move
@@ -344,12 +345,13 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
             scaler.scale(loss).backward()

             # Optimize
-            if ni % accumulate == 0:
+            if ni - last_opt_step >= accumulate:
                 scaler.step(optimizer)  # optimizer.step
                 scaler.update()
                 optimizer.zero_grad()
                 if ema:
                     ema.update(model)
+                last_opt_step = ni

             # Print
             if RANK in [-1, 0]:

From 20d45aa4f18d7df7148fc6dd69a9c0607003f004 Mon Sep 17 00:00:00 2001
From: Zigarss <32835472+Zigars@users.noreply.github.com>
Date: Mon, 28 Jun 2021 19:18:45 +0800
Subject: [PATCH 134/757] Add feature map visualization (#3804)

* Add feature map visualization

Add a feature_visualization function to visualize intermediate feature maps of the model.
* Update yolo.py * remove boolean from forward and reorder if statement * remove print from forward * General cleanup * Indent * Update plots.py Co-authored-by: Glenn Jocher --- models/yolo.py | 6 +++++- utils/plots.py | 30 ++++++++++++++++++++++++++++-- 2 files changed, 33 insertions(+), 3 deletions(-) diff --git a/models/yolo.py b/models/yolo.py index 4a2514edd295..4c9456edd687 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -17,6 +17,7 @@ from models.experimental import * from utils.autoanchor import check_anchor_order from utils.general import make_divisible, check_file, set_logging +from utils.plots import feature_visualization from utils.torch_utils import time_synchronized, fuse_conv_and_bn, model_info, scale_img, initialize_weights, \ select_device, copy_attr @@ -135,7 +136,7 @@ def forward_augment(self, x): y.append(yi) return torch.cat(y, 1), None # augmented inference, train - def forward_once(self, x, profile=False): + def forward_once(self, x, profile=False, feature_vis=False): y, dt = [], [] # outputs for m in self.model: if m.f != -1: # if not from previous layer @@ -153,6 +154,9 @@ def forward_once(self, x, profile=False): x = m(x) # run y.append(x if m.i in self.save else None) # save output + + if feature_vis and m.type == 'models.common.SPP': + feature_visualization(x, m.type, m.i) if profile: logger.info('%.1fms total' % sum(dt)) diff --git a/utils/plots.py b/utils/plots.py index 66a30918190e..36386371dbec 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -15,8 +15,9 @@ import torch import yaml from PIL import Image, ImageDraw, ImageFont +from torchvision import transforms -from utils.general import xywh2xyxy, xyxy2xywh +from utils.general import increment_path, xywh2xyxy, xyxy2xywh from utils.metrics import fitness # Settings @@ -299,7 +300,7 @@ def plot_labels(labels, names=(), save_dir=Path(''), loggers=None): matplotlib.use('svg') # faster ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel() y = ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8) - # [y[2].patches[i].set_color([x / 255 for x in colors(i)]) for i in range(nc)] # update colors bug #3195 + # [y[2].patches[i].set_color([x / 255 for x in colors(i)]) for i in range(nc)] # update colors bug #3195 ax[0].set_ylabel('instances') if 0 < len(names) < 30: ax[0].set_xticks(range(len(names))) @@ -445,3 +446,28 @@ def plot_results(start=0, stop=0, bucket='', id=(), labels=(), save_dir=''): ax[1].legend() fig.savefig(Path(save_dir) / 'results.png', dpi=200) + + +def feature_visualization(features, module_type, module_idx, n=64): + """ + features: Features to be visualized + module_type: Module type + module_idx: Module layer index within model + n: Maximum number of feature maps to plot + """ + project, name = 'runs/features', 'exp' + save_dir = increment_path(Path(project) / name) # increment run + save_dir.mkdir(parents=True, exist_ok=True) # make dir + + plt.figure(tight_layout=True) + blocks = torch.chunk(features, features.shape[1], dim=1) # block by channel dimension + n = min(n, len(blocks)) + for i in range(n): + feature = transforms.ToPILImage()(blocks[i].squeeze()) + ax = plt.subplot(int(math.sqrt(n)), int(math.sqrt(n)), i + 1) + ax.axis('off') + plt.imshow(feature) # cmap='gray' + + f = f"layer_{module_idx}_{module_type.split('.')[-1]}_features.png" + print(f'Saving {save_dir / f}...') + plt.savefig(save_dir / f, dpi=300) From 02719dde52a99f28603be383334028a7ab9f1e06 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 28 Jun 2021 13:48:14 +0200 Subject: [PATCH 
135/757] Update `feature_visualization()` (#3807) * Update `feature_visualization()` Only plot for data with height, width > 1 * cleanup * Cleanup --- utils/plots.py | 40 +++++++++++++++++++++------------------- 1 file changed, 21 insertions(+), 19 deletions(-) diff --git a/utils/plots.py b/utils/plots.py index 36386371dbec..4b6c63992ac7 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -448,26 +448,28 @@ def plot_results(start=0, stop=0, bucket='', id=(), labels=(), save_dir=''): fig.savefig(Path(save_dir) / 'results.png', dpi=200) -def feature_visualization(features, module_type, module_idx, n=64): +def feature_visualization(x, module_type, stage, n=64): """ - features: Features to be visualized + x: Features to be visualized module_type: Module type - module_idx: Module layer index within model + stage: Module stage within model n: Maximum number of feature maps to plot """ - project, name = 'runs/features', 'exp' - save_dir = increment_path(Path(project) / name) # increment run - save_dir.mkdir(parents=True, exist_ok=True) # make dir - - plt.figure(tight_layout=True) - blocks = torch.chunk(features, features.shape[1], dim=1) # block by channel dimension - n = min(n, len(blocks)) - for i in range(n): - feature = transforms.ToPILImage()(blocks[i].squeeze()) - ax = plt.subplot(int(math.sqrt(n)), int(math.sqrt(n)), i + 1) - ax.axis('off') - plt.imshow(feature) # cmap='gray' - - f = f"layer_{module_idx}_{module_type.split('.')[-1]}_features.png" - print(f'Saving {save_dir / f}...') - plt.savefig(save_dir / f, dpi=300) + batch, channels, height, width = x.shape # batch, channels, height, width + if height > 1 and width > 1: + project, name = 'runs/features', 'exp' + save_dir = increment_path(Path(project) / name) # increment run + save_dir.mkdir(parents=True, exist_ok=True) # make dir + + plt.figure(tight_layout=True) + blocks = torch.chunk(x, channels, dim=1) # block by channel dimension + n = min(n, len(blocks)) + for i in range(n): + feature = transforms.ToPILImage()(blocks[i].squeeze()) + ax = plt.subplot(int(math.sqrt(n)), int(math.sqrt(n)), i + 1) + ax.axis('off') + plt.imshow(feature) # cmap='gray' + + f = f"stage_{stage}_{module_type.split('.')[-1]}_features.png" + print(f'Saving {save_dir / f}...') + plt.savefig(save_dir / f, dpi=300) From 3213d8713f631072fd309bbe827d065a48160bb7 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 29 Jun 2021 12:44:59 +0200 Subject: [PATCH 136/757] Fix for `dataset_stats()` with updated data.yaml (#3819) @KalenMike --- utils/datasets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/datasets.py b/utils/datasets.py index 4658dc524be0..c2859a148106 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -1116,7 +1116,7 @@ def round_labels(labels): nc = data['nc'] # number of classes stats = {'nc': nc, 'names': data['names']} # statistics dictionary for split in 'train', 'val', 'test': - if split not in data: + if data.get(split) is None: stats[split] = None # i.e. 
no test set continue x = [] From 5ea771d93d3d75502959168a44de39de9f45af1b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 29 Jun 2021 13:18:13 +0200 Subject: [PATCH 137/757] Move IoU functions to metrics.py (#3820) --- utils/general.py | 80 +--------------------------------------------- utils/loss.py | 2 +- utils/metrics.py | 83 ++++++++++++++++++++++++++++++++++++++++++++++-- 3 files changed, 82 insertions(+), 83 deletions(-) diff --git a/utils/general.py b/utils/general.py index 83eb95744678..4606a8ec54f5 100755 --- a/utils/general.py +++ b/utils/general.py @@ -25,7 +25,7 @@ import yaml from utils.google_utils import gsutil_getsize -from utils.metrics import fitness +from utils.metrics import box_iou, fitness from utils.torch_utils import init_torch_seeds # Settings @@ -469,84 +469,6 @@ def clip_coords(boxes, img_shape): boxes[:, 3].clip(0, img_shape[0], out=boxes[:, 3]) # y2 -def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7): - # Returns the IoU of box1 to box2. box1 is 4, box2 is nx4 - box2 = box2.T - - # Get the coordinates of bounding boxes - if x1y1x2y2: # x1, y1, x2, y2 = box1 - b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] - b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] - else: # transform from xywh to xyxy - b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2 - b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2 - b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2 - b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2 - - # Intersection area - inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \ - (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0) - - # Union Area - w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps - w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps - union = w1 * h1 + w2 * h2 - inter + eps - - iou = inter / union - if GIoU or DIoU or CIoU: - cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width - ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height - if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1 - c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared - rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + - (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center distance squared - if DIoU: - return iou - rho2 / c2 # DIoU - elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 - v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2) - with torch.no_grad(): - alpha = v / (v - iou + (1 + eps)) - return iou - (rho2 / c2 + v * alpha) # CIoU - else: # GIoU https://arxiv.org/pdf/1902.09630.pdf - c_area = cw * ch + eps # convex area - return iou - (c_area - union) / c_area # GIoU - else: - return iou # IoU - - -def box_iou(box1, box2): - # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py - """ - Return intersection-over-union (Jaccard index) of boxes. - Both sets of boxes are expected to be in (x1, y1, x2, y2) format. 
- Arguments: - box1 (Tensor[N, 4]) - box2 (Tensor[M, 4]) - Returns: - iou (Tensor[N, M]): the NxM matrix containing the pairwise - IoU values for every element in boxes1 and boxes2 - """ - - def box_area(box): - # box = 4xn - return (box[2] - box[0]) * (box[3] - box[1]) - - area1 = box_area(box1.T) - area2 = box_area(box2.T) - - # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2) - inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2) - return inter / (area1[:, None] + area2 - inter) # iou = inter / (area1 + area2 - inter) - - -def wh_iou(wh1, wh2): - # Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2 - wh1 = wh1[:, None] # [N,1,2] - wh2 = wh2[None] # [1,M,2] - inter = torch.min(wh1, wh2).prod(2) # [N,M] - return inter / (wh1.prod(2) + wh2.prod(2) - inter) # iou = inter / (area1 + area2 - inter) - - def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False, labels=(), max_det=300): """Runs Non-Maximum Suppression (NMS) on inference results diff --git a/utils/loss.py b/utils/loss.py index d4c261a5cc97..88f57693307c 100644 --- a/utils/loss.py +++ b/utils/loss.py @@ -3,7 +3,7 @@ import torch import torch.nn as nn -from utils.general import bbox_iou +from utils.metrics import bbox_iou from utils.torch_utils import is_parallel diff --git a/utils/metrics.py b/utils/metrics.py index 8512197956e7..4f001c046285 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -1,5 +1,6 @@ # Model validation metrics +import math import warnings from pathlib import Path @@ -7,8 +8,6 @@ import numpy as np import torch -from . import general - def fitness(x): # Model fitness as a weighted combination of metrics @@ -128,7 +127,7 @@ def process_batch(self, detections, labels): detections = detections[detections[:, 4] > self.conf] gt_classes = labels[:, 0].int() detection_classes = detections[:, 5].int() - iou = general.box_iou(labels[:, 1:], detections[:, :4]) + iou = box_iou(labels[:, 1:], detections[:, :4]) x = torch.where(iou > self.iou_thres) if x[0].shape[0]: @@ -184,6 +183,84 @@ def print(self): print(' '.join(map(str, self.matrix[i]))) +def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7): + # Returns the IoU of box1 to box2. 
box1 is 4, box2 is nx4 + box2 = box2.T + + # Get the coordinates of bounding boxes + if x1y1x2y2: # x1, y1, x2, y2 = box1 + b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] + b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] + else: # transform from xywh to xyxy + b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2 + b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2 + b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2 + b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2 + + # Intersection area + inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \ + (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0) + + # Union Area + w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps + w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps + union = w1 * h1 + w2 * h2 - inter + eps + + iou = inter / union + if GIoU or DIoU or CIoU: + cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width + ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height + if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1 + c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared + rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center distance squared + if DIoU: + return iou - rho2 / c2 # DIoU + elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 + v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2) + with torch.no_grad(): + alpha = v / (v - iou + (1 + eps)) + return iou - (rho2 / c2 + v * alpha) # CIoU + else: # GIoU https://arxiv.org/pdf/1902.09630.pdf + c_area = cw * ch + eps # convex area + return iou - (c_area - union) / c_area # GIoU + else: + return iou # IoU + + +def box_iou(box1, box2): + # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py + """ + Return intersection-over-union (Jaccard index) of boxes. + Both sets of boxes are expected to be in (x1, y1, x2, y2) format. + Arguments: + box1 (Tensor[N, 4]) + box2 (Tensor[M, 4]) + Returns: + iou (Tensor[N, M]): the NxM matrix containing the pairwise + IoU values for every element in boxes1 and boxes2 + """ + + def box_area(box): + # box = 4xn + return (box[2] - box[0]) * (box[3] - box[1]) + + area1 = box_area(box1.T) + area2 = box_area(box2.T) + + # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2) + inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2) + return inter / (area1[:, None] + area2 - inter) # iou = inter / (area1 + area2 - inter) + + +def wh_iou(wh1, wh2): + # Returns the nxm IoU matrix. 
wh1 is nx2, wh2 is mx2 + wh1 = wh1[:, None] # [N,1,2] + wh2 = wh2[None] # [1,M,2] + inter = torch.min(wh1, wh2).prod(2) # [N,M] + return inter / (wh1.prod(2) + wh2.prod(2) - inter) # iou = inter / (area1 + area2 - inter) + + # Plots ---------------------------------------------------------------------------------------------------------------- def plot_pr_curve(px, py, ap, save_dir='pr_curve.png', names=()): From 57c5d02bbed8ad16b4ac3f8903d106e978448431 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 29 Jun 2021 16:03:10 +0200 Subject: [PATCH 138/757] Concise `TransformerBlock()` (#3821) --- models/common.py | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/models/common.py b/models/common.py index 4211db406c3d..96d63a07a1b1 100644 --- a/models/common.py +++ b/models/common.py @@ -77,18 +77,8 @@ def forward(self, x): if self.conv is not None: x = self.conv(x) b, _, w, h = x.shape - p = x.flatten(2) - p = p.unsqueeze(0) - p = p.transpose(0, 3) - p = p.squeeze(3) - e = self.linear(p) - x = p + e - - x = self.tr(x) - x = x.unsqueeze(3) - x = x.transpose(0, 3) - x = x.reshape(b, self.c2, w, h) - return x + p = x.flatten(2).unsqueeze(0).transpose(0, 3).squeeze(3) + return self.tr(p + self.linear(p)).unsqueeze(3).transpose(0, 3).reshape(b, self.c2, w, h) class Bottleneck(nn.Module): From 7d6af6963883a87f63f63b00c6ab56cc4a4a1db6 Mon Sep 17 00:00:00 2001 From: Feras Oughali <47706157+feras-oughali@users.noreply.github.com> Date: Wed, 30 Jun 2021 13:11:29 +0300 Subject: [PATCH 139/757] Fix `LoadStreams()` dataloader frame skip issue (#3833) * Update datasets.py to read every 4th frame of streams * Update datasets.py Co-authored-by: Glenn Jocher --- utils/datasets.py | 31 +++++++------------------------ 1 file changed, 7 insertions(+), 24 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index c2859a148106..5baf9c5b1906 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -4,7 +4,6 @@ import hashlib import json import logging -import math import os import random import shutil @@ -15,6 +14,7 @@ from threading import Thread import cv2 +import math import numpy as np import torch import torch.nn.functional as F @@ -210,15 +210,8 @@ class LoadWebcam: # for inference def __init__(self, pipe='0', img_size=640, stride=32): self.img_size = img_size self.stride = stride - - if pipe.isnumeric(): - pipe = eval(pipe) # local camera - # pipe = 'rtsp://192.168.1.64/1' # IP camera - # pipe = 'rtsp://username:password@192.168.1.64/1' # IP camera with login - # pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera - - self.pipe = pipe - self.cap = cv2.VideoCapture(pipe) # video capture object + self.pipe = eval(pipe) if pipe.isnumeric() else pipe + self.cap = cv2.VideoCapture(self.pipe) # video capture object self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size def __iter__(self): @@ -233,18 +226,8 @@ def __next__(self): raise StopIteration # Read frame - if self.pipe == 0: # local camera - ret_val, img0 = self.cap.read() - img0 = cv2.flip(img0, 1) # flip left-right - else: # IP camera - n = 0 - while True: - n += 1 - self.cap.grab() - if n % 30 == 0: # skip frames - ret_val, img0 = self.cap.retrieve() - if ret_val: - break + ret_val, img0 = self.cap.read() + img0 = cv2.flip(img0, 1) # flip left-right # Print assert ret_val, f'Camera Error {self.pipe}' @@ -308,12 +291,12 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32): def update(self, i, cap): # Read stream `i` frames in daemon thread - n, f = 0, self.frames[i] 
+ n, f, read = 0, self.frames[i], 1 # frame number, frame array, inference every 'read' frame while cap.isOpened() and n < f: n += 1 # _, self.imgs[index] = cap.read() cap.grab() - if n % 4: # read every 4th frame + if n % read == 0: success, im = cap.retrieve() self.imgs[i] = im if success else self.imgs[i] * 0 time.sleep(1 / self.fps[i]) # wait time From 25d1f2932c37a0b7cf7bf32e8cdcfb14dd5d3657 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 30 Jun 2021 15:10:40 +0200 Subject: [PATCH 140/757] Plot `AutoShape()` detections in ascending order (#3843) --- models/common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/common.py b/models/common.py index 96d63a07a1b1..5ffb8440b60f 100644 --- a/models/common.py +++ b/models/common.py @@ -311,7 +311,7 @@ def display(self, pprint=False, show=False, save=False, crop=False, render=False n = (pred[:, -1] == c).sum() # detections per class str += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string if show or save or render or crop: - for *box, conf, cls in pred: # xyxy, confidence, class + for *box, conf, cls in reversed(pred): # xyxy, confidence, class label = f'{self.names[int(cls)]} {conf:.2f}' if crop: save_one_box(box, im, file=save_dir / 'crops' / self.names[int(cls)] / self.files[i]) From c6c88dc601fbdbe4e3391ba14245ec2740b5d01a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 1 Jul 2021 00:35:04 +0200 Subject: [PATCH 141/757] Copy-Paste augmentation for YOLOv5 (#3845) * Copy-paste augmentation initial commit * if any segments * Add obscuration rejection * Add copy_paste hyperparameter * Update comments --- data/hyps/hyp.finetune.yaml | 1 + data/hyps/hyp.finetune_objects365.yaml | 1 + data/hyps/hyp.scratch-p6.yaml | 1 + data/hyps/hyp.scratch.yaml | 1 + train.py | 5 +-- utils/datasets.py | 44 +++++++++++++++----------- utils/metrics.py | 26 ++++++++++++++- 7 files changed, 58 insertions(+), 21 deletions(-) diff --git a/data/hyps/hyp.finetune.yaml b/data/hyps/hyp.finetune.yaml index a77597741356..237cd5bc19a1 100644 --- a/data/hyps/hyp.finetune.yaml +++ b/data/hyps/hyp.finetune.yaml @@ -36,3 +36,4 @@ flipud: 0.00856 fliplr: 0.5 mosaic: 1.0 mixup: 0.243 +copy_paste: 0.0 diff --git a/data/hyps/hyp.finetune_objects365.yaml b/data/hyps/hyp.finetune_objects365.yaml index 2b104ef2d9bf..435fa7a45119 100644 --- a/data/hyps/hyp.finetune_objects365.yaml +++ b/data/hyps/hyp.finetune_objects365.yaml @@ -26,3 +26,4 @@ flipud: 0.0 fliplr: 0.5 mosaic: 1.0 mixup: 0.0 +copy_paste: 0.0 diff --git a/data/hyps/hyp.scratch-p6.yaml b/data/hyps/hyp.scratch-p6.yaml index faf565423968..fc1d8ebe0876 100644 --- a/data/hyps/hyp.scratch-p6.yaml +++ b/data/hyps/hyp.scratch-p6.yaml @@ -31,3 +31,4 @@ flipud: 0.0 # image flip up-down (probability) fliplr: 0.5 # image flip left-right (probability) mosaic: 1.0 # image mosaic (probability) mixup: 0.0 # image mixup (probability) +copy_paste: 0.0 # segment copy-paste (probability) diff --git a/data/hyps/hyp.scratch.yaml b/data/hyps/hyp.scratch.yaml index 44f26b6658ae..b2cf2e32c638 100644 --- a/data/hyps/hyp.scratch.yaml +++ b/data/hyps/hyp.scratch.yaml @@ -31,3 +31,4 @@ flipud: 0.0 # image flip up-down (probability) fliplr: 0.5 # image flip left-right (probability) mosaic: 1.0 # image mosaic (probability) mixup: 0.0 # image mixup (probability) +copy_paste: 0.0 # segment copy-paste (probability) diff --git a/train.py b/train.py index 257be065f641..386f3d90dd73 100644 --- a/train.py +++ b/train.py @@ -6,7 +6,6 @@ import argparse import logging -import math import os import random import 
sys @@ -16,6 +15,7 @@ from pathlib import Path from threading import Thread +import math import numpy as np import torch.distributed as dist import torch.nn as nn @@ -591,7 +591,8 @@ def main(opt): 'flipud': (1, 0.0, 1.0), # image flip up-down (probability) 'fliplr': (0, 0.0, 1.0), # image flip left-right (probability) 'mosaic': (1, 0.0, 1.0), # image mixup (probability) - 'mixup': (1, 0.0, 1.0)} # image mixup (probability) + 'mixup': (1, 0.0, 1.0), # image mixup (probability) + 'copy_paste': (1, 0.0, 1.0)} # segment copy-paste (probability) with open(opt.hyp) as f: hyp = yaml.safe_load(f) # load hyps dict diff --git a/utils/datasets.py b/utils/datasets.py index 5baf9c5b1906..55f046cd56db 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -25,6 +25,7 @@ from utils.general import check_requirements, check_file, check_dataset, xywh2xyxy, xywhn2xyxy, xyxy2xywhn, \ xyn2xy, segment2box, segments2boxes, resample_segments, clean_str +from utils.metrics import bbox_ioa from utils.torch_utils import torch_distributed_zero_first # Parameters @@ -683,6 +684,7 @@ def load_mosaic(self, index): # img4, labels4 = replicate(img4, labels4) # replicate # Augment + img4, labels4, segments4 = copy_paste(img4, labels4, segments4, probability=self.hyp['copy_paste']) img4, labels4 = random_perspective(img4, labels4, segments4, degrees=self.hyp['degrees'], translate=self.hyp['translate'], @@ -907,6 +909,30 @@ def random_perspective(img, targets=(), segments=(), degrees=10, translate=.1, s return img, targets +def copy_paste(img, labels, segments, probability=0.5): + # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy) + n = len(segments) + if probability and n: + h, w, c = img.shape # height, width, channels + im_new = np.zeros(img.shape, np.uint8) + for j in random.sample(range(n), k=round(probability * n)): + l, s = labels[j], segments[j] + box = w - l[3], l[2], w - l[1], l[4] + ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area + if (ioa < 0.30).all(): # allow 30% obscuration of existing labels + labels = np.concatenate((labels, [[l[0], *box]]), 0) + segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1)) + cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED) + + result = cv2.bitwise_and(src1=img, src2=im_new) + result = cv2.flip(result, 1) # augment segments (flip left-right) + i = result > 0 # pixels to replace + # i[:, :] = result.max(2).reshape(h, w, 1) # act over ch + img[i] = result[i] # cv2.imwrite('debug.jpg', img) # debug + + return img, labels, segments + + def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n) # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio w1, h1 = box1[2] - box1[0], box1[3] - box1[1] @@ -919,24 +945,6 @@ def cutout(image, labels): # Applies image cutout augmentation https://arxiv.org/abs/1708.04552 h, w = image.shape[:2] - def bbox_ioa(box1, box2): - # Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. 
boxes are x1y1x2y2 - box2 = box2.transpose() - - # Get the coordinates of bounding boxes - b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] - b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] - - # Intersection area - inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \ - (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0) - - # box2 area - box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16 - - # Intersection over box2 area - return inter_area / box2_area - # create random masks scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction for s in scales: diff --git a/utils/metrics.py b/utils/metrics.py index 4f001c046285..c94c4a76a964 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -1,9 +1,9 @@ # Model validation metrics -import math import warnings from pathlib import Path +import math import matplotlib.pyplot as plt import numpy as np import torch @@ -253,6 +253,30 @@ def box_area(box): return inter / (area1[:, None] + area2 - inter) # iou = inter / (area1 + area2 - inter) +def bbox_ioa(box1, box2, eps=1E-7): + """ Returns the intersection over box2 area given box1, box2. Boxes are x1y1x2y2 + box1: np.array of shape(4) + box2: np.array of shape(nx4) + returns: np.array of shape(n) + """ + + box2 = box2.transpose() + + # Get the coordinates of bounding boxes + b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] + b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] + + # Intersection area + inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \ + (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0) + + # box2 area + box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + eps + + # Intersection over box2 area + return inter_area / box2_area + + def wh_iou(wh1, wh2): # Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2 wh1 = wh1[:, None] # [N,1,2] From b6863385b571998984cc782d4d50ed34b9f631d0 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 2 Jul 2021 12:23:09 +0200 Subject: [PATCH 142/757] Created using Colaboratory --- tutorial.ipynb | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index d136803659fb..f2e35b3db12e 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -1074,7 +1074,7 @@ "id": "7KN5ghjE6ZWh" }, "source": [ - "Training losses and performance metrics are also logged to [Tensorboard](https://www.tensorflow.org/tensorboard) and a custom `results.txt` logfile which is plotted as `results.png` (below) after training completes. Here we show YOLOv5s trained on COCO128 to 300 epochs, starting from scratch (blue), and from pretrained `--weights yolov5s.pt` (orange)." + "Training results are automatically logged to [Tensorboard](https://www.tensorflow.org/tensorboard) and `runs/train/exp/results.txt`, which is plotted as `results.png` (below) after training completes. You can also plot any `results.txt` file manually:" ] }, { @@ -1084,7 +1084,7 @@ }, "source": [ "from utils.plots import plot_results \n", - "plot_results(save_dir='runs/train/exp') # plot all results*.txt as results.png\n", + "plot_results(save_dir='runs/train/exp') # plot all results*.txt files in 'runs/train/exp'\n", "Image(filename='runs/train/exp/results.png', width=800)" ], "execution_count": null, @@ -1096,7 +1096,7 @@ "id": "lfrEegCSW3fK" }, "source": [ - "\n" + "
COCO128 training results plot (image markup not preserved)
" ] }, { From 4717a3b0380c540210a60c38a20442e8884d5459 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 2 Jul 2021 12:24:27 +0200 Subject: [PATCH 143/757] Created using Colaboratory --- tutorial.ipynb | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index f2e35b3db12e..76e02e95a29d 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -1139,20 +1139,6 @@ "Optional extras below. Unit tests validate repo functionality and should be run on any PRs submitted.\n" ] }, - { - "cell_type": "code", - "metadata": { - "id": "gI6NoBev8Ib1" - }, - "source": [ - "# Re-clone repo\n", - "%cd ..\n", - "%rm -rf yolov5 && git clone https://github.com/ultralytics/yolov5\n", - "%cd yolov5" - ], - "execution_count": null, - "outputs": [] - }, { "cell_type": "code", "metadata": { From 831773f5a23926658ee76459ce37550643432123 Mon Sep 17 00:00:00 2001 From: Valentin Aliferov Date: Fri, 2 Jul 2021 14:25:54 +0300 Subject: [PATCH 144/757] Add EXIF rotation to YOLOv5 Hub inference (#3852) * rotating an image according to its exif tag * Update common.py * Update datasets.py * Update datasets.py faster * delete extraneous gpg file * Update common.py Co-authored-by: Glenn Jocher --- models/common.py | 9 +++++---- utils/datasets.py | 26 ++++++++++++++++++++++++++ 2 files changed, 31 insertions(+), 4 deletions(-) diff --git a/models/common.py b/models/common.py index 5ffb8440b60f..9911b207d060 100644 --- a/models/common.py +++ b/models/common.py @@ -1,9 +1,9 @@ # YOLOv5 common modules -import math from copy import copy from pathlib import Path +import math import numpy as np import pandas as pd import requests @@ -12,7 +12,7 @@ from PIL import Image from torch.cuda import amp -from utils.datasets import letterbox +from utils.datasets import exif_transpose, letterbox from utils.general import non_max_suppression, make_divisible, scale_coords, increment_path, xyxy2xywh, save_one_box from utils.plots import colors, plot_one_box from utils.torch_utils import time_synchronized @@ -252,9 +252,10 @@ def forward(self, imgs, size=640, augment=False, profile=False): for i, im in enumerate(imgs): f = f'image{i}' # filename if isinstance(im, str): # filename or uri - im, f = np.asarray(Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im)), im + im, f = Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im), im + im = np.asarray(exif_transpose(im)) elif isinstance(im, Image.Image): # PIL Image - im, f = np.asarray(im), getattr(im, 'filename', f) or f + im, f = np.asarray(exif_transpose(im)), getattr(im, 'filename') or f files.append(Path(f).with_suffix('.jpg').name) if im.shape[0] < 5: # image in CHW im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1) diff --git a/utils/datasets.py b/utils/datasets.py index 55f046cd56db..f7315522e375 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -64,6 +64,32 @@ def exif_size(img): return s +def exif_transpose(image): + """ + Transpose a PIL image accordingly if it has an EXIF Orientation tag. + From https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py + + :param image: The image to transpose. + :return: An image. 
+ """ + exif = image.getexif() + orientation = exif.get(0x0112, 1) # default 1 + if orientation > 1: + method = {2: Image.FLIP_LEFT_RIGHT, + 3: Image.ROTATE_180, + 4: Image.FLIP_TOP_BOTTOM, + 5: Image.TRANSPOSE, + 6: Image.ROTATE_270, + 7: Image.TRANSVERSE, + 8: Image.ROTATE_90, + }.get(orientation) + if method is not None: + image = image.transpose(method) + del exif[0x0112] + image.info["exif"] = exif.tobytes() + return image + + def create_dataloader(path, imgsz, batch_size, stride, single_cls=False, hyp=None, augment=False, cache=False, pad=0.0, rect=False, rank=-1, workers=8, image_weights=False, quad=False, prefix=''): # Make sure only the first process in DDP process the dataset first, and the following others can use the cache From d3e9d69850b2b910e0ea5e4ffba372a241fede5a Mon Sep 17 00:00:00 2001 From: san-soucie <44901782+san-soucie@users.noreply.github.com> Date: Sun, 4 Jul 2021 06:14:35 -0400 Subject: [PATCH 145/757] `--evolve 300` generations CLI argument (#3863) * evolve command accepts argument for number of generations * evolve generations argument used in evolve for loop * evolve argument boolean fixes * default to 300 evolve generations * Update train.py Co-authored-by: John San Soucie Co-authored-by: Glenn Jocher --- train.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/train.py b/train.py index 386f3d90dd73..2e864a60cefc 100644 --- a/train.py +++ b/train.py @@ -494,7 +494,7 @@ def parse_opt(known=False): parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') parser.add_argument('--notest', action='store_true', help='only test final epoch') parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check') - parser.add_argument('--evolve', action='store_true', help='evolve hyperparameters') + parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations') parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') parser.add_argument('--cache-images', action='store_true', help='cache images for faster training') parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training') @@ -542,7 +542,7 @@ def main(opt): assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified' opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size))) # extend to 2 sizes (train, test) opt.name = 'evolve' if opt.evolve else opt.name - opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok | opt.evolve)) + opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve)) # DDP mode device = select_device(opt.device, batch_size=opt.batch_size) @@ -603,7 +603,7 @@ def main(opt): if opt.bucket: os.system('gsutil cp gs://%s/evolve.txt .' % opt.bucket) # download evolve.txt if exists - for _ in range(300): # generations to evolve + for _ in range(opt.evolve): # generations to evolve if Path('evolve.txt').exists(): # if evolve.txt exists: select best hyps and mutate # Select parent(s) parent = 'single' # parent selection method: 'single' or 'weighted' From 9d86b54eb37ea9d2b4ae3cda0dfe8ab7aa16f2c6 Mon Sep 17 00:00:00 2001 From: ketan-b <54092325+ketan-b@users.noreply.github.com> Date: Sun, 4 Jul 2021 16:25:57 +0530 Subject: [PATCH 146/757] Add multi-stream saving feature (#3864) * Added the recording feature for multiple streams Thanks for the very cool repo!! 
I was trying to record multiple feeds at the same time, but the current version of the detector only had one video writer and one vid_path! So the streams were not being saved and only were initialized with one frame and this process didn't record the whole thing. Fix: I made a list of `vid_writer` and `vid_path` and the `i` from the loop over the `pred` took care of the writer which need to work! I hope this helps, Thanks! * Cleanup list lengths * batch size variable * Update datasets.py Co-authored-by: Glenn Jocher --- detect.py | 18 ++++++++++-------- utils/datasets.py | 2 +- 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/detect.py b/detect.py index 808f3584c93d..a4542f7e8802 100644 --- a/detect.py +++ b/detect.py @@ -76,14 +76,16 @@ def run(weights='yolov5s.pt', # model.pt path(s) modelc = load_classifier(name='resnet50', n=2) # initialize modelc.load_state_dict(torch.load('resnet50.pt', map_location=device)['model']).to(device).eval() - # Set Dataloader - vid_path, vid_writer = None, None + # Dataloader if webcam: view_img = check_imshow() cudnn.benchmark = True # set True to speed up constant image size inference dataset = LoadStreams(source, img_size=imgsz, stride=stride) + bs = len(dataset) # batch_size else: dataset = LoadImages(source, img_size=imgsz, stride=stride) + bs = 1 # batch_size + vid_path, vid_writer = [None] * bs, [None] * bs # Run inference if device.type != 'cpu': @@ -158,10 +160,10 @@ def run(weights='yolov5s.pt', # model.pt path(s) if dataset.mode == 'image': cv2.imwrite(save_path, im0) else: # 'video' or 'stream' - if vid_path != save_path: # new video - vid_path = save_path - if isinstance(vid_writer, cv2.VideoWriter): - vid_writer.release() # release previous video writer + if vid_path[i] != save_path: # new video + vid_path[i] = save_path + if isinstance(vid_writer[i], cv2.VideoWriter): + vid_writer[i].release() # release previous video writer if vid_cap: # video fps = vid_cap.get(cv2.CAP_PROP_FPS) w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) @@ -169,8 +171,8 @@ def run(weights='yolov5s.pt', # model.pt path(s) else: # stream fps, w, h = 30, im0.shape[1], im0.shape[0] save_path += '.mp4' - vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) - vid_writer.write(im0) + vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) + vid_writer[i].write(im0) if save_txt or save_img: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' diff --git a/utils/datasets.py b/utils/datasets.py index f7315522e375..8560f7cfeb88 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -352,7 +352,7 @@ def __next__(self): return self.sources, img, img0, None def __len__(self): - return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years + return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years def img2label_paths(img_paths): From bd88e7f4f2cfd7cca6893262da6d748ca23e2807 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 4 Jul 2021 15:55:23 +0200 Subject: [PATCH 147/757] Created using Colaboratory --- tutorial.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 76e02e95a29d..a87f787cca8e 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -680,7 +680,7 @@ "source": [ "# Download COCO val2017\n", "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017val.zip', 'tmp.zip')\n", - "!unzip -q tmp.zip -d ../ && rm tmp.zip" + 
"!unzip -q tmp.zip -d ../datasets && rm tmp.zip" ], "execution_count": null, "outputs": [ From 81b31824f550dfd5ba9322a114864d9843de0c75 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 4 Jul 2021 16:55:08 +0200 Subject: [PATCH 148/757] Models `*.yaml` reformat (#3875) --- models/hub/yolov3-spp.yaml | 68 ++++++++++++++--------------- models/hub/yolov3-tiny.yaml | 50 ++++++++++----------- models/hub/yolov3.yaml | 68 ++++++++++++++--------------- models/hub/yolov5-fpn.yaml | 50 ++++++++++----------- models/hub/yolov5-p2.yaml | 4 +- models/hub/yolov5-p6.yaml | 4 +- models/hub/yolov5-p7.yaml | 4 +- models/hub/yolov5-panet.yaml | 60 ++++++++++++------------- models/hub/yolov5l6.yaml | 4 +- models/hub/yolov5m6.yaml | 4 +- models/hub/yolov5s-transformer.yaml | 60 ++++++++++++------------- models/hub/yolov5s6.yaml | 4 +- models/hub/yolov5x6.yaml | 4 +- models/yolo.py | 2 +- models/yolov5l.yaml | 4 +- models/yolov5m.yaml | 4 +- models/yolov5s.yaml | 4 +- models/yolov5x.yaml | 4 +- 18 files changed, 184 insertions(+), 218 deletions(-) diff --git a/models/hub/yolov3-spp.yaml b/models/hub/yolov3-spp.yaml index 38dcc449f0d0..0ca7b7f6577b 100644 --- a/models/hub/yolov3-spp.yaml +++ b/models/hub/yolov3-spp.yaml @@ -1,51 +1,49 @@ -# parameters +# Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple - -# anchors anchors: - - [10,13, 16,30, 33,23] # P3/8 - - [30,61, 62,45, 59,119] # P4/16 - - [116,90, 156,198, 373,326] # P5/32 + - [ 10,13, 16,30, 33,23 ] # P3/8 + - [ 30,61, 62,45, 59,119 ] # P4/16 + - [ 116,90, 156,198, 373,326 ] # P5/32 # darknet53 backbone backbone: # [from, number, module, args] - [[-1, 1, Conv, [32, 3, 1]], # 0 - [-1, 1, Conv, [64, 3, 2]], # 1-P1/2 - [-1, 1, Bottleneck, [64]], - [-1, 1, Conv, [128, 3, 2]], # 3-P2/4 - [-1, 2, Bottleneck, [128]], - [-1, 1, Conv, [256, 3, 2]], # 5-P3/8 - [-1, 8, Bottleneck, [256]], - [-1, 1, Conv, [512, 3, 2]], # 7-P4/16 - [-1, 8, Bottleneck, [512]], - [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32 - [-1, 4, Bottleneck, [1024]], # 10 + [ [ -1, 1, Conv, [ 32, 3, 1 ] ], # 0 + [ -1, 1, Conv, [ 64, 3, 2 ] ], # 1-P1/2 + [ -1, 1, Bottleneck, [ 64 ] ], + [ -1, 1, Conv, [ 128, 3, 2 ] ], # 3-P2/4 + [ -1, 2, Bottleneck, [ 128 ] ], + [ -1, 1, Conv, [ 256, 3, 2 ] ], # 5-P3/8 + [ -1, 8, Bottleneck, [ 256 ] ], + [ -1, 1, Conv, [ 512, 3, 2 ] ], # 7-P4/16 + [ -1, 8, Bottleneck, [ 512 ] ], + [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P5/32 + [ -1, 4, Bottleneck, [ 1024 ] ], # 10 ] # YOLOv3-SPP head head: - [[-1, 1, Bottleneck, [1024, False]], - [-1, 1, SPP, [512, [5, 9, 13]]], - [-1, 1, Conv, [1024, 3, 1]], - [-1, 1, Conv, [512, 1, 1]], - [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large) + [ [ -1, 1, Bottleneck, [ 1024, False ] ], + [ -1, 1, SPP, [ 512, [ 5, 9, 13 ] ] ], + [ -1, 1, Conv, [ 1024, 3, 1 ] ], + [ -1, 1, Conv, [ 512, 1, 1 ] ], + [ -1, 1, Conv, [ 1024, 3, 1 ] ], # 15 (P5/32-large) - [-2, 1, Conv, [256, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 8], 1, Concat, [1]], # cat backbone P4 - [-1, 1, Bottleneck, [512, False]], - [-1, 1, Bottleneck, [512, False]], - [-1, 1, Conv, [256, 1, 1]], - [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium) + [ -2, 1, Conv, [ 256, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P4 + [ -1, 1, Bottleneck, [ 512, False ] ], + [ -1, 1, Bottleneck, [ 512, False ] ], + [ -1, 1, Conv, [ 256, 1, 1 ] ], + [ -1, 1, Conv, [ 512, 3, 1 ] ], # 22 (P4/16-medium) - [-2, 1, Conv, [128, 1, 1]], - [-1, 1, 
nn.Upsample, [None, 2, 'nearest']], - [[-1, 6], 1, Concat, [1]], # cat backbone P3 - [-1, 1, Bottleneck, [256, False]], - [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small) + [ -2, 1, Conv, [ 128, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P3 + [ -1, 1, Bottleneck, [ 256, False ] ], + [ -1, 2, Bottleneck, [ 256, False ] ], # 27 (P3/8-small) - [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + [ [ 27, 22, 15 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5) ] diff --git a/models/hub/yolov3-tiny.yaml b/models/hub/yolov3-tiny.yaml index ff7638cad3be..d39a6b1f581c 100644 --- a/models/hub/yolov3-tiny.yaml +++ b/models/hub/yolov3-tiny.yaml @@ -1,41 +1,39 @@ -# parameters +# Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple - -# anchors anchors: - - [10,14, 23,27, 37,58] # P4/16 - - [81,82, 135,169, 344,319] # P5/32 + - [ 10,14, 23,27, 37,58 ] # P4/16 + - [ 81,82, 135,169, 344,319 ] # P5/32 # YOLOv3-tiny backbone backbone: # [from, number, module, args] - [[-1, 1, Conv, [16, 3, 1]], # 0 - [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 1-P1/2 - [-1, 1, Conv, [32, 3, 1]], - [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 3-P2/4 - [-1, 1, Conv, [64, 3, 1]], - [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 5-P3/8 - [-1, 1, Conv, [128, 3, 1]], - [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 7-P4/16 - [-1, 1, Conv, [256, 3, 1]], - [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 9-P5/32 - [-1, 1, Conv, [512, 3, 1]], - [-1, 1, nn.ZeroPad2d, [[0, 1, 0, 1]]], # 11 - [-1, 1, nn.MaxPool2d, [2, 1, 0]], # 12 + [ [ -1, 1, Conv, [ 16, 3, 1 ] ], # 0 + [ -1, 1, nn.MaxPool2d, [ 2, 2, 0 ] ], # 1-P1/2 + [ -1, 1, Conv, [ 32, 3, 1 ] ], + [ -1, 1, nn.MaxPool2d, [ 2, 2, 0 ] ], # 3-P2/4 + [ -1, 1, Conv, [ 64, 3, 1 ] ], + [ -1, 1, nn.MaxPool2d, [ 2, 2, 0 ] ], # 5-P3/8 + [ -1, 1, Conv, [ 128, 3, 1 ] ], + [ -1, 1, nn.MaxPool2d, [ 2, 2, 0 ] ], # 7-P4/16 + [ -1, 1, Conv, [ 256, 3, 1 ] ], + [ -1, 1, nn.MaxPool2d, [ 2, 2, 0 ] ], # 9-P5/32 + [ -1, 1, Conv, [ 512, 3, 1 ] ], + [ -1, 1, nn.ZeroPad2d, [ [ 0, 1, 0, 1 ] ] ], # 11 + [ -1, 1, nn.MaxPool2d, [ 2, 1, 0 ] ], # 12 ] # YOLOv3-tiny head head: - [[-1, 1, Conv, [1024, 3, 1]], - [-1, 1, Conv, [256, 1, 1]], - [-1, 1, Conv, [512, 3, 1]], # 15 (P5/32-large) + [ [ -1, 1, Conv, [ 1024, 3, 1 ] ], + [ -1, 1, Conv, [ 256, 1, 1 ] ], + [ -1, 1, Conv, [ 512, 3, 1 ] ], # 15 (P5/32-large) - [-2, 1, Conv, [128, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 8], 1, Concat, [1]], # cat backbone P4 - [-1, 1, Conv, [256, 3, 1]], # 19 (P4/16-medium) + [ -2, 1, Conv, [ 128, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P4 + [ -1, 1, Conv, [ 256, 3, 1 ] ], # 19 (P4/16-medium) - [[19, 15], 1, Detect, [nc, anchors]], # Detect(P4, P5) + [ [ 19, 15 ], 1, Detect, [ nc, anchors ] ], # Detect(P4, P5) ] diff --git a/models/hub/yolov3.yaml b/models/hub/yolov3.yaml index f2e761355469..09df0d9ef362 100644 --- a/models/hub/yolov3.yaml +++ b/models/hub/yolov3.yaml @@ -1,51 +1,49 @@ -# parameters +# Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple - -# anchors anchors: - - [10,13, 16,30, 33,23] # P3/8 - - [30,61, 62,45, 59,119] # P4/16 - - [116,90, 156,198, 373,326] # P5/32 + - [ 10,13, 16,30, 33,23 ] # P3/8 + - [ 30,61, 62,45, 59,119 ] # P4/16 + - [ 116,90, 156,198, 373,326 ] # P5/32 # darknet53 backbone backbone: # [from, number, module, args] - [[-1, 1, 
Conv, [32, 3, 1]], # 0 - [-1, 1, Conv, [64, 3, 2]], # 1-P1/2 - [-1, 1, Bottleneck, [64]], - [-1, 1, Conv, [128, 3, 2]], # 3-P2/4 - [-1, 2, Bottleneck, [128]], - [-1, 1, Conv, [256, 3, 2]], # 5-P3/8 - [-1, 8, Bottleneck, [256]], - [-1, 1, Conv, [512, 3, 2]], # 7-P4/16 - [-1, 8, Bottleneck, [512]], - [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32 - [-1, 4, Bottleneck, [1024]], # 10 + [ [ -1, 1, Conv, [ 32, 3, 1 ] ], # 0 + [ -1, 1, Conv, [ 64, 3, 2 ] ], # 1-P1/2 + [ -1, 1, Bottleneck, [ 64 ] ], + [ -1, 1, Conv, [ 128, 3, 2 ] ], # 3-P2/4 + [ -1, 2, Bottleneck, [ 128 ] ], + [ -1, 1, Conv, [ 256, 3, 2 ] ], # 5-P3/8 + [ -1, 8, Bottleneck, [ 256 ] ], + [ -1, 1, Conv, [ 512, 3, 2 ] ], # 7-P4/16 + [ -1, 8, Bottleneck, [ 512 ] ], + [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P5/32 + [ -1, 4, Bottleneck, [ 1024 ] ], # 10 ] # YOLOv3 head head: - [[-1, 1, Bottleneck, [1024, False]], - [-1, 1, Conv, [512, [1, 1]]], - [-1, 1, Conv, [1024, 3, 1]], - [-1, 1, Conv, [512, 1, 1]], - [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large) + [ [ -1, 1, Bottleneck, [ 1024, False ] ], + [ -1, 1, Conv, [ 512, [ 1, 1 ] ] ], + [ -1, 1, Conv, [ 1024, 3, 1 ] ], + [ -1, 1, Conv, [ 512, 1, 1 ] ], + [ -1, 1, Conv, [ 1024, 3, 1 ] ], # 15 (P5/32-large) - [-2, 1, Conv, [256, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 8], 1, Concat, [1]], # cat backbone P4 - [-1, 1, Bottleneck, [512, False]], - [-1, 1, Bottleneck, [512, False]], - [-1, 1, Conv, [256, 1, 1]], - [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium) + [ -2, 1, Conv, [ 256, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P4 + [ -1, 1, Bottleneck, [ 512, False ] ], + [ -1, 1, Bottleneck, [ 512, False ] ], + [ -1, 1, Conv, [ 256, 1, 1 ] ], + [ -1, 1, Conv, [ 512, 3, 1 ] ], # 22 (P4/16-medium) - [-2, 1, Conv, [128, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 6], 1, Concat, [1]], # cat backbone P3 - [-1, 1, Bottleneck, [256, False]], - [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small) + [ -2, 1, Conv, [ 128, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P3 + [ -1, 1, Bottleneck, [ 256, False ] ], + [ -1, 2, Bottleneck, [ 256, False ] ], # 27 (P3/8-small) - [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + [ [ 27, 22, 15 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5) ] diff --git a/models/hub/yolov5-fpn.yaml b/models/hub/yolov5-fpn.yaml index e772bffecbbc..b8b7fc1a23d4 100644 --- a/models/hub/yolov5-fpn.yaml +++ b/models/hub/yolov5-fpn.yaml @@ -1,42 +1,40 @@ -# parameters +# Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple - -# anchors anchors: - - [10,13, 16,30, 33,23] # P3/8 - - [30,61, 62,45, 59,119] # P4/16 - - [116,90, 156,198, 373,326] # P5/32 + - [ 10,13, 16,30, 33,23 ] # P3/8 + - [ 30,61, 62,45, 59,119 ] # P4/16 + - [ 116,90, 156,198, 373,326 ] # P5/32 # YOLOv5 backbone backbone: # [from, number, module, args] - [[-1, 1, Focus, [64, 3]], # 0-P1/2 - [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 - [-1, 3, Bottleneck, [128]], - [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 - [-1, 9, BottleneckCSP, [256]], - [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 - [-1, 9, BottleneckCSP, [512]], - [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 - [-1, 1, SPP, [1024, [5, 9, 13]]], - [-1, 6, BottleneckCSP, [1024]], # 9 + [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 + [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 + [ -1, 3, Bottleneck, [ 128 ] ], + [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 
+ [ -1, 9, BottleneckCSP, [ 256 ] ], + [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 + [ -1, 9, BottleneckCSP, [ 512 ] ], + [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 7-P5/32 + [ -1, 1, SPP, [ 1024, [ 5, 9, 13 ] ] ], + [ -1, 6, BottleneckCSP, [ 1024 ] ], # 9 ] # YOLOv5 FPN head head: - [[-1, 3, BottleneckCSP, [1024, False]], # 10 (P5/32-large) + [ [ -1, 3, BottleneckCSP, [ 1024, False ] ], # 10 (P5/32-large) - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 6], 1, Concat, [1]], # cat backbone P4 - [-1, 1, Conv, [512, 1, 1]], - [-1, 3, BottleneckCSP, [512, False]], # 14 (P4/16-medium) + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 + [ -1, 1, Conv, [ 512, 1, 1 ] ], + [ -1, 3, BottleneckCSP, [ 512, False ] ], # 14 (P4/16-medium) - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 4], 1, Concat, [1]], # cat backbone P3 - [-1, 1, Conv, [256, 1, 1]], - [-1, 3, BottleneckCSP, [256, False]], # 18 (P3/8-small) + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 + [ -1, 1, Conv, [ 256, 1, 1 ] ], + [ -1, 3, BottleneckCSP, [ 256, False ] ], # 18 (P3/8-small) - [[18, 14, 10], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + [ [ 18, 14, 10 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5) ] diff --git a/models/hub/yolov5-p2.yaml b/models/hub/yolov5-p2.yaml index 0633a90fd065..62122363df2d 100644 --- a/models/hub/yolov5-p2.yaml +++ b/models/hub/yolov5-p2.yaml @@ -1,9 +1,7 @@ -# parameters +# Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple - -# anchors anchors: 3 # YOLOv5 backbone diff --git a/models/hub/yolov5-p6.yaml b/models/hub/yolov5-p6.yaml index 3728a118f090..c5ef5177f0c8 100644 --- a/models/hub/yolov5-p6.yaml +++ b/models/hub/yolov5-p6.yaml @@ -1,9 +1,7 @@ -# parameters +# Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple - -# anchors anchors: 3 # YOLOv5 backbone diff --git a/models/hub/yolov5-p7.yaml b/models/hub/yolov5-p7.yaml index ca8f8492ce0e..505c590ca168 100644 --- a/models/hub/yolov5-p7.yaml +++ b/models/hub/yolov5-p7.yaml @@ -1,9 +1,7 @@ -# parameters +# Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple - -# anchors anchors: 3 # YOLOv5 backbone diff --git a/models/hub/yolov5-panet.yaml b/models/hub/yolov5-panet.yaml index 340f95a4dbc9..aee5dab01fa1 100644 --- a/models/hub/yolov5-panet.yaml +++ b/models/hub/yolov5-panet.yaml @@ -1,48 +1,46 @@ -# parameters +# Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple - -# anchors anchors: - - [10,13, 16,30, 33,23] # P3/8 - - [30,61, 62,45, 59,119] # P4/16 - - [116,90, 156,198, 373,326] # P5/32 + - [ 10,13, 16,30, 33,23 ] # P3/8 + - [ 30,61, 62,45, 59,119 ] # P4/16 + - [ 116,90, 156,198, 373,326 ] # P5/32 # YOLOv5 backbone backbone: # [from, number, module, args] - [[-1, 1, Focus, [64, 3]], # 0-P1/2 - [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 - [-1, 3, BottleneckCSP, [128]], - [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 - [-1, 9, BottleneckCSP, [256]], - [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 - [-1, 9, BottleneckCSP, [512]], - [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 - [-1, 1, SPP, [1024, [5, 9, 13]]], - [-1, 3, BottleneckCSP, [1024, False]], # 9 + [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 + [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 + [ -1, 3, BottleneckCSP, [ 
128 ] ], + [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 + [ -1, 9, BottleneckCSP, [ 256 ] ], + [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 + [ -1, 9, BottleneckCSP, [ 512 ] ], + [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 7-P5/32 + [ -1, 1, SPP, [ 1024, [ 5, 9, 13 ] ] ], + [ -1, 3, BottleneckCSP, [ 1024, False ] ], # 9 ] # YOLOv5 PANet head head: - [[-1, 1, Conv, [512, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 6], 1, Concat, [1]], # cat backbone P4 - [-1, 3, BottleneckCSP, [512, False]], # 13 + [ [ -1, 1, Conv, [ 512, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 + [ -1, 3, BottleneckCSP, [ 512, False ] ], # 13 - [-1, 1, Conv, [256, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 4], 1, Concat, [1]], # cat backbone P3 - [-1, 3, BottleneckCSP, [256, False]], # 17 (P3/8-small) + [ -1, 1, Conv, [ 256, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 + [ -1, 3, BottleneckCSP, [ 256, False ] ], # 17 (P3/8-small) - [-1, 1, Conv, [256, 3, 2]], - [[-1, 14], 1, Concat, [1]], # cat head P4 - [-1, 3, BottleneckCSP, [512, False]], # 20 (P4/16-medium) + [ -1, 1, Conv, [ 256, 3, 2 ] ], + [ [ -1, 14 ], 1, Concat, [ 1 ] ], # cat head P4 + [ -1, 3, BottleneckCSP, [ 512, False ] ], # 20 (P4/16-medium) - [-1, 1, Conv, [512, 3, 2]], - [[-1, 10], 1, Concat, [1]], # cat head P5 - [-1, 3, BottleneckCSP, [1024, False]], # 23 (P5/32-large) + [ -1, 1, Conv, [ 512, 3, 2 ] ], + [ [ -1, 10 ], 1, Concat, [ 1 ] ], # cat head P5 + [ -1, 3, BottleneckCSP, [ 1024, False ] ], # 23 (P5/32-large) - [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + [ [ 17, 20, 23 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5) ] diff --git a/models/hub/yolov5l6.yaml b/models/hub/yolov5l6.yaml index 11298b01f479..91c57da1939e 100644 --- a/models/hub/yolov5l6.yaml +++ b/models/hub/yolov5l6.yaml @@ -1,9 +1,7 @@ -# parameters +# Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple - -# anchors anchors: - [ 19,27, 44,40, 38,94 ] # P3/8 - [ 96,68, 86,152, 180,137 ] # P4/16 diff --git a/models/hub/yolov5m6.yaml b/models/hub/yolov5m6.yaml index 48afc865593a..4bef2e074a96 100644 --- a/models/hub/yolov5m6.yaml +++ b/models/hub/yolov5m6.yaml @@ -1,9 +1,7 @@ -# parameters +# Parameters nc: 80 # number of classes depth_multiple: 0.67 # model depth multiple width_multiple: 0.75 # layer channel multiple - -# anchors anchors: - [ 19,27, 44,40, 38,94 ] # P3/8 - [ 96,68, 86,152, 180,137 ] # P4/16 diff --git a/models/hub/yolov5s-transformer.yaml b/models/hub/yolov5s-transformer.yaml index f2d666722b30..8023ba480d24 100644 --- a/models/hub/yolov5s-transformer.yaml +++ b/models/hub/yolov5s-transformer.yaml @@ -1,48 +1,46 @@ -# parameters +# Parameters nc: 80 # number of classes depth_multiple: 0.33 # model depth multiple width_multiple: 0.50 # layer channel multiple - -# anchors anchors: - - [10,13, 16,30, 33,23] # P3/8 - - [30,61, 62,45, 59,119] # P4/16 - - [116,90, 156,198, 373,326] # P5/32 + - [ 10,13, 16,30, 33,23 ] # P3/8 + - [ 30,61, 62,45, 59,119 ] # P4/16 + - [ 116,90, 156,198, 373,326 ] # P5/32 # YOLOv5 backbone backbone: # [from, number, module, args] - [[-1, 1, Focus, [64, 3]], # 0-P1/2 - [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 - [-1, 3, C3, [128]], - [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 - [-1, 9, C3, [256]], - [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 - [-1, 9, C3, [512]], - [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 - [-1, 
1, SPP, [1024, [5, 9, 13]]], - [-1, 3, C3TR, [1024, False]], # 9 <-------- C3TR() Transformer module + [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 + [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 + [ -1, 3, C3, [ 128 ] ], + [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 + [ -1, 9, C3, [ 256 ] ], + [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 + [ -1, 9, C3, [ 512 ] ], + [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 7-P5/32 + [ -1, 1, SPP, [ 1024, [ 5, 9, 13 ] ] ], + [ -1, 3, C3TR, [ 1024, False ] ], # 9 <-------- C3TR() Transformer module ] # YOLOv5 head head: - [[-1, 1, Conv, [512, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 6], 1, Concat, [1]], # cat backbone P4 - [-1, 3, C3, [512, False]], # 13 + [ [ -1, 1, Conv, [ 512, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 + [ -1, 3, C3, [ 512, False ] ], # 13 - [-1, 1, Conv, [256, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 4], 1, Concat, [1]], # cat backbone P3 - [-1, 3, C3, [256, False]], # 17 (P3/8-small) + [ -1, 1, Conv, [ 256, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 + [ -1, 3, C3, [ 256, False ] ], # 17 (P3/8-small) - [-1, 1, Conv, [256, 3, 2]], - [[-1, 14], 1, Concat, [1]], # cat head P4 - [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + [ -1, 1, Conv, [ 256, 3, 2 ] ], + [ [ -1, 14 ], 1, Concat, [ 1 ] ], # cat head P4 + [ -1, 3, C3, [ 512, False ] ], # 20 (P4/16-medium) - [-1, 1, Conv, [512, 3, 2]], - [[-1, 10], 1, Concat, [1]], # cat head P5 - [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + [ -1, 1, Conv, [ 512, 3, 2 ] ], + [ [ -1, 10 ], 1, Concat, [ 1 ] ], # cat head P5 + [ -1, 3, C3, [ 1024, False ] ], # 23 (P5/32-large) - [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + [ [ 17, 20, 23 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5) ] diff --git a/models/hub/yolov5s6.yaml b/models/hub/yolov5s6.yaml index 1df577a2cc97..ba1025ec87ad 100644 --- a/models/hub/yolov5s6.yaml +++ b/models/hub/yolov5s6.yaml @@ -1,9 +1,7 @@ -# parameters +# Parameters nc: 80 # number of classes depth_multiple: 0.33 # model depth multiple width_multiple: 0.50 # layer channel multiple - -# anchors anchors: - [ 19,27, 44,40, 38,94 ] # P3/8 - [ 96,68, 86,152, 180,137 ] # P4/16 diff --git a/models/hub/yolov5x6.yaml b/models/hub/yolov5x6.yaml index 5ebc02124fe7..4fc9c9a119b8 100644 --- a/models/hub/yolov5x6.yaml +++ b/models/hub/yolov5x6.yaml @@ -1,9 +1,7 @@ -# parameters +# Parameters nc: 80 # number of classes depth_multiple: 1.33 # model depth multiple width_multiple: 1.25 # layer channel multiple - -# anchors anchors: - [ 19,27, 44,40, 38,94 ] # P3/8 - [ 96,68, 86,152, 180,137 ] # P4/16 diff --git a/models/yolo.py b/models/yolo.py index 4c9456edd687..826590bd9783 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -154,7 +154,7 @@ def forward_once(self, x, profile=False, feature_vis=False): x = m(x) # run y.append(x if m.i in self.save else None) # save output - + if feature_vis and m.type == 'models.common.SPP': feature_visualization(x, m.type, m.i) diff --git a/models/yolov5l.yaml b/models/yolov5l.yaml index 71ebf86e5791..0c130c1514af 100644 --- a/models/yolov5l.yaml +++ b/models/yolov5l.yaml @@ -1,9 +1,7 @@ -# parameters +# Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple - -# anchors anchors: - [10,13, 16,30, 33,23] # P3/8 - [30,61, 62,45, 59,119] # P4/16 diff --git a/models/yolov5m.yaml b/models/yolov5m.yaml index 
3c749c916246..e477b3433d39 100644 --- a/models/yolov5m.yaml +++ b/models/yolov5m.yaml @@ -1,9 +1,7 @@ -# parameters +# Parameters nc: 80 # number of classes depth_multiple: 0.67 # model depth multiple width_multiple: 0.75 # layer channel multiple - -# anchors anchors: - [10,13, 16,30, 33,23] # P3/8 - [30,61, 62,45, 59,119] # P4/16 diff --git a/models/yolov5s.yaml b/models/yolov5s.yaml index aca669d60d8b..e85442dc9188 100644 --- a/models/yolov5s.yaml +++ b/models/yolov5s.yaml @@ -1,9 +1,7 @@ -# parameters +# Parameters nc: 80 # number of classes depth_multiple: 0.33 # model depth multiple width_multiple: 0.50 # layer channel multiple - -# anchors anchors: - [10,13, 16,30, 33,23] # P3/8 - [30,61, 62,45, 59,119] # P4/16 diff --git a/models/yolov5x.yaml b/models/yolov5x.yaml index d3babdf7baf0..c7ca03589ab8 100644 --- a/models/yolov5x.yaml +++ b/models/yolov5x.yaml @@ -1,9 +1,7 @@ -# parameters +# Parameters nc: 80 # number of classes depth_multiple: 1.33 # model depth multiple width_multiple: 1.25 # layer channel multiple - -# anchors anchors: - [10,13, 16,30, 33,23] # P3/8 - [30,61, 62,45, 59,119] # P4/16 From 9e8fb9fd0b4e6ad840991823f7342ca6227ddb62 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 4 Jul 2021 18:14:04 +0200 Subject: [PATCH 149/757] Create `utils/augmentations.py` (#3877) * Create `utils/augmentations.py` * cleanup --- utils/augmentations.py | 244 +++++++++++++++++++++++++++++++++++++++++ utils/datasets.py | 241 +--------------------------------------- 2 files changed, 250 insertions(+), 235 deletions(-) create mode 100644 utils/augmentations.py diff --git a/utils/augmentations.py b/utils/augmentations.py new file mode 100644 index 000000000000..f7b13165daf0 --- /dev/null +++ b/utils/augmentations.py @@ -0,0 +1,244 @@ +# YOLOv5 image augmentation functions + +import random + +import cv2 +import math +import numpy as np + +from utils.general import segment2box, resample_segments +from utils.metrics import bbox_ioa + + +def augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5): + # HSV color-space augmentation + if hgain or sgain or vgain: + r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains + hue, sat, val = cv2.split(cv2.cvtColor(im, cv2.COLOR_BGR2HSV)) + dtype = im.dtype # uint8 + + x = np.arange(0, 256, dtype=r.dtype) + lut_hue = ((x * r[0]) % 180).astype(dtype) + lut_sat = np.clip(x * r[1], 0, 255).astype(dtype) + lut_val = np.clip(x * r[2], 0, 255).astype(dtype) + + img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))) + cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=im) # no return needed + + +def hist_equalize(im, clahe=True, bgr=False): + # Equalize histogram on BGR image 'img' with img.shape(n,m,3) and range 0-255 + yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV) + if clahe: + c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8)) + yuv[:, :, 0] = c.apply(yuv[:, :, 0]) + else: + yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0]) # equalize Y channel histogram + return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB) # convert YUV image to RGB + + +def replicate(im, labels): + # Replicate labels + h, w = im.shape[:2] + boxes = labels[:, 1:].astype(int) + x1, y1, x2, y2 = boxes.T + s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels) + for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices + x1b, y1b, x2b, y2b = boxes[i] + bh, bw = y2b - y1b, x2b - x1b + yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y + x1a, y1a, x2a, y2a = 
[xc, yc, xc + bw, yc + bh] + im[y1a:y2a, x1a:x2a] = im[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] + labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0) + + return im, labels + + +def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32): + # Resize and pad image while meeting stride-multiple constraints + shape = im.shape[:2] # current shape [height, width] + if isinstance(new_shape, int): + new_shape = (new_shape, new_shape) + + # Scale ratio (new / old) + r = min(new_shape[0] / shape[0], new_shape[1] / shape[1]) + if not scaleup: # only scale down, do not scale up (for better test mAP) + r = min(r, 1.0) + + # Compute padding + ratio = r, r # width, height ratios + new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) + dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding + if auto: # minimum rectangle + dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding + elif scaleFill: # stretch + dw, dh = 0.0, 0.0 + new_unpad = (new_shape[1], new_shape[0]) + ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios + + dw /= 2 # divide padding into 2 sides + dh /= 2 + + if shape[::-1] != new_unpad: # resize + im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR) + top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) + left, right = int(round(dw - 0.1)), int(round(dw + 0.1)) + im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border + return im, ratio, (dw, dh) + + +def random_perspective(im, targets=(), segments=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, + border=(0, 0)): + # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10)) + # targets = [cls, xyxy] + + height = im.shape[0] + border[0] * 2 # shape(h,w,c) + width = im.shape[1] + border[1] * 2 + + # Center + C = np.eye(3) + C[0, 2] = -im.shape[1] / 2 # x translation (pixels) + C[1, 2] = -im.shape[0] / 2 # y translation (pixels) + + # Perspective + P = np.eye(3) + P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y) + P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x) + + # Rotation and Scale + R = np.eye(3) + a = random.uniform(-degrees, degrees) + # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations + s = random.uniform(1 - scale, 1 + scale) + # s = 2 ** random.uniform(-scale, scale) + R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s) + + # Shear + S = np.eye(3) + S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg) + S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg) + + # Translation + T = np.eye(3) + T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels) + T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels) + + # Combined rotation matrix + M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT + if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed + if perspective: + im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114)) + else: # affine + im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114)) + + # Visualize + # import matplotlib.pyplot as plt + # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel() + # ax[0].imshow(img[:, :, 
::-1]) # base + # ax[1].imshow(img2[:, :, ::-1]) # warped + + # Transform label coordinates + n = len(targets) + if n: + use_segments = any(x.any() for x in segments) + new = np.zeros((n, 4)) + if use_segments: # warp segments + segments = resample_segments(segments) # upsample + for i, segment in enumerate(segments): + xy = np.ones((len(segment), 3)) + xy[:, :2] = segment + xy = xy @ M.T # transform + xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2] # perspective rescale or affine + + # clip + new[i] = segment2box(xy, width, height) + + else: # warp boxes + xy = np.ones((n * 4, 3)) + xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1 + xy = xy @ M.T # transform + xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine + + # create new boxes + x = xy[:, [0, 2, 4, 6]] + y = xy[:, [1, 3, 5, 7]] + new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T + + # clip + new[:, [0, 2]] = new[:, [0, 2]].clip(0, width) + new[:, [1, 3]] = new[:, [1, 3]].clip(0, height) + + # filter candidates + i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10) + targets = targets[i] + targets[:, 1:5] = new[i] + + return im, targets + + +def copy_paste(im, labels, segments, probability=0.5): + # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy) + n = len(segments) + if probability and n: + h, w, c = im.shape # height, width, channels + im_new = np.zeros(im.shape, np.uint8) + for j in random.sample(range(n), k=round(probability * n)): + l, s = labels[j], segments[j] + box = w - l[3], l[2], w - l[1], l[4] + ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area + if (ioa < 0.30).all(): # allow 30% obscuration of existing labels + labels = np.concatenate((labels, [[l[0], *box]]), 0) + segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1)) + cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED) + + result = cv2.bitwise_and(src1=im, src2=im_new) + result = cv2.flip(result, 1) # augment segments (flip left-right) + i = result > 0 # pixels to replace + # i[:, :] = result.max(2).reshape(h, w, 1) # act over ch + im[i] = result[i] # cv2.imwrite('debug.jpg', img) # debug + + return im, labels, segments + + +def cutout(im, labels): + # Applies image cutout augmentation https://arxiv.org/abs/1708.04552 + h, w = im.shape[:2] + + # create random masks + scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction + for s in scales: + mask_h = random.randint(1, int(h * s)) + mask_w = random.randint(1, int(w * s)) + + # box + xmin = max(0, random.randint(0, w) - mask_w // 2) + ymin = max(0, random.randint(0, h) - mask_h // 2) + xmax = min(w, xmin + mask_w) + ymax = min(h, ymin + mask_h) + + # apply random color mask + im[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)] + + # return unobscured labels + if len(labels) and s > 0.03: + box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32) + ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area + labels = labels[ioa < 0.60] # remove >60% obscured labels + + return labels + + +def mixup(im, labels, im2, labels2): + # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf + r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0 + im = (im * r + im2 * (1 - r)).astype(np.uint8) + labels = np.concatenate((labels, labels2), 0) + return im, 
labels + + +def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n) + # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio + w1, h1 = box1[2] - box1[0], box1[3] - box1[1] + w2, h2 = box2[2] - box2[0], box2[3] - box2[1] + ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio + return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates diff --git a/utils/datasets.py b/utils/datasets.py index 8560f7cfeb88..5c76a908c559 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -1,4 +1,4 @@ -# Dataset utils and dataloaders +# YOLOv5 dataset utils and dataloaders import glob import hashlib @@ -14,7 +14,6 @@ from threading import Thread import cv2 -import math import numpy as np import torch import torch.nn.functional as F @@ -23,9 +22,9 @@ from torch.utils.data import Dataset from tqdm import tqdm +from utils.augmentations import augment_hsv, copy_paste, letterbox, mixup, random_perspective from utils.general import check_requirements, check_file, check_dataset, xywh2xyxy, xywhn2xyxy, xyxy2xywhn, \ - xyn2xy, segment2box, segments2boxes, resample_segments, clean_str -from utils.metrics import bbox_ioa + xyn2xy, segments2boxes, clean_str from utils.torch_utils import torch_distributed_zero_first # Parameters @@ -523,12 +522,10 @@ def __getitem__(self, index): img, labels = load_mosaic(self, index) shapes = None - # MixUp https://arxiv.org/pdf/1710.09412.pdf + # MixUp augmentation if random.random() < hyp['mixup']: - img2, labels2 = load_mosaic(self, random.randint(0, self.n - 1)) - r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0 - img = (img * r + img2 * (1 - r)).astype(np.uint8) - labels = np.concatenate((labels, labels2), 0) + img, labels = mixup(img, labels, *load_mosaic(self, random.randint(0, self.n - 1))) + else: # Load image @@ -639,32 +636,6 @@ def load_image(self, index): return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized -def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5): - if hgain or sgain or vgain: - r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains - hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV)) - dtype = img.dtype # uint8 - - x = np.arange(0, 256, dtype=r.dtype) - lut_hue = ((x * r[0]) % 180).astype(dtype) - lut_sat = np.clip(x * r[1], 0, 255).astype(dtype) - lut_val = np.clip(x * r[2], 0, 255).astype(dtype) - - img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))) - cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed - - -def hist_equalize(img, clahe=True, bgr=False): - # Equalize histogram on BGR image 'img' with img.shape(n,m,3) and range 0-255 - yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV) - if clahe: - c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8)) - yuv[:, :, 0] = c.apply(yuv[:, :, 0]) - else: - yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0]) # equalize Y channel histogram - return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB) # convert YUV image to RGB - - def load_mosaic(self, index): # loads images in a 4-mosaic @@ -796,205 +767,6 @@ def load_mosaic9(self, index): return img9, labels9 -def replicate(img, labels): - # Replicate labels - h, w = img.shape[:2] - boxes = labels[:, 1:].astype(int) - x1, y1, x2, y2 = boxes.T - s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels) - for i in 
s.argsort()[:round(s.size * 0.5)]: # smallest indices - x1b, y1b, x2b, y2b = boxes[i] - bh, bw = y2b - y1b, x2b - x1b - yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y - x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh] - img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] - labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0) - - return img, labels - - -def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32): - # Resize and pad image while meeting stride-multiple constraints - shape = img.shape[:2] # current shape [height, width] - if isinstance(new_shape, int): - new_shape = (new_shape, new_shape) - - # Scale ratio (new / old) - r = min(new_shape[0] / shape[0], new_shape[1] / shape[1]) - if not scaleup: # only scale down, do not scale up (for better test mAP) - r = min(r, 1.0) - - # Compute padding - ratio = r, r # width, height ratios - new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) - dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding - if auto: # minimum rectangle - dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding - elif scaleFill: # stretch - dw, dh = 0.0, 0.0 - new_unpad = (new_shape[1], new_shape[0]) - ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios - - dw /= 2 # divide padding into 2 sides - dh /= 2 - - if shape[::-1] != new_unpad: # resize - img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR) - top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) - left, right = int(round(dw - 0.1)), int(round(dw + 0.1)) - img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border - return img, ratio, (dw, dh) - - -def random_perspective(img, targets=(), segments=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, - border=(0, 0)): - # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10)) - # targets = [cls, xyxy] - - height = img.shape[0] + border[0] * 2 # shape(h,w,c) - width = img.shape[1] + border[1] * 2 - - # Center - C = np.eye(3) - C[0, 2] = -img.shape[1] / 2 # x translation (pixels) - C[1, 2] = -img.shape[0] / 2 # y translation (pixels) - - # Perspective - P = np.eye(3) - P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y) - P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x) - - # Rotation and Scale - R = np.eye(3) - a = random.uniform(-degrees, degrees) - # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations - s = random.uniform(1 - scale, 1 + scale) - # s = 2 ** random.uniform(-scale, scale) - R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s) - - # Shear - S = np.eye(3) - S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg) - S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg) - - # Translation - T = np.eye(3) - T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels) - T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels) - - # Combined rotation matrix - M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT - if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed - if perspective: - img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 
114)) - else: # affine - img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114)) - - # Visualize - # import matplotlib.pyplot as plt - # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel() - # ax[0].imshow(img[:, :, ::-1]) # base - # ax[1].imshow(img2[:, :, ::-1]) # warped - - # Transform label coordinates - n = len(targets) - if n: - use_segments = any(x.any() for x in segments) - new = np.zeros((n, 4)) - if use_segments: # warp segments - segments = resample_segments(segments) # upsample - for i, segment in enumerate(segments): - xy = np.ones((len(segment), 3)) - xy[:, :2] = segment - xy = xy @ M.T # transform - xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2] # perspective rescale or affine - - # clip - new[i] = segment2box(xy, width, height) - - else: # warp boxes - xy = np.ones((n * 4, 3)) - xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1 - xy = xy @ M.T # transform - xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine - - # create new boxes - x = xy[:, [0, 2, 4, 6]] - y = xy[:, [1, 3, 5, 7]] - new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T - - # clip - new[:, [0, 2]] = new[:, [0, 2]].clip(0, width) - new[:, [1, 3]] = new[:, [1, 3]].clip(0, height) - - # filter candidates - i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10) - targets = targets[i] - targets[:, 1:5] = new[i] - - return img, targets - - -def copy_paste(img, labels, segments, probability=0.5): - # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy) - n = len(segments) - if probability and n: - h, w, c = img.shape # height, width, channels - im_new = np.zeros(img.shape, np.uint8) - for j in random.sample(range(n), k=round(probability * n)): - l, s = labels[j], segments[j] - box = w - l[3], l[2], w - l[1], l[4] - ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area - if (ioa < 0.30).all(): # allow 30% obscuration of existing labels - labels = np.concatenate((labels, [[l[0], *box]]), 0) - segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1)) - cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED) - - result = cv2.bitwise_and(src1=img, src2=im_new) - result = cv2.flip(result, 1) # augment segments (flip left-right) - i = result > 0 # pixels to replace - # i[:, :] = result.max(2).reshape(h, w, 1) # act over ch - img[i] = result[i] # cv2.imwrite('debug.jpg', img) # debug - - return img, labels, segments - - -def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n) - # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio - w1, h1 = box1[2] - box1[0], box1[3] - box1[1] - w2, h2 = box2[2] - box2[0], box2[3] - box2[1] - ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio - return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates - - -def cutout(image, labels): - # Applies image cutout augmentation https://arxiv.org/abs/1708.04552 - h, w = image.shape[:2] - - # create random masks - scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction - for s in scales: - mask_h = random.randint(1, int(h * s)) - mask_w = random.randint(1, int(w * s)) - - # box - xmin = max(0, random.randint(0, w) - mask_w // 2) - ymin = max(0, 
random.randint(0, h) - mask_h // 2) - xmax = min(w, xmin + mask_w) - ymax = min(h, ymin + mask_h) - - # apply random color mask - image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)] - - # return unobscured labels - if len(labels) and s > 0.03: - box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32) - ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area - labels = labels[ioa < 0.60] # remove >60% obscured labels - - return labels - - def create_folder(path='./new'): # Create folder if os.path.exists(path): @@ -1012,7 +784,6 @@ def flatten_recursive(path='../datasets/coco128'): def extract_boxes(path='../datasets/coco128'): # from utils.datasets import *; extract_boxes() # Convert detection dataset into classification dataset, with one directory per class - path = Path(path) # images dir shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None # remove existing files = list(path.rglob('*.*')) From 3c3f8fbd5d2e5bf2cbeaf824dc3a74c8a7bf6300 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 4 Jul 2021 20:12:32 +0200 Subject: [PATCH 150/757] Improved BGR2RGB speeds (#3880) * Update BGR2RGB ops * speed improvements * cleanup --- models/common.py | 2 +- utils/datasets.py | 9 ++++----- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/models/common.py b/models/common.py index 9911b207d060..6b8b4e4cb42f 100644 --- a/models/common.py +++ b/models/common.py @@ -259,7 +259,7 @@ def forward(self, imgs, size=640, augment=False, profile=False): files.append(Path(f).with_suffix('.jpg').name) if im.shape[0] < 5: # image in CHW im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1) - im = im[:, :, :3] if im.ndim == 3 else np.tile(im[:, :, None], 3) # enforce 3ch input + im = im[..., :3] if im.ndim == 3 else np.tile(im[..., None], 3) # enforce 3ch input s = im.shape[:2] # HWC shape0.append(s) # image shape g = (size / max(s)) # gain diff --git a/utils/datasets.py b/utils/datasets.py index 5c76a908c559..5a3fbefa28b7 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -218,7 +218,7 @@ def __next__(self): img = letterbox(img0, self.img_size, stride=self.stride)[0] # Convert - img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB and HWC to CHW + img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB img = np.ascontiguousarray(img) return path, img, img0, self.cap @@ -264,7 +264,7 @@ def __next__(self): img = letterbox(img0, self.img_size, stride=self.stride)[0] # Convert - img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB and HWC to CHW + img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB img = np.ascontiguousarray(img) return img_path, img, img0, None @@ -345,7 +345,7 @@ def __next__(self): img = np.stack(img, 0) # Convert - img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB and BHWC to BCHW + img = img[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW img = np.ascontiguousarray(img) return self.sources, img, img0, None @@ -526,7 +526,6 @@ def __getitem__(self, index): if random.random() < hyp['mixup']: img, labels = mixup(img, labels, *load_mosaic(self, random.randint(0, self.n - 1))) - else: # Load image img, (h0, w0), (h, w) = load_image(self, index) @@ -579,7 +578,7 @@ def __getitem__(self, index): labels_out[:, 1:] = torch.from_numpy(labels) # Convert - img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3 x img_height x img_width + img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB img = np.ascontiguousarray(img) return torch.from_numpy(img), labels_out, 
self.img_files[index], shapes

From 8930e22cceca4f07e8adb26baa5afa2745e77053 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Mon, 5 Jul 2021 12:48:27 +0200
Subject: [PATCH 151/757] Evolution commented `hyp['anchors']` fix (#3887)

Fix for `KeyError: 'anchors'` error when starting hyperparameter evolution:
```bash
python train.py --evolve
```

```bash
Traceback (most recent call last):
  File "E:\yolov5\train.py", line 623, in <module>
    hyp[k] = max(hyp[k], v[1])  # lower limit
KeyError: 'anchors'
```
---
 train.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/train.py b/train.py
index 2e864a60cefc..5a434773eff7 100644
--- a/train.py
+++ b/train.py
@@ -596,6 +596,8 @@ def main(opt):

         with open(opt.hyp) as f:
             hyp = yaml.safe_load(f)  # load hyps dict
+            if 'anchors' not in hyp:  # anchors commented in hyp.yaml
+                hyp['anchors'] = 3
         assert LOCAL_RANK == -1, 'DDP mode not implemented for --evolve'
         opt.notest, opt.nosave = True, True  # only test/save final epoch
         # ei = [isinstance(x, (int, float)) for x in hyp.values()]  # evolvable indices

From 6a3ee7cf03efb17fbffde0e68b1a854e80fe3213 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Mon, 5 Jul 2021 16:20:46 +0200
Subject: [PATCH 152/757] Hub models `map_location=device` (#3894)

* Hub models `map_location=device`

* cleanup
---
 hubconf.py           | 7 ++++---
 utils/torch_utils.py | 5 +++--
 2 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/hubconf.py b/hubconf.py
index 429e61bbab1b..2de71d617f1e 100644
--- a/hubconf.py
+++ b/hubconf.py
@@ -36,13 +36,15 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo
     fname = Path(name).with_suffix('.pt')  # checkpoint filename

     try:
+        device = select_device(('0' if torch.cuda.is_available() else 'cpu') if device is None else device)
+
         if pretrained and channels == 3 and classes == 80:
-            model = attempt_load(fname, map_location=torch.device('cpu'))  # download/load FP32 model
+            model = attempt_load(fname, map_location=device)  # download/load FP32 model
         else:
             cfg = list((Path(__file__).parent / 'models').rglob(f'{name}.yaml'))[0]  # model.yaml path
             model = Model(cfg, channels, classes)  # create model
             if pretrained:
-                ckpt = torch.load(attempt_download(fname), map_location=torch.device('cpu'))  # load
+                ckpt = torch.load(attempt_download(fname), map_location=device)  # load
                 msd = model.state_dict()  # model state_dict
                 csd = ckpt['model'].float().state_dict()  # checkpoint state_dict as FP32
                 csd = {k: v for k, v in csd.items() if msd[k].shape == v.shape}  # filter
@@ -51,7 +53,6 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo
             model.names = ckpt['model'].names  # set class names attribute
         if autoshape:
             model = model.autoshape()  # for file/URI/PIL/cv2/np inputs and NMS
-        device = select_device('0' if torch.cuda.is_available() else 'cpu') if device is None else torch.device(device)
         return model.to(device)

     except Exception as e:
diff --git a/utils/torch_utils.py b/utils/torch_utils.py
index 2d5382471e3c..36b6845a8c48 100644
--- a/utils/torch_utils.py
+++ b/utils/torch_utils.py
@@ -2,7 +2,6 @@

 import datetime
 import logging
-import math
 import os
 import platform
 import subprocess
@@ -11,6 +10,7 @@
 from copy import deepcopy
 from pathlib import Path

+import math
 import torch
 import torch.backends.cudnn as cudnn
 import torch.distributed as dist
@@ -64,7 +64,8 @@ def git_describe(path=Path(__file__).parent):  # path must be a directory
 def select_device(device='', batch_size=None):
     # device = 'cpu' or '0' or '0,1,2,3'
     s = f'YOLOv5 🚀 {git_describe() or date_modified()} torch
{torch.__version__} ' # string - cpu = device.lower() == 'cpu' + device = str(device).strip().lower().replace('cuda:', '') # to string, 'cuda:0' to '0' + cpu = device == 'cpu' if cpu: os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False elif device: # non-cpu device requested From 33202b7f0bbfcc55860e8c0cafbf13f227e77a84 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 5 Jul 2021 18:01:54 +0200 Subject: [PATCH 153/757] YOLOv5 + Albumentations integration (#3882) * Albumentations integration * ToGray p=0.01 * print confirmation * create instance in dataloader init method * improved version handling * transform not defined fix * assert string update * create check_version() * add spaces * update class comment --- requirements.txt | 1 + utils/augmentations.py | 30 +++++++++++++++++++++++++++++- utils/datasets.py | 40 +++++++++++++++++++++------------------- utils/general.py | 17 ++++++++++------- 4 files changed, 61 insertions(+), 27 deletions(-) diff --git a/requirements.txt b/requirements.txt index b413ec01b31c..ef1736a12d5f 100755 --- a/requirements.txt +++ b/requirements.txt @@ -27,4 +27,5 @@ pandas # extras -------------------------------------- # Cython # for pycocotools https://github.com/cocodataset/cocoapi/issues/172 # pycocotools>=2.0 # COCO mAP +# albumentations>=1.0.0 thop # FLOPs computation diff --git a/utils/augmentations.py b/utils/augmentations.py index f7b13165daf0..74ee4de2131e 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -1,15 +1,43 @@ # YOLOv5 image augmentation functions +import logging import random import cv2 import math import numpy as np -from utils.general import segment2box, resample_segments +from utils.general import colorstr, segment2box, resample_segments, check_version from utils.metrics import bbox_ioa +class Albumentations: + # YOLOv5 Albumentations class (optional, only used if package is installed) + def __init__(self): + self.transform = None + try: + import albumentations as A + check_version(A.__version__, '1.0.0') # version requirement + + self.transform = A.Compose([ + A.Blur(p=0.1), + A.MedianBlur(p=0.1), + A.ToGray(p=0.01)], + bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels'])) + + logging.info(colorstr('albumentations: ') + ', '.join(f'{x}' for x in self.transform.transforms)) + except ImportError: # package not installed, skip + pass + except Exception as e: + logging.info(colorstr('albumentations: ') + f'{e}') + + def __call__(self, im, labels, p=1.0): + if self.transform and random.random() < p: + new = self.transform(image=im, bboxes=labels[:, 1:], class_labels=labels[:, 0]) # transformed + im, labels = new['image'], np.array([[c, *b] for c, b in zip(new['class_labels'], new['bboxes'])]) + return im, labels + + def augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5): # HSV color-space augmentation if hgain or sgain or vgain: diff --git a/utils/datasets.py b/utils/datasets.py index 5a3fbefa28b7..0bcfdcc1cda6 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -22,7 +22,7 @@ from torch.utils.data import Dataset from tqdm import tqdm -from utils.augmentations import augment_hsv, copy_paste, letterbox, mixup, random_perspective +from utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective from utils.general import check_requirements, check_file, check_dataset, xywh2xyxy, xywhn2xyxy, xyxy2xywhn, \ xyn2xy, segments2boxes, clean_str from utils.torch_utils import torch_distributed_zero_first @@ -372,6 +372,7 @@ def 
__init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r self.mosaic_border = [-img_size // 2, -img_size // 2] self.stride = stride self.path = path + self.albumentations = Albumentations() if augment else None try: f = [] # image files @@ -539,9 +540,7 @@ def __getitem__(self, index): if labels.size: # normalized xywh to pixel xyxy format labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1]) - if self.augment: - # Augment imagespace - if not mosaic: + if self.augment: img, labels = random_perspective(img, labels, degrees=hyp['degrees'], translate=hyp['translate'], @@ -549,32 +548,35 @@ def __getitem__(self, index): shear=hyp['shear'], perspective=hyp['perspective']) - # Augment colorspace - augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v']) - - # Apply cutouts - # if random.random() < 0.9: - # labels = cutout(img, labels) - - nL = len(labels) # number of labels - if nL: + nl = len(labels) # number of labels + if nl: labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0]) # xyxy to xywh normalized if self.augment: - # flip up-down + # Albumentations + img, labels = self.albumentations(img, labels) + + # HSV color-space + augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v']) + + # Flip up-down if random.random() < hyp['flipud']: img = np.flipud(img) - if nL: + if nl: labels[:, 2] = 1 - labels[:, 2] - # flip left-right + # Flip left-right if random.random() < hyp['fliplr']: img = np.fliplr(img) - if nL: + if nl: labels[:, 1] = 1 - labels[:, 1] - labels_out = torch.zeros((nL, 6)) - if nL: + # Cutouts + # if random.random() < 0.9: + # labels = cutout(img, labels) + + labels_out = torch.zeros((nl, 6)) + if nl: labels_out[:, 1:] = torch.from_numpy(labels) # Convert diff --git a/utils/general.py b/utils/general.py index 4606a8ec54f5..b4c8994d233a 100755 --- a/utils/general.py +++ b/utils/general.py @@ -3,7 +3,6 @@ import contextlib import glob import logging -import math import os import platform import random @@ -17,6 +16,7 @@ from subprocess import check_output import cv2 +import math import numpy as np import pandas as pd import pkg_resources as pkg @@ -136,13 +136,16 @@ def check_git_status(err_msg=', for updates see https://github.com/ultralytics/y print(f'{e}{err_msg}') -def check_python(minimum='3.6.2', required=True): +def check_python(minimum='3.6.2'): # Check current python version vs. required python version - current = platform.python_version() - result = pkg.parse_version(current) >= pkg.parse_version(minimum) - if required: - assert result, f'Python {minimum} required by YOLOv5, but Python {current} is currently installed' - return result + check_version(platform.python_version(), minimum, name='Python ') + + +def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False): + # Check version vs. 
required version + current, minimum = (pkg.parse_version(x) for x in (current, minimum)) + result = (current == minimum) if pinned else (current >= minimum) + assert result, f'{name}{minimum} required by YOLOv5, but {name}{current} is currently installed' def check_requirements(requirements='requirements.txt', exclude=()): From 61047a2b4fb318a2cf86475c0099ead7832e45cf Mon Sep 17 00:00:00 2001 From: johnohagan <86861886+johnohagan@users.noreply.github.com> Date: Wed, 7 Jul 2021 21:41:46 +1000 Subject: [PATCH 154/757] Save PyTorch Hub models to `/root/hub/cache/dir` (#3904) * Create hubconf.py * Add save_dir variable Co-authored-by: Glenn Jocher --- hubconf.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/hubconf.py b/hubconf.py index 2de71d617f1e..df268b18d177 100644 --- a/hubconf.py +++ b/hubconf.py @@ -4,9 +4,12 @@ import torch model = torch.hub.load('ultralytics/yolov5', 'yolov5s') """ +from pathlib import Path import torch +FILE = Path(__file__).absolute() + def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None): """Creates a specified YOLOv5 model @@ -23,28 +26,26 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo Returns: YOLOv5 pytorch model """ - from pathlib import Path - from models.yolo import Model, attempt_load from utils.general import check_requirements, set_logging from utils.google_utils import attempt_download from utils.torch_utils import select_device - check_requirements(requirements=Path(__file__).parent / 'requirements.txt', - exclude=('tensorboard', 'thop', 'opencv-python')) + check_requirements(requirements=FILE.parent / 'requirements.txt', exclude=('tensorboard', 'thop', 'opencv-python')) set_logging(verbose=verbose) - fname = Path(name).with_suffix('.pt') # checkpoint filename + save_dir = Path('') if str(name).endswith('.pt') else FILE.parent + path = (save_dir / name).with_suffix('.pt') # checkpoint path try: device = select_device(('0' if torch.cuda.is_available() else 'cpu') if device is None else device) if pretrained and channels == 3 and classes == 80: - model = attempt_load(fname, map_location=device) # download/load FP32 model + model = attempt_load(path, map_location=device) # download/load FP32 model else: cfg = list((Path(__file__).parent / 'models').rglob(f'{name}.yaml'))[0] # model.yaml path model = Model(cfg, channels, classes) # create model if pretrained: - ckpt = torch.load(attempt_download(fname), map_location=device) # load + ckpt = torch.load(attempt_download(path), map_location=device) # load msd = model.state_dict() # model state_dict csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 csd = {k: v for k, v in csd.items() if msd[k].shape == v.shape} # filter From 87b094bcbcf209c89febcc9a3bb0ae119fee882d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 7 Jul 2021 15:41:58 +0200 Subject: [PATCH 155/757] Feature visualization update (#3920) * Feature visualization update * Save to jpg (faster) * Save to png --- detect.py | 6 +++++- models/yolo.py | 11 +++++------ utils/plots.py | 39 ++++++++++++++++++--------------------- 3 files changed, 28 insertions(+), 28 deletions(-) diff --git a/detect.py b/detect.py index a4542f7e8802..44b33eb42289 100644 --- a/detect.py +++ b/detect.py @@ -40,6 +40,7 @@ def run(weights='yolov5s.pt', # model.pt path(s) classes=None, # filter by class: --class 0, or --class 0 2 3 agnostic_nms=False, # class-agnostic NMS augment=False, # augmented inference + visualize=False, # 
visualize features update=False, # update all models project='runs/detect', # save results to project/name name='exp', # save results to project/name @@ -100,7 +101,9 @@ def run(weights='yolov5s.pt', # model.pt path(s) # Inference t1 = time_synchronized() - pred = model(img, augment=augment)[0] + pred = model(img, + augment=augment, + visualize=increment_path(save_dir / 'features', mkdir=True) if visualize else False)[0] # Apply NMS pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det) @@ -201,6 +204,7 @@ def parse_opt(): parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3') parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') parser.add_argument('--augment', action='store_true', help='augmented inference') + parser.add_argument('--visualize', action='store_true', help='visualize features') parser.add_argument('--update', action='store_true', help='update all models') parser.add_argument('--project', default='runs/detect', help='save results to project/name') parser.add_argument('--name', default='exp', help='save results to project/name') diff --git a/models/yolo.py b/models/yolo.py index 826590bd9783..b11443377080 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -117,11 +117,10 @@ def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, i self.info() logger.info('') - def forward(self, x, augment=False, profile=False): + def forward(self, x, augment=False, profile=False, visualize=False): if augment: return self.forward_augment(x) # augmented inference, None - else: - return self.forward_once(x, profile) # single-scale inference, train + return self.forward_once(x, profile, visualize) # single-scale inference, train def forward_augment(self, x): img_size = x.shape[-2:] # height, width @@ -136,7 +135,7 @@ def forward_augment(self, x): y.append(yi) return torch.cat(y, 1), None # augmented inference, train - def forward_once(self, x, profile=False, feature_vis=False): + def forward_once(self, x, profile=False, visualize=False): y, dt = [], [] # outputs for m in self.model: if m.f != -1: # if not from previous layer @@ -155,8 +154,8 @@ def forward_once(self, x, profile=False, feature_vis=False): x = m(x) # run y.append(x if m.i in self.save else None) # save output - if feature_vis and m.type == 'models.common.SPP': - feature_visualization(x, m.type, m.i) + if visualize: + feature_visualization(x, m.type, m.i, save_dir=visualize) if profile: logger.info('%.1fms total' % sum(dt)) diff --git a/utils/plots.py b/utils/plots.py index 4b6c63992ac7..1ab3bb6f21fe 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -1,12 +1,12 @@ # Plotting utils import glob -import math import os from copy import copy from pathlib import Path import cv2 +import math import matplotlib import matplotlib.pyplot as plt import numpy as np @@ -15,7 +15,6 @@ import torch import yaml from PIL import Image, ImageDraw, ImageFont -from torchvision import transforms from utils.general import increment_path, xywh2xyxy, xyxy2xywh from utils.metrics import fitness @@ -448,28 +447,26 @@ def plot_results(start=0, stop=0, bucket='', id=(), labels=(), save_dir=''): fig.savefig(Path(save_dir) / 'results.png', dpi=200) -def feature_visualization(x, module_type, stage, n=64): +def feature_visualization(x, module_type, stage, n=64, save_dir=Path('runs/detect/exp')): """ x: Features to be visualized module_type: Module type stage: Module stage within model n: Maximum number of feature maps 
to plot + save_dir: Directory to save results """ - batch, channels, height, width = x.shape # batch, channels, height, width - if height > 1 and width > 1: - project, name = 'runs/features', 'exp' - save_dir = increment_path(Path(project) / name) # increment run - save_dir.mkdir(parents=True, exist_ok=True) # make dir - - plt.figure(tight_layout=True) - blocks = torch.chunk(x, channels, dim=1) # block by channel dimension - n = min(n, len(blocks)) - for i in range(n): - feature = transforms.ToPILImage()(blocks[i].squeeze()) - ax = plt.subplot(int(math.sqrt(n)), int(math.sqrt(n)), i + 1) - ax.axis('off') - plt.imshow(feature) # cmap='gray' - - f = f"stage_{stage}_{module_type.split('.')[-1]}_features.png" - print(f'Saving {save_dir / f}...') - plt.savefig(save_dir / f, dpi=300) + if 'Detect' not in module_type: + batch, channels, height, width = x.shape # batch, channels, height, width + if height > 1 and width > 1: + f = f"stage{stage}_{module_type.split('.')[-1]}_features.png" # filename + + plt.figure(tight_layout=True) + blocks = torch.chunk(x[0], channels, dim=0) # select batch index 0, block by channels + n = min(n, channels) # number of plots + ax = plt.subplots(math.ceil(n / 8), 8, tight_layout=True)[1].ravel() # 8 rows x n/8 cols + for i in range(n): + ax[i].imshow(blocks[i].squeeze()) # cmap='gray' + ax[i].axis('off') + + print(f'Saving {save_dir / f}... ({n}/{channels})') + plt.savefig(save_dir / f, dpi=300) From 411842e0583ea77970a35a367faf3cf5017845eb Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 7 Jul 2021 16:08:42 +0200 Subject: [PATCH 156/757] Fix `torch.hub.list('ultralytics/yolov5')` pathlib bug (#3921) --- hubconf.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/hubconf.py b/hubconf.py index df268b18d177..55536c3a42f3 100644 --- a/hubconf.py +++ b/hubconf.py @@ -4,12 +4,9 @@ import torch model = torch.hub.load('ultralytics/yolov5', 'yolov5s') """ -from pathlib import Path import torch -FILE = Path(__file__).absolute() - def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None): """Creates a specified YOLOv5 model @@ -26,15 +23,18 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo Returns: YOLOv5 pytorch model """ + from pathlib import Path + from models.yolo import Model, attempt_load from utils.general import check_requirements, set_logging from utils.google_utils import attempt_download from utils.torch_utils import select_device - check_requirements(requirements=FILE.parent / 'requirements.txt', exclude=('tensorboard', 'thop', 'opencv-python')) + file = Path(__file__).absolute() + check_requirements(requirements=file.parent / 'requirements.txt', exclude=('tensorboard', 'thop', 'opencv-python')) set_logging(verbose=verbose) - save_dir = Path('') if str(name).endswith('.pt') else FILE.parent + save_dir = Path('') if str(name).endswith('.pt') else file.parent path = (save_dir / name).with_suffix('.pt') # checkpoint path try: device = select_device(('0' if torch.cuda.is_available() else 'cpu') if device is None else device) From 588094eb7adbbdc8d89d87ce0cf689d1d9f31bfc Mon Sep 17 00:00:00 2001 From: jmiranda-laplateforme <67475949+jmiranda-laplateforme@users.noreply.github.com> Date: Wed, 7 Jul 2021 16:13:12 +0200 Subject: [PATCH 157/757] Update `setattr()` default for Hub PIL images (#3923) Fix inference from PIL source. 
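For context, here is a minimal sketch of the failure mode this one-line change addresses (illustrative, not part of the patch; it assumes Pillow is installed, and the variable names only mirror the diff): a PIL image created in memory has no `filename` attribute, so the two-argument `getattr` raises `AttributeError`, while the patched three-argument form falls back to a default.

from PIL import Image

f = 'image0'                     # fallback name for images that carry no filename
im = Image.new('RGB', (64, 64))  # in-memory PIL image: has no .filename attribute

# Old form: getattr(im, 'filename') raises AttributeError for this image.
# Patched form: the third argument supplies a default instead of raising,
# and `or f` still covers the empty-string filename case.
name = getattr(im, 'filename', f) or f
print(name)  # -> 'image0'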
--- models/common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/common.py b/models/common.py index 6b8b4e4cb42f..f4b91da62250 100644 --- a/models/common.py +++ b/models/common.py @@ -255,7 +255,7 @@ def forward(self, imgs, size=640, augment=False, profile=False): im, f = Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im), im im = np.asarray(exif_transpose(im)) elif isinstance(im, Image.Image): # PIL Image - im, f = np.asarray(exif_transpose(im)), getattr(im, 'filename') or f + im, f = np.asarray(exif_transpose(im)), getattr(im, 'filename', f) or f files.append(Path(f).with_suffix('.jpg').name) if im.shape[0] < 5: # image in CHW im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1) From 850970e081687df6427898948a27df37ab4de5d3 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 7 Jul 2021 16:23:31 +0200 Subject: [PATCH 158/757] `feature_visualization()` CUDA fix (#3925) --- utils/plots.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/plots.py b/utils/plots.py index 1ab3bb6f21fe..23a48620e6b5 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -461,7 +461,7 @@ def feature_visualization(x, module_type, stage, n=64, save_dir=Path('runs/detec f = f"stage{stage}_{module_type.split('.')[-1]}_features.png" # filename plt.figure(tight_layout=True) - blocks = torch.chunk(x[0], channels, dim=0) # select batch index 0, block by channels + blocks = torch.chunk(x[0].cpu(), channels, dim=0) # select batch index 0, block by channels n = min(n, channels) # number of plots ax = plt.subplots(math.ceil(n / 8), 8, tight_layout=True)[1].ravel() # 8 rows x n/8 cols for i in range(n): From 8c6f9e15bfc0000d18b976a95b9d7c17d407ec91 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 8 Jul 2021 11:42:30 +0200 Subject: [PATCH 159/757] Update `dataset_stats()` for zipped datasets (#3926) * Update `dataset_stats()` for zipped datasets @KalenMike * cleanup --- utils/datasets.py | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index 0bcfdcc1cda6..a527230b868a 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -888,9 +888,11 @@ def verify_image_label(args): def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False): """ Return dataset statistics dictionary with images and instances counts per split per class - Usage: from utils.datasets import *; dataset_stats('coco128.yaml', verbose=True) + Usage1: from utils.datasets import *; dataset_stats('coco128.yaml', verbose=True) + Usage2: from utils.datasets import *; dataset_stats('../datasets/coco128.zip', verbose=True) + Arguments - path: Path to data.yaml + path: Path to data.yaml or data.zip (with data.yaml inside data.zip) autodownload: Attempt to download dataset if not found locally verbose: Print stats dictionary """ @@ -899,8 +901,20 @@ def round_labels(labels): # Update labels to integer class and 6 decimal place floats return [[int(c), *[round(x, 6) for x in points]] for c, *points in labels] - with open(check_file(path)) as f: + def unzip(path): + # Unzip data.zip TODO: CONSTRAINT: path/to/abc.zip MUST unzip to 'path/to/abc/' + if str(path).endswith('.zip'): # path is data.zip + assert os.system(f'unzip -q {path} -d {path.parent}') == 0, f'Error unzipping {path}' + data_dir = path.with_suffix('') # dataset directory + return True, data_dir, list(data_dir.rglob('*.yaml'))[0] # zipped, data_dir, yaml_path + else: # path is data.yaml + return False, None, path + + 
zipped, data_dir, yaml_path = unzip(Path(path)) + with open(check_file(yaml_path)) as f: data = yaml.safe_load(f) # data dict + if zipped: + data['path'] = data_dir # TODO: should this be dir.resolve()? check_dataset(data, autodownload) # download dataset if missing nc = data['nc'] # number of classes stats = {'nc': nc, 'names': data['names']} # statistics dictionary From e7888af94c0ee232f6d47e768c090b05e3baebb8 Mon Sep 17 00:00:00 2001 From: Eldar Kurtic Date: Thu, 8 Jul 2021 15:29:02 +0200 Subject: [PATCH 160/757] Fix inconsistent NMS IoU value for COCO (#3934) Evaluation of 'best' and 'last' models will use the same params as the evaluation during the training phase. This PR fixes https://github.com/ultralytics/yolov5/issues/3907 --- train.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/train.py b/train.py index 5a434773eff7..e58d7c4f0348 100644 --- a/train.py +++ b/train.py @@ -457,8 +457,6 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary results, _, _ = test.run(data_dict, batch_size=batch_size // WORLD_SIZE * 2, imgsz=imgsz_test, - conf_thres=0.001, - iou_thres=0.7, model=attempt_load(m, device).half(), single_cls=single_cls, dataloader=testloader, From dabad5793a638cba1e5a2bbb878c9b87fe1a14a0 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 8 Jul 2021 15:45:53 +0200 Subject: [PATCH 161/757] Created using Colaboratory --- tutorial.ipynb | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index a87f787cca8e..2641743b8c36 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -1058,14 +1058,14 @@ "id": "OYG4WFEnTVrI" }, "source": [ - "> \n", + "> \n", "`train_batch0.jpg` shows train batch 0 mosaics and labels\n", "\n", - "> \n", + "> \n", "`test_batch0_labels.jpg` shows test batch 0 labels\n", "\n", - "> \n", - "`test_batch0_pred.jpg` shows test batch 0 _predictions_\n" + "> \n", + "`test_batch0_pred.jpg` shows test batch 0 _predictions_" ] }, { From 248504cf13c2cba9e211e6110089a3e6f916109c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 9 Jul 2021 15:23:02 +0200 Subject: [PATCH 162/757] Feature visualization improvements 32 (#3947) --- detect.py | 2 +- utils/plots.py | 11 ++++++----- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/detect.py b/detect.py index 44b33eb42289..be2c5969c6d7 100644 --- a/detect.py +++ b/detect.py @@ -103,7 +103,7 @@ def run(weights='yolov5s.pt', # model.pt path(s) t1 = time_synchronized() pred = model(img, augment=augment, - visualize=increment_path(save_dir / 'features', mkdir=True) if visualize else False)[0] + visualize=increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False)[0] # Apply NMS pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det) diff --git a/utils/plots.py b/utils/plots.py index 23a48620e6b5..4e6b001dcc2f 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -16,7 +16,7 @@ import yaml from PIL import Image, ImageDraw, ImageFont -from utils.general import increment_path, xywh2xyxy, xyxy2xywh +from utils.general import xywh2xyxy, xyxy2xywh from utils.metrics import fitness # Settings @@ -447,7 +447,7 @@ def plot_results(start=0, stop=0, bucket='', id=(), labels=(), save_dir=''): fig.savefig(Path(save_dir) / 'results.png', dpi=200) -def feature_visualization(x, module_type, stage, n=64, save_dir=Path('runs/detect/exp')): +def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')): """ x: Features to be visualized module_type: Module type @@ -460,13 +460,14 @@ def 
feature_visualization(x, module_type, stage, n=64, save_dir=Path('runs/detec if height > 1 and width > 1: f = f"stage{stage}_{module_type.split('.')[-1]}_features.png" # filename - plt.figure(tight_layout=True) blocks = torch.chunk(x[0].cpu(), channels, dim=0) # select batch index 0, block by channels n = min(n, channels) # number of plots - ax = plt.subplots(math.ceil(n / 8), 8, tight_layout=True)[1].ravel() # 8 rows x n/8 cols + fig, ax = plt.subplots(math.ceil(n / 8), 8, tight_layout=True) # 8 rows x n/8 cols + ax = ax.ravel() + plt.subplots_adjust(wspace=0.05, hspace=0.05) for i in range(n): ax[i].imshow(blocks[i].squeeze()) # cmap='gray' ax[i].axis('off') print(f'Saving {save_dir / f}... ({n}/{channels})') - plt.savefig(save_dir / f, dpi=300) + plt.savefig(save_dir / f, dpi=300, bbox_inches='tight') From a26e7de2bffaf5a87e7ed83aeabd0f0b2e8ad861 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 9 Jul 2021 16:45:04 +0200 Subject: [PATCH 163/757] Update augmentations.py (#3948) --- utils/augmentations.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/augmentations.py b/utils/augmentations.py index 74ee4de2131e..81652c191bc1 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -25,7 +25,7 @@ def __init__(self): A.ToGray(p=0.01)], bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels'])) - logging.info(colorstr('albumentations: ') + ', '.join(f'{x}' for x in self.transform.transforms)) + logging.info(colorstr('albumentations: ') + ', '.join(f'{x}' for x in self.transform.transforms if x.p)) except ImportError: # package not installed, skip pass except Exception as e: From 443af8b25ae5121e920623511e38465bacde75b8 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 10 Jul 2021 14:18:46 +0200 Subject: [PATCH 164/757] Cache v0.4 update (#3954) --- utils/datasets.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index a527230b868a..c0b51ee39711 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -397,12 +397,11 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r # Check cache self.label_files = img2label_paths(self.img_files) # labels - cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') # cached labels - if cache_path.is_file(): - cache, exists = torch.load(cache_path), True # load - if cache.get('version') != 0.3 or cache.get('hash') != get_hash(self.label_files + self.img_files): - cache, exists = self.cache_labels(cache_path, prefix), False # re-cache - else: + cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') + try: + cache, exists = np.load(cache_path, allow_pickle=True).item(), True # load dict + assert cache['version'] == 0.4 and cache['hash'] == get_hash(self.label_files + self.img_files) + except: cache, exists = self.cache_labels(cache_path, prefix), False # cache # Display cache @@ -496,9 +495,10 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): x['hash'] = get_hash(self.label_files + self.img_files) x['results'] = nf, nm, ne, nc, len(self.img_files) x['msgs'] = msgs # warnings - x['version'] = 0.3 # cache version + x['version'] = 0.4 # cache version try: - torch.save(x, path) # save cache for next time + np.save(path, x) # save cache for next time + path.with_suffix('.cache.npy').rename(path) # remove .npy suffix logging.info(f'{prefix}New cache created: {path}') except Exception as e: 
logging.info(f'{prefix}WARNING: Cache directory {path.parent} is not writeable: {e}') # path not writeable From 80299a57e26accf196558da01c071e13caec14ae Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 10 Jul 2021 19:50:53 +0200 Subject: [PATCH 165/757] Numerical stability fix for Albumentations (#3958) --- utils/datasets.py | 2 +- utils/general.py | 24 +++++++++++------------- 2 files changed, 12 insertions(+), 14 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index c0b51ee39711..d95677a133e1 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -550,7 +550,7 @@ def __getitem__(self, index): nl = len(labels) # number of labels if nl: - labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0]) # xyxy to xywh normalized + labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1E-3) if self.augment: # Albumentations diff --git a/utils/general.py b/utils/general.py index b4c8994d233a..23a827d03d80 100755 --- a/utils/general.py +++ b/utils/general.py @@ -396,10 +396,10 @@ def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0): return y -def xyxy2xywhn(x, w=640, h=640, clip=False): +def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0): # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] normalized where xy1=top-left, xy2=bottom-right if clip: - clip_coords(x, (h, w)) # warning: inplace clip + clip_coords(x, (h - eps, w - eps)) # warning: inplace clip y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) y[:, 0] = ((x[:, 0] + x[:, 2]) / 2) / w # x center y[:, 1] = ((x[:, 1] + x[:, 3]) / 2) / h # y center @@ -458,18 +458,16 @@ def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None): return coords -def clip_coords(boxes, img_shape): +def clip_coords(boxes, shape): # Clip bounding xyxy bounding boxes to image shape (height, width) - if isinstance(boxes, torch.Tensor): - boxes[:, 0].clamp_(0, img_shape[1]) # x1 - boxes[:, 1].clamp_(0, img_shape[0]) # y1 - boxes[:, 2].clamp_(0, img_shape[1]) # x2 - boxes[:, 3].clamp_(0, img_shape[0]) # y2 - else: # np.array - boxes[:, 0].clip(0, img_shape[1], out=boxes[:, 0]) # x1 - boxes[:, 1].clip(0, img_shape[0], out=boxes[:, 1]) # y1 - boxes[:, 2].clip(0, img_shape[1], out=boxes[:, 2]) # x2 - boxes[:, 3].clip(0, img_shape[0], out=boxes[:, 3]) # y2 + if isinstance(boxes, torch.Tensor): # faster individually + boxes[:, 0].clamp_(0, shape[1]) # x1 + boxes[:, 1].clamp_(0, shape[0]) # y1 + boxes[:, 2].clamp_(0, shape[1]) # x2 + boxes[:, 3].clamp_(0, shape[0]) # y2 + else: # np.array (faster grouped) + boxes[:, [0, 2]] = boxes[:, [0, 2]].clip(0, shape[1]) # x1, x2 + boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, shape[0]) # y1, y2 def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False, From 8298ce5e885a129891db598a43a490ed1a78cb92 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 11 Jul 2021 15:49:30 +0200 Subject: [PATCH 166/757] Update `albumentations>=1.0.2` (#3966) --- utils/augmentations.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/augmentations.py b/utils/augmentations.py index 81652c191bc1..5eaeabdb665d 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -17,7 +17,7 @@ def __init__(self): self.transform = None try: import albumentations as A - check_version(A.__version__, '1.0.0') # version requirement + check_version(A.__version__, '1.0.2') # version requirement self.transform = A.Compose([ A.Blur(p=0.1), From 90e60b403d0e349cecdbe98a9763e32dd733da2b Mon Sep 
17 00:00:00 2001 From: Glenn Jocher Date: Sun, 11 Jul 2021 16:56:36 +0200 Subject: [PATCH 167/757] Update `np.random.random()` to `random.random()` (#3967) --- utils/autoanchor.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/utils/autoanchor.py b/utils/autoanchor.py index 87dc394c832e..6abdd2d38832 100644 --- a/utils/autoanchor.py +++ b/utils/autoanchor.py @@ -1,5 +1,7 @@ # Auto-anchor utils +import random + import numpy as np import torch import yaml @@ -149,7 +151,7 @@ def print_results(k): for _ in pbar: v = np.ones(sh) while (v == 1).all(): # mutate until a change occurs (prevent duplicates) - v = ((npr.random(sh) < mp) * npr.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0) + v = ((npr.random(sh) < mp) * random.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0) kg = (k.copy() * v).clip(min=2.0) fg = anchor_fitness(kg) if fg > f: From a544d59f52d167c7ef2d86d514a5737bd52dc818 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 11 Jul 2021 19:07:42 +0200 Subject: [PATCH 168/757] Update requirements.txt `albumentations>=1.0.2` (#3972) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index ef1736a12d5f..886d21ce8047 100755 --- a/requirements.txt +++ b/requirements.txt @@ -27,5 +27,5 @@ pandas # extras -------------------------------------- # Cython # for pycocotools https://github.com/cocodataset/cocoapi/issues/172 # pycocotools>=2.0 # COCO mAP -# albumentations>=1.0.0 +# albumentations>=1.0.2 thop # FLOPs computation From 647223a7a8e5d8fca69481d477fe9a2930a8004c Mon Sep 17 00:00:00 2001 From: KEN <33506506+seven320@users.noreply.github.com> Date: Mon, 12 Jul 2021 02:47:08 +0900 Subject: [PATCH 169/757] `Ensemble()` visualize fix (#3973) * fix visualize error * Revert "fix visualize error" * add visualise profile --- models/experimental.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/models/experimental.py b/models/experimental.py index d316b18373c3..30dc36192bc0 100644 --- a/models/experimental.py +++ b/models/experimental.py @@ -100,10 +100,10 @@ class Ensemble(nn.ModuleList): def __init__(self): super(Ensemble, self).__init__() - def forward(self, x, augment=False): + def forward(self, x, augment=False, profile=False, visualize=False): y = [] for module in self: - y.append(module(x, augment)[0]) + y.append(module(x, augment, profile, visualize)[0]) # y = torch.stack(y).max(0)[0] # max ensemble # y = torch.stack(y).mean(0) # mean ensemble y = torch.cat(y, 1) # nms ensemble From 41fdf9fa53bdc178f3df76764df5b655c94b6f7b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 12 Jul 2021 12:43:26 +0200 Subject: [PATCH 170/757] Created using Colaboratory --- tutorial.ipynb | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 2641743b8c36..15d003c19606 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -527,7 +527,7 @@ }, "source": [ "\n", - "\n", + "\n", "\n", "This is the **official YOLOv5 🚀 notebook** authored by **Ultralytics**, and is freely available for redistribution under the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/). \n", "For more information please visit https://github.com/ultralytics/yolov5 and https://ultralytics.com. Thank you!" 
@@ -1025,7 +1025,7 @@ "\n", "During training you will see live updates at [https://wandb.ai/home](https://wandb.ai/home?utm_campaign=repo_yolo_notebook), and you can create and share detailed [Reports](https://wandb.ai/glenn-jocher/yolov5_tutorial/reports/YOLOv5-COCO128-Tutorial-Results--VmlldzozMDI5OTY) of your results. For more information see the [YOLOv5 Weights & Biases Tutorial](https://github.com/ultralytics/yolov5/issues/1289). \n", "\n", - "" + "" ] }, { @@ -1096,7 +1096,7 @@ "id": "lfrEegCSW3fK" }, "source": [ - "

[image markup lost in extraction: the removed and added tutorial.ipynb image strings referenced "COCO128" results plots; only the surrounding JSON quote fragments survive]
" ] }, { From b3dabdcc380b45bbc802a3808457d8d0091e9148 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 12 Jul 2021 15:54:43 +0200 Subject: [PATCH 171/757] Update `probability` to `p` (#3980) --- models/common.py | 4 +-- utils/augmentations.py | 65 +++++++++++++++++++++--------------------- utils/datasets.py | 7 ++--- 3 files changed, 37 insertions(+), 39 deletions(-) diff --git a/models/common.py b/models/common.py index f4b91da62250..418034ddeaac 100644 --- a/models/common.py +++ b/models/common.py @@ -215,7 +215,7 @@ def forward(self, x): class AutoShape(nn.Module): - # input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS + # YOLOv5 input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS conf = 0.25 # NMS confidence threshold iou = 0.45 # NMS IoU threshold classes = None # (optional list) filter by class @@ -287,7 +287,7 @@ def forward(self, imgs, size=640, augment=False, profile=False): class Detections: - # detections class for YOLOv5 inference results + # YOLOv5 detections class for inference results def __init__(self, imgs, pred, files, times=None, names=None, shape=None): super(Detections, self).__init__() d = pred[0].device # device diff --git a/utils/augmentations.py b/utils/augmentations.py index 5eaeabdb665d..c953fcbcc90b 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -50,12 +50,12 @@ def augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5): lut_sat = np.clip(x * r[1], 0, 255).astype(dtype) lut_val = np.clip(x * r[2], 0, 255).astype(dtype) - img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))) - cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=im) # no return needed + im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))) + cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR, dst=im) # no return needed def hist_equalize(im, clahe=True, bgr=False): - # Equalize histogram on BGR image 'img' with img.shape(n,m,3) and range 0-255 + # Equalize histogram on BGR image 'im' with im.shape(n,m,3) and range 0-255 yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV) if clahe: c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8)) @@ -76,7 +76,7 @@ def replicate(im, labels): bh, bw = y2b - y1b, x2b - x1b yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh] - im[y1a:y2a, x1a:x2a] = im[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] + im[y1a:y2a, x1a:x2a] = im[y1b:y2b, x1b:x2b] # im4[ymin:ymax, xmin:xmax] labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0) return im, labels @@ -162,8 +162,8 @@ def random_perspective(im, targets=(), segments=(), degrees=10, translate=.1, sc # Visualize # import matplotlib.pyplot as plt # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel() - # ax[0].imshow(img[:, :, ::-1]) # base - # ax[1].imshow(img2[:, :, ::-1]) # warped + # ax[0].imshow(im[:, :, ::-1]) # base + # ax[1].imshow(im2[:, :, ::-1]) # warped # Transform label coordinates n = len(targets) @@ -204,13 +204,13 @@ def random_perspective(im, targets=(), segments=(), degrees=10, translate=.1, sc return im, targets -def copy_paste(im, labels, segments, probability=0.5): +def copy_paste(im, labels, segments, p=0.5): # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy) n = len(segments) - if probability and n: + if p and n: h, w, c = im.shape # height, width, 
channels im_new = np.zeros(im.shape, np.uint8) - for j in random.sample(range(n), k=round(probability * n)): + for j in random.sample(range(n), k=round(p * n)): l, s = labels[j], segments[j] box = w - l[3], l[2], w - l[1], l[4] ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area @@ -223,35 +223,34 @@ def copy_paste(im, labels, segments, probability=0.5): result = cv2.flip(result, 1) # augment segments (flip left-right) i = result > 0 # pixels to replace # i[:, :] = result.max(2).reshape(h, w, 1) # act over ch - im[i] = result[i] # cv2.imwrite('debug.jpg', img) # debug + im[i] = result[i] # cv2.imwrite('debug.jpg', im) # debug return im, labels, segments -def cutout(im, labels): +def cutout(im, labels, p=0.5): # Applies image cutout augmentation https://arxiv.org/abs/1708.04552 - h, w = im.shape[:2] - - # create random masks - scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction - for s in scales: - mask_h = random.randint(1, int(h * s)) - mask_w = random.randint(1, int(w * s)) - - # box - xmin = max(0, random.randint(0, w) - mask_w // 2) - ymin = max(0, random.randint(0, h) - mask_h // 2) - xmax = min(w, xmin + mask_w) - ymax = min(h, ymin + mask_h) - - # apply random color mask - im[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)] - - # return unobscured labels - if len(labels) and s > 0.03: - box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32) - ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area - labels = labels[ioa < 0.60] # remove >60% obscured labels + if random.random() < p: + h, w = im.shape[:2] + scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction + for s in scales: + mask_h = random.randint(1, int(h * s)) # create random masks + mask_w = random.randint(1, int(w * s)) + + # box + xmin = max(0, random.randint(0, w) - mask_w // 2) + ymin = max(0, random.randint(0, h) - mask_h // 2) + xmax = min(w, xmin + mask_w) + ymax = min(h, ymin + mask_h) + + # apply random color mask + im[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)] + + # return unobscured labels + if len(labels) and s > 0.03: + box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32) + ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area + labels = labels[ioa < 0.60] # remove >60% obscured labels return labels diff --git a/utils/datasets.py b/utils/datasets.py index d95677a133e1..0763b56d31e3 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -22,7 +22,7 @@ from torch.utils.data import Dataset from tqdm import tqdm -from utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective +from utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective, cutout from utils.general import check_requirements, check_file, check_dataset, xywh2xyxy, xywhn2xyxy, xyxy2xywhn, \ xyn2xy, segments2boxes, clean_str from utils.torch_utils import torch_distributed_zero_first @@ -572,8 +572,7 @@ def __getitem__(self, index): labels[:, 1] = 1 - labels[:, 1] # Cutouts - # if random.random() < 0.9: - # labels = cutout(img, labels) + # labels = cutout(img, labels, p=0.5) labels_out = torch.zeros((nl, 6)) if nl: @@ -682,7 +681,7 @@ def load_mosaic(self, index): # img4, labels4 = replicate(img4, labels4) # replicate # Augment - img4, labels4, segments4 = copy_paste(img4, labels4, segments4, probability=self.hyp['copy_paste']) + img4, labels4, segments4 = copy_paste(img4, labels4, segments4, 
p=self.hyp['copy_paste']) img4, labels4 = random_perspective(img4, labels4, segments4, degrees=self.hyp['degrees'], translate=self.hyp['translate'], From d204a61834d0f6b2e73c1f43facf32fbadb6b284 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 12 Jul 2021 19:48:47 +0200 Subject: [PATCH 172/757] Alert (no detections) (#3984) * `Detections()` class `print()` overload * Update common.py --- models/common.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/models/common.py b/models/common.py index 418034ddeaac..05372ae149f5 100644 --- a/models/common.py +++ b/models/common.py @@ -307,7 +307,7 @@ def __init__(self, imgs, pred, files, times=None, names=None, shape=None): def display(self, pprint=False, show=False, save=False, crop=False, render=False, save_dir=Path('')): for i, (im, pred) in enumerate(zip(self.imgs, self.pred)): str = f'image {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} ' - if pred is not None: + if pred.shape[0]: for c in pred[:, -1].unique(): n = (pred[:, -1] == c).sum() # detections per class str += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string @@ -318,6 +318,8 @@ def display(self, pprint=False, show=False, save=False, crop=False, render=False save_one_box(box, im, file=save_dir / 'crops' / self.names[int(cls)] / self.files[i]) else: # all others plot_one_box(box, im, label=label, color=colors(cls)) + else: + str += '(no detections)' im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np if pprint: From 8ee9fd15059e807374f52527951399e61d57b1b0 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 13 Jul 2021 23:07:09 +0200 Subject: [PATCH 173/757] Update README.md (#3996) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 360afd2cd7e6..64086643373c 100755 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@
 [badge image stripped: "CI CPU testing"]
-[badge image stripped: "Open In Kaggle"]
+[badge image stripped: "YOLOv5 Citation"]
Open In Colab Open In Kaggle From 720aaa65c8873c0d87df09e3c1c14f3581d4ea61 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 14 Jul 2021 15:43:54 +0200 Subject: [PATCH 174/757] Rename `test.py` to `val.py` (#4000) --- .github/ISSUE_TEMPLATE/bug-report.md | 2 +- .github/workflows/ci-testing.yml | 6 +-- .github/workflows/greetings.yml | 2 +- README.md | 8 ++-- models/yolo.py | 1 - train.py | 68 ++++++++++++++-------------- tutorial.ipynb | 36 +++++++-------- utils/augmentations.py | 2 +- utils/general.py | 2 +- utils/plots.py | 8 ++-- test.py => val.py | 20 ++++---- 11 files changed, 77 insertions(+), 78 deletions(-) rename test.py => val.py (95%) diff --git a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md index 362059b288d5..b7fc7c5a8838 100644 --- a/.github/ISSUE_TEMPLATE/bug-report.md +++ b/.github/ISSUE_TEMPLATE/bug-report.md @@ -12,7 +12,7 @@ Before submitting a bug report, please be aware that your issue **must be reprod - **Common dataset**: coco.yaml or coco128.yaml - **Common environment**: Colab, Google Cloud, or Docker image. See https://github.com/ultralytics/yolov5#environments -If this is a custom dataset/training question you **must include** your `train*.jpg`, `test*.jpg` and `results.png` figures, or we can not help you. You can generate these with `utils.plot_results()`. +If this is a custom dataset/training question you **must include** your `train*.jpg`, `val*.jpg` and `results.png` figures, or we can not help you. You can generate these with `utils.plot_results()`. ## 🐛 Bug diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 20c1d5b026b0..a7964ea01d5d 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -68,9 +68,9 @@ jobs: # detect python detect.py --weights ${{ matrix.model }}.pt --device $di python detect.py --weights runs/train/exp/weights/last.pt --device $di - # test - python test.py --img 128 --batch 16 --weights ${{ matrix.model }}.pt --device $di - python test.py --img 128 --batch 16 --weights runs/train/exp/weights/last.pt --device $di + # val + python val.py --img 128 --batch 16 --weights ${{ matrix.model }}.pt --device $di + python val.py --img 128 --batch 16 --weights runs/train/exp/weights/last.pt --device $di python hubconf.py # hub python models/yolo.py --cfg ${{ matrix.model }}.yaml # inspect diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index fdf1cfae8df5..787fbd71721b 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -52,5 +52,5 @@ jobs: ![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg) - If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([test.py](https://github.com/ultralytics/yolov5/blob/master/test.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on MacOS, Windows, and Ubuntu every 24 hours and on every commit. + If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. 
CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on MacOS, Windows, and Ubuntu every 24 hours and on every commit. diff --git a/README.md b/README.md index 64086643373c..035b7002774a 100755 --- a/README.md +++ b/README.md @@ -197,7 +197,7 @@ We are super excited about our first-ever Ultralytics YOLOv5 🚀 EXPORT Competi * GPU Speed measures end-to-end time per image averaged over 5000 COCO val2017 images using a V100 GPU with batch size 32, and includes image preprocessing, PyTorch FP16 inference, postprocessing and NMS. * EfficientDet data from [google/automl](https://github.com/google/automl) at batch size 8. - * **Reproduce** by `python test.py --task study --data coco.yaml --iou 0.7 --weights yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt` + * **Reproduce** by `python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt`
@@ -223,10 +223,10 @@ We are super excited about our first-ever Ultralytics YOLOv5 🚀 EXPORT Competi Table Notes (click to expand) * APtest denotes COCO [test-dev2017](http://cocodataset.org/#upload) server results, all other AP results denote val2017 accuracy. - * AP values are for single-model single-scale unless otherwise noted. **Reproduce mAP** by `python test.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` - * SpeedGPU averaged over 5000 COCO val2017 images using a GCP [n1-standard-16](https://cloud.google.com/compute/docs/machine-types#n1_standard_machine_types) V100 instance, and includes FP16 inference, postprocessing and NMS. **Reproduce speed** by `python test.py --data coco.yaml --img 640 --conf 0.25 --iou 0.45` + * AP values are for single-model single-scale unless otherwise noted. **Reproduce mAP** by `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` + * SpeedGPU averaged over 5000 COCO val2017 images using a GCP [n1-standard-16](https://cloud.google.com/compute/docs/machine-types#n1_standard_machine_types) V100 instance, and includes FP16 inference, postprocessing and NMS. **Reproduce speed** by `python val.py --data coco.yaml --img 640 --conf 0.25 --iou 0.45` * All checkpoints are trained to 300 epochs with default settings and hyperparameters (no autoaugmentation). - * Test Time Augmentation ([TTA](https://github.com/ultralytics/yolov5/issues/303)) includes reflection and scale augmentation. **Reproduce TTA** by `python test.py --data coco.yaml --img 1536 --iou 0.7 --augment` + * Test Time Augmentation ([TTA](https://github.com/ultralytics/yolov5/issues/303)) includes reflection and scale augmentation. **Reproduce TTA** by `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`
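As a usage sketch of the renamed module (illustrative, not from the patch: the call mirrors the val.run(...) keyword arguments visible in the train.py diff below, while the dataset and checkpoint names are assumptions):

import val  # formerly test.py

# Rough programmatic equivalent of:
#   python val.py --data coco128.yaml --weights yolov5s.pt --img 640
results, maps, _ = val.run('data/coco128.yaml',   # dataset yaml, path assumed
                           weights='yolov5s.pt',  # checkpoint name assumed
                           batch_size=32,
                           imgsz=640)
print(results)  # metrics tuple, consumed the same way by train.py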
diff --git a/models/yolo.py b/models/yolo.py index b11443377080..7b49dfcf48a3 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -310,4 +310,3 @@ def parse_model(d, ch): # model_dict, input_channels(3) # tb_writer = SummaryWriter('.') # logger.info("Run 'tensorboard --logdir=models' to view tensorboard at http://localhost:6006/") # tb_writer.add_graph(torch.jit.trace(model, img, strict=False), []) # add model graph - # tb_writer.add_image('test', img[0], dataformats='CWH') # add model to tensorboard diff --git a/train.py b/train.py index e58d7c4f0348..205c73d85e20 100644 --- a/train.py +++ b/train.py @@ -32,7 +32,7 @@ FILE = Path(__file__).absolute() sys.path.append(FILE.parents[0].as_posix()) # add yolov5/ to path -import test # for end-of-epoch mAP +import val # for end-of-epoch mAP from models.experimental import attempt_load from models.yolo import Model from utils.autoanchor import check_anchors @@ -57,9 +57,9 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary opt, device, ): - save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, notest, nosave, workers, = \ + save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, = \ opt.save_dir, opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \ - opt.resume, opt.notest, opt.nosave, opt.workers + opt.resume, opt.noval, opt.nosave, opt.workers # Directories save_dir = Path(save_dir) @@ -129,7 +129,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary with torch_distributed_zero_first(RANK): check_dataset(data_dict) # check train_path = data_dict['train'] - test_path = data_dict['val'] + val_path = data_dict['val'] # Freeze freeze = [] # parameter names to freeze (full or partial) @@ -207,7 +207,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Image sizes gs = max(int(model.stride.max()), 32) # grid size (max stride) nl = model.model[-1].nl # number of detection layers (used for scaling hyp['obj']) - imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size] # verify imgsz are gs-multiples + imgsz, imgsz_val = [check_img_size(x, gs) for x in opt.img_size] # verify imgsz are gs-multiples # DP mode if cuda and RANK == -1 and torch.cuda.device_count() > 1: @@ -231,8 +231,8 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Process 0 if RANK in [-1, 0]: - testloader = create_dataloader(test_path, imgsz_test, batch_size // WORLD_SIZE * 2, gs, single_cls, - hyp=hyp, cache=opt.cache_images and not notest, rect=True, rank=-1, + valloader = create_dataloader(val_path, imgsz_val, batch_size // WORLD_SIZE * 2, gs, single_cls, + hyp=hyp, cache=opt.cache_images and not noval, rect=True, rank=-1, workers=workers, pad=0.5, prefix=colorstr('val: '))[0] @@ -276,7 +276,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary scheduler.last_epoch = start_epoch - 1 # do not move scaler = amp.GradScaler(enabled=cuda) compute_loss = ComputeLoss(model) # init loss class - logger.info(f'Image sizes {imgsz} train, {imgsz_test} test\n' + logger.info(f'Image sizes {imgsz} train, {imgsz_val} val\n' f'Using {dataloader.num_workers} dataloader workers\n' f'Logging results to {save_dir}\n' f'Starting training for {epochs} epochs...') @@ -384,20 +384,20 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # mAP ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride', 'class_weights']) final_epoch = epoch + 1 == epochs - if not notest or final_epoch: # Calculate mAP + if not noval or final_epoch: # Calculate mAP 
wandb_logger.current_epoch = epoch + 1 - results, maps, _ = test.run(data_dict, - batch_size=batch_size // WORLD_SIZE * 2, - imgsz=imgsz_test, - model=ema.ema, - single_cls=single_cls, - dataloader=testloader, - save_dir=save_dir, - save_json=is_coco and final_epoch, - verbose=nc < 50 and final_epoch, - plots=plots and final_epoch, - wandb_logger=wandb_logger, - compute_loss=compute_loss) + results, maps, _ = val.run(data_dict, + batch_size=batch_size // WORLD_SIZE * 2, + imgsz=imgsz_val, + model=ema.ema, + single_cls=single_cls, + dataloader=valloader, + save_dir=save_dir, + save_json=is_coco and final_epoch, + verbose=nc < 50 and final_epoch, + plots=plots and final_epoch, + wandb_logger=wandb_logger, + compute_loss=compute_loss) # Write with open(results_file, 'a') as f: @@ -454,15 +454,15 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary if not evolve: if is_coco: # COCO dataset for m in [last, best] if best.exists() else [last]: # speed, mAP tests - results, _, _ = test.run(data_dict, - batch_size=batch_size // WORLD_SIZE * 2, - imgsz=imgsz_test, - model=attempt_load(m, device).half(), - single_cls=single_cls, - dataloader=testloader, - save_dir=save_dir, - save_json=True, - plots=False) + results, _, _ = val.run(data_dict, + batch_size=batch_size // WORLD_SIZE * 2, + imgsz=imgsz_val, + model=attempt_load(m, device).half(), + single_cls=single_cls, + dataloader=valloader, + save_dir=save_dir, + save_json=True, + plots=False) # Strip optimizers for f in last, best: @@ -486,11 +486,11 @@ def parse_opt(known=False): parser.add_argument('--hyp', type=str, default='data/hyps/hyp.scratch.yaml', help='hyperparameters path') parser.add_argument('--epochs', type=int, default=300) parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs') - parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='[train, test] image sizes') + parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='[train, val] image sizes') parser.add_argument('--rect', action='store_true', help='rectangular training') parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training') parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') - parser.add_argument('--notest', action='store_true', help='only test final epoch') + parser.add_argument('--noval', action='store_true', help='only validate final epoch') parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check') parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations') parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') @@ -538,7 +538,7 @@ def main(opt): # opt.hyp = opt.hyp or ('hyp.finetune.yaml' if opt.weights else 'hyp.scratch.yaml') opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_file(opt.cfg), check_file(opt.hyp) # check files assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified' - opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size))) # extend to 2 sizes (train, test) + opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size))) # extend to 2 sizes (train, val) opt.name = 'evolve' if opt.evolve else opt.name opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve)) @@ -597,7 +597,7 @@ def main(opt): if 'anchors' not in hyp: # anchors commented in hyp.yaml hyp['anchors'] = 3 assert LOCAL_RANK == 
-1, 'DDP mode not implemented for --evolve' - opt.notest, opt.nosave = True, True # only test/save final epoch + opt.noval, opt.nosave = True, True # only val/save final epoch # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices yaml_file = Path(opt.save_dir) / 'hyp_evolved.yaml' # save best result here if opt.bucket: diff --git a/tutorial.ipynb b/tutorial.ipynb index 15d003c19606..957c0e140f88 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -643,8 +643,8 @@ "id": "0eq1SMWl6Sfn" }, "source": [ - "# 2. Test\n", - "Test a model's accuracy on [COCO](https://cocodataset.org/#home) val or test-dev datasets. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag. Note that `pycocotools` metrics may be ~1% better than the equivalent repo metrics, as is visible below, due to slight differences in mAP computation." + "# 2. Validate\n", + "Validate a model's accuracy on [COCO](https://cocodataset.org/#home) val or test-dev datasets. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag. Note that `pycocotools` metrics may be ~1% better than the equivalent repo metrics, as is visible below, due to slight differences in mAP computation." ] }, { @@ -720,14 +720,14 @@ }, "source": [ "# Run YOLOv5x on COCO val2017\n", - "!python test.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65 --half" + "!python val.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65 --half" ], "execution_count": null, "outputs": [ { "output_type": "stream", "text": [ - "Namespace(augment=False, batch_size=32, conf_thres=0.001, data='./data/coco.yaml', device='', exist_ok=False, half=True, img_size=640, iou_thres=0.65, name='exp', project='runs/test', save_conf=False, save_hybrid=False, save_json=True, save_txt=False, single_cls=False, task='val', verbose=False, weights=['yolov5x.pt'])\n", + "Namespace(augment=False, batch_size=32, conf_thres=0.001, data='./data/coco.yaml', device='', exist_ok=False, half=True, img_size=640, iou_thres=0.65, name='exp', project='runs/val', save_conf=False, save_hybrid=False, save_json=True, save_txt=False, single_cls=False, task='val', verbose=False, weights=['yolov5x.pt'])\n", "YOLOv5 🚀 v5.0-157-gc6b51f4 torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", "\n", "Downloading https://github.com/ultralytics/yolov5/releases/download/v5.0/yolov5x.pt to yolov5x.pt...\n", @@ -741,7 +741,7 @@ " all 5000 36335 0.746 0.626 0.68 0.49\n", "Speed: 5.3/1.5/6.8 ms inference/NMS/total per 640x640 image at batch-size 32\n", "\n", - "Evaluating pycocotools mAP... saving runs/test/exp/yolov5x_predictions.json...\n", + "Evaluating pycocotools mAP... 
saving runs/val/exp/yolov5x_predictions.json...\n", "loading annotations into memory...\n", "Done (t=0.44s)\n", "creating index...\n", @@ -767,7 +767,7 @@ " Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.524\n", " Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.735\n", " Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.827\n", - "Results saved to runs/test/exp\n" + "Results saved to runs/val/exp\n" ], "name": "stdout" } @@ -805,7 +805,7 @@ }, "source": [ "# Run YOLOv5s on COCO test-dev2017 using --task test\n", - "!python test.py --weights yolov5s.pt --data coco.yaml --task test" + "!python val.py --weights yolov5s.pt --data coco.yaml --task test" ], "execution_count": null, "outputs": [] @@ -976,7 +976,7 @@ "Plotting labels... \n", "\n", "\u001b[34m\u001b[1mautoanchor: \u001b[0mAnalyzing anchors... anchors/target = 4.26, Best Possible Recall (BPR) = 0.9946\n", - "Image sizes 640 train, 640 test\n", + "Image sizes 640 train, 640 val\n", "Using 2 dataloader workers\n", "Logging results to runs/train/exp\n", "Starting training for 3 epochs...\n", @@ -1036,7 +1036,7 @@ "source": [ "## Local Logging\n", "\n", - "All results are logged by default to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc. View train and test jpgs to see mosaics, labels, predictions and augmentation effects. Note a **Mosaic Dataloader** is used for training (shown below), a new concept developed by Ultralytics and first featured in [YOLOv4](https://arxiv.org/abs/2004.10934)." + "All results are logged by default to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc. View train and val jpgs to see mosaics, labels, predictions and augmentation effects. Note a **Mosaic Dataloader** is used for training (shown below), a new concept developed by Ultralytics and first featured in [YOLOv4](https://arxiv.org/abs/2004.10934)." ] }, { @@ -1046,8 +1046,8 @@ }, "source": [ "Image(filename='runs/train/exp/train_batch0.jpg', width=800) # train batch 0 mosaics and labels\n", - "Image(filename='runs/train/exp/test_batch0_labels.jpg', width=800) # test batch 0 labels\n", - "Image(filename='runs/train/exp/test_batch0_pred.jpg', width=800) # test batch 0 predictions" + "Image(filename='runs/train/exp/test_batch0_labels.jpg', width=800) # val batch 0 labels\n", + "Image(filename='runs/train/exp/test_batch0_pred.jpg', width=800) # val batch 0 predictions" ], "execution_count": null, "outputs": [] @@ -1062,10 +1062,10 @@ "`train_batch0.jpg` shows train batch 0 mosaics and labels\n", "\n", "> \n", - "`test_batch0_labels.jpg` shows test batch 0 labels\n", + "`test_batch0_labels.jpg` shows val batch 0 labels\n", "\n", "> \n", - "`test_batch0_pred.jpg` shows test batch 0 _predictions_" + "`test_batch0_pred.jpg` shows val batch 0 _predictions_" ] }, { @@ -1125,7 +1125,7 @@ "\n", "![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg)\n", "\n", - "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. 
CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([test.py](https://github.com/ultralytics/yolov5/blob/master/test.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on MacOS, Windows, and Ubuntu every 24 hours and on every commit.\n" + "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on MacOS, Windows, and Ubuntu every 24 hours and on every commit.\n" ] }, { @@ -1147,8 +1147,8 @@ "source": [ "# Reproduce\n", "for x in 'yolov5s', 'yolov5m', 'yolov5l', 'yolov5x':\n", - " !python test.py --weights {x}.pt --data coco.yaml --img 640 --conf 0.25 --iou 0.45 # speed\n", - " !python test.py --weights {x}.pt --data coco.yaml --img 640 --conf 0.001 --iou 0.65 # mAP" + " !python val.py --weights {x}.pt --data coco.yaml --img 640 --conf 0.25 --iou 0.45 # speed\n", + " !python val.py --weights {x}.pt --data coco.yaml --img 640 --conf 0.001 --iou 0.65 # mAP" ], "execution_count": null, "outputs": [] @@ -1193,8 +1193,8 @@ " for d in 0 cpu; do # devices\n", " python detect.py --weights $m.pt --device $d # detect official\n", " python detect.py --weights runs/train/exp/weights/best.pt --device $d # detect custom\n", - " python test.py --weights $m.pt --device $d # test official\n", - " python test.py --weights runs/train/exp/weights/best.pt --device $d # test custom\n", + " python val.py --weights $m.pt --device $d # val official\n", + " python val.py --weights runs/train/exp/weights/best.pt --device $d # val custom\n", " done\n", " python hubconf.py # hub\n", " python models/yolo.py --cfg $m.yaml # inspect\n", diff --git a/utils/augmentations.py b/utils/augmentations.py index c953fcbcc90b..69b835db0db9 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -90,7 +90,7 @@ def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleF # Scale ratio (new / old) r = min(new_shape[0] / shape[0], new_shape[1] / shape[1]) - if not scaleup: # only scale down, do not scale up (for better test mAP) + if not scaleup: # only scale down, do not scale up (for better val mAP) r = min(r, 1.0) # Compute padding diff --git a/utils/general.py b/utils/general.py index 23a827d03d80..846c1464c28c 100755 --- a/utils/general.py +++ b/utils/general.py @@ -633,7 +633,7 @@ def apply_classifier(x, model, img, im0): for j, a in enumerate(d): # per item cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])] im = cv2.resize(cutout, (224, 224)) # BGR - # cv2.imwrite('test%i.jpg' % j, cutout) + # cv2.imwrite('example%i.jpg' % j, cutout) im = im[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 im = np.ascontiguousarray(im, dtype=np.float32) # uint8 to float32 diff --git a/utils/plots.py b/utils/plots.py index 4e6b001dcc2f..cd9a45e8c761 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -219,9 +219,9 @@ def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''): plt.close() -def plot_test_txt(): # from 
utils.plots import *; plot_test() - # Plot test.txt histograms - x = np.loadtxt('test.txt', dtype=np.float32) +def plot_val_txt(): # from utils.plots import *; plot_val() + # Plot val.txt histograms + x = np.loadtxt('val.txt', dtype=np.float32) box = xyxy2xywh(x[:, :4]) cx, cy = box[:, 0], box[:, 1] @@ -250,7 +250,7 @@ def plot_targets_txt(): # from utils.plots import *; plot_targets_txt() def plot_study_txt(path='', x=None): # from utils.plots import *; plot_study_txt() - # Plot study.txt generated by test.py + # Plot study.txt generated by val.py plot2 = False # plot additional results if plot2: ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)[1].ravel() diff --git a/test.py b/val.py similarity index 95% rename from test.py rename to val.py index 643dc441e521..fa5cb8f113e0 100644 --- a/test.py +++ b/val.py @@ -1,7 +1,7 @@ -"""Test a trained YOLOv5 model accuracy on a custom dataset +"""Validate a trained YOLOv5 model accuracy on a custom dataset Usage: - $ python path/to/test.py --data coco128.yaml --weights yolov5s.pt --img 640 + $ python path/to/val.py --data coco128.yaml --weights yolov5s.pt --img 640 """ import argparse @@ -44,7 +44,7 @@ def run(data, save_hybrid=False, # save label+prediction hybrid results to *.txt save_conf=False, # save confidences in --save-txt labels save_json=False, # save a cocoapi-compatible JSON results file - project='runs/test', # save to project/name + project='runs/val', # save to project/name name='exp', # save to project/name exist_ok=False, # existing project/name ok, do not increment half=True, # use FP16 half-precision inference @@ -228,9 +228,9 @@ def run(data, # Plot images if plots and batch_i < 3: - f = save_dir / f'test_batch{batch_i}_labels.jpg' # labels + f = save_dir / f'val_batch{batch_i}_labels.jpg' # labels Thread(target=plot_images, args=(img, targets, paths, f, names), daemon=True).start() - f = save_dir / f'test_batch{batch_i}_pred.jpg' # predictions + f = save_dir / f'val_batch{batch_i}_pred.jpg' # predictions Thread(target=plot_images, args=(img, output_to_target(out), paths, f, names), daemon=True).start() # Compute statistics @@ -262,7 +262,7 @@ def run(data, if plots: confusion_matrix.plot(save_dir=save_dir, names=list(names.values())) if wandb_logger and wandb_logger.wandb: - val_batches = [wandb_logger.wandb.Image(str(f), caption=f.name) for f in sorted(save_dir.glob('test*.jpg'))] + val_batches = [wandb_logger.wandb.Image(str(f), caption=f.name) for f in sorted(save_dir.glob('val*.jpg'))] wandb_logger.log({"Validation": val_batches}) if wandb_images: wandb_logger.log({"Bounding Box Debugger/Images": wandb_images}) @@ -305,7 +305,7 @@ def run(data, def parse_opt(): - parser = argparse.ArgumentParser(prog='test.py') + parser = argparse.ArgumentParser(prog='val.py') parser.add_argument('--data', type=str, default='data/coco128.yaml', help='dataset.yaml path') parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)') parser.add_argument('--batch-size', type=int, default=32, help='batch size') @@ -321,7 +321,7 @@ def parse_opt(): parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt') parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') parser.add_argument('--save-json', action='store_true', help='save a cocoapi-compatible JSON results file') - parser.add_argument('--project', default='runs/test', help='save to project/name') + parser.add_argument('--project', 
default='runs/val', help='save to project/name') parser.add_argument('--name', default='exp', help='save to project/name') parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') @@ -334,7 +334,7 @@ def parse_opt(): def main(opt): set_logging() - print(colorstr('test: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items())) + print(colorstr('val: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items())) check_requirements(exclude=('tensorboard', 'thop')) if opt.task in ('train', 'val', 'test'): # run normally @@ -346,7 +346,7 @@ def main(opt): save_json=False, plots=False) elif opt.task == 'study': # run over a range of settings and save/plot - # python test.py --task study --data coco.yaml --iou 0.7 --weights yolov5s.pt yolov5m.pt yolov5l.pt yolov5x.pt + # python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5s.pt yolov5m.pt yolov5l.pt yolov5x.pt x = list(range(256, 1536 + 128, 128)) # x axis (image sizes) for w in opt.weights if isinstance(opt.weights, list) else [opt.weights]: f = f'study_{Path(opt.data).stem}_{Path(w).stem}.txt' # filename to save to From 62409eea0807830669f21a84733e73052ee85c07 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Wed, 14 Jul 2021 22:43:02 +0530 Subject: [PATCH 175/757] W&B sweeps support (#3938) * Add support for W&B Sweeps * Update and reformat * Update search space * reformat * reformat sweep.py * Update sweep.py * Move sweeps files to wandb dir * Remove print Co-authored-by: Glenn Jocher --- utils/wandb_logging/sweep.py | 33 +++++++ utils/wandb_logging/sweep.yaml | 143 +++++++++++++++++++++++++++++ utils/wandb_logging/wandb_utils.py | 2 +- 3 files changed, 177 insertions(+), 1 deletion(-) create mode 100644 utils/wandb_logging/sweep.py create mode 100644 utils/wandb_logging/sweep.yaml diff --git a/utils/wandb_logging/sweep.py b/utils/wandb_logging/sweep.py new file mode 100644 index 000000000000..6c8719b32006 --- /dev/null +++ b/utils/wandb_logging/sweep.py @@ -0,0 +1,33 @@ +import sys +from pathlib import Path +import wandb + +FILE = Path(__file__).absolute() +sys.path.append(FILE.parents[2].as_posix()) # add utils/ to path + +from train import train, parse_opt +import test +from utils.general import increment_path +from utils.torch_utils import select_device + + +def sweep(): + wandb.init() + # Get hyp dict from sweep agent + hyp_dict = vars(wandb.config).get("_items") + + # Workaround: get necessary opt args + opt = parse_opt(known=True) + opt.batch_size = hyp_dict.get("batch_size") + opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve)) + opt.epochs = hyp_dict.get("epochs") + opt.nosave = True + opt.data = hyp_dict.get("data") + device = select_device(opt.device, batch_size=opt.batch_size) + + # train + train(hyp_dict, opt, device) + + +if __name__ == "__main__": + sweep() diff --git a/utils/wandb_logging/sweep.yaml b/utils/wandb_logging/sweep.yaml new file mode 100644 index 000000000000..64e395533c1c --- /dev/null +++ b/utils/wandb_logging/sweep.yaml @@ -0,0 +1,143 @@ +# Hyperparameters for training +# To set range- +# Provide min and max values as: +# parameter: +# +# min: scalar +# max: scalar +# OR +# +# Set a specific list of search space- +# parameter: +# values: [scalar1, scalar2, scalar3...] 
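As a concrete illustration of the two declaration styles described in the header comment above (min/max range vs. explicit values list), here is a rough agent-side sampling sketch. It is an assumption for illustration only: W&B's real sweep agent samples the search space server-side and hands the result to `sweep.py` via `wandb.config`; the local YAML parse and the printed keys below are not part of this patch.

```python
# Hedged sketch: draw one hyperparameter sample from the 'parameters'
# section of sweep.yaml (illustration only, not W&B's actual agent).
import random

import yaml

with open('utils/wandb_logging/sweep.yaml') as f:  # path added by this patch
    cfg = yaml.safe_load(f)

sample = {}
for name, spec in cfg['parameters'].items():
    if 'values' in spec:        # explicit list of candidates
        sample[name] = random.choice(spec['values'])
    elif 'value' in spec:       # single fixed value
        sample[name] = spec['value']
    else:                       # min/max range, sampled uniformly
        # float() guards against PyYAML parsing '1e-5' as a string
        sample[name] = random.uniform(float(spec['min']), float(spec['max']))

print(sample['batch_size'], sample['lr0'])
```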
+# +# You can use grid, bayesian and hyperopt search strategy +# For more info on configuring sweeps visit - https://docs.wandb.ai/guides/sweeps/configuration + +program: utils/wandb_logging/sweep.py +method: random +metric: + name: metrics/mAP_0.5 + goal: maximize + +parameters: + # hyperparameters: set either min, max range or values list + data: + value: "data/coco128.yaml" + batch_size: + values: [ 64 ] + epochs: + values: [ 10 ] + + lr0: + distribution: uniform + min: 1e-5 + max: 1e-1 + lrf: + distribution: uniform + min: 0.01 + max: 1.0 + momentum: + distribution: uniform + min: 0.6 + max: 0.98 + weight_decay: + distribution: uniform + min: 0.0 + max: 0.001 + warmup_epochs: + distribution: uniform + min: 0.0 + max: 5.0 + warmup_momentum: + distribution: uniform + min: 0.0 + max: 0.95 + warmup_bias_lr: + distribution: uniform + min: 0.0 + max: 0.2 + box: + distribution: uniform + min: 0.02 + max: 0.2 + cls: + distribution: uniform + min: 0.2 + max: 4.0 + cls_pw: + distribution: uniform + min: 0.5 + max: 2.0 + obj: + distribution: uniform + min: 0.2 + max: 4.0 + obj_pw: + distribution: uniform + min: 0.5 + max: 2.0 + iou_t: + distribution: uniform + min: 0.1 + max: 0.7 + anchor_t: + distribution: uniform + min: 2.0 + max: 8.0 + fl_gamma: + distribution: uniform + min: 0.0 + max: 0.1 + hsv_h: + distribution: uniform + min: 0.0 + max: 0.1 + hsv_s: + distribution: uniform + min: 0.0 + max: 0.9 + hsv_v: + distribution: uniform + min: 0.0 + max: 0.9 + degrees: + distribution: uniform + min: 0.0 + max: 45.0 + translate: + distribution: uniform + min: 0.0 + max: 0.9 + scale: + distribution: uniform + min: 0.0 + max: 0.9 + shear: + distribution: uniform + min: 0.0 + max: 10.0 + perspective: + distribution: uniform + min: 0.0 + max: 0.001 + flipud: + distribution: uniform + min: 0.0 + max: 1.0 + fliplr: + distribution: uniform + min: 0.0 + max: 1.0 + mosaic: + distribution: uniform + min: 0.0 + max: 1.0 + mixup: + distribution: uniform + min: 0.0 + max: 1.0 + copy_paste: + distribution: uniform + min: 0.0 + max: 1.0 diff --git a/utils/wandb_logging/wandb_utils.py b/utils/wandb_logging/wandb_utils.py index f031a819b977..2adea9235f6c 100644 --- a/utils/wandb_logging/wandb_utils.py +++ b/utils/wandb_logging/wandb_utils.py @@ -153,7 +153,7 @@ def setup_training(self, opt, data_dict): self.weights = Path(modeldir) / "last.pt" config = self.wandb_run.config opt.weights, opt.save_period, opt.batch_size, opt.bbox_interval, opt.epochs, opt.hyp = str( - self.weights), config.save_period, config.total_batch_size, config.bbox_interval, config.epochs, \ + self.weights), config.save_period, config.batch_size, config.bbox_interval, config.epochs, \ config.opt['hyp'] data_dict = dict(self.wandb_run.config.data_dict) # eliminates the need for config file to resume if 'val_artifact' not in self.__dict__: # If --upload_dataset is set, use the existing artifact, don't download From b7e985e397e12ea2efd19bf3b6329028fb2a4c75 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 16 Jul 2021 14:38:59 +0200 Subject: [PATCH 176/757] Update greetings.yml (#4024) * Update greetings.yml * Update greetings.yml --- .github/workflows/greetings.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index 787fbd71721b..a8990e7222d3 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -33,8 +33,10 @@ jobs: ## Requirements - Python 3.8 or later with all 
[requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) dependencies installed, including `torch>=1.7`. To install run: + **Python>=3.6.0** with all [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) installed including **PyTorch>=1.7**. To get started: ```bash + $ git clone https://github.com/ultralytics/yolov5 + $ cd yolov5 $ pip install -r requirements.txt ``` @@ -52,5 +54,5 @@ jobs: ![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg) - If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on MacOS, Windows, and Ubuntu every 24 hours and on every commit. + If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), validation ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on MacOS, Windows, and Ubuntu every 24 hours and on every commit. From 951922c735c1e98b596fd9845de25a62fcdc7c73 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 17 Jul 2021 13:07:19 +0200 Subject: [PATCH 177/757] Add `--sync-bn` known issue (#4032) * Add `--sync-bn` known issue * Update train.py --- train.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/train.py b/train.py index 205c73d85e20..15c2c356f60e 100644 --- a/train.py +++ b/train.py @@ -217,6 +217,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # SyncBatchNorm if opt.sync_bn and cuda and RANK != -1: + raise Exception('can not train with --sync-bn, known issue https://github.com/ultralytics/yolov5/issues/3998') model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device) logger.info('Using SyncBatchNorm()') @@ -232,9 +233,9 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Process 0 if RANK in [-1, 0]: valloader = create_dataloader(val_path, imgsz_val, batch_size // WORLD_SIZE * 2, gs, single_cls, - hyp=hyp, cache=opt.cache_images and not noval, rect=True, rank=-1, - workers=workers, - pad=0.5, prefix=colorstr('val: '))[0] + hyp=hyp, cache=opt.cache_images and not noval, rect=True, rank=-1, + workers=workers, + pad=0.5, prefix=colorstr('val: '))[0] if not resume: labels = np.concatenate(dataset.labels, 0) From 0067d9578ab5e4da238e56d5fbe181c389f03a9d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 17 Jul 2021 15:47:05 +0200 Subject: [PATCH 178/757] Update greetings.yml (#4037) --- .github/workflows/greetings.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index a8990e7222d3..ddd739ea5769 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -23,17 +23,17 @@ jobs: - ✅ Reduce changes to 
the absolute **minimum** required for your bug fix or feature addition. _"It is not daily increase but daily decrease, hack away the unessential. The closer to the source, the less wastage there is."_ -Bruce Lee issue-message: | - 👋 Hello @${{ github.actor }}, thank you for your interest in 🚀 YOLOv5! Please visit our ⭐️ [Tutorials](https://github.com/ultralytics/yolov5/wiki#tutorials) to get started, where you can find quickstart guides for simple tasks like [Custom Data Training](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data) all the way to advanced concepts like [Hyperparameter Evolution](https://github.com/ultralytics/yolov5/issues/607). + 👋 Hello @${{ github.actor }}, thank you for your interest in YOLOv5 🚀! Please visit our ⭐️ [Tutorials](https://github.com/ultralytics/yolov5/wiki#tutorials) to get started, where you can find quickstart guides for simple tasks like [Custom Data Training](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data) all the way to advanced concepts like [Hyperparameter Evolution](https://github.com/ultralytics/yolov5/issues/607). If this is a 🐛 Bug Report, please provide screenshots and **minimum viable code to reproduce your issue**, otherwise we can not help you. If this is a custom training ❓ Question, please provide as much information as possible, including dataset images, training logs, screenshots, and a public link to online [W&B logging](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data#visualize) if available. - For business inquiries or professional support requests please visit https://www.ultralytics.com or email Glenn Jocher at glenn.jocher@ultralytics.com. + For business inquiries or professional support requests please visit https://ultralytics.com or email Glenn Jocher at glenn.jocher@ultralytics.com. ## Requirements - **Python>=3.6.0** with all [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) installed including **PyTorch>=1.7**. To get started: + [**Python>=3.6.0**](https://www.python.org/) with all [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) installed including [**PyTorch>=1.7**](https://pytorch.org/get-started/locally/). To get started: ```bash $ git clone https://github.com/ultralytics/yolov5 $ cd yolov5 From dd62e2d05cdc0312732202c952e2513acdb8dc3e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 17 Jul 2021 23:12:27 +0200 Subject: [PATCH 179/757] Update README.md (#4041) * Update README.md * Update README.md * Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 035b7002774a..7dff1a0efd33 100755 --- a/README.md +++ b/README.md @@ -64,7 +64,7 @@ See the [YOLOv5 Docs](https://docs.ultralytics.com) for full documentation on tr
Install

-Python >= 3.6.0 required with all [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) dependencies installed:
+[**Python>=3.6.0**](https://www.python.org/) is required with all [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) installed including [**PyTorch>=1.7**](https://pytorch.org/get-started/locally/):

 ```bash
 $ git clone https://github.com/ultralytics/yolov5

From 9dd33fd20f0f1a07762df129d2c2da2b1e9d09d7 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Sun, 18 Jul 2021 15:25:37 +0200
Subject: [PATCH 180/757] AutoShape PosixPath support (#4047)

* AutoShape PosixPath support

Usage example:

```python
from pathlib import Path

model = ...

file = Path('data/images/zidane.jpg')

results = model(file)
```

* Update common.py
---
 models/common.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/models/common.py b/models/common.py
index 05372ae149f5..41fc128c07b9 100644
--- a/models/common.py
+++ b/models/common.py
@@ -1,7 +1,7 @@
 # YOLOv5 common modules

 from copy import copy
-from pathlib import Path
+from pathlib import Path, PosixPath

 import math
 import numpy as np
@@ -232,8 +232,8 @@ def autoshape(self):
     @torch.no_grad()
     def forward(self, imgs, size=640, augment=False, profile=False):
         # Inference from various sources. For height=640, width=1280, RGB images example inputs are:
-        #   filename:   imgs = 'data/images/zidane.jpg'
-        #   URI:             = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/zidane.jpg'
+        #   filename:   imgs = 'data/images/zidane.jpg'  # str or PosixPath
+        #   URI:             = 'https://ultralytics.com/images/zidane.jpg'
         #   OpenCV:          = cv2.imread('image.jpg')[:,:,::-1]  # HWC BGR to RGB x(640,1280,3)
         #   PIL:             = Image.open('image.jpg')  # HWC x(640,1280,3)
         #   numpy:           = np.zeros((640,1280,3))  # HWC
@@ -251,8 +251,8 @@ def forward(self, imgs, size=640, augment=False, profile=False):
         shape0, shape1, files = [], [], []  # image and inference shapes, filenames
         for i, im in enumerate(imgs):
             f = f'image{i}'  # filename
-            if isinstance(im, str):  # filename or uri
-                im, f = Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im), im
+            if isinstance(im, (str, PosixPath)):  # filename or uri
+                im, f = Image.open(requests.get(im, stream=True).raw if str(im).startswith('http') else im), im
                 im = np.asarray(exif_transpose(im))
             elif isinstance(im, Image.Image):  # PIL Image
                 im, f = np.asarray(exif_transpose(im)), getattr(im, 'filename', f) or f

From f7d85620601f4c2513bcd2b7911c20fbc49e9097 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Mon, 19 Jul 2021 10:43:01 +0200
Subject: [PATCH 181/757] `val.py` refactor (#4053)

* val.py refactor

* cleanup

* cleanup

* cleanup

* cleanup

* save after eval

* opt.imgsz bug fix

* wandb refactor

* dataloader to train_loader

* capitalize global variables

* runs/hub/exp to runs/detect/exp

* refactor wandb logging

* Refactor wandb operations (#4061)

Co-authored-by: Ayush Chaurasia
---
 detect.py                          |   6 +-
 models/common.py                   |  33 +++---
 models/yolo.py                     |  43 ++++----
 train.py                           |  67 ++++++------
 utils/datasets.py                  |  35 +++----
 utils/torch_utils.py               |  14 +--
 utils/wandb_logging/wandb_utils.py |  65 ++++++++----
 val.py                             | 160 ++++++++++++++---------------
 8 files changed, 220 insertions(+), 203 deletions(-)

diff --git a/detect.py b/detect.py
index be2c5969c6d7..73f962398442 100644
--- a/detect.py
+++ b/detect.py
@@ -21,7 +21,7 @@ from utils.general import check_img_size, check_requirements, check_imshow, colorstr, non_max_suppression, \
     apply_classifier, scale_coords, 
xyxy2xywh, strip_optimizer, set_logging, increment_path, save_one_box from utils.plots import colors, plot_one_box -from utils.torch_utils import select_device, load_classifier, time_synchronized +from utils.torch_utils import select_device, load_classifier, time_sync @torch.no_grad() @@ -100,14 +100,14 @@ def run(weights='yolov5s.pt', # model.pt path(s) img = img.unsqueeze(0) # Inference - t1 = time_synchronized() + t1 = time_sync() pred = model(img, augment=augment, visualize=increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False)[0] # Apply NMS pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det) - t2 = time_synchronized() + t2 = time_sync() # Apply Classifier if classify: diff --git a/models/common.py b/models/common.py index 41fc128c07b9..4db90b54663e 100644 --- a/models/common.py +++ b/models/common.py @@ -1,5 +1,6 @@ # YOLOv5 common modules +import logging from copy import copy from pathlib import Path, PosixPath @@ -15,7 +16,9 @@ from utils.datasets import exif_transpose, letterbox from utils.general import non_max_suppression, make_divisible, scale_coords, increment_path, xyxy2xywh, save_one_box from utils.plots import colors, plot_one_box -from utils.torch_utils import time_synchronized +from utils.torch_utils import time_sync + +LOGGER = logging.getLogger(__name__) def autopad(k, p=None): # kernel, padding @@ -226,7 +229,7 @@ def __init__(self, model): self.model = model.eval() def autoshape(self): - print('AutoShape already enabled, skipping... ') # model already converted to model.autoshape() + LOGGER.info('AutoShape already enabled, skipping... ') # model already converted to model.autoshape() return self @torch.no_grad() @@ -240,7 +243,7 @@ def forward(self, imgs, size=640, augment=False, profile=False): # torch: = torch.zeros(16,3,320,640) # BCHW (scaled to size=640, 0-1 values) # multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] # list of images - t = [time_synchronized()] + t = [time_sync()] p = next(self.model.parameters()) # for device and type if isinstance(imgs, torch.Tensor): # torch with amp.autocast(enabled=p.device.type != 'cpu'): @@ -270,19 +273,19 @@ def forward(self, imgs, size=640, augment=False, profile=False): x = np.stack(x, 0) if n > 1 else x[0][None] # stack x = np.ascontiguousarray(x.transpose((0, 3, 1, 2))) # BHWC to BCHW x = torch.from_numpy(x).to(p.device).type_as(p) / 255. 
# uint8 to fp16/32 - t.append(time_synchronized()) + t.append(time_sync()) with amp.autocast(enabled=p.device.type != 'cpu'): # Inference y = self.model(x, augment, profile)[0] # forward - t.append(time_synchronized()) + t.append(time_sync()) # Post-process y = non_max_suppression(y, self.conf, iou_thres=self.iou, classes=self.classes, max_det=self.max_det) # NMS for i in range(n): scale_coords(shape1, y[i][:, :4], shape0[i]) - t.append(time_synchronized()) + t.append(time_sync()) return Detections(imgs, y, files, t, self.names, x.shape) @@ -323,31 +326,33 @@ def display(self, pprint=False, show=False, save=False, crop=False, render=False im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np if pprint: - print(str.rstrip(', ')) + LOGGER.info(str.rstrip(', ')) if show: im.show(self.files[i]) # show if save: f = self.files[i] im.save(save_dir / f) # save - print(f"{'Saved' * (i == 0)} {f}", end=',' if i < self.n - 1 else f' to {save_dir}\n') + if i == self.n - 1: + LOGGER.info(f"Saved {self.n} image{'s' * (self.n > 1)} to '{save_dir}'") if render: self.imgs[i] = np.asarray(im) def print(self): self.display(pprint=True) # print results - print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' % self.t) + LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' % + self.t) def show(self): self.display(show=True) # show results - def save(self, save_dir='runs/hub/exp'): - save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/hub/exp', mkdir=True) # increment save_dir + def save(self, save_dir='runs/detect/exp'): + save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) # increment save_dir self.display(save=True, save_dir=save_dir) # save results - def crop(self, save_dir='runs/hub/exp'): - save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/hub/exp', mkdir=True) # increment save_dir + def crop(self, save_dir='runs/detect/exp'): + save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) # increment save_dir self.display(crop=True, save_dir=save_dir) # crop results - print(f'Saved results to {save_dir}\n') + LOGGER.info(f'Saved results to {save_dir}\n') def render(self): self.display(render=True) # render results diff --git a/models/yolo.py b/models/yolo.py index 7b49dfcf48a3..3a3af9b5fbde 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -5,7 +5,6 @@ """ import argparse -import logging import sys from copy import deepcopy from pathlib import Path @@ -18,7 +17,7 @@ from utils.autoanchor import check_anchor_order from utils.general import make_divisible, check_file, set_logging from utils.plots import feature_visualization -from utils.torch_utils import time_synchronized, fuse_conv_and_bn, model_info, scale_img, initialize_weights, \ +from utils.torch_utils import time_sync, fuse_conv_and_bn, model_info, scale_img, initialize_weights, \ select_device, copy_attr try: @@ -26,7 +25,7 @@ except ImportError: thop = None -logger = logging.getLogger(__name__) +LOGGER = logging.getLogger(__name__) class Detect(nn.Module): @@ -90,15 +89,15 @@ def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, i # Define model ch = self.yaml['ch'] = self.yaml.get('ch', ch) # input channels if nc and nc != self.yaml['nc']: - logger.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}") + LOGGER.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}") self.yaml['nc'] = 
nc # override yaml value if anchors: - logger.info(f'Overriding model.yaml anchors with anchors={anchors}') + LOGGER.info(f'Overriding model.yaml anchors with anchors={anchors}') self.yaml['anchors'] = round(anchors) # override yaml value self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]) # model, savelist self.names = [str(i) for i in range(self.yaml['nc'])] # default names self.inplace = self.yaml.get('inplace', True) - # logger.info([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))]) + # LOGGER.info([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))]) # Build strides, anchors m = self.model[-1] # Detect() @@ -110,12 +109,12 @@ def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, i check_anchor_order(m) self.stride = m.stride self._initialize_biases() # only run once - # logger.info('Strides: %s' % m.stride.tolist()) + # LOGGER.info('Strides: %s' % m.stride.tolist()) # Init weights, biases initialize_weights(self) self.info() - logger.info('') + LOGGER.info('') def forward(self, x, augment=False, profile=False, visualize=False): if augment: @@ -143,13 +142,13 @@ def forward_once(self, x, profile=False, visualize=False): if profile: o = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPs - t = time_synchronized() + t = time_sync() for _ in range(10): _ = m(x) - dt.append((time_synchronized() - t) * 100) + dt.append((time_sync() - t) * 100) if m == self.model[0]: - logger.info(f"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s} {'module'}") - logger.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f} {m.type}') + LOGGER.info(f"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s} {'module'}") + LOGGER.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f} {m.type}') x = m(x) # run y.append(x if m.i in self.save else None) # save output @@ -158,7 +157,7 @@ def forward_once(self, x, profile=False, visualize=False): feature_visualization(x, m.type, m.i, save_dir=visualize) if profile: - logger.info('%.1fms total' % sum(dt)) + LOGGER.info('%.1fms total' % sum(dt)) return x def _descale_pred(self, p, flips, scale, img_size): @@ -192,16 +191,16 @@ def _print_biases(self): m = self.model[-1] # Detect() module for mi in m.m: # from b = mi.bias.detach().view(m.na, -1).T # conv.bias(255) to (3,85) - logger.info( + LOGGER.info( ('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean())) # def _print_weights(self): # for m in self.model.modules(): # if type(m) is Bottleneck: - # logger.info('%10.3g' % (m.w.detach().sigmoid() * 2)) # shortcut weights + # LOGGER.info('%10.3g' % (m.w.detach().sigmoid() * 2)) # shortcut weights def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers - logger.info('Fusing layers... ') + LOGGER.info('Fusing layers... ') for m in self.model.modules(): if type(m) is Conv and hasattr(m, 'bn'): m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv @@ -213,19 +212,19 @@ def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers def nms(self, mode=True): # add or remove NMS module present = type(self.model[-1]) is NMS # last layer is NMS if mode and not present: - logger.info('Adding NMS... ') + LOGGER.info('Adding NMS... ') m = NMS() # module m.f = -1 # from m.i = self.model[-1].i + 1 # index self.model.add_module(name='%s' % m.i, module=m) # add self.eval() elif not mode and present: - logger.info('Removing NMS... ') + LOGGER.info('Removing NMS... 
') self.model = self.model[:-1] # remove return self def autoshape(self): # add AutoShape module - logger.info('Adding AutoShape... ') + LOGGER.info('Adding AutoShape... ') m = AutoShape(self) # wrap model copy_attr(m, self, include=('yaml', 'nc', 'hyp', 'names', 'stride'), exclude=()) # copy attributes return m @@ -235,7 +234,7 @@ def info(self, verbose=False, img_size=640): # print model information def parse_model(d, ch): # model_dict, input_channels(3) - logger.info('\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments')) + LOGGER.info('\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments')) anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'] na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors no = na * (nc + 5) # number of outputs = anchors * (classes + 5) @@ -279,7 +278,7 @@ def parse_model(d, ch): # model_dict, input_channels(3) t = str(m)[8:-2].replace('__main__.', '') # module type np = sum([x.numel() for x in m_.parameters()]) # number params m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params - logger.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n, np, t, args)) # print + LOGGER.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n, np, t, args)) # print save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist layers.append(m_) if i == 0: @@ -308,5 +307,5 @@ def parse_model(d, ch): # model_dict, input_channels(3) # Tensorboard (not working https://github.com/ultralytics/yolov5/issues/2898) # from torch.utils.tensorboard import SummaryWriter # tb_writer = SummaryWriter('.') - # logger.info("Run 'tensorboard --logdir=models' to view tensorboard at http://localhost:6006/") + # LOGGER.info("Run 'tensorboard --logdir=models' to view tensorboard at http://localhost:6006/") # tb_writer.add_graph(torch.jit.trace(model, img, strict=False), []) # add model graph diff --git a/train.py b/train.py index 15c2c356f60e..b1afaf8ada75 100644 --- a/train.py +++ b/train.py @@ -47,7 +47,7 @@ from utils.wandb_logging.wandb_utils import WandbLogger, check_wandb_resume from utils.metrics import fitness -logger = logging.getLogger(__name__) +LOGGER = logging.getLogger(__name__) LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html RANK = int(os.getenv('RANK', -1)) WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) @@ -73,7 +73,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary if isinstance(hyp, str): with open(hyp) as f: hyp = yaml.safe_load(f) # load hyps dict - logger.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items())) + LOGGER.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items())) # Save run settings with open(save_dir / 'hyp.yaml', 'w') as f: @@ -94,7 +94,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # TensorBoard if not evolve: prefix = colorstr('tensorboard: ') - logger.info(f"{prefix}Start with 'tensorboard --logdir {opt.project}', view at http://localhost:6006/") + LOGGER.info(f"{prefix}Start with 'tensorboard --logdir {opt.project}', view at http://localhost:6006/") loggers['tb'] = SummaryWriter(str(save_dir)) # W&B @@ -123,7 +123,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary state_dict = ckpt['model'].float().state_dict() # to FP32 state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude) # intersect model.load_state_dict(state_dict, strict=False) # load - 
logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights)) # report + LOGGER.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights)) # report else: model = Model(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create with torch_distributed_zero_first(RANK): @@ -143,7 +143,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary nbs = 64 # nominal batch size accumulate = max(round(nbs / batch_size), 1) # accumulate loss before optimizing hyp['weight_decay'] *= batch_size * accumulate / nbs # scale weight_decay - logger.info(f"Scaled weight_decay = {hyp['weight_decay']}") + LOGGER.info(f"Scaled weight_decay = {hyp['weight_decay']}") pg0, pg1, pg2 = [], [], [] # optimizer parameter groups for k, v in model.named_modules(): @@ -161,7 +161,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']}) # add pg1 with weight_decay optimizer.add_param_group({'params': pg2}) # add pg2 (biases) - logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0))) + LOGGER.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0))) del pg0, pg1, pg2 # Scheduler https://arxiv.org/pdf/1812.01187.pdf @@ -198,7 +198,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary if resume: assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % (weights, epochs) if epochs < start_epoch: - logger.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' % + LOGGER.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' % (weights, ckpt['epoch'], epochs)) epochs += ckpt['epoch'] # finetune additional epochs @@ -207,7 +207,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Image sizes gs = max(int(model.stride.max()), 32) # grid size (max stride) nl = model.model[-1].nl # number of detection layers (used for scaling hyp['obj']) - imgsz, imgsz_val = [check_img_size(x, gs) for x in opt.img_size] # verify imgsz are gs-multiples + imgsz = check_img_size(opt.imgsz, gs) # verify imgsz is gs-multiple # DP mode if cuda and RANK == -1 and torch.cuda.device_count() > 1: @@ -219,33 +219,31 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary if opt.sync_bn and cuda and RANK != -1: raise Exception('can not train with --sync-bn, known issue https://github.com/ultralytics/yolov5/issues/3998') model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device) - logger.info('Using SyncBatchNorm()') + LOGGER.info('Using SyncBatchNorm()') # Trainloader - dataloader, dataset = create_dataloader(train_path, imgsz, batch_size // WORLD_SIZE, gs, single_cls, - hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=RANK, - workers=workers, - image_weights=opt.image_weights, quad=opt.quad, prefix=colorstr('train: ')) + train_loader, dataset = create_dataloader(train_path, imgsz, batch_size // WORLD_SIZE, gs, single_cls, + hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=RANK, + workers=workers, image_weights=opt.image_weights, quad=opt.quad, + prefix=colorstr('train: ')) mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class - nb = len(dataloader) # number of batches + nb = len(train_loader) # number of batches assert mlc < nc, 'Label class %g exceeds nc=%g in %s. 
Possible class labels are 0-%g' % (mlc, nc, data, nc - 1) # Process 0 if RANK in [-1, 0]: - valloader = create_dataloader(val_path, imgsz_val, batch_size // WORLD_SIZE * 2, gs, single_cls, - hyp=hyp, cache=opt.cache_images and not noval, rect=True, rank=-1, - workers=workers, - pad=0.5, prefix=colorstr('val: '))[0] + val_loader = create_dataloader(val_path, imgsz, batch_size // WORLD_SIZE * 2, gs, single_cls, + hyp=hyp, cache=opt.cache_images and not noval, rect=True, rank=-1, + workers=workers, pad=0.5, + prefix=colorstr('val: '))[0] if not resume: labels = np.concatenate(dataset.labels, 0) - c = torch.tensor(labels[:, 0]) # classes + # c = torch.tensor(labels[:, 0]) # classes # cf = torch.bincount(c.long(), minlength=nc) + 1. # frequency # model._initialize_biases(cf.to(device)) if plots: plot_labels(labels, names, save_dir, loggers) - if loggers['tb']: - loggers['tb'].add_histogram('classes', c, 0) # TensorBoard # Anchors if not opt.noautoanchor: @@ -277,8 +275,8 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary scheduler.last_epoch = start_epoch - 1 # do not move scaler = amp.GradScaler(enabled=cuda) compute_loss = ComputeLoss(model) # init loss class - logger.info(f'Image sizes {imgsz} train, {imgsz_val} val\n' - f'Using {dataloader.num_workers} dataloader workers\n' + LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n' + f'Using {train_loader.num_workers} dataloader workers\n' f'Logging results to {save_dir}\n' f'Starting training for {epochs} epochs...') for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------ @@ -304,9 +302,9 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary mloss = torch.zeros(4, device=device) # mean losses if RANK != -1: - dataloader.sampler.set_epoch(epoch) - pbar = enumerate(dataloader) - logger.info(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'labels', 'img_size')) + train_loader.sampler.set_epoch(epoch) + pbar = enumerate(train_loader) + LOGGER.info(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'labels', 'img_size')) if RANK in [-1, 0]: pbar = tqdm(pbar, total=nb) # progress bar optimizer.zero_grad() @@ -389,10 +387,10 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary wandb_logger.current_epoch = epoch + 1 results, maps, _ = val.run(data_dict, batch_size=batch_size // WORLD_SIZE * 2, - imgsz=imgsz_val, + imgsz=imgsz, model=ema.ema, single_cls=single_cls, - dataloader=valloader, + dataloader=val_loader, save_dir=save_dir, save_json=is_coco and final_epoch, verbose=nc < 50 and final_epoch, @@ -444,7 +442,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # end epoch ---------------------------------------------------------------------------------------------------- # end training ----------------------------------------------------------------------------------------------------- if RANK in [-1, 0]: - logger.info(f'{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.\n') + LOGGER.info(f'{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.\n') if plots: plot_results(save_dir=save_dir) # save as results.png if loggers['wandb']: @@ -457,10 +455,10 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary for m in [last, best] if best.exists() else [last]: # speed, mAP tests results, _, _ = val.run(data_dict, batch_size=batch_size // WORLD_SIZE * 2, - imgsz=imgsz_val, + imgsz=imgsz, model=attempt_load(m, device).half(), single_cls=single_cls, - dataloader=valloader, 
+ dataloader=val_loader, save_dir=save_dir, save_json=True, plots=False) @@ -487,7 +485,7 @@ def parse_opt(known=False): parser.add_argument('--hyp', type=str, default='data/hyps/hyp.scratch.yaml', help='hyperparameters path') parser.add_argument('--epochs', type=int, default=300) parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs') - parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='[train, val] image sizes') + parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)') parser.add_argument('--rect', action='store_true', help='rectangular training') parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training') parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') @@ -534,12 +532,11 @@ def main(opt): with open(Path(ckpt).parent.parent / 'opt.yaml') as f: opt = argparse.Namespace(**yaml.safe_load(f)) # replace opt.cfg, opt.weights, opt.resume = '', ckpt, True # reinstate - logger.info('Resuming training from %s' % ckpt) + LOGGER.info(f'Resuming training from {ckpt}') else: # opt.hyp = opt.hyp or ('hyp.finetune.yaml' if opt.weights else 'hyp.scratch.yaml') opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_file(opt.cfg), check_file(opt.hyp) # check files assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified' - opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size))) # extend to 2 sizes (train, val) opt.name = 'evolve' if opt.evolve else opt.name opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve)) @@ -602,7 +599,7 @@ def main(opt): # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices yaml_file = Path(opt.save_dir) / 'hyp_evolved.yaml' # save best result here if opt.bucket: - os.system('gsutil cp gs://%s/evolve.txt .' 
% opt.bucket) # download evolve.txt if exists + os.system(f'gsutil cp gs://{opt.bucket}/evolve.txt .') # download evolve.txt if exists for _ in range(opt.evolve): # generations to evolve if Path('evolve.txt').exists(): # if evolve.txt exists: select best hyps and mutate diff --git a/utils/datasets.py b/utils/datasets.py index 0763b56d31e3..d3edafa99bd0 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -22,17 +22,16 @@ from torch.utils.data import Dataset from tqdm import tqdm -from utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective, cutout +from utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective from utils.general import check_requirements, check_file, check_dataset, xywh2xyxy, xywhn2xyxy, xyxy2xywhn, \ xyn2xy, segments2boxes, clean_str from utils.torch_utils import torch_distributed_zero_first # Parameters -help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' -img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo'] # acceptable image suffixes -vid_formats = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes -num_threads = min(8, os.cpu_count()) # number of multiprocessing threads -logger = logging.getLogger(__name__) +HELP_URL = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' +IMG_FORMATS = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo'] # acceptable image suffixes +VID_FORMATS = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes +NUM_THREADS = min(8, os.cpu_count()) # number of multiprocessing threads # Get orientation exif tag for orientation in ExifTags.TAGS.keys(): @@ -164,8 +163,8 @@ def __init__(self, path, img_size=640, stride=32): else: raise Exception(f'ERROR: {p} does not exist') - images = [x for x in files if x.split('.')[-1].lower() in img_formats] - videos = [x for x in files if x.split('.')[-1].lower() in vid_formats] + images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS] + videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS] ni, nv = len(images), len(videos) self.img_size = img_size @@ -179,7 +178,7 @@ def __init__(self, path, img_size=640, stride=32): else: self.cap = None assert self.nf > 0, f'No images or videos found in {p}. 
' \ - f'Supported formats are:\nimages: {img_formats}\nvideos: {vid_formats}' + f'Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}' def __iter__(self): self.count = 0 @@ -389,11 +388,11 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r # f += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib) else: raise Exception(f'{prefix}{p} does not exist') - self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in img_formats]) + self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS]) # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in img_formats]) # pathlib assert self.img_files, f'{prefix}No images found' except Exception as e: - raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {help_url}') + raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {HELP_URL}') # Check cache self.label_files = img2label_paths(self.img_files) # labels @@ -411,7 +410,7 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r tqdm(None, desc=prefix + d, total=n, initial=n) # display cache results if cache['msgs']: logging.info('\n'.join(cache['msgs'])) # display warnings - assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {help_url}' + assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {HELP_URL}' # Read cache [cache.pop(k) for k in ('hash', 'version', 'msgs')] # remove items @@ -460,7 +459,7 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r if cache_images: gb = 0 # Gigabytes of cached images self.img_hw0, self.img_hw = [None] * n, [None] * n - results = ThreadPool(num_threads).imap(lambda x: load_image(*x), zip(repeat(self), range(n))) + results = ThreadPool(NUM_THREADS).imap(lambda x: load_image(*x), zip(repeat(self), range(n))) pbar = tqdm(enumerate(results), total=n) for i, x in pbar: self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # img, hw_original, hw_resized = load_image(self, i) @@ -473,7 +472,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): x = {} # dict nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupt, messages desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels..." - with Pool(num_threads) as pool: + with Pool(NUM_THREADS) as pool: pbar = tqdm(pool.imap_unordered(verify_image_label, zip(self.img_files, self.label_files, repeat(prefix))), desc=desc, total=len(self.img_files)) for im_file, l, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar: @@ -491,7 +490,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): if msgs: logging.info('\n'.join(msgs)) if nf == 0: - logging.info(f'{prefix}WARNING: No labels found in {path}. See {help_url}') + logging.info(f'{prefix}WARNING: No labels found in {path}. 
See {HELP_URL}') x['hash'] = get_hash(self.label_files + self.img_files) x['results'] = nf, nm, ne, nc, len(self.img_files) x['msgs'] = msgs # warnings @@ -789,7 +788,7 @@ def extract_boxes(path='../datasets/coco128'): # from utils.datasets import *; files = list(path.rglob('*.*')) n = len(files) # number of files for im_file in tqdm(files, total=n): - if im_file.suffix[1:] in img_formats: + if im_file.suffix[1:] in IMG_FORMATS: # image im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB h, w = im.shape[:2] @@ -825,7 +824,7 @@ def autosplit(path='../datasets/coco128/images', weights=(0.9, 0.1, 0.0), annota annotated_only: Only use images with an annotated txt file """ path = Path(path) # images dir - files = sum([list(path.rglob(f"*.{img_ext}")) for img_ext in img_formats], []) # image files only + files = sum([list(path.rglob(f"*.{img_ext}")) for img_ext in IMG_FORMATS], []) # image files only n = len(files) # number of files random.seed(0) # for reproducibility indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split @@ -850,7 +849,7 @@ def verify_image_label(args): im.verify() # PIL verify shape = exif_size(im) # image size assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels' - assert im.format.lower() in img_formats, f'invalid image format {im.format}' + assert im.format.lower() in IMG_FORMATS, f'invalid image format {im.format}' if im.format.lower() in ('jpg', 'jpeg'): with open(im_file, 'rb') as f: f.seek(-2, 2) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 36b6845a8c48..d86267b26356 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -22,7 +22,7 @@ import thop # for FLOPs computation except ImportError: thop = None -logger = logging.getLogger(__name__) +LOGGER = logging.getLogger(__name__) @contextmanager @@ -85,11 +85,11 @@ def select_device(device='', batch_size=None): else: s += 'CPU\n' - logger.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe + LOGGER.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe return torch.device('cuda:0' if cuda else 'cpu') -def time_synchronized(): +def time_sync(): # pytorch-accurate time if torch.cuda.is_available(): torch.cuda.synchronize() @@ -118,12 +118,12 @@ def profile(x, ops, n=100, device=None): flops = 0 for _ in range(n): - t[0] = time_synchronized() + t[0] = time_sync() y = m(x) - t[1] = time_synchronized() + t[1] = time_sync() try: _ = y.sum().backward() - t[2] = time_synchronized() + t[2] = time_sync() except: # no backward method t[2] = float('nan') dtf += (t[1] - t[0]) * 1000 / n # ms per op forward @@ -231,7 +231,7 @@ def model_info(model, verbose=False, img_size=640): except (ImportError, Exception): fs = '' - logger.info(f"Model Summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}") + LOGGER.info(f"Model Summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}") def load_classifier(name='resnet101', n=2): diff --git a/utils/wandb_logging/wandb_utils.py b/utils/wandb_logging/wandb_utils.py index 2adea9235f6c..a7e84ca100e4 100644 --- a/utils/wandb_logging/wandb_utils.py +++ b/utils/wandb_logging/wandb_utils.py @@ -98,7 +98,14 @@ class WandbLogger(): def __init__(self, opt, name, run_id, data_dict, job_type='Training'): # Pre-training routine -- self.job_type = job_type - self.wandb, self.wandb_run, self.data_dict = wandb, None if not wandb else wandb.run, data_dict + self.wandb, self.wandb_run = wandb, 
None if not wandb else wandb.run + self.val_artifact, self.train_artifact = None, None + self.train_artifact_path, self.val_artifact_path = None, None + self.result_artifact = None + self.val_table, self.result_table = None, None + self.data_dict = data_dict + self.bbox_media_panel_images = [] + self.val_table_path_map = None # It's more elegant to stick to 1 wandb.init call, but useful config data is overwritten in the WandbLogger's wandb.init call if isinstance(opt.resume, str): # checks resume from artifact if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): @@ -156,25 +163,27 @@ def setup_training(self, opt, data_dict): self.weights), config.save_period, config.batch_size, config.bbox_interval, config.epochs, \ config.opt['hyp'] data_dict = dict(self.wandb_run.config.data_dict) # eliminates the need for config file to resume - if 'val_artifact' not in self.__dict__: # If --upload_dataset is set, use the existing artifact, don't download + if self.val_artifact is None: # If --upload_dataset is set, use the existing artifact, don't download self.train_artifact_path, self.train_artifact = self.download_dataset_artifact(data_dict.get('train'), opt.artifact_alias) self.val_artifact_path, self.val_artifact = self.download_dataset_artifact(data_dict.get('val'), opt.artifact_alias) - self.result_artifact, self.result_table, self.val_table, self.weights = None, None, None, None - if self.train_artifact_path is not None: - train_path = Path(self.train_artifact_path) / 'data/images/' - data_dict['train'] = str(train_path) - if self.val_artifact_path is not None: - val_path = Path(self.val_artifact_path) / 'data/images/' - data_dict['val'] = str(val_path) - self.val_table = self.val_artifact.get("val") - self.map_val_table_path() - wandb.log({"validation dataset": self.val_table}) - + + if self.train_artifact_path is not None: + train_path = Path(self.train_artifact_path) / 'data/images/' + data_dict['train'] = str(train_path) + if self.val_artifact_path is not None: + val_path = Path(self.val_artifact_path) / 'data/images/' + data_dict['val'] = str(val_path) + + if self.val_artifact is not None: self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") self.result_table = wandb.Table(["epoch", "id", "ground truth", "prediction", "avg_confidence"]) + self.val_table = self.val_artifact.get("val") + if self.val_table_path_map is None: + self.map_val_table_path() + wandb.log({"validation dataset": self.val_table}) if opt.bbox_interval == -1: self.bbox_interval = opt.bbox_interval = (opt.epochs // 10) if opt.epochs > 10 else 1 return data_dict @@ -182,7 +191,7 @@ def setup_training(self, opt, data_dict): def download_dataset_artifact(self, path, alias): if isinstance(path, str) and path.startswith(WANDB_ARTIFACT_PREFIX): artifact_path = Path(remove_prefix(path, WANDB_ARTIFACT_PREFIX) + ":" + alias) - dataset_artifact = wandb.use_artifact(artifact_path.as_posix().replace("\\","/")) + dataset_artifact = wandb.use_artifact(artifact_path.as_posix().replace("\\", "/")) assert dataset_artifact is not None, "'Error: W&B dataset artifact doesn\'t exist'" datadir = dataset_artifact.download() return datadir, dataset_artifact @@ -246,10 +255,10 @@ def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config= return path def map_val_table_path(self): - self.val_table_map = {} + self.val_table_path_map = {} print("Mapping dataset") for i, data in enumerate(tqdm(self.val_table.data)): - self.val_table_map[data[3]] = data[0] + self.val_table_path_map[data[3]] = data[0] 
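The `val_table_path_map` rename above is more than cosmetic: `map_val_table_path` builds a filename-to-row-id index over the W&B validation table so that per-image predictions can later be joined back to their ground-truth rows (see `log_training_progress` below). A minimal self-contained sketch of that join, with toy rows standing in for `self.val_table.data`; the `[id, class_set, image, name]` row layout is an assumption read off this diff.

```python
# Toy illustration of the filename -> table-row-id join used by the W&B logger.
from pathlib import Path

# stand-in for self.val_table.data; row layout [id, class_set, image, name] assumed
val_table_data = [
    [0, None, '<wandb.Image>', 'im0.jpg'],
    [1, None, '<wandb.Image>', 'im1.jpg'],
]
val_table_path_map = {row[3]: row[0] for row in val_table_data}  # name -> row id

pred_path = Path('data/images/im1.jpg')  # path of the image just evaluated
row_id = val_table_path_map[pred_path.name]
assert row_id == 1  # predictions for im1.jpg attach to table row 1
```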
def create_dataset_table(self, dataset, class_to_id, name='dataset'): # TODO: Explore multiprocessing to slpit this loop parallely| This is essential for speeding up the the logging @@ -283,7 +292,6 @@ def create_dataset_table(self, dataset, class_to_id, name='dataset'): return artifact def log_training_progress(self, predn, path, names): - if self.val_table and self.result_table: class_set = wandb.Classes([{'id': id, 'name': name} for id, name in names.items()]) box_data = [] total_conf = 0 @@ -297,7 +305,7 @@ def log_training_progress(self, predn, path, names): "domain": "pixel"}) total_conf = total_conf + conf boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space - id = self.val_table_map[Path(path).name] + id = self.val_table_path_map[Path(path).name] self.result_table.add_data(self.current_epoch, id, self.val_table.data[id][1], @@ -305,6 +313,22 @@ def log_training_progress(self, predn, path, names): total_conf / max(1, len(box_data)) ) + def val_one_image(self, pred, predn, path, names, im): + if self.val_table and self.result_table: # Log Table if Val dataset is uploaded as artifact + self.log_training_progress(predn, path, names) + else: # Default to bbox media panelif Val artifact not found + log_imgs = min(self.log_imgs, 100) + if len(self.bbox_media_panel_images) < log_imgs and self.current_epoch > 0: + if self.current_epoch % self.bbox_interval == 0: + box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, + "class_id": int(cls), + "box_caption": "%s %.3f" % (names[cls], conf), + "scores": {"class_score": conf}, + "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()] + boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space + self.bbox_media_panel_images.append(wandb.Image(im, boxes=boxes, caption=path.name)) + + def log(self, log_dict): if self.wandb_run: for key, value in log_dict.items(): @@ -313,13 +337,16 @@ def log(self, log_dict): def end_epoch(self, best_result=False): if self.wandb_run: with all_logging_disabled(): + if self.bbox_media_panel_images: + self.log_dict["Bounding Box Debugger/Images"] = self.bbox_media_panel_images wandb.log(self.log_dict) self.log_dict = {} + self.bbox_media_panel_images = [] if self.result_artifact: self.result_artifact.add(self.result_table, 'result') wandb.log_artifact(self.result_artifact, aliases=['latest', 'last', 'epoch ' + str(self.current_epoch), ('best' if best_result else '')]) - + wandb.log({"evaluation": self.result_table}) self.result_table = wandb.Table(["epoch", "id", "ground truth", "prediction", "avg_confidence"]) self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") diff --git a/val.py b/val.py index fa5cb8f113e0..5a8486720577 100644 --- a/val.py +++ b/val.py @@ -25,7 +25,52 @@ box_iou, non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, set_logging, increment_path, colorstr from utils.metrics import ap_per_class, ConfusionMatrix from utils.plots import plot_images, output_to_target, plot_study_txt -from utils.torch_utils import select_device, time_synchronized +from utils.torch_utils import select_device, time_sync + + +def save_one_txt(predn, save_conf, shape, file): + # Save one txt result + gn = torch.tensor(shape)[[1, 0, 1, 0]] # normalization gain whwh + for *xyxy, conf, cls in predn.tolist(): + xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh + line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format + with 
open(file, 'a') as f: + f.write(('%g ' * len(line)).rstrip() % line + '\n') + + +def save_one_json(predn, jdict, path, class_map): + # Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236} + image_id = int(path.stem) if path.stem.isnumeric() else path.stem + box = xyxy2xywh(predn[:, :4]) # xywh + box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner + for p, b in zip(predn.tolist(), box.tolist()): + jdict.append({'image_id': image_id, + 'category_id': class_map[int(p[5])], + 'bbox': [round(x, 3) for x in b], + 'score': round(p[4], 5)}) + + +def process_batch(predictions, labels, iouv): + # Evaluate 1 batch of predictions + correct = torch.zeros(predictions.shape[0], len(iouv), dtype=torch.bool, device=iouv.device) + detected = [] # label indices + tcls, pcls = labels[:, 0], predictions[:, 5] + nl = labels.shape[0] # number of labels + for cls in torch.unique(tcls): + ti = (cls == tcls).nonzero().view(-1) # label indices + pi = (cls == pcls).nonzero().view(-1) # prediction indices + if pi.shape[0]: # find detections + ious, i = box_iou(predictions[pi, 0:4], labels[ti, 1:5]).max(1) # best ious, indices + detected_set = set() + for j in (ious > iouv[0]).nonzero(): + d = ti[i[j]] # detected label + if d.item() not in detected_set: + detected_set.add(d.item()) + detected.append(d) # append detections + correct[pi[j]] = ious[j] > iouv # iou_thres is 1xn + if len(detected) == nl: # all labels already located in image + break + return correct @torch.no_grad() @@ -43,7 +88,7 @@ def run(data, save_txt=False, # save results to *.txt save_hybrid=False, # save label+prediction hybrid results to *.txt save_conf=False, # save confidences in --save-txt labels - save_json=False, # save a cocoapi-compatible JSON results file + save_json=False, # save a COCO-JSON results file project='runs/val', # save to project/name name='exp', # save to project/name exist_ok=False, # existing project/name ok, do not increment @@ -93,10 +138,6 @@ def run(data, iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95 niou = iouv.numel() - # Logging - log_imgs = 0 - if wandb_logger and wandb_logger.wandb: - log_imgs = min(wandb_logger.log_imgs, 100) # Dataloader if not training: if device.type != 'cpu': @@ -108,24 +149,24 @@ def run(data, seen = 0 confusion_matrix = ConfusionMatrix(nc=nc) names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)} - coco91class = coco80_to_coco91_class() + class_map = coco80_to_coco91_class() if is_coco else list(range(1000)) s = ('%20s' + '%11s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95') p, r, f1, mp, mr, map50, map, t0, t1, t2 = 0., 0., 0., 0., 0., 0., 0., 0., 0., 0. 
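# --- Editor's sketch (not part of the patch): a minimal smoke test for the new
# process_batch() helper introduced above, assuming val.py's imports (box_iou)
# are in scope. Two predictions (xyxy, conf, cls) are scored against one label
# (cls, xyxy) across the 10 IoU thresholds of mAP@0.5:0.95; the numbers are
# illustrative only.
import torch
iouv = torch.linspace(0.5, 0.95, 10)
labels = torch.tensor([[0., 10., 10., 50., 50.]])      # class 0, box (10,10)-(50,50)
preds = torch.tensor([[11., 11., 49., 49., 0.9, 0.],   # IoU ~0.90 with the label
                      [60., 60., 90., 90., 0.8, 0.]])  # no overlap with any label
correct = process_batch(preds, labels, iouv)           # (2, 10) bool matrix
assert correct[0, 0].item() and not correct[1].any()   # one TP at IoU 0.5, one FP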
loss = torch.zeros(3, device=device) - jdict, stats, ap, ap_class, wandb_images = [], [], [], [], [] + jdict, stats, ap, ap_class = [], [], [], [] for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)): - t_ = time_synchronized() + t_ = time_sync() img = img.to(device, non_blocking=True) img = img.half() if half else img.float() # uint8 to fp16/32 img /= 255.0 # 0 - 255 to 0.0 - 1.0 targets = targets.to(device) nb, _, height, width = img.shape # batch size, channels, height, width - t = time_synchronized() + t = time_sync() t0 += t - t_ # Run model out, train_out = model(img, augment=augment) # inference and training outputs - t1 += time_synchronized() - t + t1 += time_sync() - t # Compute loss if compute_loss: @@ -134,16 +175,16 @@ def run(data, # Run NMS targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling - t = time_synchronized() + t = time_sync() out = non_max_suppression(out, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls) - t2 += time_synchronized() - t + t2 += time_sync() - t # Statistics per image for si, pred in enumerate(out): labels = targets[targets[:, 0] == si, 1:] nl = len(labels) tcls = labels[:, 0].tolist() if nl else [] # target class - path = Path(paths[si]) + path, shape = Path(paths[si]), shapes[si][0] seen += 1 if len(pred) == 0: @@ -155,76 +196,27 @@ def run(data, if single_cls: pred[:, 5] = 0 predn = pred.clone() - scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0], shapes[si][1]) # native-space pred + scale_coords(img[si].shape[1:], predn[:, :4], shape, shapes[si][1]) # native-space pred - # Append to text file - if save_txt: - gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0]] # normalization gain whwh - for *xyxy, conf, cls in predn.tolist(): - xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh - line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format - with open(save_dir / 'labels' / (path.stem + '.txt'), 'a') as f: - f.write(('%g ' * len(line)).rstrip() % line + '\n') - - # W&B logging - Media Panel plots - if len(wandb_images) < log_imgs and wandb_logger.current_epoch > 0: # Check for test operation - if wandb_logger.current_epoch % wandb_logger.bbox_interval == 0: - box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, - "class_id": int(cls), - "box_caption": "%s %.3f" % (names[cls], conf), - "scores": {"class_score": conf}, - "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()] - boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space - wandb_images.append(wandb_logger.wandb.Image(img[si], boxes=boxes, caption=path.name)) - wandb_logger.log_training_progress(predn, path, names) if wandb_logger and wandb_logger.wandb_run else None - - # Append to pycocotools JSON dictionary - if save_json: - # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ... 
- image_id = int(path.stem) if path.stem.isnumeric() else path.stem - box = xyxy2xywh(predn[:, :4]) # xywh - box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner - for p, b in zip(pred.tolist(), box.tolist()): - jdict.append({'image_id': image_id, - 'category_id': coco91class[int(p[5])] if is_coco else int(p[5]), - 'bbox': [round(x, 3) for x in b], - 'score': round(p[4], 5)}) - - # Assign all predictions as incorrect - correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device) + # Evaluate if nl: - detected = [] # target indices - tcls_tensor = labels[:, 0] - - # target boxes - tbox = xywh2xyxy(labels[:, 1:5]) - scale_coords(img[si].shape[1:], tbox, shapes[si][0], shapes[si][1]) # native-space labels + tbox = xywh2xyxy(labels[:, 1:5]) # target boxes + scale_coords(img[si].shape[1:], tbox, shape, shapes[si][1]) # native-space labels + labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels + correct = process_batch(predn, labelsn, iouv) if plots: - confusion_matrix.process_batch(predn, torch.cat((labels[:, 0:1], tbox), 1)) - - # Per target class - for cls in torch.unique(tcls_tensor): - ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1) # target indices - pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1) # prediction indices - - # Search for detections - if pi.shape[0]: - # Prediction to target ious - ious, i = box_iou(predn[pi, :4], tbox[ti]).max(1) # best ious, indices - - # Append detections - detected_set = set() - for j in (ious > iouv[0]).nonzero(as_tuple=False): - d = ti[i[j]] # detected target - if d.item() not in detected_set: - detected_set.add(d.item()) - detected.append(d) - correct[pi[j]] = ious[j] > iouv # iou_thres is 1xn - if len(detected) == nl: # all targets already located in image - break - - # Append statistics (correct, conf, pcls, tcls) - stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls)) + confusion_matrix.process_batch(predn, labelsn) + else: + correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool) + stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls)) # (correct, conf, pcls, tcls) + + # Save/log + if save_txt: + save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / (path.stem + '.txt')) + if save_json: + save_one_json(predn, jdict, path, class_map) # append to COCO-JSON dictionary + if wandb_logger: + wandb_logger.val_one_image(pred, predn, path, names, img[si]) # Plot images if plots and batch_i < 3: @@ -264,15 +256,13 @@ def run(data, if wandb_logger and wandb_logger.wandb: val_batches = [wandb_logger.wandb.Image(str(f), caption=f.name) for f in sorted(save_dir.glob('val*.jpg'))] wandb_logger.log({"Validation": val_batches}) - if wandb_images: - wandb_logger.log({"Bounding Box Debugger/Images": wandb_images}) # Save JSON if save_json and len(jdict): w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights anno_json = str(Path(data.get('path', '../coco')) / 'annotations/instances_val2017.json') # annotations json pred_json = str(save_dir / f"{w}_predictions.json") # predictions json - print('\nEvaluating pycocotools mAP... saving %s...' % pred_json) + print(f'\nEvaluating pycocotools mAP... 
saving {pred_json}...') with open(pred_json, 'w') as f: json.dump(jdict, f) @@ -320,7 +310,7 @@ def parse_opt(): parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt') parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') - parser.add_argument('--save-json', action='store_true', help='save a cocoapi-compatible JSON results file') + parser.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file') parser.add_argument('--project', default='runs/val', help='save to project/name') parser.add_argument('--name', default='exp', help='save to project/name') parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') From b1be6850050959a09c3e26813646c52b2a73b1a0 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 19 Jul 2021 12:41:15 +0200 Subject: [PATCH 182/757] Module `super().__init__()` (#4065) * Module `super().__init__()` * remove NMS --- models/common.py | 42 ++++++++++++++++++------------------------ models/experimental.py | 12 ++++++------ models/yolo.py | 20 +++----------------- 3 files changed, 27 insertions(+), 47 deletions(-) diff --git a/models/common.py b/models/common.py index 4db90b54663e..901648b693a3 100644 --- a/models/common.py +++ b/models/common.py @@ -36,7 +36,7 @@ def DWConv(c1, c2, k=1, s=1, act=True): class Conv(nn.Module): # Standard convolution def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups - super(Conv, self).__init__() + super().__init__() self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False) self.bn = nn.BatchNorm2d(c2) self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity()) @@ -87,7 +87,7 @@ def forward(self, x): class Bottleneck(nn.Module): # Standard bottleneck def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion - super(Bottleneck, self).__init__() + super().__init__() c_ = int(c2 * e) # hidden channels self.cv1 = Conv(c1, c_, 1, 1) self.cv2 = Conv(c_, c2, 3, 1, g=g) @@ -100,7 +100,7 @@ def forward(self, x): class BottleneckCSP(nn.Module): # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super(BottleneckCSP, self).__init__() + super().__init__() c_ = int(c2 * e) # hidden channels self.cv1 = Conv(c1, c_, 1, 1) self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False) @@ -119,7 +119,7 @@ def forward(self, x): class C3(nn.Module): # CSP Bottleneck with 3 convolutions def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super(C3, self).__init__() + super().__init__() c_ = int(c2 * e) # hidden channels self.cv1 = Conv(c1, c_, 1, 1) self.cv2 = Conv(c1, c_, 1, 1) @@ -139,10 +139,18 @@ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): self.m = TransformerBlock(c_, c_, 4, n) +class C3SPP(C3): + # C3 module with SPP() + def __init__(self, c1, c2, k=(5, 9, 13), n=1, shortcut=True, g=1, e=0.5): + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2 * e) + self.m = SPP(c_, c_, k) + + class SPP(nn.Module): # Spatial pyramid pooling layer used in YOLOv3-SPP def __init__(self, c1, c2, k=(5, 9, 13)): - super(SPP, self).__init__() + super().__init__() 
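# --- Editor's note (not part of the patch): the recurring one-line change in
# PATCH 182, seen again just above, swaps the Python 2-compatible two-argument
# super() call for the zero-argument form; both are equivalent in Python 3:
import torch.nn as nn

class Example(nn.Module):
    def __init__(self):
        super().__init__()  # identical to super(Example, self).__init__()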
c_ = c1 // 2 # hidden channels self.cv1 = Conv(c1, c_, 1, 1) self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1) @@ -156,7 +164,7 @@ def forward(self, x): class Focus(nn.Module): # Focus wh information into c-space def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups - super(Focus, self).__init__() + super().__init__() self.conv = Conv(c1 * 4, c2, k, s, p, g, act) # self.contract = Contract(gain=2) @@ -196,27 +204,13 @@ def forward(self, x): class Concat(nn.Module): # Concatenate a list of tensors along dimension def __init__(self, dimension=1): - super(Concat, self).__init__() + super().__init__() self.d = dimension def forward(self, x): return torch.cat(x, self.d) -class NMS(nn.Module): - # Non-Maximum Suppression (NMS) module - conf = 0.25 # confidence threshold - iou = 0.45 # IoU threshold - classes = None # (optional list) filter by class - max_det = 1000 # maximum number of detections per image - - def __init__(self): - super(NMS, self).__init__() - - def forward(self, x): - return non_max_suppression(x[0], self.conf, iou_thres=self.iou, classes=self.classes, max_det=self.max_det) - - class AutoShape(nn.Module): # YOLOv5 input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS conf = 0.25 # NMS confidence threshold @@ -225,7 +219,7 @@ class AutoShape(nn.Module): max_det = 1000 # maximum number of detections per image def __init__(self, model): - super(AutoShape, self).__init__() + super().__init__() self.model = model.eval() def autoshape(self): @@ -292,7 +286,7 @@ def forward(self, imgs, size=640, augment=False, profile=False): class Detections: # YOLOv5 detections class for inference results def __init__(self, imgs, pred, files, times=None, names=None, shape=None): - super(Detections, self).__init__() + super().__init__() d = pred[0].device # device gn = [torch.tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.], device=d) for im in imgs] # normalizations self.imgs = imgs # list of images as numpy arrays @@ -383,7 +377,7 @@ def __len__(self): class Classify(nn.Module): # Classification head, i.e. 
x(b,c1,20,20) to x(b,c2) def __init__(self, c1, c2, k=1, s=1, p=None, g=1): # ch_in, ch_out, kernel, stride, padding, groups - super(Classify, self).__init__() + super().__init__() self.aap = nn.AdaptiveAvgPool2d(1) # to x(b,c1,1,1) self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g) # to x(b,c2,1,1) self.flat = nn.Flatten() diff --git a/models/experimental.py b/models/experimental.py index 30dc36192bc0..0d996d913b0c 100644 --- a/models/experimental.py +++ b/models/experimental.py @@ -12,7 +12,7 @@ class CrossConv(nn.Module): # Cross Convolution Downsample def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False): # ch_in, ch_out, kernel, stride, groups, expansion, shortcut - super(CrossConv, self).__init__() + super().__init__() c_ = int(c2 * e) # hidden channels self.cv1 = Conv(c1, c_, (1, k), (1, s)) self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g) @@ -25,7 +25,7 @@ def forward(self, x): class Sum(nn.Module): # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070 def __init__(self, n, weight=False): # n: number of inputs - super(Sum, self).__init__() + super().__init__() self.weight = weight # apply weights boolean self.iter = range(n - 1) # iter object if weight: @@ -46,7 +46,7 @@ def forward(self, x): class GhostConv(nn.Module): # Ghost Convolution https://github.com/huawei-noah/ghostnet def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups - super(GhostConv, self).__init__() + super().__init__() c_ = c2 // 2 # hidden channels self.cv1 = Conv(c1, c_, k, s, None, g, act) self.cv2 = Conv(c_, c_, 5, 1, None, c_, act) @@ -59,7 +59,7 @@ def forward(self, x): class GhostBottleneck(nn.Module): # Ghost Bottleneck https://github.com/huawei-noah/ghostnet def __init__(self, c1, c2, k=3, s=1): # ch_in, ch_out, kernel, stride - super(GhostBottleneck, self).__init__() + super().__init__() c_ = c2 // 2 self.conv = nn.Sequential(GhostConv(c1, c_, 1, 1), # pw DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw @@ -74,7 +74,7 @@ def forward(self, x): class MixConv2d(nn.Module): # Mixed Depthwise Conv https://arxiv.org/abs/1907.09595 def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True): - super(MixConv2d, self).__init__() + super().__init__() groups = len(k) if equal_ch: # equal c_ per group i = torch.linspace(0, groups - 1E-6, c2).floor() # c2 indices @@ -98,7 +98,7 @@ def forward(self, x): class Ensemble(nn.ModuleList): # Ensemble of models def __init__(self): - super(Ensemble, self).__init__() + super().__init__() def forward(self, x, augment=False, profile=False, visualize=False): y = [] diff --git a/models/yolo.py b/models/yolo.py index 3a3af9b5fbde..2e7a20f813e2 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -33,7 +33,7 @@ class Detect(nn.Module): onnx_dynamic = False # ONNX export parameter def __init__(self, nc=80, anchors=(), ch=(), inplace=True): # detection layer - super(Detect, self).__init__() + super().__init__() self.nc = nc # number of classes self.no = nc + 5 # number of outputs per anchor self.nl = len(anchors) # number of detection layers @@ -77,7 +77,7 @@ def _make_grid(nx=20, ny=20): class Model(nn.Module): def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, input channels, number of classes - super(Model, self).__init__() + super().__init__() if isinstance(cfg, dict): self.yaml = cfg # model dict else: # is *.yaml @@ -209,20 +209,6 @@ def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers self.info() return self - def nms(self, mode=True): # add or remove NMS 
module - present = type(self.model[-1]) is NMS # last layer is NMS - if mode and not present: - LOGGER.info('Adding NMS... ') - m = NMS() # module - m.f = -1 # from - m.i = self.model[-1].i + 1 # index - self.model.add_module(name='%s' % m.i, module=m) # add - self.eval() - elif not mode and present: - LOGGER.info('Removing NMS... ') - self.model = self.model[:-1] # remove - return self - def autoshape(self): # add AutoShape module LOGGER.info('Adding AutoShape... ') m = AutoShape(self) # wrap model @@ -250,7 +236,7 @@ def parse_model(d, ch): # model_dict, input_channels(3) n = max(round(n * gd), 1) if n > 1 else n # depth gain if m in [Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP, - C3, C3TR]: + C3, C3TR, C3SPP]: c1, c2 = ch[f], args[0] if c2 != no: # if not output c2 = make_divisible(c2 * gw, 8) From c8a98cb7cbbf5d05abb9b134ada0c75d0dc62a6c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 19 Jul 2021 13:10:21 +0200 Subject: [PATCH 183/757] Missing `nc` and `names` handling in check_dataset() (#4066) --- utils/general.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/utils/general.py b/utils/general.py index 846c1464c28c..08a3ff6539b2 100755 --- a/utils/general.py +++ b/utils/general.py @@ -231,6 +231,9 @@ def check_dataset(data, autodownload=True): if data.get(k): # prepend path data[k] = str(path / data[k]) if isinstance(data[k], str) else [str(path / x) for x in data[k]] + assert 'nc' in data, "Dataset 'nc' key missing." + if 'names' not in data: + data['names'] = [str(i) for i in range(data['nc'])] # assign class names if missing train, val, test, s = [data.get(x) for x in ('train', 'val', 'test', 'download')] if val: val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path From ee76a68f1d9a4d2d4ff995bda99ee2748fa49fe6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 19 Jul 2021 13:14:09 +0200 Subject: [PATCH 184/757] Created using Colaboratory --- tutorial.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 957c0e140f88..f316dc5f550a 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -1166,7 +1166,7 @@ "model = torch.hub.load('ultralytics/yolov5', 'yolov5s')\n", "\n", "# Images\n", - "dir = 'https://github.com/ultralytics/yolov5/raw/master/data/images/'\n", + "dir = 'https://ultralytics.com/images/'\n", "imgs = [dir + f for f in ('zidane.jpg', 'bus.jpg')] # batch of images\n", "\n", "# Inference\n", From 7fdcc77bf408a11357be4d6e9be65e4bb85e6a1c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 19 Jul 2021 13:23:19 +0200 Subject: [PATCH 185/757] Albumentations >= 1.0.3 (#4068) --- requirements.txt | 2 +- utils/augmentations.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index 886d21ce8047..f1629eafc65a 100755 --- a/requirements.txt +++ b/requirements.txt @@ -27,5 +27,5 @@ pandas # extras -------------------------------------- # Cython # for pycocotools https://github.com/cocodataset/cocoapi/issues/172 # pycocotools>=2.0 # COCO mAP -# albumentations>=1.0.2 +# albumentations>=1.0.3 thop # FLOPs computation diff --git a/utils/augmentations.py b/utils/augmentations.py index 69b835db0db9..cf64f2f9db1f 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -17,7 +17,7 @@ def __init__(self): self.transform = None try: import albumentations as A - check_version(A.__version__, '1.0.2') # version requirement + check_version(A.__version__, '1.0.3') # 
version requirement self.transform = A.Compose([ A.Blur(p=0.1), From 0cc7c587870f31f0fc175a74048ceca616870aea Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Mon, 19 Jul 2021 17:27:13 +0530 Subject: [PATCH 186/757] W&B: fix refactor bugs (#4069) --- utils/wandb_logging/wandb_utils.py | 8 ++++---- val.py | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/utils/wandb_logging/wandb_utils.py b/utils/wandb_logging/wandb_utils.py index a7e84ca100e4..03f2d151bdc3 100644 --- a/utils/wandb_logging/wandb_utils.py +++ b/utils/wandb_logging/wandb_utils.py @@ -106,6 +106,7 @@ def __init__(self, opt, name, run_id, data_dict, job_type='Training'): self.data_dict = data_dict self.bbox_media_panel_images = [] self.val_table_path_map = None + self.max_imgs_to_log = 16 # It's more elegant to stick to 1 wandb.init call, but useful config data is overwritten in the WandbLogger's wandb.init call if isinstance(opt.resume, str): # checks resume from artifact if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): @@ -133,7 +134,7 @@ def __init__(self, opt, name, run_id, data_dict, job_type='Training'): if not opt.resume: wandb_data_dict = self.check_and_upload_dataset(opt) if opt.upload_dataset else data_dict # Info useful for resuming from artifacts - self.wandb_run.config.update({'opt': vars(opt), 'data_dict': data_dict}, allow_val_change=True) + self.wandb_run.config.update({'opt': vars(opt), 'data_dict': wandb_data_dict}, allow_val_change=True) self.data_dict = self.setup_training(opt, data_dict) if self.job_type == 'Dataset Creation': self.data_dict = self.check_and_upload_dataset(opt) @@ -152,7 +153,7 @@ def check_and_upload_dataset(self, opt): return wandb_data_dict def setup_training(self, opt, data_dict): - self.log_dict, self.current_epoch, self.log_imgs = {}, 0, 16 # Logging Constants + self.log_dict, self.current_epoch = {}, 0 self.bbox_interval = opt.bbox_interval if isinstance(opt.resume, str): modeldir, _ = self.download_model_artifact(opt) @@ -317,8 +318,7 @@ def val_one_image(self, pred, predn, path, names, im): if self.val_table and self.result_table: # Log Table if Val dataset is uploaded as artifact self.log_training_progress(predn, path, names) else: # Default to bbox media panel if Val artifact not found - log_imgs = min(self.log_imgs, 100) - if len(self.bbox_media_panel_images) < log_imgs and self.current_epoch > 0: + if len(self.bbox_media_panel_images) < self.max_imgs_to_log and self.current_epoch > 0: if self.current_epoch % self.bbox_interval == 0: box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, "class_id": int(cls), diff --git a/val.py b/val.py index 5a8486720577..e493dfe66ae8 100644 --- a/val.py +++ b/val.py @@ -215,7 +215,7 @@ def run(data, save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / (path.stem + '.txt')) if save_json: save_one_json(predn, jdict, path, class_map) # append to COCO-JSON dictionary - if wandb_logger: + if wandb_logger and wandb_logger.wandb_run: wandb_logger.val_one_image(pred, predn, path, names, img[si]) # Plot images From 442a7abdf263bb24c51e494b4fd41d81cb097943 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 20 Jul 2021 13:21:52 +0200 Subject: [PATCH 187/757] Refactor `export.py` (#4080) * Refactor `export.py` * cleanup * Update check_requirements() * Update export.py --- export.py | 148 +++++++++++++++++++++++++++++------------------------- 1 file changed, 80 insertions(+), 68 deletions(-) diff --git a/export.py b/export.py index b7ff0748ba93..34cd21449bc0 100644 ---
a/export.py +++ b/export.py @@ -24,6 +24,78 @@ from utils.torch_utils import select_device +def export_torchscript(model, img, file, optimize): + # TorchScript model export + prefix = colorstr('TorchScript:') + try: + print(f'\n{prefix} starting export with torch {torch.__version__}...') + f = file.with_suffix('.torchscript.pt') + ts = torch.jit.trace(model, img, strict=False) + (optimize_for_mobile(ts) if optimize else ts).save(f) + print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + return ts + except Exception as e: + print(f'{prefix} export failure: {e}') + + +def export_onnx(model, img, file, opset_version, train, dynamic, simplify): + # ONNX model export + prefix = colorstr('ONNX:') + try: + check_requirements(('onnx', 'onnx-simplifier')) + import onnx + + print(f'{prefix} starting export with onnx {onnx.__version__}...') + f = file.with_suffix('.onnx') + torch.onnx.export(model, img, f, verbose=False, opset_version=opset_version, + training=torch.onnx.TrainingMode.TRAINING if train else torch.onnx.TrainingMode.EVAL, + do_constant_folding=not train, + input_names=['images'], + output_names=['output'], + dynamic_axes={'images': {0: 'batch', 2: 'height', 3: 'width'}, # shape(1,3,640,640) + 'output': {0: 'batch', 1: 'anchors'} # shape(1,25200,85) + } if dynamic else None) + + # Checks + model_onnx = onnx.load(f) # load onnx model + onnx.checker.check_model(model_onnx) # check onnx model + # print(onnx.helper.printable_graph(model_onnx.graph)) # print + + # Simplify + if simplify: + try: + import onnxsim + + print(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...') + model_onnx, check = onnxsim.simplify( + model_onnx, + dynamic_input_shape=dynamic, + input_shapes={'images': list(img.shape)} if dynamic else None) + assert check, 'assert check failed' + onnx.save(model_onnx, f) + except Exception as e: + print(f'{prefix} simplifier failure: {e}') + print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + except Exception as e: + print(f'{prefix} export failure: {e}') + + +def export_coreml(ts_model, img, file, train): + # CoreML model export + prefix = colorstr('CoreML:') + try: + import coremltools as ct + + print(f'{prefix} starting export with coremltools {ct.__version__}...') + f = file.with_suffix('.mlmodel') + assert train, 'CoreML exports should be placed in model.train() mode with `python export.py --train`' + model = ct.convert(ts_model, inputs=[ct.ImageType('image', shape=img.shape, scale=1 / 255.0, bias=[0, 0, 0])]) + model.save(f) + print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + except Exception as e: + print(f'{prefix} export failure: {e}') + + def run(weights='./yolov5s.pt', # weights path img_size=(640, 640), # image (height, width) batch_size=1, # batch size @@ -40,12 +112,13 @@ def run(weights='./yolov5s.pt', # weights path t = time.time() include = [x.lower() for x in include] img_size *= 2 if len(img_size) == 1 else 1 # expand + file = Path(weights) # Load PyTorch model device = select_device(device) assert not (device.type == 'cpu' and half), '--half only compatible with GPU export, i.e. 
use --device 0' model = attempt_load(weights, map_location=device) # load FP32 model - labels = model.names + names = model.names # Input gs = int(max(model.stride)) # grid size (max stride) @@ -57,7 +130,6 @@ def run(weights='./yolov5s.pt', # weights path img, model = img.half(), model.half() # to FP16 model.train() if train else model.eval() # training mode = no Detect() layer grid construction for k, m in model.named_modules(): - m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility if isinstance(m, Conv): # assign export-friendly activations if isinstance(m.act, nn.Hardswish): m.act = Hardswish() @@ -72,73 +144,13 @@ def run(weights='./yolov5s.pt', # weights path y = model(img) # dry runs print(f"\n{colorstr('PyTorch:')} starting from {weights} ({file_size(weights):.1f} MB)") - # TorchScript export ----------------------------------------------------------------------------------------------- - if 'torchscript' in include or 'coreml' in include: - prefix = colorstr('TorchScript:') - try: - print(f'\n{prefix} starting export with torch {torch.__version__}...') - f = weights.replace('.pt', '.torchscript.pt') # filename - ts = torch.jit.trace(model, img, strict=False) - (optimize_for_mobile(ts) if optimize else ts).save(f) - print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - except Exception as e: - print(f'{prefix} export failure: {e}') - - # ONNX export ------------------------------------------------------------------------------------------------------ + # Exports if 'onnx' in include: - prefix = colorstr('ONNX:') - try: - import onnx - - print(f'{prefix} starting export with onnx {onnx.__version__}...') - f = weights.replace('.pt', '.onnx') # filename - torch.onnx.export(model, img, f, verbose=False, opset_version=opset_version, - training=torch.onnx.TrainingMode.TRAINING if train else torch.onnx.TrainingMode.EVAL, - do_constant_folding=not train, - input_names=['images'], - output_names=['output'], - dynamic_axes={'images': {0: 'batch', 2: 'height', 3: 'width'}, # shape(1,3,640,640) - 'output': {0: 'batch', 1: 'anchors'} # shape(1,25200,85) - } if dynamic else None) - - # Checks - model_onnx = onnx.load(f) # load onnx model - onnx.checker.check_model(model_onnx) # check onnx model - # print(onnx.helper.printable_graph(model_onnx.graph)) # print - - # Simplify - if simplify: - try: - check_requirements(['onnx-simplifier']) - import onnxsim - - print(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...') - model_onnx, check = onnxsim.simplify( - model_onnx, - dynamic_input_shape=dynamic, - input_shapes={'images': list(img.shape)} if dynamic else None) - assert check, 'assert check failed' - onnx.save(model_onnx, f) - except Exception as e: - print(f'{prefix} simplifier failure: {e}') - print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - except Exception as e: - print(f'{prefix} export failure: {e}') - - # CoreML export ---------------------------------------------------------------------------------------------------- - if 'coreml' in include: - prefix = colorstr('CoreML:') - try: - import coremltools as ct - - print(f'{prefix} starting export with coremltools {ct.__version__}...') - assert train, 'CoreML exports should be placed in model.train() mode with `python export.py --train`' - model = ct.convert(ts, inputs=[ct.ImageType('image', shape=img.shape, scale=1 / 255.0, bias=[0, 0, 0])]) - f = weights.replace('.pt', '.mlmodel') # filename - model.save(f) - print(f'{prefix} export success, saved as {f} 
({file_size(f):.1f} MB)') - except Exception as e: - print(f'{prefix} export failure: {e}') + export_onnx(model, img, file, opset_version, train, dynamic, simplify) + if 'torchscript' in include or 'coreml' in include: + ts = export_torchscript(model, img, file, optimize) + if 'coreml' in include: + export_coreml(ts, img, file, train) # Finish print(f'\nExport complete ({time.time() - t:.2f}s). Visualize with https://github.com/lutzroeder/netron.') From 3bef77f5cb7eda3fa3cae53f2579cd3363c99744 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 20 Jul 2021 18:42:27 +0200 Subject: [PATCH 188/757] Addition refactor `export.py` (#4089) * Addition refactor `export.py` * Update export.py --- export.py | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/export.py b/export.py index 34cd21449bc0..1e3a5ed6a4af 100644 --- a/export.py +++ b/export.py @@ -45,7 +45,7 @@ def export_onnx(model, img, file, opset_version, train, dynamic, simplify): check_requirements(('onnx', 'onnx-simplifier')) import onnx - print(f'{prefix} starting export with onnx {onnx.__version__}...') + print(f'\n{prefix} starting export with onnx {onnx.__version__}...') f = file.with_suffix('.onnx') torch.onnx.export(model, img, f, verbose=False, opset_version=opset_version, training=torch.onnx.TrainingMode.TRAINING if train else torch.onnx.TrainingMode.EVAL, @@ -80,16 +80,17 @@ def export_onnx(model, img, file, opset_version, train, dynamic, simplify): print(f'{prefix} export failure: {e}') -def export_coreml(ts_model, img, file, train): +def export_coreml(model, img, file): # CoreML model export prefix = colorstr('CoreML:') try: import coremltools as ct - print(f'{prefix} starting export with coremltools {ct.__version__}...') + print(f'\n{prefix} starting export with coremltools {ct.__version__}...') f = file.with_suffix('.mlmodel') - assert train, 'CoreML exports should be placed in model.train() mode with `python export.py --train`' - model = ct.convert(ts_model, inputs=[ct.ImageType('image', shape=img.shape, scale=1 / 255.0, bias=[0, 0, 0])]) + model.train() # CoreML exports should be placed in model.train() mode + ts = torch.jit.trace(model, img, strict=False) # TorchScript model + model = ct.convert(ts, inputs=[ct.ImageType('image', shape=img.shape, scale=1 / 255.0, bias=[0, 0, 0])]) model.save(f) print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') except Exception as e: @@ -145,12 +146,12 @@ def run(weights='./yolov5s.pt', # weights path print(f"\n{colorstr('PyTorch:')} starting from {weights} ({file_size(weights):.1f} MB)") # Exports + if 'torchscript' in include: + export_torchscript(model, img, file, optimize) if 'onnx' in include: export_onnx(model, img, file, opset_version, train, dynamic, simplify) - if 'torchscript' in include or 'coreml' in include: - ts = export_torchscript(model, img, file, optimize) - if 'coreml' in include: - export_coreml(ts, img, file, train) + if 'coreml' in include: + export_coreml(model, img, file) # Finish print(f'\nExport complete ({time.time() - t:.2f}s). 
Visualize with https://github.com/lutzroeder/netron.') From 2c073cd207bae1163b472c561d3fd31b1d2ba870 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 21 Jul 2021 16:50:47 +0200 Subject: [PATCH 189/757] Add train.py ``--img-size` floor (#4099) --- train.py | 2 +- utils/general.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/train.py b/train.py index b1afaf8ada75..9a844ebac0de 100644 --- a/train.py +++ b/train.py @@ -207,7 +207,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Image sizes gs = max(int(model.stride.max()), 32) # grid size (max stride) nl = model.model[-1].nl # number of detection layers (used for scaling hyp['obj']) - imgsz = check_img_size(opt.imgsz, gs) # verify imgsz is gs-multiple + imgsz = check_img_size(opt.imgsz, gs, floor=gs * 2) # verify imgsz is gs-multiple # DP mode if cuda and RANK == -1 and torch.cuda.device_count() > 1: diff --git a/utils/general.py b/utils/general.py index 08a3ff6539b2..fabd0f35fe9e 100755 --- a/utils/general.py +++ b/utils/general.py @@ -181,11 +181,11 @@ def check_requirements(requirements='requirements.txt', exclude=()): print(emojis(s)) # emoji-safe -def check_img_size(img_size, s=32): +def check_img_size(img_size, s=32, floor=0): # Verify img_size is a multiple of stride s - new_size = make_divisible(img_size, int(s)) # ceil gs-multiple + new_size = max(make_divisible(img_size, int(s)), floor) # ceil gs-multiple if new_size != img_size: - print('WARNING: --img-size %g must be multiple of max stride %g, updating to %g' % (img_size, s, new_size)) + print(f'WARNING: --img-size {img_size} must be multiple of max stride {s}, updating to {new_size}') return new_size From 4bad9147611238f31a66ba5414b35e8ca604ea37 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 22 Jul 2021 17:22:11 +0200 Subject: [PATCH 190/757] Update resume.py (#4115) --- utils/aws/resume.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/aws/resume.py b/utils/aws/resume.py index 4b0d4246b594..e869834e96e7 100644 --- a/utils/aws/resume.py +++ b/utils/aws/resume.py @@ -28,7 +28,7 @@ if ddp: # multi-GPU port += 1 - cmd = f'python -m torch.distributed.launch --nproc_per_node {nd} --master_port {port} train.py --resume {last}' + cmd = f'python -m torch.distributed.run --nproc_per_node {nd} --master_port {port} train.py --resume {last}' else: # single-GPU cmd = f'python train.py --resume {last}' From 4495e00016cb18b35011bf99da1beb4eb639186b Mon Sep 17 00:00:00 2001 From: imyhxy Date: Fri, 23 Jul 2021 20:55:00 +0800 Subject: [PATCH 191/757] Fix indentation in `log_training_progress()` (#4126) --- utils/wandb_logging/wandb_utils.py | 40 +++++++++++++++--------------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/utils/wandb_logging/wandb_utils.py b/utils/wandb_logging/wandb_utils.py index 03f2d151bdc3..4986e01afe36 100644 --- a/utils/wandb_logging/wandb_utils.py +++ b/utils/wandb_logging/wandb_utils.py @@ -293,26 +293,26 @@ def create_dataset_table(self, dataset, class_to_id, name='dataset'): return artifact def log_training_progress(self, predn, path, names): - class_set = wandb.Classes([{'id': id, 'name': name} for id, name in names.items()]) - box_data = [] - total_conf = 0 - for *xyxy, conf, cls in predn.tolist(): - if conf >= 0.25: - box_data.append( - {"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, - "class_id": int(cls), - "box_caption": "%s %.3f" % (names[cls], conf), - "scores": {"class_score": conf}, - "domain": "pixel"}) - total_conf = total_conf + 
conf - boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space - id = self.val_table_path_map[Path(path).name] - self.result_table.add_data(self.current_epoch, - id, - self.val_table.data[id][1], - wandb.Image(self.val_table.data[id][1], boxes=boxes, classes=class_set), - total_conf / max(1, len(box_data)) - ) + class_set = wandb.Classes([{'id': id, 'name': name} for id, name in names.items()]) + box_data = [] + total_conf = 0 + for *xyxy, conf, cls in predn.tolist(): + if conf >= 0.25: + box_data.append( + {"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, + "class_id": int(cls), + "box_caption": "%s %.3f" % (names[cls], conf), + "scores": {"class_score": conf}, + "domain": "pixel"}) + total_conf = total_conf + conf + boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space + id = self.val_table_path_map[Path(path).name] + self.result_table.add_data(self.current_epoch, + id, + self.val_table.data[id][1], + wandb.Image(self.val_table.data[id][1], boxes=boxes, classes=class_set), + total_conf / max(1, len(box_data)) + ) def val_one_image(self, pred, predn, path, names, im): if self.val_table and self.result_table: # Log Table if Val dataset is uploaded as artifact From 39ef6c7a801eb666dbea5b36c8223517b84d9b81 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 24 Jul 2021 12:36:07 +0200 Subject: [PATCH 192/757] Update README.md (#4134) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 7dff1a0efd33..c27fbc6fa639 100755 --- a/README.md +++ b/README.md @@ -224,7 +224,7 @@ We are super excited about our first-ever Ultralytics YOLOv5 🚀 EXPORT Competi * APtest denotes COCO [test-dev2017](http://cocodataset.org/#upload) server results, all other AP results denote val2017 accuracy. * AP values are for single-model single-scale unless otherwise noted. **Reproduce mAP** by `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` - * SpeedGPU averaged over 5000 COCO val2017 images using a GCP [n1-standard-16](https://cloud.google.com/compute/docs/machine-types#n1_standard_machine_types) V100 instance, and includes FP16 inference, postprocessing and NMS. **Reproduce speed** by `python val.py --data coco.yaml --img 640 --conf 0.25 --iou 0.45` + * SpeedGPU averaged over 5000 COCO val2017 images using a GCP [n1-standard-16](https://cloud.google.com/compute/docs/machine-types#n1_standard_machine_types) V100 instance, and includes FP16 inference, postprocessing and NMS. **Reproduce speed** by `python val.py --data coco.yaml --img 640 --conf 0.25 --iou 0.45 --half` * All checkpoints are trained to 300 epochs with default settings and hyperparameters (no autoaugmentation). * Test Time Augmentation ([TTA](https://github.com/ultralytics/yolov5/issues/303)) includes reflection and scale augmentation. **Reproduce TTA** by `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`
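Editor's note: the README reproduction commands above can also be driven from Python; a minimal sketch, assuming a local coco.yaml and yolov5s.pt checkpoint (argument names mirror val.py's run() signature shown earlier and are illustrative, not authoritative):

    import val
    val.run(data='coco.yaml', weights='yolov5s.pt', imgsz=640,
            conf_thres=0.25, iou_thres=0.45, half=True)  # README speed-test settings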
From 2e538443b721a8fa1bca2c51b59f5400fdd38bec Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 24 Jul 2021 13:08:51 +0200 Subject: [PATCH 193/757] ONNX inference update (#4073) --- detect.py | 54 ++++++++++++++++++++++++++++++++---------------------- 1 file changed, 32 insertions(+), 22 deletions(-) diff --git a/detect.py b/detect.py index 73f962398442..80517f342a41 100644 --- a/detect.py +++ b/detect.py @@ -64,18 +64,23 @@ def run(weights='yolov5s.pt', # model.pt path(s) half &= device.type != 'cpu' # half precision only supported on CUDA # Load model - model = attempt_load(weights, map_location=device) # load FP32 model - stride = int(model.stride.max()) # model stride + w = weights[0] if isinstance(weights, list) else weights + classify, pt, onnx = False, w.endswith('.pt'), w.endswith('.onnx') # inference type + stride, names = 64, [f'class{i}' for i in range(1000)] # assign defaults + if pt: + model = attempt_load(weights, map_location=device) # load FP32 model + stride = int(model.stride.max()) # model stride + names = model.module.names if hasattr(model, 'module') else model.names # get class names + if half: + model.half() # to FP16 + if classify: # second-stage classifier + modelc = load_classifier(name='resnet50', n=2) # initialize + modelc.load_state_dict(torch.load('resnet50.pt', map_location=device)['model']).to(device).eval() + elif onnx: + check_requirements(('onnx', 'onnxruntime')) + import onnxruntime + session = onnxruntime.InferenceSession(w, None) imgsz = check_img_size(imgsz, s=stride) # check image size - names = model.module.names if hasattr(model, 'module') else model.names # get class names - if half: - model.half() # to FP16 - - # Second-stage classifier - classify = False - if classify: - modelc = load_classifier(name='resnet50', n=2) # initialize - modelc.load_state_dict(torch.load('resnet50.pt', map_location=device)['model']).to(device).eval() # Dataloader if webcam: @@ -89,31 +94,36 @@ def run(weights='yolov5s.pt', # model.pt path(s) vid_path, vid_writer = [None] * bs, [None] * bs # Run inference - if device.type != 'cpu': + if pt and device.type != 'cpu': model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once t0 = time.time() for path, img, im0s, vid_cap in dataset: - img = torch.from_numpy(img).to(device) - img = img.half() if half else img.float() # uint8 to fp16/32 + if pt: + img = torch.from_numpy(img).to(device) + img = img.half() if half else img.float() # uint8 to fp16/32 + elif onnx: + img = img.astype('float32') img /= 255.0 # 0 - 255 to 0.0 - 1.0 - if img.ndimension() == 3: - img = img.unsqueeze(0) + if len(img.shape) == 3: + img = img[None] # expand for batch dim # Inference t1 = time_sync() - pred = model(img, - augment=augment, - visualize=increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False)[0] + if pt: + visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False + pred = model(img, augment=augment, visualize=visualize)[0] + elif onnx: + pred = torch.tensor(session.run([session.get_outputs()[0].name], {session.get_inputs()[0].name: img})) - # Apply NMS + # NMS pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det) t2 = time_sync() - # Apply Classifier + # Second-stage classifier (optional) if classify: pred = apply_classifier(pred, modelc, img, im0s) - # Process detections + # Process predictions for i, det in enumerate(pred): # detections per image if webcam: # batch_size >= 1 p, s, im0, frame = 
path[i], f'{i}: ', im0s[i].copy(), dataset.count From 264be1a6162780d46feb3e9eec9b43e3ff157ea0 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 24 Jul 2021 13:19:12 +0200 Subject: [PATCH 194/757] Rename `opset_version` to `opset` (#4135) --- export.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/export.py b/export.py index 1e3a5ed6a4af..ceee15e1644f 100644 --- a/export.py +++ b/export.py @@ -38,7 +38,7 @@ def export_torchscript(model, img, file, optimize): print(f'{prefix} export failure: {e}') -def export_onnx(model, img, file, opset_version, train, dynamic, simplify): +def export_onnx(model, img, file, opset, train, dynamic, simplify): # ONNX model export prefix = colorstr('ONNX:') try: @@ -47,7 +47,7 @@ def export_onnx(model, img, file, opset_version, train, dynamic, simplify): print(f'\n{prefix} starting export with onnx {onnx.__version__}...') f = file.with_suffix('.onnx') - torch.onnx.export(model, img, f, verbose=False, opset_version=opset_version, + torch.onnx.export(model, img, f, verbose=False, opset_version=opset, training=torch.onnx.TrainingMode.TRAINING if train else torch.onnx.TrainingMode.EVAL, do_constant_folding=not train, input_names=['images'], @@ -108,7 +108,7 @@ def run(weights='./yolov5s.pt', # weights path optimize=False, # TorchScript: optimize for mobile dynamic=False, # ONNX: dynamic axes simplify=False, # ONNX: simplify model - opset_version=12, # ONNX: opset version + opset=12, # ONNX: opset version ): t = time.time() include = [x.lower() for x in include] @@ -149,7 +149,7 @@ def run(weights='./yolov5s.pt', # weights path if 'torchscript' in include: export_torchscript(model, img, file, optimize) if 'onnx' in include: - export_onnx(model, img, file, opset_version, train, dynamic, simplify) + export_onnx(model, img, file, opset, train, dynamic, simplify) if 'coreml' in include: export_coreml(model, img, file) @@ -170,7 +170,7 @@ def parse_opt(): parser.add_argument('--optimize', action='store_true', help='TorchScript: optimize for mobile') parser.add_argument('--dynamic', action='store_true', help='ONNX: dynamic axes') parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model') - parser.add_argument('--opset-version', type=int, default=12, help='ONNX: opset version') + parser.add_argument('--opset', type=int, default=12, help='ONNX: opset version') opt = parser.parse_args() return opt From 63dd65e7edd96debbefa81e22f3d5cfb07dd2ba4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 24 Jul 2021 16:11:39 +0200 Subject: [PATCH 195/757] Update train.py (#4136) * Refactor train.py * Update imports * Update imports * Update optimizer * cleanup --- train.py | 104 +++++++++++++++++++++-------------------------- utils/general.py | 2 +- utils/loss.py | 2 +- 3 files changed, 49 insertions(+), 59 deletions(-) diff --git a/train.py b/train.py index 9a844ebac0de..ad13ed6a52e4 100644 --- a/train.py +++ b/train.py @@ -17,15 +17,13 @@ import math import numpy as np +import torch import torch.distributed as dist import torch.nn as nn -import torch.nn.functional as F -import torch.optim as optim -import torch.optim.lr_scheduler as lr_scheduler -import torch.utils.data import yaml from torch.cuda import amp from torch.nn.parallel import DistributedDataParallel as DDP +from torch.optim import Adam, SGD, lr_scheduler from torch.utils.tensorboard import SummaryWriter from tqdm import tqdm @@ -58,16 +56,13 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary device, ): save_dir, epochs, batch_size, weights, single_cls, 
evolve, data, cfg, resume, noval, nosave, workers, = \ - opt.save_dir, opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \ + Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \ opt.resume, opt.noval, opt.nosave, opt.workers # Directories - save_dir = Path(save_dir) - wdir = save_dir / 'weights' - wdir.mkdir(parents=True, exist_ok=True) # make dir - last = wdir / 'last.pt' - best = wdir / 'best.pt' - results_file = save_dir / 'results.txt' + w = save_dir / 'weights' # weights dir + w.mkdir(parents=True, exist_ok=True) # make dir + last, best, results_file = w / 'last.pt', w / 'best.pt', save_dir / 'results.txt' # Hyperparameters if isinstance(hyp, str): @@ -92,7 +87,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary loggers = {'wandb': None, 'tb': None} # loggers dict if RANK in [-1, 0]: # TensorBoard - if not evolve: + if plots: prefix = colorstr('tensorboard: ') LOGGER.info(f"{prefix}Start with 'tensorboard --logdir {opt.project}', view at http://localhost:6006/") loggers['tb'] = SummaryWriter(str(save_dir)) @@ -105,11 +100,11 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary loggers['wandb'] = wandb_logger.wandb if loggers['wandb']: data_dict = wandb_logger.data_dict - weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp # may update weights, epochs if resuming + weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp # may update values if resuming nc = 1 if single_cls else int(data_dict['nc']) # number of classes names = ['item'] if single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names - assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, data) # check + assert len(names) == nc, f'{len(names)} names found for nc={nc} dataset in {data}' # check is_coco = data.endswith('coco.yaml') and nc == 80 # COCO dataset # Model @@ -120,23 +115,22 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary ckpt = torch.load(weights, map_location=device) # load checkpoint model = Model(cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create exclude = ['anchor'] if (cfg or hyp.get('anchors')) and not resume else [] # exclude keys - state_dict = ckpt['model'].float().state_dict() # to FP32 - state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude) # intersect - model.load_state_dict(state_dict, strict=False) # load - LOGGER.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights)) # report + csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 + csd = intersect_dicts(csd, model.state_dict(), exclude=exclude) # intersect + model.load_state_dict(csd, strict=False) # load + LOGGER.info(f'Transferred {len(csd)}/{len(model.state_dict())} items from {weights}') # report else: model = Model(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create with torch_distributed_zero_first(RANK): check_dataset(data_dict) # check - train_path = data_dict['train'] - val_path = data_dict['val'] + train_path, val_path = data_dict['train'], data_dict['val'] # Freeze freeze = [] # parameter names to freeze (full or partial) for k, v in model.named_parameters(): v.requires_grad = True # train all layers if any(x in k for x in freeze): - print('freezing %s' % k) + print(f'freezing {k}') v.requires_grad = False # Optimizer @@ -145,33 +139,32 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary hyp['weight_decay'] *= batch_size * accumulate / nbs # 
scale weight_decay LOGGER.info(f"Scaled weight_decay = {hyp['weight_decay']}") - pg0, pg1, pg2 = [], [], [] # optimizer parameter groups - for k, v in model.named_modules(): - if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter): - pg2.append(v.bias) # biases - if isinstance(v, nn.BatchNorm2d): - pg0.append(v.weight) # no decay - elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter): - pg1.append(v.weight) # apply decay + g0, g1, g2 = [], [], [] # optimizer parameter groups + for v in model.modules(): + if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter): # bias + g2.append(v.bias) + if isinstance(v, nn.BatchNorm2d): # weight with decay + g0.append(v.weight) + elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter): # weight without decay + g1.append(v.weight) if opt.adam: - optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum + optimizer = Adam(g0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum else: - optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True) + optimizer = SGD(g0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True) - optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']}) # add pg1 with weight_decay - optimizer.add_param_group({'params': pg2}) # add pg2 (biases) - LOGGER.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0))) - del pg0, pg1, pg2 + optimizer.add_param_group({'params': g1, 'weight_decay': hyp['weight_decay']}) # add g1 with weight_decay + optimizer.add_param_group({'params': g2}) # add g2 (biases) + LOGGER.info(f"{colorstr('optimizer:')} {type(optimizer).__name__} with parameter groups " + f"{len(g0)} weight, {len(g1)} weight (no decay), {len(g2)} bias") + del g0, g1, g2 - # Scheduler https://arxiv.org/pdf/1812.01187.pdf - # https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR + # Scheduler if opt.linear_lr: lf = lambda x: (1 - x / (epochs - 1)) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear else: lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf'] - scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) - # plot_lr_scheduler(optimizer, scheduler, epochs) + scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) # plot_lr_scheduler(optimizer, scheduler, epochs) # EMA ema = ModelEMA(model) if RANK in [-1, 0] else None @@ -196,13 +189,12 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Epochs start_epoch = ckpt['epoch'] + 1 if resume: - assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % (weights, epochs) + assert start_epoch > 0, f'{weights} training to {epochs} epochs is finished, nothing to resume.' if epochs < start_epoch: - LOGGER.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' % - (weights, ckpt['epoch'], epochs)) + LOGGER.info(f"{weights} has been trained for {ckpt['epoch']} epochs. 
Fine-tuning for {epochs} more epochs.") epochs += ckpt['epoch'] # finetune additional epochs - del ckpt, state_dict + del ckpt, csd # Image sizes gs = max(int(model.stride.max()), 32) # grid size (max stride) @@ -217,7 +209,6 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # SyncBatchNorm if opt.sync_bn and cuda and RANK != -1: - raise Exception('can not train with --sync-bn, known issue https://github.com/ultralytics/yolov5/issues/3998') model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device) LOGGER.info('Using SyncBatchNorm()') @@ -228,7 +219,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary prefix=colorstr('train: ')) mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class nb = len(train_loader) # number of batches - assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g' % (mlc, nc, data, nc - 1) + assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. Possible class labels are 0-{nc - 1}' # Process 0 if RANK in [-1, 0]: @@ -261,7 +252,6 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary hyp['label_smoothing'] = opt.label_smoothing model.nc = nc # attach number of classes to model model.hyp = hyp # attach hyperparameters to model - model.gr = 1.0 # iou loss ratio (obj_loss = 1.0 or iou) model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc # attach class weights model.names = names @@ -315,7 +305,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Warmup if ni <= nw: xi = [0, nw] # x interp - # model.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou) + # compute_loss.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou) accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round()) for j, x in enumerate(optimizer.param_groups): # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0 @@ -329,7 +319,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary sf = sz / max(imgs.shape[2:]) # scale factor if sf != 1: ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple) - imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False) + imgs = nn.functional.interpolate(imgs, size=ns, mode='bilinear', align_corners=False) # Forward with amp.autocast(enabled=cuda): @@ -355,7 +345,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Print if RANK in [-1, 0]: mloss = (mloss * i + loss_items) / (i + 1) # update mean losses - mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0) # (GB) + mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G' # (GB) s = ('%10s' * 2 + '%10.4g' * 6) % ( f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1]) pbar.set_description(s) @@ -381,7 +371,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # DDP process 0 or single-GPU if RANK in [-1, 0]: # mAP - ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride', 'class_weights']) + ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'names', 'stride', 'class_weights']) final_epoch = epoch + 1 == epochs if not noval or final_epoch: # Calculate mAP wandb_logger.current_epoch = epoch + 1 @@ -457,6 +447,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary batch_size=batch_size // WORLD_SIZE * 2, imgsz=imgsz, model=attempt_load(m, device).half(), + iou_thres=0.7, # NMS IoU threshold for best pycocotools results single_cls=single_cls, dataloader=val_loader, 
save_dir=save_dir, @@ -525,8 +516,7 @@ def main(opt): check_requirements(exclude=['thop']) # Resume - wandb_run = check_wandb_resume(opt) - if opt.resume and not wandb_run: # resume an interrupted run + if opt.resume and not check_wandb_resume(opt): # resume an interrupted run ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run() # specified or most recent path assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist' with open(Path(ckpt).parent.parent / 'opt.yaml') as f: @@ -534,7 +524,6 @@ def main(opt): opt.cfg, opt.weights, opt.resume = '', ckpt, True # reinstate LOGGER.info(f'Resuming training from {ckpt}') else: - # opt.hyp = opt.hyp or ('hyp.finetune.yaml' if opt.weights else 'hyp.scratch.yaml') opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_file(opt.cfg), check_file(opt.hyp) # check files assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified' opt.name = 'evolve' if opt.evolve else opt.name @@ -545,11 +534,13 @@ def main(opt): if LOCAL_RANK != -1: from datetime import timedelta assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' + assert opt.batch_size % WORLD_SIZE == 0, '--batch-size must be multiple of CUDA device count' + assert not opt.image_weights, '--image-weights argument is not compatible with DDP training' + assert not opt.evolve, '--evolve argument is not compatible with DDP training' + assert not opt.sync_bn, '--sync-bn known training issue, see https://github.com/ultralytics/yolov5/issues/3998' torch.cuda.set_device(LOCAL_RANK) device = torch.device('cuda', LOCAL_RANK) dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo", timeout=timedelta(seconds=60)) - assert opt.batch_size % WORLD_SIZE == 0, '--batch-size must be multiple of CUDA device count' - assert not opt.image_weights, '--image-weights argument is not compatible with DDP training' # Train if not opt.evolve: @@ -594,7 +585,6 @@ def main(opt): hyp = yaml.safe_load(f) # load hyps dict if 'anchors' not in hyp: # anchors commented in hyp.yaml hyp['anchors'] = 3 - assert LOCAL_RANK == -1, 'DDP mode not implemented for --evolve' opt.noval, opt.nosave = True, True # only val/save final epoch # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices yaml_file = Path(opt.save_dir) / 'hyp_evolved.yaml' # save best result here @@ -646,7 +636,7 @@ def main(opt): def run(**kwargs): - # Usage: import train; train.run(imgsz=320, weights='yolov5m.pt') + # Usage: import train; train.run(data='coco128.yaml', imgsz=320, weights='yolov5m.pt') opt = parse_opt(True) for k, v in kwargs.items(): setattr(opt, k, v) diff --git a/utils/general.py b/utils/general.py index fabd0f35fe9e..db81f7679cd7 100755 --- a/utils/general.py +++ b/utils/general.py @@ -301,7 +301,7 @@ def clean_str(s): def one_cycle(y1=0.0, y2=1.0, steps=100): - # lambda function for sinusoidal ramp from y1 to y2 + # lambda function for sinusoidal ramp from y1 to y2 https://arxiv.org/pdf/1812.01187.pdf return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1 diff --git a/utils/loss.py b/utils/loss.py index 88f57693307c..22061a11ff27 100644 --- a/utils/loss.py +++ b/utils/loss.py @@ -108,7 +108,7 @@ def __init__(self, model, autobalance=False): det = model.module.model[-1] if is_parallel(model) else model.model[-1] # Detect() module self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, .02]) # P3-P7 self.ssi = list(det.stride).index(16) if autobalance else 0 # stride 16 index - 
self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, model.gr, h, autobalance + self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance for k in 'na', 'nc', 'nl', 'anchors': setattr(self, k, getattr(det, k)) From efe60b568130612ef9558db14b84462c297ceb3a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 25 Jul 2021 01:18:39 +0200 Subject: [PATCH 196/757] Refactor train.py and val.py `loggers` (#4137) * Update loggers * Config * Update val.py * cleanup * fix1 * fix2 * fix3 and reformat * format sweep.py * Logger() class * cleanup * cleanup2 * wandb package import fix * wandb package import fix2 * txt fix * fix4 * fix5 * fix6 * drop wandb into utils/loggers * fix 7 * rename loggers/wandb_logging to loggers/wandb * Update message * Update message * Update message * cleanup * Fix x axis bug * fix rank 0 issue * cleanup --- train.py | 87 +++--------- utils/loggers/__init__.py | 129 ++++++++++++++++++ .../wandb}/__init__.py | 0 .../wandb}/log_dataset.py | 0 .../{wandb_logging => loggers/wandb}/sweep.py | 2 +- .../wandb}/sweep.yaml | 2 +- .../wandb}/wandb_utils.py | 28 ++-- utils/plots.py | 5 +- val.py | 10 +- 9 files changed, 172 insertions(+), 91 deletions(-) create mode 100644 utils/loggers/__init__.py rename utils/{wandb_logging => loggers/wandb}/__init__.py (100%) rename utils/{wandb_logging => loggers/wandb}/log_dataset.py (100%) rename utils/{wandb_logging => loggers/wandb}/sweep.py (98%) rename utils/{wandb_logging => loggers/wandb}/sweep.yaml (98%) rename utils/{wandb_logging => loggers/wandb}/wandb_utils.py (96%) diff --git a/train.py b/train.py index ad13ed6a52e4..1c48fa49f0f7 100644 --- a/train.py +++ b/train.py @@ -10,7 +10,6 @@ import random import sys import time -import warnings from copy import deepcopy from pathlib import Path from threading import Thread @@ -24,7 +23,6 @@ from torch.cuda import amp from torch.nn.parallel import DistributedDataParallel as DDP from torch.optim import Adam, SGD, lr_scheduler -from torch.utils.tensorboard import SummaryWriter from tqdm import tqdm FILE = Path(__file__).absolute() @@ -42,8 +40,9 @@ from utils.loss import ComputeLoss from utils.plots import plot_images, plot_labels, plot_results, plot_evolution from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, de_parallel -from utils.wandb_logging.wandb_utils import WandbLogger, check_wandb_resume +from utils.loggers.wandb.wandb_utils import check_wandb_resume from utils.metrics import fitness +from utils.loggers import Loggers LOGGER = logging.getLogger(__name__) LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html @@ -76,37 +75,23 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary with open(save_dir / 'opt.yaml', 'w') as f: yaml.safe_dump(vars(opt), f, sort_keys=False) - # Configure + # Config plots = not evolve # create plots cuda = device.type != 'cpu' init_seeds(1 + RANK) with open(data) as f: data_dict = yaml.safe_load(f) # data dict - - # Loggers - loggers = {'wandb': None, 'tb': None} # loggers dict - if RANK in [-1, 0]: - # TensorBoard - if plots: - prefix = colorstr('tensorboard: ') - LOGGER.info(f"{prefix}Start with 'tensorboard --logdir {opt.project}', view at http://localhost:6006/") - loggers['tb'] = SummaryWriter(str(save_dir)) - - # W&B - opt.hyp = hyp # add hyperparameters - run_id = torch.load(weights).get('wandb_id') if weights.endswith('.pt') and os.path.isfile(weights) else None - run_id = run_id if 
opt.resume else None # start fresh run if transfer learning - wandb_logger = WandbLogger(opt, save_dir.stem, run_id, data_dict) - loggers['wandb'] = wandb_logger.wandb - if loggers['wandb']: - data_dict = wandb_logger.data_dict - weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp # may update values if resuming - nc = 1 if single_cls else int(data_dict['nc']) # number of classes names = ['item'] if single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names assert len(names) == nc, f'{len(names)} names found for nc={nc} dataset in {data}' # check is_coco = data.endswith('coco.yaml') and nc == 80 # COCO dataset + # Loggers + if RANK in [-1, 0]: + loggers = Loggers(save_dir, results_file, weights, opt, hyp, data_dict, LOGGER).start() # loggers dict + if loggers.wandb and resume: + weights, epochs, hyp, data_dict = opt.weights, opt.epochs, opt.hyp, loggers.wandb.data_dict + # Model pretrained = weights.endswith('.pt') if pretrained: @@ -351,16 +336,11 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary pbar.set_description(s) # Plot - if plots and ni < 3: - f = save_dir / f'train_batch{ni}.jpg' # filename - Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start() - if loggers['tb'] and ni == 0: # TensorBoard - with warnings.catch_warnings(): - warnings.simplefilter('ignore') # suppress jit trace warning - loggers['tb'].add_graph(torch.jit.trace(de_parallel(model), imgs[0:1], strict=False), []) - elif plots and ni == 10 and loggers['wandb']: - wandb_logger.log({'Mosaics': [loggers['wandb'].Image(str(x), caption=x.name) for x in - save_dir.glob('train*.jpg') if x.exists()]}) + if plots: + if ni < 3: + f = save_dir / f'train_batch{ni}.jpg' # filename + Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start() + loggers.on_train_batch_end(ni, model, imgs) # end batch ------------------------------------------------------------------------------------------------ @@ -368,13 +348,12 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary lr = [x['lr'] for x in optimizer.param_groups] # for loggers scheduler.step() - # DDP process 0 or single-GPU if RANK in [-1, 0]: # mAP + loggers.on_train_epoch_end(epoch) ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'names', 'stride', 'class_weights']) final_epoch = epoch + 1 == epochs if not noval or final_epoch: # Calculate mAP - wandb_logger.current_epoch = epoch + 1 results, maps, _ = val.run(data_dict, batch_size=batch_size // WORLD_SIZE * 2, imgsz=imgsz, @@ -385,29 +364,14 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary save_json=is_coco and final_epoch, verbose=nc < 50 and final_epoch, plots=plots and final_epoch, - wandb_logger=wandb_logger, + loggers=loggers, compute_loss=compute_loss) - # Write - with open(results_file, 'a') as f: - f.write(s + '%10.4g' * 7 % results + '\n') # append metrics, val_loss - - # Log - tags = ['train/box_loss', 'train/obj_loss', 'train/cls_loss', # train loss - 'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', - 'val/box_loss', 'val/obj_loss', 'val/cls_loss', # val loss - 'x/lr0', 'x/lr1', 'x/lr2'] # params - for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags): - if loggers['tb']: - loggers['tb'].add_scalar(tag, x, epoch) # TensorBoard - if loggers['wandb']: - wandb_logger.log({tag: x}) # W&B - # Update best mAP fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95] if fi > best_fitness: best_fitness = fi - 
wandb_logger.end_epoch(best_result=best_fitness == fi) + loggers.on_train_val_end(mloss, results, lr, epoch, s, best_fitness, fi) # Save model if (not nosave) or (final_epoch and not evolve): # if save @@ -418,16 +382,14 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary 'ema': deepcopy(ema.ema).half(), 'updates': ema.updates, 'optimizer': optimizer.state_dict(), - 'wandb_id': wandb_logger.wandb_run.id if loggers['wandb'] else None} + 'wandb_id': loggers.wandb.wandb_run.id if loggers.wandb else None} # Save last, best and delete torch.save(ckpt, last) if best_fitness == fi: torch.save(ckpt, best) - if loggers['wandb']: - if ((epoch + 1) % opt.save_period == 0 and not final_epoch) and opt.save_period != -1: - wandb_logger.log_model(last.parent, opt, epoch, fi, best_model=best_fitness == fi) del ckpt + loggers.on_model_save(last, epoch, final_epoch, best_fitness, fi) # end epoch ---------------------------------------------------------------------------------------------------- # end training ----------------------------------------------------------------------------------------------------- @@ -435,10 +397,6 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary LOGGER.info(f'{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.\n') if plots: plot_results(save_dir=save_dir) # save as results.png - if loggers['wandb']: - files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]] - wandb_logger.log({"Results": [loggers['wandb'].Image(str(save_dir / f), caption=f) for f in files - if (save_dir / f).exists()]}) if not evolve: if is_coco: # COCO dataset @@ -458,11 +416,8 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary for f in last, best: if f.exists(): strip_optimizer(f) # strip optimizers - if loggers['wandb']: # Log the stripped model - loggers['wandb'].log_artifact(str(best if best.exists() else last), type='model', - name='run_' + wandb_logger.wandb_run.id + '_model', - aliases=['latest', 'best', 'stripped']) - wandb_logger.finish_run() + + loggers.on_train_end(last, best) torch.cuda.empty_cache() return results diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py new file mode 100644 index 000000000000..ceca84c95252 --- /dev/null +++ b/utils/loggers/__init__.py @@ -0,0 +1,129 @@ +# YOLOv5 experiment logging utils + +import warnings + +import torch +from torch.utils.tensorboard import SummaryWriter + +from utils.general import colorstr, emojis +from utils.loggers.wandb.wandb_utils import WandbLogger +from utils.torch_utils import de_parallel + +LOGGERS = ('txt', 'tb', 'wandb') # text-file, TensorBoard, Weights & Biases + +try: + import wandb + + assert hasattr(wandb, '__version__') # verify package import not local dir +except (ImportError, AssertionError): + wandb = None + + +class Loggers(): + # YOLOv5 Loggers class + def __init__(self, save_dir=None, results_file=None, weights=None, opt=None, hyp=None, + data_dict=None, logger=None, include=LOGGERS): + self.save_dir = save_dir + self.results_file = results_file + self.weights = weights + self.opt = opt + self.hyp = hyp + self.data_dict = data_dict + self.logger = logger # for printing results to console + self.include = include + for k in LOGGERS: + setattr(self, k, None) # init empty logger dictionary + + def start(self): + self.txt = True # always log to txt + + # Message + try: + import wandb + except ImportError: + prefix = colorstr('Weights & Biases: ') + s = f"{prefix}run 'pip install wandb' to automatically track and visualize YOLOv5 
🚀 runs (RECOMMENDED)"
+            print(emojis(s))
+
+        # TensorBoard
+        s = self.save_dir
+        if 'tb' in self.include and not self.opt.evolve:
+            prefix = colorstr('TensorBoard: ')
+            self.logger.info(f"{prefix}Start with 'tensorboard --logdir {s.parent}', view at http://localhost:6006/")
+            self.tb = SummaryWriter(str(s))
+
+        # W&B
+        try:
+            assert 'wandb' in self.include and wandb
+            run_id = torch.load(self.weights).get('wandb_id') if self.opt.resume else None
+            self.opt.hyp = self.hyp  # add hyperparameters
+            self.wandb = WandbLogger(self.opt, s.stem, run_id, self.data_dict)
+        except:
+            self.wandb = None
+
+        return self
+
+    def on_train_batch_end(self, ni, model, imgs):
+        # Callback runs on train batch end
+        if ni == 0:
+            with warnings.catch_warnings():
+                warnings.simplefilter('ignore')  # suppress jit trace warning
+                self.tb.add_graph(torch.jit.trace(de_parallel(model), imgs[0:1], strict=False), [])
+        if self.wandb and ni == 10:
+            files = sorted(self.save_dir.glob('train*.jpg'))
+            self.wandb.log({'Mosaics': [wandb.Image(str(f), caption=f.name) for f in files if f.exists()]})
+
+    def on_train_epoch_end(self, epoch):
+        # Callback runs on train epoch end
+        if self.wandb:
+            self.wandb.current_epoch = epoch + 1
+
+    def on_val_batch_end(self, pred, predn, path, names, im):
+        # Callback runs on val batch end
+        if self.wandb:
+            self.wandb.val_one_image(pred, predn, path, names, im)
+
+    def on_val_end(self):
+        # Callback runs on val end
+        if self.wandb:
+            files = sorted(self.save_dir.glob('val*.jpg'))
+            self.wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in files]})
+
+    def on_train_val_end(self, mloss, results, lr, epoch, s, best_fitness, fi):
+        # Callback runs on validation end during training
+        vals = list(mloss[:-1]) + list(results) + lr
+        tags = ['train/box_loss', 'train/obj_loss', 'train/cls_loss',  # train loss
+                'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95',
+                'val/box_loss', 'val/obj_loss', 'val/cls_loss',  # val loss
+                'x/lr0', 'x/lr1', 'x/lr2']  # params
+        if self.txt:
+            with open(self.results_file, 'a') as f:
+                f.write(s + '%10.4g' * 7 % results + '\n')  # append metrics, val_loss
+        if self.tb:
+            for x, tag in zip(vals, tags):
+                self.tb.add_scalar(tag, x, epoch)  # TensorBoard
+        if self.wandb:
+            self.wandb.log({k: v for k, v in zip(tags, vals)})
+            self.wandb.end_epoch(best_result=best_fitness == fi)
+
+    def on_model_save(self, last, epoch, final_epoch, best_fitness, fi):
+        # Callback runs on model save event
+        if self.wandb:
+            if ((epoch + 1) % self.opt.save_period == 0 and not final_epoch) and self.opt.save_period != -1:
+                self.wandb.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi)
+
+    def on_train_end(self, last, best):
+        # Callback runs on training end
+        files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]]
+        files = [(self.save_dir / f) for f in files if (self.save_dir / f).exists()]  # filter
+        if self.wandb:
+            wandb.log({"Results": [wandb.Image(str(f), caption=f.name) for f in files]})
+            wandb.log_artifact(str(best if best.exists() else last), type='model',
+                               name='run_' + self.wandb.wandb_run.id + '_model',
+                               aliases=['latest', 'best', 'stripped'])
+            self.wandb.finish_run()
+
+    def log_images(self, paths):
+        # Log images
+        if self.wandb:
+            self.wandb.log({"Labels": [wandb.Image(str(x), caption=x.name) for x in paths]})
diff --git a/utils/wandb_logging/__init__.py b/utils/loggers/wandb/__init__.py
similarity index 100%
rename from utils/wandb_logging/__init__.py
rename to 
utils/loggers/wandb/__init__.py diff --git a/utils/wandb_logging/log_dataset.py b/utils/loggers/wandb/log_dataset.py similarity index 100% rename from utils/wandb_logging/log_dataset.py rename to utils/loggers/wandb/log_dataset.py diff --git a/utils/wandb_logging/sweep.py b/utils/loggers/wandb/sweep.py similarity index 98% rename from utils/wandb_logging/sweep.py rename to utils/loggers/wandb/sweep.py index 6c8719b32006..8e952d03c085 100644 --- a/utils/wandb_logging/sweep.py +++ b/utils/loggers/wandb/sweep.py @@ -1,12 +1,12 @@ import sys from pathlib import Path + import wandb FILE = Path(__file__).absolute() sys.path.append(FILE.parents[2].as_posix()) # add utils/ to path from train import train, parse_opt -import test from utils.general import increment_path from utils.torch_utils import select_device diff --git a/utils/wandb_logging/sweep.yaml b/utils/loggers/wandb/sweep.yaml similarity index 98% rename from utils/wandb_logging/sweep.yaml rename to utils/loggers/wandb/sweep.yaml index 64e395533c1c..dcc95264f8cd 100644 --- a/utils/wandb_logging/sweep.yaml +++ b/utils/loggers/wandb/sweep.yaml @@ -14,7 +14,7 @@ # You can use grid, bayesian and hyperopt search strategy # For more info on configuring sweeps visit - https://docs.wandb.ai/guides/sweeps/configuration -program: utils/wandb_logging/sweep.py +program: utils/loggers/wandb/sweep.py method: random metric: name: metrics/mAP_0.5 diff --git a/utils/wandb_logging/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py similarity index 96% rename from utils/wandb_logging/wandb_utils.py rename to utils/loggers/wandb/wandb_utils.py index 4986e01afe36..db2693a9e11c 100644 --- a/utils/wandb_logging/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -1,4 +1,5 @@ """Utilities and tools for tracking runs with Weights & Biases.""" + import logging import os import sys @@ -8,15 +9,18 @@ import yaml from tqdm import tqdm -sys.path.append(str(Path(__file__).parent.parent.parent)) # add utils/ to path +FILE = Path(__file__).absolute() +sys.path.append(FILE.parents[3].as_posix()) # add yolov5/ to path + from utils.datasets import LoadImagesAndLabels from utils.datasets import img2label_paths -from utils.general import colorstr, check_dataset, check_file +from utils.general import check_dataset, check_file try: import wandb - from wandb import init, finish -except ImportError: + + assert hasattr(wandb, '__version__') # verify package import not local dir +except (ImportError, AssertionError): wandb = None RANK = int(os.getenv('RANK', -1)) @@ -106,7 +110,7 @@ def __init__(self, opt, name, run_id, data_dict, job_type='Training'): self.data_dict = data_dict self.bbox_media_panel_images = [] self.val_table_path_map = None - self.max_imgs_to_log = 16 + self.max_imgs_to_log = 16 # It's more elegant to stick to 1 wandb.init call, but useful config data is overwritten in the WandbLogger's wandb.init call if isinstance(opt.resume, str): # checks resume from artifact if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): @@ -134,13 +138,11 @@ def __init__(self, opt, name, run_id, data_dict, job_type='Training'): if not opt.resume: wandb_data_dict = self.check_and_upload_dataset(opt) if opt.upload_dataset else data_dict # Info useful for resuming from artifacts - self.wandb_run.config.update({'opt': vars(opt), 'data_dict': wandb_data_dict}, allow_val_change=True) + self.wandb_run.config.update({'opt': vars(opt), 'data_dict': wandb_data_dict}, + allow_val_change=True) self.data_dict = self.setup_training(opt, data_dict) if self.job_type == 'Dataset Creation': 
            self.data_dict = self.check_and_upload_dataset(opt)
-        else:
-            prefix = colorstr('wandb: ')
-            print(f"{prefix}Install Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)")
 
     def check_and_upload_dataset(self, opt):
         assert wandb, 'Install wandb to upload dataset'
@@ -169,7 +171,7 @@ def setup_training(self, opt, data_dict):
                                                                            opt.artifact_alias)
             self.val_artifact_path, self.val_artifact = self.download_dataset_artifact(data_dict.get('val'),
                                                                                        opt.artifact_alias)
-
+
         if self.train_artifact_path is not None:
             train_path = Path(self.train_artifact_path) / 'data/images/'
             data_dict['train'] = str(train_path)
@@ -177,7 +179,6 @@ def setup_training(self, opt, data_dict):
             val_path = Path(self.val_artifact_path) / 'data/images/'
             data_dict['val'] = str(val_path)
 
-
         if self.val_artifact is not None:
             self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation")
             self.result_table = wandb.Table(["epoch", "id", "ground truth", "prediction", "avg_confidence"])
@@ -315,9 +316,9 @@ def log_training_progress(self, predn, path, names):
         )
 
     def val_one_image(self, pred, predn, path, names, im):
-        if self.val_table and self.result_table: # Log Table if Val dataset is uploaded as artifact
+        if self.val_table and self.result_table:  # Log Table if Val dataset is uploaded as artifact
             self.log_training_progress(predn, path, names)
-        else: # Default to bbox media panel if Val artifact not found
+        else:  # Default to bbox media panel if Val artifact not found
             if len(self.bbox_media_panel_images) < self.max_imgs_to_log and self.current_epoch > 0:
                 if self.current_epoch % self.bbox_interval == 0:
                     box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
@@ -328,7 +329,6 @@ def val_one_image(self, pred, predn, path, names, im):
                     boxes = {"predictions": {"box_data": box_data, "class_labels": names}}  # inference-space
                     self.bbox_media_panel_images.append(wandb.Image(im, boxes=boxes, caption=path.name))
 
-
     def log(self, log_dict):
         if self.wandb_run:
             for key, value in log_dict.items():
diff --git a/utils/plots.py b/utils/plots.py
index cd9a45e8c761..f9fd35fce751 100644
--- a/utils/plots.py
+++ b/utils/plots.py
@@ -327,9 +327,8 @@ def plot_labels(labels, names=(), save_dir=Path(''), loggers=None):
     plt.close()
 
     # loggers
-    for k, v in loggers.items() or {}:
-        if k == 'wandb' and v:
-            v.log({"Labels": [v.Image(str(x), caption=x.name) for x in save_dir.glob('*labels*.jpg')]}, commit=False)
+    if loggers:
+        loggers.log_images(save_dir.glob('*labels*.jpg'))
 
 
 def plot_evolution(yaml_file='data/hyp.finetune.yaml'):  # from utils.plots import *; plot_evolution()
diff --git a/val.py b/val.py
index e493dfe66ae8..2b088dcdf210 100644
--- a/val.py
+++ b/val.py
@@ -26,6 +26,7 @@
 from utils.metrics import ap_per_class, ConfusionMatrix
 from utils.plots import plot_images, output_to_target, plot_study_txt
 from utils.torch_utils import select_device, time_sync
+from utils.loggers import Loggers
 
 
 def save_one_txt(predn, save_conf, shape, file):
@@ -97,7 +98,7 @@ def run(data,
         dataloader=None,
         save_dir=Path(''),
         plots=True,
-        wandb_logger=None,
+        loggers=Loggers(),
         compute_loss=None,
         ):
     # Initialize/load model and set device
@@ -215,8 +216,7 @@ def run(data,
                 save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / (path.stem + '.txt'))
             if save_json:
                 save_one_json(predn, jdict, path, class_map)  # append to COCO-JSON dictionary
-            if wandb_logger and wandb_logger.wandb_run:
-                wandb_logger.val_one_image(pred, predn, path, names, img[si])
+            loggers.on_val_batch_end(pred, predn, 
path, names, img[si]) # Plot images if plots and batch_i < 3: @@ -253,9 +253,7 @@ def run(data, # Plots if plots: confusion_matrix.plot(save_dir=save_dir, names=list(names.values())) - if wandb_logger and wandb_logger.wandb: - val_batches = [wandb_logger.wandb.Image(str(f), caption=f.name) for f in sorted(save_dir.glob('val*.jpg'))] - wandb_logger.log({"Validation": val_batches}) + loggers.on_val_end() # Save JSON if save_json and len(jdict): From d17b45eaad041b2fbb219232363bc865d4134d5e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 25 Jul 2021 01:22:00 +0200 Subject: [PATCH 197/757] Update README.md (#4143) --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index c27fbc6fa639..b4aacd78b0ca 100755 --- a/README.md +++ b/README.md @@ -82,10 +82,10 @@ Inference with YOLOv5 and [PyTorch Hub](https://github.com/ultralytics/yolov5/is import torch # Model -model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # or yolov5m, yolov5x, custom +model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # or yolov5m, yolov5l, yolov5x, custom # Images -img = 'https://ultralytics.com/images/zidane.jpg' # or file, PIL, OpenCV, numpy, multiple +img = 'https://ultralytics.com/images/zidane.jpg' # or PosixPath, PIL, OpenCV, numpy, list # Inference results = model(img) From 6e4358f3f3f770a3b4ececfc3da73e25b3d8a004 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 25 Jul 2021 13:46:16 +0200 Subject: [PATCH 198/757] Add `export.py` ONNX inference suggestion (#4146) --- export.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/export.py b/export.py index ceee15e1644f..c98e92d972c6 100644 --- a/export.py +++ b/export.py @@ -76,6 +76,7 @@ def export_onnx(model, img, file, opset, train, dynamic, simplify): except Exception as e: print(f'{prefix} simplifier failure: {e}') print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + print(f"{prefix} run --dynamic ONNX model inference with detect.py: 'python detect.py --weights {f}'") except Exception as e: print(f'{prefix} export failure: {e}') @@ -94,7 +95,7 @@ def export_coreml(model, img, file): model.save(f) print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') except Exception as e: - print(f'{prefix} export failure: {e}') + print(f'\n{prefix} export failure: {e}') def run(weights='./yolov5s.pt', # weights path From 3764277f95a1419fde96fdf68bc88d520e7dd0ed Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 25 Jul 2021 18:59:18 +0200 Subject: [PATCH 199/757] Created using Colaboratory --- tutorial.ipynb | 64 +++++++++----------------------------------------- 1 file changed, 11 insertions(+), 53 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index f316dc5f550a..88adc08c0ef1 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -1036,28 +1036,8 @@ "source": [ "## Local Logging\n", "\n", - "All results are logged by default to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc. View train and val jpgs to see mosaics, labels, predictions and augmentation effects. Note a **Mosaic Dataloader** is used for training (shown below), a new concept developed by Ultralytics and first featured in [YOLOv4](https://arxiv.org/abs/2004.10934)." 
- ] - }, - { - "cell_type": "code", - "metadata": { - "id": "riPdhraOTCO0" - }, - "source": [ - "Image(filename='runs/train/exp/train_batch0.jpg', width=800) # train batch 0 mosaics and labels\n", - "Image(filename='runs/train/exp/test_batch0_labels.jpg', width=800) # val batch 0 labels\n", - "Image(filename='runs/train/exp/test_batch0_pred.jpg', width=800) # val batch 0 predictions" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "OYG4WFEnTVrI" - }, - "source": [ + "All results are logged by default to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc. View train and val jpgs to see mosaics, labels, predictions and augmentation effects. Note an Ultralytics **Mosaic Dataloader** is used for training (shown below), which combined each original image with 3 additional random training images.\n", + "\n", "> \n", "`train_batch0.jpg` shows train batch 0 mosaics and labels\n", "\n", @@ -1065,38 +1045,16 @@ "`test_batch0_labels.jpg` shows val batch 0 labels\n", "\n", "> \n", - "`test_batch0_pred.jpg` shows val batch 0 _predictions_" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "7KN5ghjE6ZWh" - }, - "source": [ - "Training results are automatically logged to [Tensorboard](https://www.tensorflow.org/tensorboard) and `runs/train/exp/results.txt`, which is plotted as `results.png` (below) after training completes. You can also plot any `results.txt` file manually:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "MDznIqPF7nk3" - }, - "source": [ + "`test_batch0_pred.jpg` shows val batch 0 _predictions_\n", + "\n", + "Training results are automatically logged to [Tensorboard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) as `results.csv`, which is plotted as `results.png` (below) after training completes. You can also plot any `results.csv` file manually:\n", + "\n", + "```python\n", "from utils.plots import plot_results \n", - "plot_results(save_dir='runs/train/exp') # plot all results*.txt files in 'runs/train/exp'\n", - "Image(filename='runs/train/exp/results.png', width=800)" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "lfrEegCSW3fK" - }, - "source": [ - "
<img src=\"...\" alt=\"COCO128 results\">
" + "plot_results('path/to/results.csv') # plot 'results.csv' as 'results.png'\n", + "```\n", + "\n", + "
<img src=\"...\" alt=\"COCO128 results\">
" ] }, { From 96e36a7c913e2433446ff410a4cf60041010a524 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 25 Jul 2021 19:06:37 +0200 Subject: [PATCH 200/757] New CSV Logger (#4148) * New CSV Logger * cleanup * move batch plots into Logger * rename comment * Remove total loss from progress bar * mloss :-1 bug fix * Update plot_results() * Update plot_results() * plot_results bug fix --- .gitignore | 1 + train.py | 40 +++++++---------------- utils/loggers/__init__.py | 63 ++++++++++++++++++++++-------------- utils/loss.py | 3 +- utils/plots.py | 68 +++++++++------------------------------ val.py | 2 +- 6 files changed, 68 insertions(+), 109 deletions(-) diff --git a/.gitignore b/.gitignore index 91299e263b86..b07134d097dd 100755 --- a/.gitignore +++ b/.gitignore @@ -31,6 +31,7 @@ data/* !data/*.sh results*.txt +results*.csv # Datasets ------------------------------------------------------------------------------------------------------------- coco/ diff --git a/train.py b/train.py index 1c48fa49f0f7..db045c766716 100644 --- a/train.py +++ b/train.py @@ -12,7 +12,6 @@ import time from copy import deepcopy from pathlib import Path -from threading import Thread import math import numpy as np @@ -38,7 +37,7 @@ check_requirements, print_mutation, set_logging, one_cycle, colorstr from utils.google_utils import attempt_download from utils.loss import ComputeLoss -from utils.plots import plot_images, plot_labels, plot_results, plot_evolution +from utils.plots import plot_labels, plot_evolution from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, de_parallel from utils.loggers.wandb.wandb_utils import check_wandb_resume from utils.metrics import fitness @@ -61,7 +60,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Directories w = save_dir / 'weights' # weights dir w.mkdir(parents=True, exist_ok=True) # make dir - last, best, results_file = w / 'last.pt', w / 'best.pt', save_dir / 'results.txt' + last, best = w / 'last.pt', w / 'best.pt' # Hyperparameters if isinstance(hyp, str): @@ -88,7 +87,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Loggers if RANK in [-1, 0]: - loggers = Loggers(save_dir, results_file, weights, opt, hyp, data_dict, LOGGER).start() # loggers dict + loggers = Loggers(save_dir, weights, opt, hyp, data_dict, LOGGER).start() # loggers dict if loggers.wandb and resume: weights, epochs, hyp, data_dict = opt.weights, opt.epochs, opt.hyp, loggers.wandb.data_dict @@ -167,10 +166,6 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary ema.ema.load_state_dict(ckpt['ema'].float().state_dict()) ema.updates = ckpt['updates'] - # Results - if ckpt.get('training_results') is not None: - results_file.write_text(ckpt['training_results']) # write results.txt - # Epochs start_epoch = ckpt['epoch'] + 1 if resume: @@ -275,11 +270,11 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs) # dataset.mosaic_border = [b - imgsz, -b] # height, width borders - mloss = torch.zeros(4, device=device) # mean losses + mloss = torch.zeros(3, device=device) # mean losses if RANK != -1: train_loader.sampler.set_epoch(epoch) pbar = enumerate(train_loader) - LOGGER.info(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'labels', 'img_size')) + LOGGER.info(('\n' + '%10s' * 7) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'labels', 'img_size')) if RANK in [-1, 0]: pbar = tqdm(pbar, total=nb) # progress bar optimizer.zero_grad() @@ -327,20 +322,13 @@ def 
train(hyp, # path/to/hyp.yaml or hyp dictionary ema.update(model) last_opt_step = ni - # Print + # Log if RANK in [-1, 0]: mloss = (mloss * i + loss_items) / (i + 1) # update mean losses mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G' # (GB) - s = ('%10s' * 2 + '%10.4g' * 6) % ( - f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1]) - pbar.set_description(s) - - # Plot - if plots: - if ni < 3: - f = save_dir / f'train_batch{ni}.jpg' # filename - Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start() - loggers.on_train_batch_end(ni, model, imgs) + pbar.set_description(('%10s' * 2 + '%10.4g' * 5) % ( + f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1])) + loggers.on_train_batch_end(ni, model, imgs, targets, paths, plots) # end batch ------------------------------------------------------------------------------------------------ @@ -371,13 +359,12 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95] if fi > best_fitness: best_fitness = fi - loggers.on_train_val_end(mloss, results, lr, epoch, s, best_fitness, fi) + loggers.on_train_val_end(mloss, results, lr, epoch, best_fitness, fi) # Save model if (not nosave) or (final_epoch and not evolve): # if save ckpt = {'epoch': epoch, 'best_fitness': best_fitness, - 'training_results': results_file.read_text(), 'model': deepcopy(de_parallel(model)).half(), 'ema': deepcopy(ema.ema).half(), 'updates': ema.updates, @@ -395,9 +382,6 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # end training ----------------------------------------------------------------------------------------------------- if RANK in [-1, 0]: LOGGER.info(f'{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.\n') - if plots: - plot_results(save_dir=save_dir) # save as results.png - if not evolve: if is_coco: # COCO dataset for m in [last, best] if best.exists() else [last]: # speed, mAP tests @@ -411,13 +395,11 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary save_dir=save_dir, save_json=True, plots=False) - # Strip optimizers for f in last, best: if f.exists(): strip_optimizer(f) # strip optimizers - - loggers.on_train_end(last, best) + loggers.on_train_end(last, best, plots) torch.cuda.empty_cache() return results diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index ceca84c95252..29dd4605341b 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -1,15 +1,17 @@ # YOLOv5 experiment logging utils import warnings +from threading import Thread import torch from torch.utils.tensorboard import SummaryWriter from utils.general import colorstr, emojis from utils.loggers.wandb.wandb_utils import WandbLogger +from utils.plots import plot_images, plot_results from utils.torch_utils import de_parallel -LOGGERS = ('txt', 'tb', 'wandb') # text-file, TensorBoard, Weights & Biases +LOGGERS = ('csv', 'tb', 'wandb') # text-file, TensorBoard, Weights & Biases try: import wandb @@ -21,10 +23,8 @@ class Loggers(): # YOLOv5 Loggers class - def __init__(self, save_dir=None, results_file=None, weights=None, opt=None, hyp=None, - data_dict=None, logger=None, include=LOGGERS): + def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, data_dict=None, logger=None, include=LOGGERS): self.save_dir = save_dir - self.results_file = results_file self.weights = weights self.opt = opt self.hyp = hyp @@ -35,7 +35,7 @@ 
def __init__(self, save_dir=None, results_file=None, weights=None, opt=None, hyp setattr(self, k, None) # init empty logger dictionary def start(self): - self.txt = True # always log to txt + self.csv = True # always log to csv # Message try: @@ -63,15 +63,19 @@ def start(self): return self - def on_train_batch_end(self, ni, model, imgs): + def on_train_batch_end(self, ni, model, imgs, targets, paths, plots): # Callback runs on train batch end - if ni == 0: - with warnings.catch_warnings(): - warnings.simplefilter('ignore') # suppress jit trace warning - self.tb.add_graph(torch.jit.trace(de_parallel(model), imgs[0:1], strict=False), []) - if self.wandb and ni == 10: - files = sorted(self.save_dir.glob('train*.jpg')) - self.wandb.log({'Mosaics': [wandb.Image(str(f), caption=f.name) for f in files if f.exists()]}) + if plots: + if ni == 0: + with warnings.catch_warnings(): + warnings.simplefilter('ignore') # suppress jit trace warning + self.tb.add_graph(torch.jit.trace(de_parallel(model), imgs[0:1], strict=False), []) + if ni < 3: + f = self.save_dir / f'train_batch{ni}.jpg' # filename + Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start() + if self.wandb and ni == 10: + files = sorted(self.save_dir.glob('train*.jpg')) + self.wandb.log({'Mosaics': [wandb.Image(str(f), caption=f.name) for f in files if f.exists()]}) def on_train_epoch_end(self, epoch): # Callback runs on train epoch end @@ -89,21 +93,28 @@ def on_val_end(self): files = sorted(self.save_dir.glob('val*.jpg')) self.wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in files]}) - def on_train_val_end(self, mloss, results, lr, epoch, s, best_fitness, fi): - # Callback runs on validation end during training - vals = list(mloss[:-1]) + list(results) + lr - tags = ['train/box_loss', 'train/obj_loss', 'train/cls_loss', # train loss - 'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', + def on_train_val_end(self, mloss, results, lr, epoch, best_fitness, fi): + # Callback runs on val end during training + vals = list(mloss) + list(results) + lr + keys = ['train/box_loss', 'train/obj_loss', 'train/cls_loss', # train loss + 'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', # metrics 'val/box_loss', 'val/obj_loss', 'val/cls_loss', # val loss 'x/lr0', 'x/lr1', 'x/lr2'] # params - if self.txt: - with open(self.results_file, 'a') as f: - f.write(s + '%10.4g' * 7 % results + '\n') # append metrics, val_loss + x = {k: v for k, v in zip(keys, vals)} # dict + + if self.csv: + file = self.save_dir / 'results.csv' + n = len(x) + 1 # number of cols + s = '' if file.exists() else (('%20s,' * n % tuple(['epoch'] + keys)).rstrip(',') + '\n') # add header + with open(file, 'a') as f: + f.write(s + ('%20.5g,' * n % tuple([epoch] + vals)).rstrip(',') + '\n') + if self.tb: - for x, tag in zip(vals, tags): - self.tb.add_scalar(tag, x, epoch) # TensorBoard + for k, v in x.items(): + self.tb.add_scalar(k, v, epoch) # TensorBoard + if self.wandb: - self.wandb.log({k: v for k, v in zip(tags, vals)}) + self.wandb.log(x) self.wandb.end_epoch(best_result=best_fitness == fi) def on_model_save(self, last, epoch, final_epoch, best_fitness, fi): @@ -112,8 +123,10 @@ def on_model_save(self, last, epoch, final_epoch, best_fitness, fi): if ((epoch + 1) % self.opt.save_period == 0 and not final_epoch) and self.opt.save_period != -1: self.wandb.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi) - def on_train_end(self, last, best): + def 
on_train_end(self, last, best, plots): # Callback runs on training end + if plots: + plot_results(dir=self.save_dir) # save results.png files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]] files = [(self.save_dir / f) for f in files if (self.save_dir / f).exists()] # filter if self.wandb: diff --git a/utils/loss.py b/utils/loss.py index 22061a11ff27..79e8f24359c1 100644 --- a/utils/loss.py +++ b/utils/loss.py @@ -162,8 +162,7 @@ def __call__(self, p, targets): # predictions, targets, model lcls *= self.hyp['cls'] bs = tobj.shape[0] # batch size - loss = lbox + lobj + lcls - return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach() + return (lbox + lobj + lcls) * bs, torch.cat((lbox, lobj, lcls)).detach() def build_targets(self, p, targets): # Build targets for compute_loss(), input targets(image,class,x,y,w,h) diff --git a/utils/plots.py b/utils/plots.py index f9fd35fce751..e13e316314dd 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -1,7 +1,5 @@ # Plotting utils -import glob -import os from copy import copy from pathlib import Path @@ -387,63 +385,29 @@ def profile_idetection(start=0, stop=0, labels=(), save_dir=''): plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200) -def plot_results_overlay(start=0, stop=0): # from utils.plots import *; plot_results_overlay() - # Plot training 'results*.txt', overlaying train and val losses - s = ['train', 'train', 'train', 'Precision', 'mAP@0.5', 'val', 'val', 'val', 'Recall', 'mAP@0.5:0.95'] # legends - t = ['Box', 'Objectness', 'Classification', 'P-R', 'mAP-F1'] # titles - for f in sorted(glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')): - results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T - n = results.shape[1] # number of rows - x = range(start, min(stop, n) if stop else n) - fig, ax = plt.subplots(1, 5, figsize=(14, 3.5), tight_layout=True) - ax = ax.ravel() - for i in range(5): - for j in [i, i + 5]: - y = results[j, x] - ax[i].plot(x, y, marker='.', label=s[j]) - # y_smooth = butter_lowpass_filtfilt(y) - # ax[i].plot(x, np.gradient(y_smooth), marker='.', label=s[j]) - - ax[i].set_title(t[i]) - ax[i].legend() - ax[i].set_ylabel(f) if i == 0 else None # add filename - fig.savefig(f.replace('.txt', '.png'), dpi=200) - - -def plot_results(start=0, stop=0, bucket='', id=(), labels=(), save_dir=''): - # Plot training 'results*.txt'. from utils.plots import *; plot_results(save_dir='runs/train/exp') +def plot_results(file='', dir=''): + # Plot training results.csv. Usage: from utils.plots import *; plot_results('path/to/results.csv') + save_dir = Path(file).parent if file else Path(dir) fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True) ax = ax.ravel() - s = ['Box', 'Objectness', 'Classification', 'Precision', 'Recall', - 'val Box', 'val Objectness', 'val Classification', 'mAP@0.5', 'mAP@0.5:0.95'] - if bucket: - # files = ['https://storage.googleapis.com/%s/results%g.txt' % (bucket, x) for x in id] - files = ['results%g.txt' % x for x in id] - c = ('gsutil cp ' + '%s ' * len(files) + '.') % tuple('gs://%s/results%g.txt' % (bucket, x) for x in id) - os.system(c) - else: - files = list(Path(save_dir).glob('results*.txt')) - assert len(files), 'No results.txt files found in %s, nothing to plot.' % os.path.abspath(save_dir) + files = list(save_dir.glob('results*.csv')) + assert len(files), f'No results.csv files found in {save_dir.resolve()}, nothing to plot.' 
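+    # NOTE: results.csv is written by Loggers.on_train_val_end() above with a
+    # '%20s'-formatted header row and '%20.5g'-formatted values, so the stripped
+    # column names and the pd.read_csv() parsing below line up with that layout.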
for fi, f in enumerate(files): try: - results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T - n = results.shape[1] # number of rows - x = range(start, min(stop, n) if stop else n) - for i in range(10): - y = results[i, x] - if i in [0, 1, 2, 5, 6, 7]: - y[y == 0] = np.nan # don't show zero loss values - # y /= y[0] # normalize - label = labels[fi] if len(labels) else f.stem - ax[i].plot(x, y, marker='.', label=label, linewidth=2, markersize=8) - ax[i].set_title(s[i]) - # if i in [5, 6, 7]: # share train and val loss y axes + data = pd.read_csv(f) + s = [x.strip() for x in data.columns] + x = data.values[:, 0] + for i, j in enumerate([1, 2, 3, 4, 5, 8, 9, 10, 6, 7]): + y = data.values[:, j] + # y[y == 0] = np.nan # don't show zero values + ax[i].plot(x, y, marker='.', label=f.stem, linewidth=2, markersize=8) + ax[i].set_title(s[j], fontsize=12) + # if j in [8, 9, 10]: # share train and val loss y axes # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5]) except Exception as e: - print('Warning: Plotting error for %s; %s' % (f, e)) - + print(f'Warning: Plotting error for {f}: {e}') ax[1].legend() - fig.savefig(Path(save_dir) / 'results.png', dpi=200) + fig.savefig(save_dir / 'results.png', dpi=200) def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')): diff --git a/val.py b/val.py index 2b088dcdf210..f20877e8aa0b 100644 --- a/val.py +++ b/val.py @@ -171,7 +171,7 @@ def run(data, # Compute loss if compute_loss: - loss += compute_loss([x.float() for x in train_out], targets)[1][:3] # box, obj, cls + loss += compute_loss([x.float() for x in train_out], targets)[1] # box, obj, cls # Run NMS targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels From 8acb5734c7f0d1b7baf62b5c5dab6107a37896c6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 26 Jul 2021 00:57:09 +0200 Subject: [PATCH 201/757] Created using Colaboratory --- tutorial.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 88adc08c0ef1..831735cc0830 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -1036,7 +1036,7 @@ "source": [ "## Local Logging\n", "\n", - "All results are logged by default to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc. View train and val jpgs to see mosaics, labels, predictions and augmentation effects. Note an Ultralytics **Mosaic Dataloader** is used for training (shown below), which combined each original image with 3 additional random training images.\n", + "All results are logged by default to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc. View train and val jpgs to see mosaics, labels, predictions and augmentation effects. 
Note an Ultralytics **Mosaic Dataloader** is used for training (shown below), which combines 4 images into 1 mosaic during training.\n", "\n", "> \n", "`train_batch0.jpg` shows train batch 0 mosaics and labels\n", From f8e11483df9055a30843162a33a185c5f4b47ab3 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 26 Jul 2021 14:23:43 +0200 Subject: [PATCH 202/757] Update dataset headers (#4162) --- data/Argoverse_HD.yaml | 11 ++++++----- data/GlobalWheat2020.yaml | 11 ++++++----- data/Objects365.yaml | 11 ++++++----- data/SKU-110K.yaml | 11 ++++++----- data/VOC.yaml | 13 +++++++------ data/VisDrone.yaml | 11 ++++++----- data/coco.yaml | 11 ++++++----- data/coco128.yaml | 13 +++++++------ data/xView.yaml | 13 +++++++------ 9 files changed, 57 insertions(+), 48 deletions(-) diff --git a/data/Argoverse_HD.yaml b/data/Argoverse_HD.yaml index ad1a52254d74..90721cc0b9fb 100644 --- a/data/Argoverse_HD.yaml +++ b/data/Argoverse_HD.yaml @@ -1,9 +1,10 @@ +# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 # Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ -# Train command: python train.py --data Argoverse_HD.yaml -# Default dataset location is next to YOLOv5: -# /parent -# /datasets/Argoverse -# /yolov5 +# YOLOv5 🚀 example usage: python train.py --data Argoverse_HD.yaml +# parent +# ├── yolov5 +# └── datasets +# └── Argoverse ← downloads here # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] diff --git a/data/GlobalWheat2020.yaml b/data/GlobalWheat2020.yaml index b77534944ed7..58b55114c722 100644 --- a/data/GlobalWheat2020.yaml +++ b/data/GlobalWheat2020.yaml @@ -1,9 +1,10 @@ +# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 # Global Wheat 2020 dataset http://www.global-wheat.com/ -# Train command: python train.py --data GlobalWheat2020.yaml -# Default dataset location is next to YOLOv5: -# /parent -# /datasets/GlobalWheat2020 -# /yolov5 +# YOLOv5 🚀 example usage: python train.py --data GlobalWheat2020.yaml +# parent +# ├── yolov5 +# └── datasets +# └── GlobalWheat2020 ← downloads here # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] diff --git a/data/Objects365.yaml b/data/Objects365.yaml index e365c82cab08..e29803bc9e02 100644 --- a/data/Objects365.yaml +++ b/data/Objects365.yaml @@ -1,9 +1,10 @@ +# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 # Objects365 dataset https://www.objects365.org/ -# Train command: python train.py --data Objects365.yaml -# Default dataset location is next to YOLOv5: -# /parent -# /datasets/Objects365 -# /yolov5 +# YOLOv5 🚀 example usage: python train.py --data Objects365.yaml +# parent +# ├── yolov5 +# └── datasets +# └── Objects365 ← downloads here # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
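The standardized headers above also document where each dataset auto-downloads relative to the yolov5/ checkout. As a quick illustration (a minimal sketch, not part of this patch), a dataset YAML following this convention can be loaded and sanity-checked in a few lines of Python; the 'data/coco128.yaml' path below is an assumed example:

    import yaml

    # load a YOLOv5 dataset YAML that follows the header convention above
    with open('data/coco128.yaml', encoding='ascii', errors='ignore') as f:
        data = yaml.safe_load(f)  # dict with train/val paths, nc, names, download

    assert len(data['names']) == data['nc'], 'names/nc mismatch'  # same check train.py performs
    print(f"{data['nc']} classes, val images: {data['val']}")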
diff --git a/data/SKU-110K.yaml b/data/SKU-110K.yaml index 7087bb9c2893..04d8e0819e7f 100644 --- a/data/SKU-110K.yaml +++ b/data/SKU-110K.yaml @@ -1,9 +1,10 @@ +# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 # SKU-110K retail items dataset https://github.com/eg4000/SKU110K_CVPR19 -# Train command: python train.py --data SKU-110K.yaml -# Default dataset location is next to YOLOv5: -# /parent -# /datasets/SKU-110K -# /yolov5 +# YOLOv5 🚀 example usage: python train.py --data SKU-110K.yaml +# parent +# ├── yolov5 +# └── datasets +# └── SKU-110K ← downloads here # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] diff --git a/data/VOC.yaml b/data/VOC.yaml index 3d878fa67a60..40df3d9ff001 100644 --- a/data/VOC.yaml +++ b/data/VOC.yaml @@ -1,9 +1,10 @@ -# PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC/ -# Train command: python train.py --data VOC.yaml -# Default dataset location is next to YOLOv5: -# /parent -# /datasets/VOC -# /yolov5 +# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 +# PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC +# YOLOv5 🚀 example usage: python train.py --data VOC.yaml +# parent +# ├── yolov5 +# └── datasets +# └── VOC ← downloads here # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] diff --git a/data/VisDrone.yaml b/data/VisDrone.yaml index c1cd38d1e10f..e7865c5b44f2 100644 --- a/data/VisDrone.yaml +++ b/data/VisDrone.yaml @@ -1,9 +1,10 @@ +# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 # VisDrone2019-DET dataset https://github.com/VisDrone/VisDrone-Dataset -# Train command: python train.py --data VisDrone.yaml -# Default dataset location is next to YOLOv5: -# /parent -# /datasets/VisDrone -# /yolov5 +# YOLOv5 🚀 example usage: python train.py --data VisDrone.yaml +# parent +# ├── yolov5 +# └── datasets +# └── VisDrone ← downloads here # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] diff --git a/data/coco.yaml b/data/coco.yaml index c6053c984bc0..699a761c6f54 100644 --- a/data/coco.yaml +++ b/data/coco.yaml @@ -1,9 +1,10 @@ +# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 # COCO 2017 dataset http://cocodataset.org -# Train command: python train.py --data coco.yaml -# Default dataset location is next to YOLOv5: -# /parent -# /datasets/coco -# /yolov5 +# YOLOv5 🚀 example usage: python train.py --data coco.yaml +# parent +# ├── yolov5 +# └── datasets +# └── coco ← downloads here # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
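Any of these YAMLs plugs straight into training via the train.run() helper shown earlier in this series; a minimal usage sketch (argument values are illustrative, not taken from the patches):

    import train  # yolov5/train.py

    # equivalent to: python train.py --data coco128.yaml --weights yolov5s.pt --img 640 --epochs 3
    train.run(data='coco128.yaml', weights='yolov5s.pt', imgsz=640, epochs=3)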
diff --git a/data/coco128.yaml b/data/coco128.yaml index e70ad687dd88..91e4bde66465 100644 --- a/data/coco128.yaml +++ b/data/coco128.yaml @@ -1,9 +1,10 @@ -# COCO 2017 dataset http://cocodataset.org - first 128 training images -# Train command: python train.py --data coco128.yaml -# Default dataset location is next to YOLOv5: -# /parent -# /datasets/coco128 -# /yolov5 +# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 +# COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) +# YOLOv5 🚀 example usage: python train.py --data coco128.yaml +# parent +# ├── yolov5 +# └── datasets +# └── coco128 ← downloads here # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] diff --git a/data/xView.yaml b/data/xView.yaml index 5212193a0bf0..0766f9dc8776 100644 --- a/data/xView.yaml +++ b/data/xView.yaml @@ -1,10 +1,11 @@ +# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 # xView 2018 dataset https://challenge.xviewdataset.org -# ----> NOTE: DOWNLOAD DATA MANUALLY from URL above and unzip to /datasets/xView before running train command below -# Train command: python train.py --data xView.yaml -# Default dataset location is next to YOLOv5: -# /parent -# /datasets/xView -# /yolov5 +# -------- DOWNLOAD DATA MANUALLY from URL above and unzip to 'datasets/xView' before running train command! -------- +# YOLOv5 🚀 example usage: python train.py --data xView.yaml +# parent +# ├── yolov5 +# └── datasets +# └── xView ← downloads here # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] From 0ad6301c9603130e020dfe52335dbb3e37210c19 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 26 Jul 2021 15:23:33 +0200 Subject: [PATCH 203/757] Update script headers (#4163) * Update download script headers * cleanup * bug fix attempt * bug fix attempt2 * bug fix attempt3 * cleanup --- data/scripts/download_weights.sh | 9 +++++++-- data/scripts/get_coco.sh | 14 +++++++------- data/scripts/get_coco128.sh | 16 ++++++++-------- train.py | 5 +++-- utils/autoanchor.py | 10 ++++------ utils/datasets.py | 2 +- utils/loggers/wandb/log_dataset.py | 4 ++-- utils/loggers/wandb/wandb_utils.py | 6 +++--- val.py | 2 +- 9 files changed, 36 insertions(+), 32 deletions(-) diff --git a/data/scripts/download_weights.sh b/data/scripts/download_weights.sh index 6a279f1636fc..5d74f0266815 100755 --- a/data/scripts/download_weights.sh +++ b/data/scripts/download_weights.sh @@ -1,7 +1,12 @@ #!/bin/bash +# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 # Download latest models from https://github.com/ultralytics/yolov5/releases -# Usage: -# $ bash path/to/download_weights.sh +# YOLOv5 🚀 example usage: bash path/to/download_weights.sh +# parent +# └── yolov5 +# ├── yolov5s.pt ← downloads here +# ├── yolov5m.pt +# └── ... 
python - <
Date: Tue, 27 Jul 2021 18:43:32 +0530
Subject: [PATCH 204/757] Improve docstrings and run names (#4174)

---
 utils/loggers/__init__.py          |   2 +-
 utils/loggers/wandb/wandb_utils.py | 145 ++++++++++++++++++++++++++---
 2 files changed, 133 insertions(+), 14 deletions(-)

diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py
index 29dd4605341b..e65c8f9fd085 100644
--- a/utils/loggers/__init__.py
+++ b/utils/loggers/__init__.py
@@ -57,7 +57,7 @@ def start(self):
             assert 'wandb' in self.include and wandb
             run_id = torch.load(self.weights).get('wandb_id') if self.opt.resume else None
             self.opt.hyp = self.hyp  # add hyperparameters
-            self.wandb = WandbLogger(self.opt, s.stem, run_id, self.data_dict)
+            self.wandb = WandbLogger(self.opt, run_id, self.data_dict)
         except:
             self.wandb = None
 
diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py
index 581041acbdb7..cd5939155169 100644
--- a/utils/loggers/wandb/wandb_utils.py
+++ b/utils/loggers/wandb/wandb_utils.py
@@ -99,7 +99,19 @@ class WandbLogger():
     https://docs.wandb.com/guides/integrations/yolov5
     """
 
-    def __init__(self, opt, name, run_id, data_dict, job_type='Training'):
+    def __init__(self, opt, run_id, data_dict, job_type='Training'):
+        '''
+        - Initialize WandbLogger instance
+        - Upload dataset if opt.upload_dataset is True
+        - Set up training processes if job_type is 'Training'
+
+        arguments:
+        opt (namespace) -- Commandline arguments for this run
+        run_id (str) -- Run ID of W&B run to be resumed
+        data_dict (Dict) -- Dictionary containing info about the dataset to be used
+        job_type (str) -- To set the job_type for this run
+
+        '''
         # Pre-training routine --
         self.job_type = job_type
         self.wandb, self.wandb_run = wandb, None if not wandb else wandb.run
@@ -129,7 +141,7 @@ def __init__(self, opt, name, run_id, data_dict, job_type='Training'):
                                        resume="allow",
                                        project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem,
                                        entity=opt.entity,
-                                       name=name,
+                                       name=opt.name if opt.name != 'exp' else None,
                                        job_type=job_type,
                                        id=run_id,
                                        allow_val_change=True) if not wandb.run else wandb.run
@@ -145,6 +157,15 @@ def __init__(self, opt, name, run_id, data_dict, job_type='Training'):
                 self.data_dict = self.check_and_upload_dataset(opt)
 
     def check_and_upload_dataset(self, opt):
+        '''
+        Check if the dataset format is compatible and upload it as a W&B artifact
+
+        arguments:
+        opt (namespace) -- Commandline arguments for current run
+
+        returns:
+        Updated dataset info dictionary where local dataset paths are replaced by WANDB_ARTIFACT_PREFIX links.
@@ -145,6 +157,15 @@ def __init__(self, opt, name, run_id, data_dict, job_type='Training'):
                 self.data_dict = self.check_and_upload_dataset(opt)

     def check_and_upload_dataset(self, opt):
+        '''
+        Check if the dataset format is compatible and upload it as W&B artifact
+
+        arguments:
+        opt (namespace)-- Commandline arguments for current run
+
+        returns:
+        Updated dataset info dictionary where local dataset paths are replaced by WANDB_ARTIFACT_PREFIX links.
+        '''
         assert wandb, 'Install wandb to upload dataset'
         config_path = self.log_dataset_artifact(check_file(opt.data),
                                                 opt.single_cls,
@@ -155,6 +176,19 @@ def check_and_upload_dataset(self, opt):
         return wandb_data_dict

     def setup_training(self, opt, data_dict):
+        '''
+        Setup the necessary processes for training YOLO models:
+          - Attempt to download model checkpoint and dataset artifacts if opt.resume starts with WANDB_ARTIFACT_PREFIX
+          - Update data_dict, to contain info of previous run if resumed and the paths of dataset artifact if downloaded
+          - Setup log_dict, initialize bbox_interval
+
+        arguments:
+        opt (namespace) -- commandline arguments for this run
+        data_dict (Dict) -- Dataset dictionary for this run
+
+        returns:
+        data_dict (Dict) -- contains the updated info about the dataset to be used for training
+        '''
         self.log_dict, self.current_epoch = {}, 0
         self.bbox_interval = opt.bbox_interval
         if isinstance(opt.resume, str):
@@ -185,12 +219,22 @@ def setup_training(self, opt, data_dict):
             self.val_table = self.val_artifact.get("val")
             if self.val_table_path_map is None:
                 self.map_val_table_path()
-            wandb.log({"validation dataset": self.val_table})
         if opt.bbox_interval == -1:
             self.bbox_interval = opt.bbox_interval = (opt.epochs // 10) if opt.epochs > 10 else 1
         return data_dict

     def download_dataset_artifact(self, path, alias):
+        '''
+        download the dataset artifact if the path starts with WANDB_ARTIFACT_PREFIX
+
+        arguments:
+        path -- path of the dataset to be used for training
+        alias (str)-- alias of the artifact to be download/used for training
+
+        returns:
+        (str, wandb.Artifact) -- path of the downloaded dataset and its corresponding artifact object if dataset
+        is found otherwise returns (None, None)
+        '''
         if isinstance(path, str) and path.startswith(WANDB_ARTIFACT_PREFIX):
             artifact_path = Path(remove_prefix(path, WANDB_ARTIFACT_PREFIX) + ":" + alias)
             dataset_artifact = wandb.use_artifact(artifact_path.as_posix().replace("\\", "/"))
@@ -200,6 +244,12 @@ def download_dataset_artifact(self, path, alias):
         return None, None

     def download_model_artifact(self, opt):
+        '''
+        download the model checkpoint artifact if the resume path starts with WANDB_ARTIFACT_PREFIX
+
+        arguments:
+        opt (namespace) -- Commandline arguments for this run
+        '''
         if opt.resume.startswith(WANDB_ARTIFACT_PREFIX):
             model_artifact = wandb.use_artifact(remove_prefix(opt.resume, WANDB_ARTIFACT_PREFIX) + ":latest")
             assert model_artifact is not None, 'Error: W&B model artifact doesn\'t exist'
@@ -212,6 +262,16 @@ def download_model_artifact(self, opt):
         return None, None
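The two download helpers above are what let `--resume wandb-artifact://...` work; a hedged sketch of the path handling they perform, with made-up artifact names and a stand-in for the `remove_prefix` helper used elsewhere in this file:

```
# Sketch of the artifact-path handling in download_dataset_artifact() above (values are illustrative)
from pathlib import Path

WANDB_ARTIFACT_PREFIX = 'wandb-artifact://'

def remove_prefix(from_string, prefix=WANDB_ARTIFACT_PREFIX):
    # stand-in for the helper of the same name used by wandb_utils.py
    return from_string[len(prefix):]

path, alias = WANDB_ARTIFACT_PREFIX + 'entity/project/val', 'latest'
artifact_path = Path(remove_prefix(path, WANDB_ARTIFACT_PREFIX) + ':' + alias)
print(artifact_path.as_posix().replace('\\', '/'))  # entity/project/val:latest
```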
     def log_model(self, path, opt, epoch, fitness_score, best_model=False):
+        '''
+        Log the model checkpoint as W&B artifact
+
+        arguments:
+        path (Path) -- Path of directory containing the checkpoints
+        opt (namespace) -- Command line arguments for this run
+        epoch (int) -- Current epoch number
+        fitness_score (float) -- fitness score for current epoch
+        best_model (boolean) -- Boolean representing if the current checkpoint is the best yet.
+        '''
         model_artifact = wandb.Artifact('run_' + wandb.run.id + '_model', type='model', metadata={
             'original_url': str(path),
             'epochs_trained': epoch + 1,
@@ -226,6 +286,19 @@ def log_model(self, path, opt, epoch, fitness_score, best_model=False):
         print("Saving model artifact on epoch ", epoch + 1)

     def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=False):
+        '''
+        Log the dataset as W&B artifact and return the new data file with W&B links
+
+        arguments:
+        data_file (str) -- the .yaml file with information about the dataset like - path, classes etc.
+        single_cls (boolean) -- train multi-class data as single-class
+        project (str) -- project name. Used to construct the artifact path
+        overwrite_config (boolean) -- overwrites the data.yaml file if set to true otherwise creates a new
+        file with _wandb postfix. Eg -> data_wandb.yaml
+
+        returns:
+        the new .yaml file with artifact links. it can be used to start training directly from artifacts
+        '''
         with open(data_file, encoding='ascii', errors='ignore') as f:
             data = yaml.safe_load(f)  # data dict
         check_dataset(data)
@@ -257,12 +330,27 @@ def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=
         return path

     def map_val_table_path(self):
+        '''
+        Map the validation dataset Table like name of file -> its id in the W&B Table.
+        Useful for - referencing artifacts for evaluation.
+        '''
         self.val_table_path_map = {}
         print("Mapping dataset")
         for i, data in enumerate(tqdm(self.val_table.data)):
             self.val_table_path_map[data[3]] = data[0]

     def create_dataset_table(self, dataset, class_to_id, name='dataset'):
+        '''
+        Create and return W&B artifact containing W&B Table of the dataset.
+
+        arguments:
+        dataset (LoadImagesAndLabels) -- instance of LoadImagesAndLabels class used to iterate over the data to build Table
+        class_to_id (dict(int, str)) -- hash map that maps class ids to labels
+        name (str) -- name of the artifact
+
+        returns:
+        dataset artifact to be logged or used
+        '''
         # TODO: Explore multiprocessing to split this loop in parallel | This is essential for speeding up the logging
         artifact = wandb.Artifact(name=name, type="dataset")
         img_files = tqdm([dataset.path]) if isinstance(dataset.path, str) and Path(dataset.path).is_dir() else None
@@ -294,6 +382,14 @@ def create_dataset_table(self, dataset, class_to_id, name='dataset'):
         return artifact

     def log_training_progress(self, predn, path, names):
+        '''
+        Build evaluation Table. Uses reference from validation dataset table.
+
+        arguments:
+        predn (list): list of predictions in the native space in the format - [xmin, ymin, xmax, ymax, confidence, class]
+        path (str): local path of the current evaluation image
+        names (dict(int, str)): hash map that maps class ids to labels
+        '''
         class_set = wandb.Classes([{'id': id, 'name': name} for id, name in names.items()])
         box_data = []
         total_conf = 0
@@ -316,25 +412,45 @@ def log_training_progress(self, predn, path, names):
             )
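For orientation, the box dictionaries assembled in log_training_progress() above (and in val_one_image() below) follow the wandb.Image box schema; a self-contained sketch with made-up values:

```
# Illustrative wandb bounding-box payload, mirroring the structure built in this file
names = {0: 'person'}
xyxy, conf, cls = [0.1, 0.2, 0.6, 0.9], 0.87, 0
box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
             "class_id": int(cls),
             "box_caption": "%s %.3f" % (names[cls], conf),
             "scores": {"class_score": conf},
             "domain": "pixel"}]
boxes = {"predictions": {"box_data": box_data, "class_labels": names}}  # inference-space
print(boxes["predictions"]["box_data"][0]["box_caption"])  # person 0.870
```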
     def val_one_image(self, pred, predn, path, names, im):
+        '''
+        Log validation data for one image. Updates the result Table if the validation dataset is uploaded and logs the bbox media panel
+
+        arguments:
+        pred (list): list of scaled predictions in the format - [xmin, ymin, xmax, ymax, confidence, class]
+        predn (list): list of predictions in the native space - [xmin, ymin, xmax, ymax, confidence, class]
+        path (str): local path of the current evaluation image
+        '''
         if self.val_table and self.result_table:  # Log Table if Val dataset is uploaded as artifact
             self.log_training_progress(predn, path, names)
-        else:  # Default to bbox media panel if Val artifact not found
-            if len(self.bbox_media_panel_images) < self.max_imgs_to_log and self.current_epoch > 0:
-                if self.current_epoch % self.bbox_interval == 0:
-                    box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
-                                 "class_id": int(cls),
-                                 "box_caption": "%s %.3f" % (names[cls], conf),
-                                 "scores": {"class_score": conf},
-                                 "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()]
-                    boxes = {"predictions": {"box_data": box_data, "class_labels": names}}  # inference-space
-                    self.bbox_media_panel_images.append(wandb.Image(im, boxes=boxes, caption=path.name))
+
+        if len(self.bbox_media_panel_images) < self.max_imgs_to_log and self.current_epoch > 0:
+            if self.current_epoch % self.bbox_interval == 0:
+                box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
+                             "class_id": int(cls),
+                             "box_caption": "%s %.3f" % (names[cls], conf),
+                             "scores": {"class_score": conf},
+                             "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()]
+                boxes = {"predictions": {"box_data": box_data, "class_labels": names}}  # inference-space
+                self.bbox_media_panel_images.append(wandb.Image(im, boxes=boxes, caption=path.name))

     def log(self, log_dict):
+        '''
+        save the metrics to the logging dictionary
+
+        arguments:
+        log_dict (Dict) -- metrics/media to be logged in current step
+        '''
         if self.wandb_run:
             for key, value in log_dict.items():
                 self.log_dict[key] = value

     def end_epoch(self, best_result=False):
+        '''
+        commit the log_dict, model artifacts and Tables to W&B and flush the log_dict.
+ + arguments: + best_result (boolean): Boolean representing if the result of this evaluation is best or not + ''' if self.wandb_run: with all_logging_disabled(): if self.bbox_media_panel_images: @@ -352,6 +468,9 @@ def end_epoch(self, best_result=False): self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") def finish_run(self): + ''' + Log metrics if any and finish the current W&B run + ''' if self.wandb_run: if self.log_dict: with all_logging_disabled(): From 3fef11706c384a9b73e6098b006a57bbee7643c7 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 27 Jul 2021 23:23:41 +0200 Subject: [PATCH 205/757] Update comments header (#4184) --- data/Argoverse_HD.yaml | 4 ++-- data/GlobalWheat2020.yaml | 4 ++-- data/Objects365.yaml | 4 ++-- data/SKU-110K.yaml | 4 ++-- data/VOC.yaml | 4 ++-- data/VisDrone.yaml | 4 ++-- data/coco.yaml | 4 ++-- data/coco128.yaml | 4 ++-- data/scripts/download_weights.sh | 4 ++-- data/scripts/get_coco.sh | 4 ++-- data/scripts/get_coco128.sh | 4 ++-- data/xView.yaml | 4 ++-- 12 files changed, 24 insertions(+), 24 deletions(-) diff --git a/data/Argoverse_HD.yaml b/data/Argoverse_HD.yaml index 90721cc0b9fb..e379b1ec99df 100644 --- a/data/Argoverse_HD.yaml +++ b/data/Argoverse_HD.yaml @@ -1,6 +1,6 @@ -# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 +# YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 # Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ -# YOLOv5 🚀 example usage: python train.py --data Argoverse_HD.yaml +# Example usage: python train.py --data Argoverse_HD.yaml # parent # ├── yolov5 # └── datasets diff --git a/data/GlobalWheat2020.yaml b/data/GlobalWheat2020.yaml index 58b55114c722..842456047953 100644 --- a/data/GlobalWheat2020.yaml +++ b/data/GlobalWheat2020.yaml @@ -1,6 +1,6 @@ -# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 +# YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 # Global Wheat 2020 dataset http://www.global-wheat.com/ -# YOLOv5 🚀 example usage: python train.py --data GlobalWheat2020.yaml +# Example usage: python train.py --data GlobalWheat2020.yaml # parent # ├── yolov5 # └── datasets diff --git a/data/Objects365.yaml b/data/Objects365.yaml index e29803bc9e02..52577581d7bb 100644 --- a/data/Objects365.yaml +++ b/data/Objects365.yaml @@ -1,6 +1,6 @@ -# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 +# YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 # Objects365 dataset https://www.objects365.org/ -# YOLOv5 🚀 example usage: python train.py --data Objects365.yaml +# Example usage: python train.py --data Objects365.yaml # parent # ├── yolov5 # └── datasets diff --git a/data/SKU-110K.yaml b/data/SKU-110K.yaml index 04d8e0819e7f..01bf36c0d870 100644 --- a/data/SKU-110K.yaml +++ b/data/SKU-110K.yaml @@ -1,6 +1,6 @@ -# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 +# YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 # SKU-110K retail items dataset https://github.com/eg4000/SKU110K_CVPR19 -# YOLOv5 🚀 example usage: python train.py --data SKU-110K.yaml +# Example usage: python train.py --data SKU-110K.yaml # parent # ├── yolov5 # └── datasets diff --git a/data/VOC.yaml b/data/VOC.yaml index 40df3d9ff001..55f39d852d31 100644 --- a/data/VOC.yaml +++ b/data/VOC.yaml @@ -1,6 +1,6 @@ -# Copyright Ultralytics https://ultralytics.com, licensed under 
GNU GPL v3.0 +# YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 # PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC -# YOLOv5 🚀 example usage: python train.py --data VOC.yaml +# Example usage: python train.py --data VOC.yaml # parent # ├── yolov5 # └── datasets diff --git a/data/VisDrone.yaml b/data/VisDrone.yaml index e7865c5b44f2..12e0e7c4a009 100644 --- a/data/VisDrone.yaml +++ b/data/VisDrone.yaml @@ -1,6 +1,6 @@ -# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 +# YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 # VisDrone2019-DET dataset https://github.com/VisDrone/VisDrone-Dataset -# YOLOv5 🚀 example usage: python train.py --data VisDrone.yaml +# Example usage: python train.py --data VisDrone.yaml # parent # ├── yolov5 # └── datasets diff --git a/data/coco.yaml b/data/coco.yaml index 699a761c6f54..cab1a0171963 100644 --- a/data/coco.yaml +++ b/data/coco.yaml @@ -1,6 +1,6 @@ -# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 +# YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 # COCO 2017 dataset http://cocodataset.org -# YOLOv5 🚀 example usage: python train.py --data coco.yaml +# Example usage: python train.py --data coco.yaml # parent # ├── yolov5 # └── datasets diff --git a/data/coco128.yaml b/data/coco128.yaml index 91e4bde66465..6902eb9397a1 100644 --- a/data/coco128.yaml +++ b/data/coco128.yaml @@ -1,6 +1,6 @@ -# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 +# YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 # COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) -# YOLOv5 🚀 example usage: python train.py --data coco128.yaml +# Example usage: python train.py --data coco128.yaml # parent # ├── yolov5 # └── datasets diff --git a/data/scripts/download_weights.sh b/data/scripts/download_weights.sh index 5d74f0266815..013036978c07 100755 --- a/data/scripts/download_weights.sh +++ b/data/scripts/download_weights.sh @@ -1,7 +1,7 @@ #!/bin/bash -# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 +# YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 # Download latest models from https://github.com/ultralytics/yolov5/releases -# YOLOv5 🚀 example usage: bash path/to/download_weights.sh +# Example usage: bash path/to/download_weights.sh # parent # └── yolov5 # ├── yolov5s.pt ← downloads here diff --git a/data/scripts/get_coco.sh b/data/scripts/get_coco.sh index b3f838f533ab..1f484beee34c 100755 --- a/data/scripts/get_coco.sh +++ b/data/scripts/get_coco.sh @@ -1,7 +1,7 @@ #!/bin/bash -# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 +# YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 # Download COCO 2017 dataset http://cocodataset.org -# YOLOv5 🚀 example usage: bash data/scripts/get_coco.sh +# Example usage: bash data/scripts/get_coco.sh # parent # ├── yolov5 # └── datasets diff --git a/data/scripts/get_coco128.sh b/data/scripts/get_coco128.sh index 26bb8ad09e14..3d705890b56d 100644 --- a/data/scripts/get_coco128.sh +++ b/data/scripts/get_coco128.sh @@ -1,7 +1,7 @@ #!/bin/bash -# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 +# YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 # Download COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) 
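(All of the dataset files touched by this header patch are plain YAML that train.py parses; a minimal sketch of reading one directly, assuming a local clone with data/coco128.yaml present:)

```
# Minimal sketch: load a dataset YAML carrying the header shown above
import yaml

with open('data/coco128.yaml', encoding='ascii', errors='ignore') as f:  # same open() pattern used in this series
    data = yaml.safe_load(f)  # data dict
print(data['nc'], data['names'][0])  # 80 person
```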
-# YOLOv5 🚀 example usage: bash data/scripts/get_coco128.sh +# Example usage: bash data/scripts/get_coco128.sh # parent # ├── yolov5 # └── datasets diff --git a/data/xView.yaml b/data/xView.yaml index 0766f9dc8776..f4f27bfbc8ec 100644 --- a/data/xView.yaml +++ b/data/xView.yaml @@ -1,7 +1,7 @@ -# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 +# YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 # xView 2018 dataset https://challenge.xviewdataset.org # -------- DOWNLOAD DATA MANUALLY from URL above and unzip to 'datasets/xView' before running train command! -------- -# YOLOv5 🚀 example usage: python train.py --data xView.yaml +# Example usage: python train.py --data xView.yaml # parent # ├── yolov5 # └── datasets From 5d66e487236daf4cbf816704453d0cc4905ee463 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 28 Jul 2021 02:04:10 +0200 Subject: [PATCH 206/757] Train from `--data path/to/dataset.zip` feature (#4185) * Train from `--data path/to/dataset.zip` feature * Update dataset_stats() * cleanup * cleanup2 --- data/{Argoverse_HD.yaml => Argoverse.yaml} | 2 +- hubconf.py | 2 +- models/experimental.py | 2 +- train.py | 11 ++-- utils/datasets.py | 66 ++++++++++++++++------ utils/{google_utils.py => downloads.py} | 6 +- utils/general.py | 40 +++++++++---- utils/loggers/wandb/wandb_utils.py | 62 ++++++++++---------- val.py | 4 +- 9 files changed, 122 insertions(+), 73 deletions(-) rename data/{Argoverse_HD.yaml => Argoverse.yaml} (97%) rename utils/{google_utils.py => downloads.py} (98%) diff --git a/data/Argoverse_HD.yaml b/data/Argoverse.yaml similarity index 97% rename from data/Argoverse_HD.yaml rename to data/Argoverse.yaml index e379b1ec99df..c42624c5783f 100644 --- a/data/Argoverse_HD.yaml +++ b/data/Argoverse.yaml @@ -1,6 +1,6 @@ # YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 # Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ -# Example usage: python train.py --data Argoverse_HD.yaml +# Example usage: python train.py --data Argoverse.yaml # parent # ├── yolov5 # └── datasets diff --git a/hubconf.py b/hubconf.py index 55536c3a42f3..7ef512655ae2 100644 --- a/hubconf.py +++ b/hubconf.py @@ -27,7 +27,7 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo from models.yolo import Model, attempt_load from utils.general import check_requirements, set_logging - from utils.google_utils import attempt_download + from utils.downloads import attempt_download from utils.torch_utils import select_device file = Path(__file__).absolute() diff --git a/models/experimental.py b/models/experimental.py index 0d996d913b0c..276ca954b173 100644 --- a/models/experimental.py +++ b/models/experimental.py @@ -5,7 +5,7 @@ import torch.nn as nn from models.common import Conv, DWConv -from utils.google_utils import attempt_download +from utils.downloads import attempt_download class CrossConv(nn.Module): diff --git a/train.py b/train.py index bd1fa9c74328..020883ce98ba 100644 --- a/train.py +++ b/train.py @@ -35,7 +35,7 @@ from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \ strip_optimizer, get_latest_run, check_dataset, check_file, check_git_status, check_img_size, \ check_requirements, print_mutation, set_logging, one_cycle, colorstr -from utils.google_utils import attempt_download +from utils.downloads import attempt_download from utils.loss import ComputeLoss from utils.plots import 
plot_labels, plot_evolution from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, de_parallel @@ -78,9 +78,9 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary plots = not evolve # create plots cuda = device.type != 'cpu' init_seeds(1 + RANK) - with open(data, encoding='ascii', errors='ignore') as f: - data_dict = yaml.safe_load(f) - + with torch_distributed_zero_first(RANK): + data_dict = check_dataset(data) # check + train_path, val_path = data_dict['train'], data_dict['val'] nc = 1 if single_cls else int(data_dict['nc']) # number of classes names = ['item'] if single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names assert len(names) == nc, f'{len(names)} names found for nc={nc} dataset in {data}' # check @@ -106,9 +106,6 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary LOGGER.info(f'Transferred {len(csd)}/{len(model.state_dict())} items from {weights}') # report else: model = Model(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create - with torch_distributed_zero_first(RANK): - check_dataset(data_dict) # check - train_path, val_path = data_dict['train'], data_dict['val'] # Freeze freeze = [] # parameter names to freeze (full or partial) diff --git a/utils/datasets.py b/utils/datasets.py index 5b5ded4bbc41..fffe39a61459 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -884,11 +884,11 @@ def verify_image_label(args): return [None, None, None, None, nm, nf, ne, nc, msg] -def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False): +def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False, profile=False, hub=False): """ Return dataset statistics dictionary with images and instances counts per split per class - Usage1: from utils.datasets import *; dataset_stats('coco128.yaml', verbose=True) - Usage2: from utils.datasets import *; dataset_stats('../datasets/coco128.zip', verbose=True) - + To run in parent directory: export PYTHONPATH="$PWD/yolov5" + Usage1: from utils.datasets import *; dataset_stats('coco128.yaml', autodownload=True) + Usage2: from utils.datasets import *; dataset_stats('../datasets/coco128_with_yaml.zip') Arguments path: Path to data.yaml or data.zip (with data.yaml inside data.zip) autodownload: Attempt to download dataset if not found locally @@ -897,35 +897,42 @@ def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False): def round_labels(labels): # Update labels to integer class and 6 decimal place floats - return [[int(c), *[round(x, 6) for x in points]] for c, *points in labels] + return [[int(c), *[round(x, 4) for x in points]] for c, *points in labels] def unzip(path): # Unzip data.zip TODO: CONSTRAINT: path/to/abc.zip MUST unzip to 'path/to/abc/' if str(path).endswith('.zip'): # path is data.zip + assert Path(path).is_file(), f'Error unzipping {path}, file not found' assert os.system(f'unzip -q {path} -d {path.parent}') == 0, f'Error unzipping {path}' - data_dir = path.with_suffix('') # dataset directory - return True, data_dir, list(data_dir.rglob('*.yaml'))[0] # zipped, data_dir, yaml_path + dir = path.with_suffix('') # dataset directory + return True, str(dir), next(dir.rglob('*.yaml')) # zipped, data_dir, yaml_path else: # path is data.yaml return False, None, path + def hub_ops(f, max_dim=1920): + # HUB ops for 1 image 'f' + im = Image.open(f) + r = max_dim / max(im.height, im.width) # ratio + if r < 1.0: # image too large + im = im.resize((int(im.width * r), int(im.height * r))) + im.save(im_dir / 
Path(f).name, quality=75) # save + zipped, data_dir, yaml_path = unzip(Path(path)) with open(check_file(yaml_path), encoding='ascii', errors='ignore') as f: data = yaml.safe_load(f) # data dict if zipped: data['path'] = data_dir # TODO: should this be dir.resolve()? check_dataset(data, autodownload) # download dataset if missing - nc = data['nc'] # number of classes - stats = {'nc': nc, 'names': data['names']} # statistics dictionary + hub_dir = Path(data['path'] + ('-hub' if hub else '')) + stats = {'nc': data['nc'], 'names': data['names']} # statistics dictionary for split in 'train', 'val', 'test': if data.get(split) is None: stats[split] = None # i.e. no test set continue x = [] - dataset = LoadImagesAndLabels(data[split], augment=False, rect=True) # load dataset - if split == 'train': - cache_path = Path(dataset.label_files[0]).parent.with_suffix('.cache') # *.cache path + dataset = LoadImagesAndLabels(data[split]) # load dataset for label in tqdm(dataset.labels, total=dataset.n, desc='Statistics'): - x.append(np.bincount(label[:, 0].astype(int), minlength=nc)) + x.append(np.bincount(label[:, 0].astype(int), minlength=data['nc'])) x = np.array(x) # shape(128x80) stats[split] = {'instance_stats': {'total': int(x.sum()), 'per_class': x.sum(0).tolist()}, 'image_stats': {'total': dataset.n, 'unlabelled': int(np.all(x == 0, 1).sum()), @@ -933,10 +940,37 @@ def unzip(path): 'labels': [{str(Path(k).name): round_labels(v.tolist())} for k, v in zip(dataset.img_files, dataset.labels)]} + if hub: + im_dir = hub_dir / 'images' + im_dir.mkdir(parents=True, exist_ok=True) + for _ in tqdm(ThreadPool(NUM_THREADS).imap(hub_ops, dataset.img_files), total=dataset.n, desc='HUB Ops'): + pass + + # Profile + stats_path = hub_dir / 'stats.json' + if profile: + for _ in range(1): + file = stats_path.with_suffix('.npy') + t1 = time.time() + np.save(file, stats) + t2 = time.time() + x = np.load(file, allow_pickle=True) + print(f'stats.npy times: {time.time() - t2:.3f}s read, {t2 - t1:.3f}s write') + + file = stats_path.with_suffix('.json') + t1 = time.time() + with open(file, 'w') as f: + json.dump(stats, f) # save stats *.json + t2 = time.time() + with open(file, 'r') as f: + x = json.load(f) # load hyps dict + print(f'stats.json times: {time.time() - t2:.3f}s read, {t2 - t1:.3f}s write') + # Save, print and return - with open(cache_path.with_suffix('.json'), 'w') as f: - json.dump(stats, f) # save stats *.json + if hub: + print(f'Saving {stats_path.resolve()}...') + with open(stats_path, 'w') as f: + json.dump(stats, f) # save stats.json if verbose: print(json.dumps(stats, indent=2, sort_keys=False)) - # print(yaml.dump([stats], sort_keys=False, default_flow_style=False)) return stats diff --git a/utils/google_utils.py b/utils/downloads.py similarity index 98% rename from utils/google_utils.py rename to utils/downloads.py index aefc7de2db2e..00156962380b 100644 --- a/utils/google_utils.py +++ b/utils/downloads.py @@ -1,4 +1,4 @@ -# Google utils: https://cloud.google.com/storage/docs/reference/libraries +# Download utils import os import platform @@ -115,6 +115,10 @@ def get_token(cookie="./cookie"): return line.split()[-1] return "" + +# Google utils: https://cloud.google.com/storage/docs/reference/libraries ---------------------------------------------- +# +# # def upload_blob(bucket_name, source_file_name, destination_blob_name): # # Uploads a file to a bucket # # https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python diff --git a/utils/general.py b/utils/general.py index 
db81f7679cd7..6b00ddf2ff72 100755 --- a/utils/general.py +++ b/utils/general.py @@ -24,7 +24,7 @@ import torchvision import yaml -from utils.google_utils import gsutil_getsize +from utils.downloads import gsutil_getsize from utils.metrics import box_iou, fitness from utils.torch_utils import init_torch_seeds @@ -224,16 +224,30 @@ def check_file(file): def check_dataset(data, autodownload=True): - # Download dataset if not found locally - path = Path(data.get('path', '')) # optional 'path' field - if path: - for k in 'train', 'val', 'test': - if data.get(k): # prepend path - data[k] = str(path / data[k]) if isinstance(data[k], str) else [str(path / x) for x in data[k]] + # Download and/or unzip dataset if not found locally + # Usage: https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128_with_yaml.zip + + # Download (optional) + extract_dir = '' + if isinstance(data, (str, Path)) and str(data).endswith('.zip'): # i.e. gs://bucket/dir/coco128.zip + download(data, dir='../datasets', unzip=True, delete=False, curl=False, threads=1) + data = next((Path('../datasets') / Path(data).stem).rglob('*.yaml')) + extract_dir, autodownload = data.parent, False + + # Read yaml (optional) + if isinstance(data, (str, Path)): + with open(data, encoding='ascii', errors='ignore') as f: + data = yaml.safe_load(f) # dictionary + + # Parse yaml + path = extract_dir or Path(data.get('path') or '') # optional 'path' default to '.' + for k in 'train', 'val', 'test': + if data.get(k): # prepend path + data[k] = str(path / data[k]) if isinstance(data[k], str) else [str(path / x) for x in data[k]] assert 'nc' in data, "Dataset 'nc' key missing." if 'names' not in data: - data['names'] = [str(i) for i in range(data['nc'])] # assign class names if missing + data['names'] = [f'class{i}' for i in range(data['nc'])] # assign class names if missing train, val, test, s = [data.get(x) for x in ('train', 'val', 'test', 'download')] if val: val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path @@ -256,13 +270,17 @@ def check_dataset(data, autodownload=True): else: raise Exception('Dataset not found.') + return data # dictionary + def download(url, dir='.', unzip=True, delete=True, curl=False, threads=1): - # Multi-threaded file download and unzip function + # Multi-threaded file download and unzip function, used in data.yaml for autodownload def download_one(url, dir): # Download 1 file f = dir / Path(url).name # filename - if not f.exists(): + if Path(url).is_file(): # exists in current path + Path(url).rename(f) # move to dir + elif not f.exists(): print(f'Downloading {url} to {f}...') if curl: os.system(f"curl -L '{url}' -o '{f}' --retry 9 -C -") # curl download, retry and resume on fail @@ -286,7 +304,7 @@ def download_one(url, dir): pool.close() pool.join() else: - for u in tuple(url) if isinstance(url, str) else url: + for u in [url] if isinstance(url, (str, Path)) else url: download_one(u, dir) diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index cd5939155169..f4f228df4e24 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -100,7 +100,7 @@ class WandbLogger(): """ def __init__(self, opt, run_id, data_dict, job_type='Training'): - ''' + """ - Initialize WandbLogger instance - Upload dataset if opt.upload_dataset is True - Setup trainig processes if job_type is 'Training' @@ -111,7 +111,7 @@ def __init__(self, opt, run_id, data_dict, job_type='Training'): data_dict (Dict) -- Dictionary conataining 
info about the dataset to be used job_type (str) -- To set the job_type for this run - ''' + """ # Pre-training routine -- self.job_type = job_type self.wandb, self.wandb_run = wandb, None if not wandb else wandb.run @@ -157,7 +157,7 @@ def __init__(self, opt, run_id, data_dict, job_type='Training'): self.data_dict = self.check_and_upload_dataset(opt) def check_and_upload_dataset(self, opt): - ''' + """ Check if the dataset format is compatible and upload it as W&B artifact arguments: @@ -165,7 +165,7 @@ def check_and_upload_dataset(self, opt): returns: Updated dataset info dictionary where local dataset paths are replaced by WAND_ARFACT_PREFIX links. - ''' + """ assert wandb, 'Install wandb to upload dataset' config_path = self.log_dataset_artifact(check_file(opt.data), opt.single_cls, @@ -176,7 +176,7 @@ def check_and_upload_dataset(self, opt): return wandb_data_dict def setup_training(self, opt, data_dict): - ''' + """ Setup the necessary processes for training YOLO models: - Attempt to download model checkpoint and dataset artifacts if opt.resume stats with WANDB_ARTIFACT_PREFIX - Update data_dict, to contain info of previous run if resumed and the paths of dataset artifact if downloaded @@ -188,7 +188,7 @@ def setup_training(self, opt, data_dict): returns: data_dict (Dict) -- contains the updated info about the dataset to be used for training - ''' + """ self.log_dict, self.current_epoch = {}, 0 self.bbox_interval = opt.bbox_interval if isinstance(opt.resume, str): @@ -224,7 +224,7 @@ def setup_training(self, opt, data_dict): return data_dict def download_dataset_artifact(self, path, alias): - ''' + """ download the model checkpoint artifact if the path starts with WANDB_ARTIFACT_PREFIX arguments: @@ -234,7 +234,7 @@ def download_dataset_artifact(self, path, alias): returns: (str, wandb.Artifact) -- path of the downladed dataset and it's corresponding artifact object if dataset is found otherwise returns (None, None) - ''' + """ if isinstance(path, str) and path.startswith(WANDB_ARTIFACT_PREFIX): artifact_path = Path(remove_prefix(path, WANDB_ARTIFACT_PREFIX) + ":" + alias) dataset_artifact = wandb.use_artifact(artifact_path.as_posix().replace("\\", "/")) @@ -244,12 +244,12 @@ def download_dataset_artifact(self, path, alias): return None, None def download_model_artifact(self, opt): - ''' + """ download the model checkpoint artifact if the resume path starts with WANDB_ARTIFACT_PREFIX arguments: opt (namespace) -- Commandline arguments for this run - ''' + """ if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): model_artifact = wandb.use_artifact(remove_prefix(opt.resume, WANDB_ARTIFACT_PREFIX) + ":latest") assert model_artifact is not None, 'Error: W&B model artifact doesn\'t exist' @@ -262,7 +262,7 @@ def download_model_artifact(self, opt): return None, None def log_model(self, path, opt, epoch, fitness_score, best_model=False): - ''' + """ Log the model checkpoint as W&B artifact arguments: @@ -271,7 +271,7 @@ def log_model(self, path, opt, epoch, fitness_score, best_model=False): epoch (int) -- Current epoch number fitness_score (float) -- fitness score for current epoch best_model (boolean) -- Boolean representing if the current checkpoint is the best yet. 
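(For context, the utils/general.py hunk earlier in this patch changes check_dataset() from verifying an already-loaded dict to accepting a path or dict, downloading/unzipping if needed, and returning the parsed dict; a hedged sketch of the new call pattern with illustrative paths:)

```
# New check_dataset() contract introduced by this patch (paths are illustrative)
from utils.general import check_dataset

data = check_dataset('data/coco128.yaml')          # parse, verify and return the data dict
# data = check_dataset('../datasets/coco128.zip')  # zipped datasets are unzipped first
print(data['train'], data['nc'])
```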
- ''' + """ model_artifact = wandb.Artifact('run_' + wandb.run.id + '_model', type='model', metadata={ 'original_url': str(path), 'epochs_trained': epoch + 1, @@ -286,7 +286,7 @@ def log_model(self, path, opt, epoch, fitness_score, best_model=False): print("Saving model artifact on epoch ", epoch + 1) def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=False): - ''' + """ Log the dataset as W&B artifact and return the new data file with W&B links arguments: @@ -298,10 +298,8 @@ def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config= returns: the new .yaml file with artifact links. it can be used to start training directly from artifacts - ''' - with open(data_file, encoding='ascii', errors='ignore') as f: - data = yaml.safe_load(f) # data dict - check_dataset(data) + """ + data = check_dataset(data_file) # parse and check nc, names = (1, ['item']) if single_cls else (int(data['nc']), data['names']) names = {k: v for k, v in enumerate(names)} # to index dictionary self.train_artifact = self.create_dataset_table(LoadImagesAndLabels( @@ -330,17 +328,17 @@ def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config= return path def map_val_table_path(self): - ''' + """ Map the validation dataset Table like name of file -> it's id in the W&B Table. Useful for - referencing artifacts for evaluation. - ''' + """ self.val_table_path_map = {} print("Mapping dataset") for i, data in enumerate(tqdm(self.val_table.data)): self.val_table_path_map[data[3]] = data[0] def create_dataset_table(self, dataset, class_to_id, name='dataset'): - ''' + """ Create and return W&B artifact containing W&B Table of the dataset. arguments: @@ -350,7 +348,7 @@ def create_dataset_table(self, dataset, class_to_id, name='dataset'): returns: dataset artifact to be logged or used - ''' + """ # TODO: Explore multiprocessing to slpit this loop parallely| This is essential for speeding up the the logging artifact = wandb.Artifact(name=name, type="dataset") img_files = tqdm([dataset.path]) if isinstance(dataset.path, str) and Path(dataset.path).is_dir() else None @@ -382,14 +380,14 @@ def create_dataset_table(self, dataset, class_to_id, name='dataset'): return artifact def log_training_progress(self, predn, path, names): - ''' + """ Build evaluation Table. Uses reference from validation dataset table. arguments: predn (list): list of predictions in the native space in the format - [xmin, ymin, xmax, ymax, confidence, class] path (str): local path of the current evaluation image names (dict(int, str)): hash map that maps class ids to labels - ''' + """ class_set = wandb.Classes([{'id': id, 'name': name} for id, name in names.items()]) box_data = [] total_conf = 0 @@ -412,17 +410,17 @@ def log_training_progress(self, predn, path, names): ) def val_one_image(self, pred, predn, path, names, im): - ''' + """ Log validation data for one image. 
updates the result Table if validation dataset is uploaded and log bbox media panel arguments: pred (list): list of scaled predictions in the format - [xmin, ymin, xmax, ymax, confidence, class] predn (list): list of predictions in the native space - [xmin, ymin, xmax, ymax, confidence, class] path (str): local path of the current evaluation image - ''' + """ if self.val_table and self.result_table: # Log Table if Val dataset is uploaded as artifact self.log_training_progress(predn, path, names) - + if len(self.bbox_media_panel_images) < self.max_imgs_to_log and self.current_epoch > 0: if self.current_epoch % self.bbox_interval == 0: box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, @@ -434,23 +432,23 @@ def val_one_image(self, pred, predn, path, names, im): self.bbox_media_panel_images.append(wandb.Image(im, boxes=boxes, caption=path.name)) def log(self, log_dict): - ''' + """ save the metrics to the logging dictionary arguments: log_dict (Dict) -- metrics/media to be logged in current step - ''' + """ if self.wandb_run: for key, value in log_dict.items(): self.log_dict[key] = value def end_epoch(self, best_result=False): - ''' + """ commit the log_dict, model artifacts and Tables to W&B and flush the log_dict. arguments: best_result (boolean): Boolean representing if the result of this evaluation is best or not - ''' + """ if self.wandb_run: with all_logging_disabled(): if self.bbox_media_panel_images: @@ -468,9 +466,9 @@ def end_epoch(self, best_result=False): self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") def finish_run(self): - ''' + """ Log metrics if any and finish the current W&B run - ''' + """ if self.wandb_run: if self.log_dict: with all_logging_disabled(): diff --git a/val.py b/val.py index c58bcdb209c2..ee2287644b92 100644 --- a/val.py +++ b/val.py @@ -123,9 +123,7 @@ def run(data, # model = nn.DataParallel(model) # Data - with open(data, encoding='ascii', errors='ignore') as f: - data = yaml.safe_load(f) - check_dataset(data) # check + data = check_dataset(data) # check # Half half &= device.type != 'cpu' # half precision only supported on CUDA From 1f31b7c503867b6e8f493cf76ed2da490f834fd4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 28 Jul 2021 15:04:30 +0200 Subject: [PATCH 207/757] Create yolov5-bifpn.yaml (#4195) --- models/hub/yolov5-bifpn.yaml | 40 ++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) create mode 100644 models/hub/yolov5-bifpn.yaml diff --git a/models/hub/yolov5-bifpn.yaml b/models/hub/yolov5-bifpn.yaml new file mode 100644 index 000000000000..f1dd7c601b9c --- /dev/null +++ b/models/hub/yolov5-bifpn.yaml @@ -0,0 +1,40 @@ +# Parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple +anchors: + - [ 10,13, 16,30, 33,23 ] # P3/8 + - [ 30,61, 62,45, 59,119 ] # P4/16 + - [ 116,90, 156,198, 373,326 ] # P5/32 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 + [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 + [ -1, 3, Bottleneck, [ 128 ] ], + [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 + [ -1, 9, BottleneckCSP, [ 256 ] ], + [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 + [ -1, 9, BottleneckCSP, [ 512 ] ], + [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 7-P5/32 + [ -1, 1, SPP, [ 1024, [ 5, 9, 13 ] ] ], + [ -1, 6, BottleneckCSP, [ 1024 ] ], # 9 + ] + +# YOLOv5 BiFPN head +head: + [ [ -1, 3, BottleneckCSP, [ 1024, False ] ], # 10 (P5/32-large) + + [ 
-1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 20, 6 ], 1, Concat, [ 1 ] ], # cat P4 + [ -1, 1, Conv, [ 512, 1, 1 ] ], + [ -1, 3, BottleneckCSP, [ 512, False ] ], # 14 (P4/16-medium) + + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 + [ -1, 1, Conv, [ 256, 1, 1 ] ], + [ -1, 3, BottleneckCSP, [ 256, False ] ], # 18 (P3/8-small) + + [ [ 18, 14, 10 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5) + ] From 2683b180795c134b7bcdcebd515fac8c0e9cc7a6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 28 Jul 2021 16:55:39 +0200 Subject: [PATCH 208/757] Update Hub Path inputs (#4200) --- hubconf.py | 4 +++- models/common.py | 4 ++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/hubconf.py b/hubconf.py index 7ef512655ae2..93ea84d69dd3 100644 --- a/hubconf.py +++ b/hubconf.py @@ -115,9 +115,11 @@ def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=Tr import cv2 import numpy as np from PIL import Image + from pathlib import Path imgs = ['data/images/zidane.jpg', # filename - 'https://github.com/ultralytics/yolov5/releases/download/v1.0/zidane.jpg', # URI + Path('data/images/zidane.jpg'), # Path + 'https://ultralytics.com/images/zidane.jpg', # URI cv2.imread('data/images/bus.jpg')[:, :, ::-1], # OpenCV Image.open('data/images/bus.jpg'), # PIL np.zeros((320, 640, 3))] # numpy diff --git a/models/common.py b/models/common.py index 901648b693a3..fc085e22b16b 100644 --- a/models/common.py +++ b/models/common.py @@ -2,7 +2,7 @@ import logging from copy import copy -from pathlib import Path, PosixPath +from pathlib import Path import math import numpy as np @@ -248,7 +248,7 @@ def forward(self, imgs, size=640, augment=False, profile=False): shape0, shape1, files = [], [], [] # image and inference shapes, filenames for i, im in enumerate(imgs): f = f'image{i}' # filename - if isinstance(im, (str, PosixPath)): # filename or uri + if isinstance(im, (str, Path)): # filename or uri im, f = Image.open(requests.get(im, stream=True).raw if str(im).startswith('http') else im), im im = np.asarray(exif_transpose(im)) elif isinstance(im, Image.Image): # PIL Image From e88e8f7a988662fb2d613e1aca3ae89214c84084 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Wed, 28 Jul 2021 21:10:08 +0530 Subject: [PATCH 209/757] W&B: Restructure code to support the new dataset_check() feature (#4197) * Improve docstrings and run names * default wandb login prompt with timeout * return key * Update api_key check logic * Properly support zipped dataset feature * update docstring * Revert tuorial change * extend changes to log_dataset * add run name * bug fix * bug fix * Update comment * fix import check * remove unused import * Hardcore .yaml file extension * reduce code * Reformat using pycharm Co-authored-by: Glenn Jocher --- README.md | 0 train.py | 17 ++++++---- utils/loggers/__init__.py | 13 +++----- utils/loggers/wandb/log_dataset.py | 6 ++-- utils/loggers/wandb/sweep.py | 3 +- utils/loggers/wandb/wandb_utils.py | 53 +++++++++++++++++++----------- 6 files changed, 52 insertions(+), 40 deletions(-) mode change 100755 => 100644 README.md diff --git a/README.md b/README.md old mode 100755 new mode 100644 diff --git a/train.py b/train.py index 020883ce98ba..7a8c15a6551a 100644 --- a/train.py +++ b/train.py @@ -73,24 +73,29 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary yaml.safe_dump(hyp, f, sort_keys=False) with open(save_dir / 'opt.yaml', 'w') as f: yaml.safe_dump(vars(opt), f, sort_keys=False) + data_dict = 
None + + # Loggers + if RANK in [-1, 0]: + loggers = Loggers(save_dir, weights, opt, hyp, LOGGER).start() # loggers dict + if loggers.wandb: + data_dict = loggers.wandb.data_dict + if resume: + weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp + # Config plots = not evolve # create plots cuda = device.type != 'cpu' init_seeds(1 + RANK) with torch_distributed_zero_first(RANK): - data_dict = check_dataset(data) # check + data_dict = data_dict or check_dataset(data) # check if None train_path, val_path = data_dict['train'], data_dict['val'] nc = 1 if single_cls else int(data_dict['nc']) # number of classes names = ['item'] if single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names assert len(names) == nc, f'{len(names)} names found for nc={nc} dataset in {data}' # check is_coco = data.endswith('coco.yaml') and nc == 80 # COCO dataset - # Loggers - if RANK in [-1, 0]: - loggers = Loggers(save_dir, weights, opt, hyp, data_dict, LOGGER).start() # loggers dict - if loggers.wandb and resume: - weights, epochs, hyp, data_dict = opt.weights, opt.epochs, opt.hyp, loggers.wandb.data_dict # Model pretrained = weights.endswith('.pt') diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index e65c8f9fd085..027cef4d283a 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -1,9 +1,7 @@ # YOLOv5 experiment logging utils - +import torch import warnings from threading import Thread - -import torch from torch.utils.tensorboard import SummaryWriter from utils.general import colorstr, emojis @@ -23,12 +21,11 @@ class Loggers(): # YOLOv5 Loggers class - def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, data_dict=None, logger=None, include=LOGGERS): + def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, include=LOGGERS): self.save_dir = save_dir self.weights = weights self.opt = opt self.hyp = hyp - self.data_dict = data_dict self.logger = logger # for printing results to console self.include = include for k in LOGGERS: @@ -38,9 +35,7 @@ def start(self): self.csv = True # always log to csv # Message - try: - import wandb - except ImportError: + if not wandb: prefix = colorstr('Weights & Biases: ') s = f"{prefix}run 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs (RECOMMENDED)" print(emojis(s)) @@ -57,7 +52,7 @@ def start(self): assert 'wandb' in self.include and wandb run_id = torch.load(self.weights).get('wandb_id') if self.opt.resume else None self.opt.hyp = self.hyp # add hyperparameters - self.wandb = WandbLogger(self.opt, run_id, self.data_dict) + self.wandb = WandbLogger(self.opt, run_id) except: self.wandb = None diff --git a/utils/loggers/wandb/log_dataset.py b/utils/loggers/wandb/log_dataset.py index b5663c92ee09..1328e20806ef 100644 --- a/utils/loggers/wandb/log_dataset.py +++ b/utils/loggers/wandb/log_dataset.py @@ -1,5 +1,4 @@ import argparse - import yaml from wandb_utils import WandbLogger @@ -8,9 +7,7 @@ def create_dataset_artifact(opt): - with open(opt.data, encoding='ascii', errors='ignore') as f: - data = yaml.safe_load(f) # data dict - logger = WandbLogger(opt, '', None, data, job_type='Dataset Creation') # TODO: return value unused + logger = WandbLogger(opt, None, job_type='Dataset Creation') # TODO: return value unused if __name__ == '__main__': @@ -19,6 +16,7 @@ def create_dataset_artifact(opt): parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset') parser.add_argument('--project', type=str, default='YOLOv5', 
help='name of W&B Project') parser.add_argument('--entity', default=None, help='W&B entity') + parser.add_argument('--name', type=str, default='log dataset', help='name of W&B run') opt = parser.parse_args() opt.resume = False # Explicitly disallow resume check for dataset upload job diff --git a/utils/loggers/wandb/sweep.py b/utils/loggers/wandb/sweep.py index 8e952d03c085..a0c76a10caa1 100644 --- a/utils/loggers/wandb/sweep.py +++ b/utils/loggers/wandb/sweep.py @@ -1,7 +1,6 @@ import sys -from pathlib import Path - import wandb +from pathlib import Path FILE = Path(__file__).absolute() sys.path.append(FILE.parents[2].as_posix()) # add utils/ to path diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index f4f228df4e24..ba2d830df07b 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -3,10 +3,9 @@ import logging import os import sys +import yaml from contextlib import contextmanager from pathlib import Path - -import yaml from tqdm import tqdm FILE = Path(__file__).absolute() @@ -99,7 +98,7 @@ class WandbLogger(): https://docs.wandb.com/guides/integrations/yolov5 """ - def __init__(self, opt, run_id, data_dict, job_type='Training'): + def __init__(self, opt, run_id, job_type='Training'): """ - Initialize WandbLogger instance - Upload dataset if opt.upload_dataset is True @@ -108,7 +107,6 @@ def __init__(self, opt, run_id, data_dict, job_type='Training'): arguments: opt (namespace) -- Commandline arguments for this run run_id (str) -- Run ID of W&B run to be resumed - data_dict (Dict) -- Dictionary conataining info about the dataset to be used job_type (str) -- To set the job_type for this run """ @@ -119,10 +117,11 @@ def __init__(self, opt, run_id, data_dict, job_type='Training'): self.train_artifact_path, self.val_artifact_path = None, None self.result_artifact = None self.val_table, self.result_table = None, None - self.data_dict = data_dict self.bbox_media_panel_images = [] self.val_table_path_map = None self.max_imgs_to_log = 16 + self.wandb_artifact_data_dict = None + self.data_dict = None # It's more elegant to stick to 1 wandb.init call, but useful config data is overwritten in the WandbLogger's wandb.init call if isinstance(opt.resume, str): # checks resume from artifact if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): @@ -148,11 +147,23 @@ def __init__(self, opt, run_id, data_dict, job_type='Training'): if self.wandb_run: if self.job_type == 'Training': if not opt.resume: - wandb_data_dict = self.check_and_upload_dataset(opt) if opt.upload_dataset else data_dict - # Info useful for resuming from artifacts - self.wandb_run.config.update({'opt': vars(opt), 'data_dict': wandb_data_dict}, - allow_val_change=True) - self.data_dict = self.setup_training(opt, data_dict) + if opt.upload_dataset: + self.wandb_artifact_data_dict = self.check_and_upload_dataset(opt) + + elif opt.data.endswith('_wandb.yaml'): # When dataset is W&B artifact + with open(opt.data, encoding='ascii', errors='ignore') as f: + data_dict = yaml.safe_load(f) + self.data_dict = data_dict + else: # Local .yaml dataset file or .zip file + self.data_dict = check_dataset(opt.data) + + self.setup_training(opt) + # write data_dict to config. 
useful for resuming from artifacts + if not self.wandb_artifact_data_dict: + self.wandb_artifact_data_dict = self.data_dict + self.wandb_run.config.update({'data_dict': self.wandb_artifact_data_dict}, + allow_val_change=True) + if self.job_type == 'Dataset Creation': self.data_dict = self.check_and_upload_dataset(opt) @@ -167,7 +178,7 @@ def check_and_upload_dataset(self, opt): Updated dataset info dictionary where local dataset paths are replaced by WAND_ARFACT_PREFIX links. """ assert wandb, 'Install wandb to upload dataset' - config_path = self.log_dataset_artifact(check_file(opt.data), + config_path = self.log_dataset_artifact(opt.data, opt.single_cls, 'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem) print("Created dataset config file ", config_path) @@ -175,7 +186,7 @@ def check_and_upload_dataset(self, opt): wandb_data_dict = yaml.safe_load(f) return wandb_data_dict - def setup_training(self, opt, data_dict): + def setup_training(self, opt): """ Setup the necessary processes for training YOLO models: - Attempt to download model checkpoint and dataset artifacts if opt.resume stats with WANDB_ARTIFACT_PREFIX @@ -184,10 +195,7 @@ def setup_training(self, opt, data_dict): arguments: opt (namespace) -- commandline arguments for this run - data_dict (Dict) -- Dataset dictionary for this run - returns: - data_dict (Dict) -- contains the updated info about the dataset to be used for training """ self.log_dict, self.current_epoch = {}, 0 self.bbox_interval = opt.bbox_interval @@ -198,8 +206,10 @@ def setup_training(self, opt, data_dict): config = self.wandb_run.config opt.weights, opt.save_period, opt.batch_size, opt.bbox_interval, opt.epochs, opt.hyp = str( self.weights), config.save_period, config.batch_size, config.bbox_interval, config.epochs, \ - config.opt['hyp'] + config.hyp data_dict = dict(self.wandb_run.config.data_dict) # eliminates the need for config file to resume + else: + data_dict = self.data_dict if self.val_artifact is None: # If --upload_dataset is set, use the existing artifact, don't download self.train_artifact_path, self.train_artifact = self.download_dataset_artifact(data_dict.get('train'), opt.artifact_alias) @@ -221,7 +231,10 @@ def setup_training(self, opt, data_dict): self.map_val_table_path() if opt.bbox_interval == -1: self.bbox_interval = opt.bbox_interval = (opt.epochs // 10) if opt.epochs > 10 else 1 - return data_dict + train_from_artifact = self.train_artifact_path is not None and self.val_artifact_path is not None + # Update the the data_dict to point to local artifacts dir + if train_from_artifact: + self.data_dict = data_dict def download_dataset_artifact(self, path, alias): """ @@ -299,7 +312,8 @@ def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config= returns: the new .yaml file with artifact links. 
it can be used to start training directly from artifacts """ - data = check_dataset(data_file) # parse and check + self.data_dict = check_dataset(data_file) # parse and check + data = dict(self.data_dict) nc, names = (1, ['item']) if single_cls else (int(data['nc']), data['names']) names = {k: v for k, v in enumerate(names)} # to index dictionary self.train_artifact = self.create_dataset_table(LoadImagesAndLabels( @@ -310,7 +324,8 @@ def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config= data['train'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'train') if data.get('val'): data['val'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'val') - path = data_file if overwrite_config else '_wandb.'.join(data_file.rsplit('.', 1)) # updated data.yaml path + path = Path(data_file).stem + path = (path if overwrite_config else path + '_wandb') + '.yaml' # updated data.yaml path data.pop('download', None) data.pop('path', None) with open(path, 'w') as f: From e016b15555591be8ee4fc5c164df9436d5916368 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 28 Jul 2021 21:25:20 +0200 Subject: [PATCH 210/757] Update yolov5-bifpn.yaml (#4208) --- models/hub/yolov5-bifpn.yaml | 52 ++++++++++++++++++++---------------- 1 file changed, 29 insertions(+), 23 deletions(-) diff --git a/models/hub/yolov5-bifpn.yaml b/models/hub/yolov5-bifpn.yaml index f1dd7c601b9c..69f7b5938c58 100644 --- a/models/hub/yolov5-bifpn.yaml +++ b/models/hub/yolov5-bifpn.yaml @@ -3,38 +3,44 @@ nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple anchors: - - [ 10,13, 16,30, 33,23 ] # P3/8 - - [ 30,61, 62,45, 59,119 ] # P4/16 - - [ 116,90, 156,198, 373,326 ] # P5/32 + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 # YOLOv5 backbone backbone: # [from, number, module, args] - [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 - [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 - [ -1, 3, Bottleneck, [ 128 ] ], - [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 - [ -1, 9, BottleneckCSP, [ 256 ] ], - [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 - [ -1, 9, BottleneckCSP, [ 512 ] ], - [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 7-P5/32 - [ -1, 1, SPP, [ 1024, [ 5, 9, 13 ] ] ], - [ -1, 6, BottleneckCSP, [ 1024 ] ], # 9 + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]] + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 1, SPP, [1024, [5, 9, 13]]], + [-1, 3, C3, [1024, False]], # 9 ] # YOLOv5 BiFPN head head: - [ [ -1, 3, BottleneckCSP, [ 1024, False ] ], # 10 (P5/32-large) + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 20, 6 ], 1, Concat, [ 1 ] ], # cat P4 - [ -1, 1, Conv, [ 512, 1, 1 ] ], - [ -1, 3, BottleneckCSP, [ 512, False ] ], # 14 (P4/16-medium) + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 - [ -1, 1, Conv, [ 256, 1, 1 ] ], - [ -1, 3, BottleneckCSP, [ 256, False ] ], # 18 (P3/8-small) + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14, 6], 1, Concat, [1]], # cat P4 + [-1, 3, C3, [512, False]], 
# 20 (P4/16-medium) - [ [ 18, 14, 10 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5) + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) ] From 750465edae8a1eb68409377c3bba94a49d3bf196 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Thu, 29 Jul 2021 02:55:15 +0530 Subject: [PATCH 211/757] W&B: More improvements and refactoring (#4205) * Improve docstrings and run names * default wandb login prompt with timeout * return key * Update api_key check logic * Properly support zipped dataset feature * update docstring * Revert tuorial change * extend changes to log_dataset * add run name * bug fix * bug fix * Update comment * fix import check * remove unused import * Hardcore .yaml file extension * reduce code * Reformat using pycharm * Remove redundant try catch * More refactoring and bug fixes * retry * Reformat using pycharm * respect LOGGERS include list Co-authored-by: Glenn Jocher --- utils/loggers/__init__.py | 8 ++++---- utils/loggers/wandb/wandb_utils.py | 7 ++++--- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 027cef4d283a..603837d57052 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -48,12 +48,12 @@ def start(self): self.tb = SummaryWriter(str(s)) # W&B - try: - assert 'wandb' in self.include and wandb - run_id = torch.load(self.weights).get('wandb_id') if self.opt.resume else None + if wandb and 'wandb' in self.include: + wandb_artifact_resume = isinstance(self.opt.resume, str) and self.opt.resume.startswith('wandb-artifact://') + run_id = torch.load(self.weights).get('wandb_id') if self.opt.resume and not wandb_artifact_resume else None self.opt.hyp = self.hyp # add hyperparameters self.wandb = WandbLogger(self.opt, run_id) - except: + else: self.wandb = None return self diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index ba2d830df07b..c978e3ea838d 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -158,11 +158,12 @@ def __init__(self, opt, run_id, job_type='Training'): self.data_dict = check_dataset(opt.data) self.setup_training(opt) - # write data_dict to config. useful for resuming from artifacts if not self.wandb_artifact_data_dict: self.wandb_artifact_data_dict = self.data_dict - self.wandb_run.config.update({'data_dict': self.wandb_artifact_data_dict}, - allow_val_change=True) + # write data_dict to config. useful for resuming from artifacts. Do this only when not resuming. 
+ if not opt.resume: + self.wandb_run.config.update({'data_dict': self.wandb_artifact_data_dict}, + allow_val_change=True) if self.job_type == 'Dataset Creation': self.data_dict = self.check_and_upload_dataset(opt) From b60b62e874e7cf0581c51936e39287c6906a419f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 28 Jul 2021 23:35:14 +0200 Subject: [PATCH 212/757] PyCharm reformat (#4209) * PyCharm reformat * YAML reformat * Markdown reformat --- .github/ISSUE_TEMPLATE/bug-report.md | 28 ++++---- .github/ISSUE_TEMPLATE/feature-request.md | 3 +- .github/ISSUE_TEMPLATE/question.md | 1 - CONTRIBUTING.md | 52 ++++++++++---- README.md | 79 ++++++++++++-------- data/Argoverse.yaml | 2 +- data/GlobalWheat2020.yaml | 2 +- data/Objects365.yaml | 82 ++++++++++----------- data/SKU-110K.yaml | 2 +- data/VOC.yaml | 4 +- data/VisDrone.yaml | 2 +- data/coco.yaml | 18 ++--- data/coco128.yaml | 18 ++--- data/scripts/get_coco.sh | 4 +- data/scripts/get_coco128.sh | 2 +- data/xView.yaml | 18 ++--- models/hub/anchors.yaml | 60 ++++++++-------- models/hub/yolov3-spp.yaml | 64 ++++++++--------- models/hub/yolov3-tiny.yaml | 46 ++++++------ models/hub/yolov3.yaml | 64 ++++++++--------- models/hub/yolov5-fpn.yaml | 46 ++++++------ models/hub/yolov5-p2.yaml | 76 ++++++++++---------- models/hub/yolov5-p6.yaml | 80 ++++++++++----------- models/hub/yolov5-p7.yaml | 86 +++++++++++----------- models/hub/yolov5-panet.yaml | 56 +++++++-------- models/hub/yolov5l6.yaml | 88 +++++++++++------------ models/hub/yolov5m6.yaml | 88 +++++++++++------------ models/hub/yolov5s-transformer.yaml | 56 +++++++-------- models/hub/yolov5s6.yaml | 88 +++++++++++------------ models/hub/yolov5x6.yaml | 88 +++++++++++------------ train.py | 4 +- utils/downloads.py | 1 - utils/loggers/__init__.py | 3 +- utils/loggers/wandb/log_dataset.py | 1 - utils/loggers/wandb/sweep.py | 3 +- utils/loggers/wandb/sweep.yaml | 4 +- utils/loggers/wandb/wandb_utils.py | 3 +- val.py | 1 - 38 files changed, 683 insertions(+), 640 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md index b7fc7c5a8838..62a02a3a6948 100644 --- a/.github/ISSUE_TEMPLATE/bug-report.md +++ b/.github/ISSUE_TEMPLATE/bug-report.md @@ -7,21 +7,24 @@ assignees: '' --- -Before submitting a bug report, please be aware that your issue **must be reproducible** with all of the following, otherwise it is non-actionable, and we can not help you: - - **Current repo**: run `git fetch && git status -uno` to check and `git pull` to update repo - - **Common dataset**: coco.yaml or coco128.yaml - - **Common environment**: Colab, Google Cloud, or Docker image. See https://github.com/ultralytics/yolov5#environments - -If this is a custom dataset/training question you **must include** your `train*.jpg`, `val*.jpg` and `results.png` figures, or we can not help you. You can generate these with `utils.plot_results()`. +Before submitting a bug report, please be aware that your issue **must be reproducible** with all of the following, +otherwise it is non-actionable, and we can not help you: +- **Current repo**: run `git fetch && git status -uno` to check and `git pull` to update repo +- **Common dataset**: coco.yaml or coco128.yaml +- **Common environment**: Colab, Google Cloud, or Docker image. See https://github.com/ultralytics/yolov5#environments + +If this is a custom dataset/training question you **must include** your `train*.jpg`, `val*.jpg` and `results.png` +figures, or we can not help you. You can generate these with `utils.plot_results()`. 
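For reference, a minimal sketch of that call, assuming a completed run under `runs/train/exp`; the module path and exact signature of `plot_results` have moved between YOLOv5 versions, so treat this as illustrative rather than exact:

```python
# Illustrative sketch: in this era of the repo, plot_results lives in
# utils/plots.py and reads the results.txt written during training.
from utils.plots import plot_results

plot_results(save_dir='runs/train/exp')  # writes results.png into the run directory
```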
## 🐛 Bug -A clear and concise description of what the bug is. +A clear and concise description of what the bug is. ## To Reproduce (REQUIRED) Input: + ``` import torch @@ -30,6 +33,7 @@ c = a / 0 ``` Output: + ``` Traceback (most recent call last): File "/Users/glennjocher/opt/anaconda3/envs/env1/lib/python3.7/site-packages/IPython/core/interactiveshell.py", line 3331, in run_code @@ -39,17 +43,17 @@ Traceback (most recent call last): RuntimeError: ZeroDivisionError ``` - ## Expected behavior -A clear and concise description of what you expected to happen. +A clear and concise description of what you expected to happen. ## Environment -If applicable, add screenshots to help explain your problem. - - OS: [e.g. Ubuntu] - - GPU [e.g. 2080 Ti] +If applicable, add screenshots to help explain your problem. +- OS: [e.g. Ubuntu] +- GPU [e.g. 2080 Ti] ## Additional context + Add any other context about the problem here. diff --git a/.github/ISSUE_TEMPLATE/feature-request.md b/.github/ISSUE_TEMPLATE/feature-request.md index 02320771b5f5..1fdf99045488 100644 --- a/.github/ISSUE_TEMPLATE/feature-request.md +++ b/.github/ISSUE_TEMPLATE/feature-request.md @@ -13,7 +13,8 @@ assignees: '' ## Motivation - + ## Pitch diff --git a/.github/ISSUE_TEMPLATE/question.md b/.github/ISSUE_TEMPLATE/question.md index 2c22aea70a7b..2892cfe262fb 100644 --- a/.github/ISSUE_TEMPLATE/question.md +++ b/.github/ISSUE_TEMPLATE/question.md @@ -9,5 +9,4 @@ assignees: '' ## ❔Question - ## Additional context diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 7c0ba3ae9f18..38601775caeb 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -8,32 +8,44 @@ We love your input! We want to make contributing to YOLOv5 as easy and transpare - Proposing a new feature - Becoming a maintainer -YOLOv5 works so well due to our combined community effort, and for every small improvement you contribute you will be helping push the frontiers of what's possible in AI 😃! - +YOLOv5 works so well due to our combined community effort, and for every small improvement you contribute you will be +helping push the frontiers of what's possible in AI 😃! ## Submitting a Pull Request (PR) 🛠️ + Submitting a PR is easy! This example shows how to submit a PR for updating `requirements.txt` in 4 steps: ### 1. Select File to Update + Select `requirements.txt` to update by clicking on it in GitHub.

[screenshot: PR_step1]

### 2. Click 'Edit this file' + Button is in top-right corner.

[screenshot: PR_step2]

### 3. Make Changes + Change `matplotlib` version from `3.2.2` to `3.3`.

[screenshot: PR_step3]

### 4. Preview Changes and Submit PR -Click on the **Preview changes** tab to verify your updates. At the bottom of the screen select 'Create a **new branch** for this commit', assign your branch a descriptive name such as `fix/matplotlib_version` and click the green **Propose changes** button. All done, your PR is now submitted to YOLOv5 for review and approval 😃! + +Click on the **Preview changes** tab to verify your updates. At the bottom of the screen select 'Create a **new branch** +for this commit', assign your branch a descriptive name such as `fix/matplotlib_version` and click the green **Propose +changes** button. All done, your PR is now submitted to YOLOv5 for review and approval 😃!

[screenshot: PR_step4]

### PR recommendations

To allow your work to be integrated as seamlessly as possible, we advise you to:

-- ✅ Verify your PR is **up-to-date with origin/master.** If your PR is behind origin/master an automatic [GitHub actions](https://github.com/ultralytics/yolov5/blob/master/.github/workflows/rebase.yml) rebase may be attempted by including the /rebase command in a comment body, or by running the following code, replacing 'feature' with the name of your local branch:
+
+- ✅ Verify your PR is **up-to-date with origin/master.** If your PR is behind origin/master an
+  automatic [GitHub actions](https://github.com/ultralytics/yolov5/blob/master/.github/workflows/rebase.yml) rebase may
+  be attempted by including the /rebase command in a comment body, or by running the following code, replacing 'feature'
+  with the name of your local branch:
+
 ```bash
 git remote add upstream https://github.com/ultralytics/yolov5.git
 git fetch upstream
@@ -41,30 +53,42 @@ git checkout feature  # <----- replace 'feature' with local branch name
 git merge upstream/master
 git push -u origin -f
 ```
-- ✅ Verify all Continuous Integration (CI) **checks are passing**.
-- ✅ Reduce changes to the absolute **minimum** required for your bug fix or feature addition. _"It is not daily increase but daily decrease, hack away the unessential. The closer to the source, the less wastage there is."_ -Bruce Lee
+
+- ✅ Verify all Continuous Integration (CI) **checks are passing**.
+- ✅ Reduce changes to the absolute **minimum** required for your bug fix or feature addition. _"It is not daily increase
+  but daily decrease, hack away the unessential. The closer to the source, the less wastage there is."_ -Bruce Lee

 ## Submitting a Bug Report 🐛

 If you spot a problem with YOLOv5 please submit a Bug Report!

-For us to start investigating a possible problem we need to be able to reproduce it ourselves first. We've created a few short guidelines below to help users provide what we need in order to get started.
+For us to start investigating a possible problem we need to be able to reproduce it ourselves first. We've created a few
+short guidelines below to help users provide what we need in order to get started.

-When asking a question, people will be better able to provide help if you provide **code** that they can easily understand and use to **reproduce** the problem. This is referred to by community members as creating a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example). Your code that reproduces the problem should be:
+When asking a question, people will be better able to provide help if you provide **code** that they can easily
+understand and use to **reproduce** the problem. This is referred to by community members as creating
+a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example). Your code that reproduces
+the problem should be:

 * ✅ **Minimal** – Use as little code as possible that still produces the same problem
 * ✅ **Complete** – Provide **all** parts someone else needs to reproduce your problem in the question itself
 * ✅ **Reproducible** – Test the code you're about to provide to make sure it reproduces the problem

-In addition to the above requirements, for [Ultralytics](https://ultralytics.com/) to provide assistance your code should be:
-* ✅ **Current** – Verify that your code is up-to-date with current GitHub [master](https://github.com/ultralytics/yolov5/tree/master), and if necessary `git pull` or `git clone` a new copy to ensure your problem has not already been resolved by previous commits.
-* ✅ **Unmodified** – Your problem must be reproducible without any modifications to the codebase in this repository. [Ultralytics](https://ultralytics.com/) does not provide support for custom code ⚠️.
+In addition to the above requirements, for [Ultralytics](https://ultralytics.com/) to provide assistance your code
+should be:
+
+* ✅ **Current** – Verify that your code is up-to-date with current
+  GitHub [master](https://github.com/ultralytics/yolov5/tree/master), and if necessary `git pull` or `git clone` a new
+  copy to ensure your problem has not already been resolved by previous commits.
+* ✅ **Unmodified** – Your problem must be reproducible without any modifications to the codebase in this
+  repository. [Ultralytics](https://ultralytics.com/) does not provide support for custom code ⚠️.

+If you believe your problem meets all of the above criteria, please close this issue and raise a new one using the
+🐛 **Bug Report** [template](https://github.com/ultralytics/yolov5/issues/new/choose) and providing
+a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example) to help us better
+understand and diagnose your problem.

 ## License

-By contributing, you agree that your contributions will be licensed under the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/)
+By contributing, you agree that your contributions will be licensed under
+the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/)

diff --git a/README.md b/README.md
index b4aacd78b0ca..df4e9add519d 100644
--- a/README.md
+++ b/README.md
@@ -52,31 +52,33 @@ YOLOv5 🚀 is a family of object detection architectures and models pretrained

-##
Documentation
See the [YOLOv5 Docs](https://docs.ultralytics.com) for full documentation on training, testing and deployment. - ##
Quick Start Examples
-
Install -[**Python>=3.6.0**](https://www.python.org/) is required with all [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) installed including [**PyTorch>=1.7**](https://pytorch.org/get-started/locally/): +[**Python>=3.6.0**](https://www.python.org/) is required with all +[requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) installed including +[**PyTorch>=1.7**](https://pytorch.org/get-started/locally/): + ```bash $ git clone https://github.com/ultralytics/yolov5 $ cd yolov5 $ pip install -r requirements.txt ``` +
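A quick sanity check of the environment (a suggestion, not part of the official install steps) confirms the PyTorch>=1.7 requirement and whether a GPU is visible:

```python
import torch

print(torch.__version__)          # expect 1.7 or newer
print(torch.cuda.is_available())  # False is fine for CPU-only use
```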
Inference -Inference with YOLOv5 and [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36). Models automatically download from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). +Inference with YOLOv5 and [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36). Models automatically download +from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). ```python import torch @@ -85,7 +87,7 @@ import torch model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # or yolov5m, yolov5l, yolov5x, custom # Images -img = 'https://ultralytics.com/images/zidane.jpg' # or PosixPath, PIL, OpenCV, numpy, list +img = 'https://ultralytics.com/images/zidane.jpg' # or file, Path, PIL, OpenCV, numpy, list # Inference results = model(img) @@ -101,7 +103,9 @@ results.print() # or .show(), .save(), .crop(), .pandas(), etc.
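As the trailing comment above hints, the returned `Detections` object can also be exported as structured data. A short sketch using the same model and image, via the `.pandas()` accessor named in that comment:

```python
import torch

model = torch.hub.load('ultralytics/yolov5', 'yolov5s')
results = model('https://ultralytics.com/images/zidane.jpg')

# One DataFrame per input image with columns:
# xmin, ymin, xmax, ymax, confidence, class, name
df = results.pandas().xyxy[0]
print(df.head())
```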
Inference with detect.py -`detect.py` runs inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases) and saving results to `runs/detect`. +`detect.py` runs inference on a variety of sources, downloading models automatically from +the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases) and saving results to `runs/detect`. + ```bash $ python detect.py --source 0 # webcam file.jpg # image @@ -117,13 +121,18 @@ $ python detect.py --source 0 # webcam
Training -Run commands below to reproduce results on [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh) dataset (dataset auto-downloads on first use). Training times for YOLOv5s/m/l/x are 2/4/6/8 days on a single V100 (multi-GPU times faster). Use the largest `--batch-size` your GPU allows (batch sizes shown for 16 GB devices). +Run commands below to reproduce results +on [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh) dataset (dataset auto-downloads on +first use). Training times for YOLOv5s/m/l/x are 2/4/6/8 days on a single V100 (multi-GPU times faster). Use the +largest `--batch-size` your GPU allows (batch sizes shown for 16 GB devices). + ```bash $ python train.py --data coco.yaml --cfg yolov5s.yaml --weights '' --batch-size 64 yolov5m 40 yolov5l 24 yolov5x 16 ``` +
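Because the batch sizes above target roughly 16 GB cards, one rough way to adapt them is to scale linearly with available GPU memory. This is an unofficial heuristic, not project guidance; the reference table below simply restates the numbers from the command above:

```python
import torch

# Reference batch sizes from the training command above (~16 GB GPUs)
REFERENCE = {'yolov5s': 64, 'yolov5m': 40, 'yolov5l': 24, 'yolov5x': 16}

def suggest_batch_size(model: str = 'yolov5s', reference_gb: float = 16.0) -> int:
    """Rescale the reference batch size to this machine's GPU memory (assumes CUDA)."""
    mem_gb = torch.cuda.get_device_properties(0).total_memory / 1e9
    return max(1, int(REFERENCE[model] * mem_gb / reference_gb))

print(suggest_batch_size('yolov5s'))  # e.g. 44 on an 11 GB card
```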
@@ -132,7 +141,8 @@ $ python train.py --data coco.yaml --cfg yolov5s.yaml --weights '' --batch-size Tutorials * [Train Custom Data](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data)  🚀 RECOMMENDED -* [Tips for Best Training Results](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results)  ☘️ RECOMMENDED +* [Tips for Best Training Results](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results)  ☘️ + RECOMMENDED * [Weights & Biases Logging](https://github.com/ultralytics/yolov5/issues/1289)  🌟 NEW * [Supervisely Ecosystem](https://github.com/ultralytics/yolov5/issues/2518)  🌟 NEW * [Multi-GPU Training](https://github.com/ultralytics/yolov5/issues/475) @@ -147,10 +157,11 @@ $ python train.py --data coco.yaml --cfg yolov5s.yaml --weights '' --batch-size
- ##
Environments and Integrations
-Get started in seconds with our verified environments and integrations, including [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme) for automatic YOLOv5 experiment logging. Click each icon below for details. +Get started in seconds with our verified environments and integrations, +including [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme) for automatic YOLOv5 experiment +logging. Click each icon below for details. - ##
Compete and Win
-We are super excited about our first-ever Ultralytics YOLOv5 🚀 EXPORT Competition with **$10,000** in cash prizes! +We are super excited about our first-ever Ultralytics YOLOv5 🚀 EXPORT Competition with **$10,000** in cash prizes!

- ##
Why YOLOv5

YOLOv5-P5 640 Figure (click to expand) - +

Figure Notes (click to expand) - - * GPU Speed measures end-to-end time per image averaged over 5000 COCO val2017 images using a V100 GPU with batch size 32, and includes image preprocessing, PyTorch FP16 inference, postprocessing and NMS. - * EfficientDet data from [google/automl](https://github.com/google/automl) at batch size 8. - * **Reproduce** by `python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt` -
+* GPU Speed measures end-to-end time per image averaged over 5000 COCO val2017 images using a V100 GPU with batch size + 32, and includes image preprocessing, PyTorch FP16 inference, postprocessing and NMS. +* EfficientDet data from [google/automl](https://github.com/google/automl) at batch size 8. +* **Reproduce** by + `python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt` + +
### Pretrained Checkpoints @@ -221,24 +232,30 @@ We are super excited about our first-ever Ultralytics YOLOv5 🚀 EXPORT Competi
Table Notes (click to expand) - - * APtest denotes COCO [test-dev2017](http://cocodataset.org/#upload) server results, all other AP results denote val2017 accuracy. - * AP values are for single-model single-scale unless otherwise noted. **Reproduce mAP** by `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` - * SpeedGPU averaged over 5000 COCO val2017 images using a GCP [n1-standard-16](https://cloud.google.com/compute/docs/machine-types#n1_standard_machine_types) V100 instance, and includes FP16 inference, postprocessing and NMS. **Reproduce speed** by `python val.py --data coco.yaml --img 640 --conf 0.25 --iou 0.45 --half` - * All checkpoints are trained to 300 epochs with default settings and hyperparameters (no autoaugmentation). - * Test Time Augmentation ([TTA](https://github.com/ultralytics/yolov5/issues/303)) includes reflection and scale augmentation. **Reproduce TTA** by `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment` -
+* APtest denotes COCO [test-dev2017](http://cocodataset.org/#upload) server results, all other AP results + denote val2017 accuracy. +* AP values are for single-model single-scale unless otherwise noted. **Reproduce mAP** + by `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` +* SpeedGPU averaged over 5000 COCO val2017 images using a + GCP [n1-standard-16](https://cloud.google.com/compute/docs/machine-types#n1_standard_machine_types) V100 instance, and + includes FP16 inference, postprocessing and NMS. **Reproduce speed** + by `python val.py --data coco.yaml --img 640 --conf 0.25 --iou 0.45 --half` +* All checkpoints are trained to 300 epochs with default settings and hyperparameters (no autoaugmentation). +* Test Time Augmentation ([TTA](https://github.com/ultralytics/yolov5/issues/303)) includes reflection and scale + augmentation. **Reproduce TTA** by `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment` -##
Contribute
+
-We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible. Please see our [Contributing Guide](CONTRIBUTING.md) to get started. +##
Contribute
+We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible. Please see +our [Contributing Guide](CONTRIBUTING.md) to get started. ##
Contact
-For issues running YOLOv5 please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues). For business or professional support requests please visit -[https://ultralytics.com/contact](https://ultralytics.com/contact). +For issues running YOLOv5 please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues). For business or +professional support requests please visit [https://ultralytics.com/contact](https://ultralytics.com/contact).
diff --git a/data/Argoverse.yaml b/data/Argoverse.yaml index c42624c5783f..3bf91ce7d504 100644 --- a/data/Argoverse.yaml +++ b/data/Argoverse.yaml @@ -15,7 +15,7 @@ test: Argoverse-1.1/images/test/ # test images (optional) https://eval.ai/web/c # Classes nc: 8 # number of classes -names: [ 'person', 'bicycle', 'car', 'motorcycle', 'bus', 'truck', 'traffic_light', 'stop_sign' ] # class names +names: ['person', 'bicycle', 'car', 'motorcycle', 'bus', 'truck', 'traffic_light', 'stop_sign'] # class names # Download script/URL (optional) --------------------------------------------------------------------------------------- diff --git a/data/GlobalWheat2020.yaml b/data/GlobalWheat2020.yaml index 842456047953..de9c7837cf57 100644 --- a/data/GlobalWheat2020.yaml +++ b/data/GlobalWheat2020.yaml @@ -27,7 +27,7 @@ test: # test images (optional) 1276 images # Classes nc: 1 # number of classes -names: [ 'wheat_head' ] # class names +names: ['wheat_head'] # class names # Download script/URL (optional) --------------------------------------------------------------------------------------- diff --git a/data/Objects365.yaml b/data/Objects365.yaml index 52577581d7bb..457b9fd9bf69 100644 --- a/data/Objects365.yaml +++ b/data/Objects365.yaml @@ -15,47 +15,47 @@ test: # test images (optional) # Classes nc: 365 # number of classes -names: [ 'Person', 'Sneakers', 'Chair', 'Other Shoes', 'Hat', 'Car', 'Lamp', 'Glasses', 'Bottle', 'Desk', 'Cup', - 'Street Lights', 'Cabinet/shelf', 'Handbag/Satchel', 'Bracelet', 'Plate', 'Picture/Frame', 'Helmet', 'Book', - 'Gloves', 'Storage box', 'Boat', 'Leather Shoes', 'Flower', 'Bench', 'Potted Plant', 'Bowl/Basin', 'Flag', - 'Pillow', 'Boots', 'Vase', 'Microphone', 'Necklace', 'Ring', 'SUV', 'Wine Glass', 'Belt', 'Monitor/TV', - 'Backpack', 'Umbrella', 'Traffic Light', 'Speaker', 'Watch', 'Tie', 'Trash bin Can', 'Slippers', 'Bicycle', - 'Stool', 'Barrel/bucket', 'Van', 'Couch', 'Sandals', 'Basket', 'Drum', 'Pen/Pencil', 'Bus', 'Wild Bird', - 'High Heels', 'Motorcycle', 'Guitar', 'Carpet', 'Cell Phone', 'Bread', 'Camera', 'Canned', 'Truck', - 'Traffic cone', 'Cymbal', 'Lifesaver', 'Towel', 'Stuffed Toy', 'Candle', 'Sailboat', 'Laptop', 'Awning', - 'Bed', 'Faucet', 'Tent', 'Horse', 'Mirror', 'Power outlet', 'Sink', 'Apple', 'Air Conditioner', 'Knife', - 'Hockey Stick', 'Paddle', 'Pickup Truck', 'Fork', 'Traffic Sign', 'Balloon', 'Tripod', 'Dog', 'Spoon', 'Clock', - 'Pot', 'Cow', 'Cake', 'Dinning Table', 'Sheep', 'Hanger', 'Blackboard/Whiteboard', 'Napkin', 'Other Fish', - 'Orange/Tangerine', 'Toiletry', 'Keyboard', 'Tomato', 'Lantern', 'Machinery Vehicle', 'Fan', - 'Green Vegetables', 'Banana', 'Baseball Glove', 'Airplane', 'Mouse', 'Train', 'Pumpkin', 'Soccer', 'Skiboard', - 'Luggage', 'Nightstand', 'Tea pot', 'Telephone', 'Trolley', 'Head Phone', 'Sports Car', 'Stop Sign', - 'Dessert', 'Scooter', 'Stroller', 'Crane', 'Remote', 'Refrigerator', 'Oven', 'Lemon', 'Duck', 'Baseball Bat', - 'Surveillance Camera', 'Cat', 'Jug', 'Broccoli', 'Piano', 'Pizza', 'Elephant', 'Skateboard', 'Surfboard', - 'Gun', 'Skating and Skiing shoes', 'Gas stove', 'Donut', 'Bow Tie', 'Carrot', 'Toilet', 'Kite', 'Strawberry', - 'Other Balls', 'Shovel', 'Pepper', 'Computer Box', 'Toilet Paper', 'Cleaning Products', 'Chopsticks', - 'Microwave', 'Pigeon', 'Baseball', 'Cutting/chopping Board', 'Coffee Table', 'Side Table', 'Scissors', - 'Marker', 'Pie', 'Ladder', 'Snowboard', 'Cookies', 'Radiator', 'Fire Hydrant', 'Basketball', 'Zebra', 'Grape', - 'Giraffe', 'Potato', 'Sausage', 'Tricycle', 'Violin', 'Egg', 
'Fire Extinguisher', 'Candy', 'Fire Truck', - 'Billiards', 'Converter', 'Bathtub', 'Wheelchair', 'Golf Club', 'Briefcase', 'Cucumber', 'Cigar/Cigarette', - 'Paint Brush', 'Pear', 'Heavy Truck', 'Hamburger', 'Extractor', 'Extension Cord', 'Tong', 'Tennis Racket', - 'Folder', 'American Football', 'earphone', 'Mask', 'Kettle', 'Tennis', 'Ship', 'Swing', 'Coffee Machine', - 'Slide', 'Carriage', 'Onion', 'Green beans', 'Projector', 'Frisbee', 'Washing Machine/Drying Machine', - 'Chicken', 'Printer', 'Watermelon', 'Saxophone', 'Tissue', 'Toothbrush', 'Ice cream', 'Hot-air balloon', - 'Cello', 'French Fries', 'Scale', 'Trophy', 'Cabbage', 'Hot dog', 'Blender', 'Peach', 'Rice', 'Wallet/Purse', - 'Volleyball', 'Deer', 'Goose', 'Tape', 'Tablet', 'Cosmetics', 'Trumpet', 'Pineapple', 'Golf Ball', - 'Ambulance', 'Parking meter', 'Mango', 'Key', 'Hurdle', 'Fishing Rod', 'Medal', 'Flute', 'Brush', 'Penguin', - 'Megaphone', 'Corn', 'Lettuce', 'Garlic', 'Swan', 'Helicopter', 'Green Onion', 'Sandwich', 'Nuts', - 'Speed Limit Sign', 'Induction Cooker', 'Broom', 'Trombone', 'Plum', 'Rickshaw', 'Goldfish', 'Kiwi fruit', - 'Router/modem', 'Poker Card', 'Toaster', 'Shrimp', 'Sushi', 'Cheese', 'Notepaper', 'Cherry', 'Pliers', 'CD', - 'Pasta', 'Hammer', 'Cue', 'Avocado', 'Hamimelon', 'Flask', 'Mushroom', 'Screwdriver', 'Soap', 'Recorder', - 'Bear', 'Eggplant', 'Board Eraser', 'Coconut', 'Tape Measure/Ruler', 'Pig', 'Showerhead', 'Globe', 'Chips', - 'Steak', 'Crosswalk Sign', 'Stapler', 'Camel', 'Formula 1', 'Pomegranate', 'Dishwasher', 'Crab', - 'Hoverboard', 'Meat ball', 'Rice Cooker', 'Tuba', 'Calculator', 'Papaya', 'Antelope', 'Parrot', 'Seal', - 'Butterfly', 'Dumbbell', 'Donkey', 'Lion', 'Urinal', 'Dolphin', 'Electric Drill', 'Hair Dryer', 'Egg tart', - 'Jellyfish', 'Treadmill', 'Lighter', 'Grapefruit', 'Game board', 'Mop', 'Radish', 'Baozi', 'Target', 'French', - 'Spring Rolls', 'Monkey', 'Rabbit', 'Pencil Case', 'Yak', 'Red Cabbage', 'Binoculars', 'Asparagus', 'Barbell', - 'Scallop', 'Noddles', 'Comb', 'Dumpling', 'Oyster', 'Table Tennis paddle', 'Cosmetics Brush/Eyeliner Pencil', - 'Chainsaw', 'Eraser', 'Lobster', 'Durian', 'Okra', 'Lipstick', 'Cosmetics Mirror', 'Curling', 'Table Tennis' ] +names: ['Person', 'Sneakers', 'Chair', 'Other Shoes', 'Hat', 'Car', 'Lamp', 'Glasses', 'Bottle', 'Desk', 'Cup', + 'Street Lights', 'Cabinet/shelf', 'Handbag/Satchel', 'Bracelet', 'Plate', 'Picture/Frame', 'Helmet', 'Book', + 'Gloves', 'Storage box', 'Boat', 'Leather Shoes', 'Flower', 'Bench', 'Potted Plant', 'Bowl/Basin', 'Flag', + 'Pillow', 'Boots', 'Vase', 'Microphone', 'Necklace', 'Ring', 'SUV', 'Wine Glass', 'Belt', 'Monitor/TV', + 'Backpack', 'Umbrella', 'Traffic Light', 'Speaker', 'Watch', 'Tie', 'Trash bin Can', 'Slippers', 'Bicycle', + 'Stool', 'Barrel/bucket', 'Van', 'Couch', 'Sandals', 'Basket', 'Drum', 'Pen/Pencil', 'Bus', 'Wild Bird', + 'High Heels', 'Motorcycle', 'Guitar', 'Carpet', 'Cell Phone', 'Bread', 'Camera', 'Canned', 'Truck', + 'Traffic cone', 'Cymbal', 'Lifesaver', 'Towel', 'Stuffed Toy', 'Candle', 'Sailboat', 'Laptop', 'Awning', + 'Bed', 'Faucet', 'Tent', 'Horse', 'Mirror', 'Power outlet', 'Sink', 'Apple', 'Air Conditioner', 'Knife', + 'Hockey Stick', 'Paddle', 'Pickup Truck', 'Fork', 'Traffic Sign', 'Balloon', 'Tripod', 'Dog', 'Spoon', 'Clock', + 'Pot', 'Cow', 'Cake', 'Dinning Table', 'Sheep', 'Hanger', 'Blackboard/Whiteboard', 'Napkin', 'Other Fish', + 'Orange/Tangerine', 'Toiletry', 'Keyboard', 'Tomato', 'Lantern', 'Machinery Vehicle', 'Fan', + 'Green Vegetables', 'Banana', 'Baseball Glove', 
'Airplane', 'Mouse', 'Train', 'Pumpkin', 'Soccer', 'Skiboard', + 'Luggage', 'Nightstand', 'Tea pot', 'Telephone', 'Trolley', 'Head Phone', 'Sports Car', 'Stop Sign', + 'Dessert', 'Scooter', 'Stroller', 'Crane', 'Remote', 'Refrigerator', 'Oven', 'Lemon', 'Duck', 'Baseball Bat', + 'Surveillance Camera', 'Cat', 'Jug', 'Broccoli', 'Piano', 'Pizza', 'Elephant', 'Skateboard', 'Surfboard', + 'Gun', 'Skating and Skiing shoes', 'Gas stove', 'Donut', 'Bow Tie', 'Carrot', 'Toilet', 'Kite', 'Strawberry', + 'Other Balls', 'Shovel', 'Pepper', 'Computer Box', 'Toilet Paper', 'Cleaning Products', 'Chopsticks', + 'Microwave', 'Pigeon', 'Baseball', 'Cutting/chopping Board', 'Coffee Table', 'Side Table', 'Scissors', + 'Marker', 'Pie', 'Ladder', 'Snowboard', 'Cookies', 'Radiator', 'Fire Hydrant', 'Basketball', 'Zebra', 'Grape', + 'Giraffe', 'Potato', 'Sausage', 'Tricycle', 'Violin', 'Egg', 'Fire Extinguisher', 'Candy', 'Fire Truck', + 'Billiards', 'Converter', 'Bathtub', 'Wheelchair', 'Golf Club', 'Briefcase', 'Cucumber', 'Cigar/Cigarette', + 'Paint Brush', 'Pear', 'Heavy Truck', 'Hamburger', 'Extractor', 'Extension Cord', 'Tong', 'Tennis Racket', + 'Folder', 'American Football', 'earphone', 'Mask', 'Kettle', 'Tennis', 'Ship', 'Swing', 'Coffee Machine', + 'Slide', 'Carriage', 'Onion', 'Green beans', 'Projector', 'Frisbee', 'Washing Machine/Drying Machine', + 'Chicken', 'Printer', 'Watermelon', 'Saxophone', 'Tissue', 'Toothbrush', 'Ice cream', 'Hot-air balloon', + 'Cello', 'French Fries', 'Scale', 'Trophy', 'Cabbage', 'Hot dog', 'Blender', 'Peach', 'Rice', 'Wallet/Purse', + 'Volleyball', 'Deer', 'Goose', 'Tape', 'Tablet', 'Cosmetics', 'Trumpet', 'Pineapple', 'Golf Ball', + 'Ambulance', 'Parking meter', 'Mango', 'Key', 'Hurdle', 'Fishing Rod', 'Medal', 'Flute', 'Brush', 'Penguin', + 'Megaphone', 'Corn', 'Lettuce', 'Garlic', 'Swan', 'Helicopter', 'Green Onion', 'Sandwich', 'Nuts', + 'Speed Limit Sign', 'Induction Cooker', 'Broom', 'Trombone', 'Plum', 'Rickshaw', 'Goldfish', 'Kiwi fruit', + 'Router/modem', 'Poker Card', 'Toaster', 'Shrimp', 'Sushi', 'Cheese', 'Notepaper', 'Cherry', 'Pliers', 'CD', + 'Pasta', 'Hammer', 'Cue', 'Avocado', 'Hamimelon', 'Flask', 'Mushroom', 'Screwdriver', 'Soap', 'Recorder', + 'Bear', 'Eggplant', 'Board Eraser', 'Coconut', 'Tape Measure/Ruler', 'Pig', 'Showerhead', 'Globe', 'Chips', + 'Steak', 'Crosswalk Sign', 'Stapler', 'Camel', 'Formula 1', 'Pomegranate', 'Dishwasher', 'Crab', + 'Hoverboard', 'Meat ball', 'Rice Cooker', 'Tuba', 'Calculator', 'Papaya', 'Antelope', 'Parrot', 'Seal', + 'Butterfly', 'Dumbbell', 'Donkey', 'Lion', 'Urinal', 'Dolphin', 'Electric Drill', 'Hair Dryer', 'Egg tart', + 'Jellyfish', 'Treadmill', 'Lighter', 'Grapefruit', 'Game board', 'Mop', 'Radish', 'Baozi', 'Target', 'French', + 'Spring Rolls', 'Monkey', 'Rabbit', 'Pencil Case', 'Yak', 'Red Cabbage', 'Binoculars', 'Asparagus', 'Barbell', + 'Scallop', 'Noddles', 'Comb', 'Dumpling', 'Oyster', 'Table Tennis paddle', 'Cosmetics Brush/Eyeliner Pencil', + 'Chainsaw', 'Eraser', 'Lobster', 'Durian', 'Okra', 'Lipstick', 'Cosmetics Mirror', 'Curling', 'Table Tennis'] # Download script/URL (optional) --------------------------------------------------------------------------------------- diff --git a/data/SKU-110K.yaml b/data/SKU-110K.yaml index 01bf36c0d870..c85fa81d2e03 100644 --- a/data/SKU-110K.yaml +++ b/data/SKU-110K.yaml @@ -15,7 +15,7 @@ test: test.txt # test images (optional) 2936 images # Classes nc: 1 # number of classes -names: [ 'object' ] # class names +names: ['object'] # class names # Download script/URL 
(optional) --------------------------------------------------------------------------------------- diff --git a/data/VOC.yaml b/data/VOC.yaml index 55f39d852d31..e59fb6afd2fd 100644 --- a/data/VOC.yaml +++ b/data/VOC.yaml @@ -21,8 +21,8 @@ test: # test images (optional) # Classes nc: 20 # number of classes -names: [ 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', - 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor' ] # class names +names: ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', + 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'] # class names # Download script/URL (optional) --------------------------------------------------------------------------------------- diff --git a/data/VisDrone.yaml b/data/VisDrone.yaml index 12e0e7c4a009..fe6cb9199ce1 100644 --- a/data/VisDrone.yaml +++ b/data/VisDrone.yaml @@ -15,7 +15,7 @@ test: VisDrone2019-DET-test-dev/images # test images (optional) 1610 images # Classes nc: 10 # number of classes -names: [ 'pedestrian', 'people', 'bicycle', 'car', 'van', 'truck', 'tricycle', 'awning-tricycle', 'bus', 'motor' ] +names: ['pedestrian', 'people', 'bicycle', 'car', 'van', 'truck', 'tricycle', 'awning-tricycle', 'bus', 'motor'] # Download script/URL (optional) --------------------------------------------------------------------------------------- diff --git a/data/coco.yaml b/data/coco.yaml index cab1a0171963..acf8e84f3e21 100644 --- a/data/coco.yaml +++ b/data/coco.yaml @@ -15,15 +15,15 @@ test: test-dev2017.txt # 20288 of 40670 images, submit to https://competitions. # Classes nc: 80 # number of classes -names: [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', - 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', - 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', - 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', - 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', - 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', - 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', - 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', - 'hair drier', 'toothbrush' ] # class names +names: ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', + 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', + 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', + 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', + 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', + 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', + 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', + 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', + 'hair drier', 'toothbrush'] # 
class names # Download script/URL (optional) diff --git a/data/coco128.yaml b/data/coco128.yaml index 6902eb9397a1..eda39dcdaa8d 100644 --- a/data/coco128.yaml +++ b/data/coco128.yaml @@ -15,15 +15,15 @@ test: # test images (optional) # Classes nc: 80 # number of classes -names: [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', - 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', - 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', - 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', - 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', - 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', - 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', - 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', - 'hair drier', 'toothbrush' ] # class names +names: ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', + 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', + 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', + 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', + 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', + 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', + 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', + 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', + 'hair drier', 'toothbrush'] # class names # Download script/URL (optional) diff --git a/data/scripts/get_coco.sh b/data/scripts/get_coco.sh index 1f484beee34c..f6c075689709 100755 --- a/data/scripts/get_coco.sh +++ b/data/scripts/get_coco.sh @@ -12,7 +12,7 @@ d='../datasets' # unzip directory url=https://github.com/ultralytics/yolov5/releases/download/v1.0/ f='coco2017labels.zip' # or 'coco2017labels-segments.zip', 68 MB echo 'Downloading' $url$f ' ...' -curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & # download, unzip, remove in background +curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & # Download/unzip images d='../datasets/coco/images' # unzip directory @@ -22,6 +22,6 @@ f2='val2017.zip' # 1G, 5k images f3='test2017.zip' # 7G, 41k images (optional) for f in $f1 $f2; do echo 'Downloading' $url$f '...' - curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & # download, unzip, remove in background + curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & done wait # finish background tasks diff --git a/data/scripts/get_coco128.sh b/data/scripts/get_coco128.sh index 3d705890b56d..6eb47bfe5595 100644 --- a/data/scripts/get_coco128.sh +++ b/data/scripts/get_coco128.sh @@ -12,6 +12,6 @@ d='../datasets' # unzip directory url=https://github.com/ultralytics/yolov5/releases/download/v1.0/ f='coco128.zip' # or 'coco2017labels-segments.zip', 68 MB echo 'Downloading' $url$f ' ...' 
-curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & # download, unzip, remove in background +curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & wait # finish background tasks diff --git a/data/xView.yaml b/data/xView.yaml index f4f27bfbc8ec..e191188da0f0 100644 --- a/data/xView.yaml +++ b/data/xView.yaml @@ -15,15 +15,15 @@ val: images/autosplit_val.txt # train images (relative to 'path') 10% of 847 tr # Classes nc: 60 # number of classes -names: [ 'Fixed-wing Aircraft', 'Small Aircraft', 'Cargo Plane', 'Helicopter', 'Passenger Vehicle', 'Small Car', 'Bus', - 'Pickup Truck', 'Utility Truck', 'Truck', 'Cargo Truck', 'Truck w/Box', 'Truck Tractor', 'Trailer', - 'Truck w/Flatbed', 'Truck w/Liquid', 'Crane Truck', 'Railway Vehicle', 'Passenger Car', 'Cargo Car', - 'Flat Car', 'Tank car', 'Locomotive', 'Maritime Vessel', 'Motorboat', 'Sailboat', 'Tugboat', 'Barge', - 'Fishing Vessel', 'Ferry', 'Yacht', 'Container Ship', 'Oil Tanker', 'Engineering Vehicle', 'Tower crane', - 'Container Crane', 'Reach Stacker', 'Straddle Carrier', 'Mobile Crane', 'Dump Truck', 'Haul Truck', - 'Scraper/Tractor', 'Front loader/Bulldozer', 'Excavator', 'Cement Mixer', 'Ground Grader', 'Hut/Tent', 'Shed', - 'Building', 'Aircraft Hangar', 'Damaged Building', 'Facility', 'Construction Site', 'Vehicle Lot', 'Helipad', - 'Storage Tank', 'Shipping container lot', 'Shipping Container', 'Pylon', 'Tower' ] # class names +names: ['Fixed-wing Aircraft', 'Small Aircraft', 'Cargo Plane', 'Helicopter', 'Passenger Vehicle', 'Small Car', 'Bus', + 'Pickup Truck', 'Utility Truck', 'Truck', 'Cargo Truck', 'Truck w/Box', 'Truck Tractor', 'Trailer', + 'Truck w/Flatbed', 'Truck w/Liquid', 'Crane Truck', 'Railway Vehicle', 'Passenger Car', 'Cargo Car', + 'Flat Car', 'Tank car', 'Locomotive', 'Maritime Vessel', 'Motorboat', 'Sailboat', 'Tugboat', 'Barge', + 'Fishing Vessel', 'Ferry', 'Yacht', 'Container Ship', 'Oil Tanker', 'Engineering Vehicle', 'Tower crane', + 'Container Crane', 'Reach Stacker', 'Straddle Carrier', 'Mobile Crane', 'Dump Truck', 'Haul Truck', + 'Scraper/Tractor', 'Front loader/Bulldozer', 'Excavator', 'Cement Mixer', 'Ground Grader', 'Hut/Tent', 'Shed', + 'Building', 'Aircraft Hangar', 'Damaged Building', 'Facility', 'Construction Site', 'Vehicle Lot', 'Helipad', + 'Storage Tank', 'Shipping container lot', 'Shipping Container', 'Pylon', 'Tower'] # class names # Download script/URL (optional) --------------------------------------------------------------------------------------- diff --git a/models/hub/anchors.yaml b/models/hub/anchors.yaml index a07a4dc72387..57512955ac1f 100644 --- a/models/hub/anchors.yaml +++ b/models/hub/anchors.yaml @@ -4,55 +4,55 @@ # P5 ------------------------------------------------------------------------------------------------------------------- # P5-640: anchors_p5_640: - - [ 10,13, 16,30, 33,23 ] # P3/8 - - [ 30,61, 62,45, 59,119 ] # P4/16 - - [ 116,90, 156,198, 373,326 ] # P5/32 + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 # P6 ------------------------------------------------------------------------------------------------------------------- # P6-640: thr=0.25: 0.9964 BPR, 5.54 anchors past thr, n=12, img_size=640, metric_all=0.281/0.716-mean/best, past_thr=0.469-mean: 9,11, 21,19, 17,41, 43,32, 39,70, 86,64, 65,131, 134,130, 120,265, 282,180, 247,354, 512,387 anchors_p6_640: - - [ 9,11, 21,19, 17,41 ] # P3/8 - - [ 43,32, 39,70, 86,64 ] # P4/16 - - [ 65,131, 134,130, 120,265 ] # P5/32 - - [ 282,180, 247,354, 512,387 ] # P6/64 
+ - [9,11, 21,19, 17,41] # P3/8 + - [43,32, 39,70, 86,64] # P4/16 + - [65,131, 134,130, 120,265] # P5/32 + - [282,180, 247,354, 512,387] # P6/64 # P6-1280: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1280, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 19,27, 44,40, 38,94, 96,68, 86,152, 180,137, 140,301, 303,264, 238,542, 436,615, 739,380, 925,792 anchors_p6_1280: - - [ 19,27, 44,40, 38,94 ] # P3/8 - - [ 96,68, 86,152, 180,137 ] # P4/16 - - [ 140,301, 303,264, 238,542 ] # P5/32 - - [ 436,615, 739,380, 925,792 ] # P6/64 + - [19,27, 44,40, 38,94] # P3/8 + - [96,68, 86,152, 180,137] # P4/16 + - [140,301, 303,264, 238,542] # P5/32 + - [436,615, 739,380, 925,792] # P6/64 # P6-1920: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1920, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 28,41, 67,59, 57,141, 144,103, 129,227, 270,205, 209,452, 455,396, 358,812, 653,922, 1109,570, 1387,1187 anchors_p6_1920: - - [ 28,41, 67,59, 57,141 ] # P3/8 - - [ 144,103, 129,227, 270,205 ] # P4/16 - - [ 209,452, 455,396, 358,812 ] # P5/32 - - [ 653,922, 1109,570, 1387,1187 ] # P6/64 + - [28,41, 67,59, 57,141] # P3/8 + - [144,103, 129,227, 270,205] # P4/16 + - [209,452, 455,396, 358,812] # P5/32 + - [653,922, 1109,570, 1387,1187] # P6/64 # P7 ------------------------------------------------------------------------------------------------------------------- # P7-640: thr=0.25: 0.9962 BPR, 6.76 anchors past thr, n=15, img_size=640, metric_all=0.275/0.733-mean/best, past_thr=0.466-mean: 11,11, 13,30, 29,20, 30,46, 61,38, 39,92, 78,80, 146,66, 79,163, 149,150, 321,143, 157,303, 257,402, 359,290, 524,372 anchors_p7_640: - - [ 11,11, 13,30, 29,20 ] # P3/8 - - [ 30,46, 61,38, 39,92 ] # P4/16 - - [ 78,80, 146,66, 79,163 ] # P5/32 - - [ 149,150, 321,143, 157,303 ] # P6/64 - - [ 257,402, 359,290, 524,372 ] # P7/128 + - [11,11, 13,30, 29,20] # P3/8 + - [30,46, 61,38, 39,92] # P4/16 + - [78,80, 146,66, 79,163] # P5/32 + - [149,150, 321,143, 157,303] # P6/64 + - [257,402, 359,290, 524,372] # P7/128 # P7-1280: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1280, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 19,22, 54,36, 32,77, 70,83, 138,71, 75,173, 165,159, 148,334, 375,151, 334,317, 251,626, 499,474, 750,326, 534,814, 1079,818 anchors_p7_1280: - - [ 19,22, 54,36, 32,77 ] # P3/8 - - [ 70,83, 138,71, 75,173 ] # P4/16 - - [ 165,159, 148,334, 375,151 ] # P5/32 - - [ 334,317, 251,626, 499,474 ] # P6/64 - - [ 750,326, 534,814, 1079,818 ] # P7/128 + - [19,22, 54,36, 32,77] # P3/8 + - [70,83, 138,71, 75,173] # P4/16 + - [165,159, 148,334, 375,151] # P5/32 + - [334,317, 251,626, 499,474] # P6/64 + - [750,326, 534,814, 1079,818] # P7/128 # P7-1920: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1920, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 29,34, 81,55, 47,115, 105,124, 207,107, 113,259, 247,238, 222,500, 563,227, 501,476, 376,939, 749,711, 1126,489, 801,1222, 1618,1227 anchors_p7_1920: - - [ 29,34, 81,55, 47,115 ] # P3/8 - - [ 105,124, 207,107, 113,259 ] # P4/16 - - [ 247,238, 222,500, 563,227 ] # P5/32 - - [ 501,476, 376,939, 749,711 ] # P6/64 - - [ 1126,489, 801,1222, 1618,1227 ] # P7/128 + - [29,34, 81,55, 47,115] # P3/8 + - [105,124, 207,107, 113,259] # P4/16 + - [247,238, 222,500, 563,227] # P5/32 + - [501,476, 376,939, 749,711] # P6/64 + - [1126,489, 801,1222, 1618,1227] # P7/128 diff --git a/models/hub/yolov3-spp.yaml b/models/hub/yolov3-spp.yaml index 0ca7b7f6577b..ddc0549f50d6 100644 --- a/models/hub/yolov3-spp.yaml +++ 
b/models/hub/yolov3-spp.yaml @@ -3,47 +3,47 @@ nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple anchors: - - [ 10,13, 16,30, 33,23 ] # P3/8 - - [ 30,61, 62,45, 59,119 ] # P4/16 - - [ 116,90, 156,198, 373,326 ] # P5/32 + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 # darknet53 backbone backbone: # [from, number, module, args] - [ [ -1, 1, Conv, [ 32, 3, 1 ] ], # 0 - [ -1, 1, Conv, [ 64, 3, 2 ] ], # 1-P1/2 - [ -1, 1, Bottleneck, [ 64 ] ], - [ -1, 1, Conv, [ 128, 3, 2 ] ], # 3-P2/4 - [ -1, 2, Bottleneck, [ 128 ] ], - [ -1, 1, Conv, [ 256, 3, 2 ] ], # 5-P3/8 - [ -1, 8, Bottleneck, [ 256 ] ], - [ -1, 1, Conv, [ 512, 3, 2 ] ], # 7-P4/16 - [ -1, 8, Bottleneck, [ 512 ] ], - [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P5/32 - [ -1, 4, Bottleneck, [ 1024 ] ], # 10 + [[-1, 1, Conv, [32, 3, 1]], # 0 + [-1, 1, Conv, [64, 3, 2]], # 1-P1/2 + [-1, 1, Bottleneck, [64]], + [-1, 1, Conv, [128, 3, 2]], # 3-P2/4 + [-1, 2, Bottleneck, [128]], + [-1, 1, Conv, [256, 3, 2]], # 5-P3/8 + [-1, 8, Bottleneck, [256]], + [-1, 1, Conv, [512, 3, 2]], # 7-P4/16 + [-1, 8, Bottleneck, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32 + [-1, 4, Bottleneck, [1024]], # 10 ] # YOLOv3-SPP head head: - [ [ -1, 1, Bottleneck, [ 1024, False ] ], - [ -1, 1, SPP, [ 512, [ 5, 9, 13 ] ] ], - [ -1, 1, Conv, [ 1024, 3, 1 ] ], - [ -1, 1, Conv, [ 512, 1, 1 ] ], - [ -1, 1, Conv, [ 1024, 3, 1 ] ], # 15 (P5/32-large) + [[-1, 1, Bottleneck, [1024, False]], + [-1, 1, SPP, [512, [5, 9, 13]]], + [-1, 1, Conv, [1024, 3, 1]], + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large) - [ -2, 1, Conv, [ 256, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P4 - [ -1, 1, Bottleneck, [ 512, False ] ], - [ -1, 1, Bottleneck, [ 512, False ] ], - [ -1, 1, Conv, [ 256, 1, 1 ] ], - [ -1, 1, Conv, [ 512, 3, 1 ] ], # 22 (P4/16-medium) + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P4 + [-1, 1, Bottleneck, [512, False]], + [-1, 1, Bottleneck, [512, False]], + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium) - [ -2, 1, Conv, [ 128, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P3 - [ -1, 1, Bottleneck, [ 256, False ] ], - [ -1, 2, Bottleneck, [ 256, False ] ], # 27 (P3/8-small) + [-2, 1, Conv, [128, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P3 + [-1, 1, Bottleneck, [256, False]], + [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small) - [ [ 27, 22, 15 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5) + [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) ] diff --git a/models/hub/yolov3-tiny.yaml b/models/hub/yolov3-tiny.yaml index d39a6b1f581c..537ad755b166 100644 --- a/models/hub/yolov3-tiny.yaml +++ b/models/hub/yolov3-tiny.yaml @@ -3,37 +3,37 @@ nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple anchors: - - [ 10,14, 23,27, 37,58 ] # P4/16 - - [ 81,82, 135,169, 344,319 ] # P5/32 + - [10,14, 23,27, 37,58] # P4/16 + - [81,82, 135,169, 344,319] # P5/32 # YOLOv3-tiny backbone backbone: # [from, number, module, args] - [ [ -1, 1, Conv, [ 16, 3, 1 ] ], # 0 - [ -1, 1, nn.MaxPool2d, [ 2, 2, 0 ] ], # 1-P1/2 - [ -1, 1, Conv, [ 32, 3, 1 ] ], - [ -1, 1, nn.MaxPool2d, [ 2, 2, 0 ] 
], # 3-P2/4 - [ -1, 1, Conv, [ 64, 3, 1 ] ], - [ -1, 1, nn.MaxPool2d, [ 2, 2, 0 ] ], # 5-P3/8 - [ -1, 1, Conv, [ 128, 3, 1 ] ], - [ -1, 1, nn.MaxPool2d, [ 2, 2, 0 ] ], # 7-P4/16 - [ -1, 1, Conv, [ 256, 3, 1 ] ], - [ -1, 1, nn.MaxPool2d, [ 2, 2, 0 ] ], # 9-P5/32 - [ -1, 1, Conv, [ 512, 3, 1 ] ], - [ -1, 1, nn.ZeroPad2d, [ [ 0, 1, 0, 1 ] ] ], # 11 - [ -1, 1, nn.MaxPool2d, [ 2, 1, 0 ] ], # 12 + [[-1, 1, Conv, [16, 3, 1]], # 0 + [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 1-P1/2 + [-1, 1, Conv, [32, 3, 1]], + [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 3-P2/4 + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 5-P3/8 + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 7-P4/16 + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 9-P5/32 + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, nn.ZeroPad2d, [[0, 1, 0, 1]]], # 11 + [-1, 1, nn.MaxPool2d, [2, 1, 0]], # 12 ] # YOLOv3-tiny head head: - [ [ -1, 1, Conv, [ 1024, 3, 1 ] ], - [ -1, 1, Conv, [ 256, 1, 1 ] ], - [ -1, 1, Conv, [ 512, 3, 1 ] ], # 15 (P5/32-large) + [[-1, 1, Conv, [1024, 3, 1]], + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [512, 3, 1]], # 15 (P5/32-large) - [ -2, 1, Conv, [ 128, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P4 - [ -1, 1, Conv, [ 256, 3, 1 ] ], # 19 (P4/16-medium) + [-2, 1, Conv, [128, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P4 + [-1, 1, Conv, [256, 3, 1]], # 19 (P4/16-medium) - [ [ 19, 15 ], 1, Detect, [ nc, anchors ] ], # Detect(P4, P5) + [[19, 15], 1, Detect, [nc, anchors]], # Detect(P4, P5) ] diff --git a/models/hub/yolov3.yaml b/models/hub/yolov3.yaml index 09df0d9ef362..3adfc2c6d2f9 100644 --- a/models/hub/yolov3.yaml +++ b/models/hub/yolov3.yaml @@ -3,47 +3,47 @@ nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple anchors: - - [ 10,13, 16,30, 33,23 ] # P3/8 - - [ 30,61, 62,45, 59,119 ] # P4/16 - - [ 116,90, 156,198, 373,326 ] # P5/32 + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 # darknet53 backbone backbone: # [from, number, module, args] - [ [ -1, 1, Conv, [ 32, 3, 1 ] ], # 0 - [ -1, 1, Conv, [ 64, 3, 2 ] ], # 1-P1/2 - [ -1, 1, Bottleneck, [ 64 ] ], - [ -1, 1, Conv, [ 128, 3, 2 ] ], # 3-P2/4 - [ -1, 2, Bottleneck, [ 128 ] ], - [ -1, 1, Conv, [ 256, 3, 2 ] ], # 5-P3/8 - [ -1, 8, Bottleneck, [ 256 ] ], - [ -1, 1, Conv, [ 512, 3, 2 ] ], # 7-P4/16 - [ -1, 8, Bottleneck, [ 512 ] ], - [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P5/32 - [ -1, 4, Bottleneck, [ 1024 ] ], # 10 + [[-1, 1, Conv, [32, 3, 1]], # 0 + [-1, 1, Conv, [64, 3, 2]], # 1-P1/2 + [-1, 1, Bottleneck, [64]], + [-1, 1, Conv, [128, 3, 2]], # 3-P2/4 + [-1, 2, Bottleneck, [128]], + [-1, 1, Conv, [256, 3, 2]], # 5-P3/8 + [-1, 8, Bottleneck, [256]], + [-1, 1, Conv, [512, 3, 2]], # 7-P4/16 + [-1, 8, Bottleneck, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32 + [-1, 4, Bottleneck, [1024]], # 10 ] # YOLOv3 head head: - [ [ -1, 1, Bottleneck, [ 1024, False ] ], - [ -1, 1, Conv, [ 512, [ 1, 1 ] ] ], - [ -1, 1, Conv, [ 1024, 3, 1 ] ], - [ -1, 1, Conv, [ 512, 1, 1 ] ], - [ -1, 1, Conv, [ 1024, 3, 1 ] ], # 15 (P5/32-large) + [[-1, 1, Bottleneck, [1024, False]], + [-1, 1, Conv, [512, [1, 1]]], + [-1, 1, Conv, [1024, 3, 1]], + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large) - [ -2, 1, Conv, [ 256, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 8 ], 1, 
Concat, [ 1 ] ], # cat backbone P4 - [ -1, 1, Bottleneck, [ 512, False ] ], - [ -1, 1, Bottleneck, [ 512, False ] ], - [ -1, 1, Conv, [ 256, 1, 1 ] ], - [ -1, 1, Conv, [ 512, 3, 1 ] ], # 22 (P4/16-medium) + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P4 + [-1, 1, Bottleneck, [512, False]], + [-1, 1, Bottleneck, [512, False]], + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium) - [ -2, 1, Conv, [ 128, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P3 - [ -1, 1, Bottleneck, [ 256, False ] ], - [ -1, 2, Bottleneck, [ 256, False ] ], # 27 (P3/8-small) + [-2, 1, Conv, [128, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P3 + [-1, 1, Bottleneck, [256, False]], + [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small) - [ [ 27, 22, 15 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5) + [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) ] diff --git a/models/hub/yolov5-fpn.yaml b/models/hub/yolov5-fpn.yaml index b8b7fc1a23d4..217e4ca6ac96 100644 --- a/models/hub/yolov5-fpn.yaml +++ b/models/hub/yolov5-fpn.yaml @@ -3,38 +3,38 @@ nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple anchors: - - [ 10,13, 16,30, 33,23 ] # P3/8 - - [ 30,61, 62,45, 59,119 ] # P4/16 - - [ 116,90, 156,198, 373,326 ] # P5/32 + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 # YOLOv5 backbone backbone: # [from, number, module, args] - [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 - [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 - [ -1, 3, Bottleneck, [ 128 ] ], - [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 - [ -1, 9, BottleneckCSP, [ 256 ] ], - [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 - [ -1, 9, BottleneckCSP, [ 512 ] ], - [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 7-P5/32 - [ -1, 1, SPP, [ 1024, [ 5, 9, 13 ] ] ], - [ -1, 6, BottleneckCSP, [ 1024 ] ], # 9 + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, Bottleneck, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, BottleneckCSP, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, BottleneckCSP, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 1, SPP, [1024, [5, 9, 13]]], + [-1, 6, BottleneckCSP, [1024]], # 9 ] # YOLOv5 FPN head head: - [ [ -1, 3, BottleneckCSP, [ 1024, False ] ], # 10 (P5/32-large) + [[-1, 3, BottleneckCSP, [1024, False]], # 10 (P5/32-large) - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 - [ -1, 1, Conv, [ 512, 1, 1 ] ], - [ -1, 3, BottleneckCSP, [ 512, False ] ], # 14 (P4/16-medium) + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 1, Conv, [512, 1, 1]], + [-1, 3, BottleneckCSP, [512, False]], # 14 (P4/16-medium) - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 - [ -1, 1, Conv, [ 256, 1, 1 ] ], - [ -1, 3, BottleneckCSP, [ 256, False ] ], # 18 (P3/8-small) + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 1, Conv, [256, 1, 1]], + [-1, 3, BottleneckCSP, [256, False]], # 18 (P3/8-small) - [ [ 18, 14, 10 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5) + [[18, 14, 10], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) ] diff --git a/models/hub/yolov5-p2.yaml 
b/models/hub/yolov5-p2.yaml index 62122363df2d..6a932a868229 100644 --- a/models/hub/yolov5-p2.yaml +++ b/models/hub/yolov5-p2.yaml @@ -7,46 +7,46 @@ anchors: 3 # YOLOv5 backbone backbone: # [from, number, module, args] - [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 - [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 - [ -1, 3, C3, [ 128 ] ], - [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 - [ -1, 9, C3, [ 256 ] ], - [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 - [ -1, 9, C3, [ 512 ] ], - [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 7-P5/32 - [ -1, 1, SPP, [ 1024, [ 5, 9, 13 ] ] ], - [ -1, 3, C3, [ 1024, False ] ], # 9 + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 1, SPP, [1024, [5, 9, 13]]], + [-1, 3, C3, [1024, False]], # 9 ] # YOLOv5 head head: - [ [ -1, 1, Conv, [ 512, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 - [ -1, 3, C3, [ 512, False ] ], # 13 - - [ -1, 1, Conv, [ 256, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 - [ -1, 3, C3, [ 256, False ] ], # 17 (P3/8-small) - - [ -1, 1, Conv, [ 128, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 2 ], 1, Concat, [ 1 ] ], # cat backbone P2 - [ -1, 1, C3, [ 128, False ] ], # 21 (P2/4-xsmall) - - [ -1, 1, Conv, [ 128, 3, 2 ] ], - [ [ -1, 18 ], 1, Concat, [ 1 ] ], # cat head P3 - [ -1, 3, C3, [ 256, False ] ], # 24 (P3/8-small) - - [ -1, 1, Conv, [ 256, 3, 2 ] ], - [ [ -1, 14 ], 1, Concat, [ 1 ] ], # cat head P4 - [ -1, 3, C3, [ 512, False ] ], # 27 (P4/16-medium) - - [ -1, 1, Conv, [ 512, 3, 2 ] ], - [ [ -1, 10 ], 1, Concat, [ 1 ] ], # cat head P5 - [ -1, 3, C3, [ 1024, False ] ], # 30 (P5/32-large) - - [ [ 24, 27, 30 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5) + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [128, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 2], 1, Concat, [1]], # cat backbone P2 + [-1, 1, C3, [128, False]], # 21 (P2/4-xsmall) + + [-1, 1, Conv, [128, 3, 2]], + [[-1, 18], 1, Concat, [1]], # cat head P3 + [-1, 3, C3, [256, False]], # 24 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 27 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 30 (P5/32-large) + + [[24, 27, 30], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) ] diff --git a/models/hub/yolov5-p6.yaml b/models/hub/yolov5-p6.yaml index c5ef5177f0c8..58b86b0ca892 100644 --- a/models/hub/yolov5-p6.yaml +++ b/models/hub/yolov5-p6.yaml @@ -7,48 +7,48 @@ anchors: 3 # YOLOv5 backbone backbone: # [from, number, module, args] - [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 - [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 - [ -1, 3, C3, [ 128 ] ], - [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 - [ -1, 9, C3, [ 256 ] ], - [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 - [ -1, 9, C3, [ 512 ] ], - [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32 - [ -1, 3, C3, [ 768 ] ], - [ -1, 1, Conv, [ 1024, 3, 2 ] ], 
# 9-P6/64 - [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ], - [ -1, 3, C3, [ 1024, False ] ], # 11 + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 + [-1, 3, C3, [768]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 + [-1, 1, SPP, [1024, [3, 5, 7]]], + [-1, 3, C3, [1024, False]], # 11 ] # YOLOv5 head head: - [ [ -1, 1, Conv, [ 768, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5 - [ -1, 3, C3, [ 768, False ] ], # 15 - - [ -1, 1, Conv, [ 512, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 - [ -1, 3, C3, [ 512, False ] ], # 19 - - [ -1, 1, Conv, [ 256, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 - [ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small) - - [ -1, 1, Conv, [ 256, 3, 2 ] ], - [ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4 - [ -1, 3, C3, [ 512, False ] ], # 26 (P4/16-medium) - - [ -1, 1, Conv, [ 512, 3, 2 ] ], - [ [ -1, 16 ], 1, Concat, [ 1 ] ], # cat head P5 - [ -1, 3, C3, [ 768, False ] ], # 29 (P5/32-large) - - [ -1, 1, Conv, [ 768, 3, 2 ] ], - [ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6 - [ -1, 3, C3, [ 1024, False ] ], # 32 (P5/64-xlarge) - - [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6) + [[-1, 1, Conv, [768, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P5 + [-1, 3, C3, [768, False]], # 15 + + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 19 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 23 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 20], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 26 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 16], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [768, False]], # 29 (P5/32-large) + + [-1, 1, Conv, [768, 3, 2]], + [[-1, 12], 1, Concat, [1]], # cat head P6 + [-1, 3, C3, [1024, False]], # 32 (P5/64-xlarge) + + [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) ] diff --git a/models/hub/yolov5-p7.yaml b/models/hub/yolov5-p7.yaml index 505c590ca168..f6e8fc7928cc 100644 --- a/models/hub/yolov5-p7.yaml +++ b/models/hub/yolov5-p7.yaml @@ -7,59 +7,59 @@ anchors: 3 # YOLOv5 backbone backbone: # [from, number, module, args] - [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 - [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 - [ -1, 3, C3, [ 128 ] ], - [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 - [ -1, 9, C3, [ 256 ] ], - [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 - [ -1, 9, C3, [ 512 ] ], - [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32 - [ -1, 3, C3, [ 768 ] ], - [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64 - [ -1, 3, C3, [ 1024 ] ], - [ -1, 1, Conv, [ 1280, 3, 2 ] ], # 11-P7/128 - [ -1, 1, SPP, [ 1280, [ 3, 5 ] ] ], - [ -1, 3, C3, [ 1280, False ] ], # 13 + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 + [-1, 3, C3, [768]], + [-1, 
1, Conv, [1024, 3, 2]], # 9-P6/64 + [-1, 3, C3, [1024]], + [-1, 1, Conv, [1280, 3, 2]], # 11-P7/128 + [-1, 1, SPP, [1280, [3, 5]]], + [-1, 3, C3, [1280, False]], # 13 ] # YOLOv5 head head: - [ [ -1, 1, Conv, [ 1024, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 10 ], 1, Concat, [ 1 ] ], # cat backbone P6 - [ -1, 3, C3, [ 1024, False ] ], # 17 + [[-1, 1, Conv, [1024, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 10], 1, Concat, [1]], # cat backbone P6 + [-1, 3, C3, [1024, False]], # 17 - [ -1, 1, Conv, [ 768, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5 - [ -1, 3, C3, [ 768, False ] ], # 21 + [-1, 1, Conv, [768, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P5 + [-1, 3, C3, [768, False]], # 21 - [ -1, 1, Conv, [ 512, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 - [ -1, 3, C3, [ 512, False ] ], # 25 + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 25 - [ -1, 1, Conv, [ 256, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 - [ -1, 3, C3, [ 256, False ] ], # 29 (P3/8-small) + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 29 (P3/8-small) - [ -1, 1, Conv, [ 256, 3, 2 ] ], - [ [ -1, 26 ], 1, Concat, [ 1 ] ], # cat head P4 - [ -1, 3, C3, [ 512, False ] ], # 32 (P4/16-medium) + [-1, 1, Conv, [256, 3, 2]], + [[-1, 26], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 32 (P4/16-medium) - [ -1, 1, Conv, [ 512, 3, 2 ] ], - [ [ -1, 22 ], 1, Concat, [ 1 ] ], # cat head P5 - [ -1, 3, C3, [ 768, False ] ], # 35 (P5/32-large) + [-1, 1, Conv, [512, 3, 2]], + [[-1, 22], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [768, False]], # 35 (P5/32-large) - [ -1, 1, Conv, [ 768, 3, 2 ] ], - [ [ -1, 18 ], 1, Concat, [ 1 ] ], # cat head P6 - [ -1, 3, C3, [ 1024, False ] ], # 38 (P6/64-xlarge) + [-1, 1, Conv, [768, 3, 2]], + [[-1, 18], 1, Concat, [1]], # cat head P6 + [-1, 3, C3, [1024, False]], # 38 (P6/64-xlarge) - [ -1, 1, Conv, [ 1024, 3, 2 ] ], - [ [ -1, 14 ], 1, Concat, [ 1 ] ], # cat head P7 - [ -1, 3, C3, [ 1280, False ] ], # 41 (P7/128-xxlarge) + [-1, 1, Conv, [1024, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P7 + [-1, 3, C3, [1280, False]], # 41 (P7/128-xxlarge) - [ [ 29, 32, 35, 38, 41 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6, P7) + [[29, 32, 35, 38, 41], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6, P7) ] diff --git a/models/hub/yolov5-panet.yaml b/models/hub/yolov5-panet.yaml index aee5dab01fa1..c5f3b4817102 100644 --- a/models/hub/yolov5-panet.yaml +++ b/models/hub/yolov5-panet.yaml @@ -3,44 +3,44 @@ nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple anchors: - - [ 10,13, 16,30, 33,23 ] # P3/8 - - [ 30,61, 62,45, 59,119 ] # P4/16 - - [ 116,90, 156,198, 373,326 ] # P5/32 + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 # YOLOv5 backbone backbone: # [from, number, module, args] - [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 - [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 - [ -1, 3, BottleneckCSP, [ 128 ] ], - [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 - [ -1, 9, 
BottleneckCSP, [ 256 ] ], - [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 - [ -1, 9, BottleneckCSP, [ 512 ] ], - [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 7-P5/32 - [ -1, 1, SPP, [ 1024, [ 5, 9, 13 ] ] ], - [ -1, 3, BottleneckCSP, [ 1024, False ] ], # 9 + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, BottleneckCSP, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, BottleneckCSP, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, BottleneckCSP, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 1, SPP, [1024, [5, 9, 13]]], + [-1, 3, BottleneckCSP, [1024, False]], # 9 ] # YOLOv5 PANet head head: - [ [ -1, 1, Conv, [ 512, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 - [ -1, 3, BottleneckCSP, [ 512, False ] ], # 13 + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, BottleneckCSP, [512, False]], # 13 - [ -1, 1, Conv, [ 256, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 - [ -1, 3, BottleneckCSP, [ 256, False ] ], # 17 (P3/8-small) + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, BottleneckCSP, [256, False]], # 17 (P3/8-small) - [ -1, 1, Conv, [ 256, 3, 2 ] ], - [ [ -1, 14 ], 1, Concat, [ 1 ] ], # cat head P4 - [ -1, 3, BottleneckCSP, [ 512, False ] ], # 20 (P4/16-medium) + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, BottleneckCSP, [512, False]], # 20 (P4/16-medium) - [ -1, 1, Conv, [ 512, 3, 2 ] ], - [ [ -1, 10 ], 1, Concat, [ 1 ] ], # cat head P5 - [ -1, 3, BottleneckCSP, [ 1024, False ] ], # 23 (P5/32-large) + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, BottleneckCSP, [1024, False]], # 23 (P5/32-large) - [ [ 17, 20, 23 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5) + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) ] diff --git a/models/hub/yolov5l6.yaml b/models/hub/yolov5l6.yaml index 91c57da1939e..d5afd7d84100 100644 --- a/models/hub/yolov5l6.yaml +++ b/models/hub/yolov5l6.yaml @@ -3,56 +3,56 @@ nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple anchors: - - [ 19,27, 44,40, 38,94 ] # P3/8 - - [ 96,68, 86,152, 180,137 ] # P4/16 - - [ 140,301, 303,264, 238,542 ] # P5/32 - - [ 436,615, 739,380, 925,792 ] # P6/64 + - [19,27, 44,40, 38,94] # P3/8 + - [96,68, 86,152, 180,137] # P4/16 + - [140,301, 303,264, 238,542] # P5/32 + - [436,615, 739,380, 925,792] # P6/64 # YOLOv5 backbone backbone: # [from, number, module, args] - [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 - [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 - [ -1, 3, C3, [ 128 ] ], - [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 - [ -1, 9, C3, [ 256 ] ], - [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 - [ -1, 9, C3, [ 512 ] ], - [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32 - [ -1, 3, C3, [ 768 ] ], - [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64 - [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ], - [ -1, 3, C3, [ 1024, False ] ], # 11 + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 + [-1, 3, C3, [768]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 + [-1, 1, SPP, [1024, 
[3, 5, 7]]], + [-1, 3, C3, [1024, False]], # 11 ] # YOLOv5 head head: - [ [ -1, 1, Conv, [ 768, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5 - [ -1, 3, C3, [ 768, False ] ], # 15 - - [ -1, 1, Conv, [ 512, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 - [ -1, 3, C3, [ 512, False ] ], # 19 - - [ -1, 1, Conv, [ 256, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 - [ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small) - - [ -1, 1, Conv, [ 256, 3, 2 ] ], - [ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4 - [ -1, 3, C3, [ 512, False ] ], # 26 (P4/16-medium) - - [ -1, 1, Conv, [ 512, 3, 2 ] ], - [ [ -1, 16 ], 1, Concat, [ 1 ] ], # cat head P5 - [ -1, 3, C3, [ 768, False ] ], # 29 (P5/32-large) - - [ -1, 1, Conv, [ 768, 3, 2 ] ], - [ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6 - [ -1, 3, C3, [ 1024, False ] ], # 32 (P6/64-xlarge) - - [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6) + [[-1, 1, Conv, [768, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P5 + [-1, 3, C3, [768, False]], # 15 + + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 19 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 23 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 20], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 26 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 16], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [768, False]], # 29 (P5/32-large) + + [-1, 1, Conv, [768, 3, 2]], + [[-1, 12], 1, Concat, [1]], # cat head P6 + [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) + + [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) ] diff --git a/models/hub/yolov5m6.yaml b/models/hub/yolov5m6.yaml index 4bef2e074a96..16a841a0b4b0 100644 --- a/models/hub/yolov5m6.yaml +++ b/models/hub/yolov5m6.yaml @@ -3,56 +3,56 @@ nc: 80 # number of classes depth_multiple: 0.67 # model depth multiple width_multiple: 0.75 # layer channel multiple anchors: - - [ 19,27, 44,40, 38,94 ] # P3/8 - - [ 96,68, 86,152, 180,137 ] # P4/16 - - [ 140,301, 303,264, 238,542 ] # P5/32 - - [ 436,615, 739,380, 925,792 ] # P6/64 + - [19,27, 44,40, 38,94] # P3/8 + - [96,68, 86,152, 180,137] # P4/16 + - [140,301, 303,264, 238,542] # P5/32 + - [436,615, 739,380, 925,792] # P6/64 # YOLOv5 backbone backbone: # [from, number, module, args] - [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 - [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 - [ -1, 3, C3, [ 128 ] ], - [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 - [ -1, 9, C3, [ 256 ] ], - [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 - [ -1, 9, C3, [ 512 ] ], - [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32 - [ -1, 3, C3, [ 768 ] ], - [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64 - [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ], - [ -1, 3, C3, [ 1024, False ] ], # 11 + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 + [-1, 3, C3, [768]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 + [-1, 1, SPP, [1024, [3, 5, 7]]], + [-1, 3, C3, 
[1024, False]], # 11 ] # YOLOv5 head head: - [ [ -1, 1, Conv, [ 768, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5 - [ -1, 3, C3, [ 768, False ] ], # 15 - - [ -1, 1, Conv, [ 512, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 - [ -1, 3, C3, [ 512, False ] ], # 19 - - [ -1, 1, Conv, [ 256, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 - [ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small) - - [ -1, 1, Conv, [ 256, 3, 2 ] ], - [ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4 - [ -1, 3, C3, [ 512, False ] ], # 26 (P4/16-medium) - - [ -1, 1, Conv, [ 512, 3, 2 ] ], - [ [ -1, 16 ], 1, Concat, [ 1 ] ], # cat head P5 - [ -1, 3, C3, [ 768, False ] ], # 29 (P5/32-large) - - [ -1, 1, Conv, [ 768, 3, 2 ] ], - [ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6 - [ -1, 3, C3, [ 1024, False ] ], # 32 (P6/64-xlarge) - - [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6) + [[-1, 1, Conv, [768, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P5 + [-1, 3, C3, [768, False]], # 15 + + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 19 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 23 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 20], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 26 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 16], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [768, False]], # 29 (P5/32-large) + + [-1, 1, Conv, [768, 3, 2]], + [[-1, 12], 1, Concat, [1]], # cat head P6 + [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) + + [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) ] diff --git a/models/hub/yolov5s-transformer.yaml b/models/hub/yolov5s-transformer.yaml index 8023ba480d24..b999ebb7583d 100644 --- a/models/hub/yolov5s-transformer.yaml +++ b/models/hub/yolov5s-transformer.yaml @@ -3,44 +3,44 @@ nc: 80 # number of classes depth_multiple: 0.33 # model depth multiple width_multiple: 0.50 # layer channel multiple anchors: - - [ 10,13, 16,30, 33,23 ] # P3/8 - - [ 30,61, 62,45, 59,119 ] # P4/16 - - [ 116,90, 156,198, 373,326 ] # P5/32 + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 # YOLOv5 backbone backbone: # [from, number, module, args] - [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 - [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 - [ -1, 3, C3, [ 128 ] ], - [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 - [ -1, 9, C3, [ 256 ] ], - [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 - [ -1, 9, C3, [ 512 ] ], - [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 7-P5/32 - [ -1, 1, SPP, [ 1024, [ 5, 9, 13 ] ] ], - [ -1, 3, C3TR, [ 1024, False ] ], # 9 <-------- C3TR() Transformer module + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 1, SPP, [1024, [5, 9, 13]]], + [-1, 3, C3TR, [1024, False]], # 9 <-------- C3TR() Transformer module ] # YOLOv5 head head: - [ [ -1, 1, Conv, [ 512, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ 
[ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 - [ -1, 3, C3, [ 512, False ] ], # 13 + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 - [ -1, 1, Conv, [ 256, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 - [ -1, 3, C3, [ 256, False ] ], # 17 (P3/8-small) + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) - [ -1, 1, Conv, [ 256, 3, 2 ] ], - [ [ -1, 14 ], 1, Concat, [ 1 ] ], # cat head P4 - [ -1, 3, C3, [ 512, False ] ], # 20 (P4/16-medium) + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) - [ -1, 1, Conv, [ 512, 3, 2 ] ], - [ [ -1, 10 ], 1, Concat, [ 1 ] ], # cat head P5 - [ -1, 3, C3, [ 1024, False ] ], # 23 (P5/32-large) + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) - [ [ 17, 20, 23 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5) + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) ] diff --git a/models/hub/yolov5s6.yaml b/models/hub/yolov5s6.yaml index ba1025ec87ad..2fb245050053 100644 --- a/models/hub/yolov5s6.yaml +++ b/models/hub/yolov5s6.yaml @@ -3,56 +3,56 @@ nc: 80 # number of classes depth_multiple: 0.33 # model depth multiple width_multiple: 0.50 # layer channel multiple anchors: - - [ 19,27, 44,40, 38,94 ] # P3/8 - - [ 96,68, 86,152, 180,137 ] # P4/16 - - [ 140,301, 303,264, 238,542 ] # P5/32 - - [ 436,615, 739,380, 925,792 ] # P6/64 + - [19,27, 44,40, 38,94] # P3/8 + - [96,68, 86,152, 180,137] # P4/16 + - [140,301, 303,264, 238,542] # P5/32 + - [436,615, 739,380, 925,792] # P6/64 # YOLOv5 backbone backbone: # [from, number, module, args] - [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 - [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 - [ -1, 3, C3, [ 128 ] ], - [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 - [ -1, 9, C3, [ 256 ] ], - [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 - [ -1, 9, C3, [ 512 ] ], - [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32 - [ -1, 3, C3, [ 768 ] ], - [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64 - [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ], - [ -1, 3, C3, [ 1024, False ] ], # 11 + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 + [-1, 3, C3, [768]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 + [-1, 1, SPP, [1024, [3, 5, 7]]], + [-1, 3, C3, [1024, False]], # 11 ] # YOLOv5 head head: - [ [ -1, 1, Conv, [ 768, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5 - [ -1, 3, C3, [ 768, False ] ], # 15 - - [ -1, 1, Conv, [ 512, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 - [ -1, 3, C3, [ 512, False ] ], # 19 - - [ -1, 1, Conv, [ 256, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 - [ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small) - - [ -1, 1, Conv, [ 256, 3, 2 ] ], - [ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4 - [ -1, 3, C3, [ 512, False ] ], # 26 (P4/16-medium) - - [ -1, 1, Conv, [ 512, 3, 2 ] ], - [ [ -1, 16 ], 1, Concat, [ 1 ] ], 
# cat head P5 - [ -1, 3, C3, [ 768, False ] ], # 29 (P5/32-large) - - [ -1, 1, Conv, [ 768, 3, 2 ] ], - [ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6 - [ -1, 3, C3, [ 1024, False ] ], # 32 (P6/64-xlarge) - - [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6) + [[-1, 1, Conv, [768, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P5 + [-1, 3, C3, [768, False]], # 15 + + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 19 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 23 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 20], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 26 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 16], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [768, False]], # 29 (P5/32-large) + + [-1, 1, Conv, [768, 3, 2]], + [[-1, 12], 1, Concat, [1]], # cat head P6 + [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) + + [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) ] diff --git a/models/hub/yolov5x6.yaml b/models/hub/yolov5x6.yaml index 4fc9c9a119b8..c5187101072b 100644 --- a/models/hub/yolov5x6.yaml +++ b/models/hub/yolov5x6.yaml @@ -3,56 +3,56 @@ nc: 80 # number of classes depth_multiple: 1.33 # model depth multiple width_multiple: 1.25 # layer channel multiple anchors: - - [ 19,27, 44,40, 38,94 ] # P3/8 - - [ 96,68, 86,152, 180,137 ] # P4/16 - - [ 140,301, 303,264, 238,542 ] # P5/32 - - [ 436,615, 739,380, 925,792 ] # P6/64 + - [19,27, 44,40, 38,94] # P3/8 + - [96,68, 86,152, 180,137] # P4/16 + - [140,301, 303,264, 238,542] # P5/32 + - [436,615, 739,380, 925,792] # P6/64 # YOLOv5 backbone backbone: # [from, number, module, args] - [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 - [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 - [ -1, 3, C3, [ 128 ] ], - [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 - [ -1, 9, C3, [ 256 ] ], - [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 - [ -1, 9, C3, [ 512 ] ], - [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32 - [ -1, 3, C3, [ 768 ] ], - [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64 - [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ], - [ -1, 3, C3, [ 1024, False ] ], # 11 + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 + [-1, 3, C3, [768]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 + [-1, 1, SPP, [1024, [3, 5, 7]]], + [-1, 3, C3, [1024, False]], # 11 ] # YOLOv5 head head: - [ [ -1, 1, Conv, [ 768, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5 - [ -1, 3, C3, [ 768, False ] ], # 15 - - [ -1, 1, Conv, [ 512, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 - [ -1, 3, C3, [ 512, False ] ], # 19 - - [ -1, 1, Conv, [ 256, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 - [ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small) - - [ -1, 1, Conv, [ 256, 3, 2 ] ], - [ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4 - [ -1, 3, C3, [ 512, False ] ], # 26 (P4/16-medium) - - [ -1, 1, Conv, [ 512, 3, 2 ] ], - [ [ -1, 16 ], 1, Concat, [ 1 ] ], # cat head P5 - [ -1, 3, C3, [ 
768, False ] ], # 29 (P5/32-large) - - [ -1, 1, Conv, [ 768, 3, 2 ] ], - [ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6 - [ -1, 3, C3, [ 1024, False ] ], # 32 (P6/64-xlarge) - - [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6) + [[-1, 1, Conv, [768, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P5 + [-1, 3, C3, [768, False]], # 15 + + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 19 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 23 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 20], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 26 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 16], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [768, False]], # 29 (P5/32-large) + + [-1, 1, Conv, [768, 3, 2]], + [[-1, 12], 1, Concat, [1]], # cat head P6 + [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) + + [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) ] diff --git a/train.py b/train.py index 7a8c15a6551a..3f5b5ed1195b 100644 --- a/train.py +++ b/train.py @@ -74,7 +74,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary with open(save_dir / 'opt.yaml', 'w') as f: yaml.safe_dump(vars(opt), f, sort_keys=False) data_dict = None - + # Loggers if RANK in [-1, 0]: loggers = Loggers(save_dir, weights, opt, hyp, LOGGER).start() # loggers dict @@ -83,7 +83,6 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary if resume: weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp - # Config plots = not evolve # create plots cuda = device.type != 'cpu' @@ -96,7 +95,6 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary assert len(names) == nc, f'{len(names)} names found for nc={nc} dataset in {data}' # check is_coco = data.endswith('coco.yaml') and nc == 80 # COCO dataset - # Model pretrained = weights.endswith('.pt') if pretrained: diff --git a/utils/downloads.py b/utils/downloads.py index 00156962380b..588db5170e0e 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -115,7 +115,6 @@ def get_token(cookie="./cookie"): return line.split()[-1] return "" - # Google utils: https://cloud.google.com/storage/docs/reference/libraries ---------------------------------------------- # # diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 603837d57052..06d562d60f99 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -1,7 +1,8 @@ # YOLOv5 experiment logging utils -import torch import warnings from threading import Thread + +import torch from torch.utils.tensorboard import SummaryWriter from utils.general import colorstr, emojis diff --git a/utils/loggers/wandb/log_dataset.py b/utils/loggers/wandb/log_dataset.py index 1328e20806ef..8447272cdb48 100644 --- a/utils/loggers/wandb/log_dataset.py +++ b/utils/loggers/wandb/log_dataset.py @@ -1,5 +1,4 @@ import argparse -import yaml from wandb_utils import WandbLogger diff --git a/utils/loggers/wandb/sweep.py b/utils/loggers/wandb/sweep.py index a0c76a10caa1..8e952d03c085 100644 --- a/utils/loggers/wandb/sweep.py +++ b/utils/loggers/wandb/sweep.py @@ -1,7 +1,8 @@ import sys -import wandb from pathlib import Path +import wandb + FILE = Path(__file__).absolute() sys.path.append(FILE.parents[2].as_posix()) # add utils/ to path diff --git a/utils/loggers/wandb/sweep.yaml 
b/utils/loggers/wandb/sweep.yaml index dcc95264f8cd..c3727de82d4a 100644 --- a/utils/loggers/wandb/sweep.yaml +++ b/utils/loggers/wandb/sweep.yaml @@ -25,9 +25,9 @@ parameters: data: value: "data/coco128.yaml" batch_size: - values: [ 64 ] + values: [64] epochs: - values: [ 10 ] + values: [10] lr0: distribution: uniform diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index c978e3ea838d..66fa8f85ec4e 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -3,9 +3,10 @@ import logging import os import sys -import yaml from contextlib import contextmanager from pathlib import Path + +import yaml from tqdm import tqdm FILE = Path(__file__).absolute() diff --git a/val.py b/val.py index ee2287644b92..06b2501515b5 100644 --- a/val.py +++ b/val.py @@ -13,7 +13,6 @@ import numpy as np import torch -import yaml from tqdm import tqdm FILE = Path(__file__).absolute() From 7820614c40f307308492e28b74df8b6cd1c15437 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 29 Jul 2021 17:23:35 +0200 Subject: [PATCH 213/757] Add `@try_except` decorator (#4224) --- utils/general.py | 53 ++++++++++++++++++++++++++++-------------------- 1 file changed, 31 insertions(+), 22 deletions(-) diff --git a/utils/general.py b/utils/general.py index 6b00ddf2ff72..a414b391d24e 100755 --- a/utils/general.py +++ b/utils/general.py @@ -56,6 +56,17 @@ def __exit__(self, exc_type, exc_val, exc_tb): return True +def try_except(func): + # try-except function. Usage: @try_except decorator + def handler(*args, **kwargs): + try: + func(*args, **kwargs) + except Exception as e: + print(e) + + return handler + + def set_logging(rank=-1, verbose=True): logging.basicConfig( format="%(message)s", @@ -114,26 +125,25 @@ def check_online(): return False -def check_git_status(err_msg=', for updates see https://github.com/ultralytics/yolov5'): +@try_except +def check_git_status(): # Recommend 'git pull' if code is out of date + msg = ', for updates see https://github.com/ultralytics/yolov5' print(colorstr('github: '), end='') - try: - assert Path('.git').exists(), 'skipping check (not a git repository)' - assert not is_docker(), 'skipping check (Docker image)' - assert check_online(), 'skipping check (offline)' - - cmd = 'git fetch && git config --get remote.origin.url' - url = check_output(cmd, shell=True, timeout=5).decode().strip().rstrip('.git') # git fetch - branch = check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out - n = int(check_output(f'git rev-list {branch}..origin/master --count', shell=True)) # commits behind - if n > 0: - s = f"⚠️ WARNING: code is out of date by {n} commit{'s' * (n > 1)}. " \ - f"Use 'git pull' to update or 'git clone {url}' to download latest." - else: - s = f'up to date with {url} ✅' - print(emojis(s)) # emoji-safe - except Exception as e: - print(f'{e}{err_msg}') + assert Path('.git').exists(), 'skipping check (not a git repository)' + msg + assert not is_docker(), 'skipping check (Docker image)' + msg + assert check_online(), 'skipping check (offline)' + msg + + cmd = 'git fetch && git config --get remote.origin.url' + url = check_output(cmd, shell=True, timeout=5).decode().strip().rstrip('.git') # git fetch + branch = check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out + n = int(check_output(f'git rev-list {branch}..origin/master --count', shell=True)) # commits behind + if n > 0: + s = f"⚠️ WARNING: code is out of date by {n} commit{'s' * (n > 1)}. 
" \ + f"Use 'git pull' to update or 'git clone {url}' to download latest." + else: + s = f'up to date with {url} ✅' + print(emojis(s)) # emoji-safe def check_python(minimum='3.6.2'): @@ -148,15 +158,14 @@ def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=Fals assert result, f'{name}{minimum} required by YOLOv5, but {name}{current} is currently installed' +@try_except def check_requirements(requirements='requirements.txt', exclude=()): # Check installed dependencies meet requirements (pass *.txt file or list of packages) prefix = colorstr('red', 'bold', 'requirements:') check_python() # check python version if isinstance(requirements, (str, Path)): # requirements.txt file file = Path(requirements) - if not file.exists(): - print(f"{prefix} {file.resolve()} not found, check failed.") - return + assert file.exists(), f"{prefix} {file.resolve()} not found, check failed." requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(file.open()) if x.name not in exclude] else: # list or tuple of packages requirements = [x for x in requirements if x not in exclude] @@ -178,7 +187,7 @@ def check_requirements(requirements='requirements.txt', exclude=()): source = file.resolve() if 'file' in locals() else requirements s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \ f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n" - print(emojis(s)) # emoji-safe + print(emojis(s)) def check_img_size(img_size, s=32, floor=0): From c2c958c350407b630bbbf063cefbd64cea7d8c81 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 29 Jul 2021 17:29:39 +0200 Subject: [PATCH 214/757] Explicit `requirements.txt` location (#4225) --- train.py | 2 +- val.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/train.py b/train.py index 3f5b5ed1195b..250342acff18 100644 --- a/train.py +++ b/train.py @@ -451,7 +451,7 @@ def main(opt): if RANK in [-1, 0]: print(colorstr('train: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items())) check_git_status() - check_requirements(exclude=['thop']) + check_requirements(requirements=FILE.parent / 'requirements.txt', exclude=['thop']) # Resume if opt.resume and not check_wandb_resume(opt): # resume an interrupted run diff --git a/val.py b/val.py index 06b2501515b5..86439b1380dc 100644 --- a/val.py +++ b/val.py @@ -320,7 +320,7 @@ def parse_opt(): def main(opt): set_logging() print(colorstr('val: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items())) - check_requirements(exclude=('tensorboard', 'thop')) + check_requirements(requirements=FILE.parent / 'requirements.txt', exclude=('tensorboard', 'thop')) if opt.task in ('train', 'val', 'test'): # run normally run(**vars(opt)) From 18f6ba77cfbbf060a25d32a657629c2c1d419a49 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 30 Jul 2021 00:37:55 +0200 Subject: [PATCH 215/757] Suppress torch 1.9.0 max_pool2d() warning (#4227) --- models/common.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/models/common.py b/models/common.py index fc085e22b16b..24f02c2a584c 100644 --- a/models/common.py +++ b/models/common.py @@ -1,6 +1,7 @@ # YOLOv5 common modules import logging +import warnings from copy import copy from pathlib import Path @@ -158,7 +159,9 @@ def __init__(self, c1, c2, k=(5, 9, 13)): def forward(self, x): x = self.cv1(x) - return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1)) + with warnings.catch_warnings(): + warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning 
+ return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1)) class Focus(nn.Module): From 083c13da45f02ebbf23eac535cfcdd4c3b2b9492 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 30 Jul 2021 00:58:14 +0200 Subject: [PATCH 216/757] Created using Colaboratory --- tutorial.ipynb | 257 +++++++++++++++++++++++++------------------------ 1 file changed, 130 insertions(+), 127 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 831735cc0830..3f3f73ad4443 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -16,7 +16,7 @@ "accelerator": "GPU", "widgets": { "application/vnd.jupyter.widget-state+json": { - "cef5e9351ca743bcba5febac0b096a30": { + "2e915d9016c846e095e382b6a02ee773": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "state": { @@ -28,15 +28,15 @@ "_view_count": null, "_view_module_version": "1.5.0", "box_style": "", - "layout": "IPY_MODEL_ec326c52378f4410920c328f221e0514", + "layout": "IPY_MODEL_cb7fc3a5c6cc4fde8d2c83e594a7c86e", "_model_module": "@jupyter-widgets/controls", "children": [ - "IPY_MODEL_83000c64a11c4ae8abd6f0ef2f108cef", - "IPY_MODEL_0f7899eb719f4a9c9852426551f97be9" + "IPY_MODEL_ac3edef4e3434f4587e6cbf8aa048770", + "IPY_MODEL_853ac234cc2a4236946fc516871e10eb" ] } }, - "ec326c52378f4410920c328f221e0514": { + "cb7fc3a5c6cc4fde8d2c83e594a7c86e": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -87,50 +87,50 @@ "left": null } }, - "83000c64a11c4ae8abd6f0ef2f108cef": { + "ac3edef4e3434f4587e6cbf8aa048770": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "state": { "_view_name": "ProgressView", - "style": "IPY_MODEL_886ac5b18b3c4c82bf15ad5055f1e17e", + "style": "IPY_MODEL_13842ca90c0047e584b8d68d99dad2b1", "_dom_classes": [], "description": "100%", "_model_name": "FloatProgressModel", "bar_style": "success", - "max": 819257867, + "max": 818322941, "_view_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", - "value": 819257867, + "value": 818322941, "_view_count": null, "_view_module_version": "1.5.0", "orientation": "horizontal", "min": 0, "description_tooltip": null, "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_4e67b3c3a49849c7a7ba28b7eec96e7a" + "layout": "IPY_MODEL_f454999c3a924c7bad0746fb453dec36" } }, - "0f7899eb719f4a9c9852426551f97be9": { + "853ac234cc2a4236946fc516871e10eb": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "state": { "_view_name": "HTMLView", - "style": "IPY_MODEL_62c3682ff1804571a483d46664533969", + "style": "IPY_MODEL_f94a7ca8c1f04761bf38fdc5f99664b8", "_dom_classes": [], "description": "", "_model_name": "HTMLModel", "placeholder": "​", "_view_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", - "value": " 781M/781M [00:12<00:00, 67.1MB/s]", + "value": " 780M/780M [03:59<00:00, 3.42MB/s]", "_view_count": null, "_view_module_version": "1.5.0", "description_tooltip": null, "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_599dda3b608b432393760b2ca4ae7c7d" + "layout": "IPY_MODEL_9da1a23b042c41618dd14b0e30aa7cbe" } }, - "886ac5b18b3c4c82bf15ad5055f1e17e": { + "13842ca90c0047e584b8d68d99dad2b1": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "state": { @@ -145,7 +145,7 @@ "_model_module": "@jupyter-widgets/controls" } }, - "4e67b3c3a49849c7a7ba28b7eec96e7a": { + "f454999c3a924c7bad0746fb453dec36": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -196,7 +196,7 @@ 
"left": null } }, - "62c3682ff1804571a483d46664533969": { + "f94a7ca8c1f04761bf38fdc5f99664b8": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "state": { @@ -210,7 +210,7 @@ "_model_module": "@jupyter-widgets/controls" } }, - "599dda3b608b432393760b2ca4ae7c7d": { + "9da1a23b042c41618dd14b0e30aa7cbe": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -261,7 +261,7 @@ "left": null } }, - "217ca488c82a4b7a80318b70887a556e": { + "6ff8a710ded44391a624dec5c460b771": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "state": { @@ -273,15 +273,15 @@ "_view_count": null, "_view_module_version": "1.5.0", "box_style": "", - "layout": "IPY_MODEL_4e63af16f1084ca98a6fa5a282f2a81e", + "layout": "IPY_MODEL_3c19729b51cd45d4848035da06e96ff8", "_model_module": "@jupyter-widgets/controls", "children": [ - "IPY_MODEL_49f4b3c7f6ff42b4b9132a8550e12186", - "IPY_MODEL_8ec9e1a4883245daaf029458ee09721f" + "IPY_MODEL_23b2f0ae3d46438c8de375987c77f580", + "IPY_MODEL_dd9498c321a9422da6faf17a0be026d4" ] } }, - "4e63af16f1084ca98a6fa5a282f2a81e": { + "3c19729b51cd45d4848035da06e96ff8": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -332,50 +332,50 @@ "left": null } }, - "49f4b3c7f6ff42b4b9132a8550e12186": { + "23b2f0ae3d46438c8de375987c77f580": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "state": { "_view_name": "ProgressView", - "style": "IPY_MODEL_9d3e775ee11e4cf4b587b64fbc3cc6f7", + "style": "IPY_MODEL_d8dda4b2ce864fd682e558b9a48f602e", "_dom_classes": [], "description": "100%", "_model_name": "FloatProgressModel", "bar_style": "success", - "max": 22091032, + "max": 6984509, "_view_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", - "value": 22091032, + "value": 6984509, "_view_count": null, "_view_module_version": "1.5.0", "orientation": "horizontal", "min": 0, "description_tooltip": null, "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_70f68a9a51ac46e6ab7e51fb4fc6bda3" + "layout": "IPY_MODEL_ff8151449e444a14869684212b9ab14e" } }, - "8ec9e1a4883245daaf029458ee09721f": { + "dd9498c321a9422da6faf17a0be026d4": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "state": { "_view_name": "HTMLView", - "style": "IPY_MODEL_fdb8ab377c114bc3b862ba76eb93cef7", + "style": "IPY_MODEL_0f84fe609bcf4aa9afdc32a8cf076909", "_dom_classes": [], "description": "", "_model_name": "HTMLModel", "placeholder": "​", "_view_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", - "value": " 21.1M/21.1M [00:36<00:00, 605kB/s]", + "value": " 6.66M/6.66M [00:01<00:00, 6.08MB/s]", "_view_count": null, "_view_module_version": "1.5.0", "description_tooltip": null, "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_cd267c153c244621a1f50706d2ddc897" + "layout": "IPY_MODEL_8fda673769984e2b928ef820d34c85c3" } }, - "9d3e775ee11e4cf4b587b64fbc3cc6f7": { + "d8dda4b2ce864fd682e558b9a48f602e": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "state": { @@ -390,7 +390,7 @@ "_model_module": "@jupyter-widgets/controls" } }, - "70f68a9a51ac46e6ab7e51fb4fc6bda3": { + "ff8151449e444a14869684212b9ab14e": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -441,7 +441,7 @@ "left": null } }, - "fdb8ab377c114bc3b862ba76eb93cef7": { + "0f84fe609bcf4aa9afdc32a8cf076909": { "model_module": "@jupyter-widgets/controls", "model_name": 
"DescriptionStyleModel", "state": { @@ -455,7 +455,7 @@ "_model_module": "@jupyter-widgets/controls" } }, - "cd267c153c244621a1f50706d2ddc897": { + "8fda673769984e2b928ef820d34c85c3": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -551,7 +551,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "0cabe440-e06c-48b9-9180-4b4ea1790ff5" + "outputId": "ada1dd8d-e0aa-4858-e893-dc320319ca30" }, "source": [ "!git clone https://github.com/ultralytics/yolov5 # clone repo\n", @@ -564,12 +564,12 @@ "clear_output()\n", "print(f\"Setup complete. Using torch {torch.__version__} ({torch.cuda.get_device_properties(0).name if torch.cuda.is_available() else 'CPU'})\")" ], - "execution_count": null, + "execution_count": 1, "outputs": [ { "output_type": "stream", "text": [ - "Setup complete. Using torch 1.8.1+cu101 (Tesla V100-SXM2-16GB)\n" + "Setup complete. Using torch 1.9.0+cu102 (Tesla V100-SXM2-16GB)\n" ], "name": "stdout" } @@ -593,50 +593,43 @@ "metadata": { "id": "zR9ZbuQCH7FX", "colab": { - "base_uri": "https://localhost:8080/", - "height": 534 + "base_uri": "https://localhost:8080/" }, - "outputId": "c9a308f7-2216-4805-8003-eca8dd0dc30d" + "outputId": "a7a37616-a82b-4bdb-a463-6ead850b5615" }, "source": [ "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images/\n", "Image(filename='runs/detect/exp/zidane.jpg', width=600)" ], - "execution_count": null, + "execution_count": 9, "outputs": [ { "output_type": "stream", "text": [ - "Namespace(agnostic_nms=False, augment=False, classes=None, conf_thres=0.25, device='', exist_ok=False, img_size=640, iou_thres=0.45, name='exp', project='runs/detect', save_conf=False, save_txt=False, source='data/images/', update=False, view_img=False, weights=['yolov5s.pt'])\n", - "YOLOv5 🚀 v5.0-1-g0f395b3 torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", + "\u001b[34m\u001b[1mdetect: \u001b[0mweights=['yolov5s.pt'], source=data/images/, imgsz=640, conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/detect, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False\n", + "YOLOv5 🚀 v5.0-330-g18f6ba7 torch 1.9.0+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", "\n", "Fusing layers... \n", - "Model Summary: 224 layers, 7266973 parameters, 0 gradients, 17.0 GFLOPs\n", - "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, Done. (0.008s)\n", + "Model Summary: 224 layers, 7266973 parameters, 0 gradients\n", + "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 1 fire hydrant, Done. (0.008s)\n", "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, Done. (0.008s)\n", "Results saved to runs/detect/exp\n", - "Done. (0.087)\n" + "Done. 
(0.091s)\n"
        ],
        "name": "stdout"
-      },
-      {
-        "output_type": "execute_result",
-        "data": {
-          "image/jpeg": "/9j/4AAQ[... base64-encoded JPEG omitted: the cell's inline preview of runs/detect/exp/zidane.jpg; the archive is truncated inside this blob ...]
Zx940tNjdrdYN6nb83zVY2zR3IkR2Xc33V/i3VXt/JiXyfJVH3K25n/wDZasrav5n7mRt7I3y0SlL5GMYe0nK4+K38tnh37tr7d0jbmVqFWCSNZppst975U+VqmUpKqvMmz5V3Kv8AFUbW94rM72ao33/mf71ZyhGOw7S5eUga5hmtXuXhZVVN3y/eqz9lhZT+5kD7Pu/xU6OSY70dMfKvyqnzNTLhLm3kSbZt3bf3ivuatIx55cpy1o+7qYmoSWelw70Rssnz7k3bWrntQH2yRv8ARm3L/E33WWul1+N7i4N5DtdVfa/z1zWoRwx7oV+V9n3Wr08PT5o6RPCxEuaXumDJGjTP5afd+42yq81n++aaHa/9+P8Ai/3q1rrZJl0eP7nzMq1SVXhZ/n+b7uP4lr1OX7J59SRB5c0nyQ/39svmJU3lia1DvCzpu/hWnrG7Y8sbmX7/APtVZtYvtE/kzXjAL/d+bb/wGj2ful0Zc0Cp5Y2/voVO75WVvl2/7tV2sUjjbejPub/e3VtSKkzB4U3qrbdrfxNVeOx/el5LZUP3nkjeol7p1x+Io6fZ/eQw87fkVv4auwR+VG6Sxrhmot5k+5nH+0v+992o1me3byfmfy23fN/drCMpR+E6vspMst5zbpk+ZF+/t+WrOn3UNvHsR8Kv/LOT+Gs2GSa5kZ3TLs3yfP8AL/3zVea98lvOnmwv3fmrro1JHJUj/Kbcl9DGodLn/WL8ysvzf99Uv26FkHyRna/z7n2/LWPb3ieSqeYxT/ZpJ72OOTej4/vq1dkZe8eZW90172eGe4RAi/d/hf8A9Cqx532fbD2/gZV3VjrdQsrunySK33dn3f8Adqb7T9oWJoblVf8A2q6acub4jil7sjZ+1LuTY+7anzts+7/s1t6LFtl3pMuJF+VWi+Zv71c9Yq8ypDNNsSR/8tXYeH9N85vJRFX51Zmkro5faE85o6fpT3W3f13/ACRsnyr/AL1bsPh3czMjq80iK3mR/d+X+7Uul6ftBezRQ+z+J/4q6O10b7RGEQbWVtm1v4quMeWPvC5omFp+h/Z1abyYyrJtfa+7bT5PDkKxF/tK7F+b93/C1dXa6DulXybNfN+9tZPlqxZ6L5KvClmz7n2/Kn8VRyw+IfOecat4dmt5P3yM67dzR+V95v8AerEv9FtrcD5edjMrN91f9mvUdY8PpLM6P52/+BlT+KsXUtFmFn8kKsivu27aOXmCMjzDUNJeH/SXSNk2f8s/4d396sO68P3jLLDA7OrJuTdXp974Z+0RtBMjfvH3vuT5VrOuPC9+qvsRSfvP/srXLKjy6le1PMJtBRVl85Gfb8yMsXzVNY6ait/qWxH823b81dxeeF/LkEyJI67/AOH+KiHwztVvJhkUSbt7N95VrzMZh4VNGdtHESOc0/TUa62PCuzbteORPvVvx+HXWEJ9maV9jbl/9lq1b6T9kUpc2y7vuOuz5l/2qtWbOszQyPJhm2/NF822vlsVgYxq3jE+iweK93lZSh0tJIR5O5N3/Lu33lqa30vcslh+8VNq7W3fxVanjtkZZv7vzbv7rVYsy/lbPJZnVfk/u/8AAqzjh4yp/CepLGSjLkiQ2Okv5MkLou/YuyON9vzVv2Ni7RxzJuyzqssap8sdVYVeS3ihS3b93t/eVuaXawyRpFMmBvVv+BV7uCw1tTzcViOhp6ToYZhDNDtaH5vl/irWs9JhuIzDC7I7L/wKo9LmTef3zeVu2+YqfNW/pqvHPsTblvldpF+8v96vpMPR948CtW5jMOg/6O/2Z/461rPR3dVjm3NtXc7f3q1LWz+XzE2/vPueZ92tSx0lLyZJ5o9g+83l/wAVetGj2PLqVDPsdBhlVZnmb5m3Mq/Lt/2a2LPwrDdI5mh3S7N2373/AHzW/o/hlGjEJX5Gf/WL81dLpfhW/t8eXMv+xJGn8NdEqPumEqxw8Ph1J49727PtX5fl+VaLzw/NcWoRN21olTbt+aSvQ4fDKJGqbGY72+9/DWfdaKi2cMN1uaNV/wB2uSVMcah5dc+GXhbf5PlNJ8qK33ttctrmi+TdSpNbbdqV6tr2jfvpU+Vvn+XzPl21xviS18mR08mRVkfbu37ty1w1qJ1063KeWeINMhb5Hdk3PuRW+61crqlvNHv2bW2vu+VflX+H5a9H17T4bht4tt7Rv/F/yzX/AGa5HWLGOEi53rt+bev92vNqUaXLY9ClWv8AaPPtWs3mnEkz/wCrXbE396uc1Rkjj+Sbd/D5jJ8zf7tdv4iExh3/AMbL93eu3bXE6pHNHDN5L7VV93y/w15dSjL7MT0qOI0Oe1HUI5pNiOzJs2tI33aozSQ+Y0O+R1z91flqXUpI5LgQ+Szovzbdnyt/tVTuNQjmk2JJhNvzf3VatoUugSrRkJdXELIjzJIny7Nyv8yrVG4mdV2FG+V/ut/47U9w7xxjeik/e2r/AHaq+dumW2SPb8ufMrrjHlOSpLm+Ijk2LDv2Y/56tWfNDbQtlIdrN/FVq6Z5vkSZkT+Flpqqkiok3Cr8qybN1dHLzR5jn5v5itHbwrIjo+7bU9jZvuKIkm9m2r/C1N3JC290Zmb5f31XrOGbyW2Ox3Pu3Uvdpl048xYs/JWZIZ9vyrt+b+9XT6PYxyzt5KM3lqqqzfdrAsbEY8+a23Irfd3/ADV2Xh87Y0dEyfu7V+8tZ81ocp1fYLdjorxyRLDMsqr95ZP4latvT9LgjUeciu0n39vy7as6Lp9s6p8kbSt8vyo25f8Aere03Rd7Dem9o1/u/Ltq6fvSMKlP+UpWen289mqQ9W+dFj+8v+9X0F/wTJsbWw/bP+G5uo5Gjfxzpu0xsAS/nDackHgMVyO4z0615FHY2f2jYlsqPu3ytH937v3Vr2D9inxX4U+HP7Snw58a+MNbj07R9I8WWN5qF9cKxWCFJ1dmYKCeAOwr9O8Msu+vZri6ibvSw1eSSV+ZuDp2/wDJ7/K3U9bh/De1xdR3+CnN+t1y/qfpD+3z/wAEovEX7XP7QF38ZfhV8YtEsrq4gtrTxHpWro7G0kihQIyNCGOWi8s+W6rjhgxDgL5P+3F8Tvgd+x3+xbaf8E7vgp8Q38R+Iby4Evi3VdPlj8uPE/mTpPsdvKkeRFUQAkrGnztyN/zV/wAFWv2kvDfjf9uTxb4n+CPxCs9b0ie1soU1HRL52hlkhtY45AHUhZAGVgGQspHIJr5bvPiRq0JdhBbMQu7mN8n9a+l4W4gyWWAy2GeZnOpRwqpzhQVDl5akY+6pzTbmqbuo6K9k3c68FnOXQpUI4zEuUadmoKFrSS0u1uo9D9YPg/4i+GH/AAU+/Yr8N/s2XPxjk8KfFPwNFDFpz6jcgNqJiiZFZEVw1xC8KgOV+eJ0DFWGPMx0/wCCdvwF/Y8+F/iL4if8FCfjq2vvcabLD4f8MeHNanglmk4HmQCR0e5m3MuEKeUnLSblPy/lSvxn1+yuEuEsYISvzRyKXDA+ow3FVtV/aH8XahcPLqTW900Q2LLPJK+T6Alulems54cw2JqUMuzirQwdSo6jp
Roe/Ft80o063xQjJ9EnbVJ6u9f25lFKo4UcVOFKT5nFQ1V3dqMt0n6aH6e/8Er/ABJ4P0j9lf8AaVtbnxDZWCz+GS1nBqWpQpL5Rtb2JS2SufnliTdgKXcAckCvg61a2W5ja9jkeESAypE4VmXPIBIIBx0JB+hrx2//AGhvGFpvSHQdPZlOCSzgA/8AfVZN7+1N40tdqHw5pqyE8oyyED8Q1fYZXx/wLlGcY/H/AFmcnipQlb2cly8lNQte7ve176dvM9TC8U8P0MVWre0k/aNO3K9LJL5n7NS/8E0fh38aND8N/F//AIJn/tHweG4n0a3XW7O48SXLzrPsDeZLJAWeGc5xJCVVQw+UIPlrov24vEOm/AH/AIJv3X7OH7TfxvsPiL8R766iGnL9t3Xlu5uPNSZtxMxSJFcebIBv3BOAcV+Gqftr/FHRLiSXRdK06BsYZoZJ0bHvtkFUpf22PiLcTm5v/D+ku0nMkrecx3e5MnNfBLP+GcRmOGqZhnFStRw9RVIJ4a1ZuLvGMq9+ZpXs9E2tDzY5rldXEU/bYqU4wkpK9P3tNk572P22+F198Hf+CqX7FPhn9mjXPiknh34peA44o9LbWJEZr9o4WQNGm8NcRPCoDlRvidAxDDHmbX7OP7Jfwr/4JKprH7T/AO1R8ZdK1HXV0ua08OaBobYknVinmeQsxR7iZvlTG1UjUszMQcp+Gdj+2l8QkkWaHw7pUc0Q37oxMNp9Qd/Fav8Aw2L8S9ckW41zTbCZwuC8sk0hA9AS9ZYziPg3kr5fh83q0surzc50FQvL3nzThCrvGEn05XZXXV36pYnL5KdGniZRoTd3Dk11d2lLon6H68f8EfvjNbeOv24vif8AFrx/4ktbKfxD4cvr921K/jQ4N5FOygsRlY4kYkgYVI8nAFfDskkJ+KDSi6h8v+3yfO89fL2+f97fnbtxzuzjHOa+c9P/AGofFl118PWK4OGyJF/9mq7B+0V4keIzNpumlduV2JIdxzjH3q+py7xP8NMqz3F46liZ2r06VNQ9lK0FSUoqz63UtrK1utz3MLi8tp4qpXpzdpqKtbblTX6n6g/8F3PEPh7xH+0d4QuvD3iCwv418Cwl2sryOXaHuJpUJ2k4DRujqTwysCMivh6vI2/aF8UAZbRLA/u933n6f99VHcftGeI0fZFpOncfeLh+P/Hq34U8XPDfhfh7D5VHFVKipR5eb2UlfVu9tbb92b5fjMBl+Bhh1NvlVr2se3eF/EF94T8S6d4q0wKbnTL6G7tw5IG+Nw65wQcZA6EH3r9NPj5+z38L/wDgsH4P8J/tB/AT4raFoPja20RbTxB4b1S5814kEjHy5RHmSMxyGbbIYyJUZT8oAr8V5P2k/FKR5/sbTA2cbT5n/wAVUcX7VXj/AEqX7dpunWELp92WF5VZfxD15vE/ilwDneLw2PwGYVMPiqHMoT9i5xcZq0oSg7KSdk97pq61McfXwuLlCvRquFSF7PlurPdNdUfs6nhn4Vf8Egv2S/G+iax8SdI8U/FTxzCtkmmafc+WbdWikSM+Xu81YYt80hlITexVBtODXzl/wTN/Yb+DH7Z2s+J7X4sfFy60ZtEtI3s9F0ieGK8uQ2d1yWmR18mPaFYKpOXGWQY3fm/q37XPj0SSXdxoumyuV3tJKspZz9S9Yl5+2l8SLSISxeE9GbIycCXj/wAfrjwXHnCGFynGKlm9VY7FyjKeI9hquWyUY072UVFOKV76t32txRxGGoYeqvrElVqNNz5e2yS2slp+p+sP/BOP4xfCD9ir9vHxL4J8VfEjS9R8MahFdeH7XxvGWS1JWdHhnJyVWNzGFZssqkht+wFj3fxC/wCCHPi34g+Mb7xz+z/+0B4S1Dwnq91Jd6XNfSys8ccjlhGJIFkSYKCAJARu67RX4r3n7cvxHtpfLHhHRf8AgSTcf+RKZZ/8FGPjVpQeystG063iBJ2wT3KKx+glrXH8c8OvOHmuUZvOjXqU4Qq82H9pGpyX5Zct48stX8Lt5IxxOY4WGJeIw+JcJtJSvDmTts7aWfofuD+1Jc/A79gr/gnXqf7E2g/FXTPF/jTxPqm/VoLSUbrZzLFLLM8cbsYFVIYkRXbLsd20jeB83/8ABLL4HeFfjd+1zosfjjWtOttK8NRPrdzbX15HG161vhkiRX/1mGxI4wQI4nzgV+X8/wDwUB+KCF3l8HaFvByQVnyT/wB/Kgk/4KE/FSIgjwPoQB77Z/8A45XZlvGXBmX8N43AU8yqvEYpznUrui789RKLlGCaUUkkkk9O5lSznKMNgqtH28ueo23Pl6vS6XTTbU/Sz/gop+0vL+1L+1Nr/jewvfN0PTZP7K8OBH3IbOFmAkBHXzHLyZ9HA7V8rftHeDPEXxC+CmveDvCdmtxqF9BGttC0yxhiJkY/MxAHCnrXzpc/8FFfijChKeCvD5b+FSk/P/kSq7f8FHfi4kYY+A/Dxbb8wCz8H0/1lfT4fxB8OMPw6smpVJxoqn7LSDvyuPK3e2/W7T11ZvLiXhuGAeD5pKHLy6Rd7NW+8464/YW/aYeTKeBrYj/sMW3/AMXUD/sG/tNlsR+CLcBVwv8AxObb/wCLr0zRv28vjjrkipZ/DvQDvxtGyfv/ANtK9e+Gfi39sH4lzNaaP8G9PkmePfZRQ2VyxuR6r8/T3r88lgvByK1xOI+5f/Kz4udPgKE9a1W/ov8A5E+Um/YE/abBLjwhbkkcj+17br/38qAf8E//ANpxl2N4HteDkE6xbf8Axyv1T/Zj/Yr/AGkfiLq9sv7St/4d+G+nXC5828hmkuB6fuQ5YfjivS9Y/wCCedsnjWx0Lwp8f7LVNOmvvKvNRTw1Mojizjco87JP4Vj9W8F3L/e8R9y/+VlqPAkNVUq/d/8Aan4uz/8ABP8A/ajkO7/hBbUt6nWrb/45UR/4J9/tUHA/4Qe1Azn/AJDNr/8AHK/W74l/8E9/2qvB2vX1ppPi3wQbRJm+wLqXmRXEkXUOyedx8vNdBoH7FvhnSPDMeo/Ez9oa3k1JlDSWPhjwZd3EaZGdvms+3dWc8J4KL4sXifuX/wArLjPgWW1Wr93/ANqfjmn/AAT+/amU7f8AhBbXnuNZtcD8PMqxD+wD+06pBk8EW3y9P+Jzbf8AxdfqlqX7Pn9sytD4A8WXYbcRF/bHh9l8znAGEm+U/XNeYfHD9n79v74T2suu6N8MvDmsaXH9yZEnjkk4yPlMny/jURwHgpU2xeJ+5f8Ays6IYjginL+NUXy/+1Pg22/YQ/aPTa8nge2DL/1GLb/4ur8X7Ef7RuVVvBlsu1cbv7Wt/wD4uvSNf/bJ/aE8K6pJpOv/AA20GCaIYkj2T7g393/WVn/8N7/F0bd3gfQVz97Mc/H/AJErmq5d4Hw0lisV9y/+VnsYarwvL+HVn/X/AG6cxbfsYftBwqEfwtbnnOf7Ug4/8fq7bfse/H22gyvhe3MpbJb+0YP/AIuuli/bu+JUi7h4O0Tn7o2Tf/HKng/bl+I0/wAq+EtEB91m/wDjlck8v8CeuLxX3L/5UerT
nkmnLOX9fI5Zv2PvjyzAP4at3XdnH9owc/X56mX9kD41JEdnhCAOPuf8TKDGPT79dOv7cHxCYKF8K6KWLYZQs3H/AI/Uq/ts/EGSMtH4V0cnbkKY5v8A4usnl/gNHfF4r7l/8qOh4jKIvWcv6+Rx0n7H3xzkkMknhOA+w1OD/wCLqGX9jn48Stz4UgUe2pwH+b12LftwfEeMEv4Q0bAOGfbNj/0Oobj9uz4gwnavhTRM7sAFZv8A4ur/ALO8CP8AoLxX3L/5UR7fJd+eX9fI4ib9iz4/lCq+FIGBOSv9qQf/ABdULv8AYf8A2h5590fg+JU9P7Xt/wD4uu/uv28/iZbyso8H6GVX+LZN/wDHKoz/APBQj4oRS+WPBWhd/mMc+OP+2lbU8B4Fx0WLxX3L/wCVGNWtkUvinL+vkcHP+wx+0lv3ReCbYn+8NXth/wCz1Rf9gz9pl02f8IPb8nLf8Tm2/wDjld1ef8FHvivaKWPgjw6SBnG2fp/38qg//BTX4truK+A/Dhx0AS45/wDItdEMu8EJaLFYr7l/8rPPqVOGoy96pP7v+AcZL+wV+1KwwvgW3Pu2tWuf/RlV5P8Agn9+1NI5c+Bbfn/qN2v/AMcrtJ/+CoHxbjTcngPw3nGcGO4/+O1UP/BU74yAE/8ACv8Awxz935bjn/yLWqy/wStpisT9y/8AlZzyqcKy3q1Pu/8AtTkT/wAE+/2qdpT/AIQC1I/7Ddr/APHKhf8A4J6ftYOdqeBbNF2441q1/wDjldif+Cqfxm8woPh74Y4Gc7bj/wCO01f+Cq3xjZcjwB4XB90uf/jtbRwPgt0xWJ+5f/KzjnDg6W9Wp93/ANqcrD/wT4/arUqx8C2ile/9s2v/AMcq7L+wp+0vo2nS6heeBIZI7eJpJEh1S3diAMnCh8seOg5PaultP+CpfxiuQCfh94aXPqlx/wDHa+gP2Pf2k/Ff7Rena7d+KdD06yfSprdIl08SAN5gkJ3b2b+4OnrXrZNwp4T8RZhDL8FicQ6s72vypaJye9Psi8FlXCeY4lYehVqczva9uiv/ACnw7pEa7wrBwScEEfMK6zS7d1kTfD8zfM237tafxtso7j49eLHUGMr4kuug+9+8ak0O3n3L/dr8TzHB/UcZVw978kpRvtflbV7dL2PiMRSlQrzp3vytr7nY1tHt/MZYUh+81dLY2aSTGGNMrH833fvf7NZej27j5H+8r/LtrprfT5Fj3un+0is23dXFzcpzylyF3SdN8uTyZodzMm3cvy7a3dLidmSF3bb/AHv71QWMMMkLWz2zFP4V+9WxZ2/8b220fdX+61Pm5iJR5vhNGyEkKxQ7N4/hkb+Jq3tL1Dy02zTZ+Tcqqm75qw7dZsfaYN0R3bkb7qx/7K1qWFtbW9ujTQ+Tufb8yfd/2quBMi1czQqsWxFT5d0qr/eqnqFwkm+HYw/uKtLPdTW7NZzeTMsbbl+X5apXl1Myr/q96tu20c3KXEkWTbNsRG3qm1lrPkmkupvkmbH93Z/tUyS4SPMzuwRn+9/7NTPtUN0okeZok+ZYpFT5d1TKXKe3l0e4XUkMkj3Oze8fyorJu21j6zJtmT5GVmdtqxt8taNxIkluiO+yTb8jbvlb/ZrHupEkmWG5f5fvbv4lauSUub7R93gXyqKaMPUFSMtB5yszfNtX+GuR1mP9ykcLqy72+X+9XXa03lwvMnKsv/Aq5TXIdzJ5aNt27kZflrnlLm9496lGMdeYry3CKyIiL+8+V6t2t9tkXznVF2fdb+KsRrzayPvzt/vJVeTUt0iyPwuz/Vt/FXNHnPzCVTmO60XWIWX9/Nxub5f4lrW026trdt7uzbvm+avO9J1ZIVRN7M33vmrXXXnXP+krt+75f96vGxPtfe7G1OXL70jtlvkurUu7thv4l/iX/wBlp39qP5aCzhZ0+ZmZZfmauQHiR0hZEk3bflfdV+z1jzIza+dsVXVt1eBUouU+ZHtYWXN9o62x1iS2dIX3Z37F+Xc21vmq41x5kZ+7v+9u+7XN299JdFB521l+/wDP/DWhZyIrFH8xWX5otyfeqKFGfOe5TqezjyyNrzHkmebyWMm7au1vl/3qfdXEMe6aa5VW2fLt+7uqpb3EFwqf8sw3yvN5v3aJLeOZUTf/AAfekT73+7Xt4OnzfEZYip/LIjuNkm1E4eRfvL8tZtxb3klx878L8jbfl21uLB9oX/UtujqO6tY7yHY4+6n3v7y19Pg17P3TzK2IjH3TCk0/zI1dONvzOzP/AA0tvsa8f52ZpPuN97/vmr0luhkCJCwib5fm+792nx2m5UuoU2bovlXf/FXv05R6ni1qntJycivqFvCsYeGHemzdu37ay7qQzTP5iMrf3VTbV+8t33eXcjftTcm5/u1l3TS+dve52nZt3N92t5S5feOen/dKF1CkanzpmUK25F27laqkzbcuH+ZvlZf7tas1v5kcfnTN5n3VaqElu/2xt6bH/wCei1wYip7p6NOMiCNXm2P+7T+5Vu385VD72eZm2/eqNYPMmTyX/dKu5/k+81W4YMZTzmb5vkZa+axlT3rnsYWPuk9nL9ouJXfh97fL/DVuG8eRnS5T5V+42z+GqqshUon36tL94IEYjZ8v8VeZUfN7x6dGjy+8WY2tppN/nbSqbX3JViCbzmX7Z95flXc/y7arR280LJs+ceVudf4t1SW6+Yr/ALlh/Du+98tc/uSNKnuwNe1+03lwk0D43fK/+1V+PyVmhtprlQVdml3J96sSz3syJ+8zH8z/AC/L/u1s6XI80x/0be0f3amVP3tDjlU5o6mxbh2bZ9pVCv8Ayz2feWtK1VP9dCiurfNuZvmWsuxW8YI7vx833fvVpx7LiHzoUjjPlL/Dt+7XNUjOUeUI8kpFhreby2ms0VnX725PlVaS3tPMkbzn+78rfxbv9qmwu8kPkO8jj7rtsqWOPdGuyZdu7aiqm3atTGE4mcpR5vd2EWNFkREhy0j7Hbzd3+7TZoXVf9TtLfLu/u1IsaW+XQ/LI/3VqteX0MzOiI0bx/8AAvlrrpVJc+iOKt70TA1CPdJ8+52+ZUXftrDvpt2XhgXP8asnzLXQ6hIkLLN5zM8fzbv4vmrD1CGb7Q/nOxRk3Oy/LXv4X3TwcRExbxUmV4RJ+6X5vMb+GoL6zhjVHfzHH3kXb/FWhND5MbSvtZN/3W/iqC8V2kLu7BWT5o1bdXdGPtDhqRjGPvGck0Nu0nzsjt/Ez7qs2Nxc3ipvRVaFOdv8X+9TGjTzD8nH8asm7dVm1VF2JbeW7N/d/irSUfsnNHmj70S4stzBGU2b9yMys3y7ao3Vwlzs+Rl/vf8A2VXbiSe1ZrZEU7l/4D/u1VZtyn59rfeRf73+zXPKnE7qdScdSOzTdA7ui7t21G/vVFbyQ26ed8o+8qKz1PHZurffVfM+ZNr/AHaq3Fq7Qt523fv/ANXt27q4pHdGRG15tUzJbSD+8v3dv+1urOuZvOZtkzbmfc67d1Xmt08n99D95Plj3/erPuI3Vt6PtXft21dP3feRnW94qSag6rJ
5Pmf3U+bbVyxvPtiK+/8A1a/3N1Zl1au0i+dHvVv4m+6rVoaRbvax+T5LfL8u7furvpyjI8vEe6XVkubi4/0lF2/KyNH97dWvp9i8nlZ2lf7rL826q+l2PmN86MWX5UVf4Wrdt7V2u4vvH/pmq/xf7VdlPc8upI0dJ011VZkdZG/uyf8Astd/4X0GSaFIXhZFuP8AVbn+7/tNXP8AhbQ4VkU78fJuWRmX71dz4b01G8nZdRy+Wm5Fb7q/7Nd0Y+4ZSlym74b0NI1+4uV+V2X5t1dFY6KkcnnWb53PuRm+Vv8AgP8AepfD9snyzJCy+Sm3a33m3fe211+l6fZria2h+ZV3Rfxf71ax/vGEqhj2ehvGyTfvGZfv7m2/LVqPw88du23d8rs3mRvXT2OlpMo3w71k+Xbv+arlroX2e3fenz7/AOJPlqfZxkL2hwWoeF9sweG8b5tzs0jfxMtYNxob2qr5af6v7i7t26vS9S8PzSTfPD8rbVesrUPD/l/vnto3WP7jKnzbafLKKI+sc3uo83utB8uFv3O+L5v3cn3l3f3aoXGhwzbvkyq/wyJ97/er0W80d5IQkyKrKjfeT/vlayL7w68cYm3rvVFbatTKnAftPe5UefXWh+cqvDIyIq/3aqX2ipNH+5fcy/fXdXa3Wlv5Mlr+8VvvfN91ayLiz8uGSGz3M/8AeZf4f71ediKZ3UZHKXUPkhofJbdJ/rW/5aN/u1BPZvHIpmdkkjT5P4W21tTaajYvH+V9+0LJ8u6ql8jtumf5Tv2LtfcteNiKMuX3T28PU5Sh/ZsMkbI824SJ/C3ysy0iL9laK2hjYeZ827zdq1JqH2do/kh2NI251X7tCXyNJslH/XJW/urWNLD8252e2/lLtvZvuRH+VV+Vfn+atuz85bjZDwY1+81YulzQSKqTncPveZu3fd/vVoabeOrFJpmZWdfKZv4t1ephafu2OLEVPaHWaO0MCrsdWeT5nVf/AEKuh0/943n3U/mn+Bdv3VrldMh/fB4dyOv3F/vLXT6OEaOKa83b2b7q/d/3a9mlGx5tSUuY6TS7ePy0m2SYX5ol27l/75rttL0lIVDzQfP95F/9lrA8O2/l4+95TN92u90Wxm8lJnttz/e2yfeVa9aMuWB51SXLL4jR0HRofs6BIdu5NzN/drqNL8O7oY32bUb5UaodHs/Ot1mmtW2RtsRY/wCKujhh+VExho33bmX5v+BU5e8cHtvf94zTpP2fe9m8OPKZdzfxVmaho9tGPtSIu1f4fvV1lxFDHjft/eJuST+GsHXLdJIW8lNyxy7mWFtv3v4q5qkf5TWMjgvElj51zs+zM4VF+9/erhPEFq8jfcw8jsjbvlaNq9K8RWs1rJL+5+dXVnbd/DXE+KbV5pHh3szSJu27K5KkeY7KdTmPMtYsZpVljdFRo/k/2f8AvquJ8QaT8rJM/mov32/hr0zW7OG3m2b/AJ2RmZW/u1xHia1+0M/nTNsVNjL/AMs2X+8rVxVKZ2U582p5v4i0+2mt3RLbDr9z+Fq4DxNZ3yyJ/q8/xrv/APHq9P1i1eSdHh+YL8kUjV534ou386a5ddhk+VFjX/x6uSVPlOunW7nA6o0ylk8lt8aN81ZscsM0mxPuM+19y/MzVq6tHMt9Md/y7NyNWHJIm7Y42H7zLv8Au1hyxOzm5tIkrXf2VsoMov8ADsqlcTG3b98m5ZP4anaZGU+T/F9zd96q9xcbW+R1I27WZV+b/drWMeb3iZSj9oiMkMm5E6/dVViqPzizSpM6jb91Y/ur/wDZUrSPGrO8O2X+9v2/LVdpIdzfPv2/7FXy8pzc0Ze6WRJC2zzplf8AhbzFrRtWDQu+xQWVdm2sqxXy9ieT8n3trfNuq5Gybi6bdjJ91f4aipHqbUeeO5uae3lzFJk2tt/iTdtrrNDk8mOLf5Zfb97/AOxritPuv9I2SOrfL8+7+KtjTdSkVfMm2p83/jtc8o8p2xl9k9T8N3kMcbPNMuPu/L8rf7NdRot5MtqLb5VO355F+7Xmvh/VkCo+/fHH/qvMf71dPpevPI6Inlptf5938VVR90KkjtY5kh2PDCrlV2ytGnzL/eq7BKzab5u3adjHAHTrXNLqCSRIjzM0i/N8rfKv/Aa39Om8zQxNKCP3Tbv1r9u8FnfOMf8A9g1T/wBKge7wvKMsXWt/z7l+aOVulhWQ/Y0Uru3bvutWJq0m5nhSbcv95v4q19WmhZUf7SybflVttZF4okj3oV/d/Kqs33q/KoysfB1InP30n2e8TyfkZv733ayppIbiR0eFVDPuZf71bGuKlw7onKKu6KSNN3/Aawry3TzA6W2z+Hdu/vV2xlGRxy934jJ1eJPOZ/M/vbq57VG+/wCZwPK+7t/irodQhSFpk+V/n+9XN3yzSRzP8u9m+ZVrKUvcKp83OYOoqjRrMg3t/Dt/u1QRFaP5E+Vv4mq7cRwsrQzQsu3/AGtqrVK9aGNQkbqqL8u3+GuCt/Kjup8nOPtf3Mm5/wC/tX/arTt714Y/73zfN/s1kNIkduknQL/D/dqSO68uQ735/wBlK82pThserh6h1Gm6lCyb/Obb/data31CGaHY02U3/ulj+VmrkrG8TaqTfIf9/wC9Vyz1HzpvOm+T+GKvNqYePQ9jD4iXwnUrIfJeRJlyqbH8ylW68tQlyilmT/SFWsKTVpvnfzsbfufxVOkzt86fLuT7zfe3VzSp8p0xrc0uUtXN0mF82HC7/k2/w1FNLeSS7IYVwqMvzOq7v+A0xZvmRPtSs/3tv8NR+TdSM291+b5kVfutUcsfiOiNSXwxM7VIQyrM6MpZ/wDe2/7NYeoafCrfO+FrpLi3eP8A5ed396sqe3Rpmh8ldsny7mrqp+7A563J9o5u+snjU79q7fu1n3Fqkn91j/eat28jtmVnR2Yfd/4FVBrEyTP/AH9v8SfLXpYfmlqeTiDGmsYXDbnZtvy1nXVq43b/ALq/d3V0U1n5cf3Gb/d/irS0P4X+IfF2oW1tpumzP9o/1SrFur0KfkeLiPdicJDod5qF8LazhkZ5H2osabq+rf2Bf+CW/wAWv2vvG0Og6bYSQ2ULq2pak1qzeTH9792v8Un+zX03/wAE3P8AgjzefFjxVpU3jOwupHaf9/b+Q0EUK/e3SSN/eX+7X7b/AA5/Z58GfCfQU+GPwQsNL8KadDYLZJdabb/6Szf8tJt395v71dNTFRpw0PErSnUl/dPze+B//BJ34D/B3VLZNY8NX2o6xDOqabot1YfabmZl+80kcfyx/wDAq+uLXwD4k+GetSX7+NrHwfdw6WsVloui6XDJdLGq7lVY41ZlZmr1T4qeF4f2bvBaW3gDWLfQra+vP+Ko8d65ceZcxx/xLBu+ZpGrxyL9v/4V6D4T8SQ/s2eEJ38QWUTLB4u8S6Zu+0bfvTbfvMtcVTFc0uVl08Py+8jl/CvxQT4P3WpeNvj98PLzVZdS+bTdS8cXq2zyNu+6sP3m/wC+a7vwX/wU0/4J56L4LW21o6amszMy3Gm6Ho0ki28i/wALSV8ReLfhh8
Vv2n/G0Xj74reONQ8Q6je27NPqk1vJtjjZvlWGNflVf92qmufsSv8ABbxlpGt23wc8VeKtLjt1nnt5L/7D9quN33d393/0KudwqSl7j5TROlT3PsDxR/wUN+BV3rNr4nufiF4JfTVLQweHb7SVRo23fK0tzKvzVwvxS+O3iTXJpofCvjnwjqOkattli03Q2WRbfd/CzLXjPxA8Fp8XPDb+FdY/Y80Hw3bSSx7bq68Q/afL/wBn7v3q1/Cv7GPxI8K/Dyz13QYfBdtZ2M7eba6PK3m+X/DuasvelFe8TKMPiZufDHRf2jtH1j/hLbbwTp99Asv+i3FrKrK393crfxV3mg/Ej4naa9zqvxX/AGY9U8R2l15jT6hH5byeX/eVV+XateP6X+0NrHw/aXwr4z1WPFvLuijtZdyrtr3X4A/tufBzXri2sIdVvEC/8fkMkXlrWftlEJUZSjFxPMvit/wT1/YA/buhv5ktLjwl4kvLNks75bVoJ7ebb8vmf8Cr8yPj9/wSv/a3/Zn8ZXPhXVfhvJ4q0ppW/s7xBp8EjR3EK/ekZtvy1+6/iz4mfsr/ABC1pPCuj+MNF0zXI3826Zv3DR/wruk+6zLXZ/Dv4Y/Ejwr4fubzQPj9Z+IIfK22FveIsvmL/d+b5dtdUcTGpG03dDo1q+Hn7p/Mp44+Bz6LpqarZ21xb3McrRXum3jqrx7V+8q/e21wEdnDG3yOr1/Rn+3N+x78Fvjx4Nurz4i/CjSdD8QzQMsHibQUji3Mq/dZV+81fjH+2N+xzpvwR157/wAH+JI9StI7PzZbdl2zxt/FuVayrU6Uo80GfQZfnHNPkmfOTWfQI64X7+1fmpY7WRVkDo3zNt/3qteWkjLsttv8T7qmhs3j2zfKSv8ADXm/D7p7/N7Qz5I3WMJ5PzN/eWq7Wf7nZ5C5V/vba12jmRRNH93fVC+V45nm2b93+d1bRjORn7SEdjEvI3be7w/d/i3ferH1BflE3ksvybdtb2oRpIvlx8Lv27t1YeoRzRspPI/3q6acffOKtWnuc5ffvH8x3xt+Xb/FWJfrM0iIUXDbvu/w1t30b7t6f99VhX3neWybNy/3a9GnE8utWl9oo3XkhTx/wKqUknzEbPm/2at3SptG9/vJ92qsm+OT5ErrjE45S94rsu1N3zUx442j2fxbvu0+TzG+R9zfxbf7tPhjk/5ada0jEjmLenr5siu833f7tfan/BL2MR6H4wAXH+k2X/oM1fGFjGke3Yn/AH1X2f8A8EvUddC8YGRcMbixzzn+Gav0fwijbj/CelT/ANNzPpOEv+Sgpf8Ab3/pLPKPjc+z47+K5NuNviC65X/roadoMkM0ImCNv3fI3+z/ALVP+NpT/hePiuIY51+6LY6/6w1DoMe2MeTwdy7Wb5q+Hz73s8xX/Xyf/pTPEx/u46r/AIpfmzq9BRFuPJTa25l+9/DXTWMcefJSbhpfk+SuY0dnmZnMyn5vu10+m/6QphSFt2/5If7teRzcupwyidDptvDIqOj7v92tayhmeYfdCRszRRr/ABf71Y+lw4kG+Zl2/NtrXsZPvTb2f5dyfL81Rzcxlzfyl+3tUjbz53+Zm/h+b5aurcCNt/X5F2R7flqrb3ULqY9jH/ZWnyyfZWMaooRvmdt9aD6+8N1C8/1yTIqbovvL/wCg7ayLwQx26eTNIdvy+d/EzVYvE2OJtm7cnzrWczXO7zo/LVvu+Wr/ADUS/lHTiNuLl5GeadMt95/7tRzXTxKr+fu+fai/e2tVLVmk+5DeqybFaVV/h/2apSTJ5O/exH3V+eplKEj28HGEf8RqXWoIzDztsyxy/wBz7zVm+Yka4uZl3M/yNs/hqtJcQ3GLYPtCy7trVJ9udrZnuU+Rn2ouyuGX90+zy+p0ZT1DZJJ5yPgbvnk/2f7rVzeuRxyKqIjbFb/gVa2qXSNal0mZEb726sDWL54wy9V/jasYxme9Tl9mRzEl5tdk8liP4V31UluUjb765/ip0mxZCkM2dv39v96q19JjCfw7fvbanmjzH5p7OXQeuoJGyzfMxWrtvqSSKqPMu+sZrqHzBI77FX+FaFXdMskLyDb83yv96sqlOlJm8Y8x0a6o7YQq2W/u/NWvZ6gm1N77Gbarr/8AY1yFvK63Su7sn8S7WrZt7iaKRftib2+75lcVbAwlLmideHqezmdfb6skMivM/lbfl+X5t1bmn6h5irNHefN8q/8AAa4rT7hJJm+RnX/lk1btnMdyP/ef/d2rXP8AU4R2+I76eJ/mOqsZEVTNNCxMku1Gb7vy1qWdw6sn8O75t33tq/xVzenLNJG6Q3P3pflkX7tb+myTbET+HbtdVWuzD049TT2kuX3S/HIkLK8ULHc23zI//ZqsyWsjRs/kqHX5mkX+7UViwVTN8u3+6z1PMs0cjum77v8AE/y7a9zDx5TgnUluyvdK8cg2Ovy/M+6qlxcJbwvczfuk/ikb7tF1cZha5meNXjZkRY/vVk3V1jKTTbU+7tavUiebUl73vEclwJN7+cz+Z/49VLbMzPzHlvlSht6L+5eP/eX+7TjLbNjY+1v71ay5eUUZAFh8tNj7P4dzfdZqr6hDDGyb3VWZNz7XqPbBJIyeT8zP86s//j1WZLe2uPnG1/L+Xd/drya3xHpUKkpR1Kcbor7N/wDubfu09Y7aORPn2My/6tnpsKp5ypsZh/B8v3qWbY0hTZlP7q14OIj7/unt4ep7t+UtW7P9l2IMfe83c/zNVvyfLWJ4fM+7tZf4f++qqW8GNiPbM3y7dy1qxwpJcLbfMm2Jvvf+g15laUeU9OlL+YS32Kyvvb73ysr1OLdFk/fXTFV+9I38VOt18uP/AEnzPl/8epwh+zKHd1P8P3P4a5+aP2RylEnaORbdUttyP8rPJ/C22rcMiMQk6KG37naP7rLUFjDNNEmP9T95d38NWo40Me93YN935V+Vqly5jjk5RjsamnqGZ5ndtm9XX+7u/vVr2NzNNIzudvl/Lu2VjW7JG0MUCYf+7v8AvLWxDceZI8KfMzfdbd8y1MnPm2FH3Y3LscyTR/ang2iP5f8Ae/3agkj+wWZhvH3D7yeWm35f7tXY1SNSiTSJtlVvLk/hqpcLunMM0kjD73zPURlze6Ty+01CaRFjDpNsCrvdf4t1Z+qTQyMiQJsTb8zQr826pN0LebNs3Ju2rtqpcWciqfJ3NIy7V8v7rV04WPLLmMa/vU+WJRvtlyv2lnUqu3ezVUuoZnWSZNv91/4vlrTaOZVa2hRQ6xbv3n3W3fxVFNbzW6k+cp8z5flr38PzS1keHiI8srGM1vDIokdF2/3d9U5LWHyX2Js+fdu/hate4hSdfO/i/vf3dtZ95dfaIVhtvn8v5mVlr0InDUo80TOW18tok8n5pP8AgS0tqsMkm+F1YbtnmN8vl0rKnmM6PhW+X/gVRQTPuELlVbdudmrf7BzSp8upamt4FXyUdju/8epPsaeWjpNu/wBlqjhkmjuPJ2Z2/MrMnyt/s1YsVdkaGTblvuVjUjLkKo+7KzHNYpcQlPJ2r95Vq
hdw/wDLbeu/+8zVqws8MZePlGfb81UbqNFYQfNmvN5pRm7ndy/CZkmn3KIj71/iXa3zMv8AtVDcaem7yfuq38LVsW9u/mL5ybfm+6q7t1PFm8jv+5Uxx/KvyfLtqftWNZR93mMSPQ9yhLby9m5m2yfw/wC7WlpugwrKqB8ldrRfJ8zVqLYorb3T/XL8m5d3l1o2th5ezZC38K7a6o80djy8RHmK2l6G9ri6mRnb7y7fl2tWxo+kos377cvmff8Ak+7Uum6em4pDB867m+Zvl21sabZpIYt8LMi/Lt3r83+9XpUfdPHrR5dy9pFlbNMkyJG6R/Lu/h212/h+2RY1e3+VW+b5fm21j6bYwx7UtvnhWJfm2bvm/u12Hh+xtvNheD5Qy/N/tV6cfgPMrS/vHUaGs3mKZkZ1835N1drotvDFGYUhZX2f73y1zfh1UjkhhebazN8i/e/76/u13On26QTf8fMm6aLb5irurXlOOUpbFyz0uCOP7NCkbSbV3yfxba0tvmW6TeSrN5Wx9y/L/vf71Mt7W23D7339u5fl3LV64hSO3Z0fIX5fLq/dkTGpKOrkc9JBNMjukWH3fJu/u/xM1Z91paLJK6QsRvXdt+6tdHLY+cuxEb7m5vmqnNbwL++N4ybk3P8A8CqfhHTlzT1OU1TTIZn/AOPbLKm3cz/K1Zl7pqRw7Ld1RJF2/f8Au/71dRJY+cyw9E+b5m/ib+9WLqkM1ufOhfYNjfd/h/2mrKW5106fvcxyOpWf775/LZl++2zav+ztrCvtPDbtiK0zf3fl3V1us7JrdPMhjTzN2yTZWDqFpuVkmh2fL96OX/x6uKodtPc5XVLMNl0TL7W+X/arCvLWGORUTa3l/Nt/2tv/AI7XXalb2DRvD58yFU3K0fy/N/drC1iz25d9qSfefdXnVNzvoylE5uaFI5vO+VGb5trS7qVrBHUpv81YUVt38S1eutPS4vI9/Kr9zau2oGi+xvsfzNzfLtX+7/vVivdj7x3RlIns2SMtbQ2ysrfwr8qr/eardiYWmZ3kbf5W6Blb5ttZbXn2GRX2KwZ9qbt33aYusKzI72zY+4v+7XbhvhIqVDs9JuEjmSF3k3tL8qr/AHa6zQ44bi4WFE/d7fk/2Wrz7RbmFm2Qvvdovvbtvy12fhXUnlX98m0bdu1fvV6tGXLE8+p7x6jocfkrs+0/8stu5a7nRW3QxzTop3JuTzP4W/2q838I6hbeWpmhZm8rZtZtu6u30fVHmh+0u7b2dd25PmavSp8so8p5dc9K0G6na1ieb91LHudJFb5a6K1unvIzNJ8/7pmeRa4Kw1izjs47VH2NIzNu3/w1vaXrkMduAjrjytqbmrSMuh50v7p0MMjr++WaNl+/tb+7/u1jax9muN6GFn/ifb/DuqX+1E+zpNbTKjeUyuv95f8AarI1K8QxtCk23d9xVqOX+Ur2nLEwvEjQ2rPs8xV3fd835vu1xeqSp9sTf5n3W+ZU+7XVas1y29YfL3/x+d/d/vLXMa03yuA6hFX51b+Lb/FXNUidNGRxWvbN7O8MiJv2rJ/E26uI1yzWS3ltnfIb+Fv71d34keGaGTY/zLFuWRmridaaG4k87Zgtt/1f3a45RPRjU5Tz/wARJ5cJhd23/fXy/mVa878YWaMx3IpVvlSvR/Elvf8A2hoYUWPdLv8AMZ/4dv3a4bxVDNJCzpbRp839/wDirlqU/tHTTkeZeIPPSZYd7M/8arXK6g0zXDPs2O39567XxBAkUbzJbMjx/wDLRX+7XF3ypNJiab5m2/NsrilGX8p2U5fCNW6RpGe6ufm27dv+f4qivGSRhIjsEX5t396mtj5tiZ2/Lu2feqNleSHZHwq/dVUqI+7sVL3pkd1N5jJ5iblZ9u7+7T22RyMET5Pm81W/u/7NRLDu+dPn/wB3+Kp1W53NMnyqz7l3PXRzmEf5RLeR/kdIW3Mn3VWtJbNG3Q/Zl+aL/e3VDD+8k3zblbfuWRavWqpFbibZub723dtrCpzHXRjEfDazyQ7HdU8z5av2K/YQ0Gzeypt3Mv8ADUVn8rfu0ZQvLsvzVoqqLcBPO+8isit91mrklI9CNPmjzF7Sb7yykKWzfdbZu/hrotL1KGHH77cWiVkZf71c3DJ9lZN77ZG+barfd/vVb0+5T7Qu99u75katqMo82hjU5up2tjq1t5SoHXLffuP9qvQNHlQ+DVlAOPsrnBOfWvHrWaZpGm8/a8ny/f8Al/75r1jw3dK/w6S63BgLOUkk4zjd/hX7b4Ku+cY9/wDUNU/9KgfQcLJLF11/07f5o5yZoZpC7zZj+9BJ/F/ustZmqTIszIi79v8AzzpYbyaTfvRn/ufLtqreeTaqzusnnM25fm2qv+y1fkMa3KfFSjzGfqk2632Wbsg2Y2/d2/7Nc1qz3McjwPJ95F3svzLu/wB6tm+kubjfO7xh2Tb8yfdasLVPmY2czqv3vvfLW/teUw9nzGRfXSQzOny7WTanz1z2rXjpI/2Z9q7fvL/erV1pYY2R0OxWb7y/NXP30bm3b7w2v8+3+Gp9pGQo0yjqF5uXYiNn+Pd96s2aTzNkJ243fxf3qkummkb53/g+bclZsmoeX8j7VH97+81ZSkbRLrSW3k7872b5drf3lomm/c7JplG3bs2vWZ9ukmUwzPs+b5f+BUn2ny2/hAj/AIWrjlT9/wB07o1Pd5Tfjut0zOiYdvvf7tWrNv3ZfeuN/wDF/DWDa6h8wd593zfNV6x1J49yJIqLu3J8v3qwrUZnZRre7qbiyOuI3flU/h/irRhk+0Rs7v8ALt27VrFtdReS3b98rN92r8d1C0I2XPyx/M6t/erlqQud9OX8pfhhh2qjpvaR9qKv3v8AgVWFuEk2fJsf+JW/5Z1TtdQmuFZEdkRv4lqz/pMiqg2uVbbXNKEubU6Y1OWPukM0fnXB/wBJ4/grI1NUaTy5kYhfuba0rrzdwT7Nna3zqrfdqrqtu6xr5L42/cVXrWnHl5UZy97Uy/s++TEiMit/DI/8VQR2X2hmhm4H+/VqSHzP3Lzcr83+1XQeBfhzrHiq8httHs2keSVV2+Vu+9/s130Y++ebiJcsRnw7+HP/AAk2qW9nN8jSSr5W5GbdX6r/ALAP7FfhvR9M0258N+D7W/1mS9VpZr6DzWX5f4Y/4a8s/YO/Zb0Twt4403Ur/Spr+6sd32qRbVWgjk/u/wC01fpF+zLq9/8ADG6u9A+Ffw6vLvxDql1591qmqOq21nDu/vf8tJNv3VWtqlbl2PnMRW5pcp9P/A3wjeeGPBdonjbTbW11Lb5UW23WPc3+yq0z4wfHr4K/APQP7d+Kniy3WWF/3Vjbxb5ZpP4VWNf4qu+Cl+Ilno9ze3lra3N66eYt9qMu1Wkb+H/ZVa+WP2l/2fbzxRHqU/xR8f2d4983mWtjodvJuXa3zNu/hVf71c060tLbGHLA4P8Aa9/bJ8WftB+HrTTPhJ8N43fULryIrrUl+2XNiu370UC/u45P9pvu1o/smf8ABOvWdU8Pvr3xU+NE1jLcKq3unyWSyysv
/XRvl+b/AGa6D9kv4K+EvC/iaDw74M0qaGwtV8/7VcXDSXNxM33tv8NfaPhzwNBpmmtiHyJZFz9ocKzL/wB9VtSlzRugb5jz7XPB+i/Bf4Yp4P8ABnhiN20+3Vl1jULWPy1+b+KvnH4pal4t8YFL+GGPVnkTdtW82qu3+7XvnxnuPAei6bcN4y+IcmuyzTsn9mteeXHuVflVlX73+7Xw7+0d4os7G+stYm8SeHYXum2RW9jebHjX/a+b+7WVStKUiPZ83xHNfFS4/wCEHszrfiHwlfW33pfsduvmt/vKq1leHf2nPhj4quH8PaJ4km0p2dVnsbiJkkZv4lrmr79u7XvhrJPoPwr8H6TtuItqXXiK3a7kbavzNury7w78PfFHx41C51i88f8A9ircXrTy28ekrBA0jfeZZPvbaw5pz1pm0eXlsz0T46/st/DG5kl8beH/ABDeTeJLza0unxt+6aP+6zf3qf8Asl26fC/4pQp4h/Z5mn06SVWutS1C/wB7fL/Esar/AOO1hfD/AOFfhfwz44t/D1/8XZrm8t4t25d3kKv+1u/i/wBqtj4mTePPB+uR674J8eeKLm2hZV+0aT4f/dqv+y3/AC0/3quUqjjZmXL714nrnj/UPFXxd8a3Gpab+zxo9vYrdLcRah4ksNsce3+FYl/vf7VdKv7ZHxD+Atj/AGl4wv8AwHrEUd1t/sfT5drRq3/LNY1+7Wf8OP8AgpRo/wAMdJ0rTfiF8N/E2uWrSr9q1LVLWGJW+Xb91l3Uz4hfBH9hX9uDxR/wmH7Nl/caV4ptf3uraTpM7LFqTfxRsrfLu/2v4ayk4zhyS91mnLKnLmR2d9/wUg/Z4+NkNt4D+IXw9k0F7qLfFeQ3Xybv9la+Y/23f2Xfh78RtJvfij8Mdet9SNnp0y7bf5XmXb92T+9838Va/ij9nn4P+D9Qm8PfFT4o+D/CeuW8uyLw6uvfa7qNf4d237rf7NRaL8L/ABV4bhuNSfXpNX0e+umVLqFNqqq/dXb/ALtRGUqPu812TKXN76Vj8j9b0+5tdSlttSh+zur7XhVf9W392o2t0+WFLncF/hr9NPi1/wAEi7P4zatL4z+HXi2ztZ7z/j4sWfayt97dt2/3a+O/2gv2I/H/AMB7iayvEjuQrM3mQy7m+X/0Kt/q8pQ50fQ4LNqE4xhI8Kkt3WHf0ZXasu+hkZPv4Pyt/ercuLVPM8mZ2H+7/erP1KNFh2WzqR/Ezf3qxpy5fcketKMJe9E53VFTy3TP+18vy1gapG8m596+V/Atb19+93o521jahG8au7lWZt3yr/dr0KfNLlPMrS5fhOY1BWZPuMv8SstYt8sMWUfdub+L+7XR6hH9qlKD5X/u/wB2sC9jTa298n+9XfT0908mpKXMY0ypDJsHztJ8v+zVORC0jJ53zbN1Wrr9yzP94VCkfy70fI/j3V083vGBBDvXG87t33mqaFAGXYn3qTydzbEdf9mpLdXVs9dr/eqvhI5mXNPX5mR/7ny19nf8EwiDoPi8j/n5ssj0+WavjS0jhVdj7sr/ABV9lf8ABMJy+heLywwftNln/vmav0jwi/5L/CelT/03M+o4R/5H9L/t7/0lnlPxqm2/HjxYgAI/4SC43Fu37w1V0uTbcKLaHd/tN/DUnxwM3/C+PFyxy5J1y6wPT94aq6TJtjXDsD/Btr4jiD/ke4r/AK+T/wDSmeLj+b69V/xS/NnX6XI+77jKG+Xd/C1dRpMiNAsNy+8fwfNt21xGl3otmVzN8rJtZfvV0Ol3iTQq0MKtKr/eryJR93U4ZbnaafcFl5fYq/c2vWzp9518lF837y/3Wrj7PUXVlebam7/nn92tjTdShEwRLpf95vustBHKblrM6zRTu7THZs8vbtqx501xIXwu6P5n8xPlb/gNYq6tNHCkKIx2/wC3t8ula+S5kXyNu1vl3K27bTlLsXysu6tqCKuzzGB2bt38NYl7qE0aF4uFkbbuj/hqO8vnWPMz7l2bvlesi91R5G3um7b9xd9Z+05vhHEnurxoRs6+Y/z7vl2rWa2oIJFSFOfvJt+7WfeXSSKdh2j+Pc3/AKDWfLceRCH+0ttVfvfeqZfCejhZe9zG22oJC/yQsN27duXd8tQXGsPGuyP51VPn/utWW+pIyrCkzYVNqtUclw6rJCjZC/3Xrml70rH0uHrSly2JtQvnuI2DxxrE38NY+pXaKzpsykiL8u+lurxJIWR/u/8AfO2snUdS2ln+XC/L8tH2j3I1pfFIoMzu29E+6m1lqCZpmXY+3K/3f7tLIz7iUC/e/iqG437sf+PVjKPQ+Zo4eRXjj86d/wD2WrNqrzKrom0/x/7VR6e0KrvRGG19u6rdnG8bbE3ZZ/vVhKXKd8cCWLNUkjR0TL/d/wBmtS3t0ZPOh3b/AO9I9U7dJDIu9Mt/srWtart3p8zsv92ueUhfVZFvSvtLfvkTYN3yblrZsbc253o7fKnybm+9VCztv3ZmfawZdv8Au1qWtv8AMv2NPNVV+bzGrH4iOWMY25TV0i6dW/cxsQ3yvu+6v+zXSaPHHMrB/vr/ABVzel3k1um/7MsiMm52b/x2t7S7qJd6OrK2xW+V/vV1UY+9flMuaMYG7BdQ/wCuuYW/hVWV/mZv71STTeXZuQnKu21l/u1kWMgl3u6NsaX/AFn3fm/2almu3Zvs0L5C/wAOyvWo6e8c1Sp1KOoTXjR/P825P9W3y/NWPOs7Mfk8xf7y1q6lHNJK7xuyMvzbf71ULqNPuImVb+KvSjUt7zOeUoy0KCw+Yjp8zjf977u6o185MfZiy7U+ZfvVaa1eFg7n5WX71VfL8uRPs275vuf7VOUoS0OaMeUVWRspDtVu22nRyQtiH7yt9/b/ABU5bV1kYvDj+Ld95adFYvDGvz7WZvk2xfLXBWlCR208RykM0O3/AFL7Ds27mepoYXaPfC+x/wD0L+9UsdnayY2PJIV+5tT5as2tqkatCqMzs3yf/E7a8HES5T3MLU5o3Qun2MMa/aUtlXd8zzbvvVdt7V5v9d5mfl2L/Dtq0umTRwtDDDG3yfdX7q1aj0lGVUudwdvmZV/hrxqkuadz14/BYq/Z3X7m5N3+tVqfHD5jb3/3h/dWr32JFjeZJvNdf4W/hp32GFs/Iz7otzbfl21EuSWhfMQQwuqpNc8NJ/qt38NWYY0kxOm5dr7dzfdp9rYzSRlHh3xr9zan3attH5MZhhtmdVX72373/AqqnGUpcqOSpUjGPvCWbTPOqTeXnzW/eMn8Natr50kPkvtz/wA9FTbWfG0Jh+SHcrJu+ZGVlq9pk0jTffYxL9xWb5q19n7vKc6rf3jTYXL4hublQN6tuZdzNtWoNSkP33/esv3WjanKv7z/AEOFmO6okt0VvJdG+XdURo/DGJp7b3blK5mhjmREf5tvz/J/47Uas8a7Eutm1G/d/wB6p76N3t1T5mib+FX+as2TY28Ju/eLt2yV3UaP90yqVJdCyGto7OKb77Rtu3SP/wCOrVaSb7Ysbwjb97bUULO2xJ3WIR/cj/utTftCthE
mkfc+3dIm2vWpx+yefKPtPeIL7eqvC9su6P8Ai31kXCwlPJ+YO332rS1CNPM3v8u1d21WrI1KYSK7p8iq+12V67IxMpUylNJMsyRnyz+9b5m/i2/3aa9x5jK+zYG/8dptwqHe77lDfKsjN97/AHabCsNwy2021gq7ttacsTllHl90s6XsYMjyL5sku7ar/wANaDLCsmyENlfvsy1ShVIdjujMy/c8ur9vlo3d02tu3IrVjUkZez94fZs/+0W2srKyf+PU+ON5pPOd/kb5dv8AtVHn7KzjfJ++2/x1chVJlLxjZu+Xcqfdry8RI7KcZdCKxsfJkeb+98yL97bV+yt3uJvJhmYts37VSoYYXVRCjthV+eSRPvf7VXIZlhxD5ys23a21az9/4hyl/KQrbo67N/Kvu+WrMaQ27ffk+Xb838TVHJcQqwTzGJ2feVKRri22p+5kLr825d1ddOPMeZX/AHnMy9atDHGZt+6P+P8AvVs6fNZtG3nWef8ApoyfdX+GsGzkc3n2b5odybt2z5Wrc0uZFZXkO3a+395/E392vWoRueJW5onY6KqLDA7zNsZPu7du5a7jw7HD5MUycfdZGX5dtcDolx9lWPfDtWP5k+b5t1dj4bvEkhQJZ7mZ/uxtXqU480bnjYjl5jvtEukjKI8yyuzt+7j/AIv96uw02TdYq/ygbVb723/vmuC0O6+0YmQMzfdVfu7a6zT77y2+eZXEaruVl3fNWhzfEdla3VzMq73Xy9m/b/EtXproLMXR1Kr/AOPVh2OpPDuS2udn2j/XtJF8rf7tW4byBgyJMoiVWbdJ8tTzFRo9CzI3kyfJD/Budd9Zt80Mjec6cf3W/hqaS6tm33MO4LGu7zG/iX+9WbcapbTX0aOi7GRnRl+bdXPKsddPD9yDUm8yPzoYfNGxlVWfbXO6hZpHD5PkwvtTc3zfMv8Au/7Na11cedC+P4XbYrVh3U0CxlE5RflRd/3axqVDqjRlEytQg86P7RNNxvXarL93/gNY2qfLcb5nyyqz7VrbvpH8tvJuViK/f+T7tc1rV1YW6vv5bf8AdV655VOY6Y05dSvdb1K+dCzI33o2T7rf3qx7yGG6UlHZ1Z9u2b+GrGsa88kLzJN8y/LukrmtS8Q+VG8N1Mrhm3Iv8K1yVJc3wnRCPSQXzOuxI3XdGu/cv/xVZGtahbW5l8mZkb+7tqjrXiia3tWe2O5938L/AC1yXiDxg8d0qQvHtVG+Xf8Aeaufm+ydtOUuU27rxAscgebdsX7n+0396qC+IEaTYu53ZvkZq4+68UQs3nJ8jr833/l3VUt/FE8lwD5zKy/3v4q7sPEwrS5j17Rtc+yqr/aY2VV+RfvV3/hDWJpoxEjqs396vEPCfiIbh5M3zb/n+SvR/CuoJtCTXO12Tc7f3a9KlLm+I4an909o0XVraHYj+XCzIrJJ96uz0HXHkaK6e5Ybfk2q/wB7/aryPQb52mhmtplO5Nv+9XcaLqSTR7/s0aFvl3M+2vUhJHnVn05T0bTfEG24W5G0Ddt2t81dLa6k8ca/PGiN92vPdI1CH7KmbmR2VlV/k+8tdPoMnzbJvnDI3zfe2/xVvGUebyOGpGJ1Ed5eNC0LzZ3ffZUp8cN7HEu94X+Xay/7VVbWPdCh+07UVVZ9rK3zVZh2TXHybUZvl/eU+WJEoy90ydajh85EmtlIX+JX+Vq5nxFJ+7W2Ta7bWZ5G/wA/drrdU8wzM7pt2pu2/wAVchrUcMcO3eyp93zPustc1T3TaG5xPiKZLhXdywDRMqx7P/Qa4jVFRVW2e5bY27Yuz7tdv4kaDzD9j2qyvtfdXF+JJLaNltvs+59+5W/iZf71ckpWkejCWhxviq1tri3ZLYs6LFtSTf8AM1cPr1pM1vHNZTZ8ncvzfw13mqLCsmxEaINuXav8X+7XKaxaosO9JmQyP91k+XbXNKJ0U9jzTxBaveQuiOzQs25o64/UtK27/k2D7u1q9R1SxhhD73XMnyoy/wANcl4g0ZGuvLm+fb92T+9Xn1PeO6n/ADHEzWqLGZl/vr/s/LSNC8f+jOkeV+ZW3VsTWLyTb0+6v3l27lZaij0uGWR5nh2/3/8A4muOU/sm/LKWxkR2czM1zN5iL/Bt/ipi2+6bcjs21v8Avmte60/gukMi/P8Ae/2aotaeXMN6Nlpdyt/erb4oaClDl5R1q3kzjZDvVX3eW1Wobh/O+xony7NyR0+1t0WPe8m9v9lf/HasWLwyHzkTcrMyt5iVjU/lOmnzRiT28dy2zyU3KyfearkK+XIIUT5lX52X+FqdZ2sxjML7lC/6pd9W5NNSFVd3Ywsv8X3laoj8Oh1KU+YrsyWKs87tnZ8+5d1XbVbaKaOCZIdq7fm/+Jp0MM6yF3dlZv4l/hpq27qv2abgt83mbfu/3aPi+EJR5SeG4hjk+ROVdtm5/u/7VeweEl8r4UqpPAsZ+T3GX5ryFrf7PNDG+1wyr8395q9d8Ks7fCf5mJIsrkHcMHhnFftfgnJvOcf/ANgtT/0qB9Bwwn9crX/59v8ANHBR6pC1v9pLtvV9z+X8zbahvdRRp5Eh8x1+6qyf+hNVHdPbt5P+r3Ju3L91lqvJqXmQt5Xmb/7v3a/F5Vj5uOH7C3135PzpDHub79YGvSO2+ZJvk27kVv71XbzVvJVnhfHzrurF1jUPtELom5t33FX7tRGtLmuZywsTKvrh2k+fhf42/u1zuqTec/z3Klf7v96tK+utirv+dtn9/wCVWrIvo5mVd+3d952WtI1uaXumX1flmZ2pR7rht6Y/hVmf71ZV5sZvuKo/gZkrQul8zbs2v/d+SqF8r/K8z8L/AA1rGXtCJUyncO6/Ojqyf7S1BJqCbd+9d27/AL5qO8ZNron8Pzbaz5pvLYQvHubq1ax7mMvdmadvqG2TZs2/7Va1jNC0yTbPmXdtWuUjuE/g+Y/x/P8AerR028dWaabhm/i30qkZ/ZNqMpc51kd4jRD73zL8/wAladldvIu/5v8AYrlrW8dlx9p5Vv4q37G+eRdgTKbP93bXBOPNI9ej8Bu2Nx+5SF9ys331X+KtNV3fvN+xv46wtPkSOH/XMzKn8Va6s7bZim9pPv8Az/w1xVI8tU76ceaER+o7/McI/wAzf8tKozQpJh5nUt/Gy1oyR+XGHROVX5/7u2mW9ik2fu/3vm+WrjGPLzGVaPNLlKNjpdzcSeXs2v8Ad3L/AA19HfsX/BPxz428UQab4VsNQlu9Qf7PYWtv96Zv4mZv4Vrz34DfD3SvEXii2tvEnFvJdKl15MW6SNd38P8Aeav1t/Y9+GmlfBfxF4a03wToOzxTq37+1s1iVm0+zZvlaRv4Wb722uylpHmkfOZpWlH3EfQf7Fv7HE3hpbLRfiEjJLawxyfZbG12bW/i3O1fXGpeAdH0icaroWg6VHKu1P342Ksa1bub6Hw9o1os2u6bazKkf2yW7dV3f3q+SP2wNO8YweNTrnhv4reIdYhvnaKLSLCz3QW7MvzL95d1TVqez+D3jx404QXvnu3jJ9burpNP8N+PNLhebl
o7V/P+X+L5f/Ha+Y/iV+0J4t1rXLzwB4WezsVt7hotSvLho5ZY4938Kru27q8Z1j/htXRdaSGb4PyWMMcSwJqGpaktqs0e75VWOP5q9c+FPwtT4aabL8Zf2h5vD/hXTdPeS6XT4Z1iW8Zf4mZv30zVyzS1lNGnxQjyHrXwQ1fwH8APCr/ET4l6xpelK0TLbzanKz31x/d8iD+L/gK1l/Ff9tr4kaxo93beHvhvNoOix2Uk/wDb3jLUo7F75f4fJi+9t/8AHq8W8eftKab4km1T45eAPAOjldPX7Ra+KvGTyeRGv3VWDzP/AB1Y1r5r8J/C/wCJ37fnx5uvFXxC+Md5r0G7zdUuriLyoreFV/1ca/dgWoVaOIjy/ZNYw5IEXxE/au/aN/aS8ZWHw6+CejR3UC3TJOuk7vIt933ppp/vN/31U3xA+CPhf4a6Dcw2dnpOseIbW183xDrl5KzQW67fmt4F3fNJu/iavqjRdS/Za+Gvhu1/Za+APifw7pVu2lyXvjXxFNdKktjCvzNuk/hXbu+9XwB+1x/wUy+BviLWdc+Fv7LXgxb/AMOaDLJbp4qvIt0epTN8rNHH96Tc3/LRqvD1MNT92GpnKnX+KZm+H7fw34s1h7nWPEP2izsUjW1tbN1T7VNI3lxwx/xN81fQ2h2fwf0HXNU8B+Lfijo+gQ+GdNZ/GV5ay+e1j8u77LD/AAtcMvy/7NfEX7JXhP4nax4/tvjB45SbTfDug+dq95faha+XH5yxt5Kq33du7+Fa89t/Elh4H+0eJ/H/AIwW+k1jV5tRvJrjd5V5IzM3/AqVatyRLp04y1Pr7xN+0xc3nhhrn4G/C6Pwx4Gs5WR/EmtQLJqWqNu+983yqu2qtr8cPj9ps1t42sPiLrmrWkL/APINt5Y/L27flVo1+6teReHf20dB+I11Z6N4nnsYNKhg2263Vv8AuF/7Z1778MdN8T3Ghr4q+A8PhXUWk2xXWnwxL/pTbd23b977tc8cVRnK8jX2M/hien/AT9szxD4+1XT/AAl8WvBmkvZ3Hy+TfWCu8m5tv3tvy19Q+KvhP8OvAfwz1HR/gb9l8I61rE6y+I9U8P8Altc2q/eW1X/nnu/5aba+SP2Vf2tvgtpPxkvfDf7VPwht/DF7oqSOt1b7mit1X7vyt975v/Qa7rwj4hf4f/EzxJ4q+EvxLutc8PeKriS9lutUVd7eZ95W3fd/2a562MdKTUZfeaRwrqbxPN/26Pgbf6potr8QhbWaap4fZU1TyUXzLxZF/dyM22sz4O/EDXtL0uz0TU55prZv3kULP8sfy/xVteMNU8SX2g6rba3qTP8AatyfvpdytGrfKv8AwGvM7q617SfD8iTSQxMq/wCsX+7XFWzLnnHQ7KeWyjTlc+mfhT8dLDTPESWdzZ7HmuN3nLKqqsf8W3+9/wACr3b9oz9iXwL+0F8C7q/1XwBDcOsTXFlq1rdbZV3L/s1+YeqePPFWh61bX9hfw3Lx2qxRRtF8u3duavtX9hX9uLVbJYfDfjy/aIr8zTMjLEqt/Dt/u16VHHOjGM90eVWwvf3T8yv23P2Fde+BMlxrdhqtrcvCypFHbuy7l/4FXyheXyfMiQ43P/49/tV/Qt+2Z+yzoP7SWgzeIfCv2W+tNY01kuI7ODzPssi/N53+zX8/nxu8G6l8OfilrngO8hk83Tb9leSbcrMtemksRHnid2V4ypH91Pc5TUZE3M8219vy/L/FWLqChV8x/My33v8AarTvbibkbNyfd3L/AA1lyrhSmGf+63+zXRT5uU6qkjn9QYKxm8liW+X5W2/LWLfKjN8/Cr81bl4vms6Qp83+9WPfRPJuf73y130pRPNqGPfTfP8Acb+6q1U8vbtd/lXf92rtwvyrlmX/AGarTJD8rp83+9W0eaRzczKskO5h5gbDVZtU8tvk+6v3KZt2bn2Z/wBn+7U0LOXZN9WLl6Fyz+7v35/h+avsj/gmGyPovjBkP/LzZcDp92avjm1jk2qn8K19kf8ABMdI00TxcIhwJ7Ef+OzV+l+Eji+P8I12qf8ApuZ9LwcpLiClf+9/6Szxv44s3/C+fF5EPK6/dbW/7aGqGns6qif886s/H+WcfHbxa0UmB/wkF0u3b/00NZGn3yXGCm4fJ/FXxHEH/I8xX/Xyf/pTPHx+uOq/4pfmzrtLkdoVd3+Va27W8+yyfu3/AN+uUsZH2BEfaF+9/tVs2N07ZhdFcf71eNLm2OWXwnV6XdQqvCfIy7d2+tKG8fzB5Kb1ZPlVv4a5iGT91shdV3Vdt2vFhCb9rf8Ajy1XwmXxHTR61CsZs3hZ/wC/8+1agm1SGNnbyWwq/dX5lrIa48vZ94nbtdart+5ZpoXkBVPu/wB6iMfcHzGhdX22AQ71+6v3k3VlaheOtw6QzRpudmVf4abdXTy/vn3bvvI396su4kmVnhmdWXZ/49RGMio7heTTrGqPc7T/ALPzVUa7dWaZPnLbqb5kM0n+sbCoy/8AAqptM+0JBN838W2lKJ00+5JJM8a+S7/e/u0yS4RZhGjt9z5t1R3E3mbXeZS/8H96qF9Ik3yb9yt/drnlHlPWwuI5eWJZuLh4V2O6urfMm6su8uklk2O6pu/i/houpkUh9i7l/h31VuJvMjZHRVC/3fu1j73xHvwxEZadi5Nbo3+rT5qrbXmZU2ba0riPy/n8n738K1Tkh2x+ciYdfmTdXHKXuHoUcKRLC+fkTcf7q1YXzpY/v4Zf4lpLdUKs7zbdv91alt4/l+Tayqm/a3y1zSly7Hs4fB80S3Z2z3A2I8ibvmrX0+ZNrQyblDPs3f3ao2qutuod1X5PurV2zhhkhXem3/Zaufmi37xljMHyrSJqaeP9IML7WRfm8tW+9W1DJbKqzJbbX+6/92sfTVmhjCTOvzVoWbIkj/JsXZ8m35qrlifNzjOmX7KZJJDbGHZu/wCea1q2N15WbaRlVN21Gk+8tYcM/wBnZ/ulf4drfNVyO3+Xfv2fP/e/irpo6SvI4pbnR299t/chN/ly/djqW6vNqy3ifJt/hj+ZvmrLsYX2k+czbf7v8VW7dnbdsRvlf72+vUo8nKcMoz+0RXM1tIrWzOz/AD/7rVRvgkn+pHzbtyRr8q1oXFucfaHTc0b7kVfm8yodQt3muEfYqrvX5VrsjU9y5lKMpFLznY5k2o3+9/FRDZodsiPs2/Kq1O1vtma58lnDfNupLO2SH9y77X+8rUpS7mPxTJJIXjZYZm2FkXZtp7W/lje6+Vu+XbJ/ep63DzBt6Nt/8eq59nSRU/fLcMyb9rP93/ergxFblOmjHmKMNrhgltJ5X+0q1oaesKqXv3+dn27v9qkjtXa8TyUZj/Gqv8talrazKx3zcyP+6XbXzmIqTlLlPoMHT5YXJLOxZVR32vKz79s1XLhZl3P5Kuy/c8v+GkjtrlVhh87zNv3/AJfu1dsrW8Wz+fn+8u3btrglKX2T1Kclze8UIYX8wu9srq3DU6OFIz51yW3fdVf4anhhMU3k23Lsm5FZ6uRab51u+xPKRn3Nu+83+7V0qcaki
a1b2cNSnZx3MkiTO6rF91l/iWrccKXLNCkOxlZvmVfvf71XrO1MkgeF1CRrtlWRPmarENvNubY8bLs/ufMtelTw84zseXWxEOTmMy4t7mG3CImGj+/u+ZdtWdPtfOVEfllbc+1NrVpLaz/Pv3bWVfKbZ8tW7bR3um8+2sPNaZP3sm/bXU8L/Mcf1rrEz1t7lmlS2dY0WXft3/Kq1Yj02/aPi2Zoo3+Rv9nburetdGe7tfscPG2Jdv8Ae/3a118NzW9uiJDgMv3mTcy10rCx3UTP61zfEcBdabNMr7E+WP5Xj+4y/wDxVZF9oqTXSzpCoZom/dt/DXp83hu2b906R/KnzeX96sy80FJC7JCqhXX5m+X71bxw5vTrcsOWR5y2mzMy/aYY0K/N5i/NUUmkvdeTIj4ffvZV/vV1114fvGjZJoMxfNvWP7zVD/YvlKHeFT8m1Y2rojT5ZaGsbSicRq2nu6+Xv4+6396sa6861j3wwr8r/vVau11bRbmS4T7Sm35Pmkj/AIa53Ure2aR4Zk3Dcv3l+ZmrTl5hfDexzl5LDueb7m3/AJZ/e21TC2yzM803yfxsr/d+WtXWrBLWMu8yjdKq/L95qw5LidmNtZpjd8yNs3Lt/ipc3MYVI8xorsaYIjsFZdrtG3yrU00z2tqyfaWX5v3W5d1ZNjdTmRfnUnd86/3quLI6xs/ktuZvvbvu/wCzWEo9zm92MS3JebVjmdFEv3X/ANr/AOJq/b6xNCws0ucSL/D/AHv+BVhPqW7Y7puXfuZVT7v+zVmG8haNnRG+V9u3+Fa86pTjz6mkanWJuyXm23DzTfPJF97722j+0ZvLO9F3/wDPNX/9mrMa68pfLhTMSov3vm2rSLeTzSM6IrtJ99v4ainGZMpcppfbXklGdvy7t7L/AOg1oafNuhW5eb5f9p6xLG68xCmFT/gf3WrR0+JJvK+7v2ttX/ar0KK/vHl1pcpu2quskkl48ez5dm3/AHau6XG/nM7w5C/J833Y/wDarGt5vLjCefnd9+P+Ld/erZtd8yGZ5mf51X5U27q9bDxPHxEoy3Oo03YzJHv+ZvlTd8tdXo9wlrIiQzf6xPvN8tcdpPlyXaQyP86/LFu/irT0++QYSdI9u/b83zV6cY+4eLWlKMrnouj37rbiaN9u35dzfxV2Wg380avc78JJt/h3f8Bry/Q9WT7Lvk8tRG6q/wDe/wC+a6rRdcghZ03yBWT55FolH3bGdryuegw608d1Eny7V++v/s1aS61CqjY8czfe8vd96uBsfEFndMLO5eRmWJmiZU+X/gVW7XUkaNN77GX5JdtctSX2Tto05HXf2h5cfkzSMvy/dWsy8uIbiEJsYtH96NX+as2TVkjjE1s7Mv3dzJ8tZ0+sQrIEbdvkTf5zfw/7VcVSod9OmbGpXUMLI77vlTdtVKwtW1iaTNtC+xN/3o1+Vay7jXZpF3vcs6qzeU3/AMVWbfeIPJhR0fcy/Knzbf8AvquWpUlE6o0eYtahrX2iF7VHxu3K3y/Nu/vVzOua1D5Zs/lYfd/2vlX71VtW1vddPC8yp/F81cfr2pI29IZo3Kv+9/e/NWHtDo9jIta14ihWPyd8m7duf/arivEHiqVXKbIx/wBNN/3v9ml8QagmnxqNjZ2fIrP8u6uO1rWAzfJtHyfNTlIcYjda8YXOZUR2H8Kt/E1cdr3ip2kaaa5+Zf7r7ttO8Qaoke3ekn8W9t/y1y958zPs+7s+b+LdWVOMzb4S3ceKImUpC7Nul3fvP/Zav6Lqj3F0yXKZMf8ADJXISfNdrP5Ku0L7U3fwrXQaHbzRKibFI3bt0iV1RlEiVPm949O8L3k0cKPC/wA33vu16R4U1KZduz5nbbvVq8r8KiZZIUd2zJ8z7Ur0jw3Hc28bTeWo2/KrM/8ArN1dlGXu2OGpH7R674dmSFrdneMBvlVY5Vb/AIFXW6XeJHvR037Zf4mrz3QVtljt9luuN/3V+Vt1dp4daOZY5k/1v8a13xlM46kTvdHuoZreF3Rv7rqvzNXXaTcJGqpvYM3/AH1XCaLNDNGsPmqpX5k3LXXaTcbriJ3hzt/ih/vV2xqezOCpGUjtNNvE8scrC2zajf8APTbVyONI/kn2rLI+7zP4masTSdSht08mZ94bcyf3l/3lq1Lqt40f7mbeWTc25fmrpj7sOZHH74/WJvs8yedNsKtu3fxf7VcTrmqItw8PnL5bSt8zfMzVta1qmGXzHzJH81cXrWqTJIzPZttVfvL/AAtWMvjNo+6c54hvIZP3MMPl+XKqtJt+9XJatcfbJEuRMrLs2qy/e21taxdPfTGZ/kXft3Mtc1qVxMzN8i+XIjN5jP8Adb+7XHU5JTO2n2MbVr52bY7yM8aqu1fu/wDAa5nWozHvvJkjLx/8tPvV019F82+a5j3yfLE33Wrm9ctxGyXNy67pEZUrmqHXF9DndSt0S1R3hUL/AHvvN/u1z2sRJ8kyQs8jN/C/ytXRag32qGTziqDavzR/3v7u2sbWP3jh04/iVvuqteVW907aPvSOZuLVJ2aH7N80f3mVflqC1hSOQ+T97/vqrs2+aR32Kqf3qbHC8bZdG2fdZv8AZrzZR9/mZ6NOXL7pk31qFtWm+bc38VV4LfyYUR4WIV/4q3LiNI1MKfN/EjVRukSZVd/LzGrLt/u1pGfKaS94qx/vF+dI12tU9nZ+XcbyisG/hb5lqPynj2TTQ7W+9t/vVoWdxvuE+SNUV9v+1upSlM1pxhyly3X5XhSFi396rstm7f6Sj8LtVlk2t/47VbT1eZWTzm++zfvE/hq5GyXqh3fbIyfwp96lGnLm90rm9wRo/Kk+eFnT5v8Aap8dok0iTBGYr/Du+apPs0wRHe1ZQ3/LRn+ZlqXdNMweF1C7m+VV+Zlol7r90IxnL4ih5L+Z/pNtIyt825vvLXrvhg7/AIVAlA2dPn+XGB/HxXmVxZv9oZ38wL/H/srXp/hoxv8AC0eW+VNhPhiev3+a/a/BF3zrMH/1C1P/AEqB9BwtGSxNW/8AI/zR5JeXDrC0Lp86/cVm3Mq1k6k3nW7F33q33m/vVueTsmfzoWH95f4mrH1SHdI8UPyD+7/er8MrS5fhPFp+9E5vVDcyRtDE+WV/urL8rVl3X2xd801sqL951jfdWnqln5Nw8rxsDGu35azLizDM1y82Pl2uq/LUxl3kWZepSBlaHZGv8O3/AGapyRurbN+4MtackKXUap8qL93d/s1Wmsfs8LoX37fubkrSnWjEzqYfmlzIxNSbdCkKeXjZ8se3/a+9WdeL5jNCm3O/5619SjdV3w/7K7WrJuFmhjlTO12fdu/2a9CnKMYHn1qfNMxLxbaOTf53z/x/JWPqEzw5SFPm/jZq2L6ZIyZvl/4F95qxNTkhaY/J/vV2UzjqR98heaCFv3jyFmq/aXF1JLvd9p+7t2VlyfNIzoy/7tW9PjdptiO2KuUeYuJ0mnzHycI+5lT5Grb0ubzI96PJu+981YWhxmRm2bd38O6un0bT5mj2TPgM/wB5a5alPlPVo/Cauj28O3f9
7b83+Vrct7e8upopkZvKZP3X7rbVPS7SH/Voiq38Lfdrf0mHyz88zRj7qL97bXHL+8ejTjL3RbWBJGELuxVX+fctbHhvw7DqmqfY4bbzXZf3q7/4f71JbWMy5Tdnbt+XZ/FXT+EdFe41iJEh3Sfwbf4d1Y+zjLQ3qRlGHMfS37Enwj0Oz14eM9bh85LNldI9m7cyr8tfpL+yP4BufDN5e/GbxJNavqWpP/oG394/l7flZl/hWvlj/gnX8HbnWNF0rw09neL9uumkv5JIv9XH/F8392vv/wATa54Y+DfhWbUrDR45/wCz7fytIs2TatxI3yx7qcvd91nwGMrSrYiR0Pw70ebWlvr/AOKmq6fqEzStLbxzWv8Ax6x/eXd/8VXIftS/tQfDX4e6Omm6V42stUmk2/Z45tO85V/veWy/+hV5n4r+L3xRm8L33hKw8KxWmpas0b65q00v3o2X/UxrXkPxK+HusapHN4qubKTVbyx05lSOR1RI1/8AQVWvOlKvKMuTQdKjScrSPG/2kP8Agol8VLPUH1+58VedbWNwy6No9qu6RWb/AJaMzfNXz54u/ay8Q+Jr+2+Inxgvbq5VZd0VjfXTMsn+ztZvu/7tQ/GzWNW/4SLUbDwveQski+VdXEMW5d38Sxs1eBeM9D1LxV4kgbUryS4trOJVt45v4m/3a5rU49fePXoYWUtIns+vftYfG/8Aao8Zab4Amv7qz8P28X2eK3ht12WNr/0zX7qs395q9T+PH7V/iT4Z+BbX4Cfs06xNpkaxQrex28Stc3ky/wCsmnn/ALv+zXgvge11D4feH4vCvh7y01LUtz3V591o4/4VovNDTTbP+xNH3TXV1K0t/fbtzM275VVqa5doy9fM0+q81W3Kc98RPEHxO+IHh2X4XaU9xHp91debr1xZu32nWJm/56t95o1+7tr0H4U/AfwN+zX4a0bx5+0Civb3l15uk+G43XzLpY/m+b+7H/tV3/h3xh4V/Zt+C8/ja80HRbTV1WP7Leax8z3E38McC/xf7VfFXxe+J3xR+P8A4uk8f/EjxldaxeTK0Vv/AMsoo4/+ecca/Kq1sqsFpCI44OrWn/dieu/Hb9u74kfFTXNZv9S1jTRbSW7W+jeF9Li2abp8O75dyr/rG21826hq2q+JtS+3+M9Vjd1+XzFT5Y1/uqv8K1u6L8O9Vb/Q0tlQSfNt2bdtaVj8EdVa68l0Z/n+6q0pVoSleTO2GVy25R/gzQ5ry1TUvD3i23dFXakPlfxV6p8J5viXBrEF54P1JtL1K1iZPOsZWXzG/hZv7tP+CP7Ob3l5aXOpWEkcLSruXft+X/dr9FfgD+yn4G/4R2K5vbONE2b/AN4iqzf7Tf7NeNi8RQcowZ6uDyOpKLkeAfCv4O+KviN+5+IUP9pX/wB77ZNLvdmb7y/7tfYHwp+BNnb+GYba5T7yfJHs+VlX5fu/3a6/4f8Awj0Twz4ia80rSo1ibb8qrXunh2x0m3+z6VNo8Jjj+XcsW1trf7VcVSpGU79D06eUQoxPmfxZ8Bb/AFazazttE+Vf4lT5f++a8g8ffCubwixe80yaVWl2I0cFfpBP4Y0ewgFzCAu5e1ea/GL4F+HviJor2cNv5LK7O3lv8zf8CrGpGMpB/Z/NGXKflj468B39ncfbNNtmeLeqt5zfNt3V9KfsE2/wv1jxNFZ+LfFrWbzbV+ztFv2rVX9oT4G3Pgu6EMMMjKu5/lTctaf7DdroM3xOsdH1m2hilupVW3kaL5pv9n/ZruwVb3vZ3PkMywfs+Zo+if2lLPxV8DdJj+IXwcmmitI4JEvNNhfZHeRt95vmr8qf+CvGg+FfjBryfF3RPD1vomqrp0fn2tvFt+1L/e3f3q/bv9qf9m7V/G/wb8iC8UfYV8+KaNs7o9v3Wr8av+ClHgebSfhfqXid9sU1jdLFLHMnzMv+zX0sYzjKMo6RPm8NU5a/LL4j8zVjdt6O7M+7a+6qklvMYTCXx5f92ta4ZJG/hDt/DUE0aLJwinb99q9mnKHIezUj7hzlxZ+Xu2Jjb/Ft+9WJfWrs+xONtdlcQ+c2zYuW/vf3ax9S03y8v5PLfwrW1OWvMjllT7HH38e19rj+CqfkeYT8nH96uhvNJSSQfuV+Ws64i2syZ+St4ynI5pR5TJj2LtTvTo4/LX/aX+JaszQ/x7NrbvvU37Om75+q10c5lGJLbq/y73bc1fY//BMbaNF8YKvQXNkB/wB8zV8bQt82w8/JX2L/AMEv33aN4xX+7c2P/oM9fpPhD/yX2E9Kn/puZ9Pwhb/WClb+9/6SzxD493Aj+P3i9C/3vEV1/wCjGrF09naQINwMjfeWtH9oVGPx+8YTRnD/APCSXSgev7w1kafN5TK7v91K+Lz6PNnmK/6+T/8ASmeNj/8Afqv+KX5s6bT1haQp823+Kt+1kSONETajb/kZfvVy2nq82x/OZT97atbFjdPGzP1Lf+PV5HwnD9g6Ozut032VIWwv+z81aEM0LQvMiYP3dsnytWTZt5cio+7fH99av28kPyud25n3f71HxESNC1j8tV3ncWTdu/8AiqWRZvLdJXw33kqKO587emViZXX93t/honk/dGFH3Ns+dmetYx+0Z/EUbyJ1489lVk2/L/DWZdKnlSwujP8AJ8jbttad5saQu+7d/wAtdtUrqH5vkTKf3v4qv7IRlyyMeSRPMZJht/hqtLD8xCblX+LdWlcQ7mCJHtZaoTske95tq/N/31WMoyN6ZWX5mWHzNoX7lMmCRt5Lv833ljalVngLJ5e4b/u1FJJuY5T+L5maolTOyjU9n7xnXkj/ADb/AJfk3fcqqZLZmKPuP8NT3gPlnejZZ93zNVC4k6h//wBqo5fsnpxxR1t0s32hdibW3/PVObZHJG8yMx3fw1p31vMq733FlfaG21VuC7N8iM25Pk2vXzvtI/Cfq2Hw3LHUrR72kd3Rfm+5tqeGONYx5xwP9+o2VI8og2v/AAL/AHafbzP52/5nf7vy1lKUz0qdGMTSs98kgR4cf7X+zV+OFJFHmdPuturNhvI42Ub95b5fuVfhk8uTe+512fw1yS5+fmMsVR5ocpfhaSWEbE2hU/v1fjaGGPztjbf49397+7WP9pcf6S7/APfNTWtwm37TsyzP8ys23/gVddKMn8R8Rjqfs5SubMciK3yR/N/B8n3q1LW4e4jjhdJP3nzq2z5a56yukb/l5y2/atb1nM8McTzJ91tybq6eY8LlNWG83xqjv833XWN60LVYW274G8qNNzqz/wDfNY1jJ5jMlsiru+/JHVqOaRm86Z2Qqv8Ayz+bd/stXVRqSlHlM5Rj8Rfkmf5P3LbG+Z/nqOa+gZShhVWbdvbNU5tSmZfJmdRt+Xb/APE1C3zNs87/ALZtXTzGUo83vE8cjxsI9mE2fLTFZFkG/n+Fv7tV1mjm3XO1vufd+792kW+2qsyIy/xbZPu/NSqVOUiNPlNSxjtrj53fYW+by2qzGvnbhtVdr/Nu/irLhuoVkRPP3qqfM2/+L+Kr6yQzRo8j4Mfzf7VeXiKkpyR
10aMYxNWys5p5k2DYnzfdT+KtfT7cxr99mEf9771Zul6gjZmmmaTc3y7n2t92r0OpQ7ljR1H99fvMv/Aq8upze15T1adSnGlozUt4/Lbzkl3M33/3vy028muYV8mD5k3Lv/iZqitby5bcmxhJNEy/Kn8NL5jySIHTZIq7Umaop0+Wr7x1+0pypE1pI67blEjzu+9/Cq1q2do91JjfvZn/AIv4VqlYr9skRFRokX5vLX5d1dDZw2ki/c/e/Lv8t9rV6eHw8fiieXiMVy+6vhGWel+djZD5e2X+H/lpVqHSfLkZJpmUL83l7Pu1fs9P+0Rl5hwvzbf7tX7WxxMk1tbZjkf72/7terRw549TERMr+ybmeEO4+99yt3TdJaG3jm+xyeVv2o0bbq0tN0dIVeF33orfe3bttb3h3wy9vDjZuDS/e37a7Y4ePVHDUrS5vdM/S/Du3DpMzpJLtX/Z/wB6ty10FZIVtnfy93+qb+81dFpvhm2jt4dj7/4nWtjTdF87L7IQ/wDyy2/N5dbSokrESi+U4G+8LpD5UyWuDu/1i1i6v4ZdrgwmHzV/iZkr1abQdtw6OnmFvv7m+WsjXvDfl3Ucy/IzL93+9RGjynYsV2PJbzQX8xvs1tC6Rqu5W3bo2rn77SXjtTNNbMu1/u/e3fNXst54deO3/c221vm83/pov+1XMXnhuFoTNs2ltzPHtp+yidtPEc0eU8t1rTYZpNidF3bmX+Ff4a4zxFpvkys78q3y/wC7XrWqeHbPyXfY0P8AdVkrhPFWmpGux32eS+2Ld/Eq1nKMTf2zPN9at90Z2bfvbtzLXM3E3+lCHZs2t8y766zxRDsk3wzyB/4P7tcTqy/uzIXXK/M7VzezFKpAat5FZtvSH54327l+bdTl1SZXaOZ/m/g3P95axmvodzJ53H/fPy1A2rJc3Hyfw/fas+XmOaVTlOjhv4fMVE+7/E1TQ6hNtZPlTd9/5q5i21xI92/c7b/4asrqUca/675GT52+9uWseWXNzEe05Ycp0P25I41tndgdm7ctPbUPld3nZlX52/hrnv7aST7j4f70Xy1F/bDyL9/e7fM+2qjTjLY5vbe4dZHqUNvb/aXdpEkdflVK0YdQeb5E8wbV+Zo/urXE2usIoEKPiL7y7v71XrPVHW5SF33pJ8rLv2100aPL8JwVq3N8J6DpmpfKHeRiV/2vu10VjqUO6H5/kb+GvP8ARryHy97vHvV/vb61rHWE875Jm3N8y7q9alGMYnmVJHcprE0cT73V12bYtv3t26tFdWhtYX2XMcrr9zan8W6uGi14WapYJu+X5t396rMeseWqJG+4M235n3NurrPMlGXNzHp2n+IHvMP5ytt++zfL/wCO1rWuvW3ls8c2GZtzq275WrzLS/ESSAOm7zV+X7v3a2LHXEkZN/Lxvv8AmespS+0a0T0yHXJmh8yHcNz7drfNu+WtK18Rw7j5k2122713/e/2ttedaTrjlgkMzKrf3m+7V2bXEhkd9m7bFt3N/FXFUqcvxHp06J3c3iTzGCJNJEy/61Wb5Vasq68RXNrI0M253/3vl21yS+JN0aJGkm2P/wBBqG41aZo2Tzo9iru2s/zMteXWrRiz06OHlI6S58UPt2fKfM+4v+zWLqHirdbyokKj5/lZvmauduNWma33u6na7M3zfw/3ao3F8F+dLlkVvvRtXDLEc0viPQjh5RldGjeapNcR+dC+0SPt3fe+7XPatqyW9vJM9s2z7ryR/eb/AHf9moWvJo7hntpsIu75Y/utWFrl5NIqQzOyDd8m1/8Ax2s+b+U3jR+1Ipa9rEzffm3/ADbd1cpqWrPLHLsRl/8AHt1a2rSPcfOiYCvXPX3nW+XeXfu+Zo1raMuaVmYex5feMrULj7VGEd2dv7tZsOm3EjPsdW/2v7v+zWncbJpMbNu35vvfep9rbu0Ox0Yru+8qfeq/acsAjT98y4dLdpD2f/Zra0HSpmkZPmc/dqW1052ydijd/eWtvT9Ptlii2Q/7O5qKdaHOaVMOb/hez3NsRGRliVdv96vRNBt3t7XZvjCQ/MrRp91q4zwzp6Mo8yRt7ffk3/xV3Ogwo21E+4vy7lf5t1ejRkeZWp8sDtvDawtGkP29V3fPuZK7DR1NncQon3WT+FK4zQXht49gfcuz7y/xba6izuiY1+eRFb5ty/w/8Br0KcpcpwVKZ22kXyBURPLDN/D/ABV0mj6lA8Y2PIjfw/Pt3V5/pGrQTW4m2fJ83zNF826tvS9WSNUtYUZvL+4y/wAK12wjzHnVNj0W1vHtWzH+6E3y7l/hantq3kLLcw3MKvH8v+181crb6wkioUuZGDfws392pV1xPM+e5XbJ821q7Y/AcHL73umjq14j25R9v/Af4q5TWLqG2Evzq/mfN5dXLq9+0K009zkLuZmZ/l//AGa5jXNWRpPO2xoPK/iXc26spbGsYyMrUJ3ib59qjd8sa7vu1g6jJ/f8v5mZ5Vq/qF9ukWFJmf5/vb/u/wC9WPcfZlXfNI0Uyvt8xW+9XFU/vHZGU+XQyNQkkmZZrx2wsX3o2/1bVzusalDJHveZvlTZLuRl+atfWr547zZczK+35fMjb5V/u7q5DxJrDx24hL+aVRv3LP8Ad3Vyeh00utyvPKjRyeQ+Nv3Gb7u6sDVr6FpCJpmRm+VFX5lam3msbm2b5HX7zsq7ttZ91qEM87Rl4xt/irzcRKNP4juo8nwiRzTSMyJB5qKvz7vl+Wp5QmwP8qfxbV+7WbHcI8jb3UK38P8AtVpWrJNMEd9+2LbtV/lX/arzanve8ehGMRNqSfuYfmfZVL940Zd4413P8u1dzbaszSQsk32aRnSN/wCH5d1QNHDGxzcL/Ez7fvVnT68x0R94bH5LQna+9F/vL96kjtnWQedDIis27dJ/DVnTbVGZs/L/AL1TxokNx8j4WT7+7/lo1axlLm0NuXmhqTWipDYv5KM6K2591TWcabtjvsVl3J5a/wDjtR28e6HZ1Zf4d/y1citYWVIX+RVT5tv3qPacoRpyY638ldPjtkdS33vLkb5ttTx/vLxIUs8Nt2+Yrfw0yzW2tvnmRVZf+eifeVqsx+Yqw/Zk2+Z8vnVEZfFYrl+0QTNDHux5m/7v+y1el+HUB+FuxYgAbCfCYx/frzq4t/MYeSGD/e+V/lb/AGq9I8PxhPhlsxtH2CbjrjO6v2vwOd86zD/sFqf+lQPe4a5vrdb/AAP80eWR2vlxpZzP/H/F8u2s3WIdrfOi5/8AZa1mb5lm/eNKv3tq7lZf9qsy+t5Y5d/zMi/L93atfhk583MmeRRjGPunL6lp7x7k+VfMbbub5ttZs1ukzHY+1f8Ano33d1dHq1nHH1T/AHKz5rWHzGjtoWXd/Cv96uf2sY6nZHDykYclrtVke2Xa33938VUbousghd9jN9xa2ZmtlkXEPmfwsv8AEv8AvVj6lLbRx74XYFfmT+KroztU+EmVHliZOpLDIz/ufnXn5f4a57UXdZHQn5V+bbW9qU3y70K7m+bcv/s1c9q9wNr84Zk+Vm+7Xq4fY4
K1M5+88xpN7purJuGZdybPvfxN/DWlqCOqPMj71X+Hf/FWZdM8e55h95K9Sn72h49Sn7xXX95cDL42/L/wGtXSYx5gOxvl+XbWbD500ibEUsv39tdFo9pj6Sf7FdPL7gUYzlI3NJsfM5+6yt86766rQ9NS4w/k5VWxtasXR7GGVfn/AIm2o1dloenooZ0C72XbXJU7M9mjTlze8WtLsEkZ9k0Y+fau7+9XUabo728afJ95Nybl3baqeHbcQqj3McbBfl3NXV2FmkjPJDcq4Xb96uOpGXxHq06dKUYpEOk6X5ylILZstF95f71emfAXwimreLre21W5YLuXfcfd2rXKabZfZ7h3jmZG+4vyfLur3b9kP4U63488VWc3h7TY79VlXzY2bb827/x6lHlcRY6MY4WTP1u/ZK+FOm/DP4Z6LDbbrq81KwVrJZF+6rfM25q1vGEj+OvHiTWFst5pfhtdlrb26/Leag38Un+zHXGfAP41eJ9e8SS/DeGzaKbSdNaJmX/l3Xbtbb/tNX0V8K/CPhfS/DMNhYRR+YsrS3En3mZvvM1cMv3kryPzapzJyPOND/Zr1KGFdV8SXLXVxskuNXmb/VLIzfLHHu/hWvnX9rjwb4vvvM+HulJbvaK6tcafp6N5cO5vl86Rf9Yzf3fu19SftCfFC+1vTIPA/gIXn2mSfY5tU+VV+7ub+81eaftlfEzwv+zP8K7fTdHS1PiprJVih3+Z9jkZW3TMv8Un93+7UuVKNKXY68LRk6sbbn5i/Gj4av4T1ObR9YS3udXbd5sMO1fssf8AtKvyq3+zXg+gfC3VY9Yge53QrJP/AKU0nzMq/wCzX0BY69rGrLI+sbUnvLhpJ2kXczbv9qsK6h+2X0eiWz7ZvN3TyMn3f92vmamKjf4T7/C5VOlh+aRw3iLwe+patPqttZRwQw7Yk/vNHt+Zqy4/Elh4Z1LzrnSrd0t/me3k+Xc235a9Pvo7DQfDGuGaFmnjt22M38Tf3Vrx3VtB8YeMNHm1u20qOB5k/wBW0u5qKdT23vIwo4flZ4r8ZvE/xI+Nnj648YeJ5ldY38rTbVflgs41/hjX+Hd/E1ZOk+DfEcbxubbaFf8AiT5a9T0P4J+P7qF5byzjTyZdreZL91q6qx/Z0+JbWsVzYaUt5u3b1t7jdt2/w12VqyjGNmdeFwspu7OS8A+A9Y3R6k9hJcS7/kWNl+b/AHq7nxNZ2ek2qasmiTQyKu6Xcn3f+BVZ8N+EfiLoOqeTdeErqFI0/wBX5W7/AL5rtvFmueHpPC+zXka3favm29wu3buryK9aPMe9Rw0eXmRjfDvx/olnshunVE3q21v/AGWvvb9lXXE8SWdto9nIyJIqq8lxtb5f4a+Crz4d+Etb0m21XR7xVbfuT7P91v8Adr65/Yt16aOxSO1n3vHt/wBcm1v92uLESheMkejh4ycHBo+0ZdP8K+FbX+1tYufnX5XZV3eZVvwT4i0r4heJjo+m20zJDtVty7f92l15/wC3PBdnc6rNa/uUVpdrbWZqT4O3GiaP4gj1j+1rWPajOkbS/wANdcKlKP8AhPPrRnGlJxjqe4W/wlbULBJgm0bflWue8Y/DK+0O3juAjKD8r7a73wF4+s9ejCJqNuyBtqqtanjHyrq0XO1l/ir2ZYfAV8LzwPi6ea5nhsdyTPi79pb4dvq2gyv9m+aGJmST/wCKr5n+Bf8AYnh/4sWb6x+7aO82xTL/AAtur9AvGXhWw8WCWxvIcbd33fvV4P4T/ZB0q18YX0NzDcT2011vguNu3y23fKteVhYxjV0OnPeWpSjM+x5YLZvhqNE169WeK5sNsdwv3WXbX4x/8FY/hzo+tf8ACTaDePNHpWl6XNcJJGzfvLr70Kt/s1+vXg+K6+Hvga58H+LTJdRQt5dq6/Mvl7a/O7/gsN8LZ7z4J+J/EPhW5ke3jtfPfy23P975vlr6aMvejA+AnKPt7n4GrcGTyxc7fOZdsrf7VM2J5flvt37/AOGtW+0/7LuhdMP8zfd+bduqs2mtIV2bhEzfPuT5q9eMoQ91nurmqQMy48mdtnl7f93+Gsy6skWOR0RifvJ89b0lqnmL5e1f9plqrdWqFmR3VQ3/AC02U4ygOVPl3ObmtfOV1dFx/svWVqGmw2+R975/4v7tdXNpvkwtDs+Zl+8q1mXGmzLG2+Fm+fbW1OpKWpyVI/ZOXuLeCNy/k/xfdqpcRw7nfZW9dafubY7/AHfl21m3MCRs2/cq11xlzHNyozWj+benT+OvsL/gl1uOi+MmOMG5sduP92evkSZE3b0Rifu7a+uv+CXAxo3jMbcf6VY/+gz1+n+EOvH+F9Kn/puZ9Fwj/wAj+l/29/6SzwP9oyZ/+GgvGK7sgeIrvj/tqaxNNuPL2JN0krZ/aOwfj94yOSCviW6/9GGub0+R2XYzt/srXxmff8jzFf8AXyf/AKUzyMf/AL9V/wAUvzZ1Oj3SNuRHw38DfxVu29w6yLDs27vv1yelzG3l3p81b1nN5jK78bvvN/drxvfOTl5pnSWl4nl7HT5l+V/n+8taVjcfJsSFflf+KsG3mh2+WduWb5P9qtXS5nkYxv5itu+9VRlH4jGUZmxbt+73zR/IvyrJ/FUMlxM0jOkfH3tzJ95qYskm9Rsbfs/75pZLh/nhO5F3fw/N81aR96Bj8I6WaaSMom7aq/eVP/QqryxI20P/AMCqeN5mZk+Vxt3eX/E1OjXd8kPyVUZcxPL9ozJraG3XeifKz/w/xVnXUMO95Htox/drcvLdFtVfYo3LuTbWXdL8q7AzL96plI1jyGJcQozF0P8A31WddSeYp4kEMbrvb+9W7dW7qGfO0N83+zWPdW/mK/f/AHanm5jojLlMy8m3/P5mTsrIuroN877Xdf7v92tPUI0jU+TuG35X+Tbt/wBmsS+XbJshfaf71Ryx5jXmker3lr9nk3pueJfl3VmzW6TM2zaF/grcuLdFHnHcR8qbW/hqo1rDJH/CN391a+PlL7R/QcY83wmTHYuzLM77gv36lW1SSRnhhZSvzfLVo2b+cSnzfd+an+RNC0SeSzOzsrt/DWHtOY648kYFdWeFUR9uG/8AHqkt5nikOxNoX+9822nL5xjy6Kf8/epm5GzD/H/epx973ZHBipR5fdJ4bi5aP9zNHs2Ns/vNVuHZJbNbI7P/ABf7W2qEbfZ2xN827/x2prW+e0be78L/AMtP7td0fhsj4fMOXn9407WSGFfJRGb+H7ta+nq8kHzpn+78+2se1mfcr71/3lq5b6hN5j7JsCRNqMq/LWrj7uh4EvdmbemyJ8qbFRt3zMv8VTLNNJHI/nLuX+L/AJ6L/s1nW/7uz++2/ZuRtv3quBPLg8ya5/g+ZWStIS5dSOX7I+GRI4XSGNgmz7rPVeS4+ZPOYsd/yU6Tfuf7M6xFv4WqnL50kjuky7fK2vC1dPP9oxkWbjUElk8l32t/s/dWoFvILyQJsYfNtX978tVmbbI6Kioqr8zN/FTI7xN2+Ty97L8jfdWueUuccTY+1Jb/ALlJF/dv8isv3mq/p
9x84+eNh/sp8y1z6373EiI6Kr7fmaN6sW00KyK6bi2zduauOfNy+Z0x930Or02b95++mX5f71acepTKzBEXfs3Purn9LbanD7y339yfdrUhZP8Alt137t1cXNzTvI6YxlGBuWt8hhUIJF3fL5i/w/7tWVmSaNfJRti/Ju8373+1WXayJH8+xTtXbEy7mbbV2Jk2Lvs2H8KfPt3V1UafNL3iKlSMYmxpLNMqTecrPHtX5vvfLXXaDbpMrv5Ledv+Xb/dauP01vmVEhVNz/Kyp8tdt4fXzpPIRGR2Vd/l17mFp+6eFiqkoysdDo+mvIzb3b5flZl+7XQ6TpO1o5tmxGXd/stVHQbXdbxecmGh/wCeb10liqM4f92rr8u1Xr1KcYx+E4alTlLuk6D8rPDtQSfNu2LXS6X4f+yyKfJWUtF8q7/u03w3Y+XGyPDH8q/LG33mrqNL02e4bzo4VRVXay10RjGJyc5FpOkww42Qq+77m193zfxVvQ+H08xPJtlZ9m6Jn+XbV7S4YYlR0h3SL/dX5m/vVqRqyjyfJkD+V/q2Sq5UHtOU5280VPtD749n975K5/XtP8m8EM3y+d823+9XeXGzy1eZGd9jb/7tcxrlrbQyhNjFNu7c33aj7ZpzcxyOqWaQ7vOudqeVv/2v92uW1eOGa4a88r5FT91I33v+BV2Wp2brIiJzDI/zyKtcv4gjdZzs3bpPlZm+78tLlluddOfLM4XXo0mh+SHarJ/F8u6vM/G0dtaxvNawqV835tteo641tcb3vHbEbN833a8o8YL/AKU6u6tu/h+6tTLk5Top1JnmnjCOb7P+7mjHz7vl+9trhtek3bofP2oqbv8Aers/FVwlx5mx/nVG3bfmrzHxRdP5hQPzXJyzqG0sRFR94yNU1b7+z7y/L8tZNxq0c0b3Jm2Nv27t1VtavX3702/7y1h3GpPu2O/3f71Vy/ynDWxHtInW22uBdohfa6/Nu/hqZta3N+7fb/7NXFQas67k3/K38TVet9Q8xvlm2bfm+ap9nAx9tI6ttY2YdH3Fko+2Jw6df49rVzK6kYlb+PbUi3ySS/I7f3qIx5ZGUqkpe6dPb3m2RpoX/eL8rbv4a0bfUtzJdO67l/4FXI2eqQtHu3/8C/2qu2epTSOux9n+y3y7q2jH3zM73T9akVkm+8v93bWo2tedHvFyzL937u3a1cTY6g4VUmf52+atG31abDJu2p/erqicnxHZ2/iF5sbPLYqvzM33VqVfESBm2ffX76/3f9quTh1CaPbDH/wGRfu1ajZPl+zTbG/iX+8ta+hh7M7bS9YmkiWGafcjLt2r8rf71bVnrT7kh3yP/F/eX/ZrhrGbzFV3vG37Nrs33VrbsY7xIUe2m83c6szMv8NcdSpynVRpx6Hc2erQx2/2lOX/AIlX5ttWl1yaSETfM/z7f97dXLWN1cw/8eyfeTczL97/AL5rQtdShjhDo+2H+Nm/vV5WIqcsbnsYWjzStymxeeIEjbyYd22Ph2/iqncaw9rHvebCyP8Adb+9WU155032lHXZuZdv96oWZJLN7ab5BG3ybf71ePWxHNE97D4X3jT/ALSe8z9pj2+X8sSr91v96oG1SaTb5235f73/ACz/APiqprdO0QdEXYr/AL1f9mmTXfmMsMPzLs+RtlcVOod0aMfsj5Lp7iGWZ3b5k2/LWFeN5jeT80W7+983zVozXFyI2tt6hG+4zfxVkXjfudm9lZvl3f7Nb06ntNEYVKMY7mdqW+H9z23t8rP91ayLqO5uPkSFmRf4f4mrQuriFoU3fvfn2/NVW4vljk8mGHyjt2tu/vV2c0oxujk5feM37DuZkRMNt2/MlW7PiH/x35aYs26Yp8su5fuq3zLUsLPb3C/Pt2/cqJT5dB06ZetVh3ryq/71aGn/ALuVd/3VrJ3PJIyTJ93/AMdrY0G8Ty/JfzpRHu3tIn3qIRlH3jWUfaaHUaDcJ5ao6MzK27cv3a6zQbyQTNInzO33F2/LXD6bcJGyI74Vfm/2q6Wx1J9ypvkEvy7GjZfu162Hqe8eVWp+7ynfaPJbfZ2dHZ9z7Uh83asf+1W7pupTRstz529lT7rfxVw2lal5kZmd2V12/u2Td/wGtnT7ry1KJMu35v8AZr0adQ8ypR5TsLXVPMs/9c0e19rrIv8AF/s1r2uqXkEYvLOZndfllVotqrXFQ6nthE/ys0fysrPu2/71Os9amaYyC55+VWZq9GnLuedWp8x6FD4lhgaJIXYFlZvlT71Nj8T/AGj9yjr+8f8Ah/iWuGbxI9q7wWz/AC/xs3/oK1HJ4shUqnnMiL9zbXbHklsefKMIzO3m1yFrf7TC7AfMPJ/2qwdY1wrCu/gs3+sV/mrm5vFO3c9n8vz7XaSX5VrMk8UwtDKlzMvnK21l/haoqR5dCffl7xs3esJIX2bnC7d395v9qsbWdYe32b3XzP8Alrtf5dtYN14qgt3e2S5jV2/h/irBvvEk1xGzb1VY1+dt1cdSPNLQ6acvdNnWvEENnHNM8ykxv/C33q4fxDrk0k0ib/l37kkb5t26q+teKkkDvNt3t8zMtcnrHiItN5KBdn3k+b5lrgqfynZHlkbtxrVtZw7/ALTvZv8Almv/ALLWVeat5jApt+Z6xJtS3SI/nZX+61Vprh3kZ4Xj/eN97dXBUj7T4jqjI6i3u8Ks29Szf3flq3a6lJH8+/Yfvbv7y1ydvqEyxKnk7vn+T5qnk1Sa2yjzbdq7vm+9urilT5Ye6ehTqR925uyaqk0b/eZmfa275flqS31NGXzkto/m/hX/ANmrn21iZtuyZU8z5k3U+HWHaSN0Ta6/e3PtVq55VJcvwndTlyy1OvtdQmkX54VTc/zMv3q0oLy2jkV0Tdt/4FXK2OuQ+YryXK7Gf/V7Pmq6usYjeTYqqv3W/vVEeaR1HQxyJ5kSIjH5fut/dq/tcXSbPubPu7/mVv71YdnqXmbd7rhU+dmq/Y6pDcXH2YoyMy/I237vy0hx97c1Ifs3lt5jxjci7Fk+arUMHkqIUdX2oyuuz7v+7WRbt5eyH5Svyt8y7q1GZII9jo21fmb5/l/2adT3fhJjT5viGyabCuy5+YfPu2s9ej6AGT4akO4bFlP8wHXl686mmS6YpDuRFT978/3a9G8PRmP4bCNn2EWU3zD+H738q/a/A6bedZhf/oFqf+lQPo+HKThiattuR/mjzMxww2ryeTMrN/DUN1H50ex3b5fl3L8ys1XZLia8t4kTcwVdvmN8u3/aqhdM/l+RDuVF+ZGV/wDvqv59qVpVDmo4eFMxL63eRv30zBI/l2r81ZV3G8dw/G7y/wDVbf8A2auhmjSGR7v767dvy/xVg6pbTNDvcKCzLuX/AGv4amNSPNytnpU8PzR+E57ULiVpG+RYgrsrN/s1iXk3kuFd1y38X8NdBqNu+2RCnz7sfN/DXO6pGjcPtTb8u7+9XfRlEKmEjuYmsXDyKbeHais/zsz/AHqxNQ3sp3uuV+Xd/DWtqkfyKUm4VtqN/drFvmi2jf8AM/3Xb+GvUo+8eTisHLm9
0x71tqt5Nt8i/K7R1m3ioU/2v4a1Zmdd+zbsZfu1lzInmb3/AIW+7XqU4njVcHyyE02HazeTtb5/u11Gg27tlHeRtqfJ8n3VrG0+F4nDui/c+bbXU+H7fdGzvMxGz+5XVGXvExwvvm7o1v5Koj/OW+589dvoduk0LPs27fvqtc34ft5gqOm0bf4tldvodq7bOF3f7X/oVRWl7vMethaMZGrpdhbeXGnaSLdtZPmWun0nS5oWbeiqJk/1n/stZWiqjXCQ2z/Oy7Xb73y11Oh2VzGzpM7OVddjL/drhlzSPSw+Hjct2tn5cn2beu3dt8tX+Xd/vV9A/sg+IpPBPjCwv4YftU32jbFH91Y/9qvErK1s7gpNbQ5Lf3v/AEKvU/2fY9SXxdbPYbXXzVXc0TbVbctZVPdIzTD82Dkj9UPhb4f8K+A/BOr+P9Kud2qas8lxdXDRbVVW/hX+81dp4W8Za94f8Kvq/wBsbdJaqkULL821l+Zq8+0rxtrGj+C49N8SQ27XF49vE7bf3axt97av92qnib4gWzalcaPol/G6R3HleTC3zR/LXm1KnN7p+Zxp/vfeK/iD46P4D1aPUtEg8/WFuvN+2TS/u7eNV/55/wATV8RftLftA+IfiVql/wCNpr+adpNSkuPMkb/Wfw/NXrfxz1y8s7/WrmbaIbO1ZUXzf9Y235trV8m/EC8/taOysIYmjT/WxKv3dteZi4Q155H0WT041KsTJh1vxPfTPcpcyFW+Z1Z/u7v4Vrfsbi5Wa33wrujRv3itVLw/Y+Zbx22z5P8AZT5lrtPA/gua41KG2s7NZY2fc3mfe/4DXzlTEUuX3T9Fp058nvGF4ivtV1ZTYWGlSNbzNueSH5mrn1+Cfxa8SWKXL6lHo9lv+S6vFZfOX+Kvqe88E/C74b+BZviR8QtTjsbGxTfLH95rhv4Y468d8YfGLxb8WNNi8W63pVn4c8G287JYfal23N5H/u1vl+IpRjKLPPxWFlTfMtDw/wAVfDHTdDhTSrP4/XE100StKrRMqszN/wCg/wC1UvgnwX480WZI/DHxRsZEkb5FuL1o2Zv91mrK8dfED4Pw3kzw6VCgVtssi3DeZIv/ALLWDfeOPhXqlu6aR5ltNIny/vd22niPZyj7pphZOnLnke9Wev8AxR8M3STeJPDbXMcLqzSWvzbl/ibdW/q2peG/iF4Ru5nsLO4h3Ku26i2yx/8AfVeM/CX49X+k3iaVN4umu2hiVU+1bV/75r0Tw78YvB+rR3OiaxZQzxzS7kuF+Vl/vV5EuenP3T6KjUo1qRt+H/hn4Y/sWGawS4tfLf8AdLDtkRmr0j4f6TqXw71C21Kw1u4htm2rtWL5mZmrmNB0XwfcWkL+FdSurdJG/wBSs+75v4vl/u133ibxI+j+HdNsrnxDH5TXmyJVt/m+7/eqJS5pe8VGPs5H0R4T8RaDqnh1LbU7m6kuIVVYlkl/9Crs/h7oqX2oRzL9lCTPuX5l3Kv+1Xgnw50PStWtFv57+a4juIt3+tZdzV7f+zvofh24m87zt6Lu3NcS/M1VCM5T5UKtyRpSPpTwCulWumo14kburfu/n21va9rN3b2EjWc21JP+en8NcXpzeDZzHZwTW6vH8rLHPUXiH+1LGxl/sLU/MZdxSO4fcrf7Ne1KoqMOU+Lng4V8Xzv8SbS9VFzqkqxPld+167f4aWNtqGp3KXNvG6KnyqrfxV5HpWpXKyGa8mjhmX5pVr0v4ReJHW7CfKyzN8zLXHg8VGGIi5bcw8/wMlg3ym3400NLXSp7ZBny923d/Etfmz/wVA+I2n/C34d6lZ+IZpmsdeiazg8n5v8AWfLu/wCA/er9O/ia00OiPc20G9/KZdq/3dtfih/wWy+MP9vXkHwom01ZraGy3/ao22yLN5n3f++a+wVH967S0PzKNP2mIUT8tPE3h1NF1ibR7O586GJ9qXUn3pFrNm092KwyP89dVfaLtuij3O/y/m/vMv8As1VTS4VhV9nyr8u5V/hrr9ty+6fVUaMYwOWmtfL/ANGSFtzVRl0lNjJ8qjf8q118mj7ZGMyMyN8u7Z92qs2kl5CnkqqR/KlEanu2iP2PMchcQzQ/uUdSG+4rLVC8sX5eNNy/3WrrbrS3+1fP8rbN3l7Pu1l6lp8Z+4ny/eaumnU+E5JYeMeaTOJ1axRN2+FW/wBqsK4s8A7U+b/arttYsbYLsfbvZG3LXMahawqzp8wVf4v71dlORw1Iw6mDffd2Oiqf4WWvrP8A4Jehho3jLOcfabHbn/dnr5Ru49y79n3m27v4q+sP+CYMaxaT4zjXtdWX/oM9fqng+78f4T0qf+m5nt8Ke7n9Jf4v/SWfPn7SEu39oDxlC/3f+Eju2/8AIhrk7dkWVPn+9XW/tGr/AMX/APGZ+U/8VHdcf9tGrj7WRFk8z5T/AHK+OzyX/C5iv+vk/wD0pnjY6P8AttX/ABS/NnQafJuby3/hX7396tfTpN2Pk3L/ALX3a5eGTlXP3d+7733q2tNm8sId/wAyvury/fOXlOmt7j5n/fMh+9WrY3TsvnI6qW/hb+9XLw3Dqzl3+992tnT7h1Yfdx91V2VEveM5ROnhk85Vm2fw7W21L8kkPyJn/gf3ayLe8RlZ8MqL99quxzIy7IX2bl3bquMebYwlEsq0fmb5vl3fKn8NTKyeSsKbt0f3t3zbqq28k3k75Pnf7u5fmp8Mjsw/fcbNv+9/tVfKjD+6Pnt8Rp++VdqNs3fd/wB2sq7jcR7Plx975m+9WjeXkyv/AK5cL95WrMvl3Tb97Z/8dVaZZn3TJIwRE+RU/wC+qzrqNI5vItk+9/47WrqEh8z53+X/AGU+asm6fd9//Wt/tfw1MTaJja0tyu1PlZN/97c1c9qW9fuD5mf/AJaV0GoO8at/C/3vmT5WrA1SHG15H/4DUSiax2Pari186RoU+9/tf3qg2/u40d/+BKlaq2e64OxGI2feanSeTEvzp87fKrbP4q+Fl7x/QGDqc0DFa3RXXenzSfN5ar96mfZUmUeSkn+xWpbxzI3+kpG3/jzbabfWPlvvSHaqpuT+Go5oxNvaMxWt03vC+5N3/j1QSQvtVf4FbanzVpXlnub+L/gNVGXb9+Flb7zV0KVzkxlSHIVZLjbuR5MMzbWoWYxt5P2fcNv3mf5ahulhhbek25arxuVkd0dXX727+7/s120o8stT4zMKnMbtlcedMm+ZlVU+6v3a0obx5Lcwo+z/AGv4q5zTpH2h+7VrWbPtP2bbu/jZmreEYx1Z8/KU+c6TSbiZpGT5U/hRm/8AQquXH+sfzkZDsVU/76rCTVtqi2mSN9vyrtq9HqFzND52xVH3fv1EZT5tRSlzaF+ZYcv8izM33tq/d21R+yQzLK6blX7qbX+9RNqDxw/u3yGfbtVvu02PyZ1Z0fay/dp+/wDEEteUGWPc0m9i6/eWRf4qpNHbNvuX+dd/y/PuqzfM6yLNJNv3fxLUK2/ys/kqC38LfLSjKQL3vhK0d9tVoXRVZX3fMtaFnfIwZ3hVf4fMX+KqsduJ2Uum7cv
3d33amk8yFRGj8fe2rRKMZe6VHnOj024eRf3KeYzJ/e21prcfZ1PkupZv4f4VrmNPuts0saQswVdz7v4a2bPY0aB5m2x/fb+Kud0ff1Or2nuWOhs7uGE+ciMq/d+WtNLtJFSzhRnK/Mm2sOxWa4+REUo0W5Pl+Zfmrd0+WzjiCJtfb8y7f4a6adLlncxnJVIWia+myTKrTTJzvVV2r92uv8M3lsu/fufcm3c1clpW+OL/AFO91Tckjfdaun8NyfaNs0yNEq/wqv3q9zC0z57Ec3OegaDM9zCNjxptbdKq/dauq0G1tm3XO9pIm+bbsVVWuF0uZLVY4fIWNGXcit8zbv4a67RrqZpbe58za33mXf8Aeb/dr0adP7RwSkejeH4/O8p0f7ybUX+9XZabawsEtk+QfK21V+9XE+H7jy1T54ztbc6xt8q13nh9vtEf77yzKyqrtH8zL/8AE1rGPLEn/CdBo9n5Nr5Lopdmb7v3dtW5l248ubLKn8TfNVeGa8tYV+fP+z95d1WZCjKX8jY2z96zJ96o5oyKjEqagqR7kd1y23Zt+61cz4gjS0j2XLspaXcvy/dauj1DZHH5z+Yd3zRR7fu1jaozRw/ImWaLd83zbaxlLlNIxOV1hUuml/crCm75FZf8/NXH68sMgEqc/e2fNXXa1CGuDC7+YN3mbo/vfd+WuN8QSQwq5d+d7SfMm3d/wKsuc3jznn3jC68uGaZ9vlbN21fvLXk/jpvMjaG2RVRdzRN/Ev8AwKvVPFU3mJvtodu5PnVfutXl3jC3hjjbZbMsvzebub5WqObqzX4fhPKvFKeTI6JuzIm5F2/erzDxRCity+CvzOuz7tekeKP3kjoiMpX5X3NXm3iCP/Xec+7bu2L/ABUub+Uzqe8cF4imdY5dm37/AN5a5ua4/fbH53J81dF4hWZf+WON392uUvF+zsfm3v8A3a2jyyOWUeUtW93tYo6ZVa0bebdH8j7f/Zq52GTdJ5KDFaNvePGqoNu3b/FWkokRkasF4jbod7K3+zUkd47Mdj/ef7y/3aoQ3Wfvj71WM7vuO3y7ay/xBzSNKxutuzyvm+Xa6slalnIkapM6K53/AHawre5RWWXeq7vlT+9WnYs8f39vzP8AIy1UdiZRN7TbjyFMexfu/wATVpWO+SPjcu59y7v4ax9PWFtvnD73yu1a9nN9onXzplfd8v8Ad+7R7QI0zTt5PM+ffkL/AA/3qvWtr50/nPwsf8Tf+y1Rs4xt2JuRa07ePZIg6IrfLTlW0kyo0zW09Zljih/5Zt8u7+KtuwaaFETep2/3X/8AQqx7P95H8j5WP5XVavxzMg8lHX5trLuavNqVvtHdRw/MblvqEy3SzB13r827ftq39qRd3nP5r/wL/drFVnXZNGy/3fl+ZqveZNMpeEL8rqqM38S15OKqHvYOjJF9Lj5Yk2bVb5tzJ/DTbiTbIkKbmf5m2qvy1VhZ23o42Ps+Vd9NW6muFD79zsu568mpyyqns048seUluZpo23w+WnmIvzbvlaqlxff6O80M+/dx9yo9QYMv8OGX7yp/FWfcaj5sapZwNt/ij3fd/wBqnTlt5DlHlkTtqj/wJhVX7zJVeSRHs9iOzbU+XdUdzsWZEh2/7W56q3kkyqzzI2FX5mX5q6Y8vN7phKPNEgmk2ugmfL/88/4apX0cM0ju7thf++d1TeT9oVXhmb5fmRv4qrXVi8dwqbGK/eZa6pc3wnHKn7pRk7TO7JtfY9WIZJlVkMzMq/Lu/vVNeWLzLv2bAv8ACyUn2eaPYibfv/3flo5eaMSffiTWMKK3yPv+fc6ru+b/AHq0bVbZvkR2iVf4W/vf71Z/lzW7s8PG75dy/dq3ayIbcJ50gl+ZtzfdpKXKXGJtaTqDr883+sVPl/4FW9p1x5OxJpm3Sf3U+bbXKW801ufkm+ZV3Ju/irSt9QS4eCZEkRP41V/vV3U5cxzSox+I7bS9XfSZUSZFZZHZfm/hrZXW9yqyXK5k/wBn+GvOYfESMwSdGYq/7rav3a0ofGEzf6HNMpVfuts+bdXo0ZHnYinGUTvl162WfZbTSEyf7P3qjbWHjjd5h8iy7dq/3a4638SJNImybDLu3Nt+VaZN4mRV/fTfLub5d33mr0acjyalM7bUNYeaPZZ367PvbVrNm8Qf6QzunyR7d9csviHTWmVH3Kjfek3f+y1BJ4ieJtqFZWm3N8v93+HdXRGpy7SOaWH5jevPEkPmTfvt25/733VrNv8AxNczKUebZ/FE2z/0KuevPEXzMjxxmWP+6/3qwtQ8SIv7nfIYv7q0qlaPQy+r8pval4qfZvR1DL8zrs+asjVPEUfktbQzKzb/AJ1j/hrmNQ8SbY9jupVW2uzVlXWseWrQo+3/AHXrmlUlzGscPy7Gtq3iCWNfJedt395U/wDHaw7jUNzMiTKq7/kXfuqpNqTyYTf8y/wtVSaZ4/nwu1nrlrSlI19lylxb658sskisW+b/AHqkjmfzN/nR/L8ybvu1kLeOkbTed8v+zUU2qOzDY/LcNtrm+L3S/hOguNSmZVm+X+79+j+0pto+dUaT+FqwZL4zL8/y7futUkdxN5i/6tv9pvvNXNKX2TpjLlNuPUnkkV5E+RV+epmuvNY7IcbV/ibdurGjvEkUQvDz/vbamt7r5UT7399t9YyjynoU6kZG3DqFyjJ8qsi/N5n/ALLWvp+qPt2bMf3JK5iNfm3puI+8vzVbsL7azb5m+b7sa/w1zSjJHZGR2djqD7t6TfKq/MqtXQaPO6wi6d/vRbdzfxVxWj6nuZUd8MrfJ8m6ug03UvO3Q/ZlO197/wANYylM2p0+Y6K1u/Lj+R2xG3ybvmatGO8fzF+T5Gfb83/oVY9ncJfND88cUknysv8Atf71X4ZPMZIXRW+f5v8AerHm5TaEZy+Iv+ZNuTftfy/vN/Fur0zw65f4Y+YV2E2Mxwe3368sfEjRI/yv/ufLXqnhlU/4Vqixn5TZTY/8er9s8C23nuY3/wCgSp/6VTPqMij+/m/7r/NHnt5H+5itn+5Ii7v7tZ8kMyzSp5KhI22r/d21qSWb3CnenCvuTb/6DULbPOh3w/Kr7Zdz1/PkqnN9o1o4X+6Y95buWNtC+xmTzfl+6y1i6hIixu6bR91nWN/vNXRX1u7M+xNyK21v9paytYVIdsH2Zl2oyp5f3v8Adp0/dPVp0/7pyGuRXMzffkVY2+Zvu/N/7NXO6sfLb7NMjJt/irq9Us90n2l3ztXYism5WrmdS86Ni820p/drvo8vxGv1X3DmL3ZIzIgVl3bflrJuo+S/3Ntb2rQpG2P4W+Xy1rKns3DSfuWAVflr2cPLljc4K2D5YnO3lmlwz7Jtzbty/wC7VWOw/wBKeR/+AVrzWe5m/c/w/dp32fy4N8m3/YbZXfGpKMeU8epg483NIi0+zSZvkTCr9/d/FXS6OszRjeioW+VmWsaxhdFX523b/wC58tdFpMflrsRN27/b/hraNTkMJYXlOi8OxoNls+4r/Ht/irudFt/tHluiMzxoq7WX7v8Au1xuhyOrIy
QqhrufDLPJInd2/vfKtVKXNE2o0Yxl7vU6fQbONVLwwySvHFuVY9q11ulRzeVbzIm3+J1b+GsHQVSSNIYfLZV3bpN38Vdbodm81uj+Su5fmfa+7dWTX2melTpxL+k28zWvz229G+b5f4f92vVf2f2ez8XWSeR5paWPyvMT737z7tcFZ2MK2fnbN7q/7j59qq1dz8KbeaHxZDNYW0yXEm1U2v8Aeb+9Xn4upyYWpM6qeF+uTjQl9o/S74m+G4bT4ZS/EzwZ4psLzWtAtYWk06RvMjXb/s/xf7tfMHwX+Jut/FzxlqtnbPJJqlxPJdXEdvFt3MzfdVa+P/DP7SXxy8E/FbxVHZ63cSaRb6pI1/DNuZY/m27a+z/2Q/i54Bk1Cw+IWgww/wBtLexyrG0G1ZGr4fKM3nJSdXY8XijhChlyl7GXNJamT+1t8IfiX4VvNLstb8PXji4+ZpGT93GzL91q+cLjwref29NDqUKxxWfyRf7392v2s+K8ejeLvA3/AAmfjzSLGWBtOxArL8vnMv8AD/eavzg/aO+FvhvQ7BP7N8yaaS6knlVYvu/8Crtz/FUI0oqG8j5zhTC162KenwnhOmrbWypGkPlSyS7du37tegeFfFPhLQYY45LmMTyP/oqsn3o1/wBZI3+yteQeKtUvrWZLO2hbz1+5I275V/vV5r8VvjRqWh6bqei6Dcs1xfQfYpbpW+aOH+Lb/vV8pTjKpLlW5+jYiUMPDlR6b8fP2sPDHxO8SXOt68kieCvB8XkaXpqvsbVLrd/rmX+7uWvh74/ftWePPi14mmnub+4trCz3Jp1rHL8sa/w/LU/xI8XQ6ho6eG7CHyYN/wA/z7mZv7zV4zqWoPcTTwWyb3j/AIv71fQ5bl8YyakfJ5tjJThaMiDXvjJ4q85oZnbZ/vbv+BVF4d+N17Z3W+a5bLfK67qyNQ025jXzr22x5nzbWasi80mGVftKbVb/AGa+jp4XDOlySjY+TlWxUZX5j3bwZ8XptQmFyb/ey/eVW/8AZq9K8K/Eie/uGmS8k27l/d7q+QdNnv8ATXVra4kT/davR/APxKvLEJvmb/b3fxV5WKy5xu4HuZbm84+7UPqez/ac1v4b6lazJc3gtYdzvCr7vmavXfit+1emqaf4Pe2RY45r1ZZZPNbbuZfu7f71fFs3iiPxJqEWnxzcfe2q9bPxG8bPp+l6BpX2m4LWNw0+3zf4tv8AEteQ8JHSx9HDNpSpvnP1y/ZX+Muj61p6/wBvTbYo/mf978y/L/DXvPwv+IfgzSW/4SrVbOGSzXcqyNPtRWr8PvDf7bHifwHo722j6kyySffZvm3f3qitf+Ch3xyWzv8ARNK8W3Ahuk3RRrb7ttYxwmL2ggxObYaKP308Pfth/s1W/iR/Dd5qFvDcyT/6M0gXbGv+1JXpWkfETwP4igku/BviKzlRfmfbdb1r+Z7wz8Wvj34y1ppn8Q6ldy3Uv+rhT/vpa++v2Nf2nvGHw7htPCXjGwvoYm8tW+1W7Kzf8CrGthsdh4c9SzMMux2ExNW0vdP1Vm1r+0rUXP8Aq3ZvmXb/ABV2nwN8RSw+IvJuQqlW+RVrwvwH8RrbxhoMWq28yusy7lkjr0P4U6tdQ+JYXhdg6tudl+avBWInGpFy/mPezTDwqZdNf3T6A+OHiZvDHg6XXLi5W3tTbMtxMx+Vf7tfzQft1fFi8+M37Sni3xiniS4vIW1JrW1jaX5I1jba3lrX7ff8Fef2lY/hJ+yVcbtSEV3q0v2Ow2ffZtvzMq/7Nfz+XSw6lqCXN/c75vNZmmX5fM3N83/Aq/WsJKNajGbPx7BYb9/KZjw6bDI3/LRmb5v+BVLcaXc+Tsmh3L91mj+7Wva6OnmSp5P7tn3I0laVrpcEf7l7Zm3Ju87ZTqVPZyPoadH2hx7abMtr/o1ssifwf7NULjTX8vztnz7/APV/7VegtYpGzfuV2fwL/drJ1LTobdvMd1VvvP8A7tZKvzR1H9VjGRxmoaVM0e+ZGLr/ABL/ABVl3WmzRsyOWV/7ytXZXCosrJ9mZwz7f7v/AAKsDW7Xb5u75GX7rbN1dVOpI46lOH8xwWpaSFaV5k3N82xv4q5fVNJTaHfhv4a9F1PT4JIX2bS38bVyuuWe0SD+H+7Xo0ec8rER5ZXOA1KzmikL9fn+8tfVH/BMeEQ6T4xwxObmyPP+7NXzZrFvt+d4cbvlWvpj/gmlF5Wm+MRnObiy/wDQZq/V/B//AJL/AAnpU/8ATUz2eFVbPaX/AG9/6Sz5w/aTYx/tC+MJFlVf+Kju+G7/ALw1xm1Fb5E4b/brsv2kZU/4aD8YqzFc+JrsZP8A10NcarJu3u643ba+Lz/3c7xX/Xyf/pTPKxijLF1X/el+bLkM5HyId+3+KtW1vnX5ERWb+81YNrNJyiOrD/ZarsN0I8fw7q8s5ZRhE6WzvH2r8isW/wDHa1NPuJI5C8x3fwp89c1b3m1F8l/4N3zPWnp906/O83y/edWoM6lO51lneH78gX5l2+W3/oVXYbjzId6PJ833/n/9Brmre+jV1m87f/DV23vk3BE+X/e/hq+b+U4qkToo7qFVDmFkOzanzUkl87R8Iylk/wBW1ZC6hu/10i/u/wC7SRapDlPJf7qbZfn3bq0+Iw9maUkkednmbdqbd2/5agkvXVkf+Gqbao8jFEMaj/nntqvJqiOrOm1v4du+p5v5TSnEtXl1DIu/5vubU/hrImussu9Pm/2v4qbc3ifxt95qz5b7zJndnz8nzUe/8RrH3iLVrqHa2+bdJ97/AIFWBfSPJIzuGfd9yr99cPMu8uvy/wALfxVlyS7mPzMP/ZaylI1jyH0ZHHIpVJhnd/dp0dv9sm+SFYgqMzeZ95mqfy/LfyX++su7dv8AvL/DVn7Okcab+m5vlr4ipGXU/X8LjJR90zY4X2/O/wA6tt2r/F/u1DqFqkkbHfuZn2s0n3q2ZrF45IoXhVVjbcirVWbT0kkKJH838C1zxO/23LtI52+t5priUfdVU/76qjdWs3mKjpjcu75XreurOaR22W33v4l/u1i3kcbSedt+VflSuyMTixFaW5j3lq8ay7NvzJtfctZ8kbrI0MyrtrauoxGjb4eP7rVSnt3WTf5Kq+z7391a9Wjzch8lmFT2kiPTZNrL87bW/wBitjTlmmKR/d/vNsqnZ26Ltd+Qu1vm/hrWs49q/fbLfxVtKXszzI+9ISOF5mX7NM29X27q0o98J3vBIh/jWT/2WmR2/k/cSPCvt/2matLT7GGNWffu3fM7M3zVzyqFxjEreT5jCTyWLfN+72bamWzuVXZAjRJ975au2tq6xssbyKV+dV2bvmqS1hh+0M8/mAyOqqv+1S5uxfLzGdeRQtMjvHubft2/xbqhu4fJX54Wbb/Fs+Zq9I+F3wgX4kpfz3GutZm0mQKy2+9n3bv9oY6frXVz/so2lzEUm8dTlyc+Z9iGf/Q6+9yXwu434gy6nmGAwynRnflfPTV7ScXpKSa1T3R6mGyfMcTRVSnC8X5r/M8KNvBGqzP8vy7v9r/danRqkkquj
x71+V1Va9wn/ZLs51Cv45lGO408f/F0xP2RLCMEJ45lGev/ABLx/wDF16i8FPEe93g1/wCDaX/yZ0RyHNV/y7/Ff5njCzOrfuduN3z/AN6tS1aZpPkf/tm3y16on7IumqF3eNpSydHFgAf/AEOr1p+zDbWgIXxrK5PQyWQOP/H60Xgt4jLbBr/wZS/+TD+wM0lvD8V/medaau6MJDuD7/nXZ/DXTaUz7kjS23Mz7flTbXT237OFrby+YfF8rgfdVrQcf+PVq23wXtrZdq+I5znhmEeCR6ferop+DXiItZYRf+DKX/yZl/q/m62p/jH/ADOftVS3l86Z1RP4Fb+L+GtnS7iazmaG5T915S7vL/hb+Fa1bf4YWkUgabUzKigBUaDgY/GrEfgFIp3mXVXw5yR5XP55r0KfhDx/HfCL/wAGUv8A5M4a3DGdTndUv/Jo/wCZd0CRJJNt5M2+F9jR7f8AZ+XbXYaDdPHshSaFW2fxfe/4DXJafoLWEm437SJnPllcDP51p2MjWYJYl3LZ3ZxiupeFHHyVvqi/8GU//kzgnwjnz2pf+TR/zPUPDd150my53YkfZ9/5l3V3mi6slqyQv/yzTam35WZl/vV4bpvje604Nts1YtjcQ+3OPwrdsvjbd2YBHh6JyDk75yc/+O0v+IT8ff8AQKv/AAZT/wDkyY8HZ9H/AJdf+TR/zPfdJuHVU4Xay7nZm3KtX9Nvt1usyIxPlbpV3fKrbq8EtP2jLyzgWGHwnCNvpdsAfw21ZtP2nb604TwdAQeubs5P47azl4Tcfv8A5hF/4Mp//JmkeEs9X/Lr/wAmj/me03rLJthhm3MvzPu+bdWLqG+OFvs20fN92OvS/wBmX9kb9r79qHwRa/ETRfB2ieHdA1KJpNO1XxBrTo14oZl3RxRRPIFypwzhQwwVJBBrlv2vP2af2n/2StD/AOEr+Ifw+0u/8PSXSWw8Q6DrLTQpK4Yqro8aSx52kbmTbkgbskCvk6fDOZ184eVwlSeIvy8ntqV+bbl+Ozknpyp3vpY4aeTYyWL9hePPtbnjv2338tzzzVGRZJd+4fJt+X5dtcR4qmRrP/Q0U7fl2t8275azLr40XVzE6f2GFZjkP9rJI/8AHab4Rl8YfFbxZpvw88FeEX1LWNZvY7TTbKKQbppXOFXnAA9WJAABJIAJr3q/hLx9SpupPCpRSu26tJJJbttz0SPZfC+cQi26aSX96P8AmcT4pkH2VoXfYNvyLs+7XlnjiaHy5cvs/wBrZX6WaP8A8EIP2u/Feix6r4h8deCdDubiP95pdze3Ezwj+6zQwshP+6zD3NfK/wC2P/wTO+PH7KtzZ6V8Y7eCGw1OSVNM1vSZFuLW7ZApZQch0YBgdsiqTyQCASPlsp4dzHOsx+pYGVKpV1tFVqV3bV8t5+9om/dvprscWHy6ri63sqbjKXZSjr6a6/I+I/FXnTzNsdgy/wDLRk+Zq4PXrOaSSWf7T95/vbfmX/Zr7z/Zo/4JCfHX9tnVb1PhJqCfYNNkSLU9b1YLDbWjSBiozuLyNhSSsasRkEgAjPefH7/g2U/at+FPhG68cad8RNI8U2dhayXOoReHlb7RBFGpZmEU/lmXAH3Y9zHoFNXjOFc2yzNVluKlShWdvddakmm9k/ftFu6sm03dW3Ir5ViKNb2FRxU+znH8ddPmfkv4gW1+yPsLNtf+GuM1C3jdnf5v9mvqn42fsd2vgT4d6l45h8evdvpypJ9mk00IJN0ioRuEhx97PQ9K+aNWt1Enku+F/urTzrhzOeFcXHCZlT5JyjzJc0ZaNtXvFtbp+Z5uaZbi8uqqniI2bV909NujZzY3qzJ90U+3abc53s3yf36ffRoku9EZf7tQLNMrK7wq7L/D/erzeb+Y8o0Ybr5WfZ91Ktwyfu/O3s/+7WXDO7ZjRFz975f4a0LV42j+RG3M/wB3+7WUo8poadlImVff/B95a19NLtIm9MfJWTYxwrHsR8j+9/drZsVfcqTOqq1R7TlLjT5zWsfOkKee6o6/drbsGhl+/wAHZ8jKtYtrD5hX7y/PuVvvfLWxp9p+7V49yqv+t3JXPUrGsaPLI17FX++EX/gVbumrcyR702qGTc/+zWdpSPtT59iM3yfL96tK1heRTCm4+Z/FXLUxUfhZ20cLItx2aRqs4mxF/wChNWjZr9l3JNCroy7n/vU6xsfMWONwrL/D81WWRIZsQosu1/7lebiMVGMbHsUcDy2Yy2j+ysnkptLPtXy/mqzHM/l73ePar7kkV6HjdsfI2/8Ah3fxf7tSx2ci/uXRtn8S/ery6laVSMYnr0aPL8IKzw2/32fc25G2/wAP/wATUUN1JJ8nkrs/jaP7tT3UDyRlIdq/Nt2qnzLTlsUhhZLaZd8n3fk+9XO46ndGnymRJMkkv2aa22K25Ytv96qrXjtIh6J8ys2yrsmnvCpd7mb/AK5t/DUE1nebfsxf733ZN+3dXauQ55U+WXu7lNVdZV2Pn5Nvy/8AoVSyRpqDFHRg+/8Ah/8AHqsrp+6Rsortt/e7aks7F5I1S2Rl27d235mat40+YzlGdPczZLGHc8RhZG2KqMq7W/4C1Tw6a8cjb0Vvl3bm+9/u1pNY/wCnEXMLB9n72P8Au/3dtTLpqSSb97Lu+626uj2JyuMfekc7eWu23cSblZk3fL/CtRrpaXDQv+73t/wGtqazS4uFTZt2vt3L/wCzUl1Z7pEdH2BW+Zl+61bRp8sfdOepzSkZsenvFIkMztkfM0kablX/AGaLrTdrb5ZWLr8qL/C1bCw7m/czbh/BItM1TS3WT7Y+35ovkbf/AKv/AIDWEo25Wa049DH8mEBXmf59m5NtWWvHtFSZ3+7tVtqf+g1DfF0ZYU24Vdrs393/AGaz7q4eZUmRGWJV27VraMvshVpx3iasmoIyvGkzIVbd81RLrkLbpk+/I/8AF975ayby6hmX/RnZjGnz1XuL54Y/kRlRv4q9ChL3PePJrUftG/JrUn+u8759vzRqtV217C7HuZP93fXOS6g6sPJf5mf+Kof7QkVmd34X5n+Wu+nPm3OSWF9pqdVHrB8xn+b5du9Wb5adea1tt+rfM33Y22/N/wDE1zUd9cou9JlG6ludSn8nY7/N/Ay1ftIxOmjl85QLuoah5ibE3Yb+Jf71Y+o6lNDGNn3mfa7b9tR3GoPIqfPg1l3DPJuM24/73/oVZe25h/2a6fvcoy81CZWXD4Zn+b5KoTXzzMrojMn3UarjR/aJNjuu7ZVZrErCv3vl3VhUrGjy2XxcpT+0TQ7/AN58zfc3J92hrhGbf/s/8s3qdbPbvd0Ulv7zVHJYPHDvRP8AZ+X7v/Aq5ZVv5jnqYKZXWT5flDbW/hX/ANmpqqjSf3fl2/71Sx27xyI7/KrJteSnN8q/Inzf7lL2nN8Jwyw/LLUhjV45G+66s/3aPMfzG3809ofMK/3dvystRv8AMqRvbbGb+Kl8XMY8vNIkW+eN9m3lv4qtwX0MaCEPt/8AZqztr2snnJMzbqfBNbNMjuF+X5V3fw1lUj7mptTlyzNi
1ukZtiI1XtLeKOZnR9qN83ypWRb3HlyK7vub/wBCq5bzO27ft2r/AHa5PflHU9KjU5Tp9NndZt8cyqsjfOtdDZ3Dqv2lNqr93bu21xlnJ5cKzJN8rff3VvWd07Yh+VkjTd83zbqiXw3O6nI7LS7x/PR3+VfvbY62rPyVmCJ0+98yfdrlfDd+P40Zz935Vrp9Pjdo3S68zezLs+X7y152IlLmPQwsYyiX/sM0LDemwTfPFtf71ereHyj/AA7UxggGylxnt96vMrODbcLNM+Sq7drfdWvTvDqqfh+qKMgWkoGT7sK/bPAWTln+ZX/6BKn/AKVTPqMogoTkl2OHWOGORkR5NrbVdW/i+WiS1hGXh2t935f9qrMdvDIqfaXZFVvk2/N81L9l+0R7JkZh83zKjfw1/P0Y+/7p6uDomFfaW8TfOkgDbvljbau6snWo9sZebc+35tv3Wausu4d0zohYWy/KjMv3mrndQhSSN03sWk/vPXXE9Cnh4cxxOrKPnf5lhVNz7V3bf9mud1K12xb32s+3btX7u2uy8Q6PcrHs8tWVvm2q3zVz+paXuj85EZCyb9v92uqnL3YnoQw8eSRx11GkeYd6yOvzN/s1nTWsPlrsds79zru3V0mpW8LRv88YMi7vu/M1Zq26LHv+5tTb9yvSo82nY5a2HhIwJ7PdcbymPOf+GoZLVzJsTdhv4WX5a15LdI3Hk8/xf7VRzfOq7Imc/d2/3a9SjKK+yfP4qjCJSt4Z41+Xbtb+Fq1rH942x4VXb/Ev3apyRpHKZEm3H7q/3qksRtmZ0+9/eX71b/YPGqSinqdX4f2Dakb7v9pq7fQbySRkhuZlii+6rLFXA6Ldx+cYdjI7fLuX7y11uh3zqzJNMz7fl27vvVfLzR94iPaJ6JoN5ZxzvZwOr+Y23ds+aus8P3zNa/Zt671T5IWbbu/2q850fVHh2JGnzr8zbV/8drq9DunY/aXnzufd5e7burm9pynZRqcseU9J0m9eORJpkjQxxbdv3t1epfBfVLex8QLqVz8629rI6bU/2fl2/wC1Xiei6puaO5mmVP4XWvR/hrNc3WoXFhbIsrSRM0TR/wC7XjZ1KUsumo9j18qlzZjBnpXwl+Hfw31b4EXmq+OZlsbnxt4oZX1DULhVk8uNvvL/AHal+Aek/DfwT+1hb/D34deOYde0iNo23W7bo45N33a+bv28viA/hvwT4O8AaHrHlSQ6W1xKsO5fL8xvm+b+9Xt3/BBL9mG7+JXxmvPiLrM8kum6VbLcXjSP93b8y/8AfTV+aZdTxPsddD0uK5UJ81Rn64ftXyxWfwdsdRkhkhghs40SOP8A5Zttr8+/if4sfxZfXOpX7/JH8sUjNtVm2/xf7NfcP7X/AMZbKbwzD4Vhs4/skCHKyL95tvy1+YHxY8YXt14uuf32yHzWVo1Taq114/ERryjGEj53hbAzwuGlUqx5eY6K/wBF0HWLXffw27xxxfPJH8kjN/vfxV8sftHfD/TbWyuU8H3KyXk10yy/aLLb8v8AstX0T8PfEmja1cR2GqpJFa27Mtx5L/NJ/wB9V1Xir4A6b8RtJa80DSo7S3hRmWaZt3mVrl9SDnaZvnEeX3on5CfEiDxDp1vMlzDJFIvyvuSvKrrUPEmk28mxJAsn3pNtfpH4s/Zf0S88SXdt4i+zv5L/ACSSfd+Wvn348fB+z09Z30TQVeL/AJ57PurX2eX4rD/DKJ8DjMLiqseaB8k2uraxq1x5L/O/+1W1rHhXWNJtEuSi/c+7W+3hHQdJ1FLy2hmQszfu2ib5ai8Ua9NeW/2BEXasW3dtr069ZSlGEIniQwteL99nENqn2hfJ8xVK/frT8LyTXVx5KfKfu7qpWHh172++T5gybvlWvVPhT8Jb+6mS8e2ba33NtZ1pUqcC6EatSqd5+zb8Kbnxl4ytdBezkVrh9kU2zcq/7Vev/txfsK+PP2c/h3Z/F3xPpUlvol1PHBFeXDL+8kb7qr/FXa/sj+CX8L+MrDUtSso8Ky/M3ys1fZn/AAW2+DOuftE/8E1vCnirwlF5114T8QQ3l1tk+Zo/L8tm2/7NfIVakp5jGD92LPuJYPlyj2kdT8Qta1zTdPhH2l1X+5urc+GvxM+G+i6hDdaxp8dyyv8Ad3bdy/3q5Lxt8H/G0OrbNS0e4ETfKjSVe+F/7PviHxB4kisH02RRI3zs33a+meAwvsOac+U+XnjatOrFwpcx+lf7E3if9k74svC/gnV7HS9ajfb9jvkVWk/2q+7rL4d+CfHHhX/hG/Emj2895a2+yK6WBVZdtfkX8O/+Ce/xqXULbxJ8GXkhuYXWWJVb5m/y1fol+yjrH7S2m6vZeBvjT4Sm0u8jVVe6jf5bhf4vvfxV8bmmHrUo89KfNE+wy+eGxtK1aHJM9v8Agj4f13wbb3Gj/aZGs/tG1Gkb7te6fB3Xrb/hLLbfGzr9o2su1qw7XwTZw6YNQMLBJnVm3JuZmrT+C3jXSvCfxPx4gt1fT7G3mup7m42r5axqzbq+WhRp1sXCMv5kepXj7PKp/wCE/OL/AILjfteaP8dvjpZ/BPwL4hkuLHwDPJFetDuVvtkn+s/3tvyrXxdp9vDNdeckO/bxLuTd81df8ZtQTxt8cvGHieF28rUPFF9cQSSJ80kckjMvzf7tULfS0ZQ8j4Xf86r/ABV+y0qMaVKMI9D4PBYfmp8xGunpHhHh3jZuSNX+VWq9HYouUTzP91nq1Z2fkybPszOWep44Xabem4I33o9v3awre97p7dOjyx92Jl3Fq726u8K7Nvz/AMVZV5pu5XSYLt+8jKu1q6eaF45Am/5Pu7W/irG1a3SZmd0YN/zz3fdrKj/LIyqUeaGnxHHahZhWabewf7y1ja/HIsy/O23au9ttdTqVrtZ3Tps3O38KrXP6p532hneZnVU2pHtrspx960jyKkfd0OR1a1hk83ZwzNu+WuW1yx3MX/ib5fmru9QsvmkHyotc/qOmja2/j+Fa9GnHlZ5Nan3PP9WsbaOPZtr6O/4J02sdtZeMDEhUNc2Xy9uFm6V4brWk5UnY2N/8X8Ve+f8ABPq2a2sfFYb+K4sz/wCOzV+s+EGvH+F9Kn/puZ6fC6tn9J/4v/SWfLP7TRkX9oHxkEGc+Ibr+H/poa4Rrry/k/ir0X9qC2mT4++LZht2nX7k/wDkQ15xIuJN7pk79tfF59H/AIXMVzf8/J/+lM8fGSi8XV/xS/Nj45ts2+FF2t/Dsq1DeddzrhaobXjX5PmX+9up0M/zbHh+X+GvG5ehz/FL3jbsbxPlLx7l37srV9dSDK3kurN/B8n8Nc5BN5ce+F+W/hWrEdw6yMiblSiXxcxEpcx09nqTzYhhRd33d1Wf7eeNVSba235a5WG8mjP2aN9p+8+2ntfbRlNpP95qfNy/CYVNzsIdaTydjurFv7tDapthVIdvzfNXJLqgaPyZoP8Avlqmh1J2k+WfAZfu1rKRhyo6dtWTeuHwrffpn25IZG8jbtk/vPWJHqXnSLC6KzL83mVMrPIymbbt3/w1EpFRj73ul+a
b94+zafn+bbVdmfyf3m1X+9/stTlL58t0wP4d1P8AJmMj7Id/8O5kqPaGsYlCZdq732qf/QagmtIpMfxbv7ta32Hy1Xjcyt95f4qa1n0TKg/x/L/DWPOaRjy/EfR0djbR3BedN+35fl/iqa4tfMhHkw427tm5fvVYs0hZfkhY/wAKfxNV1bdBahIX2/xOrf8AstfGVJe/7x+k0zG+zpDGk6Bkbf8ANteo5Le2jZ/nYpv3bpPvLWpcWPzbN7Kny7d38VVLuPaqI7r95vlb+Ks5RudEcRymDqASV3869ZPn2/L8y1i3sMZ/49oW+Vf4q6a6s0XY/kyK7fLu27lrLvLV1md3XZ8/8XzV1U/e0MKlTmj7xzF5azec2yHe2z7u6q0lpc28iu6LtX79bdxEjXDud37uX+FflqL7G8m9N7Hd/EtepRqS9lZHgYiPN8Rm2du/2j9yiy/N95q2rG3ufM2Jux/dVKdpeioyvOnlp/e/ire0fS03LJsVQ392nUrHPRo9ypZ2O21Fy9s3zPt2tV6x02GRWx/f+833avx2aSK0MO6VY/mRm+7WhY6TMu534TZuVW/irnlU9nA3p0485mNCkTI8KeU33fvblqxbR3LSfaUTMq/8tK1rjSTI3kum3+8q1J/Zu1fsyI22P7v+9WMqnNE3jT989g/YH+E+qfGT4n2nwi0LU7e0vfEmuWVhBdXgcxxPIzKGfYC2BnsPy61+l0//AAQ68IeBZJV+Nf7amgaAt1fvFoLPpscRvIlxhmE9ymJPmGY0LhePnOePhH/gkfJNYfto+BRbStFIPHuko7RsRkNMVYcdiCQfUGvpL/gs/qOp3v7eviG1vr2aWGz0jTIrKOVyVhjNqjlUB6Au7tgd2J71/VXh/i+KcxyvKMly3G/VacsPWqykqcJybjiZRsudabr5X0vZn0GGnj6lShhMPV9nFwlJvlTek7dfU4z9tf8A4J+fGP8AYo1y2k8VSQ634b1KRl0vxPplvIIGYE4hnDDEExUbgm5gRnazbW29D+xX/wAEyfil+1noFx8UPEHiW28FeBbNn83xFq1sxa6VFYu9uhKLJGhXa8jOqqcgFirKPftN1HUfGf8AwQi1C5+M9/MRpmoCLwfcXczB5I4r6NLdFJUlgCZogORsTG5QMr7h8Q/iH+xz8Iv+Cdnwv0r44eCde8RfD3VtH02GCDRJJZEa4Ft5w+0OksBbLiRsEAF0zsBUY9jMfETi7D5Osuprnxn1qphXVp01LmVNKTnCnKSh7Rp25G+VNS8iK+dZlDDewir1faOnzRSd+VXuk3bma6XtufJ3xy/4I5614d+Fd78X/wBmT496L8S9O0iCWXVrWySOObEYDOIGilljlZUJYxlkbA+XeWC15b+xJ/wTy8d/tweHfF2seCPiBo+jz+GUt0gtdUhlYXc8xYqrOgPlJtjkO8BzuCjZg7h9ifs8/t3/APBO/wCDlr4kP7I37MfxHlvZtJa71fTdI0qa5jlhgDESTBrqVYo13ENKV+VWPXoeW/4I5+Ph4X+Bv7QvxL0HShBc6bAmq20AdRGuy2vpY4wAgAwVIyBjBGFGOZnxh4iZdwnmNSupqrSnQVGpWp04TkqlSMZRnTi5QstlJWunffZPMs7oZdXlNNSi4cspRim+ZpNNJtfM8z+Pv/BKX4Vfs9/CTVvEXjL9t3wvF4w0jTFuJfC01oqmeYgEQRhZmnO4H5W8nnglVXJXS+HH/BGGS28A6b46/ah/ad8O/DxtYto5bHTJ40eSMugfy5XnlhUSqDhkTeAR9418UX/ibxDqviObxhqWt3U+q3F615PqMsxMz3BfeZS/UsW+bPXPNfof4k/bV/Y1/aV8G+GfCP8AwUr/AGffFHh7xXp+jxPYa59iuY0uYJo0P2yIxlJRHKyFwpSRAOVdsmvf4jh4k5Dg8PTpY2piOeUnVnSoUXVglFcqpUnZSjzX5m3KSVvn2Y5Z5g6cIxqud2+ZxhHmWisox0ur77s8I/bI/wCCXvjP9mL4ap8dfBHxS0jxz4Ie4iifVNOjMc0IlJVJGVWeNoi21N6yE7nUbQOapfsVf8Eyfij+1noFx8UPEPia28FeBbNn83xFq1qzNdKisXe3RiivGhXa8jOqqcgFirKPbP2qf2fU8HfsG33j39hL9pnXtd+Dl1qqz+IfCNyFk2EyGOSVZvLSZI1k8rfbOuDnzSTgVo/8FGdR1Hwr/wAEwPgj4Y+FN/N/wh+o2liusTW0zMs8gsxLGkjbRkGXznIO354x8uR8vnYPjPibHZVhcvw+Mi8RiMTOj7aVLknSjCHO1UotKKr9FHWDutbmNLNMfWw9OjCqnOc3HmcbOKSu+aL05/LY4D43/wDBG/xDofwxuvit+y58cdL+KNnpiSHUdO0u2X7U5TaStv5EkyzuFbcYyVbA+UOWC14l+xF+xZ4m/bY+JGr/AA60DxtY6BLpGhS6hLPf2skpcqyxpGFXGAZHQMxOVUkhXI2n2X/ghp4i8f2H7Xt14c8OT3DaJqHhm5fxFArHygsZUwysMEbhKwVScHEjgHkg+0f8Eyrfwvo3/BTn47aF4DuY5dGWHUTaup3/AHdTi4VyoO0FnGBwcDlsBqvN+K+LuFsHm+XV8Sq9bDUYVqVbkjFpTnyuM4pOHMt46arV+TxOY5ll9LE0JVOeUIqUZWS3drNbX7dzitH/AOCH/hnR7XT/AAz8X/2zPDmheMdTT/RdBtLNJVkZmKoIvOnhlmyRjIjXnIGcZPx/+0/+zZ4+/ZP+MF/8GviLNZz3tnDFPDe6dIzQXUEi7kkQsqsO6kEAhlYcjBMHiv4k+OfGX7R1x8TfE/iW6vddn8WLdPqNxJucSLcDZjPAVQqhVHyqqhQAABX1l/wX0WJf2lfB5SCJWbwOpeRYwHb/AEy4ABbGSBjgHgZOOpr6DKcZxhkvFmCwGa41YmGLpVZNKnGCpzp8kvccVeUbS5fe10vozsw1TM8LmNKjiKvtFUjJ/ClyuNnpbda21PhKus+A/gCb4q/Gvwl8NobI3H9ueIrOykhG75o5JlV87SCAFLEkEYAPIrk69C/ZL8Y23w//AGn/AIfeM7y1E0OneMNPlljO77ouEBI2kHIByPcdD0r9JzapiKWVV50PjUJOP+JRdvxPdxLnHDzcN7O3rbQ+sv8Agtb8f/GOgfF/Qv2Y/h7rVzoXhXwz4btpX0nSZGtoZJpM+WpVCAyRxJEEXGFy2OvHSf8ABJr4i+Jf2nPgL8Vf2P8A4sX0viPThoAn0KLWJHn+ziRXjMYZjlVSVYJEAIKNuZcHkeW/8FwfAmp+Gv2zv+EunsnW08R+GrOe2uNp2yPEGgdQTxlfLQkDoGU45ye4/wCCFmh3XhnU/ip8eL6wkOnaH4XS28/Y2JH3NcOi9iQsCkjkjcvTPP4FjMHldHwGw+JoRXtIwpVIySXN7d1I3ae/M5txbve10fHVaWHjwhCpBLmSjJPrz8y6976HwNf2N3pl9Npt/bvFPbytFNFIpDI6kgqQehBBFe7f8Ey/i34E+Cn7aPg/xr8R5YoNLaaexe/nKBLKS4
heFJmLA7VDOAzArtUkk4BB8O1vUv7Y1m71f7OsP2q6km8pCSqbmLbRkk4Gcckn3r68/wCCJnwY8D/FX9qu98QeONFi1FPCfh9tS021urRZYBdmaKOOVt3AZNzMnB+YBgQUFfrvHeLweD4Ix9XHRbp+xkpKLs/eXLZPWzu99Ut9T6TN6lKllNaVVacrTt5q36nuX7Zn/BLT9sb46ftG618U/h78cNMutE1q8WWxi1fXLqCTTYtoxCESN18tDkLsOSOSMk5yf+Cq+saf8EP2HPhx+yN8Q/Hv/CWePIri3vLnUpZBJLHDCkqtMS4LhC0nkxk7WdY2JJ2sp+f/ANpb/gqD+2J41+OGr6r4X+K2s+E9L0zWJotH8P6W4gS2ijkKqs4A/fv8uW8zcNxYABcKPoX4teLf+G8v+CRt7+0F8Y/DMP8Awmvga9aK28QW+moJLpo54UkddoXZFLHKBIq4QSRFgvyKo/FqeU8X5HWyCvxFKlPC06tOEY0kozhOcXGnzvlXNFac6g0m+ktz5aOGzLCSwc8a4unGUUlFWabVo301Xe34lP8AaG8Y+Kf2MP8Agkz8M/h58MzL4b1zx6YrjXL/AE7fBcsssRuZiZAQyyMDBGTnPlqUGFAA8Z/4JJ/tO/EzwD+174e+H954x1K70DxfLJp2paZdXcksRlaMtDMqsSFkEiIN4GdrMOhr1P8A4KM2t18Vv+CZfwG+M2j6Qy22j2ltZXqxK5FuHtFhyck4XzLYLls8svPPPzl/wTD8B6l4/wD25/h9ZWFk8qabrH9qXbqpIijtkaXexHQblReeMsBznB97I8FleN8Mc4rY6EXUnPFyqtpNqcZTtq7u8UouOumljswlLD1chxMqyXM3UcvVN2+7Sx5l/wAFvvhPZfBr4qfF3wZonh6HTdOkvIb7TLO2hKRLBcPDMBGvQKC7AAfKNpAAAwPyc1SHZJ5P3jtZv92v2I/4L7+L7fxv+0H8WZbW2EK6eLHTi43ZkaBLdGY5P94EcYGAPcn8idWs3+d3hYqv8X3d1fkniPiMVVWTzxH8SWCouV97vmvfzfU+T4ldWccLKW7pRv8AichdW7qrP8zCqDWrwr8nylq3Ly1ePbsThqo3Nk24Mj/x18BGpynyHLzGfD5nmM/zYX7+2tO3bC70+Yf7P3qhjhS3kZ/J43Vo2sbybfJ/h/vVEpdzWnT5i3p8aeYqOGbd95dlbdnbvN8k0P7vf8+6s+xj43+R977+1/u1taWs21Y4fm2/+PVyyqR5fdOynRiaFnbv5Zm8lnZfm2763rGPzoD/AKM2xlVmjaszT4UVW2fM29WX5q3tJimaREd8fPtdWavPrVj06eHizY0exdVSHy1Xb9z5vvVsafb7m854WQx/Kn93/gNM0XT0jkRJnYln+Rmeui02z32I3cbn+X+Jlb/eryfrXNLlPZo4X3SPT7VFU/udki/5+Wr66em3Dw/My1YsbFIUe5+ba3y7m+XdV+OxSTc+xldVrgqVuWR6VHD+03MtbPbh34T+L56sJbzKzJ5Lf9dP4WrRktbOOON33L8nz7aj+zzRwo6OyfOy+W3/AKFWMZfDJHVGjy+6Vvsszfvw+xtq7WakXT7byw7/AMTfIy/e3VfWGSSG4/fLsk2/8BqW3s5o4/Jhh+Zvl+V/vVtyzluXGnGPxGNcaXZ3UhdNuxfk/wCBVBL4fh2h7lGLr95W/u/w10v9myKyWy/embdu/wCef+zTZ9Hf7QZptpXZsT/er0aNP3QlGHL7pzVvpaW8nyBi/wAyurfw1ZtdMRYfOSGbK/cjX7zf7Vbk3hmGNVcPt3ff2vuqxBpFtbru82R/Oi2szfer0adGPLoccv5TmJdNfKx/bG27fvTfM1VvsDrtmd/njbb/ABfe/vV2N/p9s80Lw/M2z+Fqp3mlwxx7HRt6t/E1dPs+U4ZU/iuc79khaMo6MvmfM0i/3qh+xJZw+ZCm9Gba3+zW9JY2fmK8MyuG+5uf7tULixdG+S552fNu/hp+z15Tn5ZSkY8apJ/qX3OrfdVf4aLqD9233vv/ADqzfL/s1d8vyZAjvj+H/Zpt3apcKrwuoZvvVlKPKZ8pzerWvlt5zp87feVqwLyaTzR/cVN23Z96uk1SGa3Xf5zFt+1ZG/hrmdUV9xMz87vurXD8J1L3oEEl9MzNB5yoW/26zr7UGVim/wCT+9uq55L7Qj220/w1n3lmdzJM6/f+7W9GW5nWw8qnwkMc3nKju+9Y2/ufepjXTr/rtzFvuL/eqw1nNbqqPbMqyf3n21XmtU3B0h2/7NdUa3N7sTqwuW82g5r55d/z/Pt27qZJNNtVJudv8W6kkXyzvdF2L91dlQv5fzyQpt/i2s33aqVafLofQYfKfcigEyecr7/9ylb98u93kZm/ib7tV13tG3nO29vuMqfLtq9Y277Tc/Nt+61ZSrRjG5p/ZREtm8bbPlG7+89Syae7J86YH+z/ABVct7fzmRNkmf8AZTduq7b6bNMrzvuxGn/fVc0sRp8Q/wCyeWJhyaCkipvfYzfcb+7TJtNjjVvLfjf87f3q66z017pf9Tt+RW+b5qq3mjoJFhhhVW37fuVyfWoy0ZwYvK/d0OVm0hFXG/afN+df9moJLH946P8ANt+VK6W4sUhm2fK3yt/urUP9nJuV5HXDfLWlPERj9o+VxmFlGRzFxZvHs2Q8fdfa21dtM+ywrGqFGb/Z31vSWMKzcpsXZ/y0+7ULaei/OkzFt/8AF91a3lW9w8ephzCm09933Nyt/DvqNbPdcLsT+P8AhrZvLHdI2/gf3qguoXVQibU/iqeaXui+r/zFOOF/vwu3y1djjmkkZ/MVGZ9v+ztqGGHH3/m2v97dWlbx7ZEh2K3ybk/2qr4YGlGM4ljT1aNkTZ97761t6azqph8njeu3/aWsqG12yLsTaWf71benwv5a4hVd3yo33a5vhO+j7ux0Oko/8c0hTf8A6pfl2112n2sO3zo9yvu2vufcu5a5fR03CF4YV2b9svmf3a63T45o1CK+8bvmZvvMzVxVI80z1cLU5Ym3Z2G79z5efutukr0fQURvA6orBla1k57HO6uB0iHy7dZXRW/5616FoMUY8JJDEgKeQ4VVHbLcV+2eA0OTPsy/7BKn/pVM+lyio5VJX7P9DlFtYZdlsjsZNy/u1/harUtrcx/uYfm2uzPtermnx3MM3k3SK39xdvzLU0MPmW/mb921/u/d/wCA1+DU6M+Y3o1pRlfmMDCMqJDbSTIzfeV6wdcs/Ojd3hj/ALvl/wAVdlqEbi3e1RFb/ZVNtYOoWMLQoX+Uqm3/AHq6PZ8p7mHrfaOI1q3fzGuXtlES/L97+Jqwr2zf54fmfd8vmL91a6zWo0jiWF3VQ277v+98tc/q0iRqzvul3Ov8XzV2UqMuU9aOIiclqVrc2qsiQKQzbfuVk6hD98OmHX7y1011++mf5Nv+zu3ViagttbjyfIY7vl+auyEJR6CqVqUTAuF3Sb03CRl2pVFo3t/4Gcs21/71al9a+Wyqi7D/AHaosqBTcv8AIy/fVf4q76cbHyWYV
Ob4SKRPs+7em7d/Ev3qLWP7Pdb0Rm/2lpxkfyxvTa7fw0kW6ZneE4X+L/arsjHmifN1q3Ka+lx+ZcBPO2qz/Mzferp9HkfzfOR/Kdfvsv8AFXK2FqHHKbXZflVfvN/tV0elwvHjL7H+6zK3zLUVOaRH1jlOu0u827fJT+LczMtdLpV88dwr3O3/AGGWuN09XjxsdmH93ftZq39Jme4VURF+XaF+f5t1cVSXunXHFR+yd5oeobbgJc/JKvyxMvzK1etfs/68mn+JvOmm+aO1kVI2+7N+7bateG6LdbtUTem54flWTf8A99V6R8I75G8WQwncr3D7P7rV5mYU51sHJHfluK9liozJvil+z/4q/aE0HSn8Kwx3OoWaNbfZY38z5d25Vr7K/wCCPfhbxf8ABT4b+K9N1jQ9Qsrp7uGGeGb5fl3fw/7K1t/8E8vgq/wZ+JjeNvFOgSXelWSyXpdf3mNq7q+gvhh8eP2ZvjNf6v8A8Kp0a5l1+/uZGubSCJlKFW+Zn/2a+FqqnGhyP4j2MZWqVMU3yc0Opyv7TmuTLvR5pHMMS72X7vzV8RePrHTZNeu33ybppfNlaT5lVf8AZr6n/aG1z7VNeW015JB5bMqKrfK22vnpfDqatp5mvIcmR/8AXfd2t/u15MY+yZ6EfdgeWaPq1h4f1R/sd5u/jlVvu7a9L8G/H+wtbaW1v9YmSwVdu2SX+L/Z/u1538WPhzreizM7wSYb51WNN+7dXz58T5vHmk+IrR7aNgnm7v7q/Kv8S16FHDupLmiefiq1CPu1T608XR+GPE1r9s8MeGLi5E0TM9006+Xt/wCBV8+/Gi38Z6Mvnf8ACt45rf7rSQorMse35d1eY/8AC5PivJJ/yEpv3O7fJHLtX/d21et/jl8YLjT/ACZpFltmfbtuk+WRa9WFPFU5RbR48vqM+ZRZ5d8QfiB4J1Czlhm8HtFPDuX5vlryHVrVNcvPs+j6a23eu6OP5q9u8beGbDxhqj3mq6VHEq/xW/8AFUOh+E/Dekx+Ra2y7YW3MzfeZq9ehW9h70nqeFisJKvP3YnK/C74B3OpXiXOqwthm+f+FdtfSfh34Z2FrpsSabCq+Snzxqv3q5bR9SsIY1sLZIUaP7qq/wAzV7L8IY7PXLT7HeXLJcbP3TRrt3VrWxyrQDD5b7HVfEdd8MNP0qz8Jprd4lmj2rR79z/vG3N/yzr9A/2ePCuifGj9mnVPAvifzXsLu1aN4WXcsn93/wAer4kh+Ad3D4fPim5ufJtoUVmjb5VVt33a+0/2H7rxLd/DwaR4ZsxPA0amRi/yxxrXz+Pjy1YSPqsBH2mAqQmfm7+3l+xHqX7OPxWhvPGmiTP4U17b/Z18vyrDJ/drX+Hf7Bt54i0+z8Q/C7xbD5Eksb/Z5Nr7mX7y1+rv7Q3wR8GftB/Aq68DeObBrnZFIbWRl+aFm/ir8vJvg/8AtG/sR/EpLHe2peGI7pnsrqNmZo1/hX/gVaVa05YWNSn7zj8SPNw2GpQxLo1fkz7P/ZV/Z78beEL63v8AxRCv2aGJfK8mBVbd/FX0p4u0nwlfNYQvokbz+asUU0i/Oqt8zfNXzf8ABX9szxCvhmF/E/hhjFJF/C+2SvY/hz4i1LxvqFtqb3LIrbmit5G/1a15H1irK66yPSxGBlH4djtvHsGmeGNBR41by1i3RM38TV8lfHzxpeaP8KfiH4hsLzyn/wCEXvILeRn/AOei7d3+z96voD4+eNg0Y0RrnZ5ab9sf/oNfMX7Ukltp/wCyj47165mbz5LCG3WFU+WRpptu3/vmuLCR9pmsEo7SOmph/Z5VN1Ox+b2j6HMqok1y0h+zr5s38LN/erdsdJhaEQwzKqb9m1k+Vmqa1tUt5gjurKv97+9WppdvMrfvkz/FtX/0Kv2Bw5ocyPicL7vukMOg3nnCZHjDxv8APCr7W27altdPmhV4ZkjWRl+61bsciSSedNbfL8q7m+9/31U91pKLbvMjqNvzIqp935q46lOctz16fuxOKvLab7Q6WzxqV3Km6Lc1Ys2j3KyPNI/m90X/AGq73VtPSS4e5b5Hk/1S7Nq1kXuhw27M6QzHd8zt/do9nLmsKpGEtjgNc0e5jdkmhZP4tqvuXbXK32mv814m4rv3L/s7a9U1bTkmg87fGsWz/dZv96ub1bw35kf2b5Wdfvssu1V/2a7KcYxPDxVP3vdPNNQt5maR5pvut/q1VfmX/erA1ix3L8nzLu+f/Zru77Q5rWE7Nu1k/wCBVgappaRskyIzp954d/8ADXfTlE8StG3unBatbTqr7Eyy/wATfdr2/wDYQt1gsPE+0/entCR/wGWvKNc092X92jbdny7V/wDHa9j/AGJovKsvEeEA3TWpyO/EtfqvhAkuPsLbtU/9NzOzhdWz6nb+9/6Sz5h/acsIJfjT4qk80gtrlyCAv/TQ15VqkOdyJCv3fk+bbXt/7RNktx8YfE+4DB1u4G4L/tmvI9es3Hzuiqq/xfe3V8Xn8eXPMV/18n/6UzwsZ/vdX/FL8zBTfJ1enq3k53puWmXcflNs8tR/u1CjSeXj+Fq8XlOb2hPDJtk/c8t/Aq/xVYW4dlHybf7+191VrdnG7/fqVZHjbKf7vy1X2Be0LH2ieTYibfm/9Bpv79tvbb92mpbzSbZjDll+XdV6Kzj2h33D/gH3aj+6YyjzFaFXZWfzty79tXbW1uZJFcBXFW7DR0kbfs3LWvY6WzfIkXzMn8NVKQombbw+Sq/Ju/3qt2lm8eX6Lv3bv4l/2a2LfQ0bY4tsszf981etdDMa7/vMzf8AjtYSlKUjanH7RlwwwNh3hb5fl21Yt7abd9/buTbt31t2+g7pPnDJt+ba1JJo7wyb9n/AmrnlLlOunT9oYzWc0J2dV/2aRYX2rDtU/wAX7z5a2X0vy03vSeX+8V3s1Xd8vzfw1hGpI19ifQS6S/nSpE67413IqrVlbV7dWCIoHlK21vm3NWvNpf8ApDvhWeFPnbd95qia32SbH2qF2/N/er52pT5T7Wn73xGLcW91IiQwzKh+9u+9WfNa2wxAkK5bcqsyfxbv4q6G+0/zriLZbKPvKzM33WqpeWqbhBsVvn3IrP8ANWPvHZKMOQwLu1mWPZ95F/8AHmrK1COFmX52+Zt3y/w1v6pH5m7bcsPLT+H+9WXdRpNN9z+Hcy7fu10Q92epw1OblkjAlhMl9smfaF+X5vu1DJayWTbJHZyz/dq7eR/6SU8lcb9rrUa7Ps/z/wDHx/Bu/u13c38q3POlHmJNNt4bO186H5tzbtqr81bmkw20kyTJDIyq+3b92sqzWaNwUG6Hzd27+LbXRaTNDHdJsRtirv8A9n/gVRL93qKnH2kuU09L0n7UY02YRv4f/iq1V09422Dy9v3VXO6ordkuFH2ZFbcv71fu/LWlbxpGqeTAqhdq/N/D/tVwyqc0T0I04RGLpqSfP5PzfwK3y7qhbT0t7VbmH5dz/wB/+KrrW+2N/tL79rrs2v8Aw1U1Jd0zJbPhG+7H/dpUypxj/KfTP/BKnTb7Q/2u/hnrkd2U
bUPHOnKqoeVj8/y2B+oZgfY1+nP7dP7TX/BOXTf2gL74b/tefs86jrfiDwzDbfYdZ0+xV/tFvNAk6ozpPE5CtIw8t9yjJIxvYV+TX7M/xc1T4CXvhb42aLpVrqF54Uu11S2s7wt5U8kEhkCsUIbBK9jXm/7e37Y/ib9uH9pjX/2jdV8J23hj+2Ut4E0ay1CScW8UMSxRh5GC+Y+1RuYKgJGQq1/RfEs8s4WoZDWlSqODwSt7Ks6U1OclUk+dKTs+eS5bWtLyLzKjToV8PUkny+z+zLlabd9/mz7m/by/4KOX/wC1LomnfBr4UeCU8G/DbRCn2LQYkiV7to8rC7rGoWFEQgLAhKqcks+F29D+xv8A8FMPC3w8+EUn7LH7XXwyPjn4dyDy7ELDFJPpsWWfy/LcKJlEm1kberxHJVjhFX8in1jU7iRkS8nH+y0hWmT39x5w/wBMZzs+VlnPy1MvE7g+pkEMnWSuNKMueLVdqcam/tFU9nzc9/tX1Wj93Qbx2WPALD+wtFO697VP+a9r38/lsftl4v8A+Cn37Kf7PPwz1XwT/wAE6/gBceG9Z16Ird+JdVto0e0YYCuA7zPclQX2q7KiM27DZZT41+xx+3T4X/Zx+D3xe+H3jTwdqutaj8RdIMNnfWt5GqpO0U8TGXeMqMXDvvG8kqF2gMWH5ZTa7etEiveS7lb518w/LVO+1u7ijdn1C4Xav3vMLM1GD8Q+FKeW1sG8qnU9tKE6k54mUqk3TkpQ5punzWi0rJWVr6XbZjHHZdSw8qbouXM023NuTaaau7X0Psa1urmyuY72zneKaGQPFLG2GRgcggjoQa/QOD/gpp+w9+0x4O0Ww/bv/Zn1DUvEGhWSQR6zpQEwuW2gSOGSWCSIOwLeVl1BPU1+EF3q+oSycajcK23+KU/40+DVbq4fbLeSP5e07fNO2va4i8Vcl4ndKeKy6pCpSbcKlPEOnOPNZStKNNaSSs07jx+f4THcrqUWpRvZxnZq++qXU/aL9rf/AIKSfB/xT+zzL+yR+x38Grnwf4PuLhGv726dIpbiIP5jxCJC5+dwhaR5GZgpUjnNVP2N/wDgph4W+Hnwik/ZY/a6+GR8c/DuQeXYhYYpJ9Niyz+X5bhRMok2sjb1eI5KscIq/j3p/iG+lVzPNKBu3LtkNWo9e1BVd3v597fLuydy15v+vfB8ckeVvKZOLn7XneIk6vtf+fvtOTmU/NO1tLWbRxf2zln1R4f6s2m+a/O+bm/m5rXuftp4t/4Kf/sqfs7/AA01TwV/wTq+AE/hvWdejIu/EurWsavaMMBHAd53uSoL7UdljRmztbLKfEv+Cd37bnh39jz4z+I/ip8RPC+q+IDr2gT2jNY3KCX7Q0qThn8z7wZ4wGbOVDFgrkbT+YsXiO9LjbfOB5X3VJ+Wr9r4ouzMkM08hjblW840Yfj7hTD5RisBLK51Fibe1nPESlVnbbmm4c3u9ErJdtXfOGfZdChUoPDt8/xNzbk+13y306H1g2vW7eMT4n+wv5R1P7V9m84btvmb9m/bjOON233x2r3b/gpD+2d4Q/bZ+K2hePvBvgzUdGg0rw3FYTR6nPG7yS+Y8r7QnAVWkZQxOWADFUJKj86rPxBfLCqfaZWKtuVo87v92tnSdcaILcxXM7L93czHdur2cX435RUzTD5hUyuTq0IzjB+3dkpqKldezs9Irc9iPEmGxGIhWdB80E0ve72v08j3OgEqQynBHQivJbHU5JV3797bs/eP3q1bXXJHZ3ikcfL/AKljtb/arpq/SWo0/wDmVt/9xl/8qPapZ+qqv7P8f+Afpr8Nv+Cnv7NPxi+D2g/B7/goR8Br3xdP4dtxHaeKbUrPPORwHbLxSxOUWMOyyN5hXcwHSsD9qH/gpf8ACS8+AN7+yp+xJ8G7nwP4W1KXGq6nIyQz3cDDEsXlxlzmTbGryvIzMgKEYNfnXJr1zD9xHT91uX95upW8QvHbur+bhvl6/KrV+d0fEvg/D5lDEwyepyxn7SNL61L2Mal786p+z5U76pfCuiPIisrp1VUVN2T5lHnfKnvdR2/Q+6f+CZn7c3wb/Yv1rxPefFP4SXWsSa3aRx2et6RDDJeWwXO62xM6AQyEhmKsDlBlX+XbxXwf/bY1T4B/te6j+078J/hzp2l6dqWoXXn+DYJilt9gnfc1qrKPkIwrKwXaroCE2jZXyJLrBhCwvPI27+JTVJtRu8PDNKy7X3blb+GvWreNXDVTH4zF1smlKWLgoVU8Q3GUUrJKPs7LTqrNbqzbv0vFYGdWrVlSu6iSleTs0vLofq/rf7bH/BIT42anJ8SPjX+yLrVr4nv2MmqiwtgUlmJyzl4LmESsSTl2QM3U15h+2v8A8FLPC/xu+Dlt+y9+zl8HY/BPgG0ukkljJjSW7SNvMSMQxDZCvmZkb5nZ2CnI+YN+bOpXF3DGq211KZPvbdxw1ZN1qF+gbF5KdqblVXPyt/drzMs8S+FsDi6OK/s2tV9i1KlCpjJzhTa2cIunZNLRXvb11POhiMuwtWM3CUuXWKlNtRfkmunQ/TP9h/8A4KPeHvgP8LNU/Zq/aM+GMnjn4eapcb4rFpI5G09WJaVFilG2VGkCSBdybH3OCS1eqt/wUz/YY/Zl8N6q37Cf7L13p/iXWbGSBta1iJIhakjKEs8s8kqK4RjCCiMVHPFfjBquo3IVsXs/8K7mc/erntT1e+l81GnlX5mZN0h+WujHeI3B+b4+pi6uVVEqslKpTjipRpVJK3vTpqCTeivtd6u7bM8Vi8sq1pVJUZe87yiptRk+7VrH1j8UNOk+MVpq8Hj7Vby9m1ydp9TvXn3TzytJ5jSM7A5YvySc5ya8ok/Yk+DskPkG+1sKTnAvY/8A43XgOp6pqjsVGpXGxv70pb+tc/f3eoxK+7Urgtvwi+c3zL+dfT5l4ucK53VjWxuRxqSjHlTdRaRV2kv3e2rNcTneX4uSdbCKTStq+n3H0jJ+wN8D5Dk32vD5cYF/H09P9VUR/wCCfPwJLlzfa/z1H2+LB/8AIVfKt5qd7BIGTVrovv8Al/ft9386z7zUdUZj5eoT7P8Aanbd/OuP/iIfAf8A0T0P/Bi/+VnE8zyR/wDMDH7/AP7U+uD/AME9vgUW3f2n4i4z/wAxGPv/ANsqcP8Agn58Cwwb+0PEBI6f8TCPj/yFXx02p62zB01af+7tadv++utPh1PW5mXfqs4+f+Gdv8ab8QeAv+ieh/4Gv/lZUM0yd7YKP3/8A+y4f2D/AIKQFSmoa9kdzfx8/X91VqD9if4PWxBhvdaGBj/j8j/+N18f2upapt+fVLn5f4vPb/GtvTNSv5Zkht9TuPm+ZlaRvm/WueXiL4fx/wCaep/+DF/8rNlmmU3t9TX3/wDAPrCL9j/4TxcpcatnOcm7T/43Vq3/AGVfhnbSiWO71UkDABukx/6BXzNo93rB/ez6hK3zfIqzN93866LSrq8KrNJdXH93b5priqeJXh4t+G6f/gxf/Kzrp5hlstsKvv8A+AfQ1v8As8eA7crtuNRO0YAa4X/4irk
HwV8IW5UpcX3y9jOvJ9fu14Xpup6hLJ5qX0qFX/e7ifmrdtr2984+TeyOW+bazn5a5ZeJPhwnb/Vmn/4MX/ys7I4/AvfDpfP/AIB67F8IfCkKqoluyEGF3Srx/wCO1MPhh4byCZLrg5H70D+QrzDRdRuNizXV84f7mybPzVeivLg2kkUch3SfLiRi1Yy8TPDfm14Yp/8Agxf/ACo6oYzB8vNGivv/AOAegN8LfDTsrmS63L91xMMgenSpF+G3h5ZFlD3GV6Eup/8AZa4EXUjSR/O+V6tuP3f7tT2M8zxn7I7ff+75h+7SfiT4bf8ARL0//Bi/+VG0MVhmrqkvv/4B2zfDTw65O6S5wwwVEigfotTL4B0FAMedlVwrbxlR7cVyOlw+YiXL3Mof76qzFt3/AAGtW0S6eNXlimX+Dcw+9/tV1UvEfw4n8PDNP/wYv/lYfXMPLel+P/ANqLwHoUTBgZyQcjdIOP0p6+CtGBDP5rkZxvYHGfwqlawySSLsw7N95VX/AMeqza25nb7Xbo6Fn+RWFdtHxB8PJ6Lhymv+4i/+VjlicNGP8Jff/wAAnTwfo0b+ZHG4OMZ3Dp+VKfCOjkY2OD/eBGf5Vet9DF5aCLyxtZvvsdu2nXGkBLpI5rgfd2qqr97/AIFXUvEDgBfDw7T/APBi/wDlZzrFYSX/AC5X3/8AAMs+CtEIUBJBtORgj/CkfwPosgIlMzZILZYckDHpVwaWzXRS5QlNv9/azVQ1LSr+3iUoiDe/zLn5WrT/AIiBwFb/AJJ6n/4MX/ysyni8FF/wF9//AACB/hh4YdizLP8AMCCPMGDn8KZcfCvw1chRLPd/L6SgZ+vy81T1KwnRmeAzbF/5aSSL92svU9MnSNo453lMm1Ym3D5l/vVMuP8AgGGv+r1P/wAGL/5Wccsxy2DusMvv/wCAbI+DPg8MH33m4HO4zjP/AKDQfg14Q3tIj3alv7sq8f8AjtcbNBcIxRbqVmba33Cq7qpXVreBiIWbePmlZnLfLXK/Efw/5uX/AFdp/wDgxf8Aysc8wy6muZ4Zff8A8A7a7+AngW8yZGvVJGCyTKCf/HaoN+zH8OXBV7nUyCcnN0v/AMRXH6qskqhBeNGv3U3OfmrY+Cv7K/7Rf7SviePRvgz8M9b1x3fynktY3WNWX+JpG+VVrN+Ivh7y8z4cp/8Agxf/ACsn+2MsjK0qCXz/AOAasn7Lnw3kIzd6qMYwFuk7f8Aph/ZV+GRJb7TqmSck/ak/+Ir7C+DX/Buz8Xbq3g1v9o7456f4ZhcsZtH00m8uFX/eX5Vr6D8Of8EH/wDgn9ocMaa/4p8ea5IyrumbVBAu72Vf4a8yv4ueF2GdpcPU/wDwYv8A5WaU8ywtX4MI3/Xofl5P+y38Nrn/AF11qhOc5+0pn/0CmSfsp/DGTGbnVB/u3KDP/jlfq/cf8EQf+CdMg8v/AIRnxdDuRv36+KnZt396vOfGf/Bv7+yjrDk+EPjX490hPu7JZYp1rmXjP4VPbh6n/wCDF/8AKz0aGYUlLSg0fm/P+yH8K7mQyy3msFj1P2xP/iKYP2PfhQCWN3rBJOSTdp/8br6q+Lf/AAbwfGrRklvvgh+0Zo3iaBf9VZ+IEktblm/u/L8tfH/x0/YH/bc/ZzkuE+KvwZ1qK2t5f+Qlo+bu2aP+9ujr0YeK3hnV+Hh6n/4MX/ys9nDYzDV95cvqjXX9j74UJ9261f8A8Co//jdSw/sl/C6AbY7rVgM5/wCPtP8A4ivBIJ71L86Wt5NFKv34Z2dW/wC+c1p2IvlYb9Tuf725pDWkvErw6cdeG6f/AIMX/wArPZoYJV43jP8AA9xP7L3w2IIE+pjLZBFwnB/74qZP2bPh4gws+o4/6+E/+IryJEktlBW5lLSfc2yP8zVoQT6nGDDPqE25v4d527a5ZeJ3hx/0TNP/AMGL/wCVms8vqxdnL8D04fs3fDsNkSah7j7QvP8A47RL+zh4Clk8xrzUx7C5THt/BXlqGaSTyLy7ddr8t5x3VQvJrpTsS4lx6+Yf71S/E3w3f/NMU/8AwYv/AJUeXiMOqbbauesS/ss/DSZy73Gp5Y5P+kp/8RTJf2VPhlKQxutVUjptuk/+Irxa4vb6RnH2112t8+1jVSW6vZGb7NezIP73mHbTj4neHD/5pmn/AODF/wDKj5nF1sHTV5UE/n/wD3F/2TPhhJy95qxwMf8AH2nT/vik/wCGS/hbgD7TqvAIz9rTv/wCvFI31G4VVS9uA2cblkO1qe1ver88d9M5X+HzDWs/E/w6pxV+Gqf/AIMX/wArPKWOyxy/3Vff/wAA9n/4ZH+Fe8SG41UnGObmPken+rqB/wBjf4RuxJudYGRggXif/EV43u1JoykM83zbmSTzD8tZd42o5WP7bKHb7zLKf8aqn4n+HU3/AMk1TX/cRf8AysbxeVf9Aq+//gHuo/Yx+EIGDdawRnJBvE6/9+6kT9jz4UIjILzWMNjP+lx9v+2dfOEs9/aXYZdUnQ7vutK3+NSf2pdqWlTUrlpG+bb5p/xreXiT4eKOnDdP/wAGL/5WZRzHKnLTCL7/APgH0jH+yR8LovuXmsfX7Yn/AMRU9r+yx8NLRNkdzqh92uUJz6/cr5xsb/VJbjzprm4jVvmT5zXRaHHeTRPFPdyOVbP+tO1qUvEjw7ir/wCrdP8A8GL/AOVl/wBo5Y/+YVff/wAA98sv2fPAdgu23m1D6m4XP/oNaEPwi8KQLtWS7PuZV6+v3eteUaIbidQq3TN+6YL++KqrVvaLcX1orPcK5eMqPMOdsi1nHxG8OZO64ap/+DF/8rG8zy2l/wAwq+//AIB6LbfDzQrU5jluD6hnU5/8drWttOtrTTxpsW7yghXk84Of8a4eyiu4lSZp8+Z9zaK7DTVdNAVZcKwibJJz681+l+GfFvCecZli6eAyaGFlChOUpKfNzRTjeD9yNk7p3122PVyrMMFiak1SpKLUW9+mmmwg8Naech3lYHHBfHT6AVYGm26xrEC21egAAz+QrJWULue2D+rw5/8AHqtFQkQfcERfv5NfnUfELw7Wi4ap/wDgxf8AyszjmmB6Ul9//AJxoNiu4K0gDMWI3DGT+FVbjwVotyhjlEpBOSN/f8qfLa3Lzec0BdF+bcx27awvEdrKJAyySB4fRvl2tTXiF4dXt/q3T/8ABi/+VnZSzLD20hb5lm8+EfhK92+d9p+X7u2UcfpVKX4DeCpozG9zf4Jz/r1/+Jrltbnlt4HNsSyjcrLuK/8AAq898Q6tco7RR3Eqsv3VEx2tXRR8QPD6e3DlNf8AcRf/ACs64ZlTteKPXpf2afh3KBvuNT4IIP2peo/4DUcn7L3w3lJMl1qhDDBBuUI/9ArwaTV76NvJhurj5fveZKfu1Vm1TUmkXZdSfN8zfvD92uuPHXAD24ep/wDga/8AlZlWzanCN3G/zPfH/ZP+GUgO6+1fLHJP2tM/+gVBL+x/8KpQQ17rPzdSLxOf/IdfPV5rF8JHb7fOuf4vNP8AjWdd6vqjReVLqE+xd3Kyn/GtFxzwGo/8k9
T/APA1/wDKzya2d4KMbulf5/8AAPpOX9jn4UzIEbUNaGO4vI+R6f6vpTof2PfhTbqFjvdYwO32tP8A43Xypca3eyMTHqVx8v8AC0zfL+tOttc1adkji1ScH+FhM3+NJ8ecA/8ARP0//A1/8rPOeeZZf/d19/8AwD6xt/2TvhfbYMdzqvyrgZu0OB/3xVyH9mr4dwjCz6ic9Sblf/iK+Y9F1q+df315cNIv8X2g/wCNdDY6zdzXCma6kVdm5VaQ1jPxC4BTs+Hof+DF/wDKx/23lslf6svv/wCAfQ0HwE8EWyhYbjUBtGF/0heP/Hamg+Cfg+3IaOe9yO5mX/4mvC4b+/jjEiXbyt95f3x+WtTS9YmbMM1wyurbnXJ3Vzz8Q/D9b8OU/wDwYv8A5WdEc2y/ph19/wDwD23T/hh4e01w8FzdkgY+eVTx/wB81raboNnpWowanZySLLbyiSPLDG4fhXjlnq108CJFM7H/AGWNeqfA7xbpug+PLK/1WTbZxyrvMi53L/FWdTxE8P1C3+rdN/8AcRf/ACsl51l8XdYdff8A8A+lfD3/AAUV+Pfhf4fXPw50bTfDkdpd2pt5bo6bIbjYVKnDebgHB9Kwvgp+2h8VfgBoWp6H8OtF8PQtq+BqF/c6c8lzIoOSm/zBhT3wBmvvD4kftGfs1fFP9kbSU+G0Vkuv6IlvLp6vZLHI0kf3scc18taf+zB8bvht+2d4V/aV+NN3aDTvF9x5ljamQPujWP8Au9F/2a+fxniT4Y0EpLhalLT/AJ+JW/8AKR6WV5zg8x5qc6fI77b3fTseReNP2qviZ48uWudatdKUs+4Lb2jqFPtlzWIPjZ4x3ITDZEIcqhhbb7DG7pX0n8fLPSjq19NBaRrKZd8iLEFIj3bvSq/wk0q08QrNNFpKKscW7aYw3/fVeLPxZ8K1HXhKl/4NX/yo+mp0/aHg9v8AtI+MIbt7648NaDcytAYla4sXYID/ABKPMADe9ef+LjaeNdVOr61pluZD0WNCFAznAySf1r7T8QeKvAvgnVEsLzTLfUXKM0qraqyw1xl74i0DVll26RaM3zPueMbYa9DB+KfhhUhePCtKP/cVf/KjzcbKlRnyyhzHyK3gHwizMx0SLLnJPOemOtZ158IPCV4csbpOuPLmAx9OK7f9pP4xSeHrddG0bTHeOXc3mlwBXxr8Q/iDrN1qRtri8m2yMxz5pK7q9GPif4cyjpwvT/8ABi/+VHnzxOAo6ypJf16H0Bc/s8eCLh941HVY/aK6UD/0Cq4/Zn8AgY/tTWMYxzdp/wDEV8j6n4x1J2kha9nP+15p/wAa5a88ZaxPOiR6hdeWzbX3TN/311ran4i+HFf/AJpen/4MX/yo4qmdZfTdlSX3/wDAPubRf2cfAeh3K3Vvf6rIy9pbpcH8kBrudCsbfw7fLf6cmHUABX5HHevFfhx/wUB8P/sw/CbRfDPg+K0vboWbf2neXdos5mZv9+ty9/4Kk6N4n8FC70qO1h1Bfv8A2e3Cttrkn4k+HUXpwnTf/cRf/Kj0qeLy1K9op+v/AAD6D1L42+OdV0X+wLme3FtkYRIyOn/AsfpX05/wS8/aiubT4lR/APxDooZPENrJBpF7ZqQ0U6RvIfO3PypVWAKjIbHGCSPx/P7WfirxL4qPiPxD4gvrmeWXazXU+EZf4V219u/so/FJ/AHxL8HfFVSD9mMVycNgESQkHn6Oa+jyvFeH/HvDmcOlkUMLUwmHnVjOM+aXMoykrWhG1nHXe60sbYephcdSnToqz/qx+hPin9rrwf4B+IF78IvEOpLb3bRbYmml+6u7b92vP/E3jLSfiRpMvhW8mWaxVvkkZfmkb+Fq8E/4Kha54Y1r4Xr+1F4Pv47TWNNv4/tUa/6yaFvvKteN/s2/tOX/AIkuIEu9TuJ5mdVlWR/lb/dr+Rlzyj7WD92R61HD4eUOWovePqr4E/s9+LU8bPNo+pXFxEt/t+zzfd2/wr838NfUd5rGq+GvDcOlXmiRw3bJuna3Tay/w/LVD9kb+yNb0K31Sa2VZWbczRv+93f3mruPjSumqxSKzb5YmX5W+ZqdaMY0HJfEeVWlKOMVM8K8X302pXn/ABMkaTb/ABNXhX7a3iC80f8AZ1ltoYvJs9Y1GGCBZIt3mSK38O7+7X0N4k0220vRvt80bDd96T7rL/s18b/t2eMLzxJ4g0TwBczSS2Ol27XirDcbo1kkX5fl/vUcNYf6xm8ZS+yXneI9nl7ivtHz5a6RebW/fRhlTd+8StbTYXbZDbP+98r723/2anabazW80XmzfIz7WZU+bbV+G33XSuNvnNL/AH/4a/WZRhL3T4vDx93UntbeFbd32eUy/N++/irSW3ddqP8AeZfn8tvlqGGzcRs83lojP8ke6tOxsy37lHX5tq1PsY/EejGXL7plalpcLbpZtqN/EzfN/wACrI1HTXZfOSaTf/Ev8K//AGNdj/Zb+S0I3RFflaNvmqCbQ5mt1mFm3zJ/EnytWlOmpS0OeVT7J51daKjQO/kwzL8r/M+5mrB17QftDNc71j/56/3K9LvtFhj+eEbxIu1FVVrm7/QfMhZUtsKvzN5la06f2jzcRV908m1vQZomaaHb++2/Nv3ba5bWLV4VdEds/d3N92vWde0N47OW58narPu27drL/wDY1xuqaK8Lf8ey7Wbc8a/3a6adOMjyKlM861bSxHCR8zNGv3mf+Jq9S/ZCs/sdrr8ezH7237YzxJXIavo6bXR337m+SNq9B/Zjtmt4tdLoAWnh4H0ev03wjv8A6+4W/ap/6bmdnDUYxz+lb+9/6Sz5v/aAsUk+K3iQgr82sT7lK/7ZryTxBpPkyH522V7v8b9O874k+IZCmVbVLgHcvfca8p8TaS7Yfzm2/wAKtXxnEMf+FvFf9fJ/+lM+czDm+u1Y/wB6X5s801Sz8ubfsU7qghtWkVnxjbW9q2kvGzb3Vd3zKtUo9Jdm2P8AL8n3q8bl5feOMpw2TyZ+fH+1VuK1eGVJH3fd2otatjo825fITen+1WvY+H0umWaZGT/Z2/dqOX3dC5HP2ukzSSbI9zbf4a14NBmk2pD93+NWrpNN8NqyB4flRU+WtPS/DbztFcp87L975avl5/dI+E52z0Py9qeSw/2q2bfR9pXbw395q3F8PiOHe+1/+Bfdq9H4bm83Yib41/5bK/y7qylzco4mJY2tzCqf6Nv2/NtatjTbFJF3um5m+Zdv8NaFnod55vlodqt9/wAz5v8AgNbmm+F3VVZ7b97/ABL/AA1hKMzppmRb6G9xILxE+RZf4aW60d5GfEOP9n+GuysfD77Um8vCw/M1X/8AhG7U/f8Allk+ZGX+7XLU8ztpy5ZaHmFxpEMg8l7ba33vmqhLo26Rnd1G7/VMv8Nejal4XLSP8mX2feb7tYmpeH0t1SFId7b/APdrKPx+6dMZRl7p7akMLSB5tu5fuR7Pl/4FUFxbv9q2743iZfnZfvVsyWr21r9pSLa6syr538S1W8794k0MOz5NzfJurzJUT7SMTGuIUt5DDN8rSfMu1
fvL/tNWdf2Kecr20ioNm7dJ/DXTahbJcLvSzZhIm5v96srULW2aNYfmZ1/h2/w0nS5dUKpLlic7q0c0cbpZzL/e3L826ufvo0C7PO5b70bV0lxZ7l3ui7du1N3y/LXM+II0s2MyfIrN95VqfYyUrHFUqR+0ZepTfdm379332/u1Ta4/5Y+cp3fxU/Up90jSQp+62/Nuqrat5KfvnjxsVUjVfu/7VbxjynnSlzS8jYs2maNfnyq/3V/hrZ09khh3I7Jt2sjN96uZjvNzeS6cK/8AC3zVu6Sv2p/JRN38XmL/ABVjWjyxKoyjI6zTbxGhS6mm27m2NJWlDqFt/qfmdY93y/drnLOR4YUSE4TYzbf7zVaW8ttxvPJbzW++y1zcvvanVzSNj+0N0LIlsyuqfd/vf7VUby4cR75tzr9146zrjUpmZXeZY23bWjZ/++ai86ZSZpp1x8y7d33WojGZUqkuh7D4eYN8F5GQYzpd1gf9/K+c2uEdlTfsl3fd+8rV9DeF23fAt2kO7Ok3e73/ANZXzVqV1+72I67Nu5l/2q/fvFayyLh6/wD0CQ/9JpnTn7k6OH/w/wCQ/ULhGVjN5jOr/wALVnX+sRhWSxdQn97+KorjUIUVnRFV2+/uesa81ZAqv/e+7X5DCjzHy0sRLk5TZt9SkuNrufmX+791qbc30PmL/pkjt95l/hjrnYdYeOZk879yz7U/u1dutQhmhDw7gm37tdlOPKYSr8pozXUdxGN+3O37yv8Aeao47qaJWS2fZtbayt/FWTDqW24fjejf3m+7T11Kb5od+U/vMtb8suU4albmlc2luEaTZ0H8G5qsLfTKPJR9jf3q52S+e14++f4Ny063v/NV/wC8z/8AAqcpfZFKp7v946iPUsMUd1fd9+rNvqTzM/zrsh+6q1zUNwZG8t3Yj+OT7vzVo2d88jb/ADFwybdy1zVJTN4/zHV2OpbpVCXP3k+Tb8taul6pt2qjybl+bcqfLXJrqCWsMUe9f/Zv9mtXS7yZV3vMrbfm+Vfu/wB6uKodtH4jt7fVJmhT9yoT722N9rVpLrW+3857zczOqvIyf7NcVZ6pDcKyedtVf+ejVYbVHMaOkzMzfNtj+WOvHrLm1Pao1OWB2C6ptVLmNP3jK3zM1Mk1TLM73O/7vmqtc8uvPDZ/67BkTZ/lqkXVpolb/pomzatRGnIupUgdCtw8krI7ttm+5SrdWcarvm+dV+dY03Vi2987xqjvIpV9v+1t/vVJb3CXEh+fzd25Xm27dyrUezI9pD4S3NdJdq800rNE391dtYV800sazW0jJ95XVv71bWyGaFX37hGn+r3feWqN3bvHC7vtZG2/u4/vVcY8vuhJcxzupzX7R/Y3O9933f8AZrm9Qt/mmSaZtq7WRW/irsL6F4W87yZMMn3v9quf1zT0kk8t4d3+0q11UakY+6jklR5tZHL6havDJshdn/2v4dtYmpfKzTbPmX5UbZ8tdRq1r5e77u1fvVgatMgGx+iv8+3+KuuNTmmOOH5o8py19bzbmf7rfwKy1n6hbO8bBPmffu+aty+UyD/U/K3/AI7WXf2TyK3zso+9XbGXumNTD8pjzK8ch9KW1t3jbfNHt3fNVjyUbO/5v4qljtQzbHfedvy1pzcpz+xn8RY09X+byUYN/eb7tdDpdqlrcRbE+b7rMv3aztNtUh2edCwbZu/3q6PS4Ukj+eFW3fcZkrz61aB0U6Muc1dPhRdjpu3fd/2d1dDZxzRsm+RQ+z7ypWPYrCIvJy3zfc2/w1sWcfyMrzM/mfNu/u15sn73M4ndGPL8JrWavHH8k33v4mX+KtXT1DK37rfFGnzMv96s+xaa48uFNp2pt+X+KtSxV2/0ZwuPuurVjKXu8x1R980IUfa8Lw7yzbauTrM0cUybc/d27vu0Wq+XiaaH5fKbbHGn3lp+n2u0lEhb7/3mrl974oHSuWOjkWbPzrVUdEXds+fd81WrNXmZJrZNjbf3sci7lqOzhmhQu8KuJN33m/8AQa19Lsd0iujtsXa77f4V/u1pCXvSOyPN7L3S/pdq8W3y4Y9n3ZW+7/3zW5p+moq/vk+78vy/3araLps1uqrNc74t7NLuXduWugtbNJpFeJFcQ/LXp0o+5eJl8MinHYvJMuxP9Z8vzVp6PpH7v5+rfcXft27atLp6RhU3q5X7kcK7latezsvL2vMjb/7u3+L/ANlrrhKNMcpSlIrf2Z5liNm1U2NvVv4akt9JLfuYduzbu3N/erYt9N87fNMOI2+Vl/ib+7tq3JH9ojX7TtRdi/KyV3R5Kkipe7H3jnV0Pdcb/lZvl27vvVV1DR9sgR0ZQrfvWb5lau1hs7b7Q0yWfnPG219r/wAVMutJdpi8f3lX5GZ/l/4FWsfdkY1pLk948x1Lw7H57hE4X+Ff9r/ZrDvtO8jbbTOsI2/uvlr03UNB8qOW8dP9ptqfe3fxbq5bxBodnJL8/Rv9VtrlxFSPwnnxlCUzzvUNNk8yOKGNVbc3y/e3f7VTeHfh7r3izWo9B0S2a4mvH8q1ht4mkaaT+7tWuo0LwFqXjTWLfw9pWiXE13cOsEEdvFueRmav2S/4Jgf8E0fDH7Nnhiz+J3xLsYb7xdcRLJBG8S7NNVl+6v8A00/2q4oyVSrGETnx2NpYWlJs+f8A9gD/AIINaPdaNafEf9ryBnE0Ucll4ahfDNH95fOb+H/dr9B7X4f+Cvg54XTwV8NPB+n6Ho8UW2Kz0m1WJdv+038VeoMqbCBXnvxm12DRrAtM+0fxf7Vc3EK+rYHT5nzeBrVMXjo855n4x1+3tsrEGl2/8865STxBCzKdi7G+b723a1ZXiv4laJBIYUv4dzK37uR9tcRJ4uttVulvBqXlIr7dqv8AK1fkmI9mpn7Fl2DoRpe8z0m78SQiEfZrjLbf9X/do0/xZpzolvczNE7My/N92vL9Q8bY02Qw3MLlpV23CvuVVqjp/izVbP8A0bUvLzHL961b5WX+GuX2nLC6PVjl9CUfiPZriaCKY3KHzpP+WSxvS/20n2WS2uYVlST5fJkTcrf8BavGbHx5r3h23vtS1XWPt8Mcu6KG3i2yQru+7/tVtW/xOmvI5X8hlVov9HkZvvf7taU6s1Exll8Ho3cpfG79h79i79oBoj8YPgjpbX7RNEuraOn2WdVb+80f3mr4u+N//BB3SNHvH1X9mT43yXULK3kaD4si2tu/hVZl/wDQmr6/1r4qPafZg6NdPJ8sse/ay/8AxVTQfEabT1uEnuY5Ujt2ZPJl3MrV6WGz/HUbwvzGdHDTwsuejOSl+B+O/wAbP2Vfj9+zfNFD8Y/hvfaYm/Yl9Cvm2kkitt/1i/LXDXFuG2wwzKV/jbf91a/dSPxponjrSx4M8c6VY6lpVwn+n6feRK8Uy/8AAq+Mv22f+CS3hHVLCb4xfsS6kbcKjPf+A9Vus+d/e+yP/wC02r6TA5thcauSXuz7Ho0c/qxfs8XHT+ZfqfnrdfuZAg8tlV22M33v/wBmse+WG4bdvVdr/PWz4m0/VfDuvXPhvxVpVxpWoWcu
y60++t/Llt2/2lrHutn2p0f5D/E38Neh9qxljcRSr+9CXumX8m50toWHz7mbbu20v2G5WbZM6qyp/f8Alar9rH5URTbhl+6396plt0mm3o7bf9patS5ZRsfHYyn7TmRX/s879iW+3zNvzU5bPdCzJ8qr8qLHWhDZzbQ7wqgXds+bczU63ieNj5MPyyfP833t1Epe0PI9nGNjEvrHy43mcNu2bVWsXULV418wR7WX+Jq6y83rMd9tyzf71YOoW94uoSvsjxvXb83/AI9XRHl5tCfZ+6c7cH7ZiZEX+9uZPmZqqLHuk+f76/M6/wB2tK8017iR7nzvm3/xf+y1TbT5oZGm2K5k+5/drpjUjzcsjj9nIls43vLhXd2/dsu1Vau40m3RdtztU+Ym35q47SLV47hHkh2uv3o67jwrb7m+0yfNtT/Vt91q1jL3NCffOi0m1RWG92RV+Vmat/SVeQN9sdfJVtiLN8ysv+zUOhafDN/pM0zLu2t5ez71dDp9i8sf75I/OZ/kj2fw/wCzWkTjqxl8UR9qrRshfzHTYu+Rf7392us0Nd3h5FBPKP8Ae69TWHp9j9nkV5oWVlba3+0tdHZIY9OVGIYhSCVOQTzmv3TwPi/7azCX/ULU/wDSoHu8Nz/f1V/cf5ozUhmW6L7/AJpvvtJ/6DVuNoZo1mgRVf5vu/8AxNPhhWGTzpkm+58u1NzbqfpNjtdrxU+f+JW/hr8ajT92585HFDLiN7m3Xypmf7y7Vb+GsfXLebyXd3Zm+6jKn3a6C+V7aFXl8tP+eUcaVgeJp3s7d32KAvzbf4q3jROyOYe9Y4DxUqNayOlyzLv2/wB3c1eYeKrpJLr50ZVVtrxr/DXpfi5PMhlSFI96/NtX7q15h4q3sxn+6rfM/wAnzM1dVGMInTDMOWNkYd5JuXekzRvu+b+KoGum3K/3V/iaq1xdXKyOm3dt++1V5fOZW+fC/e2q1dUfdicWJzKUpDru42yM6PGRv+9WPqU3mSedvZTs/v8Ay1PfXDyDy0fDN9xttZl8z/Km/d/e2/xVpKJ5dbGc3ulZZvLk+cKy/wAVT6XfbZmk+b/4ms+6ZJGCb1H+ytLZ3CfKm/b8/wB3+9WUo8xx/Wpc2h0+mz7ZCiOx3fwt/DXQafqSRxgvz/erjrW6SOPY/wB37v8Au1uaTcfvPJeeuWUeVs6KOI/mOr0+4hgk3w/8Cratb7z5vOeZmZfvfL8rVyNnefMmblSrfL/s1uWOpOZE+dW3LXFUj9o9CjWlsdj4fvPtEfnO6p833V+9XQWerfZ5A6PIybNqN/drhtPupvkwY1Hn/PIr7WX/AIDW9Y6ojM0cm3zf4JJP/Qax5Ym/tOaNj6W/ZF8Vf2l8QtK0HW/EO+1k1SNGhV9sX3q/Xb9r74e+H9Z8J+HvEtkzEeFrWJrWRW+WNWjxX4a/AnUrmHxxbTWcMnmNcQtF/F8277y/3a/Yz4i+NPFWtfstaD4q8T6fOsGsWq2cDN8v7yNfl/8AQa8PN8LX9leEeaJ6GT16EsbBTlyyufLXxx8VQ/8ACSBHeSWW62/dbdtrqfgvZ2Gn2MmL9T5lvuZVavF/id4sfT5Gmd2edpVWWaR/mXa33a7X4V+Mkh8M/b5rZYUZGRmZtv8AwKvz+o58vKfqdFx9pZnOftAePLDwrqypYXmfOn2Iqxf3v4q4m1+MGlaTp8t5fpsiji2v/emrjfj58WJv7cub+51KF910yxLs2syr/FXzr42+MlyrN9muZPMV2bdv2qu6vWwdGXLFI8zMqkOaTNb9pD4wQ61eSXXnSS7Xbyrf7vkrXyz4m8QzXl083nf8tWZVb+Guk+I3jq/1dX+0zSOzP88m771eYa9rgW48lNufup/tV9Hh6fNLlPgMxxyjLkLPmXOrXC2dr5hdn27q9a+GvwF03UrEf29tt3b5vMk+ZVrkvAdvo+kWcOq6leR+dJ/D/dr0HS/FifYXSG88pN23cv3q9KVSNP3IbnnUY+0lz1TE+JX7H9zqelteeGNYhl2/fhWvErj4K+PPDd43+gSFVba7R7mr6x8N+MLaxhR01Nm3L86t93dWlofiDR7fWIRNp9vMjfM7Mn8K/M1XTx8oR5ZRMsRgYzqc0JHjfwD/AGcfiR8VtcTw94e8MX15eQv88KxfMv8AtfNX2bf/AAw+I9/4Lj+Evg/daeJYraGwi+XJimi2rIMD0CPXef8ABLv49+Fda/a41WbVbW1SG4s1gt2ZFVV2r/dr074NXmk/8N8C/wBQt0ktD4x1N2jLfKV/0gjn8q/Y/CXFUa2U8SrltbCTv6ctQ+k4bjVpUa8m/s6fieB/Hr9jX9oHRf2XX0a78YXGqPI0dxq/mbmdo1+7GtfL3wV8Val4B8UR2E/mW81vOq/MzfKv+7X7+fH7SfDHijwf/ZWj6bCkFwu+VY13bv7u6vxT/wCCj/w3h/Z9/aKtNbs7Nrey1iVtyqvyLIq/3q/nn2dCdL2dM6KebV41Y1JS8j9LP+Cf/wAZH1LS7W1Fzsm2f6xpf9Ztr6f8Va/ZeI/9PuUVHZtyyN91a/LT/gnv8YodWkhtt8aOrbEkWX5q/QbRdal+xw2d75yeXErfvPutXz8uajTlCZ9Th5fWK/tS18WJIbXQYbaZ98HzNLtTd/3ytfml4w8RW3jLx9rGtveb/Mv5EiZm+VY1+VVr74+O3i59P8A6hqt5NthsbCaVVX5Wb5f4a/OHw/fvJZwwzbklm3Ovyf3m3fNX1PCGFipTqng8RYiPtYUjbs2hkhV0RXZk2vIv3Vq3Y2r28wMKLI397f8ALtqG3unbZNv3tu3eWv8A6FWto9vux5033l3fc/8AHa+85InjUakeTWRoWOn/AGrEyOy7XVvu/K1aEGnpIzzpMp3bm3fd/wC+aZp9q8iv+53N8rJGzfL/ALtbFrZzKzFE/wBd8m1U+7/u1py+4aSrc2sfskFnpb/fuSqI3yqv+7/FVmTT3/2m2pt2r/7LWhptil1tmufluG/iZ921au29vJJbNM6ZH8G5fvVrTjDYx9p7vMzjtQ8PwyMUhdYi23ytyfN/u1z2peHZrhZofs27b/tV6Hc6a9xJ8ltuib7+75tq1Tbw3NHMvkosKr8ybauNP4TmqS5o+h49rHh3c7w52uybdrfwtXJa54Z+8iJ5bq/9zbur3LU/CrpcSvsb92+5ZF/irlNU8JpcSPC9hI7796SN/DW1OPvHj1qh4xrHhHMZhRFxJ/EtdD8EtKGlDVI9pBZ4c+nAfpXSah4ddt1sk0e6N23KsTU7wxpLaWZy2QZQjFG6r1r9M8J1/wAZ5hX5VP8A03M6+GakZ8QUv+3v/SWfP3xe8NJdeMdYmhdhNLqMhTC/7RryfxN4T2tvuZmO776qvy19K+P/AA3Nc69eTblbzZ34H8K5/irzPxR4UtmWV3tsiRq+Nz+N84xX/Xyf/pTPnMfL/bqv+KX5s+ePEGjuZm5xt/h2VQt9Jm3K6IrN/tf3a9Z8TeD0VWuURh/s1zDeE7lZPkTf/stXhSic3MjK03R52Vk+X5v4q6bS9BRdruiqdu3
5av6LoM7L89tt/urXT6P4bdpF3plF+9HWXvS90rmM3R/CP7tXg2oWet2LwPbQyb02uqxfPtX5lrp9F8N20atvhkd/7u77tbdrocLTBHSSIt8zxx/equXlkZHBx+Ddyslmm5Nu7cy/NU9n4Zm270g+X7u1V/ir0q38N/a0byT8kcq7vk2s1aVv4Pe4X5EYf7Ozaq0csJEylM860/wS/wAvyNvXb/q23bq6LTfDKK+zydzr8qrJXoOm/D37ORMlg3zffmb5t3+1trX0rwKjXUk0ztt+780X3m/hrOVM2jUlE4HT/CLlhM9mw+XbtX7tXJPA9z9o2THYrfLub7q16NF4LS6VEHyGN/uq1adp4HsxGLZ7NijJ95q5qlOR1RqRPINS8EOqv9mhWVfK3Ksf8TLXNat8Pf3J89PMk+83+zX0JdeBbYLLM8MYeOL5WZPmWsLWvA8MjP8AOq+Yu35Yvut/tVEqfMbRqfynKQq6xqiSbW3tvaR/lWoJN9psRLbeJmb95G3y/wDAqJrh7ebHzGJlZU3fdX+7WfcahcybtiNlX+f5/u153LKJ+gx7k0cfnTK77c7mZmV6rapZw+T/AMfKxBv73/stTx3bvvezEKPs+X/d/wB6o7i6hffLc2yod6ruVt22q9nyrQupU93lkc9qVm7QvvSMoybfm/h/2q4/xEqNHMj7f7vmR/3a7e+leS1dLPbvbds8z7rLXHeKIY5tyfZsKr/P/D/vbaz9ieTWjy+8cPqTeXJs3sRSxzWzbfX7vzfxUupLFHmZJmRpPmT/AHapfbELIkPyv/z02/drKUeaBy832jbsZE2kof8AvqtjQ2kjkPOxPvLurnNNmkjVIXnXH95vvVq2d0kjCaF2YbG+Vqz9n7vvExknPmOh+3Ha77ONn/AqsrI8kLJ9xfK+833V3Viw6hDNC9tv3fut23+7/wACqWK6hhYW2/5VTb8zbqj2c+W3KXCpyyuWLiR7eP8A0lN+3+L/ANmrO1G+mjs2SF9xVdybvvN/tVYvrrcvk7ONyqjM9ZerSOtw77NqMm3ctVGMuawqlT+U978FzrP+zs06twdFvMH/AL+18q3lzDHEPsyMu35d26vqbwQY/wDhm19jZUaJejP080V8malMk2dkPz/e2/3Vr968VoXyTIP+wWH/AKTA9DP5JUMLf+Rfkindas/2hYXdWP8AeWs3UL7ywV37tvy1DfXXkyO/mY/u1kXmqpuw82HZv4q/I6R8TWrcpZutQSPbsdgn3tu+rEOveXbhEfd/tL91q56bUk3F32qFam298nzP527+8q1tHzOb2k/iOqh1JJJFeF1fd/47U39pbleFvutxu/2a5nS5N0i7H2lf7z1rR3CfKh3H5K1+GRHNzGqbzzmV33bl+/8APSw3E0395d3ytVKFk8zf/A38X8VXLRZ9rJv+9WUpFU/i0NKzkf8AjPzfw7auWt15cfycf7P92s2CZ4ZBDN93+9/DV21bd9x2K/drGUf5jvpx6m5DcLMyb9qKv8S/3qvWd9tmHz/KqtuXd8rVh2+FVZJ5lRGb5a0FvCxKb1Td91f4q5pRgd1Pmj7xt2t062aOj5kbds+T/wAdardnqMaQ7E27v4925lrm/t00e75Nu75vmq3Y6h8qI7L8rbkjrz60Tuo1I8x0C3DzbbK53Oi/d2p/47V5bj/SEmtvMRvvK23dWDHcTLH88zMv8XzVow3Tqqu4bH8ar91azlGXLoaVOU3JLqZSgQMoaX96zL96rdrIk0myFF/h/eR/8tKyLOZJr1EmSR0b5krUtYZo1M0PyIv/AH1urCUY8vxD+KZdWPz4diOqbX+8v96i8h2t9mR28qR12bfm+anGbdboju33P3rSfKtPVrxlGxGQqnyfJuXb/d3VnU54+6dMeWWxkXizLu8mXzPJRlRv4axNQt0khd0uWyqszLXS6hY+Ssps/LEf92P5vmrDurVwoguU/wBr5U20ox+0Wcvqdu6wujwqtxu3bpPu7dtcteWrxsXm5rufEVq6tshTc+35P7tctdWrzSO8MKvu+9t+Va66dT3blyjy7HKaxazblWN2Ufe2/wB2s26h3Mfn3N/erevLH7VMyP5ny/w1VuNPf+//ALq12U6nLoc/LzGB9lQbkT7rfxN96rljY4aMuvP92rbW6blQRqzL83zLV2xs/wDlm77lZN27+7SqVv5S6dGMfiLGm2Matv8AOVpf7ta9nCnmB4UZd397+Gq9nYwx+U/k5f7qsvzVqx2T2u5H6fe/4FXnSlE2lTgWre33SK+zDr8vy/xVqafC8c3nQpu3Ntb5Plqrbrtk3q+VkT+L5q1LGN1b53w/y7Y1p83LDQ5eX3y9p9rdbvkDff8Al+f7tb9nGgtxv3Ef6tNy/NurJ0+H7RtTfIEk++392tiyZI42tkmb5UVfMX5vm/3q4pXlL3Tqp+7ys0dNhmWREm2/IrL83y1pbf3iwpyv8W3+H/ZqnYx/apA86MzzL88jfdb/AHa2bRd8Z+dRti27WTbub+7WfNyxOmO4/wAmGSRZNm5mVmRm/hWtLS4UWWJ4dxXb8jM3y/8AAqo2zIyqkMMiyfxbvu1saLb3N5JFeQphV+VFV/lWrpxh8R0e0l9k6DTWhktv321om+55db+l6b+7R027JH3P/wDE1k6PapNEiTJ8zPu3M/y/8BrptJs/OZETb9752/vV6WHlze6c8qnNEsafpKQwuIYWBZtyNt+7WlbxusgdE3fLu+5T9PhuYY1h8xn2pteppLNI03zbirfLF5f8VdNPnLp1CXT5vm2eWwb+CSNvlqSPeszbHbZs2vti3f8AfVQW9ncxsz723r/Fs+X/AHa09LtLmGEvv80Kvz/Jt211xly+8VKX2h0Nl5y+dv8AvfcVk27f71T29i/l/Oih/vfN/FV61017hdlzNGwVF/1fys1W5rbFvsL+Vu/4FureM5ROSp70TktWhhlkV7kLtjdt8bfN/u1yOqad9qk+xzIzpvVn+WvQdUt4ZLaXf+7Vk/hTdXY/su/CVPFXixvGGt2dvNp8O37Gtw/+sZfvf7y152ZYilh6UqkzjjH2MD6N/wCCX/7IGm+CJE+NPxJtrV9YuNy6Tbybf9Fh2/eZf7zV+ieheI9KsLSO2u7lYxt+8z/LXxLp3xqs/DP+gabND5zQbPLb7qt/D/47XP8Ai79rTUtr6gqSW4X9xFIt1uVmVf4V/hr4mjm2JjjPbQPMx1OOJgoyP0Sutc023sft7XKiLs+75a+Yv2sviheWWl6q+k38OIWy+5vurXzov7fHiSPQbTRL7WFjSafa0kn3dq/8tF/2t1eZ/tIfHh/EngiXxDo9y13Nbz+VqNxNcfNMrf7P8K17GMxk83oq552GpxwdXnOM8ffHC8utUkRNV87zF27l+ZW+b7u7+Fqx7X44X9nb/wDH5Mkkn3FX5lavEvHXxB022b+yra/aSZn37o/u/wDAazNN+IU1hZ/Zv7S8pVfa/wDFXx9XLakp8qPtctziVOPxH1N4d+Oty9vs1W5VE2fuo/u7v9qpbz4i3PiSN7Cw1j7P8isjN9
5a+XNP+KUN9Iba5hZHjT5ZmlVfmWut0v4lQ+IIYrl9eVJWRvN2t821fu15OIwU6J9TRzql7I+jtB+IFta2z6bf6/5vz/vfl/8AHant/iRZ2ObOzv5PJ83dAzPt2/7NfPA8aX9vavDYXKjzt37xvm+b/ZrO1b4ieJNF0+K5h1iSRd/meTMm35vuttasPqs3H3RfXnUlpI+jdS+KVtqEySJNveFvk/u/8CoX4kaba3CTWfy+c6q21/vbv4mr500/4mQ6q6PDeMjqm64Vfl3VteH9evLr7keXhRl85flX73ys1ZRw/vHVHE+0peZ9IaT460pZP9JuWhmWVWabdu/4DXf+GfiBp6/Z3R2YyM32eRXX7v8Aer5f0/xU8McRmdcM22VYV/i/vVreD/HF5HYyLbP5Kea3kbf+edTOi1PmpnHWrQ5PhPRv2xv2J/gb+3F4fhtprlfDnxCVW/snxdGi+VI38MNz/eVv738Nfk38afg38Uf2efiJefCX42eFZNI1rT7hkRd3yXi/wzQt/wAtI2/vV+qdr8UvtVpbw/bJPI2/e2bW/wB2q37Snwx+HP7ZHwVk+G3xIto/7b02Bn8F+JpF/wBJ0+Zf+WbSfeaNv7rV9nkub1eT6vivlI8lVquGnzUvh/lPyUjhfaqbMn+D56u2qoqNC77lX7yt/D/s1qeNPh74n+GvjC/8DeMLNbe/sbpom/hWRf4ZF/3qoWqhV+f5gv3/AOKvYqPl0HKpGt7yLCtNJCqedt+dWX/apbXfNI01zDsVZdibqW3/AHKu8NywMn+xu21b8iGO3EKJvDfNub+GnGVonHKJm3kkMMZaFG/3W+9WPfW+6KV4fk2/wsv3q32hdWHmCPym+batULrT08kIhbDS/Oq/w1pzcsomfvnMzWIhX+JWb5vLVPlaqf2N5I/O8na3zfLu+7XSXFmJI/kTDL/D/eqrdaW8cf3G2sn+srb2nv3MPZx5TK0uzuZFH2aRleT+L+Ja7LwvYzW8yQ7923/nolY2n6Y+4bJtn8MUiptb/erpdHsZI2VJnbdJt+b/AHa7sPyyPNrylG521jCnyl5vNVUX/V/NXT2rfZ7z/XL8qfIyp/DWT4fhhmhRLaaHaz7kjjT+L+Kt61t/9HVJX+Vm+TcnzV6NOnCR5dat2LVrawybrq5m3Iqbljb71XraJRp4iRNo2EADjFV/nWVU8vDMm1mZPl/4D/tVctolhgEcW7Azt39evev3DwSio5vj7f8AQLU/9Kge1wxO+Mrf9e5fmiG38mb/AFL7Bt2qvzNt/wB2rLbFtWS2i+Tdtbc/zf7O6oWjeSRVSFtq/LuVquNYpHMv77en/PNvlr8g5mfHRqGdqESXAXZcyD91t2/wrWTfRzXUZ3uxTyt27+FttdDdQzX37mEsWj+/5ifw1j3yzeWqI8eyNNr7qs0jWl0PPfFy+XGUuX2M0X3q8r8UWvmW3nWbq0avuRmdvvV6x4wkSaFJkhX5mb7vzLu/vV5n4kjfa6Xnkl/vfu/lVWqoxlylxxB57fLLJJ8j7lb/AMdaq91JtjV3hw6/eZa1tQaG2ZvkXdu+fbWYypMzok+5W/i/u11fD7oqlafIY01w8kbd/mXYzJ93bWdfRvH++SH5meugutH8yRUcNhk27ao3WjuqMiRt8rbd33lWq5uX3Tikc7Mz+Z5OzP8AEtLC25jsTeG/h/2q0brR3WYL8zfLtqKPTdrNDs+9/EtTUj2IiuUfp8m2ZXmfG3+8v3q2rG6SGRJv4Nn3l/h/3qy/LRfkdGZl/hq3b/KrJHu/6Zba56kTppyN6zkfbvfaV27kjrStbrzI/n3ASLtrB02V7XbHNMy/wvurVt9nll3dv4di15eI5vhPUoy9z3TZsdWuYZEcJ91PvL/DXQaXN9ujWaYMwV1bb/7NXKwxzSLvR/mj+5tevUPgh8P7rxVqBd4WI37POZdrVz4en7SdmaVK0cNSlKR0vw71T/hD9c03xPeOyPb3Su7btystfuP+y94p8Pftq/sFX/ws0q/3a94biWXTvMX978q+ZDIv+98y1+O/xi+G+meFPCqabC6veNF80avu2rtrf/4Jtf8ABSXxV+xj8aNO1LXLy4m0yGX7Lf2twzN9qsWb95/wJfvL/u17/wBXpKjyo+UhjKs8Z7W56D+0do+sWsl5Z3NnJDfWtxtlj3/NHJ/Fup3wj/tWbwvc20yed+63xNv+Zdq/NXvP/BULQ/h/rnxc0n4/fCrVrS78LfELRo72C5g/1Zm2/Mvy/wAVeLfD3R0ghm037fsga3byF+6u3b91v71fk2d4L6pinBfCfuuS5j9fwEKq+L7R8V/tKfEy2s/Gl/C7srWtw0XlyJ825fvV89+IPHX26aV0ud7M7Nu316X+3pZ3/hn4jX9qjttml3Juf7zV82x6k/nb3dldf4f71e5luDpyoRkfOZ3mVWFeVI321Ca6mZ33YX/x6uY8U3U8OpLMi/w/I1aWm34mZUm+f/Zql4xXf5fOF+7ur0KEPZ1/ePjsRUc46SK9rr2p3GyFPm2/M9dt4Y1p5nSG5v8Ayv8AelrlfDekw+XvT7/8H+1XfeHdL8K68YrfWLNYivy+cvystd0vZSi00PC06svikd74T1DwlJCs154thVY0X5Wf5m/2a+hfgr8Ifhp8SvBtzrdt4wt5b2OJlt4Y/mbd/tV8leIv2edN8QN9p8E6rIVVdzR/aK1fhL+zr+1FHqyp4GuJi0m7b5dxt3bfm+7Xm1sLKXvUqp9HhYx5eSVJ/wCI+n/2bfgTrej/ABqS80fUoRcWsv8ArGn27v8AZVa988Bz6tp3xzjmjvSl5HqlzunzuO/EgLe/evizwX8J/wBtjUtQS80SHUIrlpWiaaOXazSbvurX1p8KZPiJ4d8WaSNPsBf+JrYiOSCVgPNuAhWTJ+u41+w+EWHqU8k4mlJ3vg5/+kVD6TK8PRhhq0YJpuL39GfYVn+0vNa6f/ZWvPI80Nvt27ttfAf/AAXU8eeG/GXw08Ia14e1JRPZ62Fe3+XzG3feavYf2vta8eeF/hTeeLbm5sbPUoYvPnWO48xo2/u7q/Kjx78RPHfxm16O48Z6xJfeXLuih3MyLX865Rhq8sYq0pe5E+TxUvY/up/Ez6G/4J8/Fi58P+Orezmv1hSSXdLI3zeZ/s/71frl8NfF02oaHbagnnPaTW+7bJ8zV+Mn7LPhXVdP8aWEypnbcK21l+7X61fAW+k/4Re1d3kaOGBV8lv4mrgzWUfrH7r7R9lw/UlChzTMf/goN8RLPwr8DW0HTZv9N1q6js3kkf8A1MLfM21f738NfH2gtCot9jrn/noy/dWvSf27Pie/jv4vR+GNKdW03Q7f/Slb70lwzfLt/wB1a850GB1ZHh8v5X3bpK/SOHcH9Xy+PN9o+RznGfWcwm4nV6eEaMJC+/yfm8zZ95a6DR7Uz4mdGRF5RVf5mrH0PHyB5oy//LXb91v9mum0eGGGRXSGP5n+Zq+kjH3Tzo4rl92RrabHNbtv+zM8jLsRW/iroLKFlUJs/dsny/xbWrN0+18zbc/vN2zcr
L91a6Ox0iFY1WF/lb5nZf71aU6f2jf61L7JHDb/AGO8V/JUiRdqSM+3b/wGtCO1ea3MqJhvlR2jdV/8dp0OnpcNI9z9+OX5a1bXTxCqIlnt27dm1KvllzEVMRKMTKk0m8imST5R5bfP/u1X/sXd8k0EkXlvuRli3LJXWRaWjTL/AKNl2X7yp/49VyPQ3+SH7TuSP7m7+KumnT9w46mKnLY4HUvDu6Sa5tofJ8x9m3b/AOPVz+reGZt0kzx4ddy/N/er1680CGaNIXhYuv8AyzZ/masq48Lu0m+5dd+9m+7t21fs2cFatKR4nqHg94V87ydiyN+9kjXbuauc8SaONLulcSu3m54cYxj0/Ovcb7w+8W/f/E/ysyfw15d8YbD7Ff2mVILCTORjOCtfpHhUpLjnCX7VP/Tcz0+E534jpL/F/wCks8w1zw2t7dvOu5CRuKg431xXirwWHjkhjeOL+Lds3V7n/wAIzHd6Zb3UsDfPbj5x9K53WfB73CtCkMcoj3Oqtu/76r4/O4RlnWJX/Tyf/pTPn8fKX1+q/wC9L82fOfiDwTM1rsmdWeOVl/dxfMy/w7q5q48CvaM0/wBjYtsVv7q19Bax4NmMn7mKSbd/07/N8tc/qnw7trqQLNYNGVbft3turxJR5TllL+U8r03wrNHcbFhZ3Xarrt+7XV+GfDdreTLLc7Yk+b5Wrr7HwXGsfzpJv+9t27d1XdP8L20EZ2QMx+bZt/irl5eU1jIzLTw+kcafuVYM3yMq1vab4PhmukuUjVH3bHmki/hrU0nRZrfZ9mDFWRdkbfdWu00fw+7bYUeOWNfmeRfvbqn/ABEylE5O18OusabLb70v3v8A0Guk0Pwak2zfD80bbpY5E+9/wKur0/wyiuiXUMexfmSFf/Qq6PR/CMMiP5KfeRWdmp+zM5S5TkLXwTttB5z7vm3fu/4v9mtO38Jwxsv+h+bt+/JXaadoLtmZEX5fu/w1Ym0naqOnmbI02bV+61EuX4Qj/eOQtvB8MCvss4ZJvN3bvu1pW+jwyM3yKsUafdb5a3bjR5rhd7pnc3/j1OuLNJF2TIpb+FV/u1zy973jeMjmptHtpoXS2dS8ifxJXO6x4fhWx+SHb97zV/vV38lv5M37lF2/dVtn/oVYviKzSeQo6fdTajL83zNUcv2jXmPkbWNcRY2htod235E+b7q/3qZb3FmzJ8kfzfM0n96sNdUfcHmffIq7f3fy1PbzOvzo8iD70Ue3+GvPjH3veP1SXJE3ZpnhUPDe4Rl3bdv/AI7UV1HDuMMyZT7+5f4v9mqXnSNMNibQy7fmb+KmyTXLXSu7L+7/AL1KXuyFUlSUSWSOzyyfMjqv3WSuQ8UW7tHI95MyvJ8ybn+6v92uq1C5TzBC9zlV++y/ermvGF0kkMjo6uioyJuX5qiXunj4qVP3kedeIfmUOhyivt3bKyZLjcySbNp37dy/w1d8QTTeTvR8u331Ws6JoWVt7tv21ly/ZPMqSNKxa1kbzo/3rKnzLW5b3U0NuvkouyT/AL6WsfRtk0iIk25v4/krftbP5ofnZ/8AgFT7vwi5fd5izbrCsWya2bZtVvlqaSN7eIb4WQ/7vzU/R4YbfLumx1fc/wA3ytV64bbH/o0Pz7N25v4a05eWRXvS1MxpPM+eaaMhvuMyfdqlqUcKsiLNvT+Nv4a1Ly4hhRpkh3n+792szUWRkSVE2Mqbty1MYilyr7R7v4Kbd+zc7qSc6HenPr/ra+QNajmaNnR2+VtytX2B4Jbf+zgzJnnRL3H/AJFr5G1yRJLffIm3b975K/dvFLTJMh/7BY/+kwPU4jd8NhP8C/JHF6lNcjKO6kVj3Fx8xfYrCtfXNizP5P8AFWK1vt3vHu3L822vx+PvRPialQgkm8wLs+YNUdv+8k2D7392pfsb/wAH/AttTWdkI2MvzBl+b5lrWPUx5ZSkS2av52+ZOa2rOTdJtmTnZ/eqjb2Mqzecj8SfwtWrb2MLSfJ13f6ys5S5jSNORbtU8zZ2C7ldatQ/KyvLuyq7aZa2v2dVD7Xdm+etKOGFWWZ/MO7cvyr8tZylE6KdGew2G3kkb+//ALK1dhhaFV2W2TJt+VakjtZvs4e02iT5V+797+9V23sX3f6SF+ZNq7XrCVaB6MMORrbxrDv+bLN/ElEcm24Lvu/d/LuZank098/cY7t2756dHDDJbp97HyrurGVS5vGnKUhVH2mNfOfJ/wB6rNuu350+fy/4aS3tUVdkMf8AF8n+zWrY6R9nw/lq25fnrhlU5TvjRkyDTw7Lv+X7v3Wf71adizyTNv8A4k+8v8NMgs0VUdCwRvl/3q0rG33R7Jpv9rav8VZ+2Y5Yf4S7Yrtk3wjcjfJKq/e/3q0VZ4XXykklRfv/ACbW3VDptvtjVERQ3/j1a8NvIYV2TL8zs1Y7fZKVGUh2mqhU+c+d25nVv/Qatxvu27E2jYzfL/DTvsiR26uEy2+rCwolv5MM21lRtn+01c1SXMdNOjOPumVeWcLfI+4q38Uf8NZ+pWMdw2yH7m/bub+KupksZpFXYjFZFVfl+b+Gsy4jubiFZvO+bf8A98stR7TljzHTGicfrFq+Hd0+bdt2qvzbawtT0l4/4F+ZPvSV297ZpIvkuiu2/wCdt9Y+oWe5gjpwv8Vaxre6VKnJe9E4G80d4ZGmuU3hk3bqgm0fnznhVX27ol/vV1Wpaf5bKm9XDf8APP5t1VGsEWF4URQZF3bf4q3lWvuOOH9w5CTS3lja53+UW/iZfu1LZ6TN5hR33/7tb8mjoyrJ5P3fmVWpfsdssPzowlVfnVaca3vBKjH3bFW1t0hUJDym7a+2pYbzduREV9u7azLU726R4SFF+b5nkb+L/ZqvJJDHMrujIrfKv8VRF80uaUTmxXu7Fy286b/RvOVfm3eWq/eatXTV81R59ttbd8zM/wA1Ztq3lsJkfJbc23Z92r9jN5n75ZtjNL8jMn/oVFSUpXSOOKtys6HTVkgUpD8qK237/wAta+nxvbLvttvzSrv8usvSYUmJS5h81pPlRo/l2/8AAa19PjSSZf8Ax7+Hb/vVxSly6ndGP2TXsFFvCj/u2C7tyt97/eWtDTW2lHmdiZPvbqzre13N5MKZ3f8AjtaVsvmMz3Lsrt8v7uspcvNzo6I80tEadva2fmec7btrbtu/5v8Ad/3av6L/AKP/AKNNO2/733NqstZMdvdeZ+5mVkmT5vm/eVraQqW3l/aXaJf+mjbtrVtGQ4y97lOw0u3hbbMm1fL+bb/drrdHheaOJ4XaIsu3/e/2qwPDckMcfl3Pls3y7fk+auq0nTYYbtLxLmRHVNu3buVt1d9H3TnqS5tTQs99vIg370b5Nrf+hVakV5RG8O7asW1YasafDDMywpCzN97d/e/2auw2b+W6Rrt213U4++Tzcu8jLsbOZ4/PTl2+6y1tSW8lvGr+cwdXVmZaktdFeDdbIkmWdf8AdWrTae8Nsz2zqw+83+1XVGPNMmVSPKFrNcwsj71y3zeWybmZakkZAqzb5GRUZv3n3dzVUuIbmxZ5ryGb
Ud0IoVZEdUP92qzRzXDPeJ8nybtyr/AA17uHjGUD8mzKXNP3TpdLvNsgLurD/x2up8NapCzJv3I38bSfdrgdNaRYd6Pvf73zPXQaTeos7pNCoGzajbqyqPl0OTB4WdSZ9FeELjz/hrDcu2c2UpJK+7dqxtN1SBoRCky75Nq7tn/oNXvAsjn4OwSZJYabNz7/PXGaffNt2Xn3vlWJlr9d8ZFB5Hw7zf9AkP/SaZ9tisPJ0qKttFL8jvPtzSS/vo2Pz/ACf7VbGntDcYh37VWL5P96uJtdYe3aLzpGKtKqbfvbf9pq6O01aFd/kxeay7WT5tu3/ar8G5bdDnlg5RidVH+7j2TTRgNt+b+L/gNV9Qt/Lh875VVvut/FVZNShmj2PlZJEoW8Ty/M3rtmTDM1aR5ubU4sTheWN+UpahNvjif92VjVll/h+Wsa68ncH3/Lv2/K1at5cQLZsjooSN9qsq/NWJeSPNIH3xhlX7uyuv2cJanj1JSo6lW6vprWSSFEVvm2vt/hrP+2PHy6MXX7m5/lp80iWcjJvVmk+b5j/FVKS8e0kV5kWTav8AqW+5/vVhyolVJT1HzTPclER9+35nXf8AdqnI3mQPeZYOrbdzL91qcuoTXUoRE3N/DGv8VZV5cOofzdw+f56JU/d906I1I815DJFdLpHf52/vK9bGkzbRshh3J/Dt+9/tVh+e8Vwuzcm7/wAd/vNW1prXJlXztpVU2p5bVxVo8ux1YWUZVToLHfCy7H3xSMq7W/8AHVrZtbyGzj+eTyW+ZW21z1jbvcSLbQ7ZFZ1+X+7UHjzxRbaLobfabzykunZIvk/iWsMPh5YutGCOvF42lgcLKozk/ih48v8AUGmsNHvNiwqzIqv823+9Wt+zr4q+1aXqUOqvCvk7fu/eZa4hYbaRWv7x/vK33U+aSsb4f+Jv+Ed0/wAQ2fnSb2fzYlr9KwtOOHoxjA/HsZiJ4qtKc/tHV/EXx9Db+IrqPTXba3+oaT5tv+7Ulx44m8TeGWtvtnmM1vsaFvu/drynVPG0OtSx6qjxujf6pf4VqnZ+NLzR4bh/tO9G+Zlb+H/ZrWMZfaMOXljY57xp4Zv9FYeHtZTat1un05l/u7qX4d3TzWM/h7UtrM25oPk+bdXD+LviXqWueKvO1C8kfy2/dbn+Vf8AZrW0XXNl9Hre/bL/AMtVV6qXJKRUecf4i0W50mR0kfO52b/d/wBmuD8QSI0zJ5fzf3q9c8ZW9nrUKarYP8zLuljryDxtDNb3pR/k/wB2lKMTSPwmZb/LcL91tz02aRFun+6GZ/u/xVHpt2i3A85MfP8AJUszJJqT+S+4bv7lTzcpfuiMybS7hs/w1XuIXjjbZ8zN83zVa+zlpFmR/vfc/wBmlmhdfnkfe1Vyi+EwbqPbJvd9x/u1HIu35CmK1NQs0jf59uG+7WfIzqv+7/eqYFRIFXHJpfu/OvVad96Ty+opu5P8mgYrSPu+Y4+amt/fBprM+Pubv9qlb5iv8NVylcrHFvMHNWLMIkZbOHbj/gNQrJtX7i0I5jk2P81Ll7kiyLtbzEf5qns5JGb+HctRzSIsfyfxfxLV7w/Z+dIsju2Vf5l/vVUdifsmzo2xpP38yt/Ftat7+0o7e12PxtTcrLXOeekcmyGH5P46g1rWkmtRDDNs2/L8tHwklHWNQ+2ah533Ru/75r9NP+DaXQ7O8/bi0HWL+FtlnazTpNv+638Py1+Xm52kXZ8y1+rf/BuDpM0P7R0GsI+1YbBtjK+3c27/ANBrbCx/f8pw5j7tA/ps0rUY7nSVuIZg4ZMqy1+f3/Bf/wCDFt+0D+wF498GtazTX1nYNqOmxr8376H5lavtLQdcubfSVe5h2Lt/4DXjn7S2oaJ4i8M3+n6rbRyi4sJoNsn3W3Ltr0qmH5YTPGp1nTqxkfzX/wDBPv4n3kHgCXw3NcyRSw/Kit91dv3q+ktH1K51Ngkj7/L+Xc33l3V8d6v4d1/9lT9srxj8Fp4dkcetyPZxzL96GRtysrf8Cr2/S/FXjCdg3k4Vvm3K/wB2vx7PsCqWMk4R1kft2S5l7TARS6HfeNNak0zfZujJu+42/wC9XhXjbxRcx3E1mkyxJI26WT+Jq0/GnjzxI0MiXj/vY22xSK+5VrxzxLqGt6g0syOz+Zub79cVDD1Yx946cViox94s+ItbtrxWm+VnV/mZX27lrndS162jzsh3rJ8v36y9RuL/AHohfbu+8u+sxVubpmKI277rV6tHD80uaZ81iMZJz9021v3Wbe83+8rVds9STyx8mxVf5KwbGzv2X7M77trf3a6G10dGZY4UZn2Y27a3kocpz/WJS903YdchvIxa22mqzL8ryb/vVj6tY/Z5C8w3bl/hqy1reWeLZNysvzNI33aZrEn2O1KXM0Zdk+Zt9cs4xpStE29pzRMZm8tVdEb+9tZ6RtQS6xvTG35dtV7rUPPt1SF1A37fmqD7VCqsic/JWnLOpH3jmlU+yTSXO3cnzbPvJTdztN58O5k/u021WSSFvvbtv3WpGZI38nzMj+6v3anlZjzc0TX0q8JBSSFt38Tf3q05LzdGX3rtbb8q1gRq8Mfkw8NsrVtWS4ZH+78n3tny1zVKMPiiXGpP4TQ0mxSa6CIjGvafgT8J9S8W30cMNg2/cv8AD83/AAFq88+Hfgn/AISDVIk/56fd+f8Air3rUPi5pvwv8NJ8MfBu1NSuLX/ibXnm7mt1X+Ff9qvncbOpKfJT3Pey3Dyl70j0bxJ8WtB+F+kxeAPD1ztu9ipcXi/eVv4trVW8L+JLHxFY/wBlQwyR7YGSWNpdzKv97/vmvljxR8SPM8RS3szsfn+9u+61ei/AXxlbah4sT7ZeMqyIq+dD83y/7VdFDK6dGMZH1uHx8f4VI4b9qT9hfVdHjPxF8APNdabfNvWOT/WK38W3/Zr50tfDnimPVF0SFrpWkfbtjf5lav3Q/Zd+Cfhv4xSN4e1J7O5s5NNkb7L9nZpF2/8ALT/ZrmfG3/BG/wCCGh/Gq0+LV/4kXR7O1ZZ5dH2My3TL8zfN/CtfZ4bGYf6rH2j2Pl8VlmK+uSdHm5T8aP7H8ZeCvEU3h7Xtakja3+ae3uH/AHke6vob9kD9r7w9+zA1/r2t63eXiyRfuNNtW/1k38P+7XOf8FbPCNn4F/4KF+OrPTbSOCzvILG6sFh27fLa3Vd3/jtfPVncTRtsRF+X+JvvV6VTJsHj6UZT6nhQzrHZZiZcnxRPRPjn+0H4/wDjJ+0TJ8ffHlypmvtsEUcLNttbeP8A1cdfdXwP8Qv4q8A6Zr03lyzLEsXzf7vytX5z/wBnprmi3Wkv8zsu5Pk/ir7c/wCCefie28YfCtLCbafsq/Pu+9u+61ep9VjToRhD4YnmU8bXxGMlUqvmlI97+xwxI6Im8xtulb+7SWtvcsq7XbLfLt/2au/Y5biSJ4XVGjfY235V20q2t3CyJc3imVW3/wCzV0aMT0JVJhDZ3MMhfqvyq+6tJdNf7Ozz7Qdn+rVfvUSQzTQq9y6p/st/Cv8AwGr0apCsX2l5JfMbZ
tVPl+9XoRp/DE2oVoop2dncttcTR4VNqbU+Zf8Ae/vVLJbOyvD8z7l3bmT7tbOn6PYLIJrNJA7Ozyr/AHamm095oXSH7+791/u0/ZxjM9jDylzcxx+r6SnlmZLbcvlfJXJ3djNCu+HzJUj/ALy7ttei65psMkbTIiq3yqkO9vlb+L5a5bUrU25ZIpo2XYrNIqfxf3f96so0+WV2exH3onF39iJlR/sf75k+Vv8AZq3Cgi0J0I4EL8Z+tbdxpttcMl5NbNv2MyfJ92s+5hit7eSJ8lQh3ce3NfrPg6n/AGxjv+wap/6VA9TKafLWn/hf6HhXx8864+H95pNvcxq940aRbXbcy7vmWsP9nn9iSfUJrTxP4qkjtLPazr9ob5v+BU79pD4oaT4B17RtNeGNyzSTuqp83y/d3Vwt9+29r+rSQeG9NvPJt40VIo93y1+RU5TpxPyLOoyqY6UeY+zm8U+A/hzocWieEobWJ412+ZH/AOzV458bv2gNSjsbjTbaSMBvuSRv/rP9pq4q1+IQk0NNVvNY3Oy/dkl+b/gNeF/Gz4xPfM0drNnd8u3+6tTKU5Hl06cYHM/FjxxeaxqUjvfs3zV5pdaghmb99z/H89R61r1zfXUrvMxO2sxLhNob/vqlGJ08vuFuS6eRm2PgP/E1FvI6gd9tV1b5V2fxVatLZ5Gx/wABar+ySW1kQqrn5m2/99U7a8ap8m41Yt9PeGPyf4lqOZUjYohZT/tfdanLYrm7CKvkr5z8t/s1D9q3RqPvf32qTzP3bfdVmWoYVmmk2TTLto/vEe9KBoWrPMR/Cu/+Kr1qu2M+duB3fJVKzk/d4Sf5v92ryvuj8z+Jf71EthfaL1rskjCZ3VqQxwx4SZF3/wB6sLT7pGZ5P/HW/irZs5hIq+Tt+b/bqJf3Q5jet1SbTXR0yVX5FWuFuL6aOaVJnUlX+7/drrYbx7LdC6NsZa4TxRNNa61LC6bQzfJVe5yExly+6dR4T1ZLeRHTd8zfOtey2d5Mvh2F0+f5Pl3fw18+eH9UeGZN6bm3V7Z4buPtHhuN0f8A2k3VRE+bm5kYnxSvJv7FmR0bKru+Zq+e9ZkdtQdtn8Ve6/Fq8f8AseTCbvn+6teCXs264ft81LlRvT3Ok8Et/pip/DuWvd/DbvqGisg6bPusn3q8C8Gs8c4fzPlr6G+HLPcaLEnzKuz/AL6olsTI8a+N3hN7O4S/httqN/FR+z34lXw94si87lJv3TR16l8YPCsOqWL232bYqpvVq8E0ye58N+I1m+60cu7a1HwwJjLmjyn1pqFnCrM6IvzfdaOuS1yx8uRf3W/crN975a6Pw3qn/CQ+H7TUofnZol+61VtSsdzF3T5Wb5KI8pnLmvyntf7Bs0v9jeJLUnEa3Vs6JnO0lZAf/Qa/oz/Zy+Glh8VP+Cffw+bQfIOsRfDy2g067IwUlFuB5bMOdu9a/nN/YSSaPTvEsc0SqRNacr3+WWv2P/4IZft32euaXqX7LvjHWD52ialcLpIlkXCxeYx2LX7XxLVqUvCHJJw/5+VP/Sqhyxp0qladOps0dD/wSS/bYXx/8RPEHwu8cQw2GvabqU2k6tZru+W4hkZflr51/Zu0LSvEv/BXi58M65ZJPZX/AMQfEttd28vKvG6Xysp+oJqT4paEf2CP+C0Gs+IfGem6hbeDPiVq0epadfabEqotxJ95fm+X733qxv2d/Fh0z/gqefGlphQnj/XLpQ3PykXbYP4Gujw/hF5bnNWP2sNL/wBJmfVcBVWsuzOjU2hTl91pf5H5W/8ABaz/AIJz65+wH+294m+GttpzJ4c1mRtV8H3Cr8klrI27y9396Nvlr41+z3OnybJk3N937v3a/qz/AOC+n7CHhT/goj+w3f8AxZ+HtvFc+Mvh/ZSappDQL+8mhVd00H/fO5q/lv1a18lnhv4eGbbu/iVq/Gq8VUiqq6/F6nymDrulP2Utvs+aPpn/AIJkXjT+ONbjJzjRCc/9to64r9v+xa6/aM1zcoJFpZtF6/8AHuldd/wTEtltviRrscS/INAwD/22jql+3vo89v8AG67161wwltbeOZW6cRLX65jnzeB2F/7Cn+VQ1fL9bfofMnhnVJvDfiCJHb5Gb+9X1D8O9YtdZ0XZ9syzRfdX7tfNPjLQUs5BfwptGzcn92vSP2e/GiMwsLl4x/CjNX4rKMoy5jq5Y1D1DVLFFV33so/vfxV7v+zP4y/4SDS/7HvLlvtdr8qKv3mrxbVLdJUf+JP4mjrQ+EXi7/hC/GVvfzXrQ20cv73/AK51GIp+0pamuDrSoVY2PsS102a3XftVU3/3v/Hmq5DY7vKS9uYWWT+KP5W21f8ADa2eqaXDeWj+Yl0iurf71X5rF/tT+dDH8qtsZv4a8aP2on2kOWMIyMZrENNNcpzM3yptf/2Wlt9LmaT99bK7SfM+35fL/u1t2FvcrIm+2/dN/rW+Xczf7NTx6f5fm/Zn2su7czJuX/erqpx5SpRhL3kc/JpPmMqeSobYu/c/zLVG80G4RmdE2ur7naNPu11dxp6Md8aKPM+bc33WX/ZqrcWaLMszoxMbfJteteXrEv2fNvscNr2jw27bLYfe+bbXI6tZ201w/nOqlf7tejeIo/LV0tvur8/+7urg9cX+C5SPfv3Sqv3mrhxETqwtNxlpsZUy7NronzL8v+ztre8GmX/ShNFtYMoxtxxzisGSTbb7LN2WNW+9I3zLW14DkaRLrc+4goC2c5ODX33grGS8TMFftV/9NTPpsqjJYuLfn+RS1S8c6jcxoR8s5VAG6tUFveOqyzPN95tyeW3zLt/vVU8Q3ccetXLLF0uWUr75+9UcdxZxr5c0yp8zN8v3q/KuK1y8QY3/AK+1P/S2U5fvZerOh02+hiZH87ezfNu+6tbFhqSNMyQzbiv3F+7trjdJuIfOl3uu2P5f9qtWO8SO3i2fON7eUv8AEtfOxpxK5jpYby5W2U20OxtzM8jNtVqtrdItqsPks0n3du/+GuSW8fbsSZYxu+dZP4f92rENwkimeZGRV/h3blZa6KcvZil73wnVf2l9nWSN3b5W3LHH/wAs6hkuobi4dPO3DZuij8r7zbvm+asdbqFYVENmv7z5vvfeqaPU3877NNHtdX2p8v3lrshU6nPF+9yl9FhXek3mMPvJt+9S3kiTSIXmYbdvzVUW88xt6chfuSf7NPhkhZBsf5t/9z7q11U+Y0jThLcbLb7b5vMg+Zf+Wn8NUZrW/uJnxwW/2/vf7VaS+TcKfOkk27vkaR6j8l3lH2l8n5vmZq29pFnTTp8xktb7Wi+8LdWZXb73zVFbr5kcT3KSebH99vvVqzW8NsoSFNir8yKq/KtUJp0j+/OqM0vzt/e/2axliPsm3seWXNIfDHCqq8kflq1XNP1JGmTEysFf5V+7urMmvIWxvnVEj+9tbatQ2+rWCs298v8AfXd93bUU6nvcxliKfNEsaTH5jeTI6lvN3I38P/Aq6rT/ADiv2nYp8v5XXbXI6DdWrXw/cq0K/cZfl/3a6XTZJobYpvyixfJ/e3bq
9mOFPksHjuV6m5NFcrG6Q2ak7l/75qK4Xy13xbWG/b5bfw1HHcJDGv77D/3ldvvUbnkZYftKn5NzbazlThy6H2WBx0YxKGsWrzWjJ5LM7fN+7f7tZGpaXDcRt/rFaNPkVfm3N/eZq37eHbuM8Mm6Rvnkb7tI+jp5jPDNx/47XLUpxie9RzCR59qGjzMuy/SMp/C1c3rWhwtG++Ndm/7rfLXoN9o8y3CvcopRXb5VT5WrN1bS0+z7Hhyu/wCbd91q4akYfaOx4zqeLa1oNzDOERF2/wC0n8NYOpaO7SeZbJIyf7ler+INHhZm+9tX/VN/E1cxqGivIj+SmI9u5P7y1zU6kPhPMzHEe2gedSabud5pkYFf71VPsfnTL53Cbv4a7XUvDcLRiRPut99Wesq60eaMb4YVO37td1PFRUeU+MlhZyqmE1uI22Q7VZfv1cs7mSOLe8Pytt3r/wCzVNLZ/ff5Vaoo7d1Xe6ct9/8Au1MsR7h7mW5Tyy5mfQ3w/d5PgbE6Mdx0y52nvnMledaHs+0IsKyMjJub/er0T4flk+BUXmDaV0u5BAPTBkrzfR7xIrgyfN8ybfv1+0+Mkr5Bw5/2CQ/9Jpn1uCwUa3MpLbQ6KzaS33TO7B/9n5q6PTNSfcJndnXZtdWX7tcYt0I4xv8A3Urffb+6tbGjXU0ca2c028Q/fb/er8HlsXiMvhGB2NvfQtZpcwuxaR2/dt96pZL5/MPnWys/ytBGv93/AOKrI0/UHZoVe5kxJFslkjf/AFa1ejuJoZEuXT5N7bv7zf3aujKcZe8fMYzC+zEupHYB3fcW+bb/ALNZ1zFCv+kpM0L793y/Nuq9cMFk/fP8qxfKzVnMYbiNLkeZiNWr0cPKEY8x8Zjo+8Z+qIkyvCk251bc6/3t1ZO65t4fJRIy8j7fmrXuLX7RcCaZ1Yfdba+1qpLazeQ/nPtlX7qsu6meVGN5cqKK/u5ilu+197DdH826qV1C/mbEePYrfvVVdzf7taFv9pdvO+b938y+X/FUN5Cl1I6IjIfveYv97+61ZVJcp10/eM+GNIGCXMLSIr/Ku75V3VrW7Iy7O2//AHaoNC8mwb2Dq/yzNV6z/wBVvmdVZV3O2371cU/fnyx0OyjPkgbenWd75rzJu+Vfn2/dX5vlavL/AIja1f8AiLXDc3kzOti7LBD95P8Aer1TVtesPDPw9ub+5eNb24XZEq/ejX+9Xh9xq25pnfa7N/rf71fVZTlqwseefxHw+e5tPG1/ZQ+CJLca1Dx9mflV+f8AhauB8QaxNayav5N/+8ktWb5k21sXGsQ/an875dv3WkXburjvE+oPJcSOU37lZNu3+Gva5keDGXMc74X8TyX2mvZu+3y33LVbxJr01vZvDC+3cm165LQ9S+y63cWzuqrv+bb/AA1Z1nVHm3Ojtt+7U/Ea8v2jntSkK3hmL7h/drQ0fxE9rsff9779Y+rXEzMfT/x6q0Nw8Mmz7o/vVXvDPSl8bTTWqIk/8G3atcR4q1Z7i6Z3m37XrO+3urDfM33/AOGq11cb2MzPv+ep+yVFdSezkjmmCdBVv54p2fft3N93+9Wfp94isf3K/e/iq5DcJNcStvXf91F/hWqiEi6sPnAp8zL97cvy/NTrhvOJR0bd92ls5t3yI2P4an2/aF8nDfN92iRJlX0ICj5GO37tULyPZIX2bS3/AHzWpJI8LOm//gNUbqNt29ujfw0cvuDiUWjEY3r/AHf/AB6omVBtf+7Usy7JsO7fLTX+6amBYxsK3+9TY1SRfmFO2p5fzPxtoVkA/eL92q/wmhLbxou6SnyRoyq+aiWfbJj+KpFkRmbZ8q/wU/hMyGRvmCfMK19PuPslq+yT52X71ZTHkO/PzVJJIVX5Pl20vhAueXMq+cz7T/HuqrcR/vPv7v8AdomumkkV3fK1Esg3BE+7Rze+KI61jMjq7v8A71fr1/wbr6GF+Ik9/Mv+rtY1Vt/3vmr8hbFWkulR143V+y3/AAbo2MMnifU0eNUElvCu6Rvut/Cq11YL+KeZm3N7DQ/d+G+vG8M9Wc+Uvy79zbq8H/aCh1K40+5hEMgDJt2s3/jtetabrW3R4ZvtKv8ALt+X+KvOvihrFtNG6Xm5EZGXaqfM1etWlGUbHz655H4Uf8F0PgjeeD/HnhX9pnRIdzLL/ZustGm3y/8AnnIzf+O14z8P/ipqWuaFDZ2t+vmbdz+X8tfp3/wUi+Fvhv4+fA/xX8N0hWaa4sJGsNy/NHNGu6P/AMeWvxh+B/iCbQbiXwxrULQ3lncNBP5n3lZflZa+Dz7CxrR5o/ZP0HhvHyivZSkeo+Ntems7x7OZFdmRX/2f/wBquI1rxEn2fZD8rf8AoNdL4sv5ri1d0jVw38X8S1wGqSbpN77lG7+Kvmox93lkfS4itzakEl0j/vl/4EzVWs7qbzC7u2yopo2bckKfNv8Am3PSSXDxxhNn3U+fbW0Y3PHqS5joNL1az8z/AFKsN/ztXT6fq0PmK8KcbNu6vNPOdWZ0f5fvfera0G+nuF8n7SxH91WqpU5Sjy9DOMonS+JvGlhHCkKQ7m+5uX5mZq5TVr57pdjou+P77Vt3Glww/wCu8sbvubfvVlappVsvHkbhJ8qbayjGJpUqS+Ex1keNvs2PmX5qmhyyqMr5rf3qmaxKqnyfN/ufepjW8xXfNt++yuq1fuSMSb5Gz2Zflp8MTsfLSLPz/wAX3ahjjdJmdPu7P4f4astJbSMqu7Db8yMrferP/CVH3S3Zw/vGz/F8tdHpOhvcXUNnHD5u75vl/irDs1e4/wBG2Km5Fauz8LskNiPJttr7/vbvu1wYyc40rxOnD8kp+8dVJqln4B8Oslncr9rZFVdq/NG1cHrXiSaxie5v5mNzM7PLM332b/4mtLxE2pahJ5yWy7Y1+9/eavK/iL4uTQ9Sks9Sm33iou23j+7H/vVz5Xl1Ss/5mz0q2KlGPLD4S5Lr1zcTPc314w3P/DXf/D7xF4k8MtBqulWczN95dybVZa+en13VdVvA8shyW/dKteqeBNd+Jul6cdSe+kks44NryXS/u41/3q+ixeXVI0uWJhTxWIpS5oM+w/gd/wAFfPGf7H3iKy1RfhbFqDRov+kQX/ltt/iXb/EtfYPgf/gv/wDsX/tDwPZfGfwvdeD7hYFi3TKTHNub5tzCvxK8UeNptc1BpjMs25PvL93/AIDWV9smuG+fbtb+7So8NwrYflm3GR6dPjKrhf4kFNn0X/wVd+Onwo/aG/bq8Q/EL4Faw174Yh0uzsLC6aLarNHH823+8tfPI3huZNy1FGr7lfzF/wCA1Lbwo0jpvbb96vrcNRVChCn/ACnxGKxEsZip1nG3MzqfA+oPa3SoNvzJ/F/DX2D/AME69DfT18R6U9s0aRtut/n+VfM+6y18XaLdQ299E7plPl+Va/RH9h/w2lj4FufEKJGEvoo03bPvbf4d1dfN7vKclH+PE9ks7P7Ooebc6L/Ft3Nuqe3t7OOTf5y7Puouz5v96rl1aw2Nos1m8m1U+dlp8ln9om85HbKpu2/w7qd
OPunr8w/TdJTzXQ3KqjfNEq/erRtYUkuG2o22NP4k+bbUelf6tUR+WTa+162NO094pt7ou77sSt/FXXTjbQ2oykP0ezhjtVdEZ0k+bzF/i/2qufYZrhWeZ13Mu3y4027f+BVdsVSSNXZNn/TPZUqKiqs3zJu+5Ry8vvHu4eUzmdY0XaoeFGx8v75vux1yGoabCyzpvxGzs+7Z95q9D1RfMt/9Yynd80a/d/2WrjvEFrjMPy7vv7m/iaspS6nvYVc0oxOaax3bfs0TKNi/L/eWsHXrdYtTnt4yFGQAc9MgV1Fn9p85POf5W/8AHa5vxYEXWbkwtuXAKk9/kFfqfg3K+a49f9Q1T/0qB9JhMP7Jyfkz83/2zvH02tfHTWNKhvWa30eKOziXZ/F95q8z+Gun3OueKLazQbjNKqqzfdq1+0Brb6p8fPF04fcs2syfN/u/LTvAMkOi2dzr1y+Ps8X7pf70lfkPwn4bjeaWKn/iOu+MXjz7DeTaPpV5ugt08r92/wAu5a8f1zXJtQkMzuzNTvEWuPqF89y82fM+ashm8xi+/NL4viMOUb5j+WXfrUkK+Yi92/u0kcLyJn73+ytaulaO9ww2Qtmr+IciGx07ciuXZt1bWn6SzSb0Tcu2tzRfB7rD5zwbl/2q1W01NNVvMRc/3a15eWPKY/FLmOcuLN7VS4RtzVmzruk3gfdrX1q6RlZ4Wwyp92sCaWaSRn3rtrKUiox5iG4mkZhsRm/2mpbNXVmR0/2makaT92u8MWp0Nu7HY7712bqPhH9g09PX7SypvVVX+8lbDafM1rvh+Yt9+sXT7jbPs8v5P42rpLOS2EOzftDVXMTLYzPsc0LDzE2urf8AfVaeks63I+78r/OtLdJDx5PPz7X+f7tNjkh3L5Pyms4+8HLHqei6Xoej6tp6bJtpVNvy14/8YNPfR/Ewtndl+X/erudHuHjtmSzdlZdyv8+5WrhfjJNcTX1tNO6lvK2s1IcY++Zfhi58y62TP/Hur3jwTNu8PiPzFYLt+WvnTQbt4boP8pr3X4bXf2rw7M/zDyU3Oy/eq/hCXPsY/wAWtQdtLebqjbv+AtXicm+SYu235q9M+MmpbbYW3n8Nu+X+9XmKcMKZdOPLG50Xg9Q1xGm/Zu+81fQ3wzkeTR1hhTPyfNtr5/8AB8byTRun3d/8VfQ3w3t4Y9PfaPk8r5f9qjm+yYy+Mf4wvIZIz8kgMa7NrfxV4d8QvDL3DS6lbWzZVq9j8TR3OoTM9ykg+fbuaslvCr3y/Zns2f8AiWTZTjyGXvc/MWP2cdck1Dw7LpT3i+Zb7WWNv7td1qVhuje5R8n+JVT7teT+BY38A/E6G3mdhDePt8xvuq1eztcuF2b/AOPb838S1Pw+6aS5Ze8ep/sSwywW/iZXl3KZrQp7fLLxVT4X/tKeL/2Z/wBqnVfHnhm8ljFv4hmaeKNv9bH5p3LWt+xzapbJ4meOYMJLm3baP4OJPlrxj4vgp8YvEUpkwx1mcYP93ea/aOJkv+IN5Kv+nlT/ANKqHDDm9vI/oq8afD74W/8ABZf9hDSvEXhnVhF4m0y1+1aDqqsvmQ3ar91tv3dzLtr8/P2aNW1zwB+1rZ3vj7TrifVLK71WDWLeGLMhuTa3MUuF9Q7E49q80/4IH/8ABTC5/ZL/AGiYvgJ8SdY2eEPEk+2CaaX/AI85m/8AZa96+HGNd/4KZ6jNpl2TBcfEHXJWlgj8wvb77pn2juTHux9RUeFNWdTKs4ozfurDyt6NSPtOFqUaeBzSsvidGX4KVmfcn/BPr4z2HinxBefDfxBdE22u6cYfsrtuVm2sv8X+zX8zH7UXw70TRf2jPif4M0dFa20Hx/qlratH93y1uG2qtfvx49h1T9jb44WXxavrdbDQZory98OtM/7xLVYW27v9qv56de8ZXniT4yeJ/EOpXLSN4g1y8vJWZNvzSTM3/s1flmNjLD1H/LI+Cy+pCtTjzfFG56f/AME1bO5sPipr9tJjYNBbaSuD/r4q6L9sXRIdX8Y62hhYyiG2MbKM/wDLJaq/sCWhtfi7rpOfm0E8s2T/AK+Kt/8AaHla5+MGp6YzrseGAYbt+5Wv1nG/8mPwv/YU/wAqh0yl+/b8j5LmtodW06awuUZXj3LuauY8GapL4T8VGGZ8bZfl3V3HjDT30HxhP8i+XcPtRV/u/wB2uF+IGlfYNQXV7ZPk3/O392vxeXve6dlGUubmPp3w7ep4g8PR3jvhW2/Kv/oVVNQheO4kmh/hX+H+7XD/AAF8YPqGlrYPNv8A9lm2/LXfaxE9quxE3tt+8r1HN7tgqR5Zcx9Zfsb/ABSfxl4Jbw9eXO6fTX2eX95mj2/LXsPluxdPL+Zvu+dXxB+y/wCPH+HvxSs7y8m8uyuv3V02/wC7u+7ur7qlKXE3mJNHLbyIv7yNflk/u1w1o8srH1WU1vaUOWQ7S4Zm80ImV2VPbJeMpMKfMvy/7y1ZsYYVj2bNif3d9WLXTUjXfCjK+/am2iK5j1PaS92JnXdrDJhJLXarfxM+1V21QvLV1kkSa23ity+t3l2o6K4VvkXb92q11Z/u33zttb5vMrojT6oqNWXNyxOG8RLZxyfacNmRfvLXnPiK6htbgw2yMXb5kaSvSfE0CKyPCkgSFG2RyL8u7/erzPWrGaEn/Vum9tvzfNu/3q4sRGHKephfeOcnWa3uNkL7xu+eRf8A0Gum+G0iML5Yx8u9GB9c7v8ACuUure23TI9zMiKu6VVf/wBmrf8Ag87GG/jJyFaIqc5yCGr7nwXa/wCIm4JLtV/9NTPfyqP+0xl6/kZfiWbbrl7HK7H/AEhiMduazobwrMwhf52T+JN26ovGN3cL4nvoowNpu3ViW461jyakJpvO+bbGzKrL8qtX5lxPHmz/AB3/AF9qf+ls5qvu15erNuORI9QzvX5tzfLVyPWIfM2JJIzL8yfw/wC61chca48K/uXwyttT5vvUN4mgaHY8qpNs/h+bbXgxp8xh7Q7NtY2qJppv3rbt6/e+b+KrNj4iRnb9yymOL91Mz7V215/N4gFxGib/AN60Xz+X8u6mf8JFNDt+dn3fKnz7qJU5S0COK5NT0218STblT5UWPdvb+9/u1Ja6tNIzu9yu1vk3L8zbq81t/FkLKttM/wD9jWp/wk7283yJGw3bf3b/AHmrf2cuoU8RCUz0nT9Wha4EKOzpGm3+7tX+9VzT9YSS3k8l2Ks33d9eZ2/ix4ZXMG4rt3ff/wDHWrQ0vxM/meek23b8zK33ttZ806Z3UK0JTuekfbHmjVN//LL5lb+GrEkltfWfmQux3fd2/wB2uJsfE9t9ohd7zajL821tzNV+38WJbyLsfYnlfeb5W20qeI5XoejTj7xuXnzSI+zcv3fvVk6lNbSTR2LTK7fNt+Xbt/2t1Yt94geQM9lMq7n+Ztm6sW+8aeavyTN+7fa6/drKpU97midsY81K5ratrXmQtbIG+VPn3Rfe/wCBVlRa55wi8mZmTZtRW/hrB1TxVNc7kR1Z1T/V79u3c1ZU3ibbEUNzsRfmRv8Aar
pw9Y8vFU+U9M8O6pbbYrZHUv8Ae/3q7DR9W+7C8yoGX5mX5vmryXwzr0LN8833f4f7tdrpuqboWRHX5m3V9nKn7vMfklPFTjI6231Sbz2SaaRfn/e+Yvy7f71aSTJcSRBPu/e2x/LurlrS83eZs8xl+6jTVrWt15kavM7Yjbcu37q1jKjGWx7mDzCrE342+0XT3PneaGi2su/7tTeWkjOiOu3725U/8dqto86Ru8z2ysv3X3fdZams45pLhb+Ha6L91furt/irzqlO1z6fD5lLlTKF9awy3Hzv/D8rfdrF1C1s5ISly7EN/E1buofvl37F279qN93bWXcKkcyzP8z/ADLt3V4mIjyyPYp4z91c43WtP2Mu92f+4uysebR1uFWbZsGza7L91q6y+jRpFs03D97ubdUH2G2TciPu3M3zL/FXn1JcoU63tjg9U0F5GaZ/3S/elZvmXdWLfaLtgZ9mG+7tWu+1O3mjjeF03KqfKrL95t33qxdW0ubzC8yKFhT/AFap93dR7T3Tow9OMp3OH1TR0t5Mum1fl+WqbaS5Zk2SJ+9+7t3V2OoaeJGXz02fw7f4lrOmsU3b33fN8yNRzS5OU+xy+nCMT07wTbfZ/g0tsw6afcjBP+1JXmmm6fM0e/ycsr7VVm2/NXqnhWHb8LVh8sD/AEKcbR9XrgdG0/zPkhfHz/Osj/dr948ZZcuQ8N/9gkP/AEmmejlKjz1v8X+Y3T7GaVk851cqu35vu1o6bbzRscOxDfKm77tWIdNMcEcMNtt/i3f/ABVWlsfOzYPM2N+1vL+9/wABr8EjU5TfGU/c5mSaWzyQqVRUDJt2yferRVXjs1R3VV3qz7m/h3feqvp+l7Wb/WMPuurLV6GFJDsRI2+7t3fd2/71dXtGfDZhLlG3Vim24mmnzt+baq1R8x1VHhh3+Z8m5V2/8Cati6bzGbZHJKrJtb/Zb+7WZfLM3zzOzKu1fLZtrbq7aM48tuU+JzD4uaJl3W+a8e2Ta5V/3W19rVn/ALmSR5vmDx/MjK3zSNWrJCjMkybWLPt27Pu/8Cqmtv5l0yJCyMz7VZv7taSlzQ908fl+0MZXt5vtk07R+XtX5V/1n+9UV5HbXUY3+ZFt+8rfdZq0ZrRGhXy9u/ft3bGakWxm8vZ8ruvzfu12+WtcNSTlqjro0zHa1RZkSF2H+z/Cv/Aas6LYPdagltNM0sXm7pd38S1alt4W3u/mJ8/3m+81P8A2r+MtW1+wsJlf+ybJnn2t91v7q/7Vd+W4f22J16Hl5xiPquG5Y/aPOfid44e81SfSrPaI7d2VV2feb/ZrgdLvB5k1s8y+Yz/dan+KLpLfxNf23zEtu+VvlZa5KPUraHWvs1zuUbN26vs4x9nG58DzTlPmZU8Xao9rM+98fPt3N/D/ALtY2oap/aWn79/zbdu5Xql481aO81BpERm3bvmrmrfUJrXd3T7v3vu1EZfZOj4jl/E8n9n+IpJId21vvL/tU2TUppoT2/2dlM8YNuvldI+W+Zv9mqlrcbY97vurWOxXMNuJMzP/AAlX2tVa4mhY7HT7tLcSOzM4f/Z+aqsr7lFL7Q4j5JnEe9Pu/wB6o5Wdfn/ipIW+Y8fL/dpxUbd79f71Ei/hFs5N0y1d0tHlmkjT+9urPs5Ns/P96r+jzeXeGZd33/m20v7opGpDHtkK9NvzVZjkfOUm27m+X+9UM0YwNnP+1Sx3XlxmBP4fv0pcsTP4hLyFJG3pu+X+L+JqpTyeaV/ib+7sq150zfPs+RU+838VQSKh3eT95qAM+4jT5nwuWqm0bj+PdWj5O5m39F/iqrNG+3eiUR900K6HaPu/8BoLbWb5P+A0/wDiaPf/ALtRSfe3ZzTjIBwERG807zHHyJwP7tRxkg5xkU5sKcZpAO3Sffcf8CoaTzDv3/71RMH6sKVW+UjtQBatI5bmeO1QBjO4RGJ7k4FfaXw1/wCCDX7a3xg8Ka94u+GsGjaxp3ha0F3r13Ym4ZLWP8YgZGxltiAttVmxtUkfGehyb9YskZP+XuP5v+BCv66/+CXf7HHxG8I/se+PrjV/Emgv/wALb8NhdBFjqBuFtAbe6hBneNSoOZlJVC5XDA4YFR9jklPhrD8OYzH5mlKpCpQhTi5Simpyl7T4Wm3GCcuyt12cz9q6sYx21v8Aofzy/s9f8EJv2tP2kPivp3wp+F3i3wndapfszkyXNzHDbxKMvNK/k/KijqcEnICgsQD+hf7M/wCyR8av+CK/xTfwJ+0uLLVRqNnHdWN94TuWuLa8iB2lo2nWJhhgVKsqsMZxggnvPEP7MP7Tf7Hn7Yfhr4R+AfHWlf8ACeXNzbHw9qXh7WU275/kVJBKFMeclWSVQHU8B1YZwP277b9pnSv2jdV8O/tZeNY9e8V6fBDE17a3EbWxtyu+IwpGiLEhDbtmxDliSoJOf3LBeGPBuYcUYaWAr05YKpQdRQ55+2k+ZJTjrbkV0nfW91ZvWPkV26tBxqJ3Tt5f8OfSdv8A8FYfgxDYx2Y+HnilNhydi2/P/kWuJ+In/BRb4a+Mdy6f4S8RQBgAWcQ54+khqP4Of8EWP2uPip4Mg8Z65deHvCSXkaS2en+ILuX7U8TKGV2SCOQRZBHyuQ4IIZRXh37TX7I/xy/ZI8WQ+FfjL4VFqt4JG0rVLSYTWmoIjbWaKQdxwSjBXUMpZRuGfUy3hPwXznNJZdgsTGpXjf3Y1m27b8vSVuvK3bqcs8FOFPmlFpGp4q/aF8J+I7meVdE1CNZHJXCoCM/Rq+BPjt+wZr/jH456p8TPhT4i0vTtM1ZxPcWWpNKJFuD99hsRhhvrX05SxxvLIsUalmYgKB3Ne/V8D/D6qrSoz/8ABkjTDV6mFmpU3qeS/Av/AII2ftwftRPIvwc8N2eswQyGG51JWkhsoZAqsUa4lVYw+GU7N27BBxzS/tC/8EF/2+P2d9NbxF8WvB9jZaSpUyavaztd2kW5gqiSWBXWIlmCgORkkAZr9f8A9uz4z+J/+Ce37Lnww/Zf/ZvvX8Malq2lNe6/qdowN2Nqp5pDkZDyzyOxcYKiIKu1eBm/8Er/ANrf4g/tQ+JfFH7In7UniGbxnoniLw3cS2j61JvnXaQs0PmDDsGRy4JOUMQKkZr+eqnh3ldbJ6nFeHwEHlsJS9x1av1iVKE+SVRO/s09HJRa2W70v9DLM8RKaoyn73orX7dz8LJf+CeHxPdht8ZeH9oOdpef/wCN1ufC7/gkj+0r8cPH+n/DT4V3+j6trWpSFLSxt5JRnAyzMzRhURQCWZiFUAkkV9g/FXwXL8OPif4j+H0ySK2h65d2BEzAt+5maPkgDJ+XrgfQVneGvE3iLwbr1p4p8Ja7d6ZqdhOs1lqFhcNFNBIOjo6kFSPUGv2qfgF4d4nL3VwVKXNKN4OVSbjdq8XJJptbXSadtmeV/aWK5/ef4HFP/wAGq/8AwVPcH/infCPK4P8AxVkHP61R8Tf8GzH/AAU9+F3ha98b694Q8Oz2GlwNcXi6br0dzMsajLMsUeXfA5woJx2r6Qj/AG7P21J5Vhh/a
d8dO7sAiJ4hnJYnoAA1faP7b3xj+KP7Lf8AwTz8L/Ab4g/EjVtY+I3xEtWfxJeanqMk9xbWjYe4iDFjtUBktsdGBkPXNfkGaeDeLyXNsvwVeOHqTxVXkUYfWOZQiuapPWpZRhHffVrQ7YY/nhKSurLy+XQ/Gj4Jf8Elf2qf2iPEraD8G/D0HiG7twgumshMIbYPnaZpWQRxA7WwXYZ2nHSvUPGv/BtV/wAFQfDemXHiOf4f6LdW8C7ja6XrkV1OB/sxRku59lBPtX6jf8EqLnXvEn7AHxL8D/s2a1Zab8Uo9UmlS4nRQ/7yGMWzbnJGCI50RiAqvkkdWaz+xr8Kf+Cv2kftGaJqPxq8XeIIPCdnen/hIh4i8SQXtvcW+07kjjWVyztgBXUDaSCTjOfL4h4K4Vwmb5lTw/1fDwwTt7PEVqqrVrQUrwSlFWne0LKTel+hrTxeIcIXu79UlZH4K+Of2Bviz8PbTV5vEGsaZb3GhxTtfafOs8U8bwhvMiZXjBVwVIw2MEYOK+f7r5rjZ/ef/vqv2f8A+C0Ou/D7xJ+038Wbz4d+Q1vHpUtvqUtsuEkvo7PZcEfMQSJAVYgLllbgnLN+L16rSMmw4bbt+Vq+a8UOFMh4ew2U4rLKEqP1qj7ScJycnFvldtddL28+x0YKvVrc6m78rsSSSQy4REVf4qZHDBJcb+nyfP8AL/47UCyQ2+1Eh3t91/nq1YW9zeTLGkLbm+7X5LGMTv8AiNfwrYzX2qJbJtb/AGWf5q9Hg0dtNsW2Ju+ba23+Ks7wB4PvLOFdVv7ZovtCMq/J91f4trV0nibXrPRdFfVbny38uLbFG3y/NXkYqt+95YRuejhcPzayNT9nzwC/xB+IiabebZrPT7W41G/h27ttvbwtIzf+O18YeINRuPGni3UPEz8ve38ku1U+6u75V/75r9MP+CTXwr8T/EjxB4/1vwT4ek1XWm8IXVrYWqqzfvJvl2rWh/wW6/YD+HH7N/w6+BfjPw/8N9P8L+KtWt7yy8U2emuqrcLDGrLI0f8Ae3My7q9XKcdQw9aVGXxM9rH5VVnTw/s/tHxH+yP+zxqPxe+IFnYP91pfkVk3LurY/bk+J/hnVviI/wAHPhRZW9poHhVFtb+4s5dy6pfKv72T/dVvurXtvhLw/afs4fsSeKv2gNURYNVmiXSvDTRsySNdXHy7o/8AdXc1fDtmXkgyzMXZ90srdWb+Jv8Aer38u5sTUlWnsvhDi7D4fJcJRwcP4so80v0QCN45Nny7Vq1DCi/cfb/F8tJGqL8n3y3+zUjRvu2Rpt/v7vu17R+cS2FaTj+L/eqS3O4+Xt+9/Fv+7UO6Ers8n5Vb7y1csLczQ5RFpykKPPEkST7LNH5Ltu+9X0B+zV+1V4w+BPjTwx9p17b4P1SdrfxHbzLuW13femX+7tr5+aNFmVs7i3y1r+Ire5vvh5Klna+dLb3Cskkf3lVvvU+Xmiac0r+6fr/4Z1nwr4ys01P4e63b6xYXn+qutPuFkWRdu7d96rjOlvMl46Mo+7t/9mr8XPDvjXX/AIa6na674Y8SalZ6la/8eraffNH5Pzfwqrba+0fgR/wVM0Sz+FM2m/G/SvtXibS0/wBAmtU2/wBoRt/z0/uyLV060afxHXCUJH2/p8KTTfJIpMjbXb7rKu371b+k5b7+5Qz/ALpVT5mrxD9l39pXwB+0locuseG7mSw1G3+a60e8lVZ1/wBpf7y17ZY6gjTJNc+Yjr8iR7a1jW9p8J6NGMpQN2zi81pbmG23/L87bvu06SJ45tiIv7x/n8z5VjqGxkmhjf8AiWT5tzP8qr/EtTFopv3yPlVT5l27t1axkerh+bl0KWpWsJkKPuCfd3L83zVyniSzSHfvmVvn+T+9Xa6hCgs3+X5GTdt/irifE1y821Nnmqv3I2+X/gVctapLofU5a+aRg6aqfaGRIVZ2bd8qferkvGsXl6/dQmU8Kg3rxgeWvNdx4bt3a4MKW22Vk/h+Zf8AvquR8axMfG9xEF2s0sfB7Eqtfq3g075zj/8AsGqf+lQPpsPOLlJeTPx++NENxafHjxLaAfN/bEykt/vVB4q1aHT9Ft9EQbWj+dv96us/aY8OyaX+1H4qgvDwNRa4DL/EteZeINQ+338k+zf89fkkfgPwnF/71Nf3inL+9lO+iGNGb2pY7V5GxXTeG/Cd5qEyeTbb/wC8uyqjHmOWU4xIPD+g/aGX5flr0nwn4JRYVuZoVC/w/wC1Wn4H+Hr2savdIr7vm+Zfu1Z8UeKLbw/F9jR18yNdqsy/drb4fdMeaVSXKiLVprCxt/J8lV/hdq5TXdcj2l3uWO7ms3WvFlzfM3nfxP8AeV/vVjXl49xGOxrPmmXy+5oQ6lePJcvM53bv7tVGmIXZTppvm2eXz0qPy3ib95Ux934iveFEkZk+5xtpbNvm8j73yfPTWj3b9n+7VixjHmK+/bt/8eqhRl9ks26zM5T7q1tadI7Qqj7cR/3f4qorbzXEf7lNm3+L+9V2FXhjGxKCZe8WWt5pmZ0RlLfe21DIs0JPkp82/b8tamm/MuX+bb/e/iqxHo6TSLCkjAs+6l/dJ/wlPSdSns22IWxXO/FWXz7aGb/prXZ3Hhm5tY2eHcR/47XE/En5bZEmVi6t/wB81HL7xpCWpxtmSlyAf71ez/C3VHXR7iz87/WRbvlrxWP7w5zXqPwzvoYdKldH/wCWXyrV83LEqsc58Vr0TaosL7fl+/XKWse+4WtLxhfPeaxJv+Yq+3dVXSLd7ifYlMfwxOy+H+nvNcfOmNu1v92vVLfxZpvhuNYXudu377R/NXnmhwzaRpiuIVYqn3lrO1bULm6lZ9/8X3t1RL+6ZfEeo3nxQ02YN8m59m7buqBvihc3H/Hgiwp/d2V5jDHeTTD52+Vfuqta1qHsVZJH5VN1Eeb4iuX3eUl+IHiK5kvLS/uXb9zLvVY/4a9v8JeIn17wpa6lsWVvKVWZVrwrUoX1axkTZtCruf5a6T4C+OvsdnP4Yv5stC/7j/ZWnEnl9w+yv2OZHe38ReZtz51sfkGB0krxn4yxBfit4jYwHadYnLEt1+c167+xTefbLfxIxfcVktMnGO0teSfFBHvPjD4mijAcprVx8p/3zX7TxNHm8HclX/Typ/6VUOBXjXkcT4g1DUvDuoWnifR0aO5t3X9591q/Wn/gklrU3i/9sf4Va94luN82riSW8kkPLyTadOWJ9yzfrX5ReJrF9Q02aF/m2xbkVf7tfb3wJ+PGqfsx+E/BPx50dz9o8NWel3I5xlSsUbj8Vdh+NYeGKSyrO7f9A0v/AEmZ97wk+bBZh/16f5SP1N/bF/Z21X4w/Anx38GoL+W48Z6TfNFoytulurq3b7sca/8APHa3/jtfzMfG/wAE+LPgr8bNS+HvjLTZrO/0nVJLWaGZNvzK22v6pviX40uv2gPgt4b/AGxP2d/GE9nJr2jLpeuzaay+aqyfd+b/AJZsrfxf7Vfjn/wXK/4JreIfCWpeF/Gfh1LfUvF2rJJJqmh6fO11eLGu399Lt3NuZmr4
fmpY7KOaUo80du/mj8jo8+BzbkjGXLL7vI+cv2Dtk3xC1G8yC7+Hzlh0/wBdF09qT9ou7Ft8e9UYEsRBbfIP+uKV6N+x7+xN+1R8CfBa/G340/B7VvD3h/Vov7N06+1S1MH2i4YiXaqNhsbI3OSAOK+z/wBnP/ggzYfttaVbftReOv2g7Tw9o+sTNDbaZaaY010jW7GAl2JCAExkjnoa+8zKrCj4F4WUnp9af5VD6SnRnWxbhBa2PyC+Lnh/7dpf9pQ7t8Ls+7bXB6hp7+IND3+SzfuvnVkr+mzwZ/wbbf8ABN3whpEt/wDER/F/ijam+X7RqXkR/wC1+7jWqOp/8EYf+CFc6p4Mvvgr/Z0906+VcR+IbiOTc33drM3/ALLX4U84wSlqz1qWW42rH93HY/mO+FmsTeHfFSQyrgM2Pmr6JhP9pQwukOVuIN22P5q/fn4Of8G7/wDwRTl1GbxZ4b+EGp66ltcTQSrqniOaWDdH95tq7a6HUPBX/BFb9kS7m8Mah+z54K0+e0kVbO0bTWvLiX+795mqK2a4Kjyzb0kdOHyXMsZzU4QcpR8j+e/w/wCC/FuoTJN4b8PahcPHLtRrGzkkZf8AgKrX3f8As96X8VPH3w5017z4e+JP7Rtbf7PKraDcbpmX+Lbtr9Y779uH9lr4K+EtG1lfhfoXhuXWIWk0nw9a6NCmoeXu2qzxov7v/gVc18Lv+CvPhXXfGWseGtf8JQwJYzqbeaGRctH/AMBrircQ5fGav+R7+X8J57Ti5wht5o+KfC/7OP7RviqHzNK+Bvii4RbfdK0mjSL5n+7Xf+Ff+Cfv7WPiSFUh+COrWisyr5l48ce1f+BNX3Ha/wDBUT4Jy2rTCKYvGjHyU4b/AL5rjfit/wAFk/g74B0K6ns9JuJ7rZ/o0ef4v9qpjn+W8t0/wNZZHxDKfJ7K3zR4FD/wSg/a9mj8xNC0NE27vLutcXzWb/gK7azfEH/BKb9shLNifBWj3Kf88bXXo9y/7X+01Ubj/g4D16bR45H0LTneKWRZWjuPmb5v7tcR47/4OFfiJLpV1b+HNHs7WVp/3V1v3PGv+0rVhLiOlKN4wZ6EeG84pytOcIlTWv8Agmt+3RNbtbJ8AL+UrKyxbb23b/gX3q4DxV/wS4/bvsmd5/2YNZnC/cks7iF//Hd1TaT/AMHB3xe0fxn/AGjc30dzA1nJb+U27/WN92SqviH/AILu/tAeKrZdHsdcl0rdKrNfW8i7v8tWE88jKPvUpHpUcgx8Ze7Whb5ngnxY/Z6/aE+EvmH4nfAfxho0Sy48+88PTeWrfxfMqsu2sT4Nzw3K6pLBdRyATopC/eTG75W96+vvAX/BbD496bMLfVvFlprFsqq0qX0aybl/iVt3y1zX7W/xp+Gf7QEvh74m+D/hvoeg61eQ3KeIp9EsFgF6wMZiaQJwzKGk56/Ng9K/T/BLGUK/iZgoqNpWq/8ApqZ9Fgcpx+FqRqycZQW7Utb27Hxd48vZI/FOowLKwBvpDk/7xrmLrWtkeYXx/Duatfx5Pb3HizVpba4Yj7fNGy7v4hIQ35EA/jXG61JtZXR1+X+Fmr4HiaF+I8Zp/wAvan/pbPCxk25ya7siuvEm3akMLN/Cn+1VaTxVbJu/c43fxLWHq15tkOxGXd96suaSaWNdiZSP/b21xQw8JRPna2InE6qPxY+0+TNtbdt3N8u6nN4wn2o7pGqfdRv4mrjbSWZ2L/N+7+Vfn/hqyrzLMnnfxN825K1jh4bHH9cqyOwtdcS5jffc7d3zff3Vbj8RPHJE6bnWNv4f4a46NraFv3PmO/8Ae2fdq8rTMrPv+9/C38VRUpyidFPEc0Tq4/FTyMyCbf8Axbd23a1WrDxVM22Hev8At/3v++q4/hY96J977m6pLe4fcYXfYdy/eauGpRnL3j28LiPh7noGl+Lrlplttit/d8v5ttaK+ILmaNUvE8z+6u75t1ee2s1yrGazT5t+1GV/lrVtdUufLbfNw3y7d+7/AIDXBKPLM+lw8jpr7XJVWZMyQt5W92X+GsnUtYmmxG958u35P9qmRzSWrND8yxtt2bvvL/e3VBqEf7sbIdv9xWX71KXKej7SMYlGS6htpPOd8bn+bbUH9pJIy/O2Ff8A1bfxU7UkTbvhmbd8rfN92qMjeTIsJRsbPn8v+Gt6MeaR4OMxHLzHXaHq0yyN53l4ZNz7f4t1dr4f1SaNVd3V/mX7r/NtrxrSdcms45PORj/stXa+Hdc3Wqs7shZPm/2f7tfcU5e6fj8j1vR9aE0gfylXy2+7I/zf7tbWj300zb0uWi85vu/e2steb6DrUCx+TD0bbvb7vzf3q7DQdVdS77Iy/m/d/vNRKUEdOHrS2kdtY3H2NUTezvH80si/8tP+A1ajvN0z3UIbzJnVdqv/ALP92sOLUppLPZt2yNuZ2/h+9VhpraNneH50XazV5tan9o9zC4qX2S3qV55bAOm4/d3L81QNcTXNwHmgjlC/61W+8rUyRnaMW0PDsm6JpKfG1zLCba5K/Ku5mX+9Xz2IVKR9JRrVeSJVWDzpBNDCv32V/nqNY87ntl/2fLb+9WmljM23emxVXdt+7uZqtLp/+jojou/71edWjCOqPVw8u5xWoaa8P3E3sqM3zbt22si+09Li55MgMiqz12mqWSQt/pO1X+4zf3axb6z2/OEmabb+93P/AA1lGXMerRl72pyN1Yuryu6eavyqnyfNVObT4YVbyVb/AGFrp7rT4Zd7w7o3/vN826svULFI0/vHerbttLm98+oweKOp8MRGP4bpEyuSLKUEN1PLVxmkw/vPnTK/88/4q7vTNp8DsEcsPskoDL1P3q4Kxknt2H2aFsfNu3N826v3fxoV8g4a/wCwOH/pFM9TKqsFOrfq/wDM3bWN47dUS22Oyt95ttSwh2mP7vZt/wCWiL8rf8Cqpp90ZoTvRtn3t38W6rthv8sI77Nz/PGzfK23+7X4PzcpOZYqPL7pbsYUk3lJmi3PufdVpY4Vj3Wz7V/gVU+7UCx7YHme4h2r/CqfxVYj85WZ7aFVVvlrWMpx94+IxmI9tLlBf3Hl/afvtudVqpcWe6P7Z/Gvyo38Natvbu0fFsrPHuWKTduamXFujWqO8MyBvmfzH+9/wGto1re8fM4inKpPYwbu32q6XKfJJ8yMtQR6akr/ACIzDYuxd/yrWncWLyLIdjJtZvmk+WrGm6K9vH9pdMrNt+7/AMtGWn9Yly+8cv1ecZGfDZ5V7aaZsbN+3+7/AHasNp7tiF0ZG/iZmrVXT/lXcixKz/xJ8u3/AHquJpMMjNC8yq2/dEv8LVwe0fPv7pvToy5rM5C80mGO3e5mO1dvzRs/3q4j4P8AjWHR/EnjuZ4WtlaeFd0O1lbcu1f91q6v4nao+k6hHpsKMrLEz/u3/u186eG/FD6b4o8SWcyTFr61ZvLjl/5aK3y19vklCpTw3tX9o+D4grxni/Zx+yVvilefZ/GVzC8ckXmSs3mSfeavPvFkj2N9FeQ+Ydzbf3lbXjzWvtlxb63skzt2StI+7c396uf
1q4m1Kz+0vMpRkZtv97/Zr3IylI8L3TlPF2oTTaozptCsn3qzrySFbYzTbfl+4rfxUniK4VZN7p8+2ud1rVnkh8k7g397+9V/aKH+Il84iZ0wrf3axIpvJZofmrZgc3uijzDny2rMkj3Sb4PvUfCEfeIpP3f393+9TJB5kf3OP71SXCBl+5yv8NQTcMqb9w/urRI05eaRAx29ak2Oyq9RuN5yaVfu7PMpc0S+VCx7Nxy3SrWmSPHL9/738NU6ktWCzLu6UhSidJbSSeTs7L/FTZFeNm2fNu+9UdnM8kOxNtP3eThFTKf7VP3dzEb947M7W/vNUF0zxr8nB2U9piy4mk+X71MkV5FP75WWl8Og5EDL5ihEfbJtqG4DpDh91WrhflR4X+eqszO6N87FVo+If2dSvI2G+cq1RMoZ99Sts279nzVFMvaq+EuO42HO7bu2nNSbUZN9Mt13ueean2umUdFqhy3I8pko9NfZ/DUjR7V87fTRJkYC4NZklvQ5HOuWeHyPtcf/AKEK/qX/AOCSd7eL+yJ+0aq3coEHhjdABIf3Z+wX5yvoeB09K/mI+CHw08TfF/4p6P4D8JQK95d3itmR1VY40+eSQkkcKis2OpxgZJAr+iH/AIJf/tqfD39lvxV4m8EfG6PUJPBvjSwjt717OIyrazKSnmOgIbYY5JAxTL8LhW7fsXBeS5tmnh5nDwdCVR+0w0opLWbpVPaTjHvJRtp5pdTlq1IQxEOZ23/E8y/Ycurm9/bV+GF1eXEksr+OtNLySuWZj9oTqT1r7R8ZeCfDfjr/AILyWtl4muFWOws7TUbWFkQia4g0tJIl+cjGGAf5QzZToOWX5/8AHF3+wH+zj+1j8M/iV+zN8VvEniLQdI1221HxPHPYGQWqxTqwELyLCzsVByhU4AB3knaMz9rj9srQ9Y/4KETftY/s5akb2HS7mxk0q41TT3jiumgt0hfMZKyeU4VhzsfDHhTX7Dm+BzTi7P3i8BRq0YVsuxFKMqlOUHCpKpFRjJNe63a67x95HJCUKNLlk07ST07H2n+2frP/AATy+JfxmutN/aG/bE8aaJrXh5ltv+Ec0rULiC106QKCWREtGG9shjJuYngZwqgeMf8ABT/9rL9kn4sfsseGPg58KvixfeOvEGj6vBJbazqFrM1xFBHE8bvPO8cQd3DKCQrFipLAHDVq+M/jF/wSS/bumsvjF+0HrGs+A/GcdtFBrlvAJkN6URcZeKKaOZF5RZMRylQAwACgeK/8FAf2yv2f/ih4C8O/syfsp/C+y07wP4TmZ7bWL3TNt1LLkr/o7OzSJE4w7vJiWVtu4Dad3wnBHC1aGcZTQq4bHKphJXmqvs4Yei1FqThNU71YzltGMryTvKWmu9eqnCbTjZ9r3f8AkfJ9XPDupR6N4gsNYlEhW0vIpmEMmx8K4b5WwcHjg4OKp0V/VU4xnBxezPJPvb/gu3bya545+GXxNsHkfS9Z8JzJZvvymVlWXIGOpWdMnPIA9OeB/wCCJ/hzUda/bgs9WsxL5OkeG9Qubso2F2MiwANxyN0q8eoB7V3XwR/bd/ZH/aS/Zn0b9l3/AIKEf2pDeeHpQmieLrW3c7Io02Qu0kO6RZgjNGd0bI6orMSxrbuf2wf2A/2DPhV4m8M/sHXWq+I/G/iKzEUfia/tnkjtmBIRpHnSMYjDu6pHGVZlUPxyP5spy4ky3gKrwLHLa0sU1OhCoof7O6c5u1V1dklCWqa5rrZX09N+yliFiOZW3t1v2sfHv7ZXi7T/AB3+1f8AEXxZpTyNbXnjC/aBpJN5ZBMygg4HBA4HYYHOM15pXv3/AAT8+JH7Kfg/9oKfxZ+2j4d/tnSrmxmNpealYtf2sF6zAma5twrtPuXeAdr4ZgdpOGTjv2wfE3wB8YftDeIfEP7MvhiXSfB9xcKbC1eMxIz7R5kkUR5hiZ9zLGfug9EGEX9pynH1cFmkMgjhKqp0aMGq7S9lK1o8ile/NbW1r6O6Ss3xTipQ9pdXb26nq/8AwSV/Zph+O/7Tdv4z8UWit4Z8BRrrGqyTD9286k/ZomPu6mQ54KwsD1rgv2/P2lp/2qf2nNf+I1pdtJottL/Z3htCeFsYSQjAdvMYvKe4MmO1en+Bf2yPgL+z7/wTR1z4SfDHW72D4jeLr2ePxXPd2HlJa2jcSSrPynlfZ18tRu3h2dyqjBPxKvxk+ELkBPir4bOemNct/wD4uvmsno1MXxtjc9zVeyVP/Z8NGp7r5ItOpVSe6qTsoyX2Y22ZrN2oRpw1vq/0XyP0/wD2V7/wB/wT4/4Jvj9tXSPCdnrfjzxrM1lp1zc+YY4g08iRW7cqVjUQPK+zaZGAXdgIy8N+z7/wWr/aTg+L2n23xxOj634W1XUY4NRtbbSEgmsYXbaXgaPBbbkHbJv3BcZBO4c9+xh/wU1/Yn8Qfsy/8MS/toatBqPh4XZ/sXWdP1JJxBEZDKquIn86No5MlHjD5V9pUKp3eg29t/wQt/ZYuNM+O2r/ABm1rXRa3qvoVhq7z+Rd3cZDqsYe3gjldSAdrybP7wIr8jxv+rVHMM2p8S5ZVxmKr1ajpVYRVVOk9KMadRStScFo9murey64uo4w9lNRSSuttet11PBv+C4X7M3w/wD2cPihr0HwysYdP0jxT4KudVXSIGfbZzMJ45QgbIWNmTeqg4XcygKoUV+Is1vMq74du77rV+sn/BUj/gob4C/bQ8X+JviDYeLdNsdHtPDFxp/hrSbjWoJJlgEch3sqMR5sjsWKrnGVTLbQT+UckyTWju4+Rf8AvqvzDxhlmNPJshoZjVU8TChJVPeU2nzKyk03eSVk3d3aer3O/L+VzqOK0voU7PT3mkPz7G/j/wBqun8C+GbrVtUi022Te7Ovy/erC0eJJPn+7/F8v3q90/Z48MwtcjUprBnb7z7f4VX+LdX4TiJyp0pXPbw9P21WJT8Sa5b6Pp8Vgk0bvZp8/l/wrXk/j7xdc65Jt85hDG3yba6r49al/ZesXFjbTY85md12/dX+7XllxeJdRNt3Y+7t/irmwWFjpPc76uKlT/dn6ef8G/Pxavvh3458QajaWE00MGkfariTz9v+r+8qrTP2wtJ/aB/4KJftXTfFfx/4buJfC2jxNZaXpNnudLGz3f6zb97dI33q+e/+CP37QulfCL9qLSdP8Q3VnDp2oBrW8W++6yt/DX7R+HvFnwd/ZMste/aS+J3xK8JaL4L0pLjUPKSeMz3m1d0UMafxfN8tTSwl8zcXufreR43J6WSfW6qvUhH3f8j8Xf8AguDceHvhr45+H/7IXgPdHYeEfDMes65DG25f7Qul+Xd/tLGv/j1fDW/d8mzc392vU/2rv2mL/wDbA/af8fftIeIdPW2HjLxBNeWdqv8Ay72/3Yo/+ArtrzC4037O29/+AV+hYOjGhQjA/C8+zKpm+ZzxNSWrC3Z2X5+v8dTySf6xPl2L9yoY4/3ex3w396rG1/4NrfJt+aurlieSNXZuXYjfe+dmrY021eWFk2Lj+HbWNJN91Jkwu7/gNdj4P0lL5VQr8z
fw0yJe6ZN5Yzww7/vvt/uVom4+x/DvVbn5d8dvub+Fq2te0V41/cpu/h+9WT4zP2X4S6mEh5by1bd/D81ZyKpnlEep/Z4mvZ5llmb7sbVd0+SZVa5cfeffuasKxtZrqUMseRW62+KFkz/wGg1+E7f4d+Ptb8J6tBrGg63cWN5ayq0Vxay7W3f/ABNfdn7MP/BU12mh8MftIaas8TMqReJLFfnVW+VfMj/2a/N+x1SaFldE2stdR4f8RTLCN/8Avbdm6plH+U1w+Iq09j90tB1vSvF3hu28YeDb+G/0q4+a3vLeVWVv9lv7rf7NSyTTR3O9zIiMm/cv8Nfkz+zX+1l8YP2edSe8+GfiFYra4+a8028XzbSb/aaP+Fq+pvhp/wAFWJrqaHTfip8K7X7NM/m3F9oNw0bbv91v4f4qX1iUdGj3cHmFCMfe0Z9htffavuQ/Lt/hbbWBqVj5ly+9F2fLsZpfmaofhz8X/AHxq0GLxF8NPE9vfpJ80Vm21ZYf95avSRvJCLZztk83c21Pl/3azrVos+owNRSipxkV9Nt92zybZY2b76r91q868cxovxPnijXj7XDgE9PlTivVNBtZvtSQfZpNy/Lub7teZePFeP4uyhhgi9t+q/7Kdq/X/Be39sY9L/oGqf8ApUD6fL6/tKko/wB1/ofmd/wUi0v/AIRv9qLxBd20OwX1lC33Nv3lr540rRrzU50jRGYv83ypX2p+3R8HfEnxs/a+m03R7Dzo7fSYV8uFN25v71dT8Gf+CZ+q6W0Oq+PYfscEn31+8yrX5VTgvtH4pm9aNPMJwXc+QvA/wR1vXJoXhs5m8xtrMqfdr2/wj8CbPwtYrea3/ooVW3s33t1fUXi7Sf2df2edGmRPJvJIYtqxyfI33fu/LXxh+0J+09N4o1Saw8PWy21uv/PP/wBBqpVI/ZPN9nOp6D/id8StN0WH+x9A2rt+Z5P4q8c17xJLqFyzu+//AHqx9W1q51SZnvJmZmb7rNSRq8j/AO03+392op+8b8vL7xN5010yuif8BpWh2wt3ZqmsbJI4w7uy/wC1U81qir02/N8jVRPxe8Zd1Dvfd977vzVJ85I3sodv4anmt4Vh2bF2/wDj1QvIki/+PPS+2VKMSNlO5v8Ae+8tWYV2zb3Vcf3lpkaom5U+633P4qtMx8pMIv8Ad/4FSXuy94Udje8Pyw3MCo/8P+xV6exTzNkJYLsX7tY/h9d1xsfcxb+61dPb2bpyibv7rK1HLGQ5PlM6xaSOTe7sqf3Wb71alncPHdfO7Mjfd/2agnsY2kV03SM33l/u0jW7rIH2Nt/h20fCTHmlA6/TptNmj2Pc7mX5nXZXlfxskha6RLbjc27bXVWupTWbbPOYGuF+Klx9ouYn3/71OPMVT+I5BPvCu88D332XQ5nd8FU/hrgq6jR5vsPh65ldP4dtEo8xrU2Od1Kd7q8kkk+9vre8G6bPNMjJ/wACZv4a5+CJ7ib/AHmr0Lwvpz2dj9p2fw/dojsKp8Jf1aVLa1CQvjctYyxwzN8/K0mtaptkaJPmLNurPt752Xe7/Lup/ZMeX7R0FvdQpCqJ/D8qN/FUsbSXUmdn/fVUrFXmVT5O2tyxtvs8fz/Lu/hWlGH2QlV5S7pOmpHbSuE/gavPJ9Wm8M+NHmhm2rv+Za9Ek1ISN9jS5UL/AHf4qwtH+BHxd+MXi6LQfhj8OtW1q+updsEOn2DSyTN/sqtVKIqcoyPpr9jj9obwj4HmvLDxhKba11ZI2S+VWdYXjD4VlVSSG3dexHvke2R/GH9lLVr+S8XUNCnuZmLyzNojF3Y9WLGLJPua6b9gz/g2G/4KA/GfTIdY+MOnQ+ANEuHV0k1yfbc+X/1xX5lr9Hvgl/wam/skeBraGf4nfGbxRr10sOydbHy7aNm/8eZq/TOHfFrOeHclp5WqFGrTptuPtIttczba0klu272vra9rGM8JzzumfmI3j/8AZhkh85xoLJ0DHRePp/qq9D8JeDR8UH07wZ4O8JHXF1ZY49M0iz08zfaFIBjVIQpyMAEDHGO2K/UyT/g2u/4J0NJbFIvFgSDbvj/tv/Wf+O/LXyx+wv4P0L4a/wDBV3RPAHhpHi0zQPG2s6fp6yuWZIIIbyJASepCqOe9fqPB/iZjc/y/Mq9TC0IfV6MqiUItKTSk7TvJ3jpsrdT7bhDCOng8wTe9Jr8JEHgz9gD/AIKeeE/CX/CKeBPhb4u0bRbhNz6Rp/iSG0gYEdGgW4UA+xXNPtv2Bf8Agp/4dvW16x+HXiuyuUT5ryDxZbxyBf8AeW5BxX6t/F39qv4b/CK3P9vazGsm/atfGn7U/wDwVv04eHLrRfh5f2s1wZZF3LL8zR7fu7f71fk1f6RuYUk1DL8K/wDuHL/5M8jA8JV8XZttL+vI+Ividpv7VWuasfhr8WPFXiHWLnT5g40nVPFP24QSYKhgpmdQcEjI7E1+nH/BOH4S6J8JP2ftD07xd8SYZ3RZZvsvmCNIHkkaRoypJzgsRnvjOBnFfj54i/a0m8J6hc/Ga5uYbwX11Ikscy/6THJu/irFs/8AgqT4ztbyO2sL+4jRnZkVW2sv+z/tV+ecb+MnEvHGVU8txOGo0qMJ8/LSi43lZpN80pbJva1763srfYYHg/AYWTlSqyUmrcztovKyR/R5B418HwKlomsW0ny9pFNc78TvC37O2seG5/EvxI0HQprSzj81ry6hQFQvo33q/Cr4S/8ABVHxnNcW1lrfiS4V5rqO3gj3MzNIzfw16/8AtOftvfEvwHotnpXjDUo5XhiW4TS7iJpFmbbujZl/2a/MqWb1KfuzpnYuEKcZc1Osz7y0f43fCLT/AAZe/B74ZWt14Z0zV3kS3ubOVnut0jfeVW+7ur83v2j/ANnj41/sMftTXfxd+MXiS38Z6P4gtW/4QjxBq1vtttNb+Lz4/wDn4VfurW5+xT+2xpXjjxY2sa3eMbi4ut3mMnzR/wC7/dr6p/aUvvg1+018DtX+AvjCdkttQ/fadqN7tlls7xfmjm/76/hrhp47nlKNZ/4fI+qw2AlgpxlhvhfxefzPz1174ueA9e1a88f+KvEk1zPqC/uLrULpnvL7/dX/AJZx/wCzXl9n8Rpv+FlQ6r4A84W0zsjMvyqy1698Cf8Agk74z0mbUfiB+118XdPs9Nsb2RLVtLl8+W8j3bo/L/hjXbUn7R3jz4LfDmzTQfgD8E76+TR4Ge41S8ibc3+03y1206PNFa83MdNbMqVGteH2e5Mmm/FTS5X1vxJqSwwMi+RC3ysyt/EzV86/tGfEzXrdryCz1hXdXZd0b7mVa9W1LxN4w+K2i2sPijxzefYJrJXSz01Vj2qy/L81ZGj/AAA+C0dxFc3+iXl+6/LE2qXrPub/AGlX71d8MjxMve0R8niOJI+1l7OVz4lvPiN4q+2LDC7YklZf3KMzSf8AfP8AFWjbt8TtcUPpvgzXLtZH+9b6XM3zf981+gPhXwj4A8Jag03hvwHoth5nzOtvYRqv+y3zV00etXMby+TqrRLt/wBXb/Km3+KvYpZXh
qceWR5VbHY7Ee85H5wN8Ef2h9S8nUtK+D/iS88yX7sdlt2r/wACp/iL4S/tUaPH51/8EPFEUUe3fItluVW/h+61fo7NeXlzcD55pXjT+/taobi+vDCUS5uFXr5fm7f/AB6r+p4WJnGtjPszPy+vrj496PeJDeeFPEln/wBM/wCzZG+bd/u19Vfsp+J/GviPwLcR+MtKu7VrW6CW32yDY0i7eWx+Ar3bXPtLrNeQXLYb/W/PuZv+BVyFpuN7cyMWILLhn6nrzX6V4NYShDxMwVSO6VX/ANNTPpMhxWNjifZVJ8ylf8Fc8H8f6Vb6X4p1ee3yTJqcszMDkeYzN8v5EVxGpYM/z+Xhm/hr0b4uW0za3qDEzAG6bH93Ga87urWaaHyfJ+b+Dd/DX53xJH/jIsU/+ntT/wBLZz4uUnOXq/zMC+s3mco7sy7/AJ/k+as24s9rGzTzPm/h/irrxpe5lfYqj/po1TLoO1mSHan8btt3K3+zXBTqR2PnMRTlI5X+x08svv2bdv3qaunXMbG5mm3pv3J/FXeWPhl5YXmmtm2bN3yp92kbwjcrIf3Oz+L5l+9WntoRkc31WUoKSOMsrF0jhd3bLfNuVf8A0Kr9vo811MEhdtypu3bN22tubQEb5NjKGWpF059yO/y7vllVflpVKkJS94ujRnExV09/7/8AHtfzPu/8Bp/2F23zTJsRf4mSuh+x+X5UL2zbG+R2ZPu1Jb6Hc3DFPup91q4KlaEo8qPawuFn8Rg2envb7Psybov+ee/7v+7WlYwQsQwTD/Mvlr97/gVXLfQ4ZIfMdGX+H+6y/wC7RHbvbs88Ls6L8vlt8rNXBI+jwdOUR9vCW3zOjDbtZ2kf7tOuIXW1cuNw/jX+7Vux01JpvJ2b/LX51b+Kn3Gn+dZiHY0Qb5vlqeX7J7NGnzR0OaurF2mKImxFi+9J826siaz/AHgld5Ei/iWN/mautbSXaPZM6n+Gs/8Asl51dHeMBVbbXRRlA8PGYOXPzM4C11IzXDed8pb5k/urW5o+seYyo9+wH8Fcku9pN7vmTf8AJtT71La6k8fz71xX2EJfyn5BKPL8R6xpuvcFPOVlZF3+Y+3ctdz4f8Q2ybZkmZo2b5WWvDNC1nYyeT8/95pH+9/s13Gh+KnXc/n4ST76q/3WqK2Il8I6cep7HpmvJdSK+y4cs+3y4327f9qt/T7xNrJcozOzfw/xV5Z4f1y2lX/XNlX2/K1dZo+tM0eyG5YOrr5sleTiMRPWCPcwtP4ZHb2mzkzQs5mX+Fv9Wq1fhazaNd75ZnVU/i3Vzel6k74hS/8A3bff2/xV0ujzukZd3X5v4V/hrw8RufUYXXc1Ft0aGO2eTe7ff8xflWrs32beuEZF/hbZ/wCg1UgkEcYdH2/N8zbPvUv9oeYuxH8xl+Xatcso80PdPToylza/CZWpx2crN9mTj+Ld97/gVc3eW9nawsiTTZV/4vmZv/sa6HUrzyYVm3xn+KX/AL6rB1a8hVXmmlVmkl2p8v8AF/CtEf7x206kYGVdM8OLnzl3Mnz7vlZV/wBqszUJJ5rX5J98X92tS8+xpvmT55pPlf5t3/Aa56+kmkjdESRdv/LNaz66HqUcV7OZ2ukBIvARCsSq2koy3turgIJJmtTcwOvzP8m77u2u+0qRH+H7OCxX7HNyep+9XmU0bvtRH3fP97f92v3rxnhKeQ8Npf8AQJD/ANJpnVPMZYRp9zoLFprGFn+3rs8pWZtm1a1rWOG4zJvyyv8ALtf7q/3q5/TZvtMiQv8AMipt2tW5Z/uLlLx32wr/AAtX4DKM+blOCtmntomlYyPCG3jeqv8AIv3VZa0VsUmZ/nZWkT5P97/dqlZzQ3C+dC+NqbmVl/8AHau2sc0g2O6yLIny7k+Vf/sqXN7usjy5VOaRPb27x7NjruX+L+H/AIFSHT0m2wwpJhW2qrP8q/8AfVXrDTLm3gRNi/K3977y1qw2dtcRibZHvZP4v4a56dYytzfZMRtI+1Mr+TIFb7zb/wDx2thtLtmjj+zI2xflT5PutWla6L5bRvCilWfdtWtKGzkuLcW03yfP/q2f/wBmqeaMmk5Fxo+5zSObXTZvsYWZ/MG7ay7PvU29s4dNs2d9sSKrPLN975dtdCump88MKK6N9xWf7teeftReLD4F+Hd48Lq8twqwRRr/AHm/+xruoQlWxEYHNi5Rw+FlUfRHkln4k/4Trx7rGpbFW3tbVktfOuNy7dv3q+dfGmoTaB48a5SZQs0zI7Rvt+Wux8D+NE0PWNVsIEWJLi3VZZF+Zo683+L0iXX+nwo2/czK1fpdGn7OjGkfj1WpPEV5TcjM1jVJLy4vNGm+XvAv8O2sHS9chjmbTbybake7ZVG41aa8VLxOXX5XXd/drnteuJluH1KB8Kz7tu+tRR/lL/i6NzcS3ML7g38NcfdTPdKUf5TH9yt268Rf2lpOxNu9fuVzl1Huk3pu2f7VHvyHEv6PdJGrQ3L5Rk+6v8NVJLhIZsmZvl/hqKNvs7B9/wAtMnBlcuE20FlopDcfvkf5f7tVriNFZvKTaGqOKV4n21Z+1QyITs5quYPhKW35s05PvClk+VmSm1RXxCP901ImSy1G/wB008fIyf3anlCRr6e0a437l/vMtWZFh+dN7f738NVdNb5dm/duq1cyiSEp0VU/hrOUp/CZ+5zkEkkbbtn3qj85Gbp/wH+9SPOnl/u3yf8AZpi7Np+fbVFD92z7gxUVxGjSNIU/3/npzbNuzp/tU/anlsjv/wAC/vUE/aKkzfvOOPk/76qu/wB01Zmjzl/u7aqfwfjTj7pcYj7VtswOauTWs0kn3/vVTsXzcL8ma6RYEmtg+za+3alVGMxS5UYFwkiybJui1C0bL9zpWtdWqeWibMP/ABLVDy3iIx12/PUC5j27/gnAhH7W3h8t1+yX3/pLLXr3/BS34y/Fj4b/ABZ0LSvh/wDEbWNGtpvDommt9Ov3iR38+VdxCnk4AGfavI/+CcygftbeHc9fsl9/6SS12v8AwVjx/wALr8OFv+hWX/0pmr9zyfE4nBeCWLqUJuEvrS1i2nqqXVanFNqWNV+xyHwH/aF/aE8V+MV0/VfjJ4iuIlhZmSXVpSCdvpmvY7/4rfGmwjW5HjnVJUQZfF6/6814H+yppqXmvX135O6OG1Xd/e+Zq+gVs4W2xp92RG2K38VfiWN4mz6ElbGVf/Bk/wDM48Xyqpoj7g/4J8+LvCfxP8G2R8ceH4NSvA5jke5t1keRgvQ5HWvtfw98CvgvqGnxNc/CnQmnfh0TTo+P0r8q/wBiv4np8M/HEWgvNNFb3E+9I/u/N/F81fpz4b+PHw38G/De58VeP/GWn6Pp1nBuuta1KXbFbr/Erf3m/wBmvWwHFedTw1p4qpdf9PJf5nl1FUlU5YFzxx8B/g7pMLsPhxosA2ZVVsEDH9K+U/2v/jt+yr+yjo6X3xNk0iwvJ4WMGg29sr30hH3Ssajcqt/eavCv21v+C6Wr+Mrq/wDhj+xtYf2fZSRNa3XxC1aBvtd0v3Wayhb/AFa/7TfN
X5V+O/FPibxX4uvdf8X+I7zV9Qmnbz7/AFG4aWWX/eZ6ipxNn1aVli6v/gyX+Z6uCy+75qjPrXx//wAFNdc+IurzHwjp0fhjSYnUWtvCA1zIP7zyDp/u1na/+0n428W6BHeWnxV1fTNQj5R7HVJFikX/AG0zw1fIO98ZD45qwup38YCJdvt/368+pmXEPtLxx1X/AMGT/wAz1FhoxleKPoXT/wBoD9pnWIT/AGD448VXmDhpIb2Zv61U1D47ftgC4a1tfE/jHOcKVmmb+teLab8RPGujqE0rxNdWwX7vlS7avD41fFvem34haplfu/6W3y1pDNs/hvjKv/gyf+Ztyp/ZX3H6PfCm58Z+KP2LTN45vLy41m88Makt1LfMTMzEzqu4nvt2j6Yr4+8KfB+2haKF7m1kmZv9THPG0i/7TKtfVXwJ1DX/ABH/AME+0vtTvZ7rULnwhqwM0jkyO2bkLz69BXxj8JfBOq+BfElt4qv3k+0w/N5e77395Wr9W8YfaV8myCc5Xk8LFtvVtuMLtvuzz6FRU/aWetz7S/Yv/Zfv/Fvi6yeHSvtFusvzbW+Zm/2a2P8AgsdqFtdftDeGP2afDvmNpfwt8Mq10u7cralefvG3f7Sx7Vr7X/4JY6H8Pbr4Tv8AtCaqkNto+j6XNf6lJt+WFbeNpJPm/wCA1+cnijXtT+NHxC8U/HLxI7Pf+MvENxq0rSfeWORv3Uf/AAGPbX4rTj7OhdnHTqPnlVkeN3ngWa+0/fCjKY9uxq4vUvPtbiazmdkVX2vu/ir6D1DSYVXYHZEX5UjrkfFXwoTxpH5EP7u5bau5fu7f71clbBxxf+I9LD42cPi+E4Dw7dQ2tv52yRP4fubq+uP2Q/Deg+JtBks33STSKrL8m3d/s18neNvh54q+Gd5BZ+IYG8u4T9xIv3GX/wCKr2z9iX4tJofxJ02zvJo5LJZW82OT5dvy/wDoNfF59g8RTpuB9TlGKoVKsXL4TU/ai/ZN8aeIvGH9o/D3Qbi+Zkbda26Mzf8AAa5T4M/8E5f2iviZ4nt7b/hANS03TWlX7RqWoReUsa/xMu771fob4V1azj0C38VaVcRm+a4ZZfsqfL97cvlt/u1u+LP2irDQ47zXviR4wkt9I03Tftk8zRfu4VVfur/tNXJgs1n7ONOEfePpa2V5fOXtec+Bf+Cxnhv4afsxeK/hf+zZ8DfDFnpl54b8KLq/iDWIVX7TdXlx8v7xv+A7q+M/iF8Y/in8ULK203x38QdU1WztW3W9pcXTNFH/ALq16B+1H8adb/ae+OPiT4161JIF1S4WLS4bj/WQ2cfyxL/3z83/AAKvJ5rU28hfZuTb8lfo+Gw0XShOovePhsRjKsas6dGTUH0Es7jawRH4/wB2r7SPJGA7b3/g21mQq8M2/d8rP86tVy3uETMz/Kn/ACy2128vunnSkS28myb98i71/i30s10NrP5NUdUZ4ZEvPlZPuvt/hqBbnaQjzMf760uYZr2snmSbH3K/91q9l+Degp/Zb6rcpt+Vdn+1Ximh3KXEyJMfuv8Adr3v4e+Rb+FxMJtz/wB5qfLzGcpFLxhZpDdvsRpUXdvjVvmriPi8ws/hvJZ/KryXC/Ktd74mk8yMvt2p/e/2q80+Nl0kfh6KL5t32hV3N91v71Eub7IoL3zgLKKHT7Vf3eX27qZNdJcfP/C3+1UdxIjRqibm3Lu/3aqx3AZv92o983j8RbhkRlZ9m0fx1qWN1ux5U3y/e/3aw47j+NHbG+r0Nwn2dE3baA97nOw0HXLm3uAEmZVb5dtdz4f1yFdsaRqTv2srfNXk+m3STTbAjBdv+sb7q1q/8JhDplwn2BGeRf8Al43fLuqvdKPZ9P8AFlz8P9Qj8Q23iG60qeGXzYri1naOT/gKr96uv1T/AIKkftSyaOmg+HvG1udq+V/al1YK9zt+7Xy42sXOpahLf6lfyS3DN/rJHrc8O2v22ZX+81R7GlKV2VRxOIofBKx6Bq37Q/7Q/iaZtS1740+JJZW/5537RLu/3Vr7L+BHxF8SW/7PmlfE3xHdTazqNjp095K17MS1yYZJCqMx5wQgXJ7V8H6tdWtisNtBcqWk++q19qfB9Fh/Y6jWZyqjw5qG44GVGZ8/lX6/4NU4wzbHJf8AQNU/9KgfacH4qtWxtdyk3+7lv6xOQuP2zvDFn8ZJPip4b8NzRLeRRxPZ3EXzW7fxf71dH8WP+Cgm3QWtbDclxNE29tnyt8vytXytJ4i0qzXZpqb9vG5vvf71UL64sNcVvt9t5is+394+2vyH2cYnwlaXtqvPLWRzPxi+PHiHx5qjzXl9JJ5m7f8AvflrzS5vri+m86R2J/hr2K8+EvgnWbX5JpLQqm3dH81YF18C9Y0+YPYOt7C3+qWNNrVUY+8HwwOGsdNubhRv+b5vvVtaf4f8yRcv92u20f4SaqrLbJpUxO/7qr91q1tN+EevPcNbx2DBv71b+zI9p/McTFpsNra/Ii4V6z9UmRV+R1WvTrj4FeP75fLsNKZg393+9WQ37NPxguZFR/BkjIzfPN5qrtX+9S5BRrRkedXMe1TvfLN/eqFVTn/vnbXsln+yD4tkk/4mviTSbBNqv5lxeq23/eq237N/wx0dvtPiH4tQv95mjsbfd93/AGqy5YfCXzfaR4psc/3dq/LV2zt3WP54WU/7Ve1aX8KfgCyqltealfTNKrRL5qqrR/xfL/er1P4f/sm+HvHV8mm+DPgteTNcS7Eur64ZlX5fmZv4VX/eq40+Y55Yjl3PlLSLe5hbf5GTv+9XW2du81uj/edl/hb71ffXh39mf9mb4Q6PPbeLfhjpvibxJ9n8q1jVma0s22/eb+81YWg/s2/DTVNY+2ar4YV3ZVaXTbOLYka/7P8As1XLTM/b1f5T4l+yvaxrv3Dc1MjiRbrY8O4/d3fw1+iUP7PPwKtbo2z/AAl02K3hZW3KjeY3y/MrV5t8WvgV8H/Mlu/CvgOFIlbbLIr0/Zlyrcvu8p8V6hYoreckLNt/u15t45mM15sHRa+7dD/Zl03XN7p4VWNPmZZvmVWrTu/2KfgtY2Ym13wxb3N20W54YWb5qXLHlCFbll8J+dWk2JurgJiui8QWM9loax7P9lttfeOk/sE/CvVNUhmTwfDZwSL/AM92Rfl/2q67w7+xP8AdJ86HUvAcepvv3RW9xKzL93/x6nCMf5gljJSlpE/NDw1pH2i6R50wiv8AN/s139xZXjWYs9MtriZ1X5Vt4mbdX6KWXwf+FHhqFE0H4N+H7OX5lVZLJZG/3vmrpPDPwjub6H7Xc6bp9nDbory+XbwxxW6/xMzKv3aUuWOpP1iVSZ+V0fwp+KniC8CaV8OteuVk+55OlyNu/wDHa+ov2Vv+CFf/AAUc/an0yPX/AIf/AACvrLTJuftmq3C26/8Aj1fof/wSY+Cz/wDBRb9py/8AD2l+cnwt8Cyq+rXkabV1KRW/1at/dZlr97vC3hHw94K8P23hnwtpMFlY2cQjtbWBNqRr6CuiNShQjdwvITWJxWkHaPc/nY+EH/Bod+2
jqzw3fxL+LvhnRIpP9bHHK0zxr/wGvp/4ff8ABoF8GbfT4/8AhYf7S+sveNFtnfS9OVl/4D5lfs1swPljH4GuR+HPi9/G8Op+I4Jt1m2qTWthhfl8uFtrNu/2m3Vnic2nTpuUYRj6L/O4U8opuXNUnKXz/wArH5zfB3/g1P8A2Dvhx4ng13xx468UeKoIJAyafcNHbLJ/10ZPmavvX4Ffsn/sw/sv6RHpHwK+Cvh7w3DCm0XFjYL5zfWVvm/8er0jc3c1ka1bzXnyIjbPvNXxONzrEyd4HuYfC0o+6WrrxVZKjGGZWEf3m3VxGv8Axshs5pIIbmNpFbair/eri/j78QrfwLobusjIkcTO23+HbXyav7V3/CG/b/GetnfbNKrQW7JuaT+LatfPVsdmGIu3I+nweWUILmlHmPtPWPjVcaP4fXWtV1FbTzpFiiVm5aT/AGf71fkV8L/iI/w8/wCCiF58Rb66jZrLxrrU0srnarlvtQP0BLfrXtmm/tbeAPj94ulv/i1qt94ZttPv1ntfMRl/4DHXy7d3/hu0/ae1zUb7Fxpa+ItUfLHIkj3T7Sf0NfvHgxUqzyHiNT/6BJ/+k1D6zJ8NTo0a6jFK8X+TLn7fX7bt54k8VXdsl5NbPay/PCu7arN93/8Aar4q8YfFabWGW8m1Jre9mf8Ail+VvlrR/bi8babq3jee5s9RZ0kddkkdxuZVX7q7v9mvmjVvGk0l0yJc5C/d3V+M08LSlC6PJWIqUpcszsPG3jrWPFXljWLyQXML+Vu+6rL/ALX96uKuPEGvWEjQxw+dubbEyv8AMrUxfEH2hvJd1DyP8rSV9mfsd/8ABHzxd+0P+z4f2tviz8aNB+GHw4S5YWviTxLbtLPfhflb7NAv3lVvl3NW0MLSStI9KWMoRjF31Pnbw74R+Iug6LZ/EL+29N0prN1uLCS4v1Z/l+b5o619H/aY1X4xeJtXPj/xlcX+q3Uu9Fkl3Ky/d2r/AHa+mr79i/8A4I1+HrBf+E2/bk+InjL7KzLOug6dDaW03+7u3Mq1xnj74I/8EmvDscWt/BaHxRDeKrfZb661xpG8z+FmVa5JQy2Xxz947q1XHxpRUIWj5nI/Cfxd4q8I+KUfw9DIf4W8lNv3q/RD9iHXtNsdcitvjxpv9sahMm+10u8bbHa7vuSN/e+X+GvyQ1T4gal8NfGe+bW5r+2Vma1uN33l3fLur6r+Ff7cXhXxVr2meMUmaz1lbCO11L7ROqpIsa/Ky15mKwtOPvRReFzD937Pn/xH7YfDWz0nwt4rstW/4QzSr/QZv3U9g8XmNbq3/LRd33q1f2k/2RfAPxH00/EP4f2dr9ssoma40m9TbBeQsv7yP5f9mvkH9kP/AIKC/Ca/8PC78R+KYb4WaM0sfn7YlVfvbmb+Kpf2af8Agof4hvv2ltV8H+Jri9vfCeu6jI2mwPLtW3t9vyrH/erLC46UI8jiRmWUyrVVVpz+z9/kfAOuX2ifCX4i3nwZ1vWI47631SZ9Gs9nl7rVpG2qv97b92um0u8t1VIf4tzMjM38VeWf8HFfh/w5Z/t+WNt8Lbq609v+EZtdRtVtfla3aSRv/iab+zn4i8bap8MtMvPHepefcqyq8yrtZl/vNX6LhZVqmDjKfU/Mak6dLGTproz2qLU5+P8ASVk8xP3qrF92ry3k1qoPksHk+V137VWuVs9W+zzboSu3Zu8tU+8v97/ZrX026uZLrfvaRG2ruX+Fv9qrlHl1O+nW+wdXp+9o2h+0szfL93+L/Zp+5JJAknnRJJuZFb7y7W/iqhpa3ir5T7Y1jbc/8Tbf/iq2Psr3G3fPvX725krlqS5Tvp1PsmNrGmw3EMyb+G/iWuRurKSzuXDmT5mPDnvntXeTWv2WN9m3Kt8ism1V3fxNXL+LorWK9C2YITc2cSblzx92v0rwYd/EXB+lT/01M9vI3J5lTv5/kzxr4i6Ib7U7uEkoHLEqDgcn71cJceHUtWebfkM33W/hr3TxV4VN7a+a0bASKHJ/2d1ef+JvCqNM3+hqkf8AeZ6/L+J6kpcQ4yP/AE9qf+ls1q/xpX7v8zgE0lGV32fOr/M3/stWbXT7mE+ds3rtXcu//wBBrebQUMizTJ86ru+X+Jakj0Gbzt/k/Lt3ba+ejU5dDhqR5veQ3SdJhuFebyNrtt2fP96rS+H/AC1WZoVmC/dZX+9WvoOhpayOk3ludu5V/u1uaf4TMcRhm01Sjfw7tu3+LdSlV5ocxtTpy/lPOrzw/wCTF5zo22RvuqvzLVJtBuZJNj/M+z7zN95a9R1Dwj8yySIzBdzfN91qzpPCaNIkiIxH3dqp93+9TjWnL3WRKjLnsonCLodyrjy+qv8AxVdXTUt2CTQ5RV27o/mbdXSyeHYYwiP8zMu3b/E1WbPw7IFWb7S25V+dmSsZSh8Uj0sNTlzcpyseizSQtCiMq7/+Wi/d3VlXWl2cbO7/ACqv8Lfe213WqaXeMuya5XYvypI38Vc7q1vctJNtmjKqq72/2qzp1OY9yjHlloYJXy408v8Ah+ZP722pGuJo5tn2WRxs3fL96mX023Z/Ei/M7L/eqH7Z5x+020i/L9xd3zba1jKR69GnGQjTf6OEKSB/vbZP4Vpk1mkK/wDLNmb50/2qYs1tNCNiZWNNqbqTzIfOyj/dXav8TNWtP4jjx1PqeXarYPY3Don8Kfe2bWrFmkdt8aDB/wCedepeJvCvmKs+xV/i+X71cdrPhSazk85Nv7z/AGa+kp4jofi+Iw0o6nPaXcTWbb4UY/Jt2/71dZouoXTRh9+7au3aqf8AfVZEemzRMu/aGZa29JsZo5UtYUx91mb7v+9RUrHNRpcsveOt8PyQxqtzM7fN8u1W+7XbaTqWJPkdn3J97+KuA0uxeGNnjfzv4krq9Nvfs0Pnb8PH99VrzqnvS5uY9nCx9n7x3ei301nt3orf32rr9C1hLhV/1aLs/i+WvLrHUoZI0S28xTvVvmetqz16Xar3Lw7N+1t3yyM38NcVSjKUz1KOI5T0tdWtpoW8lJBHu+VlTau6o7rVEWb55tiN9xl/vLXJaX4gmaN3d22fdRo/mXdUOra69vdb/tPzfwNG/wAv+7trGOHn3O6OK5o+8dBq19Gsn75+W+VG+9urIvrx5DseZif7sf3d1Y114mc5+07W/hRf4qo/29bTb5EdnKv8m37q1UqMuX3zT65SjLQ0by++aWGb7yt87fd2rWRdXkMz537Sy/eZ/wCKoNS8QW0K7/Ok+b5fLb/0Ksu8vpmb7iu2/d83y0/qvuxKlmUUeq6C0k/w43eblmspsOQP9rmvMVjdmEMKLj5VlX+7XpPhlwPhaJC5IFhOST1/jrzyx3tKPkV0/ut8tfuPjGpLIuHEl/zCQ/8ASaZ3ZpiEqVCT6xT/ACNXS1+zqET5nZvmZvm+X+Gt2xsz5becjCON8o2/duasfS3RYfM+0qkv/PNvvNWrDLCsyQu7O0L/ADf3dzV/Pcqkuc82GK5ocpsaT+8keHpJHtZ/O+626tuzCXClPJZJWT+F/lWubtbya3
VU8jO5tyVehuJPJ/4+WZ1b/drmrU/3vOduH5pSlI6G1uob6PZ50Z3ffVvvLtrWiurZdttNbbH3/wAXy/w1yEN+8kkWy227f4fu/NWtZ6s65TyVfb8qbn+7XFKnKPvROyn7szrLW4hk2wj5Ek/hZvu/7VasLW0zb4PnSNNv95q4iHVkhZnM+f73ybm/75rVtdVdXR4V4Xav/AacYx57m3N/MdPH8reS8ypGu7bJInzV8n/t6ePEvPE2m+A0dcWe28umVPm3fw19G6n4kTTrW5uby+VGWJnRvK+VdtfBHxa8YXnjDxpqnieaZm+2XTfe/hVflVf92vqeHsL7TF+1l9k+T4rx31fAxpR+2clpmvFfE1xamZYkuLdk3NWD4uuHkhe2fkxqqvVbxRffYNUS8875f9ml1TVIdQt/tj/P5y/Mtfe8v2j82+E88m1AafcPDcrwrttVflpl3B/aFr5Py7WX7y1L4s06PdK6bUXf8n+1WNp946/upvl2/KlPlQSlMz7q3urHd2RmpkkjzbpFdvmrW1S3+1Wp2P8AN975awnieBz6Uy4/CEgKBlxTJJE28VMpWQh/mP8AvVHPCit/8TSlIqPmR7fMzv8AvUwllb56e33zs+7Ss26P56UTQbTWV2bpTqKctgCpGj2hNn/fVR1LJJuVS6YK0yJblzT1/ds+/dt/hq55iLjZDlf4kqlZyJ5Owp81WtrrIQj/AHan4jIim+Vm/wDHaZJIm3Y7sp/u0sjOzb6Yzbmx/F/eqTQfHJwqJ/6DToIYWVqjSR4+j/dqTzn8obP+BVfoZy+KxHcbVjLtxWezbqs3zlhhvl/2aqnk5NL4jWmPgO2ZCP71dhb28clur7MfLXGhtrq5/hrs9JndrON0m3bk+61UTWKl1b7lyn/fVZslt97ZBuNdFcwusfmPDtrLuFfcdibaDH4fiPXP+CdUZj/aw0BXAP8Ao19tI/69Za7H/gq+qn40eHGIJx4YHA/6+Zq5X/gnnE6/tX+H3f8A59L7/wBJZa7L/gqmm/40eHe3/FMD5v8At4mr9lwH/Ji8X/2FL8qRg/8AfI+n+Zy37H+jvJoesar9mXYssaRSN97+9X0P4J03R7q087yfPuPvJ/D5deYfsg+A7aT4Nw6lczTRNqGqSbWjib94qr/er2XS9JttHX7NDYSSt5W7dJ/6Dur8FxFGUqtzgrc0q7scb401a50PxMr2G2B4/nRlb7tRftBfHzxz8btH0rwl4k1DZomiwK0Wkx/6uaZfvTSf3mqb4jaK8MKarqVmyNcM33lrz/Wr5I0SGzTG77y1hToe7bmLpxieeeMYbOC1mv0tlTy03LtTbXhV3I01y8rvuLOx3ete0/FRrm18Pzb5vmk++u+vFXh8v79ejh48sD1MPyqmMeNVUU5Y3kpT3/2fWpbeGRmDhcf+zVudHMyHyXLKnZq9E+DfwdvPHN8LmaBvs0LfOzL97/ZrO+Gvw51Lx54mttEs7OZ1kl3SyRr8qx/xNX2T8Jfgrf8AiDULb4dfDfT22RyxxXEirubb/eXb95qiXPKPLE4sZiZUz1v4W6Bb6T+yxH4d0dBGseg3sUKp0ViZun4mvk6TQX+2fYIUwscux5N25t275q+5fE3w/f4O/DrUPAcSSCTSdFlGJvvFzCZDn8WNfLnwt+GviHxprS2em6bIqM677pvlVf8Aar9m8V4t5Rw9f/oFj/6TTPKjU0cj6w+G/wAYNX+E/wDwSK8T/CLRHkTUfiF4th0G3bzV3R2O3zLuRf8AZ2qq/wDAq8Ek0m2sdL8iFFhWNFSLy0/hWvUvjF/YOg6L4Y+Hugozw+HdNk+0XDJu+0XUn3pP/Za80uI7zXJPJeFj8/3fu7mr8WjH20vdL9p0exz39izX14+yNnaR1Wu68N/DvTfDGi/8JJ4kh8lm/wBV5n97+81dJ8O/hfZ2Ni/iLW91vEvy26/7tcR8dviw8zP4b0p1XduX5f8Ax6lWxEMLS5Y/EKXK/Q8x+N3ii28a3klhDbedbR/OjbP/AEGvGLvUtS+H/ihP7Bm/eKm9f92vUraz8y6+0/Nll27ttcD8RNFe28QLfww7oZk2oypXku2I/ivmOqjWnT96J6B4I/4KKXvw68KDwn8QtE1K/aDdPZrZ3Hlp523au5q8p+Lv7Z3xU/aI+yaP4huY7LSrX5V0+z3L9ob+9K38Vcl8QNHS+0+Qwwtvj+ZN38X+7XncUz2dxt3421tgcqyylL2lOFpHtRx2Kr0uVyPT1ukn8v5GP+0qfLWdd2aNC29PmrH0PXJtvzu2G+XdW3HcJMrQ9vvbq9nmOSXPGZhXSPbr5LorfxJUc2yFU2btjf3a1dQhQR7HmVqzZInm/c+XsZXpy5hx194lhkmmj+yzJ8jfLWXNvs7h4XnUlW27v9mrLB47hfOf5VenapbvfW/nBF8yH/x6iWxcSx4XZFut7oq/PX0N4XkL+C7a6+0svmLuaNk+61fOHhu4f7ZG+z5m+XbX0Bodx5fgdZnfd5fzRR0RlymVSJpXkL6hYyo7r+7Xd9371eOfHiTy9Jtrbfs/f7mhr0iPxM62YT+KRN25a8Z+M2sXmoalFHN9xXZkpSJw8feOSjvJmjKO9RvJ82xxTKOCKfMjq5R63DxR7A9Wobh5I/nm2p/s1QTp+NPaT+D+7THyo1G1J/uJ8sO37qUsNw8u1Efhv4f4qzF+Y/J96r1jNNHJ+5g8x2/u/wB6szLlOkhj03TbUXl47E/wr/E1amk+Orm482HQ9HVVX5WauRnjaE79avMOv/LFW3NUkPirWBYvpWnzfZrVv9bHH/FV/CHxHb3V9pWiyJf+JL3zb+SLclrCu7yf96vt74LaiLz9hldSMW0N4W1Ntuc9Dcfma/OW11D5lcI3/oVfoZ8B3B/4J9I4XA/4RHVsAf71zX654N/8jjHf9g1T/wBKgfYcEpLGYhL/AJ9S/NHxXpusPdXzoj8yfc21ryLpmn7H1jW22N96GFNzLXAJqdyrDyXk/efKixpuauw0JvCvguGLW/HkK395J/qNB3/Krf3pm/8AZa/IvhPi+U6nwrpOuasq3Oiab9mtPNVP7S1K42r/ALy13eoX3wl+HLQ2Gq+Kpta1hdz3Cr+7trdf7qr/AMtK8M8RfFTxb4w1KOa/vMW1u6/YrGH5YrdV+6qrXOX2ralcXz3N5eM8kjbmap5plfF8R9J2v7QHgy3meG2hj+zq25o1+8zVUvP2sNH0Ni+m+G7eVm/ik+avnGK6mjjLo/zM1Ps7O81OZHSGRi38WynyzlpJhywPbNe/bE8Z3CyW2lTtbJJ91Y/l/wCA1xV98dPiLr0vkvqsy7vv7X/8dp3gn4E+PPGd5HZ6bokxaTbt+Wvqr9nH/gl74n8TTRX/AIw8vTrdZVaXzvm3L/Ftq/Y296RjKtTpy92J8v8AhvRfiX8QrxNKsIby7Mj42x7m3bq+n/2e/wDglb8afikyX+vaDdafbLt837VE25q+8/hD+zT8Af2S/Bdz4217+y7e2s7ffLeahtjkb5vvLur5o/bE/wCCziWsN54D/Zmf7HFNu
in1Bn8zzP8Aajp81KHwmcVUre9ex6Rpf7Gv7LX7Mc1s/wATtY0+81WS33Raasqs/wB77rN/DXfah4g0258OjRPDFtZ6PZXHzxR6XF96Nl+6zfxV+Xvw5+IniTxZ8QLn4heOdZuL+9m+Z5rqVpK99b9pzXVsYLC8muES1i2xSK3y1HPVI9j7x9dQ+BfhFpOi/wBveKvFUKf3odu6Vv8AgVcr4m/aM+Ang2NU8Nw3V3NuVNzbf++v92vib4tftRa3qEkltYXmfk2xSM3/ALLXEWnjrX/Fmool5eMfM+bc396sv38pGvs48p90t+098H9SuHFzY3lt95Uh/d/vP4vlqfxB+0l+zfqGivYJo91EFiXz1aJd3/Af73zV8cW+nvfN/wAtMbfvK1al5pdh4d0s3Mzx/c2p5jfxVpH2sY3bJ5YylyntV5+014DXW7iz8JeHry20tdyxSXnyu3/AaJP2lvDcMcKeH9NkaeFtss00W7/K182trH9sXvlaVN/s7v71dd4R+G/iTxBNDbfvNjN8/wAv3t3+1R7OUve5glyU9j27/hfmpapH9ms4VQru3Rxr/rNzVt+E9Q8beJmltrCFlmkXduk3MsfzVH8N/wBnn+zbUfbLDY6qru275lWvfvAeh+G9Ls/s1hFbonlR/wCkN97dWkacacfiMZVPabI5X4ffBvWL6N9b1t/KVZVV5JPmaT+9trwD/gqB+1Y+gIv7HvwguFgn1JY5/Ft9Zj97a2/8Nv8A7zfeavo79qf9pLQfgD8HNT+J1/rEYuLVNul2MMH/AB9XDfLHGv8A7NX5l/AHQ9Y+Knxqi8VeObuafUNY1uO41Sb/AGpJPu/7q7ttRDkky6dH2ceaR/TV/wAG737KWnfsyf8ABO7w5evp3k6n4ukbVL12TDNH92Ef98/N+NfeJJOMV5t+ypYWHhv4BeEvDFmipFp+g28Cqv8AsxrXo5mRByRV1+f2judWFlTVBWMD4r+JIvBfww8Q+LWmMX9n6NcXCyL/AAssbFf/AB7Fct+zZpLeHv2fvClrdERzyaJHdXW7/npN+8Zv++mrj/8AgpR8QJfAX7CfxT8UaXtknsfB9xIqbu33f8a/L/xj+3n+3H8WvAOg+GPBetR6FpS6Xaosml3X79Y/JVVX/gVeHnFSVPDKNviPUy+nTxdVx57WP198X/HD4QfD6DzvGXxD0qw/uCa9VS1fI/7Tf/BeP9kz4La23gnwPb33ivVt7JIthF+4hZf7zV+bt98HfGHiC4+3/GP4tahNbx7mltbi8Zm8yuT03xl+yj8LZLzVZvAd14n1j7R8+5WjRf73/Aq+Tti37spRXotfvPo8PgcvpyvJSl+CPp/4lf8ABSP4oftD3R1O+0GLR9Ml3fZbGA/M392uE8VfGqaHw69tbaIuoXUkqqkc33lb+9/31Xzf46/a+8c65rESeCfhXZ6RayOsEUP3njb7qt/3zXo/g3xlf+GPCt9q/iqa1h1VYIWt45vn2xt/FURw0YbHqwxMKn7uJYsfG3jDxBrmoQ+LbaGGJUjfzpk2xw/7rV5n8evHmofD7w14j8c+FdRjuJbWSRra6kbCzI8uwsT/ALSsefeuW1L4u63481zxP4J0bW7i6abbO8ccvzbW+XatdL4p+EGq/ETwNcfBy0jMd3cWi2uy5l2FWiwSGY9PuHOa/ePBqEHkvEdtnhJf+k1D6DLZtUa1t1F/qfJnwr8A/EL9sP44aH8H/BOlySaz4q1dbPTreP8A1bSN95pG/hVV+Zm/2a+5vjb/AMENf2VP2ara20P45ftT+M9f1w26/wBo2PgTwzC9vYt/Eu5m3SbW+XdXhf7HPgv4i/sD/tYWXxc8S3Nm9vpuh6klncWsqyNa3EkLLE23+Jqwfjd+3V4w+KHjy38eX/iS8iuVsI4vmuNqqy/e/wB7c33q/F6mIlh4eypRPHwuEo1H9YxMv+3T17wb/wAEj/8Agnf8UL6Sztf+CkHiDRrlv+XDWvBsKyR7v4fvfer374//ALbnwr8A+Ebj9kPwlqMepeHvhjpen6To1v8AZVjguI1j/eTeX/eZvmr80r74669qnio6xDeSRSq6sm19qs1c78Xviff+LPFUnip9v2+6iWK9k3f6zavy7q4KtXGYmPJPb+tz0o1snw0ZTo/F/e/Q7H9qTxH8Or7W5db8H6Ja6bNMzNLHZptX5v4dv3a8cj8TX8iukMzJ8m1dr/M1Mg0/VdfmSGYKjbt3zLXT+HfgD8SPFFu15pRhwv8Ae+X+KuqjBOHLPc+YxWZYipVk18Jm6B4F8beLozczQTLZ7lTzpPurXs3wh/ZHspIf7e1LWFuIo4v3sK/drj9N+CvxR0XUIvC+rePIdN8xt/ls25W/utX0/wDsG/sTyftIeMtY+Hmv/tE6xpt5ZwbUutLVfL8xl+Xd/u1x4xVoJy5oqJlhac8RU9xSuY/xSm8GfD34f2WjeErOx8P20MWy6mbdumb+9u/3q1P2Rf2rIdN8bWXizxpq9nFo/huLeuqK27azf7P+1X2f8M/2SfgJ+wva6d4c+KOveGfG+p61p10mt6l8QLJXgtY925bhVZv3bKqtX5a/tofGD4Y/Gz9qrxn4n+COh6bp3g9bpdO0aHTbXyILiOH5WuFj/wBpt22uLJcLHNcTOlf4ftG+PzbGZTaUv/ATf/a2+Pk37c37X2u/Hia28q0uorew0aHbtZrO3+VWb/aZt1el+F7pNN023htodsUcGxFj/h214Z8GRZ2t41zPtaaNP3W5Pu163oN9H5azI6rt+Xy1/vV+mU6P1eEYI+Kp4iWKryqz3kegaTvureL9+qps2zqsX3v+BV1mlTPblEtoWVPupt+6tcB4f1qOJ0R7lYV+95ddto+qblE6bWaRP3rLL8u2s6kOvMehTrHbaTC7XG93Zt3313/L/vVtwt9nsxv+9t+Td/drldL1hJrcpM+14/7r/wDoVXjrXmb0e2aJF27FZ9yyVwVI856dHEc0NDQuLn9ym+2Vzt/esv8Ae/hWuV8YrslhGQSWkJwMckittdSRWdHTG751jVvl/wB6sLxbc/aDbndkgOchMDGRX6P4MXj4l4NPtV/9NTPpuHpKWYQt5/kyafRjf6LAix5Vo0Z2rldc8PpdJsSFU2/Kzb91dtpk62+lQlZWwYhvRl9v4ar6hp7pdfZoZlf5V8plTb/wGvyriv8A5KTGX/5+1P8A0tnRzXqzXm/zPNrvwfZyTM6bWb/2X+9TY9Bfb50KSEb9v3PvV6ND4bRrhv3Kt5m3fI38NOfw7PHMr20LLt/2PvV8pUxHNLlbOiNE5PRfC9sk3nQo0x/jVovlWuls9F+1W6O8Kqn3dzfK1bGj+G9rK8tzJtX5W+fctdfpvh+GS3+zfZY32vuRpP4fl+7WMq0Y7nZGjLocC3gndCdnlhG+bzPvL/wGs/UPClztaaaFkdf4lT5a9X/seHyV8m2V/LT/AFa/LVW48NpNH5m+N42f5/LesJ4qXwhUwsYnjV54XdWZEs4Wf/llI3/sq1QuNJmWPzvs67W/8er1XVPDtt9oYpZrhV2v
fR/gz4lmSZUT7EqIv+823bXr+pSQTWCbHkQt8yx7N1fP8A+29qkP8AwrtvB+murI1wtxesq/N5m75Vb/0KscLGftb/AGS6ko8p8c2ts+oXXzx16b8O/hskdv8A2lfosQX7nmL96rPwp+Ef9pSf2xqSYgjfd/vVsfFr4haP4djOiaJcruhWvaj7vvHDLmloYPjDxRZ6DZvDCMbfl+X+KvLtV1i51O586SZsfw0/Wtdn1q486Zv+A1TjjeWTatH95m0Y8sRA27mlVNxwtaGneG7i8RrmUNFDH/rZGX7tLcLa225LBPN/2mo98Ob+UzmVwv3KvaNr2q6PKv2a5YJv3eX/AAtTFjUE/aZsf7NT2MkNq29LbNKQuc2JPiB4tkj/ANG2xjbt+WKoY/EHjCST7S+q3CfwsqtTH1CTyRDsX5vm2rUtja3OoTrDIjEyfcpxjzGXNyxOh8F3H/CQLcaf4ks47kMnySMvzf8AfVc94w8DxafC2q6RIrx5+eFfvR11ENna+HdP+zWzs1zIn72T+6v92orfTWmt/wDSXWGGT77NTFGU+c8yTr+FOZc8irWtWq6fqk1sj5Ct8jVU8z2rM6h1FFFXyoAoLbeaRjgcUn3/AGxS+0A6iiijlAKKRjgcUK26iIC0UUVQBS/wfjQGK06P5uBQBs+DdHu9Z1mPT7OzeWWZ1ijRU3FpG+VV2/71ftV4F/4J3+BvhT8B/BeiXnj+1stStdDt5/EelzWSvI11N80nzfe3Krba+Lf+CBP7H9h+1f8At6eGPDviSzkm0Tw3FJ4l1lVi3L5dr80as38O6TbX7GftOfA/wR8bNcuLl7mTRdRsb/fLdWfyrdL/AA7l/wBmt6NOXJzI+czKvzVeQ+Nvjh+zL8KNDtYJ9H8yRI0/dfKqrIrfxNXlX/CpdE0+3lhh1JoYvvrDGi/99LXrfxw+E/jbQfEt14bm8QzSwqi/Zdz/ACsq/wAVeUa94V8VaOxSa83n/lky/wDs1eXWlVcruJy0Y04x0K3gjRLPTPH9qYZixEUm3Emc/Iai+OFkLjxNav5W4/YAuP8AgbUvw90i/tPG1pPezksEk3Koyv3D3qf4z6bNf69a7GYKtqudrYx87c1+uYRSl4AY5f8AUWvypGp57Do/zJN5zSBVrzn9qu6Sz+D2ozIP9c8cDbf4dzV7JZ6LNHNvdFG35XZl+aSvFv27o4dL+F+n2aO2+81mON1ZPuqvzV+EYelz4qJthOapVPGbXUP7NsYFs03pDEqv/vbap614m2/6h8Lt+bc9Y9xqD/ZfOSZv9nbWbJcvNIru+6vqua8D1Ix5ZNl3UteeZV/iP/oVYeoaxcySMiOyt/6DT7q4eBf9YrLWbeXCMv8As1jKXc0jsZeoSGS6+/uqKiT/AFx+lFaHTH4QooorMZ+kH7PP/KN1P+xL1j/0K6r836/SD9nn/lG6n/Yl6x/6FdV+b9ftfit/yJcg/wCwWP8A6TA4cJ8dT1Ciiivxg7gpVXc2z86bt+bNDLupR2AWkZc8GlpCd33KYC0UcAUUAHBFFFIq7aAFoop0XQ/Sp5gE2utJRS7RxUk8yBPvCrunrukGU3D+6tVkV+Plx81amhWvmXsSf8CoJP02/wCCAPgN4bP4pfFqaHZtgsdGtZlT+83mSL/3ztr781K4hW1eF32bX3bVr58/4JBeAZPh7+wPot/qttHFceMtcvNWuFZPm8lW8uJm/wCArXvF9N8zIm11/ut8qqv+9Xj1qjlVlEr2P2ipIsDEW0a/NGjN/ebb/tVTmZ/mTyYWeFG8pmX5lp02oom+FIdv8PzN/C1V7q6ka4MPk8bf9Yr/AC1P2TeMfeKGpW/nbP3zfu/71ZWoWaTKfveZ97zP4q244/OuFR4coqbnbf8A53VBdW8zQyTTfw/xRtu21nP2h20eXVo4rWtNn3O8KNv3/LHt+XbWPcaGvkul5tz8uyNlruJrNbpnRz/B91U+9/wKsjU9M3Wr/I27ay7Vf722r5eZeZrLb3jgtc8P+Swv97Db9yNfmrIvrfbGJkhkTsrL8u1a7W6t91v9pghXzVT51k+VlrCurSGS18lHVn3/AN37y0f3pGManL7pyTabbWcjbIWU/eVmrSEAtfDckLsJMWzkkchsgn+tTXlnbW1x/pKK0km75mXau2mFEXQXRWJX7O205xwQcc/Sv1/wPSjxBmLX/QJU/wDSqZ9fwo08TW/wP80eP6to6NbvZzRM6zf61f4d3+zXxX+1F4Rs/C/i50sLZk86VvN3fdr7zks5lmltvOVlbc27+Kvl/wDbS8FfbLdNehhwGdvu/wCzX43ga0frNuY+azCj7TC866HyzjZIJuOPm+arVuyM3nPJz/s1DJsjkZHTd/vUscaRtw9fRR2PnV8I+4bco2f+PU6GDaocbWbZUW794EdN1aAhRV3pDllT5FqeXsIYsfl7UT5lqVrhId3ko3/fVKtv5m5If++qRbcx/wCyNnzL/FQKUftFSRTN87jeW/vU+GF1+TZ/F/DVq3hTaqI/8e75kqaOHMjb0Xb/AHquOxMvM0fDlvuugiOzru/hr13wbeJp6hHRSsfzfNXmXhGFGukT5VP8FepaT4fudQjRzD5QVPvf89KvlhIUvdgeg+F/HWmnT3TyVRvuouz5q1F1ZNSVPs1rv2/Ku6vO9N8O38d8qGaT+Jm3V1+m3Ft4f0nfdXKs+z5VZvm3Ue7E55e8eKftdeG3VrfW0h27fldv71eGV9G/Hi4m8QfD+91KbbvjZW2/7NfOVKR3UZe4IwyOKWm5+7Tl+/8AlSNgpf4PxpvCr9KWgApyLtGKbUi/eCPSlsTLcsQp5kmE2/7y103h+12tvf5f96sDS4ftEjJs2LXSTSf2fpst191fK21BlLnOe8XXz3WqPDvXEPy/LWTSySeZKZH/AIvmojj81tnrWhtH3YmhoVr5lwH+b6V0sDeZu+fB/iVazdPh+zWq7E5b+Kp4ZHT92nCt9xqXwnPKRbbZIvkdNvzbaqLMkbHzH+b7qLV+P97HvR+dv/fVU5oUti00yKxZ/k3fw0c32SY/CXrO4eTYPOwy/Ltr72+M4P8AwwiwVhx4T0zBb629fnzHqLxyZfa3+zX6B/GNz/wwX5mCT/wiGlnp3/0ev1/ww/5E+ef9g0v/AEmZ9pwp/uWPf/Tp/lI+EJIfOc7/AOL+Jvu1UVvvI7tlfu1o3UHmKiPDtDL/AOPVWkh8uTzkRTt/u1+QR2Pih1oqxMUT5T/FWlZzWytsebcW/wDHay2aGQbNnzN/d+9uqeONI5hv+/IlEtgNONi0eyFMj+Kk8yHy9/RFqrHJ+8+TduX5amEM3yJDyjfeanHmDkLFvIi/xq39yjy9w853Zf8AeqBYHjff53Ozd/u1ZjuIW3v5e5W+/T5gMDXm8yR1PCx/+PVw998t0/b5q7XXJjJcSw7GVV/ib+H/AGa43U+bpuMfWpl8WhrTjyn0x/wTC+Kt18PPj3ZNE0ZS6XY/mf3v4a/oJ/4JLfFm/b9o3WvDd/Nutdc0tVXd8q+Yv8VfzHfAHxs/gD4k6V4kVGcWl
/DK6r/d3Lur+h79gnXtN1CbRPjHoOpXUdvZ3Czu0e3/AFLL/F/7LXRTqRjGSZ5GZ1pYaqp/ZPq7/grP8B4fjJ8C9Z0q003znuLCSB2V/lk3Lt2tX8p/j/wnqvw98bax4E15GS70fUZrO4Xbt+ZW/wDQa/sNuNc0r4nfD3UtGmmW5+0WEn2K8ki/ds235Wr+U3/go98MfE/w5/aw8VXPiRGE2rapNdbvK2fxba9CvaphYzj0OTBuEMT/AIjwncF++/y/w/xULM5ZUd9oaoVkeRt4jwF+5T1VJG2F2+X+9Xl8x7XwlppH3Nv+5/dr65/4Jhn/AIknjBf7tzZD/wAdmr5B3Oy/O+1v9mvr/wD4JjBho3jAnvc2WP8Avmav0fwj/wCS8wvpU/8ATcz6jgxW4jov/F/6RI8K/aFkd/j14vTZwviG6+b/ALaGuYhuk2tJNwyptSun/aGL/wDC9vGEQ/j8R3Q/8iGuVt43ZdkKfKtfHZ7/AMjvFf8AXyf/AKUz57M/ezCt/il+bL0M27B/vf3vvVI1xMuVT7jffaoFd1Vn8lS1I0yKoR32mT5f+BV5RxRiXvMdlR0+UfxrvqzDN8xm8lXl+XYtVF+aRX8nPy/dWrkJ8tvJh2nd81TI2pxhGZcw6/6SkLNtT/V76uRs7Qr5gZd3/fVVFRNu9H2t/tVcFvuX/WY3J96uf3YnoU48pat5oY1XyNyfI2/d825qtQpCu1IUw7fw7arWscPkjhm/3atWak4KTNt+7tb+GseaZ2x5uXmRP9nf7OcP935fmpV2Qr8n3vvbaSDf86J/vf71OY3PmB4YW3Knz/PXQc9YVmk8zzng+Rvlfa9TW8iRzK/2n5I0bcuymNC0IfznwG+43+zUkEky7Rv37fmfd92toxPExHx8shsk1s1uZvmXd/EqVQuoyJF2eYq7/uird1HuZdm1U/2fl+b+KqepSTbdnnb/APgH3avl5Tm5vsnPafevJMiQ8FW/1jV23h/XE8lkd9jL825f4a8u02abzF+dv95a6CzvgzB9+4158ZfzH0R7N4b8UeXHFvuV+Zf4fvf71dx4X1u2WcOkqpuX591eB+H/ABQlqu+YqpVPkZa6jRfHASZvMm3fPudW/iWolzSjoVHl+0fSXh3xNDJHGiPHsVVbds+81ddpWseZBKiJHs/56fxLXzjofxAbzBNNMzpu3RQ/3WrqtK+IlzJMjw/Id/zs38S14uKjPm5kexg4xPaZPED2zLDpupRsrNtljb5mZf71Nm1aGa9DpDDlbfDbXb7v/wAVXndr4yeScv5yn+Hcv3m/2q0rPWnvo1fZsVW+fd8vzfw14lapy+9I+nwuHv8ACdW115qx3LvsK2/zq38P+9Ve4jcK+z96qt86slVLNk8s2yQswk+/N/eqz50z/Om5VV/lXf8AeWuD20paQPTjh6X2jntctdyp+6+Vvlf5vu/7VcLrGl3MzSpbOp8ttu7/AOKr0XULX7RE0bwsgkfcn+9XMalY2saumxlddyyqsXzbq9PBS9vM8TMKMactjx3xho/l7vJ2qfNbzd33a898Sab5TPEX3BX+Rtle8a5ocLK7ujOm3b8y/wDj1ee+KPDbrumRFLLuVGb+7/dr6/C+9HU+IxUZc/wntHwstXh/ZB+yuo3Dw7qIIH1nr52+H9rDJfKl4/l/JsXd/u19M/Dm3Mf7MYt2AH/EkvgQT0yZa8C8G6XMs2/tvXYtfuniW7ZNkn/YNH/0mB7nGLf1LL7f8+l+UTsvDum+T5bP5Y2/Krf3q7Xw7Z2zWZSNJDufbukT7y1j+H4XaSOGEfO33a7LQ9NeO6Z3RSZF3f7tfjMj4uPu6Faz0+ETNsfcJN2+rEMKRwzQ2z+bM21UZn2rtrSuLN2meF4V2fd3L92orC1ht9014uyH7zSMm7btrjq04y0Z2U5cseYk8RWb+E/Ccvjm5RWWH91b7l+9J/u18q/Ha8m1XQ7m/vE+0brhXlbb8zfN/FX0N+1Jqzt/Ynh2N5vsn2fz/LVtqyfL8rV88/FCF5PBt8lh9/yv3Sr8zVth6cIxM/bSqS0+E8w8TfEKbQ/DZtdKfZuXbtjryG8tda1u9+1TCSRpG/irqm17TftEUOp8ruXzVb/0GvY/hd4+/Zp0eEP4q8K3F5L5W1FjZV2/7taR5VK8hy9rH4D5/wBN+HPiHULhE+xvhm27tlddL8ONF+H1n9v8eSeTNs/0ezX5pJG/vN/dr174hftIfD3SbO4sPg98PbW2m8rbb3l187r/ALq/3q+bvFF14h8QatLqus3M000j7mkmatfaR2gKn7WWtQl1zxN/bV4ttDttrb+COH7tQMqbWS2/76rJWN9x+RhRHJcq2yN2qNfiNuX+UvJZlW+d1ct/eqwtvDCux32t96qEM0zN8/y7fldqvW7PMy/e2/3m/iqjOXulu3td0Y8zrXcfDXQ4Lq4abZ86xMyLt/irkbE2y/O78t/DXf8Age6/s2MO/wAiM3zs1KK5SeX2m4y88P21jm81KZlTzd27dXDazrU2u6t9gs5pPJjf5F/hWuh+JniIaxeyab4c3GST7+1/lVawrfwvqXhvw9N4hubZi4X5G/u0x/DI5vxQ0J1hvI/hVQ/+9WbvX1p80jyuzzNlmfczVEy7aDoQ+iims2G49KBjqKKKXxAIq45NLRRR8QBRRRRyoBFXbS0qru5TpQy7TimAY249f4qlt4/3io/FRLwu+rOnIJJgnks5Zv4aXMiJS5UftF/waW6JeeHfi18RfGGzamqeDZrJmZFZfLh2yfe/vbmr76+OUk2jeLnvHdvKml2QMqbVVq+PP+Dea0s/g58OvGOoaq7Qy2/h+3tftEa/euriTzGj/wC/arX1d8aviJ4eutLXUrq8hfyWZoo2dV3V2U6sPZHyWKcqlY+WP2uPFmm2vjCz+23W77VB+9jh/wBn+KvFtQ8ZWF9uhO1Sqfwv/D/u1e/aY+JWleNfHxtobCREt4ttvMqbl+Zvm2tXlmqeIobXc8L/ACqrbG2fM1eLWxHNPlN4e6jsvCd79p8XQBJlK5l4Axn5TT/idOsWt2wKZLW4CnH+0a5X4TXsl14+thKQCEkxj+IbGrc+MG9vEVoI5FTZaZZm92YD9RX69gqkl9H/ABjf/QWvypBF8xl2947TN8/mlX3bm/8AQa+bf+CgGqRyr4Y0dbmRna8mnlhZ9y/d+Vq9xm1pLW1W5mbfKvyytH/F/tV8sftha1c6t8QtKtJm+W3tZGi+fd8rN96vxHCyhLERR2YOP708xuptyhPu1VuLpFxAn3vvfLRcSOqtvfj+7uqtdS+XGHSGvbl7x6luWVitqF5uXf2rPvJtu3f92rV1Nt++/wArf3apNvaPe6bl2VXulRKv/LWik2/Nmlo+E3CiiijlA/SD9nn/AJRup/2Jesf+hXVfm/X6Qfs8/wDKN1P+xL1j/wBCuq/N1G7H8K/afFZXyXIP+wWP/pMDhwnx1PUdRQw3daK/FDuCimqMrinFd3FABRRRV8qAKKC23mkVdtMBeCKXadu6hW29qAGOMrml8JMgY7jSUm75
sUtMoKXcdu2hhtXFC4b5O9Zkx3JLdXDL/tVv+HLCbUb6Kws+biaVYItv8TM23/2asGNEUAua+hP+Ccvwgs/jP+1p4F8IahCz2f8Abcd7qP7rcvk2/wC8b/0FaitUjTpyl2FGMqlWMUftB8LPDKfCn4K+CfhpDbLEmg+FLOzlb7vzeXub5f8AearGrao8m5ESMxfeXa235aZ4s1ya61y5nv8Ac4kut0S71/1bfd21zepXG75Em3H+H+9/wKvlI1faT5n1PWq0fZqxY+1TSTK7vG6r8rL/AHv7tC3xe4TyZmX+F12fK1ZbTJdQt5txk72+ValtZPKuFR9vyr8kn96vQjLml7xzxp8psxs8lps2YfZtiqOZfs6ukn+ysvz/ACtUUN00y/67c6/xL/DUUc1sqrv5dnbey/MrUc0fiOjl/lHNC/kult+7WT5m/vKtZVxax3VvK/3fkbYzVof6M2XgTcV+XduqrfQwrvnTb9751/u1VPYVSXKc1eWexm3zbmZ1bayf6vbXPajpqPm2T91tl3eZ/tV1OrfKuxAzs33Fb5WZf9muY1SN4VZz5mJG3RNu3L/wKum3LHU4+b3vdMO+LtL0YozbU8z/ANlqs8Z/s+SJ4wuY2BUdB14q/fX1tJI1tNcqrRxfJ8+2qJIFi5WTOEb5j+NfrvgnCEc7zCy1+q1P/SoH23CElLE1tf8Al2/zRxDQ7bib7NZqh835/wDppXln7Tngv/hIvBNzctCryRqz/wC6teyzKtxdBP4Vi3blTduaub8SaDYa1o89teJJ/pETJLGybvL+WvwGMpRqxcTzJU41sLyH5ma1pP8AZuoTWbx7vLb726qGHVjv+Xb/ALFegfHTwj/wi/iq5h+7ulb5dteeyN+89q+zpfvIRZ8hKPJPlZF5iLGe/wA33qt2uqBCUf5l2VR2hW+T71OVZNy7Dz96nylG5b3yNj58f3Vp7SR+cz7Mlqxo45t3lp/vbqtWt1tk2TOqlv4qqJlyovbZuyZ/2qmWR2Vdgz/C6tUEVx91P7z1PD/pDeWtUHuG14ZvktbpXdMbf4a9k8H+NIVhS2ePLRpuVVrwuzjeGZHM25v9l66bRNUurVvMfdj/AGmpR934jOpHm+E9d1bxheXV150MKqq/NtX+9WPdapqOrXBmmdid21I/7tc3H4301secW3t/Ev3Vrb0L4g+G4VSaZFkdX+dvu0+axPw/ZJPiJpd5cfDe9sPIYLJB95k+avmaWNo5WRhyvDV9jTeLPD3izwq+lWd1Gr7Gby2/vV8n+ONGl0XxNd2Uibdtw22g2oe7LlMeiiig6QopGOBxS0AKn3hUsKuxb/dqJRuNTwr90VmRLc1dBgeRlTZ/uVP4vvHhtEsy/Lffq14ZjRlVJk27v4mrn/El4L3VJJE4VflWq90iPvFGtDQ7dJJWebd/sbapRQPM42c1p6aqJMLZ+mfvVRVSX2TV/wBav31X/wBmqu37xt/k/df5KvrDHJG1Zl1vt5tnzfN/DS5oxiY+zNfS5kklG9MBvlqe8t/MjCPtcqn8P8NZek3yeds+6W/vVrwsk0Z+fYd3zNRHYqUTFuoXjuVdPut99a/QT4yyFP2AVkB5/wCEP0nn/wABq+D7q1RszI27/ar7t+O+6L/gn3J6r4Q0r+dtX694YO+U55/2DS/9JmfYcJv/AGPHr/p0/wAmfDdrfPIoSZP/ALKpWjSTbcom3a27bWHZ322b+Iqvzbm+7W1FeQSRkQzZLLX5DzcsT4zl5ZleGN4WPnJ96rS7WjDpub/e/hpVt3ZmTeq7v/HabHHPCzb/AJt3yoq/w0fEHLyj44ZIPn61ZhWaSPbs2/J8lQpHcsE2dP7tTRs63P32/wCBU5e6RERm8tvJ27mZPmajzNsbDG35PmZadNHM0iu83H8LMtRXEyeS6Tvhv71KS5g+EwdY2bpX35Lf3a5S7YSNsT/x6um1aZ41O9Np2ferl7hjIx3n+Kj/AAm1Pcn0a4+yX6TbsfNX7L/8Ek/2jE1r4A/8Il/as00iq1vdNH8rfL8y1+Lu5lO9etfaf/BJH40P4Y+KqeD7yRtmobVijVvvSf8A7Nc9enOpSlynnZ5hnXwM0j93f2c/2nIfB9mnhLx/rW3TpGVF/vQ7v4t1flv/AMF+vhDpWofErVPiL4PuY7m2s71ZUa1+ZWt5vl3V9k+NNHmttmq6PMxh2xs/z/Kzf/FV4R+1X4Bf4neHdS0G7f5NY0aRJZJt3ysvzLt/2t1edk2ZV6K+qVz4PJM0q/WFQqfZPx2kVFdvkZX/ALrU+Nk279jbm/hqbWNL1DSdSutKvE2y2c7QOv8AtK22ooY3/ubW2f8AfNe7KPQ/Q4y5o3LEapIzBOd1fYH/AATLVRo3i9lGM3Nl/wCgzV8iRqiqd4ytfXv/AATQTZo/i4KwK/aLLaR/uzV+keEqkuPcL6VP/Tcz63g124jor/F/6Szwr9oREPx58WsIW3f8JFdfN/20Ncqruql9nzf3Vrrf2g02/HjxUSvH/CRXTf8AkQ1y/l/N5ao3zPuRq+Ozu39uYpf9PJ/+lM+bzPm/tCt/il+bEhk2rvTd8zfdp7N5279xt2/xNU0UE3yps2/xfL/dohgPzP8ANhv738NeV9g4/thDE7TMd+1fvPWhZq8knyIu1fuVWjt3j++6/wB7dVm2ZCwT74ao+yaxj7+hoRnyY9nys392rlmz7hv2tt+bczVRtm3SfIm1v4GarUNvtkCXO5mX77LXPKPMenSiXY1Rt8ny7W+6qtViz+aHzH3I235lb5t1VYY3WRZoYf49taNuzs2/f8v3flT71R7stjsUuYkt0RvKmcfJs+6v8VSJavIxcXjOsKfOuz7tLHC6xo6fOf4dvzbf9mp4ftKr5zn5G/iV/vf71MwqR5veIpLXzJPkTcu7+L5adHbw/Nbfd/ubamkj8lmd02bfvfxUNb7pN6Ozf3G2bflropy+yeNiqc+a5VlRVjXcfl3Ns3VRvFRYWfYvzVpXSo2zzNrfN91X/iqheQpbyfu9u2T+Gteb3fdOWMfe9483jk2t53nNuZ/lWrVnceZGzzIw8tdu7f8AerN+0TSSI/ULVu1k3R/I7Lt/iavLj8J7cDZs7942CbF2qv8AC33f96r1neeSyzJM27+8r1gWd0/nNbOn+sXdWjaNtwkMfH3dv8VL7HKb05cx2Gk+JHk+RJpCy/drtvDupagVXZIuJPl2t/DXm/h9X85/Mh3f8D212vhu68tt8FzGjq6/eWvKxkf5T6DA+9a56N4dupvM2TXLLtTbF/d3V3Hh2J7yQv8AbN8qrtddm5WrzzQ7pFUeWi7mlX7RJIm5v+A16H4UmT90j7lfd8nlrXzWIp8sZcx9Zg5fCjsdPheSNHm4Vl+RVT5d1aCwokiw7GU/eT/ZWovDtu8ccXnD5l+bb/eroY4Zl+fKurfN5a/w15sOaM7HrcseXmObvrFIIlmtkVZF3fvJHrA1C38u3E15bfvmdmVlbd/FXY6pp0LM38C7d33N22sfWLWFYRD9mUuq/eVfvLXtYX93seHmC5tbHCa1b7r
eZNi+az7fvfK1cT4i0O2mhZPJVPM+7t+bbXpfibSUtZNk0Pzt/CzbfLrmr6xhW3eG2jYyt9z+Kvq8JKPLzI+Hx0eaXvHWeDbZIfgObZwNv9lXYOemCZK8b0nSU8zZ5Klm+ZG/h2/w17l4bt1X4Sm1miCD+zrhXXsPv5ryzTNPSGIPCih2/i/h21+8+JbTybJE/wDoGj/6TA6uNLrC5e1/z6X5RNzw/pbIsdy6LFu/8d/2q63TbObc3yW+9UVd275mrnNLaGHykdN275d0f/s1dHpMiXDBLZ2bd/49/u1+Oy/unxMXyllYxeKZssiL8sqr8u3bXJeK/HGm6xcXOm6NNI0FjtWWOOX/AFjfxf8AAqPi38RoPAfh+Y2dzCb+4iZIoV/5Z/L96vO/hGZodH/tW88wtqkrPEzfLu/vNWEufnOipL3Trvj1O+seE/B3jmGZjp+tadvi+0LuaNl3Lt/2du2vCPFGpJ5MlhMnzN81er+LtSe8+C9/4G1PUma58G6tNPp27/lpbyfNtX/ZrwyS6e6U6leJuVk+Vfu7qJfETTly+6zwf4j6Fc6X4onhSNtn3kb/AHq5zzrhR99gK9X8dXmm6lqmblNzfd/3VrmtQ8BwszTWsy+Uy/eD1fLM6+Y5O31a8t2DpMwrptB+ImkC3Fh4k0fzkb780Z+asi88I3NuzbHyn8LVQm0ieKTy/MUk1oP3ep3LXHw11lv9GuFt2b+Gaib4f6VJH52m6layqz/djlrgJLaaNv8AVtt/vVNCupIuYJW/4C9HNL4ZC9mdPefD+/hdvJhU/wDA6rN4XvIVR3TarJ/frGTWtXtFBS8k3f7TU0a3qTf8vLf3vmalze6HL/MdFa2cNuyPNcqrK3zr96tqGR7zZD5jOFX+/trirXVn8xZJpuV/vVv+HfE0K6pG7/Ntf5lb+Kjm5hcvuHoOj6Homh2f2m88tJWVW8vZWV4k8TG8Y6b9jj+zN9+P+HbVu8l03XLhZodWjSST5dsj7aIfDum2sJmv7nzP7qq25v8AdojymSl/dOOl8KeGdVsZFgdoLn70S/w1xF3aTWd08EowyttNeu61oNna2ralZvHEf4l3/Mq1wHjaSw1K8a7sCvmR8S7f4qJfEbU5HOAE9Kcq7aFXbQzY4FL4TYWgNu5oYbutIq7akBaKRjgcUtXHYAoopdjelMA/g/GlaR2WlWPb99PmpfLf7j9aXKjMYoI+VDx3rtvgl4bh1zxnbSXO1orX9/KrDcrKv8NcfBD8wXNfQn7NXw5tlsV17UIZEEj7tzL8rL/drGtUjRhqc+KrezpSPsT4K/tTeMPgp8H7jwX4SSFJ9a1ePUri6b70e2Py1j/4DVKT40fFH4hak954q8W3XlRsyxQtP+72t95tteZWlvNfXCb9uyN2Tds+bb/dWuhhjMNq6JNHskdfNbZ8y14ft6tSR89GpKT94t+INavJr4F7ld6t96Nv4W/9mpVDzfJMm1t38TVWhhsLaZEuRHMytuSNl3bv96pri6kmuGS2s1iT70rNUSlyxL5TrPhOnk+NrOPaD8k2GK4P3DWj8apnXxHbwRKNzaf95ug+dqzfhUoHjqyO5WzDJtO7LY2Gr/xyuDF4js0W3Z/9Dy2JdoI3txX7dlf/ACj3jeb/AKC1+VIPhjocfJpthYyNc397uYJ86qny18k/tYapZ6h8cJrW2+VLPTo4k/8AQq+n9e1D7Pbb5n/hZU+f+H/4qvjn4uXqap8U9Zu4Yfl81URWbcy7Vr8Xy2PNX5jtwP8AF5jAl+b5KhvIXaFkRMLv3f71XI7dCux9oP8AeqvfXibWRP4fvf7Ve/yo9P8AxGRNb7c1HI22H50w1PupH3NDs3f7v8NU5pnk3I75qYxNIkH8W+iiiqlsbBSL8u40tFKIH6Qfs8/8o3U/7EvWP/Qrqvzfr9IP2ef+Ubqf9iXrH/oV1X5v1+0+K3/IlyD/ALBY/wDpMDhwnx1PUIztOXGRQ43nJpvXbTq/F47HcG7c2+ikVdtLTAKKKKzAR/umnK3bZndTX+6aFXHAoAcVY9qP4/xoY5bikKbuDxitCNmFFFL91PrWZYbG9KGb5VoVttG3a3I+Wq+EB9vvZxF8vzfxV+kH/BDX4O3lv4l8V/H54Gb+xdLj0nTm3/KtxcfNJ/5DWvzp0S0lvNQiVOBvX5ttfth/wTn+E/8AwpT9jfw3Z6lCsWoeIribW9WVdysvmfLEv/fK/wDj1eRnGI9jhH5npZPhfrOM9D2i+iSTe8zqvmJ8vy7q564VFbek0b/wsy/w/wC9WjqFwkrMjo0bK38T/My1l/aobVmfepLP83y18jha04e9I+ixWFK3lo0ium3K/wAS/LuqWOGDKb/m8v8AhptxMjYh3xosn3I9/wA1Ekc23y9igfdXb81evQlzazPFqU+WZFJqU0V0qTcIqMqMvy/99VHcalOWhdHYKr/Ky/7vzLTbqRvu/Y9vlptfc/8ArP8AarMmZLaTZbTKBvZZWb+Gu2jHm91HPL92bK6h51u00M21v7rfe/2qguNWhmV2srnczJ95vlrJtr2G1mebC7PurJJ95qikvN8b3EyMPL/5ZtXdTpzicUqnMTXEyTKby2TfIyfPu/h/2qwNTXzpnR7yPa3y/wCzVu9voZIVkdGiVUVtv8SrWNqV15as7w+Zt+Z2+7t/+KrflMoyKF02mx75kRS391lVmZf726q0eZdIbY28tE2CR1JzVHWrx5IHhhmt2RX2fu/4atWEijQvNReBE5APtmv1vwVhFZ1mHL/0C1P/AEqB9twfK+KrP/p3L80YDTTRwxrsZlVGVFj/AIW/2qxtckuLyGR9jJIvzMsfyrt21cmaG43pM7I7MrLtes7WryaOxmyVZ9jKys/zV+A+zlGem55NKtFQPjL9qyxhvtWluUfL+a33a8EmaRZG2c19EfG7R/t2qXaIkbM27ZXgeqWr29w8KH5l+X/Zr6ihHlpRPm6kuarLmM5bd3z/AHv9mrcMChc7GojVmb9y6/Kn3aGmeNtnzf7u6to+8TLm5hsjeSqpDTIVQyF/4qV1dm5p3lPtH8IpBIsQs7Kp2fN/erSs5DC2/ZuaqFu3SP7wZdtW4WSNU3/+O1UiTSsZE8xYXdd33t1dJo+n/wBqYh8ln3cKq1ws2oeQ2U5K/wAVdH4K8bPpd0iO+4fd21PxBL3Tb1D4d62rb7aGRU/grJvvC2vab99G/vLuSvXtH+IlneadDD+53fxbv4qW48TaVds/nabC6q/z7YqKcomMubmPIdL1zWdHul4YfP8AK392tTxxodt470p9VgRVvIV+bd/y0rubzw34M8SSBLbbaT/e2yVb0v4S3Nq2+0v43j/uxtTj8IlLld+U+XpoJreZ45kwV+VqZnJ612/x08G/8Ij4sZIn3JcJv/3Wrh1XbWnKdsZc0RQ27mikVdtLSKFVX3Vf0/5mEOzP8W6qEbDdnexrd8NQxzSMmzlv4amRlM1ZvJ0/RXmLsr+V8rVxrM7MXfq1dH43uBbwx6bF/F80tc/b2/mZfY2F/u0+ZFR90uaX5MJCTYNXJI
yJt43bfvfLWXHHNHMuz738O6tKKRmjG/70dHvmcpRNfTZHaHzmh+X5dtQ6pbuqibY25vut/dqzpsyCLe/z/wAPy1FqkbqvD7xt/v8A3aI/3jP4fhMmS4fbnfyv8VbmkyOzfI+Rt+7WE3yts7/xVoabePbrscUw5u5u3EMzKP7lfcnx4Tzf2AJY1bGfCOl4I+tvXwzZz+fC3z79yfxV92fGdFk/YLKBcg+EdLwP/Aev1zwud8pzv/sGl/6TM+z4V0weP/69P8pH55tG8bfK/H92rGlyfZ5EfDLt/wBqrF9a/MxRFX+7uqlI3lrv2NivyM+LjLmN9bxJI/kjbc38VSrJNNJsT/vmsSxurlWZ/lxWnZ3+66SEv87fM9A5fEX2WONw7o26mf6tfMfduqe6kmjVU37hsqHb5g+/8mzc0jPQOQ/c8ny+c2GT5VaqepL5MZLpu/2t9WZWeM/PD5x+6rf7NZ2qt5a+c52qz/dpx5+UmXLI57WLwtv4Ynf96sNvmYvWxrVx95E/i/hrGpG9PYK7/wDZv8ay+CfizomvB1RYb+Nvm+796uAq1pN09nfxzI+Nrbt1AVI88LH9I/wh8P6l8Qv2WdJ+MGm20NxYNKsEskfytGzL8skleaeMvAt/dabd6zoFy0otWZ/MjbdXD/8ABGP9paH4ofst6j8B/FuvNCjQSQXC/wAW5V/dN/s/erQ+HPxI1X4P/EK8+EXxIvPtFvHcNAl5JF8qx/7VKrk0cTR+s4de9H4j8rzfBU8vzRTjpc/Kr9sfwOngv9obxCiWzQ299dfaLWNn3N8y/M3/AH1ury+H/VHen++1fdn/AAVo+Dtm0cPxL0HTY9kN1Iktxu+aaNvustfDNrCjf6N8oVf7zVvJNwiz7nLsRDFYWLH6evmN5bpsZfu7q+v/APgmmjx6L4tV8Em4sjke6zV8kWquzNsO5t33m/u19df8E2Tu0fxa3rc2fH/AZq/RvCX/AJL3C+lT/wBNzPtuClbiSj/29/6RI8Q+Puz/AIXj4udl3ga/c5T/ALaGues7W28tZndlMn3K6347Wu743+KF2L83iG5bcf8Aroa5lYUVgmzmT5V/vV8dnvL/AG3iv+vk/wD0pnz+ZytjqzX88vzZFJFuVk6fPtZv4qfDG8ah9/zbPu7PvVI1m+7+Jtvy7qdItzHGqQo2Y/uNXly5Dh5ve1K0mRF9xgu75FqW1VFXL8fxf71KqpMrb933/m3Utvb7d0kL7t3y7aykbU5e9EuW7ozJ91P/AGWrluPMk+d2Ct8q1BZ2/wAo3ov+0tXrPfErO+5v/Hq5/dPVpyLdtHDG6QpM23b91qsiGG3xsk3Fm+dt9V7fzmwiSLtZdv8A9jVq1je4bfInG77tZ8vKdnN7vulm2berQxvsff8AMy/dqzFb+ZD+8TaqtVO3CQt86YVW3ffq1GUWFfOfPybmZf4qqP8AeOeQeWjTOk24r/Cu/wD9Bp6yPFtZ9zJs+6vzf99UkLQvJ5zx+Wnlfd/i3f7NSQySLC8O9Q7fcbfW9P3ZHjYzmlsVpPmKOm5h93b93bVC+mSNf4dsO75t1X5Fe52vMmfl+9/DuqnqUNmpZHdWRtu9fvKrV0nHGPL8R5Uv2m3/AIN2779WbX94uxw3/AabIqLIGj6VYhx5yuifK38P+1Xi8yPoIx5iza26FfMjjw7fKlaNqmZEfHy7vnqpDCkaq6SM21vvN/FV21hmkf5Pvf7VRLm+ydtGJs6KqW7KmMszfP8APXXaLJCtx5Pk/LtXczfwrXJ6Xa7sfZnzI332/u12Whx7I0y6su/5mrzcRKZ7uFj2O38P3Ft5kaI/Ej133hmaFpvs0ybfLb5Nz/NXnGiyQqyRyQsoX+9/E3+zXX6FI8zR3MN78y/wsu5m/wCBV4FT3uY+jw8pRPVvDdwY40hm+5G+1tv3q7W3jE8KXMzttk+WJa8z8P3SWsjW8KN53yyvul3K3y13fh+dJIUciP8Ady/Iv8S15vLGnL3T2Iy5oamm0LtbvMm3cq/3Kw9XtIZVR32n5fmkX5drV0Sf6n5EVir7tu75mrO1KztmWR96/Km5vk+Vfmr1MJ73unkYzlOLvtPtriTznSRvMRt6yfeWsTUNDtre1kSGTe3/ADzX5q6rUtPuZpGfzl+X+FnrEu2dGNnBDv8A4UZV2qu7/ar6bCRltFnxWYe9zOxpaZbKngk2rDj7JKDg+u6vOv7L+zq9ym0Q7ti7q9Mso3fww0ccQDGCQKue/wA2K4u40+aRdkyNG2zcyr/DX774lTtlGRf9g0f/AEmB18YQ5sFgP+vS/KJm6P50MSQwt80n3dvy1f1jxJbeGdMa/mudrqn7r/ab/ZqnfWv2PF4qM38TfJ92vKPil44fULoWENyxSNPlXf8Aw1+Pz+I+Epx98wfFniDUvGXjTfePubdti2/N8v8AFXex6lDoun6H9js1kSO/aK6VU/1e5fl/4DXGeCbWG3sTquxvNm/1Uezd8taOoeJLO10G60q5dkmuIN1qu/5lkX7rUpR5Y8pfNzfCHxU1qw0fxBBrdy7Q2mof6LqNuybo9u75Wb/0GvFvirqiaDeTW2muzWDfNZf7K/71d54q16z8UeG5X1iFsN+7aPd8zMv3q8Z8UagmszS6VNNJ/ovyru/i/u1n7ppGP8xyt1cTSSPc3LttZ/4arWfiLU9LVw/zQ/3m/ho1C6muJnt/ubf4Veq1vIkjPbTfP5ny1fLzGxsQ69DeQ75nU/3qq3C2cm14UVNtYOoWt5pswTe3ltSQ6xJ8qSfrSDl/lNCSFAweb/gG2qupahDBH5EMK/7bfxU2S6RkZ8sStUJ980hffndVy+EqPvfERySNI2+kyGHBWpI7V2Vn9qlW12rh0o5UXzRKx+4PrTo5HVg6NytTpa+YSWX/AIDUn2FF+4agRu+GfEkc0KWV583zfJu/hre+y6k0zPp82V+9XBw27rcfI+K7zwnqTxwoknzv/C1BlL+6QXi63dK9hczMEZP4lrNTwy9vvkfaVVf4l+9Xd6hqVh5av5G41SuLVLiT9ynzN/DVxjMiXus8jnjkinaKRMYblabXXeOPCr731K3++v3465Gj/EdUZcwUUUUe+UFFFFHMgClCljhaWLofpTmyozioJluKu9k+ROf4mpHV2P8AdajG1dvyt/srVizs5riZIURvm/8AQqrmROx0/wALfh5qXjzxFDpVnDuXer3Df3Vr6+8H+B007T4dBsvM8mPb8v3d1Zv7IPwt0HwP4Q/tjxPpM1xqGobZfMX7sK/wq1ewx+KPDEcM8KabHskXam5f/QWrxMVW9tK0Tw8RW9tU5bnLNoOsRKYXSOLa/wArf3f9qpbXw7DbyKb+WR3/AI/9n/vmtS81rSmkRHePYy7nX+Hbu+WmyXln5zbPlK7m3L/drzpfvI2OL3Y+6VLeGzhVvJhWU793zf8AoNQNcuN3nbd0n3vL+7Vm4VJmZzMyKvzbt/zNUDKnzOY127t21azlyx3kXGXue8dP8I5JpvH9tK+zYUl2gdR8jVc+P8xi8QWvlJukNiNo/wCBtVH4P
FG8fW0kM8mDBIGjZePuGtH46zRQeKbKWcttWxyAvc72r96y3ll9HvGf9ha/KkF1Y4G38OJNG9/fuyBn+Ztv8VfEvjG8e88fa9eRupaTVptu1dv8VfaXjjxVZ+G/D9zf/bGxJEzvG0v+rbbXxHDsvNQurxPmWaeSRmb/AGmr8ey+MT0sD8LZA0jtu86b73/jtQyWPmR74d2f9pq01sxJGziHb/D9ynR6bMI9mF/4FXsnb8PKYjaTCy797fLWZqNktv8AMiY9q7D+x3klDuMLs3bW/irH8Uaa8No03b+GlKPKXTlzHN0UUVB0Cb19aWiitAP0g/Z5/wCUbqf9iXrH/oV1X5v1+kH7PP8AyjdT/sS9Y/8AQrqvzeZscCv2fxW/5EuQf9gsf/SYHDhPjqeotFFFfi/947gooopgFFFIzeiVmA5RuNCfeFNf7ppY8pQAqv8ALx+FJIibsRmlYfNj1pKqQCLv705/vGkop8qAKXc8h+akUN61JCuXAc/epe6Znr/7F/wUufjf8efDfw9SFtmralGkr/wrGrbpP/Ha/cXVobC12aboO2Gxt4o7eyhVfljjjXaq/wDjtfn7/wAEX/gz9jXWvjfqulRuLGBtO06SZNv7yT7zbv7yrX3tcSTRwmYwN/u/3a+Bz3FyqYzk+yj9A4cy/wBnhPbS+0Zd5dFf9GmdX3S7E3N826s+4uHjLQ2ybv4n3fdq1fTPCsyJCwT5WfcnzbqqTL5jOmyOXdt/3vu/drxvby5/7p6lbCxqcyZPZw/MP9Tv/wDQastH9li/f7RG3937y1nxslvMieS33dztv/iq4l1ugX7TDtT7zru+7XbTx/tDyamB5djPvvlaO58jcGfbtZvmZa5y8jtlhUuV3+a3y7/vbm+7urqdcuraSMzTP/HtXb/DXK6tdQq0mLzlfuLs/wDHq97A4jueLjsM47le6kh+z7+rRv8AKq/w/wCzVZtReONv9Zuk+/UF5qVszF3diknzI38LLWHfaw8dx5KO2N+1vl+Va9unUj/MeBUjOJfvtWCyeT5zI+z/AJ6/NWJq2rbiYQiq+7d/rdytWTNfP5zfafl3S7dq/d21l3195d2yfaV/6ZK3/wAVWr2MeYtaheHb5lsixuy/vdvzbq2tJmz4OE20nFtIcMc5xurhptYDbUuU2M275V+V91dnoMsZ8BiQfdW1lHJ7AsP6V+w+DEEs5x9v+gWp/wClQPs+C5Xxlf8A69y/NHHXF8kMgjAyqqzP/stVC+urmPTbl3TzX8r/AIF/vNTI7yG4mMz/ACrJubav3Y6y/Fl4lnodzfpc7HZGX5X2/L/dr8QnTjz/AAnzNOtywPl79orxYmizSwr/AK64VlRm/wCWf+7XjGqR/ao47nfncm7dW9+0J4o/tzxlJDCu2KNvk+euc0u4e403yX/hr1+blhGxwy973ilGvlMU2fx0SRjdvd8VbktNqPsRmP8AHt/hqGRfupsz/tVp7nxEkDfu8fxUsMjtJv8AO+X+7UEkjqp7/PUTSOq1BUY8xr6bG810uybhv4a2JtLd/wDVpXPaXefZ5ld3xXVaXrltLb+X90/3qr7RnLYzbjQn+d0Rv+BVW/s28hdc7d/+zXSeYny/xf7NTR29sy/6j5lp8qDmMLTdU1iz2p5zKyv8ldNo/jbVYfkmDAs/zs3zVnNBCy79i/K/3Wpyy+T9yH733KOWURSkd7peoWesR+TdN5TyLteRflauk0qy1u2kjis79pQybfv/AC/8Cry/T/tPnJ/e+9uZq9b8B6x/Y+hvquq/c2/Kv/stHwwJly83Mef/ALSvha5m0i21sbWMPyy7W+7/AL1eG19A+NvFln4j02/hvHzDMrLEv3tteATBFmfZ93dT5uY3pjaKKKDYlgP7xQ4rpfDS/Z5Xnd9u1Nz7f4a5+xh3Scjd/erpppIdL8OvKj/Oy7drLU/FIwlvoc3rF9/aWqS3Mjs3bNWdJk8hPJcKwb5ttZ0a/NvFaenw/aGXen/AafxDkJdKizDZDt/2t1TJvjZf92ri6Wkm7ftbbS/2eVXGzHyVXwkcyHWd5/y28j733lqSZluY2hdGVKgt4fL++mV+7uVasrC6t8ny1HL9kPd5inJp6s2+Onra+WRvmqeON5ptj/LUv2fcuxIfm/vNT+EYiK8b7Iptvz/ItfffxpkC/sEGQN/zKOlkH/wHr4E+zuuU+Yn/ANBr75+NikfsCldhJHhHShgfW3r9f8MP+RTnn/YNL/0mZ9hwp/ueP/69P8pHwVc3HmTeS6bv92qzKm0O6NtWrKo64kmjxt/u1Ktv5keOu75ttfkMvePjI+6VIYfMO/qKuW8yWr/6nll/75oht1jbYE+XdTVh23D/ACMF/wBqlzIcpcxM19959/zbPu05pn8vYlRRRw/65x937tS/IzB3RlH96q5Y/ET78Szbtt43/wC1/wACrN1Ro2V/kZt3y7anbzlUun3W6bWrPvr4Q2+zf/F/FSBRly2MHVJELkbPutVCb5tz7PvVevpEkbekP3qz5Gz/AANQbQG1NbofMXZ96oMfvAuKt29u7SeZ5O6lzIuW59k/8Et/jvN8IfiNaSzXMf8ApE+147hvlZv4a+0/2kfiFpHxO+IVvr2laCti8lmv2xd3yNcf3lr8r/hZdTaRHFqlg7QyRvueRU+avuv9m39qnwl8TtDtfA3xIeG2v7WLZa3UkSqzNXtZLj6WCr+/8Mj5DiDKpY+HND4j2ib4C+Lfjt+zbqr6xon2qw2TW9vMqs22RVb5Wr8qNa8O3/hnWrvQdSt/Jns52ieH723a1fv9/wAE4Lyw0vUPEHwP8c38b6R4os9thNIq7d23crK1fkh/wVO/Z3f9n/8AbC8SaPb2fl2GoXTTRMv/AI83/Aq681p4ed50jmya+H5KUtP8z5xjV2kZIfl/3a+tf+Cb8ax6L4rC8j7RZ8/8Bmr5PtY3yHL4WvrD/gm/s/sTxV5aYH2iz5PU/LNX1XhN/wAl5hfSp/6bmfqXBcubiSj/ANvf+kSPH/jox/4XT4phdWA/t65ZX/u/vDXNeX+8TyeS33Gk/wDHq6X46KX+N3ikAKSNdudo/wC2hrmbeSWEbETb/CzN/DXx2ef8jvFf9fJ/+lM+czPTMK3+KX5smaP94d78L/yzqKZf33nI+x/4lo+eSTyelOaP94H35aRNvy15EY8pzS+EZ5PmL5z+Wdz0+1j8uOSZPlLP96nRqkmLZ4fu/fVf4aFLxx5Taqfx7qXxRF7seUnhj8pw/nfe+9V9bvy32Q3OF/2UrP8AOdV2I6/N8u6pI7h4/wDRn+f+F/n+9WMonqUfdgasMnkso2fP/A1WfOeXY/k8bvnjX+GsyG4+YR79qr/eepIWQqyJ5gMn/LSN/u1h9s64yNX9yqu7ybwrfd+7Uy3HnSMU3Afd2t92syGTcu9JtwZ/96rDXD+c6Ptx975aOb7JFSXMi5DNMriF0+6/zsv3ammkSSP7m1N38X3lqrDMi5jmPLL92rMcz7i/7tg3y/NXRH+U8rEcvLoElvJJE3+kfKv91KgvFT5/J3bWSrZt
3+zo6Pv3fwx/w1XuGfbvNzt3ffVlrWP8qPN+H4jziS38lvs2/wC98u3Z/FT7ddzYdPm/2qmmDyXXz7nH8bbP4v71TLbozoifc+9uavO9n7vvH1FGUZFm1tHhUb0q6tvLEyYT73/jtJZ2qSRpv4b7y7q1bO1hkXycNt/ibbXFU54nsYenzajrGFFy6csy/Pt/vV02m27QwpNsUs38X92su1tUj2wpIv8Ad8xvlWtnTfkXY7/xfNtrzMRKUtj2sPTtD3jf0ffeTRpC7blX/gK12uk3Dqrwv+7Rvm8z/ZrjNHj8pjcv8is+1Nr/AHq6XSrh2aJNi5b5WXf8qrXk1pc0j0MPH3dTuPD+oGNkd412qnySfxV2+i6l5LJvlVopPm8z7rV5npvnW7LM7sqb/wCH7q11mk6o7SDZ8qL/AHk3bq8+VP3uU9WnU5oHpFjqFtIrP1SN/mk+781OuLx7iMpD99lVmjkSub0nVEa4/wBGufkX+Fl/hrQbWkuoRDPc52/eaP8Air1cLyR908nGe0KOsN5as8fzyt8rbfvVgTR3NvdSO6Llfm2s/wAv/fNauuXEKRsm/ak33Grm7i4fznhR8L/Fur6DDxjGPMj5XHRkdLZO0vhws7qSYHBK9O4rlNQeHy1+zbm+7v8AmrpdMkYeEPNVcH7LIQD+Nedav4gfT/MmmmVEW33PG3y7a/c/E+dsnyFr/oGj/wCkwPQ4sV8Hgb/8+1+UTI+J3ij+zdNTQoVZp5FZmk/2a8Zjs5ta1x7Z7aN2kf5ZGbayrV/UvGVz4m1S7ufOzE3zW+5/ur/dqxoumx28L39y7O+35G/551+UUY+6fBS+PmiaV00el6fsRPu2+1FVvu153qF9NqV00zyeWsabUZvmrd8SapczXImtpv3XlbW/vVwPjLxFNbw/Y4PkLbmZvvUv8IfDEg8aa88+pBNNmZ45G/f7V+61ch4wuIWmZLN95X5dyptqaG4CwmF9z+Z/t/drDuoprC7aG8m2/eZG3U+VGvumJq+yS3+0pDsm31jCZxJvR/m37q2bhrm6mZ/3bNvrKuoEhbzk/wCBKtHwmkYwN+3WHXNNRJpl3Knz/LXP6lphs3x/dq7o+rJHebPJ+X7u6ta80+G8h3w/NL/GtEthfCcms0ynDpuX+61TwTQ/LvRfl+bbUmp6bNbybmh2lv4aqbnjYj7rKtLlLi+Y0LeRC2/Zj/ZpfLRVV3+9Wesz7Vbdt/2hUv2p41+/u21JPKX1lRYfkeopJ4Wb7nH3qprcbv4G25p3mfMU/wDHq0KLPmJ5n31Vv/Qq3dC1B4WTY+0qtcyq7vnT+GrtrcvHJ/wDa9TzGfunWzao80yeYdqfera0uZJFb5Pu/c3f3a4+G4eTYm/dtrs/D8Pmafv3qZPvfN/dp/4SZDtUt7aS1Kb12f7VeceMPDK6XMt3bPuWT+Fa7LxRq0Ufyw7vlTa9c1Oj6pb+e4bbt20xxlynKUUs0bwzMjpgrSUHSFFFKoy3NKWwDo8fwVIsacv2qFW29qmjhdvv8/3afL9ozGLHuY8fLXuf7LXwQfxtqEfirUrZvsVnKuxWT/WSV518NPh7ceMdWFu7+VbxsrXEzf3f9mvqf4P65Z+D0l8N2yKLaNldVZPmas6kvZnBjK3ucsT6T8M+B9N0nQXcW3nSyRbtu35VWsW48J2GqaeiJprW5+9KsyVf8P8AjKfVvDdvMkzErAq7o2+9838VLdatquqQmF7/AHvu+Xy02t/u158qdOOi2PI5eWJyN94NtmZprBG/i/h+9WXN4fubWT5/MSZk2vt+bbXYw6s9vdPD9jZ1Vfmk/i2/xUNqGmyXzH7G0x3ruaP+7XP9XpSYbHEfPAsqO7Hc2x/M+9Un2tFl8lN27Z95q6S+sdHuL5U2SKGfCKyfMtMutC0pXb99t2/M/wDs1wVsLLnL5eUf8HXup/HdrIV+RVlB/wC/bVr/AByis18Q2l5dTqoWwIw/T7zc0z4avp9v43htYDGkjCQnauDJ8hri/wBsvxJe6b4m07SLVwBNpRc+o/eMM/pX7tlaUPo+4tf9Ra/KkONPmlyniH7RXjhNQ0u5ttKdhCsTbG/i214V4V0vdpaXOyRvnVV+eu8+K0k1v4XuIZgrSySqqNu+7WF4RtdukpC+0D7rfw1+R4GnyxZ6mHjy0hI7JFPkum4/7P8AFT00lJJvnRf+BPWs0KblgHzbaoSW80k29ywNehzS2NY+7rIhuLP7sezlf+BVz/jK2hbSZnSHdtT/AL5rr7PZIp/c43PtrH8eafDHo92+xtixMybaPekX9o8nf7ppaKKZ1BRQrIV96KiMQP0g/Z7/AOUbif8AYl6x/wChXVfm/X6Qfs+cf8E21x/0Jes/+hXVfm8rbq/avFb/AJEuQf8AYLH/ANJgcOE+Op6i0UUV+MHcIfmbfmlDbuaF2c7aRV20viARVw3PpTqKKXwgFKvQ/SkoqQE37mNLRSMu6r+GQC0Ab+1FLyppgCr8xra8DaNNrXiC20yC2aZ2lXbGv8XzVip94V9K/wDBM/4JXPxd/aO0W2dF+zWc/wBovWk+6sa/Nub/AGa48XWjh8PKb6GmHoyr4iMP5j9O/wBkr4V2vwh+AOg+D7Z/Ku5rL7Vfq3/LOSRf7v8Au16ZJYTRtFvdWeP5nk37d1TtY/6Y+91fa+1WVPl2/wCzU50/7Ux3pIrruVm/vf7tfk+JxXtsRKUj9mwuHjh8NGPYwptPudqfaEZf3rb2j/iX+GqFxptzbskdskjf34d/zf71dra6enmGZIV/3W+61V9W8PvI2/ZM3mRN8y/wqv8ADurnVb3kE8PS5eZnE/Z3hk2dW3/O3+zUkk025Sibm/jaRflkro4fDcPkh3h8ot/e/iWs28sfLXyU+8sW7d/Eta0anN7xy+x5o80jmtUmdeX2q/3lVfmX/drk9WvLP7cIQ/kzSbldfu11niJYbhvvsj7dybk+b5a4XxNdQzaeZHjUFX+ZtnzN/tV7uWytK72PCzDD+7oZeoX6XCv9jdl8t9vmLWRdXVzMzO9yxMb7k+fbU91ePj7mFVNqqv8Ae/vVzepas8MzTecu5l2Ov8K19Lh6nQ+NxlPlkWNUmh8tn+b5U3Osf3t396ub1a5xb+dvYlt3zN/Ey1JqXiBJo3tkRS0ifejasC+1yGRt6bmk2/xP8terT5pcp5FTkJr7VHmjjmh2tti+9/Etej+GcN8L1/e7wbCb589fv140upQyfcdt3+y/y1654PlVfg0JnyoGnXJOeoGZK/afBtWzjHf9g1T/ANKgfW8Eu+PxD/6dS/OJ5nb3PmbPs025ZpW+X+7XLfFnVJofDNz9jfbui2/7taUN4beM/eUtu2bf/Qq8++KXiqG3aTTZtrvMm7y2/wB2vxrllzHx8ZSkfKPjpXk1y4uX++0rfM1UNGvPs0zF/utWp423yaxK7/cZ91c+spinVk+YK+6uiMfd5TSJ0/kyRq7+Tjd/erKvpHjwj7sf7NaEN4l1Ypvm+9/drMvPm3OjsW/u1X90jl98pyNlvdqjkx/B92pJJHZgmxai2nd
8lSaRBt7SeY74NWLXUJoW3+c23+7UTQzMN/3qU27r8nf/AGqr/EBuWHih0Ub03D/arb0/xG8jNCm1d1cRHG7fJsartr5sbLNsb/dpc3KTI7LzPtDb9iq33dtT2VruzNlX/wBmsfR7xPIX52P+9W3p94gbyUT738S1rzc0TE0/Dun/AG64+zbNnzqq7v7tdf8AED+0o9JttEtrWQJDFulb/wBBrm/Cd3DHqUKXO1Pm2uzf71eqXzaa2jtqqQ/bC0Sqys9PmI5o854Rr032e1l+TCbPmXbXnEzFpi+zG6vc/H8fhvVrVLaGwktpGXd5f3q8e8ReH5tKuN6Qt5TfNUfD8RtTlcyqVWfdspKdCqCRd+6g6TX8O2vmP8g5/iq14wmEccNmlzuRfm21P4YtkjUzO642bvu1h65fPeX7vvVlZ/vLS+2Y8vNMpk+Y1aOnzTx252P81Z8Y/eYetnSVhWNhs/4E1QEi9pd472phd97tTdSmubfds+cVFbqnmHyX+Zal1L99OIUdt+z/AIDQR8PvIr6fq1zcN5P3f4fmq/dXiW8bPs2/w7lqO3sYbWMyVHfMkkJhfk/e+WtBc3vjrfUt0iu8y4b+GtixTdDLNC7FFrnbfT3umXCfdrqtBmezt3R0UIyfMq0B8RmyX32X3VvvV96/Gtt/7AxdCBnwhpZH/kvXwpqljbM29I2279v3a+7PjXb7v2CDbK2P+KS0pQfxt6/X/DHXKc7/AOwaX/pMz7LhR3weP/69P8pHwGusJ5nlu+dv96tGHe8P2npu+5WZb6SjNs+Ulfv/AN2tGO4e2jCPtx93bX5DKMYyufG/ZJbfevM0O8f3qkkWFmR3TBk+Wq011OJN6Ju3f3f4aiWaaS4SF0YBv4qQSjyli4t/9tg23/vmq8av5e/zmI/jp/2h1uPv/dfbTG+VVTfuT+81L3ugvd5hJGfOx5lP/stZ2qKqyfc3bf4lrRkt0uN2zctMbQ55ov4f71EolnNzWbtJ8nA/vVCbGZ12bN3y/wANdP8A8I/5Kqj/APA6kbwn5S74X4b7v+zTjEz5uU5C006a4uvISNifate80+bT1jieHYy/f3Vu/D/Rba68eQ2Tur/Oqv8A3a9C/ah1rwR4i1fw94a8B+A4dGHh/S2ttW1D7b5rapcM27zP9lVX7q1nKITqX5TA8G2v/EjV/vf31/urWnazzaTcLqtm+2aP/VN/dqv4NV5ND2bMbX2tuq9cRyRtKfJ+X7u5a3p7GNSMZaH2D+xb+3NqVjJZ+D/G2vNaTQp/oGpebtbd/Cq12/8AwVO0HVfjj8O2+MupaU02o6TErPNbxf66Pb95mr4A0+6vNJukubZ2/c/Mu371fUfwN/bAfxF8M9S+Dnj+/jZ7rTWt4rq63bWX/a/2q1jWnS0+ycFXDxn7/U+R/L8ldnzff/iSvq7/AIJzNnRvFKkYIns8/wDfM1fMOs6b9h1i5tra5V0hnZFZX3Ky7q+n/wDgnTn+yvFmQB/pNn0/3Zq/RvCb/kvML6VP/Tcz7bgdy/1ho3/vf+kSPG/jkUT42+KlMTBv7duWV/X94a5qO385ld41zv3bv71dL8dlQ/GfxSWbaf7eufm9P3hrnY1/eNs+6v8Adevjs8/5HeK/6+T/APSmfO5jJf2hW/xy/NiBXW4EMnyt/dX5ql2pI3k7GLbfu79u2ntC7R74H27fu0ohLQ7P3j7fvfJ8zV4/2DkjKW8iNdm5k37fm+9UCypCVRPm+fazbNy1buFQ7odm1tm5Vaq7RC3dnfzAI/4mquZBH4rC+Z5f+kiH5lptoySKuz+9/FUUzbf3Xk//ABNSbdtwyJJt3JtrGR3x2LMNw/2dkeDf821G3/dqeOaHaPn+X+CqTFNqu7t8y7akVkaFpEdW/uVly/aOqPOaX2tI4fJ6bl/herNjL5kaTP8AN8rbt1ZUf+qTfyV+bdVyN/JZXR2X5Nv+zS5UKUpGtbSecud/3f4f71WLdtyvNMija/y7X3Vm2s27Y7/LuT5NtXbObazO5VGb5mVnreJx1uXoaFjMVVdj7n2bVjX5aZcrBsKOjMZPlpkbAyIkKYXZu3Sfw0jMis2987fl8tfu/wC9V/4Tg92XunGX1r5LtM+7bu/v1LYrvj87yVbd9z/aWrGqRp882/I2fdp1vHtVY1Tjb8jKlR7GfKe3Rlyk9rbpM2Xh/h/75rVtWeFl/fbl/u1ShjeO3/cup/2a0bVfm2eTtG2vPrUZRPawtT/wIu2Me5Vab/lp/Ey/MtaOm3HnS7PJj+X7q/d3L/8AFVm29ykbF96/991dtZEutjwpsMnzfc+XdXj4inL7J7NOtKUuVyOg02R45g+xSzP/ABfwrXRabfJHCiI+1tm7d/8AFVyVi32eOKa5Crt3KzK33mrXjvNuJpLnai/NuVP9mvJqe9I9Gm4xOxtbra2938zcyq67/wCGug0vUPs00XnQ7k3bn3VxOl655bLv/wBIWRP3rK+1l+X5Vrd0nUplji2PDjeq7ZG+7XLKPKd8akTvtJ1GG3YvN0/ur/tVpLq1hG0iJtfy4vnk3bf4ttcTp+uOZGdEVjGjNt3/ADNWjJqkMLI7p/rE3fe+Wrw/PGRjiJQkaeuXEP2hEuZtiNKyfN93/erntQ1K2hm8mN8Ov8Wz5WqXWL+Z42hfaRvVkZvm/wDHqwdUvEW382ZtzKny7vvV7uFre57x8xi6fNJyPSdHS4vvAnlWjhpZbSVYiG/iO4Dn615V4p+E3xVutFns9F0dTLcjbMXvIhuHrktVnwz8UfEPhG1XSozHdCeTdELndtiz1AwRgd8Vz/iH9srxRp2uTadpmhaVJDDN5bSSCTJx94jD8iv6Mnn/AIZ8WZNl9PNqtenVw9KNO0EraJJu/LK9+W620eup62MxvDWY4SgsbOcZU4qPur08n2OftP2YvjQlwh/sC3jVW6m/iPHpw1dHqfwG+J4tvI07w/Gw27SrXsQ4/wC+qvWX7VPjCWyS6utA0zdIcqsaScD8XqjqX7YPi+0lkih8O6XlO8iyf/F1z/UfBiMr/WsT9y/+VnlQocD30rVfw/8AkTlLz9mH44OS0XhuMgggKmpQjb+b9K5PWP2Nv2h7243x+DoWG/duOrW4/wDZ66rVv2//AIj6fN5UPg/Q5CPvLtmyP/IlYdx/wUp+J0RITwR4f47Ms/8A8cpfUfBff61ifuX/AMrGqPA3/P2r9y/+ROef9iP9pBFaRPA0DMynC/2xbfL/AOP1jX37Bf7UupzNLP4KtVI+4TrNsf8A2euwX/gpn8WiQT4E8OYPQ7Lj/wCO0y6/4KefFS3GR4F8On5f7k/3v+/lV9T8GJe99axP3L/5WEcNwN0q1fw/+ROGb9gD9qcWjQjwBaFy2Q39tWv/AMcqhP8A8E8P2ry+6P4f2h/7jlr/APHK78f8FQ/jGyGT/hAPDIA6gpcZP/kWqkv/AAVV+MiqHT4eeGcHqClxkf8AkWo+oeC//QVifuX/AMrNPYcEW/i1fu/+1OKj/wCCdv7WET7ovAVoM/8AUbtfl/8AIlbuhfsHftRwZGoeA7
RQRgkazbE/pJWmf+CrnxnySvw88LlR32XP/wAdqWD/AIKpfGl0WWb4deGVRjgER3H/AMdp/UfBeOv1rE/cv/lZLocDf8/av3f/AGpR1X9gH9oS6QmHwbakj7v/ABNLcZ/8frmr/wD4JzftTOxMPge0ceg1q2H83r1jwb/wUs8f+IZxbaj4O0GFs4IQTc/nJXW6l+2t8Tre386x8MaDJu+6WSbA+v7yksD4LS0WKxP3L/5WQqPA1P3vbVfuX/yJ85r/AME6f2smTa3gG0GemdctTt/8iUi/8E5f2slBA8CWgz6a5a//AByvVPEP/BSP446KzLH8PvDL7Wxylx/8drAk/wCCrnxpiQs/w78Lgjtsuf8A47Q8v8F4/wDMVifuX/ys0jS4HltVq/d/9qcYn/BOj9q8gq/gG2A3ZGNctf8A45SJ/wAE6f2s1BUeAbTP95tctf8A45XZD/grD8Zud3w58MDH+xcf/HaVv+Cr/wAZVfb/AMK88L/98XP/AMdo+o+C9v8AesT9y/8AlZXsOCf+ftX7l/8AInIJ/wAE7v2shFg+A7UMPu41u1/+OVLD/wAE8v2r1H7zwHabv7w1u1/+OV1p/wCCrXxmwGHw98L4P+xc/wDx2g/8FW/jKOB8PPDBPpsuP/jtH9n+C/8A0FYn7l/8rEqHBEdqtX7l/wDInP2f7AH7VEGC/gW1POcf21a//HK6HT/2JP2l4bcRT+B7ZMDGI9Zt/wD4uprT/gqh8Y7ghH+HfhoMfRLj/wCO1dP/AAVD+KkcZkn8DeGxt64W45/8i01gfBf/AKCsT9y/+Vk+w4H/AOftX7l/8icxqf7Bv7UF1K0ieBrZjuyrDWbYf+1Kji/YE/acEarJ4HthtGcLrNt97/vuunt/+CoPxjuX/d/D/wANY7jZcZ/9G1dP/BTL4rKGZ/BHhtQq5O5Ljn2/1vWj6j4L2/3rE/cv/lZLocC/8/av3L/5E8w1T/gnV+1bcXHnQeArQ7upGt2o/nJVX/h3J+1r/wBE+tP/AAeWv/xyvSj/AMFRvjC0vlxfDzw37FluMf8Ao2lvf+CoHxnjtzPZfD/wyxX7yOlxn/0bS+p+C8v+YrE/cv8A5Waxo8E/8/av3f8A2p5r/wAO4/2tP+hAtP8AweWv/wAco/4dx/taf9CBaf8Ag8tf/jldr/w9j+NP/ROfC/8A3xc//HaP+Hsfxp/6Jz4X/wC+Ln/47VfUfBj/AKCsT9y/+Vl+w4K/5+1fuX/yJxcf/BOb9rQct8PrP/weWv8A8cq7pn/BOb9qKS7jXUPBNtFGWXfINatjt/APXoHhL/gpj8evF+qx6Rpvw28MvI/UrHcYH/kWvXtG/av+ItzGBqfh3RVkC/P5SSgE+gy5rKdDwUpL3sXifuX/AMrOar/qJD3ZVqv3f/ann/hf9jP4teGNJj0y08K2o2jMji/h+Zv++q11/Zg+M0Uonh8NQqw6bdQhx/6FXZN+1Z4xTIbQdK3B9pBEn/xdMm/au8dJsRfDOlbmGcEydP8Avqud4bwQlvi8T9y/+VnO6PAL3rVfuX/yJ1nw18AeONC0RtM1/SFVpBkk3EbAH8GroYvBuqrIZ2i+cH5DuXgfnXMfCv47+KPHN3NbapoVqoiAO+zjfHP+8xrsZfGOpqp2WkJbOVVsjK/3utS8B4HdcXifuX/ys5o4bw8V0q1b7l/8gcpqHw18bx3sk2nxKyOrLhJlThvqaSDwL8QbcqsmkrIsabV2XSLn9a27/wCJup2dzHGLCBkkGMhWyD+dE3xN1NWXyLO2fPVBu3fhzWNTKvA7ri8V9y/+VFPCeH0d61b7l/8AIHOyeAfiXI27+yo1bbgMLiPj/wAeqpJ8OvinNGI20CDcFI3tdx//ABVdM/xS12P5ZbSyRiMpuD4P/j1D/Fy5t4Q86WjOekcatz+OamOWeBdv97xX3L/5UDwnh7/z+rfcv/kDO+Gfws8WeHfG0HiTxAwZIg4yJlOMxlegPqa539qj4NfEb4neMdN1PwXo8dxbQab5M8pu442VvMc4w5GeCK7zwf8AEzUfEniaHRrjT4EhmD4eIMWBCFuTnA6VoeP9R+KOnTrH8PfClpqCG33NJd3CpiTJ+XBde2Ofev1nJci4FzXw1r4HLnia2D9veXLByre0Sg7KMab91Llb917vUaoeHqldVqv3L/5A+PviL+xb+0Z4hhtrHSvBkEkUcu+RpdXtwT+b0ul/sU/tFWcYR/CNsNn3R/atv/8AF17rcfED9usayLa2/Z+0A2eObhtXhz+X2rP6VqWvjL9sJ3H2r4M6Ii98alHn/wBKK+fo+HXA8YWjh8x+dCf/AMpOlU+Abfx6n4f/ACJ4Kv7Gn7QDgmTwvbKSMfLqUHyj/vurQ/Y0+NJCRt4TtgqptyNRhz/6FXuq+Lv2tyxDfB/RQAeD/aEfI/8AAipovFf7Ve0mb4S6PnPAW/j6f9/60/4h3wT/ANA+Yf8Agif/AMpE4cAPevV/D/5E+fj+xj8b443SDwpD935M6nB/8XWP4y/Yn/aP1TQJ7TS/B1u08yBdp1e3GM9eS9fTg8VftS7xn4T6Tgrk/wCnx8H0/wBfWV4x8cftoWGkmfwZ8DdEvrzeAIZ9UhVcdzk3K/zo/wCIecE2/wB3zD/wRP8A+UlRhwCmrV6n4f8AyJ8Z/wDDuP8Aa0/6EC0/8Hlr/wDHKP8Ah3H+1p/0IFp/4PLX/wCOV9P/APC0/wDgpZ/0a14X/wDB7b//ACbR/wALT/4KWf8ARrXhf/we2/8A8m0v+Ie8E/8AQPmP/gif/wApOj/jBP8An/U/D/5E+YP+Hcf7Wn/QgWn/AIPLX/45Qf8AgnH+1oeD4AtP/B5a/wDxyvp//haf/BS3/o1rwv8A+D23/wDk2j/haf8AwUs/6Na8L/8Ag9t//k2n/wAQ94J/6B8x/wDBE/8A5SH/ABgn/P8Aqfh/8idj8HvhP448JfsVr8G9d0yOLxAPDOpWZtFuUZfOlM/lrvBK8715zgZ5r4tb/gnF+1qeR8P7T/weWv8A8cr72tvHXxI8Pfs66h8Tfil4TtNK8S6VoF9f3+kwTCWGN4FldF3JI+4MqIThyfmPQ8D5A/4ex/Gn/onPhf8A74uf/jtfQcf4DgOGEy2hndStT5KKjTUVaXIlFfvE4NqWiurKzvoZYahwLeTp1qr112/+ROK/4dx/taf9E/tP/B5a/wDxyk/4dyfta/8ARPrT/wAHlr/8crtv+Hsfxp/6Jz4X/wC+Ln/47R/w9j+NP/ROvC//AHxc/wDx2vzf6j4Lv/mKxP3L/wCVnV7Dgr/n7U/r/t04r/h3H+1p/wBCBaf+Dy1/+OU3/h3D+1r1PgC0P/cdtf8A45Xb/wDD2P40/wDROfC//fFz/wDHaP8Ah7H8af8AonPhf/vi5/8AjtV9R8GP+grE/cv/AJWHsOCv+ftX7l/8icV/w7j/AGtP+hAtP/B5a/8Axyj/AIdx/taf9CBaf+Dy1/8Ajldr/wAPY/jT/wBE58L/APfFz/8AHaP+Hsfxp/6Jz4X/AO+Ln/47S+peC/8A0FYn7
l/8rD2HBX/P2r9y/wDkTiv+Hcf7Wn/QgWn/AIPLX/45R/w7j/a0/wChAtP/AAeWv/xyu0b/AIKyfGkDP/CufC//AHxc/wDx2hv+CsnxpAz/AMK58L/98XP/AMdp/UfBj/oKxP3L/wCVh7Dgr/n7V+5f/InF/wDDuP8Aa0/6EC0/8Hlr/wDHKP8Ah3H+1p/0IFp/4PLX/wCOV2v/AA9j+NP/AETnwv8A98XP/wAdo/4ex/Gn/onPhf8A74uf/jtH1HwY/wCgrE/cv/lYew4K/wCftX7l/wDInE/8O4v2tN27/hAbT/weWv8A8cpf+Hcf7Wn/AEIFp/4PLX/45XpGjf8ABUn4u63GbWDwJ4ZS8P8AqY3S42yH0B83rVC4/wCCrHxvtpmt5vhv4XR0bayslz/8dpfU/Bf/AKCsT9y/+Vi9hwX/AM/av4f/ACJw8f8AwTj/AGsiw8z4f2gHf/ieWv8A8cr7Z/4JvfAy8/Zo0HV9Y+JNvb2mr6gUt47aPbMRCRl2Lx5GcgcV87+Af+Clnx48eeI7bw7p3wz8OSS3MojRYorjJY9uZa+9/D/hS3v7Czk1K6cTyWiNeCIbVjlK7iozk4rxc6oeBEKPssVjMUk+yV//AE0z18nwHC9XEe0w85trvt/6Sj0TTPi74GtExLfyE55PkPkj8q1bP41/DJZWF1qjsrY+ZrSTt9Fri9I+EmgX8ImuNRvFHcIU5/8AHa6LTP2dfCN64WXWdSXK5XDxjn/vmvjHlv0a5R1x2N+5f/KT7hUsO4rc6OL46fCKJ939uyHnAP2GXgf981ZuPj58GpQXj12QF2GV/s+XgHr/AA1n237JPgqeHzz4i1XA7Bosn/xyrUf7H3gGRjjxRqxA7AxZH/jlZrL/AKNMf+Y7G/cv/lJoqNGPulS++NvwqeV2t9dkI2lVb7DJu29v4awdU+KvgK7RWg1hg38ebST5v/Ha6GX9kTwaCUh1/VmY/c5ix+PyVzOofs9+GLOZoF8QXuVJG5gmAR2Py9a2p5b9G37OOxv3L/5SRUpUOXW5zOr+KvC95OXTUJHGSQTAw/pXH6syXzq8NxsVRt2qnOM5rrNd+HWj6ZOI7bUpnUHErMV+X9K5q+0tbZ3FuzPtkKYbg5FejQy76O9PWONxnzS/+UnlVsLlc7qTl/XyOQ1XRNbmDC3t1kOc7vMALt/eOaxNT8GeMLmdwLZGiKfuhFIibW9+ea6XVvEOoabIyJaI+DgYycn0+tcnqfxi16wLL/ZVrlBkhg33f++q97DYDwHfvQxeK+aX/wAqPm8VhuGHJqpOfy//AGTL1X4Z+PrjIs9LQEr95rpMZ9etY0/wY+J1w29tMiVmfc7LdRnH+7k1b1D9pjxJaMUTSNNBVcvv8zj/AMerPH7Vvi3d5baBpeexUSEf+hV7NLL/AAV5fdxWJ+5f/Kzx6tDgm3vVav3L/wCRF/4Ut8TyVjXRYlj/AIl+2R5/PdXqPhjw5q9h8MB4ZvoFS8+wzxGPzAwDMXxyOO4rzSD9qHxbI6o3h/TTnrt8z/4qrtv+0j4klG+TQ7HGcYUSE5/76r6fhrOPCfhnFVa2ExFZupB03zRuuVtN2tBa6Lv6HTleO4MyitOpRq1G5RcXzK+jt2itdChdfAv4iGPFpYwLuXDBrpcr+Oa808dfsl/HzxH4lS+stDsjbxwldzahGCT9M16v4g/aa8RaLa/aV8NWLARlizyuAMV45bf8FNPHl74kvNHtfhzoxhtmISVriXLY9ea+fjl/gnusViPu/wDuZxww3AnLpVqfd/8Aannmv/8ABPD9p+/unktfDmnMrNkZ1iIf1rHb/gmv+1WTn/hF9M/8HUP+Nehar/wVU+JdhI8cfwy0FinUNPMP/Zqpf8PaPigeR8K9A/8AAif/AOKq/qPgp/0FYj7n/wDKzWGG4H6Van9f9unO6J/wTr/agtYXiu/DGmKD0H9sRH+tR3H/AATo/amlfJ8N6a3zZ3DWYR/Wu60n/gqT8TNRT5/hloSv6Ceb/Gi//wCCpHxNsjtHwy0Nj6edP/8AFVH1PwT/AOgrEfc//lZDocDc2tWp93/2p52P+CbX7U3O/wAL6Yc/9RqH/GlH/BNz9qf/AKFXSh9NZh/xru1/4KqfE9ip/wCFZeH8H7x+0T8f+PVKn/BU34myjKfDPQfu5/183/xVH1HwT/6CsR9z/wDlZXseBv8An7U+7/7U4P8A4dwftR5x/wAItpeP+wzF/jUh/wCCb/7TZw58N6duH/UZi/xrtx/wVO+JaDfP8NNAVT91/tE2D+tRt/wVX+JQGV+GGhdcczzf40/7P8FP+grEfd/9zF7HgaP/AC9qfd/9qceP+CcX7TOf+Ra00D21iL/GlT/gnV+1FGqxDwzppVf+ozD/AI11b/8ABV74lBii/DHQQf8Aanm/+KpU/wCCrXxPZtrfC7QR/wBt5/8A4qmsD4Kf9BWI+5//ACsaocD9KtT7v/tTnIP+Ce37TsChV8Madgdv7Yh/xrQtv2B/2loAWHhnTQx6/wDE2i/xrXX/AIKr/Ekjc3wx0PHtPN/8VV7Tv+CoHxGu5Ak/wy0ZQ33Cs03zfrT+o+Cn/QViPuf/AMrJ+r8Df8/an3f/AGpT0/8AYZ+P0TB5/DlgrbcM39qRn+tdT4a/Ze/aH0mOS3u9As3jP3VGqR4P61d8O/t/+O9bIWTwJpCE9Assv+Ndhp37VPxC1BFmPhPSYomztlkkk28fjSWB8E+mKxH3P/5WZyw/AcdHVqfd/wDamG37KXiy/iB1HwXYCXfw63qDav8Ad4Nc1rv7B/i/V/Nh/si0CSAgH7avFel3H7V2t2G2O80HTncpuYwzOV/nViH9p7xDPp7XqeHLFSMHa0j9D361p9R8F9vrWI+7/wC5h7LgP/n9U+7/AO1Pk/Xv+CaX7SsWpyroeh6bNb7v3UjatEpx7gmoLT/gm5+1OkgE/hbTNobOf7ah/wAa+hPGn7cHxE0ATnR/AmkzCEZ3TSy4I/A159B/wVG+KjTGGb4V6GpHpPN/8VUPL/BTrisR9z/+VmkafAvL/Gqfd/8AanNr/wAE/P2l7fT3hh8NaY0hTaudWi/xrnp/+Cbn7VLS+ZH4V0zHp/bUP+NelXf/AAVI+IUFwbdPhvoeV++Wnm4/WoJP+CpfxQCmSL4Y6CVHVjPN/wDFUfUfBT/oKxH3P/5WP2PAv/P2p93/ANqeeL/wTY/ao3iQ+GNMyGz/AMhqH/GtCL/gnX+0+iHd4X03J7LrMX+NdY//AAVV+KkbfP8AC7QMeouJ/wD4qr0P/BUP4lSQea3wz0IfS5m/xpPA+CnXFYj7n/8AKwdHgbrVqfd/9qcAf+CdX7U4kJHhjTto+7/xOof8asRf8E8v2pduZfDWmA/9hiL/ABrtl/4KifEpsEfDLRBnsZ5v8aQ/8FQ/iiEDn4YaFjGT+/m4/Wn9R8FI/wDMViPuf/ysPY8DSf8AFqfd/wDanJQf8E9/2mo1O/w3p59F/teH/Gqk3/BPD9qWWUlfDGmKD1xrMP8AjXbD/gqP8Tdm9vhjoQ9B9om5/WlP
/BUn4mbcj4XaHk/d/wBIm+b9aj6j4J/9BWI+5/8AysPq/A0f+XtT7v8A7U5PTv8Agnv+03asHfw1p2QMc6vEf61r2/7Bf7Q4t2iuPDlhknII1WL/ABrYtf8AgqD8T7iLzT8MNDX2M83+NWB/wU88fjHm/DjRhn0mm/xo+o+Cf/QViPuf/wArF9X4G/5+1Pu/+1Obuf2CP2jnO+PQLHOM4GrRfe/OvqL4nfDLxh4k/ZSb4WaVp6Ta1/YFja/Z/tCqpliMO8b2IXA2NznnFfP3/Dz/AOIJGV+HOife/wCe83T161E//BTn4rTz7bX4daBGgHJladifycV9FkuceEvDuGxVLC4ms1iIOnK8W3Zpr3fcVnq97+h6OAxnB+W0qsKVWbVSPK7ro77e6tdTlk/YP/acjYlfBVr0x/yF7b/4ukf9g/8Aab2/L4EtCf8AsMW3/wAXXYJ/wUr+LAjDT+AvDwJ9Fn6f9/Ken/BSj4rlA58A6Bg8nCz8D1/1lfOfUfBn/oKxP3L/AOVnlfVuB/8An7V/D/5E4qP9gv8AadVh/wAUVajHrrFt/wDF0+T9gv8AaXaZXHgq2wv/AFGLb/4uu1/4eS/FLdtHgjw8SBlsJPx/5Epo/wCClHxRMXmHwR4eHttn/wDjlT9R8F/+grE/cv8A5WDo8DL/AJe1fuX/AMicdH+wX+0kUKz+B7Y4+7/xOLb/AOLpI/2Cv2lUO4eDbUD+6dXt/wD4uuxH/BSb4stEZh4D8PADswnyf/IlKv8AwUi+Lhj81vA/hwBl3KAlx/8AHKSwPgt0xWJ+5f8Aysf1fgjl/i1fuX/yJyqfsJftGiPL+CLVn/7C1v8A/F0L+w3+0qpYnwJbEEYx/bFt/wDF11K/8FJviyzbT4F8PAj7wKT/APxyh/8AgpL8WowrnwL4eKlsfLHcf/Haby/wY+J4rE/cv/lYex4Ij/y9q/cv/kTlh+w5+0vIY9/gG0XH3ydYtj/7PWh/wxF+0N9mMf8Awhlru9f7Vt//AIutpP8AgpN8VS7B/A/h0DOFG2fJ/wDIlXV/4KJfFA2T3h8G+HhsjLY2z9v+2lNYPwYlLTFYn7l/8rJlh+ButWr9y/8AkTh/A37CH7R2k+I7jVNW8GW0SnPkumr25z+T1qax+w78fLstLF4Rt5ZC+4FtUgH/ALPXQeA/+Ci3xV8V209ze+CPD0YjbagiE/Pp1kNa2oft8/Eq1hD2/gvQ3IOHY+dgf+P1DwPgv/0FYn7l/wDKwlhuBuZXq1fuX/yJyvh/9jP9oCxsXt7rwhboWkzj+1IDn8nqxP8AsdfHtovLj8J25P8AeOpwf/F10+nft5fEW8gSWXwnoa7unyzcj/v5Sz/t3/EqJiB4O0TAGcFZs/8AoyrjgfBi3+9Yn7l/8rM5YfgWUuZ1qv3L/wCROPk/Yx+Pi/6vwhA3GOdUg/8Ai6gP7GP7RSsWi8IW6jbjaNXg/wDi660ft+fE8u0f/CH6BlRlhib/AOOVH/w8D+J+WU+DNBBHTib/AOOVp9S8GtvrOJ+5f/KzH6rwDzfxqv3L/wCROUH7Ff7QxVI38HW+M5b/AImtv/8AF17z+x18G/Hvwg07XrTxzpUdqb2a3a18u4jk3BRIGzsJx94da8zX/goF8TGQk+DNCDDsVm/+OVDe/t/fE+6spbW38LaPBLLEypLHFKWjJGNwzJjI6816+QZl4TcNZpDMcJiK8qkOaylG6d4uL2guj01Wp6WW4ngrJsXHE0atRyjeya01TX8q79zg/jjIg+NvipHG7/ieXPHp+8Nc9b/vG6qn99tn3qq3WqX+qajNqmoX73F1dStJLPMS0kjk5LMTySTzmpbeTy1/fPvVf4tlfjGOxMcbj6uItZTlKSXa7b/U/PcVWWIxM5r7Tb+93Lp+zLgumz5Pvb6W6VPLEyf3P4XqD7QjNveFWRvl3MlRXVw7Rs6Pg/7P3dtcXvfaM/djSJbi73SfIkY/hRv4ttQXEaQ4HzbG/vNSec7M8Lop+T5G/vVHJNMsafI2F/hZaUpS+FBTp83vC/aEnymxTt+Z1amfaEjZv3O7+61OuJIYo1R0Xc3zVXa42/PMm5vvIq/xVjLY76dvtEse+SQzJ8n/AEzZvvVOvkNDs/h/2ap/aIZF3v8AeqZZkbaiD51/8dqDcvQt9nXeNqr/ABrvq5bzSRyb3TerfLtas6FvtDbLqFfm/iq/FJsk+zTbWVvuVO0+YUv7pdguizG2+zK4Xa27f/FWhbSJHMCiMrN/Cq/NurNh7b/lbd93ZVyO481vvsn8Tt1rWMebc4K0pRloaq3CeSqeTlf4l/ipitP+72TbG/g+Xa23/aqKznk3Nvfeuzcn+zTmuIfOSaZNzr8r7XrXl5fhOTm94//Z\n", - "text/plain": [ - "" - ] - }, - "metadata": { - "tags": [], - "image/jpeg": { - "width": 600 - } - }, - "execution_count": 38 } ] }, + { + "cell_type": "markdown", + "metadata": { + "id": "hkAzDWJ7cWTr" + }, + "source": [ + "        \n", + "" + ] + }, { "cell_type": "markdown", "metadata": { @@ -665,35 +658,35 @@ "base_uri": "https://localhost:8080/", "height": 66, "referenced_widgets": [ - "cef5e9351ca743bcba5febac0b096a30", - "ec326c52378f4410920c328f221e0514", - "83000c64a11c4ae8abd6f0ef2f108cef", - "0f7899eb719f4a9c9852426551f97be9", - "886ac5b18b3c4c82bf15ad5055f1e17e", - "4e67b3c3a49849c7a7ba28b7eec96e7a", - "62c3682ff1804571a483d46664533969", - "599dda3b608b432393760b2ca4ae7c7d" + "2e915d9016c846e095e382b6a02ee773", + "cb7fc3a5c6cc4fde8d2c83e594a7c86e", + "ac3edef4e3434f4587e6cbf8aa048770", + "853ac234cc2a4236946fc516871e10eb", + "13842ca90c0047e584b8d68d99dad2b1", + "f454999c3a924c7bad0746fb453dec36", + "f94a7ca8c1f04761bf38fdc5f99664b8", + "9da1a23b042c41618dd14b0e30aa7cbe" ] }, - "outputId": "56b6402a-81d5-41d0-a3c8-8889db1fca6c" + "outputId": "3606f305-aa67-43fd-d5d6-93d1f311768c" }, "source": [ "# Download COCO val2017\n", 
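        "# torch.hub.download_url_to_file saves the release asset to tmp.zip (~780 MB, matching the progress-bar max below); unzip -q then extracts it quietly into ../datasets\n",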
"torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017val.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../datasets && rm tmp.zip" ], - "execution_count": null, + "execution_count": 10, "outputs": [ { "output_type": "display_data", "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "cef5e9351ca743bcba5febac0b096a30", + "model_id": "2e915d9016c846e095e382b6a02ee773", "version_minor": 0, "version_major": 2 }, "text/plain": [ - "HBox(children=(FloatProgress(value=0.0, max=819257867.0), HTML(value='')))" + "HBox(children=(FloatProgress(value=0.0, max=818322941.0), HTML(value='')))" ] }, "metadata": { @@ -716,30 +709,30 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "a5d41761-f1a0-41fe-d0bb-4cceebd7c4a6" + "outputId": "20fbc423-f536-43ff-e70b-3acf6aeade99" }, "source": [ "# Run YOLOv5x on COCO val2017\n", "!python val.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65 --half" ], - "execution_count": null, + "execution_count": 11, "outputs": [ { "output_type": "stream", "text": [ - "Namespace(augment=False, batch_size=32, conf_thres=0.001, data='./data/coco.yaml', device='', exist_ok=False, half=True, img_size=640, iou_thres=0.65, name='exp', project='runs/val', save_conf=False, save_hybrid=False, save_json=True, save_txt=False, single_cls=False, task='val', verbose=False, weights=['yolov5x.pt'])\n", - "YOLOv5 🚀 v5.0-157-gc6b51f4 torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", + "\u001b[34m\u001b[1mval: \u001b[0mdata=./data/coco.yaml, weights=['yolov5x.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.65, task=val, device=, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=True, project=runs/val, name=exp, exist_ok=False, half=True\n", + "YOLOv5 🚀 v5.0-330-g18f6ba7 torch 1.9.0+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", "\n", "Downloading https://github.com/ultralytics/yolov5/releases/download/v5.0/yolov5x.pt to yolov5x.pt...\n", - "100% 168M/168M [00:01<00:00, 156MB/s]\n", + "100% 168M/168M [00:05<00:00, 31.9MB/s]\n", "\n", "Fusing layers... \n", "Model Summary: 476 layers, 87730285 parameters, 0 gradients\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:01<00:00, 3008.87it/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: ../coco/val2017.cache\n", - " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:17<00:00, 2.02it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning '../datasets/coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:01<00:00, 2653.03it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: ../datasets/coco/val2017.cache\n", + " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:18<00:00, 2.00it/s]\n", " all 5000 36335 0.746 0.626 0.68 0.49\n", - "Speed: 5.3/1.5/6.8 ms inference/NMS/total per 640x640 image at batch-size 32\n", + "Speed: 0.1ms pre-process, 5.1ms inference, 1.5ms NMS per image at shape (32, 3, 640, 640)\n", "\n", "Evaluating pycocotools mAP... 
saving runs/val/exp/yolov5x_predictions.json...\n", "loading annotations into memory...\n", @@ -747,14 +740,14 @@ "creating index...\n", "index created!\n", "Loading and preparing results...\n", - "DONE (t=4.88s)\n", + "DONE (t=4.82s)\n", "creating index...\n", "index created!\n", "Running per image evaluation...\n", "Evaluate annotation type *bbox*\n", - "DONE (t=83.47s).\n", + "DONE (t=84.52s).\n", "Accumulating evaluation results...\n", - "DONE (t=12.96s).\n", + "DONE (t=13.82s).\n", " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.504\n", " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.688\n", " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.546\n", @@ -829,35 +822,35 @@ "base_uri": "https://localhost:8080/", "height": 66, "referenced_widgets": [ - "217ca488c82a4b7a80318b70887a556e", - "4e63af16f1084ca98a6fa5a282f2a81e", - "49f4b3c7f6ff42b4b9132a8550e12186", - "8ec9e1a4883245daaf029458ee09721f", - "9d3e775ee11e4cf4b587b64fbc3cc6f7", - "70f68a9a51ac46e6ab7e51fb4fc6bda3", - "fdb8ab377c114bc3b862ba76eb93cef7", - "cd267c153c244621a1f50706d2ddc897" + "6ff8a710ded44391a624dec5c460b771", + "3c19729b51cd45d4848035da06e96ff8", + "23b2f0ae3d46438c8de375987c77f580", + "dd9498c321a9422da6faf17a0be026d4", + "d8dda4b2ce864fd682e558b9a48f602e", + "ff8151449e444a14869684212b9ab14e", + "0f84fe609bcf4aa9afdc32a8cf076909", + "8fda673769984e2b928ef820d34c85c3" ] }, - "outputId": "9e4788c2-e1d4-4a13-c3d2-984f5df7ffab" + "outputId": "4510c6b0-8d2a-436c-d3f4-c8f8470d913a" }, "source": [ "# Download COCO128\n", "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../ && rm tmp.zip" ], - "execution_count": null, + "execution_count": 12, "outputs": [ { "output_type": "display_data", "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "217ca488c82a4b7a80318b70887a556e", + "model_id": "6ff8a710ded44391a624dec5c460b771", "version_minor": 0, "version_major": 2 }, "text/plain": [ - "HBox(children=(FloatProgress(value=0.0, max=22091032.0), HTML(value='')))" + "HBox(children=(FloatProgress(value=0.0, max=6984509.0), HTML(value='')))" ] }, "metadata": { @@ -918,25 +911,31 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "c4dfc591-b6f9-4a60-9149-ee7eff970c90" + "outputId": "cd8ac17d-19a8-4e87-ab6a-31af1edac1ef" }, "source": [ "# Train YOLOv5s on COCO128 for 3 epochs\n", "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache" ], - "execution_count": null, + "execution_count": 13, "outputs": [ { "output_type": "stream", "text": [ + "\u001b[34m\u001b[1mtrain: \u001b[0mweights=yolov5s.pt, cfg=, data=coco128.yaml, hyp=data/hyps/hyp.scratch.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, evolve=None, bucket=, cache_images=True, image_weights=False, device=, multi_scale=False, single_cls=False, adam=False, sync_bn=False, workers=8, project=runs/train, entity=None, name=exp, exist_ok=False, quad=False, linear_lr=False, label_smoothing=0.0, upload_dataset=False, bbox_interval=-1, save_period=-1, artifact_alias=latest, local_rank=-1\n", "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", - "YOLOv5 🚀 v5.0-158-g78cf488 torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", + "YOLOv5 🚀 v5.0-330-g18f6ba7 torch 1.9.0+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", + "\n", + 
"\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.2, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n", + "\u001b[34m\u001b[1mWeights & Biases: \u001b[0mrun 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs (RECOMMENDED)\n", + "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n", + "2021-07-29 22:56:52.096481: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcudart.so.11.0\n", + "\n", + "WARNING: Dataset not found, nonexistent paths: ['/content/datasets/coco128/images/train2017']\n", + "Downloading https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip ...\n", + "100% 6.66M/6.66M [00:00<00:00, 44.0MB/s]\n", + "Dataset autodownload success\n", "\n", - "Namespace(adam=False, artifact_alias='latest', batch_size=16, bbox_interval=-1, bucket='', cache_images=True, cfg='', data='./data/coco128.yaml', device='', entity=None, epochs=3, evolve=False, exist_ok=False, global_rank=-1, hyp='hyp.scratch.yaml', image_weights=False, img_size=[640, 640], label_smoothing=0.0, linear_lr=False, local_rank=-1, multi_scale=False, name='exp', noautoanchor=False, nosave=False, notest=False, project='runs/train', quad=False, rect=False, resume=False, save_dir='runs/train/exp', save_period=-1, single_cls=False, sync_bn=False, total_batch_size=16, upload_dataset=False, weights='yolov5s.pt', workers=8, world_size=1)\n", - "\u001b[34m\u001b[1mtensorboard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n", - "2021-06-08 17:00:55.016221: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcudart.so.11.0\n", - "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.2, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0\n", - "\u001b[34m\u001b[1mwandb: \u001b[0mInstall Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)\n", "\n", " from n params module arguments \n", " 0 -1 1 3520 models.common.Focus [3, 32, 3] \n", @@ -968,34 +967,38 @@ "\n", "Transferred 362/362 items from yolov5s.pt\n", "Scaled weight_decay = 0.0005\n", - "Optimizer groups: 62 .bias, 62 conv.weight, 59 other\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '../coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 1503840.09it/s]\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 198.74it/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco128/labels/train2017.cache' images and labels... 
128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 475107.00it/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:01<00:00, 98.63it/s]\n", + "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD with parameter groups 59 weight, 62 weight (no decay), 62 bias\n", + "\u001b[34m\u001b[1malbumentations: \u001b[0mversion 1.0.3 required by YOLOv5, but version 0.1.12 is currently installed\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '../datasets/coco128/labels/train2017' images and labels...128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 2021.98it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: ../datasets/coco128/labels/train2017.cache\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 273.58it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning '../datasets/coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 506004.63it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:01<00:00, 121.71it/s]\n", + "[W pthreadpool-cpp.cc:90] Warning: Leaking Caffe2 thread-pool after fork. (function pthreadpool)\n", + "[W pthreadpool-cpp.cc:90] Warning: Leaking Caffe2 thread-pool after fork. (function pthreadpool)\n", "Plotting labels... \n", "\n", - "\u001b[34m\u001b[1mautoanchor: \u001b[0mAnalyzing anchors... anchors/target = 4.26, Best Possible Recall (BPR) = 0.9946\n", + "\u001b[34m\u001b[1mautoanchor: \u001b[0mAnalyzing anchors... anchors/target = 4.27, Best Possible Recall (BPR) = 0.9935\n", "Image sizes 640 train, 640 val\n", "Using 2 dataloader workers\n", "Logging results to runs/train/exp\n", "Starting training for 3 epochs...\n", "\n", - " Epoch gpu_mem box obj cls total labels img_size\n", - " 0/2 10.8G 0.04226 0.06067 0.02005 0.123 158 640: 100% 8/8 [00:05<00:00, 1.45it/s]\n", - " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:04<00:00, 1.17s/it]\n", - " all 128 929 0.633 0.641 0.668 0.438\n", + " Epoch gpu_mem box obj cls labels img_size\n", + " 0/2 3.64G 0.0441 0.06646 0.02229 290 640: 100% 8/8 [00:04<00:00, 1.93it/s]\n", + " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:01<00:00, 3.45it/s]\n", + " all 128 929 0.696 0.562 0.644 0.419\n", "\n", - " Epoch gpu_mem box obj cls total labels img_size\n", - " 1/2 6.66G 0.04571 0.06615 0.01952 0.1314 164 640: 100% 8/8 [00:01<00:00, 5.10it/s]\n", - " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:01<00:00, 3.88it/s]\n", - " all 128 929 0.614 0.661 0.67 0.438\n", + " Epoch gpu_mem box obj cls labels img_size\n", + " 1/2 5.04G 0.04573 0.06289 0.021 226 640: 100% 8/8 [00:01<00:00, 5.46it/s]\n", + " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:01<00:00, 3.16it/s]\n", + " all 128 929 0.71 0.567 0.654 0.424\n", "\n", - " Epoch gpu_mem box obj cls total labels img_size\n", - " 2/2 6.66G 0.04542 0.07179 0.01861 0.1358 191 640: 100% 8/8 [00:01<00:00, 5.40it/s]\n", - " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:02<00:00, 1.43it/s]\n", - " all 128 929 0.636 0.652 0.67 0.439\n", - "3 epochs completed in 0.007 hours.\n", + " Epoch gpu_mem box obj cls labels img_size\n", + " 2/2 5.04G 0.04542 0.0715 0.02028 242 640: 100% 8/8 [00:01<00:00, 5.12it/s]\n", + " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:02<00:00, 1.46it/s]\n", + " all 128 929 0.731 0.563 0.658 0.427\n", + "3 epochs completed in 0.006 hours.\n", "\n", "Optimizer stripped from runs/train/exp/weights/last.pt, 
14.8MB\n", "Optimizer stripped from runs/train/exp/weights/best.pt, 14.8MB\n" From 1a10b0ecd2aa44d95436b1a343b6b2242ba5c9f5 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 30 Jul 2021 01:04:36 +0200 Subject: [PATCH 217/757] Created using Colaboratory --- tutorial.ipynb | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 3f3f73ad4443..8d9c3f8b7a15 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -585,7 +585,7 @@ "\n", "`detect.py` runs YOLOv5 inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/detect`. Example inference sources are:\n", "\n", - " " + " " ] }, { @@ -627,7 +627,7 @@ }, "source": [ "        \n", - "" + "" ] }, { @@ -1028,7 +1028,7 @@ "\n", "During training you will see live updates at [https://wandb.ai/home](https://wandb.ai/home?utm_campaign=repo_yolo_notebook), and you can create and share detailed [Reports](https://wandb.ai/glenn-jocher/yolov5_tutorial/reports/YOLOv5-COCO128-Tutorial-Results--VmlldzozMDI5OTY) of your results. For more information see the [YOLOv5 Weights & Biases Tutorial](https://github.com/ultralytics/yolov5/issues/1289). \n", "\n", - "" + "" ] }, { @@ -1057,7 +1057,7 @@ "plot_results('path/to/results.csv') # plot 'results.csv' as 'results.png'\n", "```\n", "\n", - "

\"COCO128

" + "\"COCO128" ] }, { From 8d3c3ef45ce1d530aa3751f6187f18cfd9c40791 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 30 Jul 2021 01:35:39 +0200 Subject: [PATCH 218/757] Fix weight decay comment (#4228) --- train.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/train.py b/train.py index 250342acff18..cf50a5d553e3 100644 --- a/train.py +++ b/train.py @@ -128,9 +128,9 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary for v in model.modules(): if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter): # bias g2.append(v.bias) - if isinstance(v, nn.BatchNorm2d): # weight with decay + if isinstance(v, nn.BatchNorm2d): # weight (no decay) g0.append(v.weight) - elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter): # weight without decay + elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter): # weight (with decay) g1.append(v.weight) if opt.adam: From 94686575024f055e603c1b20a36dcbfb1418c3fc Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 30 Jul 2021 16:00:47 +0200 Subject: [PATCH 219/757] Update profiler (#4236) --- utils/torch_utils.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index d86267b26356..55a5fd7875bb 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -22,6 +22,8 @@ import thop # for FLOPs computation except ImportError: thop = None + +logging.basicConfig(format="%(message)s", level=logging.INFO) LOGGER = logging.getLogger(__name__) @@ -103,11 +105,10 @@ def profile(x, ops, n=100, device=None): # m2 = nn.SiLU() # profile(x, [m1, m2], n=100) # profile speed over 100 iterations - device = device or torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') + device = device or select_device() x = x.to(device) x.requires_grad = True - print(torch.__version__, device.type, torch.cuda.get_device_properties(0) if device.type == 'cuda' else '') - print(f"\n{'Params':>12s}{'GFLOPs':>12s}{'forward (ms)':>16s}{'backward (ms)':>16s}{'input':>24s}{'output':>24s}") + print(f"{'Params':>12s}{'GFLOPs':>12s}{'forward (ms)':>16s}{'backward (ms)':>16s}{'input':>24s}{'output':>24s}") for m in ops if isinstance(ops, list) else [ops]: m = m.to(device) if hasattr(m, 'to') else m # device m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m # type From bceb57b910cfd3ce6ad77494782f73a380345d43 Mon Sep 17 00:00:00 2001 From: IneovaAI <67843470+IneovaAI@users.noreply.github.com> Date: Fri, 30 Jul 2021 17:39:48 +0200 Subject: [PATCH 220/757] Add `python train.py --freeze N` argument (#4238) * Add freeze as an argument I train on different platforms and sometimes I want to freeze some layers. I have to go into the code and change it and also keep track of how many layers I froze on what platform. Please add the number of layers to freeze as an argument in future versions thanks. 
* Update train.py * Update train.py * Cleanup Co-authored-by: Glenn Jocher --- train.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/train.py b/train.py index cf50a5d553e3..1d3404ffc414 100644 --- a/train.py +++ b/train.py @@ -53,9 +53,9 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary opt, device, ): - save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, = \ + save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze, = \ Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \ - opt.resume, opt.noval, opt.nosave, opt.workers + opt.resume, opt.noval, opt.nosave, opt.workers, opt.freeze # Directories w = save_dir / 'weights' # weights dir @@ -111,7 +111,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary model = Model(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create # Freeze - freeze = [] # parameter names to freeze (full or partial) + freeze = [f'model.{x}.' for x in range(freeze)] # layers to freeze for k, v in model.named_parameters(): v.requires_grad = True # train all layers if any(x in k for x in freeze): @@ -442,6 +442,7 @@ def parse_opt(known=False): parser.add_argument('--save_period', type=int, default=-1, help='Log model after every "save_period" epoch') parser.add_argument('--artifact_alias', type=str, default="latest", help='version of dataset artifact to be used') parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify') + parser.add_argument('--freeze', type=int, default=0, help='Number of layers to freeze. backbone=10, all=24') opt = parser.parse_known_args()[0] if known else parser.parse_args() return opt From d8f18834a246cfe3589406635c7e990f8043130a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 30 Jul 2021 18:17:19 +0200 Subject: [PATCH 221/757] Update `profile()` for CUDA Memory allocation (#4239) * Update profile() * Update profile() * Update profile() * Update profile() * Update profile() * Update profile() * Update profile() * Update profile() * Update profile() * Update profile() * Update profile() * Update profile() * Cleanup --- tutorial.ipynb | 4 +-- utils/torch_utils.py | 76 ++++++++++++++++++++++++++------------------ 2 files changed, 47 insertions(+), 33 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 8d9c3f8b7a15..b16506275288 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -1172,11 +1172,11 @@ }, "source": [ "# Profile\n", - "from utils.torch_utils import profile \n", + "from utils.torch_utils import profile\n", "\n", "m1 = lambda x: x * torch.sigmoid(x)\n", "m2 = torch.nn.SiLU()\n", - "profile(x=torch.randn(16, 3, 640, 640), ops=[m1, m2], n=100)" + "results = profile(input=torch.randn(16, 3, 640, 640), ops=[m1, m2], n=100)" ], "execution_count": null, "outputs": [] diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 55a5fd7875bb..4956cf95d1ca 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -98,42 +98,56 @@ def time_sync(): return time.time() -def profile(x, ops, n=100, device=None): - # profile a pytorch module or list of modules. 
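
The `--freeze N` change in train.py above works by prefix-matching parameter names and switching off their gradients. A minimal standalone sketch of the same mechanism (the toy two-layer model is an assumption for illustration, not part of the patch):

```python
import torch.nn as nn

# Toy stand-in for a model whose parameters are named '0.weight', '1.weight', ...
# (YOLOv5 parameters are named 'model.0.*', 'model.1.*', hence the f'model.{x}.' prefixes)
model = nn.Sequential(nn.Conv2d(3, 16, 3), nn.Conv2d(16, 32, 3))

n = 1  # freeze the first n layers, mirroring --freeze N
freeze = [f'{x}.' for x in range(n)]  # parameter-name prefixes to freeze
for k, v in model.named_parameters():
    v.requires_grad = not any(k.startswith(s) for s in freeze)
    if not v.requires_grad:
        print(f'freezing {k}')  # prints: freezing 0.weight, freezing 0.bias
```

The frozen parameters still run in the forward pass; they simply stop receiving gradient updates.
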
Example usage: - # x = torch.randn(16, 3, 640, 640) # input +def profile(input, ops, n=10, device=None): + # YOLOv5 speed/memory/FLOPs profiler + # + # Usage: + # input = torch.randn(16, 3, 640, 640) # m1 = lambda x: x * torch.sigmoid(x) # m2 = nn.SiLU() - # profile(x, [m1, m2], n=100) # profile speed over 100 iterations + # profile(input, [m1, m2], n=100) # profile over 100 iterations + results = [] device = device or select_device() - x = x.to(device) - x.requires_grad = True - print(f"{'Params':>12s}{'GFLOPs':>12s}{'forward (ms)':>16s}{'backward (ms)':>16s}{'input':>24s}{'output':>24s}") - for m in ops if isinstance(ops, list) else [ops]: - m = m.to(device) if hasattr(m, 'to') else m # device - m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m # type - dtf, dtb, t = 0., 0., [0., 0., 0.] # dt forward, backward - try: - flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # GFLOPs - except: - flops = 0 - - for _ in range(n): - t[0] = time_sync() - y = m(x) - t[1] = time_sync() + print(f"{'Params':>12s}{'GFLOPs':>12s}{'GPU_mem (GB)':>14s}{'forward (ms)':>14s}{'backward (ms)':>14s}" + f"{'input':>24s}{'output':>24s}") + + for x in input if isinstance(input, list) else [input]: + x = x.to(device) + x.requires_grad = True + for m in ops if isinstance(ops, list) else [ops]: + m = m.to(device) if hasattr(m, 'to') else m # device + m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m + tf, tb, t = 0., 0., [0., 0., 0.] # dt forward, backward try: - _ = y.sum().backward() - t[2] = time_sync() - except: # no backward method - t[2] = float('nan') - dtf += (t[1] - t[0]) * 1000 / n # ms per op forward - dtb += (t[2] - t[1]) * 1000 / n # ms per op backward - - s_in = tuple(x.shape) if isinstance(x, torch.Tensor) else 'list' - s_out = tuple(y.shape) if isinstance(y, torch.Tensor) else 'list' - p = sum(list(x.numel() for x in m.parameters())) if isinstance(m, nn.Module) else 0 # parameters - print(f'{p:12}{flops:12.4g}{dtf:16.4g}{dtb:16.4g}{str(s_in):>24s}{str(s_out):>24s}') + flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # GFLOPs + except: + flops = 0 + + try: + for _ in range(n): + t[0] = time_sync() + y = m(x) + t[1] = time_sync() + try: + _ = (sum([yi.sum() for yi in y]) if isinstance(y, list) else y).sum().backward() + t[2] = time_sync() + except Exception as e: # no backward method + print(e) + t[2] = float('nan') + tf += (t[1] - t[0]) * 1000 / n # ms per op forward + tb += (t[2] - t[1]) * 1000 / n # ms per op backward + mem = torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0 # (GB) + s_in = tuple(x.shape) if isinstance(x, torch.Tensor) else 'list' + s_out = tuple(y.shape) if isinstance(y, torch.Tensor) else 'list' + p = sum(list(x.numel() for x in m.parameters())) if isinstance(m, nn.Module) else 0 # parameters + print(f'{p:12}{flops:12.4g}{mem:>14.3f}{tf:14.4g}{tb:14.4g}{str(s_in):>24s}{str(s_out):>24s}') + results.append([p, flops, mem, tf, tb, s_in, s_out]) + except Exception as e: + print(e) + results.append(None) + torch.cuda.empty_cache() + return results def is_parallel(model): From b74929c910f9cd99d2ece587e57bce1ae000d3ba Mon Sep 17 00:00:00 2001 From: Kalen Michael Date: Sun, 1 Aug 2021 00:18:07 +0200 Subject: [PATCH 222/757] Add `train.py` and `val.py` callbacks (#4220) * added callbacks * Update callbacks.py * Update train.py * Update val.py * Fix CamlCase add staticmethod * Refactor logger into callbacks * Cleanup * New callback 
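
The rewritten `profile()` above now returns its measurements instead of only printing them. A usage sketch, assuming a YOLOv5 checkout on PYTHONPATH (mirroring the updated notebook cell):

```python
import torch
import torch.nn as nn
from utils.torch_utils import profile  # assumes a YOLOv5 checkout on PYTHONPATH

m1 = lambda x: x * torch.sigmoid(x)  # hand-rolled SiLU
m2 = nn.SiLU()
results = profile(input=torch.randn(16, 3, 640, 640), ops=[m1, m2], n=10)
for r in results:
    if r:  # a row is None if the op raised during profiling
        params, flops, mem, tf, tb, s_in, s_out = r
        print(f'{params} params, {flops:.4g} GFLOPs, {mem:.3f} GB, {tf:.4g}/{tb:.4g} ms')
```
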
on_val_image_end() * Add curves and results images to TensorBoard Co-authored-by: Glenn Jocher --- train.py | 29 ++++--- utils/callbacks.py | 176 ++++++++++++++++++++++++++++++++++++++ utils/general.py | 5 ++ utils/loggers/__init__.py | 45 +++++----- utils/plots.py | 6 +- val.py | 10 +-- 6 files changed, 230 insertions(+), 41 deletions(-) create mode 100644 utils/callbacks.py diff --git a/train.py b/train.py index 1d3404ffc414..d4a5495d3b3b 100644 --- a/train.py +++ b/train.py @@ -34,7 +34,7 @@ from utils.datasets import create_dataloader from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \ strip_optimizer, get_latest_run, check_dataset, check_file, check_git_status, check_img_size, \ - check_requirements, print_mutation, set_logging, one_cycle, colorstr + check_requirements, print_mutation, set_logging, one_cycle, colorstr, methods from utils.downloads import attempt_download from utils.loss import ComputeLoss from utils.plots import plot_labels, plot_evolution @@ -42,6 +42,7 @@ from utils.loggers.wandb.wandb_utils import check_wandb_resume from utils.metrics import fitness from utils.loggers import Loggers +from utils.callbacks import Callbacks LOGGER = logging.getLogger(__name__) LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html @@ -52,6 +53,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary opt, device, + callbacks=Callbacks() ): save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze, = \ Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \ @@ -77,12 +79,16 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Loggers if RANK in [-1, 0]: - loggers = Loggers(save_dir, weights, opt, hyp, LOGGER).start() # loggers dict + loggers = Loggers(save_dir, weights, opt, hyp, LOGGER) # loggers instance if loggers.wandb: data_dict = loggers.wandb.data_dict if resume: weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp + # Register actions + for k in methods(loggers): + callbacks.register_action(k, callback=getattr(loggers, k)) + # Config plots = not evolve # create plots cuda = device.type != 'cpu' @@ -215,13 +221,15 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # cf = torch.bincount(c.long(), minlength=nc) + 1. 
# frequency # model._initialize_biases(cf.to(device)) if plots: - plot_labels(labels, names, save_dir, loggers) + plot_labels(labels, names, save_dir) # Anchors if not opt.noautoanchor: check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz) model.half().float() # pre-reduce anchor precision + callbacks.on_pretrain_routine_end() + # DDP mode if cuda and RANK != -1: model = DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK) @@ -329,8 +337,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G' # (GB) pbar.set_description(('%10s' * 2 + '%10.4g' * 5) % ( f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1])) - loggers.on_train_batch_end(ni, model, imgs, targets, paths, plots) - + callbacks.on_train_batch_end(ni, model, imgs, targets, paths, plots) # end batch ------------------------------------------------------------------------------------------------ # Scheduler @@ -339,7 +346,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary if RANK in [-1, 0]: # mAP - loggers.on_train_epoch_end(epoch) + callbacks.on_train_epoch_end(epoch=epoch) ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'names', 'stride', 'class_weights']) final_epoch = epoch + 1 == epochs if not noval or final_epoch: # Calculate mAP @@ -353,14 +360,14 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary save_json=is_coco and final_epoch, verbose=nc < 50 and final_epoch, plots=plots and final_epoch, - loggers=loggers, + callbacks=callbacks, compute_loss=compute_loss) # Update best mAP fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95] if fi > best_fitness: best_fitness = fi - loggers.on_train_val_end(mloss, results, lr, epoch, best_fitness, fi) + callbacks.on_fit_epoch_end(mloss, results, lr, epoch, best_fitness, fi) # Save model if (not nosave) or (final_epoch and not evolve): # if save @@ -377,7 +384,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary if best_fitness == fi: torch.save(ckpt, best) del ckpt - loggers.on_model_save(last, epoch, final_epoch, best_fitness, fi) + callbacks.on_model_save(last, epoch, final_epoch, best_fitness, fi) # end epoch ---------------------------------------------------------------------------------------------------- # end training ----------------------------------------------------------------------------------------------------- @@ -400,7 +407,8 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary for f in last, best: if f.exists(): strip_optimizer(f) # strip optimizers - loggers.on_train_end(last, best, plots) + callbacks.on_train_end(last, best, plots, epoch) + LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}") torch.cuda.empty_cache() return results @@ -448,6 +456,7 @@ def parse_opt(known=False): def main(opt): + # Checks set_logging(RANK) if RANK in [-1, 0]: print(colorstr('train: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items())) diff --git a/utils/callbacks.py b/utils/callbacks.py new file mode 100644 index 000000000000..f23d57a6c043 --- /dev/null +++ b/utils/callbacks.py @@ -0,0 +1,176 @@ +#!/usr/bin/env python + +class Callbacks: + """" + Handles all registered callbacks for YOLOv5 Hooks + """ + + _callbacks = { + 'on_pretrain_routine_start': [], + 'on_pretrain_routine_end': [], + + 'on_train_start': [], + 'on_train_epoch_start': [], + 'on_train_batch_start': [], + 'optimizer_step': [], + 'on_before_zero_grad': [], + 'on_train_batch_end': [], + 'on_train_epoch_end': [], 
+ + 'on_val_start': [], + 'on_val_batch_start': [], + 'on_val_image_end': [], + 'on_val_batch_end': [], + 'on_val_end': [], + + 'on_fit_epoch_end': [], # fit = train + val + 'on_model_save': [], + 'on_train_end': [], + + 'teardown': [], + } + + def __init__(self): + return + + def register_action(self, hook, name='', callback=None): + """ + Register a new action to a callback hook + + Args: + hook The callback hook name to register the action to + name The name of the action + callback The callback to fire + """ + assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}" + assert callable(callback), f"callback '{callback}' is not callable" + self._callbacks[hook].append({'name': name, 'callback': callback}) + + def get_registered_actions(self, hook=None): + """" + Returns all the registered actions by callback hook + + Args: + hook The name of the hook to check, defaults to all + """ + if hook: + return self._callbacks[hook] + else: + return self._callbacks + + @staticmethod + def run_callbacks(register, *args, **kwargs): + """ + Loop through the registered actions and fire all callbacks + """ + for logger in register: + # print(f"Running callbacks.{logger['callback'].__name__}()") + logger['callback'](*args, **kwargs) + + def on_pretrain_routine_start(self, *args, **kwargs): + """ + Fires all registered callbacks at the start of each pretraining routine + """ + self.run_callbacks(self._callbacks['on_pretrain_routine_start'], *args, **kwargs) + + def on_pretrain_routine_end(self, *args, **kwargs): + """ + Fires all registered callbacks at the end of each pretraining routine + """ + self.run_callbacks(self._callbacks['on_pretrain_routine_end'], *args, **kwargs) + + def on_train_start(self, *args, **kwargs): + """ + Fires all registered callbacks at the start of each training + """ + self.run_callbacks(self._callbacks['on_train_start'], *args, **kwargs) + + def on_train_epoch_start(self, *args, **kwargs): + """ + Fires all registered callbacks at the start of each training epoch + """ + self.run_callbacks(self._callbacks['on_train_epoch_start'], *args, **kwargs) + + def on_train_batch_start(self, *args, **kwargs): + """ + Fires all registered callbacks at the start of each training batch + """ + self.run_callbacks(self._callbacks['on_train_batch_start'], *args, **kwargs) + + def optimizer_step(self, *args, **kwargs): + """ + Fires all registered callbacks on each optimizer step + """ + self.run_callbacks(self._callbacks['optimizer_step'], *args, **kwargs) + + def on_before_zero_grad(self, *args, **kwargs): + """ + Fires all registered callbacks before zero grad + """ + self.run_callbacks(self._callbacks['on_before_zero_grad'], *args, **kwargs) + + def on_train_batch_end(self, *args, **kwargs): + """ + Fires all registered callbacks at the end of each training batch + """ + self.run_callbacks(self._callbacks['on_train_batch_end'], *args, **kwargs) + + def on_train_epoch_end(self, *args, **kwargs): + """ + Fires all registered callbacks at the end of each training epoch + """ + self.run_callbacks(self._callbacks['on_train_epoch_end'], *args, **kwargs) + + def on_val_start(self, *args, **kwargs): + """ + Fires all registered callbacks at the start of the validation + """ + self.run_callbacks(self._callbacks['on_val_start'], *args, **kwargs) + + def on_val_batch_start(self, *args, **kwargs): + """ + Fires all registered callbacks at the start of each validation batch + """ + self.run_callbacks(self._callbacks['on_val_batch_start'], *args, **kwargs) + + def 
on_val_image_end(self, *args, **kwargs): + """ + Fires all registered callbacks at the end of each val image + """ + self.run_callbacks(self._callbacks['on_val_image_end'], *args, **kwargs) + + def on_val_batch_end(self, *args, **kwargs): + """ + Fires all registered callbacks at the end of each validation batch + """ + self.run_callbacks(self._callbacks['on_val_batch_end'], *args, **kwargs) + + def on_val_end(self, *args, **kwargs): + """ + Fires all registered callbacks at the end of the validation + """ + self.run_callbacks(self._callbacks['on_val_end'], *args, **kwargs) + + def on_fit_epoch_end(self, *args, **kwargs): + """ + Fires all registered callbacks at the end of each fit (train+val) epoch + """ + self.run_callbacks(self._callbacks['on_fit_epoch_end'], *args, **kwargs) + + def on_model_save(self, *args, **kwargs): + """ + Fires all registered callbacks after each model save + """ + self.run_callbacks(self._callbacks['on_model_save'], *args, **kwargs) + + def on_train_end(self, *args, **kwargs): + """ + Fires all registered callbacks at the end of training + """ + self.run_callbacks(self._callbacks['on_train_end'], *args, **kwargs) + + def teardown(self, *args, **kwargs): + """ + Fires all registered callbacks before teardown + """ + self.run_callbacks(self._callbacks['teardown'], *args, **kwargs) diff --git a/utils/general.py b/utils/general.py index a414b391d24e..ed028d2b3765 100755 --- a/utils/general.py +++ b/utils/general.py @@ -67,6 +67,11 @@ def handler(*args, **kwargs): return handler +def methods(instance): + # Get class/instance methods + return [f for f in dir(instance) if callable(getattr(instance, f)) and not f.startswith("__")] + + def set_logging(rank=-1, verbose=True): logging.basicConfig( format="%(message)s", diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 06d562d60f99..5d4377d54155 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -29,10 +29,12 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, self.hyp = hyp self.logger = logger # for printing results to console self.include = include + self.keys = ['train/box_loss', 'train/obj_loss', 'train/cls_loss', # train loss + 'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', # metrics + 'val/box_loss', 'val/obj_loss', 'val/cls_loss', # val loss + 'x/lr0', 'x/lr1', 'x/lr2'] # params for k in LOGGERS: setattr(self, k, None) # init empty logger dictionary - - def start(self): self.csv = True # always log to csv # Message @@ -57,7 +59,11 @@ def start(self): else: self.wandb = None - return self + def on_pretrain_routine_end(self): + # Callback runs on pre-train routine end + paths = self.save_dir.glob('*labels*.jpg') # training labels + if self.wandb: + self.wandb.log({"Labels": [wandb.Image(str(x), caption=x.name) for x in paths]}) def on_train_batch_end(self, ni, model, imgs, targets, paths, plots): # Callback runs on train batch end @@ -78,8 +84,8 @@ def on_train_epoch_end(self, epoch): if self.wandb: self.wandb.current_epoch = epoch + 1 - def on_val_batch_end(self, pred, predn, path, names, im): - # Callback runs on train batch end + def on_val_image_end(self, pred, predn, path, names, im): + # Callback runs on val image end if self.wandb: self.wandb.val_one_image(pred, predn, path, names, im) @@ -89,25 +95,20 @@ def on_val_end(self): files = sorted(self.save_dir.glob('val*.jpg')) self.wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in files]}) - def on_train_val_end(self, mloss, 
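
Taken together with the `methods()` helper above, the hook API reduces to: collect an object's public methods, register each one as an action on the matching hook, then fire hooks by name. A minimal sketch with a hand-written action (the demo function is an assumption, not part of the patch):

```python
from utils.callbacks import Callbacks  # assumes a YOLOv5 checkout on PYTHONPATH

callbacks = Callbacks()

def on_train_start():  # illustrative action; any callable works
    print('training is starting')

# register_action() asserts the hook name exists and the callback is callable
callbacks.register_action('on_train_start', name='demo_cb', callback=on_train_start)
callbacks.on_train_start()  # fires every action registered on this hook
```

train.py does the same in a loop, registering every public method of the `Loggers` instance under the hook of the same name.
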
results, lr, epoch, best_fitness, fi): - # Callback runs on val end during training + def on_fit_epoch_end(self, mloss, results, lr, epoch, best_fitness, fi): + # Callback runs at the end of each fit (train+val) epoch vals = list(mloss) + list(results) + lr - keys = ['train/box_loss', 'train/obj_loss', 'train/cls_loss', # train loss - 'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', # metrics - 'val/box_loss', 'val/obj_loss', 'val/cls_loss', # val loss - 'x/lr0', 'x/lr1', 'x/lr2'] # params - x = {k: v for k, v in zip(keys, vals)} # dict - + x = {k: v for k, v in zip(self.keys, vals)} # dict if self.csv: file = self.save_dir / 'results.csv' n = len(x) + 1 # number of cols - s = '' if file.exists() else (('%20s,' * n % tuple(['epoch'] + keys)).rstrip(',') + '\n') # add header + s = '' if file.exists() else (('%20s,' * n % tuple(['epoch'] + self.keys)).rstrip(',') + '\n') # add header with open(file, 'a') as f: f.write(s + ('%20.5g,' * n % tuple([epoch] + vals)).rstrip(',') + '\n') if self.tb: for k, v in x.items(): - self.tb.add_scalar(k, v, epoch) # TensorBoard + self.tb.add_scalar(k, v, epoch) if self.wandb: self.wandb.log(x) @@ -119,20 +120,22 @@ def on_model_save(self, last, epoch, final_epoch, best_fitness, fi): if ((epoch + 1) % self.opt.save_period == 0 and not final_epoch) and self.opt.save_period != -1: self.wandb.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi) - def on_train_end(self, last, best, plots): + def on_train_end(self, last, best, plots, epoch): # Callback runs on training end if plots: plot_results(dir=self.save_dir) # save results.png files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]] files = [(self.save_dir / f) for f in files if (self.save_dir / f).exists()] # filter + + if self.tb: + from PIL import Image + import numpy as np + for f in files: + self.tb.add_image(f.stem, np.asarray(Image.open(f)), epoch, dataformats='HWC') + if self.wandb: wandb.log({"Results": [wandb.Image(str(f), caption=f.name) for f in files]}) wandb.log_artifact(str(best if best.exists() else last), type='model', name='run_' + self.wandb.wandb_run.id + '_model', aliases=['latest', 'best', 'stripped']) self.wandb.finish_run() - - def log_images(self, paths): - # Log images - if self.wandb: - self.wandb.log({"Labels": [wandb.Image(str(x), caption=x.name) for x in paths]}) diff --git a/utils/plots.py b/utils/plots.py index e13e316314dd..252e128168ee 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -281,7 +281,7 @@ def plot_study_txt(path='', x=None): # from utils.plots import *; plot_study_tx plt.savefig(str(Path(path).name) + '.png', dpi=300) -def plot_labels(labels, names=(), save_dir=Path(''), loggers=None): +def plot_labels(labels, names=(), save_dir=Path('')): # plot dataset labels print('Plotting labels... 
') c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes @@ -324,10 +324,6 @@ def plot_labels(labels, names=(), save_dir=Path(''), loggers=None): matplotlib.use('Agg') plt.close() - # loggers - if loggers: - loggers.log_images(save_dir.glob('*labels*.jpg')) - def plot_evolution(yaml_file='data/hyp.finetune.yaml'): # from utils.plots import *; plot_evolution() # Plot hyperparameter evolution results in evolve.txt diff --git a/val.py b/val.py index 86439b1380dc..58e8170da86c 100644 --- a/val.py +++ b/val.py @@ -25,7 +25,7 @@ from utils.metrics import ap_per_class, ConfusionMatrix from utils.plots import plot_images, output_to_target, plot_study_txt from utils.torch_utils import select_device, time_sync -from utils.loggers import Loggers +from utils.callbacks import Callbacks def save_one_txt(predn, save_conf, shape, file): @@ -97,7 +97,7 @@ def run(data, dataloader=None, save_dir=Path(''), plots=True, - loggers=Loggers(), + callbacks=Callbacks(), compute_loss=None, ): # Initialize/load model and set device @@ -213,7 +213,7 @@ def run(data, save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / (path.stem + '.txt')) if save_json: save_one_json(predn, jdict, path, class_map) # append to COCO-JSON dictionary - loggers.on_val_batch_end(pred, predn, path, names, img[si]) + callbacks.on_val_image_end(pred, predn, path, names, img[si]) # Plot images if plots and batch_i < 3: @@ -250,7 +250,7 @@ def run(data, # Plots if plots: confusion_matrix.plot(save_dir=save_dir, names=list(names.values())) - loggers.on_val_end() + callbacks.on_val_end() # Save JSON if save_json and len(jdict): @@ -282,7 +282,7 @@ def run(data, model.float() # for training if not training: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' - print(f"Results saved to {save_dir}{s}") + print(f"Results saved to {colorstr('bold', save_dir)}{s}") maps = np.zeros(nc) + map for i, c in enumerate(ap_class): maps[c] = ap[i] From cd540d8625bba8a05329ede3522046ee53eb349d Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Sun, 1 Aug 2021 15:36:53 +0530 Subject: [PATCH 223/757] W&B: suppress warnings (#4257) * Improve docstrings and run names * default wandb login prompt with timeout * return key * Update api_key check logic * Properly support zipped dataset feature * update docstring * Revert tuorial change * extend changes to log_dataset * add run name * bug fix * bug fix * Update comment * fix import check * remove unused import * Hardcore .yaml file extension * reduce code * Reformat using pycharm * Remove redundant try catch * More refactoring and bug fixes * retry * Reformat using pycharm * respect LOGGERS include list * call wandblogger.log instead of wandb.log Co-authored-by: Glenn Jocher --- utils/loggers/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 5d4377d54155..be76d0c17f1b 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -134,7 +134,8 @@ def on_train_end(self, last, best, plots, epoch): self.tb.add_image(f.stem, np.asarray(Image.open(f)), epoch, dataformats='HWC') if self.wandb: - wandb.log({"Results": [wandb.Image(str(f), caption=f.name) for f in files]}) + self.wandb.log({"Results": [wandb.Image(str(f), caption=f.name) for f in files]}) + # Calling wandb.log. 
TODO: Refactor this into WandbLogger.log_model wandb.log_artifact(str(best if best.exists() else last), type='model', name='run_' + self.wandb.wandb_run.id + '_model', aliases=['latest', 'best', 'stripped']) From 53bfcbe0ae48bb31c80378d8487a2b85c6bcc702 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 1 Aug 2021 20:36:00 +0200 Subject: [PATCH 224/757] Update AP calculation (#4260) * Update AP calculation * Cleanup * Remove original --- val.py | 41 +++++++++++++++++++++-------------------- 1 file changed, 21 insertions(+), 20 deletions(-) diff --git a/val.py b/val.py index 58e8170da86c..4c1d7d26b0de 100644 --- a/val.py +++ b/val.py @@ -50,26 +50,27 @@ def save_one_json(predn, jdict, path, class_map): 'score': round(p[4], 5)}) -def process_batch(predictions, labels, iouv): - # Evaluate 1 batch of predictions - correct = torch.zeros(predictions.shape[0], len(iouv), dtype=torch.bool, device=iouv.device) - detected = [] # label indices - tcls, pcls = labels[:, 0], predictions[:, 5] - nl = labels.shape[0] # number of labels - for cls in torch.unique(tcls): - ti = (cls == tcls).nonzero().view(-1) # label indices - pi = (cls == pcls).nonzero().view(-1) # prediction indices - if pi.shape[0]: # find detections - ious, i = box_iou(predictions[pi, 0:4], labels[ti, 1:5]).max(1) # best ious, indices - detected_set = set() - for j in (ious > iouv[0]).nonzero(): - d = ti[i[j]] # detected label - if d.item() not in detected_set: - detected_set.add(d.item()) - detected.append(d) # append detections - correct[pi[j]] = ious[j] > iouv # iou_thres is 1xn - if len(detected) == nl: # all labels already located in image - break +def process_batch(detections, labels, iouv): + """ + Return correct predictions matrix. Both sets of boxes are in (x1, y1, x2, y2) format. + Arguments: + detections (Array[N, 6]), x1, y1, x2, y2, conf, class + labels (Array[M, 5]), class, x1, y1, x2, y2 + Returns: + correct (Array[N, 10]), for 10 IoU levels + """ + correct = torch.zeros(detections.shape[0], iouv.shape[0], dtype=torch.bool, device=iouv.device) + iou = box_iou(labels[:, 1:], detections[:, :4]) + x = torch.where((iou >= iouv[0]) & (labels[:, 0:1] == detections[:, 5])) # IoU above threshold and classes match + if x[0].shape[0]: + matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() # [label, detection, iou] + if x[0].shape[0] > 1: + matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 1], return_index=True)[1]] + # matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 0], return_index=True)[1]] + matches = torch.Tensor(matches).to(iouv.device) + correct[matches[:, 1].long()] = matches[:, 2:3] >= iouv return correct From 306fc0119a94915b91fb6ca6f46f2d50437152e3 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 2 Aug 2021 12:42:23 +0200 Subject: [PATCH 225/757] Update Autoshape forward header (#4271) --- models/common.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/models/common.py b/models/common.py index 24f02c2a584c..8319552de5f0 100644 --- a/models/common.py +++ b/models/common.py @@ -232,10 +232,10 @@ def autoshape(self): @torch.no_grad() def forward(self, imgs, size=640, augment=False, profile=False): # Inference from various sources. 
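
A small worked example of the vectorized matcher introduced in the AP-calculation patch above (a sketch; `process_batch` comes from val.py in a YOLOv5 checkout):

```python
import torch
from val import process_batch  # assumes a YOLOv5 checkout on PYTHONPATH

iouv = torch.linspace(0.5, 0.95, 10)  # the 10 IoU thresholds of mAP@0.5:0.95
labels = torch.tensor([[0., 0., 0., 100., 100.]])  # class, x1, y1, x2, y2
detections = torch.tensor([[0., 0., 90., 90., 0.9, 0.],         # IoU 0.81 with the label
                           [200., 200., 300., 300., 0.8, 0.]])  # no overlap
correct = process_batch(detections, labels, iouv)
print(correct[0])  # True up to the 0.80 threshold, False at 0.85 and above
print(correct[1])  # all False: matched no label
```
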
For height=640, width=1280, RGB images example inputs are: - # filename: imgs = 'data/images/zidane.jpg' # str or PosixPath + # file: imgs = 'data/images/zidane.jpg' # str or PosixPath # URI: = 'https://ultralytics.com/images/zidane.jpg' # OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(640,1280,3) - # PIL: = Image.open('image.jpg') # HWC x(640,1280,3) + # PIL: = Image.open('image.jpg') or ImageGrab.grab() # HWC x(640,1280,3) # numpy: = np.zeros((640,1280,3)) # HWC # torch: = torch.zeros(16,3,320,640) # BCHW (scaled to size=640, 0-1 values) # multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] # list of images From 9c6732f61c5dc013114e6797905c5e3410cd8201 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 2 Aug 2021 15:13:55 +0200 Subject: [PATCH 226/757] Update variables (#4273) --- models/common.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/models/common.py b/models/common.py index 8319552de5f0..e6b7b5182283 100644 --- a/models/common.py +++ b/models/common.py @@ -30,7 +30,7 @@ def autopad(k, p=None): # kernel, padding def DWConv(c1, c2, k=1, s=1, act=True): - # Depthwise convolution + # Depth-wise convolution return Conv(c1, c2, k, s, g=math.gcd(c1, c2), act=act) @@ -183,11 +183,11 @@ def __init__(self, gain=2): self.gain = gain def forward(self, x): - N, C, H, W = x.size() # assert (H / s == 0) and (W / s == 0), 'Indivisible gain' + b, c, h, w = x.size() # assert (h / s == 0) and (W / s == 0), 'Indivisible gain' s = self.gain - x = x.view(N, C, H // s, s, W // s, s) # x(1,64,40,2,40,2) + x = x.view(b, c, h // s, s, w // s, s) # x(1,64,40,2,40,2) x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # x(1,2,2,64,40,40) - return x.view(N, C * s * s, H // s, W // s) # x(1,256,40,40) + return x.view(b, c * s * s, h // s, w // s) # x(1,256,40,40) class Expand(nn.Module): @@ -197,11 +197,11 @@ def __init__(self, gain=2): self.gain = gain def forward(self, x): - N, C, H, W = x.size() # assert C / s ** 2 == 0, 'Indivisible gain' + b, c, h, w = x.size() # assert C / s ** 2 == 0, 'Indivisible gain' s = self.gain - x = x.view(N, s, s, C // s ** 2, H, W) # x(1,2,2,16,80,80) + x = x.view(b, s, s, c // s ** 2, h, w) # x(1,2,2,16,80,80) x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # x(1,16,80,2,80,2) - return x.view(N, C // s ** 2, H * s, W * s) # x(1,16,160,160) + return x.view(b, c // s ** 2, h * s, w * s) # x(1,16,160,160) class Concat(nn.Module): From 587c4b4b81fb4e9423e33a2a235731742386d03c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 2 Aug 2021 15:36:30 +0200 Subject: [PATCH 227/757] Add `DWConvClass()` (#4274) * Add `DWConvClass()` * Cleanup * Cleanup2 --- models/common.py | 11 +++++++++-- models/experimental.py | 2 +- models/yolo.py | 4 ++-- 3 files changed, 12 insertions(+), 5 deletions(-) diff --git a/models/common.py b/models/common.py index e6b7b5182283..30e7319f98a0 100644 --- a/models/common.py +++ b/models/common.py @@ -30,7 +30,7 @@ def autopad(k, p=None): # kernel, padding def DWConv(c1, c2, k=1, s=1, act=True): - # Depth-wise convolution + # Depth-wise convolution function return Conv(c1, c2, k, s, g=math.gcd(c1, c2), act=act) @@ -45,10 +45,17 @@ def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, k def forward(self, x): return self.act(self.bn(self.conv(x))) - def fuseforward(self, x): + def forward_fuse(self, x): return self.act(self.conv(x)) +class DWConvClass(Conv): + # Depth-wise convolution class + def __init__(self, c1, c2, k=1, s=1, act=True): # ch_in, ch_out, kernel, 
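
The renamed `b, c, h, w` variables above describe pure reshape modules; a quick shape check of both transforms (a standalone sketch of the same view/permute sequences):

```python
import torch

b, c, h, w, s = 1, 64, 80, 80, 2  # batch, channels, height, width, gain

# Contract-style: trade spatial resolution for channels, x(1,64,80,80) -> x(1,256,40,40)
x = torch.zeros(b, c, h, w)
y = x.view(b, c, h // s, s, w // s, s).permute(0, 3, 5, 1, 2, 4).contiguous()
print(y.view(b, c * s * s, h // s, w // s).shape)  # torch.Size([1, 256, 40, 40])

# Expand: the inverse, x(1,64,80,80) -> x(1,16,160,160)
y = x.view(b, s, s, c // s ** 2, h, w).permute(0, 3, 4, 1, 5, 2).contiguous()
print(y.view(b, c // s ** 2, h * s, w * s).shape)  # torch.Size([1, 16, 160, 160])
```
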
stride, padding, groups + super().__init__(c1, c2, k, s, act) + self.conv = nn.Conv2d(c1, c2, k, s, autopad(k), groups=math.gcd(c1, c2), bias=False) + + class TransformerLayer(nn.Module): # Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance) def __init__(self, c, num_heads): diff --git a/models/experimental.py b/models/experimental.py index 276ca954b173..581c7b14b61e 100644 --- a/models/experimental.py +++ b/models/experimental.py @@ -72,7 +72,7 @@ def forward(self, x): class MixConv2d(nn.Module): - # Mixed Depthwise Conv https://arxiv.org/abs/1907.09595 + # Mixed Depth-wise Conv https://arxiv.org/abs/1907.09595 def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True): super().__init__() groups = len(k) diff --git a/models/yolo.py b/models/yolo.py index 2e7a20f813e2..9f05c8329f38 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -202,10 +202,10 @@ def _print_biases(self): def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers LOGGER.info('Fusing layers... ') for m in self.model.modules(): - if type(m) is Conv and hasattr(m, 'bn'): + if isinstance(m, (Conv, DWConvClass)) and hasattr(m, 'bn'): m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv delattr(m, 'bn') # remove batchnorm - m.forward = m.fuseforward # update forward + m.forward = m.forward_fuse # update forward self.info() return self From 388016e9e3fd84255444356b509862b935105d97 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 2 Aug 2021 15:48:53 +0200 Subject: [PATCH 228/757] Update 'results saved to' string (#4275) --- detect.py | 2 +- export.py | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/detect.py b/detect.py index 80517f342a41..88d1d9ca3800 100644 --- a/detect.py +++ b/detect.py @@ -189,7 +189,7 @@ def run(weights='yolov5s.pt', # model.pt path(s) if save_txt or save_img: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' - print(f"Results saved to {save_dir}{s}") + print(f"Results saved to {colorstr('bold', save_dir)}{s}") if update: strip_optimizer(weights) # update model (to fix SourceChangeWarning) diff --git a/export.py b/export.py index c98e92d972c6..83e293b72e73 100644 --- a/export.py +++ b/export.py @@ -155,7 +155,9 @@ def run(weights='./yolov5s.pt', # weights path export_coreml(model, img, file) # Finish - print(f'\nExport complete ({time.time() - t:.2f}s). 
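
On the `DWConv`/`DWConvClass` change above: setting `groups=math.gcd(c1, c2)` makes the convolution depth-wise whenever the channel counts allow it, since each group then sees a single input channel. A quick parameter-count check (a sketch):

```python
import math
import torch
import torch.nn as nn

c1 = c2 = 64
dw = nn.Conv2d(c1, c2, 3, padding=1, groups=math.gcd(c1, c2), bias=False)  # 64 groups
dense = nn.Conv2d(c1, c2, 3, padding=1, bias=False)
print(sum(p.numel() for p in dw.parameters()))     # 576   = 64 * 1 * 3 * 3
print(sum(p.numel() for p in dense.parameters()))  # 36864 = 64 * 64 * 3 * 3
print(dw(torch.zeros(1, c1, 32, 32)).shape)        # torch.Size([1, 64, 32, 32])
```
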
Visualize with https://github.com/lutzroeder/netron.') + print(f'\nExport complete ({time.time() - t:.2f}s)' + f"Results saved to {colorstr('bold', file.parent.resolve())}\n" + f'Visualize with https://netron.app') def parse_opt(): From 621caea53c393ca8b46261d369a6314f7d2736d7 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Mon, 2 Aug 2021 22:11:52 +0530 Subject: [PATCH 229/757] W&B: Fix sweep bug (#4276) * Improve docstrings and run names * default wandb login prompt with timeout * return key * Update api_key check logic * Properly support zipped dataset feature * update docstring * Revert tuorial change * extend changes to log_dataset * add run name * bug fix * bug fix * Update comment * fix import check * remove unused import * Hardcore .yaml file extension * reduce code * Reformat using pycharm * Remove redundant try catch * More refactoring and bug fixes * retry * Reformat using pycharm * respect LOGGERS include list * call wandblogger.log instead of wandb.log * Fix Sweep bug Co-authored-by: Glenn Jocher --- utils/loggers/wandb/sweep.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/loggers/wandb/sweep.py b/utils/loggers/wandb/sweep.py index 8e952d03c085..2dcda508eb50 100644 --- a/utils/loggers/wandb/sweep.py +++ b/utils/loggers/wandb/sweep.py @@ -4,7 +4,7 @@ import wandb FILE = Path(__file__).absolute() -sys.path.append(FILE.parents[2].as_posix()) # add utils/ to path +sys.path.append(FILE.parents[3].as_posix()) # add utils/ to path from train import train, parse_opt from utils.general import increment_path From 2d99063201105d992f8b0dada3c9c7a206e582e7 Mon Sep 17 00:00:00 2001 From: junji hashimoto Date: Tue, 3 Aug 2021 01:47:24 +0900 Subject: [PATCH 230/757] Feature `python train.py --cache disk` (#4049) * Add cache-on-disk and cache-directory to cache images on disk * Fix load_image with cache_on_disk * Add no_cache flag for load_image * Revert the parts('logging' and a new line) that do not need to be modified * Add the assertion for shapes of cached images * Add a suffix string for cached images * Fix boundary-error of letterbox for load_mosaic * Add prefix as cache-key of cache-on-disk * Update cache-function on disk * Add psutil in requirements.txt * Update train.py * Cleanup1 * Cleanup2 * Skip existing npy * Include re-space * Export return character fix Co-authored-by: Glenn Jocher --- export.py | 4 ++-- train.py | 8 ++++---- utils/datasets.py | 45 +++++++++++++++++++++++++++++---------------- 3 files changed, 35 insertions(+), 22 deletions(-) diff --git a/export.py b/export.py index 83e293b72e73..cec85958b4a9 100644 --- a/export.py +++ b/export.py @@ -156,8 +156,8 @@ def run(weights='./yolov5s.pt', # weights path # Finish print(f'\nExport complete ({time.time() - t:.2f}s)' - f"Results saved to {colorstr('bold', file.parent.resolve())}\n" - f'Visualize with https://netron.app') + f"\nResults saved to {colorstr('bold', file.parent.resolve())}" + f'\nVisualize with https://netron.app') def parse_opt(): diff --git a/train.py b/train.py index d4a5495d3b3b..34bd8e73c290 100644 --- a/train.py +++ b/train.py @@ -201,7 +201,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Trainloader train_loader, dataset = create_dataloader(train_path, imgsz, batch_size // WORLD_SIZE, gs, single_cls, - hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=RANK, + hyp=hyp, augment=True, cache=opt.cache, rect=opt.rect, rank=RANK, workers=workers, image_weights=opt.image_weights, quad=opt.quad, prefix=colorstr('train: ')) mlc = 
np.concatenate(dataset.labels, 0)[:, 0].max() # max label class @@ -211,7 +211,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Process 0 if RANK in [-1, 0]: val_loader = create_dataloader(val_path, imgsz, batch_size // WORLD_SIZE * 2, gs, single_cls, - hyp=hyp, cache=opt.cache_images and not noval, rect=True, rank=-1, + hyp=hyp, cache=None if noval else opt.cache, rect=True, rank=-1, workers=workers, pad=0.5, prefix=colorstr('val: '))[0] @@ -389,7 +389,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # end epoch ---------------------------------------------------------------------------------------------------- # end training ----------------------------------------------------------------------------------------------------- if RANK in [-1, 0]: - LOGGER.info(f'{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.\n') + LOGGER.info(f'\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.') if not evolve: if is_coco: # COCO dataset for m in [last, best] if best.exists() else [last]: # speed, mAP tests @@ -430,7 +430,7 @@ def parse_opt(known=False): parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check') parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations') parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') - parser.add_argument('--cache-images', action='store_true', help='cache images for faster training') + parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"') parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training') parser.add_argument('--device', default='', help='cuda device, i.e. 
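
The `utils/datasets.py` hunk later in this patch implements `--cache disk` by decoding each image once and saving the raw array as `.npy`. The round-trip at its core, sketched standalone (the image path is illustrative):

```python
from pathlib import Path

import cv2
import numpy as np

img_file = 'data/images/zidane.jpg'  # illustrative path from the repo
npy = Path(img_file).with_suffix('.npy')
if not npy.exists():
    np.save(str(npy), cv2.imread(img_file))  # cache the decoded BGR array to disk
im = np.load(str(npy))  # later epochs reload the array and skip JPEG decoding
print(im.shape)  # (height, width, 3)
```
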
0 or 0,1,2,3 or cpu') parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%') diff --git a/utils/datasets.py b/utils/datasets.py index fffe39a61459..1c780cdbac4b 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -455,16 +455,25 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM) - self.imgs = [None] * n + self.imgs, self.img_npy = [None] * n, [None] * n if cache_images: + if cache_images == 'disk': + self.im_cache_dir = Path(Path(self.img_files[0]).parent.as_posix() + '_npy') + self.img_npy = [self.im_cache_dir / Path(f).with_suffix('.npy').name for f in self.img_files] + self.im_cache_dir.mkdir(parents=True, exist_ok=True) gb = 0 # Gigabytes of cached images self.img_hw0, self.img_hw = [None] * n, [None] * n results = ThreadPool(NUM_THREADS).imap(lambda x: load_image(*x), zip(repeat(self), range(n))) pbar = tqdm(enumerate(results), total=n) for i, x in pbar: - self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # img, hw_original, hw_resized = load_image(self, i) - gb += self.imgs[i].nbytes - pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB)' + if cache_images == 'disk': + if not self.img_npy[i].exists(): + np.save(self.img_npy[i].as_posix(), x[0]) + gb += self.img_npy[i].stat().st_size + else: + self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # im, hw_orig, hw_resized = load_image(self, i) + gb += self.imgs[i].nbytes + pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB {cache_images})' pbar.close() def cache_labels(self, path=Path('./labels.cache'), prefix=''): @@ -618,21 +627,25 @@ def collate_fn4(batch): # Ancillary functions -------------------------------------------------------------------------------------------------- -def load_image(self, index): - # loads 1 image from dataset, returns img, original hw, resized hw - img = self.imgs[index] - if img is None: # not cached - path = self.img_files[index] - img = cv2.imread(path) # BGR - assert img is not None, 'Image Not Found ' + path - h0, w0 = img.shape[:2] # orig hw +def load_image(self, i): + # loads 1 image from dataset index 'i', returns im, original hw, resized hw + im = self.imgs[i] + if im is None: # not cached in ram + npy = self.img_npy[i] + if npy and npy.exists(): # load npy + im = np.load(npy) + else: # read image + path = self.img_files[i] + im = cv2.imread(path) # BGR + assert im is not None, 'Image Not Found ' + path + h0, w0 = im.shape[:2] # orig hw r = self.img_size / max(h0, w0) # ratio if r != 1: # if sizes are not equal - img = cv2.resize(img, (int(w0 * r), int(h0 * r)), - interpolation=cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR) - return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized + im = cv2.resize(im, (int(w0 * r), int(h0 * r)), + interpolation=cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR) + return im, (h0, w0), im.shape[:2] # im, hw_original, hw_resized else: - return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized + return self.imgs[i], self.img_hw0[i], self.img_hw[i] # im, hw_original, hw_resized def load_mosaic(self, index): From 771ac6c53ded79c408ed8bd99f7604b7077b7d77 Mon Sep 17 00:00:00 2001 From: imyhxy Date: Tue, 3 Aug 2021 19:11:42 +0800 Subject: [PATCH 231/757] Fixed logging level in distributed mode (#4284) Co-authored-by: 
fkwong --- utils/torch_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 4956cf95d1ca..628f672a010d 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -23,7 +23,6 @@ except ImportError: thop = None -logging.basicConfig(format="%(message)s", level=logging.INFO) LOGGER = logging.getLogger(__name__) @@ -108,6 +107,7 @@ def profile(input, ops, n=10, device=None): # profile(input, [m1, m2], n=100) # profile over 100 iterations results = [] + logging.basicConfig(format="%(message)s", level=logging.INFO) device = device or select_device() print(f"{'Params':>12s}{'GFLOPs':>12s}{'GPU_mem (GB)':>14s}{'forward (ms)':>14s}{'backward (ms)':>14s}" f"{'input':>24s}{'output':>24s}") From 4103ce9ad0393cc27f6c80457894ad7be0cb1f0d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 4 Aug 2021 13:17:35 +0200 Subject: [PATCH 232/757] Simplify callbacks (#4289) --- utils/callbacks.py | 41 ++++++++++++++++++++--------------------- 1 file changed, 20 insertions(+), 21 deletions(-) diff --git a/utils/callbacks.py b/utils/callbacks.py index f23d57a6c043..a204ec1ceaaf 100644 --- a/utils/callbacks.py +++ b/utils/callbacks.py @@ -58,12 +58,11 @@ def get_registered_actions(self, hook=None): else: return self._callbacks - @staticmethod - def run_callbacks(register, *args, **kwargs): + def run_callbacks(self, hook, *args, **kwargs): """ Loop through the registered actions and fire all callbacks """ - for logger in register: + for logger in self._callbacks[hook]: # print(f"Running callbacks.{logger['callback'].__name__}()") logger['callback'](*args, **kwargs) @@ -71,106 +70,106 @@ def on_pretrain_routine_start(self, *args, **kwargs): """ Fires all registered callbacks at the start of each pretraining routine """ - self.run_callbacks(self._callbacks['on_pretrain_routine_start'], *args, **kwargs) + self.run_callbacks('on_pretrain_routine_start', *args, **kwargs) def on_pretrain_routine_end(self, *args, **kwargs): """ Fires all registered callbacks at the end of each pretraining routine """ - self.run_callbacks(self._callbacks['on_pretrain_routine_end'], *args, **kwargs) + self.run_callbacks('on_pretrain_routine_end', *args, **kwargs) def on_train_start(self, *args, **kwargs): """ Fires all registered callbacks at the start of each training """ - self.run_callbacks(self._callbacks['on_train_start'], *args, **kwargs) + self.run_callbacks('on_train_start', *args, **kwargs) def on_train_epoch_start(self, *args, **kwargs): """ Fires all registered callbacks at the start of each training epoch """ - self.run_callbacks(self._callbacks['on_train_epoch_start'], *args, **kwargs) + self.run_callbacks('on_train_epoch_start', *args, **kwargs) def on_train_batch_start(self, *args, **kwargs): """ Fires all registered callbacks at the start of each training batch """ - self.run_callbacks(self._callbacks['on_train_batch_start'], *args, **kwargs) + self.run_callbacks('on_train_batch_start', *args, **kwargs) def optimizer_step(self, *args, **kwargs): """ Fires all registered callbacks on each optimizer step """ - self.run_callbacks(self._callbacks['optimizer_step'], *args, **kwargs) + self.run_callbacks('optimizer_step', *args, **kwargs) def on_before_zero_grad(self, *args, **kwargs): """ Fires all registered callbacks before zero grad """ - self.run_callbacks(self._callbacks['on_before_zero_grad'], *args, **kwargs) + self.run_callbacks('on_before_zero_grad', *args, **kwargs) def on_train_batch_end(self, *args, **kwargs): """ Fires all registered 
callbacks at the end of each training batch """ - self.run_callbacks(self._callbacks['on_train_batch_end'], *args, **kwargs) + self.run_callbacks('on_train_batch_end', *args, **kwargs) def on_train_epoch_end(self, *args, **kwargs): """ Fires all registered callbacks at the end of each training epoch """ - self.run_callbacks(self._callbacks['on_train_epoch_end'], *args, **kwargs) + self.run_callbacks('on_train_epoch_end', *args, **kwargs) def on_val_start(self, *args, **kwargs): """ Fires all registered callbacks at the start of the validation """ - self.run_callbacks(self._callbacks['on_val_start'], *args, **kwargs) + self.run_callbacks('on_val_start', *args, **kwargs) def on_val_batch_start(self, *args, **kwargs): """ Fires all registered callbacks at the start of each validation batch """ - self.run_callbacks(self._callbacks['on_val_batch_start'], *args, **kwargs) + self.run_callbacks('on_val_batch_start', *args, **kwargs) def on_val_image_end(self, *args, **kwargs): """ Fires all registered callbacks at the end of each val image """ - self.run_callbacks(self._callbacks['on_val_image_end'], *args, **kwargs) + self.run_callbacks('on_val_image_end', *args, **kwargs) def on_val_batch_end(self, *args, **kwargs): """ Fires all registered callbacks at the end of each validation batch """ - self.run_callbacks(self._callbacks['on_val_batch_end'], *args, **kwargs) + self.run_callbacks('on_val_batch_end', *args, **kwargs) def on_val_end(self, *args, **kwargs): """ Fires all registered callbacks at the end of the validation """ - self.run_callbacks(self._callbacks['on_val_end'], *args, **kwargs) + self.run_callbacks('on_val_end', *args, **kwargs) def on_fit_epoch_end(self, *args, **kwargs): """ Fires all registered callbacks at the end of each fit (train+val) epoch """ - self.run_callbacks(self._callbacks['on_fit_epoch_end'], *args, **kwargs) + self.run_callbacks('on_fit_epoch_end', *args, **kwargs) def on_model_save(self, *args, **kwargs): """ Fires all registered callbacks after each model save """ - self.run_callbacks(self._callbacks['on_model_save'], *args, **kwargs) + self.run_callbacks('on_model_save', *args, **kwargs) def on_train_end(self, *args, **kwargs): """ Fires all registered callbacks at the end of training """ - self.run_callbacks(self._callbacks['on_train_end'], *args, **kwargs) + self.run_callbacks('on_train_end', *args, **kwargs) def teardown(self, *args, **kwargs): """ Fires all registered callbacks before teardown """ - self.run_callbacks(self._callbacks['teardown'], *args, **kwargs) + self.run_callbacks('teardown', *args, **kwargs) From e78aeac973ea3a2c58d7577453473e48f4e7a0f6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 4 Aug 2021 17:13:38 +0200 Subject: [PATCH 233/757] Evolve in CSV format (#4307) * Update evolution to CSV format * Update * Update * Update * Update * Update * reset args * reset args * reset args * plot_results() fix * Cleanup * Cleanup2 --- .dockerignore | 2 +- .gitignore | 1 - train.py | 32 ++++++++++++++----------- utils/general.py | 50 +++++++++++++++++++++++---------------- utils/loggers/__init__.py | 5 ++-- utils/plots.py | 50 +++++++++++++++++++-------------------- 6 files changed, 75 insertions(+), 65 deletions(-) diff --git a/.dockerignore b/.dockerignore index 9c9663f006ca..4248cb098cf4 100644 --- a/.dockerignore +++ b/.dockerignore @@ -8,7 +8,7 @@ coco storage.googleapis.com data/samples/* -**/results*.txt +**/results*.csv *.jpg # Neural Network weights 
----------------------------------------------------------------------------------------------- diff --git a/.gitignore b/.gitignore index b07134d097dd..e5d02af960af 100755 --- a/.gitignore +++ b/.gitignore @@ -30,7 +30,6 @@ data/* !data/images/bus.jpg !data/*.sh -results*.txt results*.csv # Datasets ------------------------------------------------------------------------------------------------------------- diff --git a/train.py b/train.py index 34bd8e73c290..a7d61c8c5411 100644 --- a/train.py +++ b/train.py @@ -37,7 +37,7 @@ check_requirements, print_mutation, set_logging, one_cycle, colorstr, methods from utils.downloads import attempt_download from utils.loss import ComputeLoss -from utils.plots import plot_labels, plot_evolution +from utils.plots import plot_labels, plot_evolve from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, de_parallel from utils.loggers.wandb.wandb_utils import check_wandb_resume from utils.metrics import fitness @@ -367,7 +367,8 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95] if fi > best_fitness: best_fitness = fi - callbacks.on_fit_epoch_end(mloss, results, lr, epoch, best_fitness, fi) + log_vals = list(mloss) + list(results) + lr + callbacks.on_fit_epoch_end(log_vals, epoch, best_fitness, fi) # Save model if (not nosave) or (final_epoch and not evolve): # if save @@ -464,7 +465,7 @@ def main(opt): check_requirements(requirements=FILE.parent / 'requirements.txt', exclude=['thop']) # Resume - if opt.resume and not check_wandb_resume(opt): # resume an interrupted run + if opt.resume and not check_wandb_resume(opt) and not opt.evolve: # resume an interrupted run ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run() # specified or most recent path assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist' with open(Path(ckpt).parent.parent / 'opt.yaml') as f: @@ -474,8 +475,10 @@ def main(opt): else: opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_file(opt.cfg), check_file(opt.hyp) # check files assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified' - opt.name = 'evolve' if opt.evolve else opt.name - opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve)) + if opt.evolve: + opt.project = 'runs/evolve' + opt.exist_ok = opt.resume + opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # DDP mode device = select_device(opt.device, batch_size=opt.batch_size) @@ -533,17 +536,17 @@ def main(opt): hyp = yaml.safe_load(f) # load hyps dict if 'anchors' not in hyp: # anchors commented in hyp.yaml hyp['anchors'] = 3 - opt.noval, opt.nosave = True, True # only val/save final epoch + opt.noval, opt.nosave, save_dir = True, True, Path(opt.save_dir) # only val/save final epoch # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices - yaml_file = Path(opt.save_dir) / 'hyp_evolved.yaml' # save best result here + evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv' if opt.bucket: - os.system(f'gsutil cp gs://{opt.bucket}/evolve.txt .') # download evolve.txt if exists + os.system(f'gsutil cp gs://{opt.bucket}/evolve.csv {save_dir}') # download evolve.csv if exists for _ in range(opt.evolve): # generations to evolve - if Path('evolve.txt').exists(): # if evolve.txt exists: select best hyps and mutate + if evolve_csv.exists(): # 
if evolve.csv exists: select best hyps and mutate # Select parent(s) parent = 'single' # parent selection method: 'single' or 'weighted' - x = np.loadtxt('evolve.txt', ndmin=2) + x = np.loadtxt(evolve_csv, ndmin=2, delimiter=',', skiprows=1) n = min(5, len(x)) # number of previous results to consider x = x[np.argsort(-fitness(x))][:n] # top n mutations w = fitness(x) - fitness(x).min() + 1E-6 # weights (sum > 0) @@ -575,12 +578,13 @@ def main(opt): results = train(hyp.copy(), opt, device) # Write mutation results - print_mutation(hyp.copy(), results, yaml_file, opt.bucket) + print_mutation(results, hyp.copy(), save_dir, opt.bucket) # Plot results - plot_evolution(yaml_file) - print(f'Hyperparameter evolution complete. Best results saved as: {yaml_file}\n' - f'Command to train a new model with these hyperparameters: $ python train.py --hyp {yaml_file}') + plot_evolve(evolve_csv) + print(f'Hyperparameter evolution finished\n' + f"Results saved to {colorstr('bold', save_dir)}" + f'Use best hyperparameters example: $ python train.py --hyp {evolve_yaml}') def run(**kwargs): diff --git a/utils/general.py b/utils/general.py index ed028d2b3765..15111b727f33 100755 --- a/utils/general.py +++ b/utils/general.py @@ -615,35 +615,43 @@ def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_op print(f"Optimizer stripped from {f},{(' saved as %s,' % s) if s else ''} {mb:.1f}MB") -def print_mutation(hyp, results, yaml_file='hyp_evolved.yaml', bucket=''): - # Print mutation results to evolve.txt (for use with train.py --evolve) - a = '%10s' * len(hyp) % tuple(hyp.keys()) # hyperparam keys - b = '%10.3g' * len(hyp) % tuple(hyp.values()) # hyperparam values - c = '%10.4g' * len(results) % results # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3) - print('\n%s\n%s\nEvolved fitness: %s\n' % (a, b, c)) +def print_mutation(results, hyp, save_dir, bucket): + evolve_csv, results_csv, evolve_yaml = save_dir / 'evolve.csv', save_dir / 'results.csv', save_dir / 'hyp_evolve.yaml' + keys = ('metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', + 'val/box_loss', 'val/obj_loss', 'val/cls_loss') + tuple(hyp.keys()) # [results + hyps] + keys = tuple(x.strip() for x in keys) + vals = results + tuple(hyp.values()) + n = len(keys) + # Download (optional) if bucket: - url = 'gs://%s/evolve.txt' % bucket - if gsutil_getsize(url) > (os.path.getsize('evolve.txt') if os.path.exists('evolve.txt') else 0): - os.system('gsutil cp %s .' 
% url) # download evolve.txt if larger than local + url = f'gs://{bucket}/evolve.csv' + if gsutil_getsize(url) > (os.path.getsize(evolve_csv) if os.path.exists(evolve_csv) else 0): + os.system(f'gsutil cp {url} {save_dir}') # download evolve.csv if larger than local + + # Log to evolve.csv + s = '' if evolve_csv.exists() else (('%20s,' * n % keys).rstrip(',') + '\n') # add header + with open(evolve_csv, 'a') as f: + f.write(s + ('%20.5g,' * n % vals).rstrip(',') + '\n') - with open('evolve.txt', 'a') as f: # append result - f.write(c + b + '\n') - x = np.unique(np.loadtxt('evolve.txt', ndmin=2), axis=0) # load unique rows - x = x[np.argsort(-fitness(x))] # sort - np.savetxt('evolve.txt', x, '%10.3g') # save sort by fitness + # Print to screen + print(colorstr('evolve: ') + ', '.join(f'{x.strip():>20s}' for x in keys)) + print(colorstr('evolve: ') + ', '.join(f'{x:20.5g}' for x in vals), end='\n\n\n') # Save yaml - for i, k in enumerate(hyp.keys()): - hyp[k] = float(x[0, i + 7]) - with open(yaml_file, 'w') as f: - results = tuple(x[0, :7]) - c = '%10.4g' * len(results) % results # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3) - f.write('# Hyperparameter Evolution Results\n# Generations: %g\n# Metrics: ' % len(x) + c + '\n\n') + with open(evolve_yaml, 'w') as f: + data = pd.read_csv(evolve_csv) + data = data.rename(columns=lambda x: x.strip()) # strip keys + i = np.argmax(fitness(data.values[:, :7])) # + f.write(f'# YOLOv5 Hyperparameter Evolution Results\n' + + f'# Best generation: {i}\n' + + f'# Last generation: {len(data)}\n' + + f'# ' + ', '.join(f'{x.strip():>20s}' for x in keys[:7]) + '\n' + + f'# ' + ', '.join(f'{x:>20.5g}' for x in data.values[i, :7]) + '\n\n') yaml.safe_dump(hyp, f, sort_keys=False) if bucket: - os.system('gsutil cp evolve.txt %s gs://%s' % (yaml_file, bucket)) # upload + os.system(f'gsutil cp {evolve_csv} {evolve_yaml} gs://{bucket}') # upload def apply_classifier(x, model, img, im0): diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index be76d0c17f1b..d40c0c350fde 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -95,9 +95,8 @@ def on_val_end(self): files = sorted(self.save_dir.glob('val*.jpg')) self.wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in files]}) - def on_fit_epoch_end(self, mloss, results, lr, epoch, best_fitness, fi): + def on_fit_epoch_end(self, vals, epoch, best_fitness, fi): # Callback runs at the end of each fit (train+val) epoch - vals = list(mloss) + list(results) + lr x = {k: v for k, v in zip(self.keys, vals)} # dict if self.csv: file = self.save_dir / 'results.csv' @@ -123,7 +122,7 @@ def on_model_save(self, last, epoch, final_epoch, best_fitness, fi): def on_train_end(self, last, best, plots, epoch): # Callback runs on training end if plots: - plot_results(dir=self.save_dir) # save results.png + plot_results(file=self.save_dir / 'results.csv') # save results.png files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]] files = [(self.save_dir / f) for f in files if (self.save_dir / f).exists()] # filter diff --git a/utils/plots.py b/utils/plots.py index 252e128168ee..ef850ee2f26d 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -325,30 +325,6 @@ def plot_labels(labels, names=(), save_dir=Path('')): plt.close() -def plot_evolution(yaml_file='data/hyp.finetune.yaml'): # from utils.plots import *; plot_evolution() - # Plot hyperparameter evolution results in evolve.txt - with open(yaml_file) as f: - hyp = yaml.safe_load(f) - x = 
np.loadtxt('evolve.txt', ndmin=2) - f = fitness(x) - # weights = (f - f.min()) ** 2 # for weighted results - plt.figure(figsize=(10, 12), tight_layout=True) - matplotlib.rc('font', **{'size': 8}) - for i, (k, v) in enumerate(hyp.items()): - y = x[:, i + 7] - # mu = (y * weights).sum() / weights.sum() # best weighted result - mu = y[f.argmax()] # best single result - plt.subplot(6, 5, i + 1) - plt.scatter(y, f, c=hist2d(y, f, 20), cmap='viridis', alpha=.8, edgecolors='none') - plt.plot(mu, f.max(), 'k+', markersize=15) - plt.title('%s = %.3g' % (k, mu), fontdict={'size': 9}) # limit to 40 characters - if i % 5 != 0: - plt.yticks([]) - print('%15s: %.3g' % (k, mu)) - plt.savefig('evolve.png', dpi=200) - print('\nPlot saved as evolve.png') - - def profile_idetection(start=0, stop=0, labels=(), save_dir=''): # Plot iDetection '*.txt' per-image logs. from utils.plots import *; profile_idetection() ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel() @@ -381,7 +357,31 @@ def profile_idetection(start=0, stop=0, labels=(), save_dir=''): plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200) -def plot_results(file='', dir=''): +def plot_evolve(evolve_csv=Path('path/to/evolve.csv')): # from utils.plots import *; plot_evolve() + # Plot evolve.csv hyp evolution results + data = pd.read_csv(evolve_csv) + keys = [x.strip() for x in data.columns] + x = data.values + f = fitness(x) + j = np.argmax(f) # max fitness index + plt.figure(figsize=(10, 12), tight_layout=True) + matplotlib.rc('font', **{'size': 8}) + for i, k in enumerate(keys[7:]): + v = x[:, 7 + i] + mu = v[j] # best single result + plt.subplot(6, 5, i + 1) + plt.scatter(v, f, c=hist2d(v, f, 20), cmap='viridis', alpha=.8, edgecolors='none') + plt.plot(mu, f.max(), 'k+', markersize=15) + plt.title('%s = %.3g' % (k, mu), fontdict={'size': 9}) # limit to 40 characters + if i % 5 != 0: + plt.yticks([]) + print('%15s: %.3g' % (k, mu)) + f = evolve_csv.with_suffix('.png') # filename + plt.savefig(f, dpi=200) + print(f'Saved {f}') + + +def plot_results(file='path/to/results.csv', dir=''): # Plot training results.csv. 
Usage: from utils.plots import *; plot_results('path/to/results.csv') save_dir = Path(file).parent if file else Path(dir) fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True) From 86c7150cfd3ac926985ed8b0aa6550820c0d3ab9 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 4 Aug 2021 17:41:38 +0200 Subject: [PATCH 234/757] Update newline (#4308) --- train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/train.py b/train.py index a7d61c8c5411..23f4971b1758 100644 --- a/train.py +++ b/train.py @@ -583,7 +583,7 @@ def main(opt): # Plot results plot_evolve(evolve_csv) print(f'Hyperparameter evolution finished\n' - f"Results saved to {colorstr('bold', save_dir)}" + f"Results saved to {colorstr('bold', save_dir)}\n" f'Use best hyperparameters example: $ python train.py --hyp {evolve_yaml}') From f409d8e54f9391ce21436d33334beff3a2fd4042 Mon Sep 17 00:00:00 2001 From: Sudhanshu Singh Date: Thu, 5 Aug 2021 01:41:48 +0530 Subject: [PATCH 235/757] Update README.md (#4309) remove unnecessary "`" --- utils/flask_rest_api/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/flask_rest_api/README.md b/utils/flask_rest_api/README.md index 324c2416dcd9..6c835936dde6 100644 --- a/utils/flask_rest_api/README.md +++ b/utils/flask_rest_api/README.md @@ -19,7 +19,7 @@ $ python3 restapi.py --port 5000 Then use [curl](https://curl.se/) to perform a request: ```shell -$ curl -X POST -F image=@zidane.jpg 'http://localhost:5000/v1/object-detection/yolov5s'` +$ curl -X POST -F image=@zidane.jpg 'http://localhost:5000/v1/object-detection/yolov5s' ``` The model inference results are returned as a JSON response: From e96c74b5a1c4a27934c5d8ad52cde778af248ed8 Mon Sep 17 00:00:00 2001 From: Yonghye Kwon Date: Thu, 5 Aug 2021 17:54:16 +0900 Subject: [PATCH 236/757] Simpler code for DWConvClass (#4310) * more simpler code for DWConvClass more simpler code for DWConvClass * remove DWConv function * Replace DWConvClass with DWConv --- models/common.py | 10 ++-------- models/yolo.py | 2 +- 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/models/common.py b/models/common.py index 30e7319f98a0..2d24672a6b44 100644 --- a/models/common.py +++ b/models/common.py @@ -29,11 +29,6 @@ def autopad(k, p=None): # kernel, padding return p -def DWConv(c1, c2, k=1, s=1, act=True): - # Depth-wise convolution function - return Conv(c1, c2, k, s, g=math.gcd(c1, c2), act=act) - - class Conv(nn.Module): # Standard convolution def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups @@ -49,11 +44,10 @@ def forward_fuse(self, x): return self.act(self.conv(x)) -class DWConvClass(Conv): +class DWConv(Conv): # Depth-wise convolution class def __init__(self, c1, c2, k=1, s=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups - super().__init__(c1, c2, k, s, act) - self.conv = nn.Conv2d(c1, c2, k, s, autopad(k), groups=math.gcd(c1, c2), bias=False) + super().__init__(c1, c2, k, s, g=math.gcd(c1, c2), act=act) class TransformerLayer(nn.Module): diff --git a/models/yolo.py b/models/yolo.py index 9f05c8329f38..380f3401e5b9 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -202,7 +202,7 @@ def _print_biases(self): def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers LOGGER.info('Fusing layers... 
') for m in self.model.modules(): - if isinstance(m, (Conv, DWConvClass)) and hasattr(m, 'bn'): + if isinstance(m, (Conv, DWConv)) and hasattr(m, 'bn'): m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv delattr(m, 'bn') # remove batchnorm m.forward = m.forward_fuse # update forward From ce7deec440404d17b315768d955313404d70e776 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 11 Aug 2021 17:32:13 +0200 Subject: [PATCH 237/757] `int(mlc)` (#4385) --- train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/train.py b/train.py index 23f4971b1758..24152f1a1198 100644 --- a/train.py +++ b/train.py @@ -204,7 +204,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary hyp=hyp, augment=True, cache=opt.cache, rect=opt.rect, rank=RANK, workers=workers, image_weights=opt.image_weights, quad=opt.quad, prefix=colorstr('train: ')) - mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class + mlc = int(np.concatenate(dataset.labels, 0)[:, 0].max()) # max label class nb = len(train_loader) # number of batches assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. Possible class labels are 0-{nc - 1}' From e7fc27406ab32b9860c9b351b30ca1c47f543433 Mon Sep 17 00:00:00 2001 From: Yuantao Yang <31794133+orangeccc@users.noreply.github.com> Date: Wed, 11 Aug 2021 23:38:34 +0800 Subject: [PATCH 238/757] Fix module count in parse_model (#4379) Co-authored-by: yangyuantao --- models/yolo.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/models/yolo.py b/models/yolo.py index 380f3401e5b9..98e578d20384 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -234,7 +234,7 @@ def parse_model(d, ch): # model_dict, input_channels(3) except: pass - n = max(round(n * gd), 1) if n > 1 else n # depth gain + n = n_ = max(round(n * gd), 1) if n > 1 else n # depth gain if m in [Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP, C3, C3TR, C3SPP]: c1, c2 = ch[f], args[0] @@ -264,7 +264,7 @@ def parse_model(d, ch): # model_dict, input_channels(3) t = str(m)[8:-2].replace('__main__.', '') # module type np = sum([x.numel() for x in m_.parameters()]) # number params m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params - LOGGER.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n, np, t, args)) # print + LOGGER.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n_, np, t, args)) # print save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist layers.append(m_) if i == 0: From b27f69f108a9b92f20bfd2725350bd86c313a177 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 11 Aug 2021 18:32:06 +0200 Subject: [PATCH 239/757] Created using Colaboratory --- tutorial.ipynb | 38 +++++++++++++++++++++++++++++++------- 1 file changed, 31 insertions(+), 7 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index b16506275288..eaa886509a66 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -19,6 +19,7 @@ "2e915d9016c846e095e382b6a02ee773": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", + "model_module_version": "1.5.0", "state": { "_view_name": "HBoxView", "_dom_classes": [], @@ -39,6 +40,7 @@ "cb7fc3a5c6cc4fde8d2c83e594a7c86e": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", + "model_module_version": "1.2.0", "state": { "_view_name": "LayoutView", "grid_template_rows": null, @@ -90,6 +92,7 @@ "ac3edef4e3434f4587e6cbf8aa048770": { "model_module": "@jupyter-widgets/controls", "model_name": 
"FloatProgressModel", + "model_module_version": "1.5.0", "state": { "_view_name": "ProgressView", "style": "IPY_MODEL_13842ca90c0047e584b8d68d99dad2b1", @@ -113,6 +116,7 @@ "853ac234cc2a4236946fc516871e10eb": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", + "model_module_version": "1.5.0", "state": { "_view_name": "HTMLView", "style": "IPY_MODEL_f94a7ca8c1f04761bf38fdc5f99664b8", @@ -133,6 +137,7 @@ "13842ca90c0047e584b8d68d99dad2b1": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", + "model_module_version": "1.5.0", "state": { "_view_name": "StyleView", "_model_name": "ProgressStyleModel", @@ -148,6 +153,7 @@ "f454999c3a924c7bad0746fb453dec36": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", + "model_module_version": "1.2.0", "state": { "_view_name": "LayoutView", "grid_template_rows": null, @@ -199,6 +205,7 @@ "f94a7ca8c1f04761bf38fdc5f99664b8": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", "state": { "_view_name": "StyleView", "_model_name": "DescriptionStyleModel", @@ -213,6 +220,7 @@ "9da1a23b042c41618dd14b0e30aa7cbe": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", + "model_module_version": "1.2.0", "state": { "_view_name": "LayoutView", "grid_template_rows": null, @@ -264,6 +272,7 @@ "6ff8a710ded44391a624dec5c460b771": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", + "model_module_version": "1.5.0", "state": { "_view_name": "HBoxView", "_dom_classes": [], @@ -284,6 +293,7 @@ "3c19729b51cd45d4848035da06e96ff8": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", + "model_module_version": "1.2.0", "state": { "_view_name": "LayoutView", "grid_template_rows": null, @@ -335,6 +345,7 @@ "23b2f0ae3d46438c8de375987c77f580": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", + "model_module_version": "1.5.0", "state": { "_view_name": "ProgressView", "style": "IPY_MODEL_d8dda4b2ce864fd682e558b9a48f602e", @@ -358,6 +369,7 @@ "dd9498c321a9422da6faf17a0be026d4": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", + "model_module_version": "1.5.0", "state": { "_view_name": "HTMLView", "style": "IPY_MODEL_0f84fe609bcf4aa9afdc32a8cf076909", @@ -378,6 +390,7 @@ "d8dda4b2ce864fd682e558b9a48f602e": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", + "model_module_version": "1.5.0", "state": { "_view_name": "StyleView", "_model_name": "ProgressStyleModel", @@ -393,6 +406,7 @@ "ff8151449e444a14869684212b9ab14e": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", + "model_module_version": "1.2.0", "state": { "_view_name": "LayoutView", "grid_template_rows": null, @@ -444,6 +458,7 @@ "0f84fe609bcf4aa9afdc32a8cf076909": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", "state": { "_view_name": "StyleView", "_model_name": "DescriptionStyleModel", @@ -458,6 +473,7 @@ "8fda673769984e2b928ef820d34c85c3": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", + "model_module_version": "1.2.0", "state": { "_view_name": "LayoutView", "grid_template_rows": null, @@ -564,7 +580,7 @@ "clear_output()\n", "print(f\"Setup complete. 
Using torch {torch.__version__} ({torch.cuda.get_device_properties(0).name if torch.cuda.is_available() else 'CPU'})\")" ], - "execution_count": 1, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -585,7 +601,15 @@ "\n", "`detect.py` runs YOLOv5 inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/detect`. Example inference sources are:\n", "\n", - " " + "```shell\n", + "python detect.py --source 0 # webcam\n", + " file.jpg # image \n", + " file.mp4 # video\n", + " path/ # directory\n", + " path/*.jpg # glob\n", + " 'https://youtu.be/NUsoVlDFqZg' # YouTube\n", + " 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream\n", + "```" ] }, { @@ -601,7 +625,7 @@ "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images/\n", "Image(filename='runs/detect/exp/zidane.jpg', width=600)" ], - "execution_count": 9, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -675,7 +699,7 @@ "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017val.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../datasets && rm tmp.zip" ], - "execution_count": 10, + "execution_count": null, "outputs": [ { "output_type": "display_data", @@ -715,7 +739,7 @@ "# Run YOLOv5x on COCO val2017\n", "!python val.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65 --half" ], - "execution_count": 11, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -839,7 +863,7 @@ "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../ && rm tmp.zip" ], - "execution_count": 12, + "execution_count": null, "outputs": [ { "output_type": "display_data", @@ -917,7 +941,7 @@ "# Train YOLOv5s on COCO128 for 3 epochs\n", "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache" ], - "execution_count": 13, + "execution_count": null, "outputs": [ { "output_type": "stream", From 11e4aebfefb0ce77972c0b083fac03f2da650b76 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 11 Aug 2021 18:33:34 +0200 Subject: [PATCH 240/757] Update README.md (#4387) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index df4e9add519d..febf4bff9b40 100644 --- a/README.md +++ b/README.md @@ -112,7 +112,7 @@ $ python detect.py --source 0 # webcam file.mp4 # video path/ # directory path/*.jpg # glob - 'https://youtu.be/NUsoVlDFqZg' # YouTube video + 'https://youtu.be/NUsoVlDFqZg' # YouTube 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream ``` From 75d323dd8b13dfa16707a5174960844d99f4d708 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Wed, 11 Aug 2021 22:05:43 +0530 Subject: [PATCH 241/757] W&B: Add advanced features tutorial (#4384) * Improve docstrings and run names * default wandb login prompt with timeout * return key * Update api_key check logic * Properly support zipped dataset feature * update docstring * Revert tuorial change * extend changes to log_dataset * add run name * bug fix * bug fix * Update comment * fix import check * remove unused import * Hardcore .yaml file extension * reduce code * Reformat using pycharm * Remove redundant try catch * More refactoring and bug fixes * retry * Reformat using pycharm * respect LOGGERS include list * Initial readme update * Update README.md * Update README.md * Update README.md * 
Update README.md * Update README.md Co-authored-by: Glenn Jocher --- utils/loggers/wandb/README.md | 140 ++++++++++++++++++++++++++++++++++ 1 file changed, 140 insertions(+) create mode 100644 utils/loggers/wandb/README.md diff --git a/utils/loggers/wandb/README.md b/utils/loggers/wandb/README.md new file mode 100644 index 000000000000..8616ea2b6945 --- /dev/null +++ b/utils/loggers/wandb/README.md @@ -0,0 +1,140 @@ +📚 This guide explains how to use **Weights & Biases** (W&B) with YOLOv5 🚀. + * [About Weights & Biases](#about-weights-&-biases) + * [First-Time Setup](#first-time-setup) + * [Viewing runs](#viewing-runs) + * [Advanced Usage: Dataset Versioning and Evaluation](#advanced-usage) + * [Reports: Share your work with the world!](#reports) + +## About Weights & Biases +Think of [W&B](https://wandb.ai/site?utm_campaign=repo_yolo_wandbtutorial) like GitHub for machine learning models. With a few lines of code, save everything you need to debug, compare and reproduce your models — architecture, hyperparameters, git commits, model weights, GPU usage, and even datasets and predictions. + + Used by top researchers including teams at OpenAI, Lyft, Github, and MILA, W&B is part of the new standard of best practices for machine learning. How W&B can help you optimize your machine learning workflows: + + * [Debug](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#Free-2) model performance in real time + * [GPU usage](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#System-4), visualized automatically + * [Custom charts](https://wandb.ai/wandb/customizable-charts/reports/Powerful-Custom-Charts-To-Debug-Model-Peformance--VmlldzoyNzY4ODI) for powerful, extensible visualization + * [Share insights](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#Share-8) interactively with collaborators + * [Optimize hyperparameters](https://docs.wandb.com/sweeps) efficiently + * [Track](https://docs.wandb.com/artifacts) datasets, pipelines, and production models + + ## First-Time Setup +
+ When you first train, W&B will prompt you to create a new account and will generate an **API key** for you. If you are an existing user you can retrieve your key from https://wandb.ai/authorize. This key is used to tell W&B where to log your data. You only need to supply your key once, and then it is remembered on the same device.
+
+ W&B will create a cloud **project** (default is 'YOLOv5') for your training runs, and each new training run will be provided a unique run **name** within that project as project/name. You can also manually set your project and run name as:
+
+ ```shell
+ $ python train.py --project ... --name ...
+ ```
+
+
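+ If you prefer to authenticate non-interactively (for example on a remote machine), the same key can be supplied in Python. A minimal sketch, assuming you have copied your key from https://wandb.ai/authorize (the key string below is a placeholder):
+
+ ```python
+ import wandb
+
+ # One-time non-interactive login; the placeholder key is then cached on this device
+ wandb.login(key='YOUR_API_KEY')
+ ```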
+ +## Viewing Runs +
+ Run information streams from your environment to the W&B cloud console as you train. This allows you to monitor and even cancel runs in real time. All important information is logged:
+
+ * Training & Validation losses
+ * Metrics: Precision, Recall, mAP@0.5, mAP@0.5:0.95
+ * Learning Rate over time
+ * A bounding box debugging panel, showing the training progress over time
+ * GPU: Type, **GPU Utilization**, power, temperature, **CUDA memory usage**
+ * System: Disk I/O, CPU utilization, RAM memory usage
+ * Your trained model as a W&B Artifact
+ * Environment: OS and Python types, Git repository and state, **training command**
+
+
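+ Under the hood this streaming is ordinary W&B logging. A minimal standalone sketch (this is not YOLOv5's internal logger; the metric names and values below are placeholders) of how values reach the console:
+
+ ```python
+ import wandb
+
+ run = wandb.init(project='YOLOv5', name='example-run')  # project/name as above
+ for epoch in range(3):
+     # placeholder values; YOLOv5 logs its real losses and metrics each epoch
+     run.log({'train/box_loss': 0.05 / (epoch + 1), 'metrics/mAP_0.5': 0.30 + 0.10 * epoch})
+ run.finish()
+ ```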
+ +## Advanced Usage +You can leverage W&B artifacts and Tables integration to easily visualize and manage your datasets, models and training evaluations. Here are some quick examples to get you started. +
+

+ ### 1. Visualize and Version Datasets

+ Log, visualize, dynamically query, and understand your data with W&B Tables. You can use the following command to log your dataset as a W&B Table. This will generate a {dataset}_wandb.yaml file which can be used to train from the dataset artifact.
+
+ Usage
+ Code: `$ python utils/logger/wandb/log_dataset.py --project ... --name ... --data ..`
+
+ ![Screenshot (64)](https://user-images.githubusercontent.com/15766192/128486078-d8433890-98a3-4d12-8986-b6c0e3fc64b9.png)
+
+ +

+ ### 2: Train and Log Evaluation simultaneously

+ This is an extension of the previous section, but it will also train after uploading the dataset, additionally logging an Evaluation Table.
+ The Evaluation Table compares your predictions and ground truths across the validation set for each epoch. It uses references to the already uploaded datasets,
+ so no images will be uploaded from your system more than once.
+
+ Usage
+ Code: `$ python utils/logger/wandb/log_dataset.py --data .. --upload_data`
+
+![Screenshot (72)](https://user-images.githubusercontent.com/15766192/128979739-4cf63aeb-a76f-483f-8861-1c0100b938a5.png)
+
+ +

+ ### 3: Train using dataset artifact

+ When you upload a dataset as described in the first section, you get a new config file with an added `_wandb` to its name. This file contains the information that
+ can be used to train a model directly from the dataset artifact. This also logs evaluation.
+
+ Usage
+ Code: `$ python utils/logger/wandb/log_dataset.py --data {data}_wandb.yaml`
+
+![Screenshot (72)](https://user-images.githubusercontent.com/15766192/128979739-4cf63aeb-a76f-483f-8861-1c0100b938a5.png)
+
+ +
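+ Outside of train.py, logged dataset artifacts can also be inspected or fetched with the public W&B API. A minimal sketch, assuming a hypothetical entity/project 'username/YOLOv5' and a dataset artifact named 'train_dataset':
+
+ ```python
+ import wandb
+
+ api = wandb.Api()
+ artifact = api.artifact('username/YOLOv5/train_dataset:latest')  # hypothetical names
+ local_dir = artifact.download()  # materializes the versioned dataset locally
+ print('dataset downloaded to', local_dir)
+ ```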

+ ### 4: Save model checkpoints as artifacts

+ To enable saving and versioning checkpoints of your experiment, pass `--save_period n` with the base command, where `n` represents the checkpoint interval.
+ You can also log both the dataset and model checkpoints simultaneously. If `--save_period` is not passed, only the final model will be logged.
+
+
+ Usage
+ Code: `$ python train.py --save_period 1`
+
+![Screenshot (68)](https://user-images.githubusercontent.com/15766192/128726138-ec6c1f60-639d-437d-b4ee-3acd9de47ef3.png)
+
+ +
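+ Checkpoint artifacts can likewise be pulled down manually with the public API. A minimal sketch, assuming a hypothetical run id 'abc123', the 'run_<id>_model' artifact naming used by the logger, and a 'last.pt' file inside the artifact (the path and filename here are assumptions):
+
+ ```python
+ import torch
+ import wandb
+
+ api = wandb.Api()
+ ckpt_dir = api.artifact('username/YOLOv5/run_abc123_model:latest').download()  # hypothetical path
+ ckpt = torch.load(f'{ckpt_dir}/last.pt', map_location='cpu')  # assumed checkpoint filename
+ ```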
+ +

+ ### 5: Resume runs from checkpoint artifacts

+Any run can be resumed using artifacts if the --resume argument starts with the wandb-artifact:// prefix followed by the run path, i.e. wandb-artifact://username/project/run_id. This doesn't require the model checkpoint to be present on the local system.
+
+
+ Usage
+ Code: `$ python train.py --resume wandb-artifact://{run_path}`
+
+![Screenshot (70)](https://user-images.githubusercontent.com/15766192/128728988-4e84b355-6c87-41ae-a591-14aecf45343e.png)
+
+ +
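+ If you don't remember a run path, it can be recovered from the API. A minimal sketch, assuming a hypothetical entity/project 'username/YOLOv5':
+
+ ```python
+ import wandb
+
+ api = wandb.Api()
+ for run in api.runs('username/YOLOv5'):
+     # run.path is [entity, project, run_id], the pieces of the --resume argument
+     print('wandb-artifact://' + '/'.join(run.path))
+ ```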

+ ### 6: Resume runs from dataset artifact & checkpoint artifacts

+ Local dataset or model checkpoints are not required. This can be used to resume runs directly on a different device.
+ The syntax is the same as the previous section, but you'll need to log both the dataset and model checkpoints as artifacts, i.e. set --upload_dataset (or
+ train from a _wandb.yaml file) and set --save_period
+
+
+ Usage
+ Code: `$ python train.py --resume wandb-artifact://{run_path}`
+
+![Screenshot (70)](https://user-images.githubusercontent.com/15766192/128728988-4e84b355-6c87-41ae-a591-14aecf45343e.png)
+
+ +
+ + + +

+ ## Reports

+ W&B Reports can be created from your saved runs for sharing online. Once a report is created you will receive a link you can use to publicly share your results. Here is an example report created from the COCO128 tutorial trainings of all four YOLOv5 models ([link](https://wandb.ai/glenn-jocher/yolov5_tutorial/reports/YOLOv5-COCO128-Tutorial-Results--VmlldzozMDI5OTY)).
+
+
+
+ ## Environments
+ YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):
+
+ * **Google Colab and Kaggle** notebooks with free GPU: [![Open In Colab](https://camo.githubusercontent.com/84f0493939e0c4de4e6dbe113251b4bfb5353e57134ffd9fcab6b8714514d4d1/68747470733a2f2f636f6c61622e72657365617263682e676f6f676c652e636f6d2f6173736574732f636f6c61622d62616467652e737667)](https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb) [![Open In Kaggle](https://camo.githubusercontent.com/a08ca511178e691ace596a95d334f73cf4ce06e83a5c4a5169b8bb68cac27bef/68747470733a2f2f6b6167676c652e636f6d2f7374617469632f696d616765732f6f70656e2d696e2d6b6167676c652e737667)](https://www.kaggle.com/ultralytics/yolov5)
+ * **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)
+ * **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)
+ * **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) [![Docker Pulls](https://camo.githubusercontent.com/280faedaf431e4c0c24fdb30ec00a66d627404e5c4c498210d3f014dd58c2c7e/68747470733a2f2f696d672e736869656c64732e696f2f646f636b65722f70756c6c732f756c7472616c79746963732f796f6c6f76353f6c6f676f3d646f636b6572)](https://hub.docker.com/r/ultralytics/yolov5)
+
+ ## Status
+ ![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg)
+
+ If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), validation ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on MacOS, Windows, and Ubuntu every 24 hours and on every commit.
+ From a4e4553040503b11df9283fda666736e9c57dd87 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Thu, 12 Aug 2021 01:26:25 +0530 Subject: [PATCH 242/757] W&B: Fix for 4360 (#4388) * Improve docstrings and run names * default wandb login prompt with timeout * return key * Update api_key check logic * Properly support zipped dataset feature * update docstring * Revert tuorial change * extend changes to log_dataset * add run name * bug fix * bug fix * Update comment * fix import check * remove unused import * Hardcore .yaml file extension * reduce code * Reformat using pycharm * Remove redundant try catch * More refactoring and bug fixes * retry * Reformat using pycharm * respect LOGGERS include list * Fix * fix Co-authored-by: Glenn Jocher --- utils/loggers/wandb/wandb_utils.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index 66fa8f85ec4e..3f2684a7f3e3 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -157,6 +157,8 @@ def __init__(self, opt, run_id, job_type='Training'): self.data_dict = data_dict else: # Local .yaml dataset file or .zip file self.data_dict = check_dataset(opt.data) + else: + self.data_dict = check_dataset(opt.data) self.setup_training(opt) if not self.wandb_artifact_data_dict: @@ -505,4 +507,4 @@ def all_logging_disabled(highest_level=logging.CRITICAL): try: yield finally: - logging.disable(previous_level) + logging.disable(previous_level) \ No newline at end of file From 3e7c59ad3bf5414d4b2a26e018f397e27a51c6f1 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 11 Aug 2021 23:40:47 +0200 Subject: [PATCH 243/757] Fix rename `utils.google_utils` to `utils.downloads` (#4393) --- data/scripts/download_weights.sh | 2 +- utils/downloads.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/data/scripts/download_weights.sh b/data/scripts/download_weights.sh index 013036978c07..a576c956d008 100755 --- a/data/scripts/download_weights.sh +++ b/data/scripts/download_weights.sh @@ -9,7 +9,7 @@ # └── ... 
python - < Date: Fri, 13 Aug 2021 13:22:13 +0200 Subject: [PATCH 244/757] Simplify ONNX inference command (#4405) --- export.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/export.py b/export.py index cec85958b4a9..db5a6918845c 100644 --- a/export.py +++ b/export.py @@ -76,7 +76,7 @@ def export_onnx(model, img, file, opset, train, dynamic, simplify): except Exception as e: print(f'{prefix} simplifier failure: {e}') print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - print(f"{prefix} run --dynamic ONNX model inference with detect.py: 'python detect.py --weights {f}'") + print(f"{prefix} run --dynamic ONNX model inference with: 'python detect.py --weights {f}'") except Exception as e: print(f'{prefix} export failure: {e}') From d9f23ed6d65e985c07e9ef0ec77d476dd14e2b26 Mon Sep 17 00:00:00 2001 From: Ahmad Mustafa Anis <47111429+ahmadmustafaanis@users.noreply.github.com> Date: Fri, 13 Aug 2021 16:25:00 +0500 Subject: [PATCH 245/757] No cache option for reading datasets (#4376) * no cache option * no cache option * bit change * changed to 0,1 instead of True False * Update train.py * Update datasets.py Co-authored-by: Glenn Jocher From e086347377923076fb469a401b65980b98cd871b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 14 Aug 2021 12:50:27 +0200 Subject: [PATCH 246/757] Update plots.py (#4407) --- utils/plots.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/plots.py b/utils/plots.py index ef850ee2f26d..7db527e14924 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -79,7 +79,7 @@ def plot_one_box(x, im, color=(128, 128, 128), label=None, line_thickness=3): cv2.putText(im, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA) -def plot_one_box_PIL(box, im, color=(128, 128, 128), label=None, line_thickness=None): +def plot_one_box_PIL(box, im, color=(128, 128, 128), label=None, line_thickness=3): # Plots one bounding box on image 'im' using PIL im = Image.fromarray(im) draw = ImageDraw.Draw(im) From 4e8c81a368d154fed3f27b16a728b6467ff60c6a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 14 Aug 2021 12:55:22 +0200 Subject: [PATCH 247/757] Add `yolov5s-ghost.yaml` (#4412) * Add yolov5s-ghost.yaml * Finish C3Ghost * Add C3Ghost to list * Add C3Ghost to number of repeats if statement * Fixes * Cleanup --- models/common.py | 36 +++++++++++++++++++++++++++ models/experimental.py | 28 --------------------- models/hub/yolov5s-ghost.yaml | 46 +++++++++++++++++++++++++++++++++++ models/yolo.py | 4 +-- 4 files changed, 84 insertions(+), 30 deletions(-) create mode 100644 models/hub/yolov5s-ghost.yaml diff --git a/models/common.py b/models/common.py index 2d24672a6b44..5ef3996007a2 100644 --- a/models/common.py +++ b/models/common.py @@ -149,6 +149,14 @@ def __init__(self, c1, c2, k=(5, 9, 13), n=1, shortcut=True, g=1, e=0.5): self.m = SPP(c_, c_, k) +class C3Ghost(C3): + # C3 module with GhostBottleneck() + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2 * e) # hidden channels + self.m = nn.Sequential(*[GhostBottleneck(c_, c_) for _ in range(n)]) + + class SPP(nn.Module): # Spatial pyramid pooling layer used in YOLOv3-SPP def __init__(self, c1, c2, k=(5, 9, 13)): @@ -177,6 +185,34 @@ def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2) # return self.conv(self.contract(x)) +class GhostConv(nn.Module): + # Ghost Convolution https://github.com/huawei-noah/ghostnet + def __init__(self, c1, c2, k=1, s=1, g=1, 
act=True): # ch_in, ch_out, kernel, stride, groups + super().__init__() + c_ = c2 // 2 # hidden channels + self.cv1 = Conv(c1, c_, k, s, None, g, act) + self.cv2 = Conv(c_, c_, 5, 1, None, c_, act) + + def forward(self, x): + y = self.cv1(x) + return torch.cat([y, self.cv2(y)], 1) + + +class GhostBottleneck(nn.Module): + # Ghost Bottleneck https://github.com/huawei-noah/ghostnet + def __init__(self, c1, c2, k=3, s=1): # ch_in, ch_out, kernel, stride + super().__init__() + c_ = c2 // 2 + self.conv = nn.Sequential(GhostConv(c1, c_, 1, 1), # pw + DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw + GhostConv(c_, c2, 1, 1, act=False)) # pw-linear + self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False), + Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity() + + def forward(self, x): + return self.conv(x) + self.shortcut(x) + + class Contract(nn.Module): # Contract width-height into channels, i.e. x(1,64,80,80) to x(1,256,40,40) def __init__(self, gain=2): diff --git a/models/experimental.py b/models/experimental.py index 581c7b14b61e..5c690cce3d99 100644 --- a/models/experimental.py +++ b/models/experimental.py @@ -43,34 +43,6 @@ def forward(self, x): return y -class GhostConv(nn.Module): - # Ghost Convolution https://github.com/huawei-noah/ghostnet - def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups - super().__init__() - c_ = c2 // 2 # hidden channels - self.cv1 = Conv(c1, c_, k, s, None, g, act) - self.cv2 = Conv(c_, c_, 5, 1, None, c_, act) - - def forward(self, x): - y = self.cv1(x) - return torch.cat([y, self.cv2(y)], 1) - - -class GhostBottleneck(nn.Module): - # Ghost Bottleneck https://github.com/huawei-noah/ghostnet - def __init__(self, c1, c2, k=3, s=1): # ch_in, ch_out, kernel, stride - super().__init__() - c_ = c2 // 2 - self.conv = nn.Sequential(GhostConv(c1, c_, 1, 1), # pw - DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw - GhostConv(c_, c2, 1, 1, act=False)) # pw-linear - self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False), - Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity() - - def forward(self, x): - return self.conv(x) + self.shortcut(x) - - class MixConv2d(nn.Module): # Mixed Depth-wise Conv https://arxiv.org/abs/1907.09595 def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True): diff --git a/models/hub/yolov5s-ghost.yaml b/models/hub/yolov5s-ghost.yaml new file mode 100644 index 000000000000..d99d56d26e85 --- /dev/null +++ b/models/hub/yolov5s-ghost.yaml @@ -0,0 +1,46 @@ +# Parameters +nc: 80 # number of classes +depth_multiple: 0.33 # model depth multiple +width_multiple: 0.50 # layer channel multiple +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, GhostConv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3Ghost, [128]], + [-1, 1, GhostConv, [256, 3, 2]], # 3-P3/8 + [-1, 9, C3Ghost, [256]], + [-1, 1, GhostConv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3Ghost, [512]], + [-1, 1, GhostConv, [1024, 3, 2]], # 7-P5/32 + [-1, 1, SPP, [1024, [5, 9, 13]]], + [-1, 3, C3Ghost, [1024, False]], # 9 + ] + +# YOLOv5 head +head: + [[-1, 1, GhostConv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3Ghost, [512, False]], # 13 + + [-1, 1, GhostConv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone 
P3 + [-1, 3, C3Ghost, [256, False]], # 17 (P3/8-small) + + [-1, 1, GhostConv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3Ghost, [512, False]], # 20 (P4/16-medium) + + [-1, 1, GhostConv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3Ghost, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/models/yolo.py b/models/yolo.py index 98e578d20384..88adb71f8fea 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -236,13 +236,13 @@ def parse_model(d, ch): # model_dict, input_channels(3) n = n_ = max(round(n * gd), 1) if n > 1 else n # depth gain if m in [Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP, - C3, C3TR, C3SPP]: + C3, C3TR, C3SPP, C3Ghost]: c1, c2 = ch[f], args[0] if c2 != no: # if not output c2 = make_divisible(c2 * gw, 8) args = [c1, c2, *args[1:]] - if m in [BottleneckCSP, C3, C3TR]: + if m in [BottleneckCSP, C3, C3TR, C3Ghost]: args.insert(2, n) # number of repeats n = 1 elif m is nn.BatchNorm2d: From 63e09fdc480c9398e7b7acb27083907ed29809de Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 14 Aug 2021 13:47:20 +0200 Subject: [PATCH 248/757] Remove `encoding='ascii'` (#4413) * Remove `encoding='ascii'` * Reinstate `encoding='ascii'` in emojis() --- utils/autoanchor.py | 2 +- utils/datasets.py | 2 +- utils/general.py | 4 ++-- utils/loggers/wandb/wandb_utils.py | 6 +++--- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/utils/autoanchor.py b/utils/autoanchor.py index 2571fc99ac89..eca1d5be8ebe 100644 --- a/utils/autoanchor.py +++ b/utils/autoanchor.py @@ -104,7 +104,7 @@ def print_results(k): return k if isinstance(dataset, str): # *.yaml file - with open(dataset, encoding='ascii', errors='ignore') as f: + with open(dataset, errors='ignore') as f: data_dict = yaml.safe_load(f) # model dict from utils.datasets import LoadImagesAndLabels dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True) diff --git a/utils/datasets.py b/utils/datasets.py index 1c780cdbac4b..b402723f9c49 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -931,7 +931,7 @@ def hub_ops(f, max_dim=1920): im.save(im_dir / Path(f).name, quality=75) # save zipped, data_dir, yaml_path = unzip(Path(path)) - with open(check_file(yaml_path), encoding='ascii', errors='ignore') as f: + with open(check_file(yaml_path), errors='ignore') as f: data = yaml.safe_load(f) # data dict if zipped: data['path'] = data_dir # TODO: should this be dir.resolve()? 
diff --git a/utils/general.py b/utils/general.py index 15111b727f33..c70c21f47636 100755 --- a/utils/general.py +++ b/utils/general.py @@ -112,7 +112,7 @@ def is_pip(): def emojis(str=''): # Return platform-dependent emoji-safe version of string - return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str + return str.encode().decode(encoding='ascii', errors='ignore') if platform.system() == 'Windows' else str def file_size(file): @@ -250,7 +250,7 @@ def check_dataset(data, autodownload=True): # Read yaml (optional) if isinstance(data, (str, Path)): - with open(data, encoding='ascii', errors='ignore') as f: + with open(data, errors='ignore') as f: data = yaml.safe_load(f) # dictionary # Parse yaml diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index 3f2684a7f3e3..019aebf094e1 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -62,7 +62,7 @@ def check_wandb_resume(opt): def process_wandb_config_ddp_mode(opt): - with open(check_file(opt.data), encoding='ascii', errors='ignore') as f: + with open(check_file(opt.data), errors='ignore') as f: data_dict = yaml.safe_load(f) # data dict train_dir, val_dir = None, None if isinstance(data_dict['train'], str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX): @@ -152,7 +152,7 @@ def __init__(self, opt, run_id, job_type='Training'): self.wandb_artifact_data_dict = self.check_and_upload_dataset(opt) elif opt.data.endswith('_wandb.yaml'): # When dataset is W&B artifact - with open(opt.data, encoding='ascii', errors='ignore') as f: + with open(opt.data, errors='ignore') as f: data_dict = yaml.safe_load(f) self.data_dict = data_dict else: # Local .yaml dataset file or .zip file @@ -186,7 +186,7 @@ def check_and_upload_dataset(self, opt): opt.single_cls, 'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem) print("Created dataset config file ", config_path) - with open(config_path, encoding='ascii', errors='ignore') as f: + with open(config_path, errors='ignore') as f: wandb_data_dict = yaml.safe_load(f) return wandb_data_dict From 2da4e7acf7510dd2a249120c484c6e5157459b3e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 14 Aug 2021 17:44:15 +0200 Subject: [PATCH 249/757] Merge PIL and OpenCV in `plot_one_box(use_pil=False)` (#4416) * Merge PIL and OpenCV box plotting functions * Add ASCII check to plot_one_box * Cleanup * Cleanup2 --- detect.py | 2 +- models/common.py | 2 +- utils/general.py | 7 +++++- utils/plots.py | 56 +++++++++++++++++++++++------------------------- 4 files changed, 35 insertions(+), 32 deletions(-) diff --git a/detect.py b/detect.py index 88d1d9ca3800..db0c545b0635 100644 --- a/detect.py +++ b/detect.py @@ -156,7 +156,7 @@ def run(weights='yolov5s.pt', # model.pt path(s) if save_img or save_crop or view_img: # Add bbox to image c = int(cls) # integer class label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') - plot_one_box(xyxy, im0, label=label, color=colors(c, True), line_thickness=line_thickness) + im0 = plot_one_box(xyxy, im0, label=label, color=colors(c, True), line_width=line_thickness) if save_crop: save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) diff --git a/models/common.py b/models/common.py index 5ef3996007a2..35790804ca52 100644 --- a/models/common.py +++ b/models/common.py @@ -354,7 +354,7 @@ def display(self, pprint=False, show=False, save=False, crop=False, render=False if crop: save_one_box(box, im, 
file=save_dir / 'crops' / self.names[int(cls)] / self.files[i]) else: # all others - plot_one_box(box, im, label=label, color=colors(cls)) + im = plot_one_box(box, im, label=label, color=colors(cls)) else: str += '(no detections)' diff --git a/utils/general.py b/utils/general.py index c70c21f47636..4fc32f5691fc 100755 --- a/utils/general.py +++ b/utils/general.py @@ -110,9 +110,14 @@ def is_pip(): return 'site-packages' in Path(__file__).absolute().parts +def is_ascii(str=''): + # Is string composed of all ASCII (no UTF) characters? + return len(str.encode().decode('ascii', 'ignore')) == len(str) + + def emojis(str=''): # Return platform-dependent emoji-safe version of string - return str.encode().decode(encoding='ascii', errors='ignore') if platform.system() == 'Windows' else str + return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str def file_size(file): diff --git a/utils/plots.py b/utils/plots.py index 7db527e14924..71e90b00241d 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -1,20 +1,19 @@ # Plotting utils +import math from copy import copy from pathlib import Path import cv2 -import math import matplotlib import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sn import torch -import yaml from PIL import Image, ImageDraw, ImageFont -from utils.general import xywh2xyxy, xyxy2xywh +from utils.general import is_ascii, xyxy2xywh, xywh2xyxy from utils.metrics import fitness # Settings @@ -65,32 +64,31 @@ def butter_lowpass(cutoff, fs, order): return filtfilt(b, a, data) # forward-backward filter -def plot_one_box(x, im, color=(128, 128, 128), label=None, line_thickness=3): - # Plots one bounding box on image 'im' using OpenCV +def plot_one_box(box, im, color=(128, 128, 128), txt_color=(255, 255, 255), label=None, line_width=3, use_pil=False): + # Plots one xyxy box on image im with label assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to plot_on_box() input image.' 
- tl = line_thickness or round(0.002 * (im.shape[0] + im.shape[1]) / 2) + 1 # line/font thickness - c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3])) - cv2.rectangle(im, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA) - if label: - tf = max(tl - 1, 1) # font thickness - t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0] - c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3 - cv2.rectangle(im, c1, c2, color, -1, cv2.LINE_AA) # filled - cv2.putText(im, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA) - - -def plot_one_box_PIL(box, im, color=(128, 128, 128), label=None, line_thickness=3): - # Plots one bounding box on image 'im' using PIL - im = Image.fromarray(im) - draw = ImageDraw.Draw(im) - line_thickness = line_thickness or max(int(min(im.size) / 200), 2) - draw.rectangle(box, width=line_thickness, outline=color) # plot - if label: - font = ImageFont.truetype("Arial.ttf", size=max(round(max(im.size) / 40), 12)) - txt_width, txt_height = font.getsize(label) - draw.rectangle([box[0], box[1] - txt_height + 4, box[0] + txt_width, box[1]], fill=color) - draw.text((box[0], box[1] - txt_height + 1), label, fill=(255, 255, 255), font=font) - return np.asarray(im) + lw = line_width or max(int(min(im.size) / 200), 2) # line width + + if use_pil or not is_ascii(label): # use PIL + im = Image.fromarray(im) + draw = ImageDraw.Draw(im) + draw.rectangle(box, width=lw + 1, outline=color) # plot + if label: + font = ImageFont.truetype("Arial.ttf", size=max(round(max(im.size) / 40), 12)) + txt_width, txt_height = font.getsize(label) + draw.rectangle([box[0], box[1] - txt_height + 4, box[0] + txt_width, box[1]], fill=color) + draw.text((box[0], box[1] - txt_height + 1), label, fill=txt_color, font=font) + return np.asarray(im) + else: # use OpenCV + c1, c2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3])) + cv2.rectangle(im, c1, c2, color, thickness=lw, lineType=cv2.LINE_AA) + if label: + tf = max(lw - 1, 1) # font thickness + txt_width, txt_height = cv2.getTextSize(label, 0, fontScale=lw / 3, thickness=tf)[0] + c2 = c1[0] + txt_width, c1[1] - txt_height - 3 + cv2.rectangle(im, c1, c2, color, -1, cv2.LINE_AA) # filled + cv2.putText(im, label, (c1[0], c1[1] - 2), 0, lw / 3, txt_color, thickness=tf, lineType=cv2.LINE_AA) + return im def plot_wh_methods(): # from utils.plots import *; plot_wh_methods() @@ -180,7 +178,7 @@ def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max cls = names[cls] if names else cls if labels or conf[j] > 0.25: # 0.25 conf thresh label = '%s' % cls if labels else '%s %.1f' % (cls, conf[j]) - plot_one_box(box, mosaic, label=label, color=color, line_thickness=tl) + mosaic = plot_one_box(box, mosaic, label=label, color=color, line_width=tl) # Draw image filename labels if paths: From bb0aed1ba6a62b940df902de7cc6741603bbe82d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 14 Aug 2021 21:12:05 +0200 Subject: [PATCH 250/757] Created using Colaboratory --- tutorial.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index eaa886509a66..7cef01b6b651 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -545,7 +545,7 @@ "\n", "\n", "\n", - "This is the **official YOLOv5 🚀 notebook** authored by **Ultralytics**, and is freely available for redistribution under the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/). 
\n", + "This is the **official YOLOv5 🚀 notebook** by **Ultralytics**, and is freely available for redistribution under the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/). \n", "For more information please visit https://github.com/ultralytics/yolov5 and https://ultralytics.com. Thank you!" ] }, From 24bea5e4b7ad369753f45e93b736d9205c37d20a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 14 Aug 2021 21:17:51 +0200 Subject: [PATCH 251/757] Standardize headers and docstrings (#4417) * Implement new headers * Reformat 1 * Reformat 2 * Reformat 3 - math * Reformat 4 - yaml --- .github/workflows/ci-testing.yml | 12 +++++++----- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/greetings.yml | 4 +++- .github/workflows/rebase.yml | 2 +- .github/workflows/stale.yml | 2 ++ Dockerfile | 2 ++ data/Argoverse.yaml | 2 +- data/GlobalWheat2020.yaml | 2 +- data/Objects365.yaml | 2 +- data/SKU-110K.yaml | 2 +- data/VOC.yaml | 2 +- data/VisDrone.yaml | 2 +- data/coco.yaml | 2 +- data/coco128.yaml | 2 +- data/hyps/hyp.finetune.yaml | 2 +- data/hyps/hyp.finetune_objects365.yaml | 2 ++ data/hyps/hyp.scratch-p6.yaml | 2 +- data/hyps/hyp.scratch.yaml | 2 +- data/scripts/download_weights.sh | 2 +- data/scripts/get_coco.sh | 2 +- data/scripts/get_coco128.sh | 2 +- data/xView.yaml | 2 +- detect.py | 4 +++- export.py | 4 +++- hubconf.py | 4 +++- models/common.py | 7 +++++-- models/experimental.py | 7 +++++-- models/hub/anchors.yaml | 3 ++- models/hub/yolov3-spp.yaml | 2 ++ models/hub/yolov3-tiny.yaml | 2 ++ models/hub/yolov3.yaml | 2 ++ models/hub/yolov5-bifpn.yaml | 2 ++ models/hub/yolov5-fpn.yaml | 2 ++ models/hub/yolov5-p2.yaml | 2 ++ models/hub/yolov5-p6.yaml | 2 ++ models/hub/yolov5-p7.yaml | 2 ++ models/hub/yolov5-panet.yaml | 2 ++ models/hub/yolov5l6.yaml | 2 ++ models/hub/yolov5m6.yaml | 2 ++ models/hub/yolov5s-ghost.yaml | 2 ++ models/hub/yolov5s-transformer.yaml | 2 ++ models/hub/yolov5s6.yaml | 2 ++ models/hub/yolov5x6.yaml | 2 ++ models/yolo.py | 4 +++- models/yolov5l.yaml | 2 ++ models/yolov5m.yaml | 2 ++ models/yolov5s.yaml | 2 ++ models/yolov5x.yaml | 2 ++ train.py | 6 ++++-- utils/activations.py | 5 ++++- utils/augmentations.py | 7 +++++-- utils/autoanchor.py | 5 ++++- utils/callbacks.py | 6 +++++- utils/datasets.py | 5 ++++- utils/downloads.py | 5 ++++- utils/flask_rest_api/README.md | 9 +++++++-- utils/general.py | 7 +++++-- utils/loggers/__init__.py | 6 +++++- utils/loggers/wandb/wandb_utils.py | 2 +- utils/loss.py | 5 ++++- utils/metrics.py | 7 +++++-- utils/plots.py | 5 ++++- utils/torch_utils.py | 7 +++++-- val.py | 4 +++- 64 files changed, 164 insertions(+), 54 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index a7964ea01d5d..02e8f74bf56c 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -1,11 +1,13 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + name: CI CPU testing on: # https://help.github.com/en/actions/reference/events-that-trigger-workflows push: - branches: [ master, develop ] + branches: [master, develop] pull_request: # The branches below must be a subset of the branches above - branches: [ master, develop ] + branches: [master, develop] jobs: cpu-tests: @@ -14,9 +16,9 @@ jobs: strategy: fail-fast: false matrix: - os: [ ubuntu-latest, macos-latest, windows-latest ] - python-version: [ 3.8 ] - model: [ 'yolov5s' ] # models to test + os: [ubuntu-latest, macos-latest, windows-latest] + python-version: [3.8] + model: ['yolov5s'] # models to test # Timeout: 
https://stackoverflow.com/a/59076067/4521646 timeout-minutes: 50 diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 458465d90eef..2305ea07e902 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -15,7 +15,7 @@ jobs: strategy: fail-fast: false matrix: - language: [ 'python' ] + language: ['python'] # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ] # Learn more: # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index ddd739ea5769..c557e77f3b70 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -1,6 +1,8 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + name: Greetings -on: [ pull_request_target, issues ] +on: [pull_request_target, issues] jobs: greeting: diff --git a/.github/workflows/rebase.yml b/.github/workflows/rebase.yml index 38e14578216c..e86c57744b84 100644 --- a/.github/workflows/rebase.yml +++ b/.github/workflows/rebase.yml @@ -3,7 +3,7 @@ name: Automatic Rebase on: issue_comment: - types: [ created ] + types: [created] jobs: rebase: diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index d620e540706a..c81c0ca18c2f 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -1,3 +1,5 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + name: Close stale issues on: schedule: diff --git a/Dockerfile b/Dockerfile index e22c1106f23d..858b22bc6383 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,3 +1,5 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + # Start FROM Nvidia PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch FROM nvcr.io/nvidia/pytorch:21.05-py3 diff --git a/data/Argoverse.yaml b/data/Argoverse.yaml index 3bf91ce7d504..1625dd1b9d2b 100644 --- a/data/Argoverse.yaml +++ b/data/Argoverse.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license # Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ # Example usage: python train.py --data Argoverse.yaml # parent diff --git a/data/GlobalWheat2020.yaml b/data/GlobalWheat2020.yaml index de9c7837cf57..75b3bfdff43e 100644 --- a/data/GlobalWheat2020.yaml +++ b/data/GlobalWheat2020.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license # Global Wheat 2020 dataset http://www.global-wheat.com/ # Example usage: python train.py --data GlobalWheat2020.yaml # parent diff --git a/data/Objects365.yaml b/data/Objects365.yaml index 457b9fd9bf69..dc5bfbc7faa4 100644 --- a/data/Objects365.yaml +++ b/data/Objects365.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license # Objects365 dataset https://www.objects365.org/ # Example usage: python train.py --data Objects365.yaml # parent diff --git a/data/SKU-110K.yaml b/data/SKU-110K.yaml index c85fa81d2e03..653485e2079a 100644 --- a/data/SKU-110K.yaml +++ b/data/SKU-110K.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license # SKU-110K retail items dataset https://github.com/eg4000/SKU110K_CVPR19 # 
Example usage: python train.py --data SKU-110K.yaml # parent diff --git a/data/VOC.yaml b/data/VOC.yaml index e59fb6afd2fd..8dbaacf9c290 100644 --- a/data/VOC.yaml +++ b/data/VOC.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license # PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC # Example usage: python train.py --data VOC.yaml # parent diff --git a/data/VisDrone.yaml b/data/VisDrone.yaml index fe6cb9199ce1..7753da98269e 100644 --- a/data/VisDrone.yaml +++ b/data/VisDrone.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license # VisDrone2019-DET dataset https://github.com/VisDrone/VisDrone-Dataset # Example usage: python train.py --data VisDrone.yaml # parent diff --git a/data/coco.yaml b/data/coco.yaml index acf8e84f3e21..2ccc6478b620 100644 --- a/data/coco.yaml +++ b/data/coco.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license # COCO 2017 dataset http://cocodataset.org # Example usage: python train.py --data coco.yaml # parent diff --git a/data/coco128.yaml b/data/coco128.yaml index eda39dcdaa8d..70cf52c397af 100644 --- a/data/coco128.yaml +++ b/data/coco128.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license # COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) # Example usage: python train.py --data coco128.yaml # parent diff --git a/data/hyps/hyp.finetune.yaml b/data/hyps/hyp.finetune.yaml index 237cd5bc19a1..b89d66ff8dee 100644 --- a/data/hyps/hyp.finetune.yaml +++ b/data/hyps/hyp.finetune.yaml @@ -1,8 +1,8 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license # Hyperparameters for VOC finetuning # python train.py --batch 64 --weights yolov5m.pt --data VOC.yaml --img 512 --epochs 50 # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials - # Hyperparameter Evolution Results # Generations: 306 # P R mAP.5 mAP.5:.95 box obj cls diff --git a/data/hyps/hyp.finetune_objects365.yaml b/data/hyps/hyp.finetune_objects365.yaml index 435fa7a45119..073720a65be5 100644 --- a/data/hyps/hyp.finetune_objects365.yaml +++ b/data/hyps/hyp.finetune_objects365.yaml @@ -1,3 +1,5 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + lr0: 0.00258 lrf: 0.17 momentum: 0.779 diff --git a/data/hyps/hyp.scratch-p6.yaml b/data/hyps/hyp.scratch-p6.yaml index fc1d8ebe0876..7aad818e5b16 100644 --- a/data/hyps/hyp.scratch-p6.yaml +++ b/data/hyps/hyp.scratch-p6.yaml @@ -1,8 +1,8 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license # Hyperparameters for COCO training from scratch # python train.py --batch 32 --cfg yolov5m6.yaml --weights '' --data coco.yaml --img 1280 --epochs 300 # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials - lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3) lrf: 0.2 # final OneCycleLR learning rate (lr0 * lrf) momentum: 0.937 # SGD momentum/Adam beta1 diff --git a/data/hyps/hyp.scratch.yaml b/data/hyps/hyp.scratch.yaml index b2cf2e32c638..77405a537067 100644 --- a/data/hyps/hyp.scratch.yaml +++ b/data/hyps/hyp.scratch.yaml @@ -1,8 +1,8 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license # Hyperparameters for COCO training from scratch # python train.py --batch 40 --cfg yolov5m.yaml --weights '' --data coco.yaml --img 
640 --epochs 300 # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials - lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3) lrf: 0.2 # final OneCycleLR learning rate (lr0 * lrf) momentum: 0.937 # SGD momentum/Adam beta1 diff --git a/data/scripts/download_weights.sh b/data/scripts/download_weights.sh index a576c956d008..b4b0ccd7857e 100755 --- a/data/scripts/download_weights.sh +++ b/data/scripts/download_weights.sh @@ -1,5 +1,5 @@ #!/bin/bash -# YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license # Download latest models from https://github.com/ultralytics/yolov5/releases # Example usage: bash path/to/download_weights.sh # parent diff --git a/data/scripts/get_coco.sh b/data/scripts/get_coco.sh index f6c075689709..0210c8ebbda4 100755 --- a/data/scripts/get_coco.sh +++ b/data/scripts/get_coco.sh @@ -1,5 +1,5 @@ #!/bin/bash -# YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license # Download COCO 2017 dataset http://cocodataset.org # Example usage: bash data/scripts/get_coco.sh # parent diff --git a/data/scripts/get_coco128.sh b/data/scripts/get_coco128.sh index 6eb47bfe5595..4238e3634dbb 100644 --- a/data/scripts/get_coco128.sh +++ b/data/scripts/get_coco128.sh @@ -1,5 +1,5 @@ #!/bin/bash -# YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license # Download COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) # Example usage: bash data/scripts/get_coco128.sh # parent diff --git a/data/xView.yaml b/data/xView.yaml index e191188da0f0..fabcdb0bdd13 100644 --- a/data/xView.yaml +++ b/data/xView.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license # xView 2018 dataset https://challenge.xviewdataset.org # -------- DOWNLOAD DATA MANUALLY from URL above and unzip to 'datasets/xView' before running train command! -------- # Example usage: python train.py --data xView.yaml diff --git a/detect.py b/detect.py index db0c545b0635..49ebbe96c068 100644 --- a/detect.py +++ b/detect.py @@ -1,4 +1,6 @@ -"""Run inference with a YOLOv5 model on images, videos, directories, streams +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Run inference on images, videos, directories, streams, etc. 
Usage: $ python path/to/detect.py --source path/to/img.jpg --weights yolov5s.pt --img 640 diff --git a/export.py b/export.py index db5a6918845c..db805cb45e6e 100644 --- a/export.py +++ b/export.py @@ -1,4 +1,6 @@ -"""Export a YOLOv5 *.pt model to TorchScript, ONNX, CoreML formats +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Export a PyTorch model to TorchScript, ONNX, CoreML formats Usage: $ python path/to/export.py --weights yolov5s.pt --img 640 --batch 1 diff --git a/hubconf.py b/hubconf.py index 93ea84d69dd3..36f3bd86bc11 100644 --- a/hubconf.py +++ b/hubconf.py @@ -1,4 +1,6 @@ -"""YOLOv5 PyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5/ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +PyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5/ Usage: import torch diff --git a/models/common.py b/models/common.py index 35790804ca52..fe4319b0f370 100644 --- a/models/common.py +++ b/models/common.py @@ -1,11 +1,14 @@ -# YOLOv5 common modules +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Common modules +""" import logging +import math import warnings from copy import copy from pathlib import Path -import math import numpy as np import pandas as pd import requests diff --git a/models/experimental.py b/models/experimental.py index 5c690cce3d99..7dfaf9611bec 100644 --- a/models/experimental.py +++ b/models/experimental.py @@ -1,10 +1,13 @@ -# YOLOv5 experimental modules +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Experimental modules +""" import numpy as np import torch import torch.nn as nn -from models.common import Conv, DWConv +from models.common import Conv from utils.downloads import attempt_download diff --git a/models/hub/anchors.yaml b/models/hub/anchors.yaml index 57512955ac1f..e4d7beb06e07 100644 --- a/models/hub/anchors.yaml +++ b/models/hub/anchors.yaml @@ -1,4 +1,5 @@ -# Default YOLOv5 anchors for COCO data +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# Default anchors for COCO data # P5 ------------------------------------------------------------------------------------------------------------------- diff --git a/models/hub/yolov3-spp.yaml b/models/hub/yolov3-spp.yaml index ddc0549f50d6..c66982158ce8 100644 --- a/models/hub/yolov3-spp.yaml +++ b/models/hub/yolov3-spp.yaml @@ -1,3 +1,5 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + # Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple diff --git a/models/hub/yolov3-tiny.yaml b/models/hub/yolov3-tiny.yaml index 537ad755b166..b28b44315248 100644 --- a/models/hub/yolov3-tiny.yaml +++ b/models/hub/yolov3-tiny.yaml @@ -1,3 +1,5 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + # Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple diff --git a/models/hub/yolov3.yaml b/models/hub/yolov3.yaml index 3adfc2c6d2f9..4f4b240e6c36 100644 --- a/models/hub/yolov3.yaml +++ b/models/hub/yolov3.yaml @@ -1,3 +1,5 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + # Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple diff --git a/models/hub/yolov5-bifpn.yaml b/models/hub/yolov5-bifpn.yaml index 69f7b5938c58..119aebb1523a 100644 --- a/models/hub/yolov5-bifpn.yaml +++ b/models/hub/yolov5-bifpn.yaml @@ -1,3 +1,5 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + # Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple diff --git a/models/hub/yolov5-fpn.yaml b/models/hub/yolov5-fpn.yaml index 217e4ca6ac96..707b2136cee1 100644 --- a/models/hub/yolov5-fpn.yaml +++ b/models/hub/yolov5-fpn.yaml @@ -1,3 +1,5 @@ +# 
YOLOv5 🚀 by Ultralytics, GPL-3.0 license + # Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple diff --git a/models/hub/yolov5-p2.yaml b/models/hub/yolov5-p2.yaml index 6a932a868229..44d8da55dafb 100644 --- a/models/hub/yolov5-p2.yaml +++ b/models/hub/yolov5-p2.yaml @@ -1,3 +1,5 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + # Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple diff --git a/models/hub/yolov5-p6.yaml b/models/hub/yolov5-p6.yaml index 58b86b0ca892..85e142539ce3 100644 --- a/models/hub/yolov5-p6.yaml +++ b/models/hub/yolov5-p6.yaml @@ -1,3 +1,5 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + # Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple diff --git a/models/hub/yolov5-p7.yaml b/models/hub/yolov5-p7.yaml index f6e8fc7928cc..88a7a95cbbd1 100644 --- a/models/hub/yolov5-p7.yaml +++ b/models/hub/yolov5-p7.yaml @@ -1,3 +1,5 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + # Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple diff --git a/models/hub/yolov5-panet.yaml b/models/hub/yolov5-panet.yaml index c5f3b4817102..76b9b7e74e33 100644 --- a/models/hub/yolov5-panet.yaml +++ b/models/hub/yolov5-panet.yaml @@ -1,3 +1,5 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + # Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple diff --git a/models/hub/yolov5l6.yaml b/models/hub/yolov5l6.yaml index d5afd7d84100..1288f15f940b 100644 --- a/models/hub/yolov5l6.yaml +++ b/models/hub/yolov5l6.yaml @@ -1,3 +1,5 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + # Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple diff --git a/models/hub/yolov5m6.yaml b/models/hub/yolov5m6.yaml index 16a841a0b4b0..f14f0b0ebcce 100644 --- a/models/hub/yolov5m6.yaml +++ b/models/hub/yolov5m6.yaml @@ -1,3 +1,5 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + # Parameters nc: 80 # number of classes depth_multiple: 0.67 # model depth multiple diff --git a/models/hub/yolov5s-ghost.yaml b/models/hub/yolov5s-ghost.yaml index d99d56d26e85..dbf2c8e03489 100644 --- a/models/hub/yolov5s-ghost.yaml +++ b/models/hub/yolov5s-ghost.yaml @@ -1,3 +1,5 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + # Parameters nc: 80 # number of classes depth_multiple: 0.33 # model depth multiple diff --git a/models/hub/yolov5s-transformer.yaml b/models/hub/yolov5s-transformer.yaml index b999ebb7583d..aeac1acb0582 100644 --- a/models/hub/yolov5s-transformer.yaml +++ b/models/hub/yolov5s-transformer.yaml @@ -1,3 +1,5 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + # Parameters nc: 80 # number of classes depth_multiple: 0.33 # model depth multiple diff --git a/models/hub/yolov5s6.yaml b/models/hub/yolov5s6.yaml index 2fb245050053..2baee5af9e05 100644 --- a/models/hub/yolov5s6.yaml +++ b/models/hub/yolov5s6.yaml @@ -1,3 +1,5 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + # Parameters nc: 80 # number of classes depth_multiple: 0.33 # model depth multiple diff --git a/models/hub/yolov5x6.yaml b/models/hub/yolov5x6.yaml index c5187101072b..e94f592fc19a 100644 --- a/models/hub/yolov5x6.yaml +++ b/models/hub/yolov5x6.yaml @@ -1,3 +1,5 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + # Parameters nc: 80 # number of classes depth_multiple: 1.33 # model depth multiple diff --git a/models/yolo.py b/models/yolo.py index 88adb71f8fea..f3c1516f49f7 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -1,4 +1,6 @@ -"""YOLOv5-specific modules +# YOLOv5 🚀 by Ultralytics, GPL-3.0 
license +""" +YOLO-specific modules Usage: $ python path/to/models/yolo.py --cfg yolov5s.yaml diff --git a/models/yolov5l.yaml b/models/yolov5l.yaml index 0c130c1514af..30b22a25a483 100644 --- a/models/yolov5l.yaml +++ b/models/yolov5l.yaml @@ -1,3 +1,5 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + # Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple diff --git a/models/yolov5m.yaml b/models/yolov5m.yaml index e477b3433d39..f5f518ad8ab3 100644 --- a/models/yolov5m.yaml +++ b/models/yolov5m.yaml @@ -1,3 +1,5 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + # Parameters nc: 80 # number of classes depth_multiple: 0.67 # model depth multiple diff --git a/models/yolov5s.yaml b/models/yolov5s.yaml index e85442dc9188..b311ab7fd50a 100644 --- a/models/yolov5s.yaml +++ b/models/yolov5s.yaml @@ -1,3 +1,5 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + # Parameters nc: 80 # number of classes depth_multiple: 0.33 # model depth multiple diff --git a/models/yolov5x.yaml b/models/yolov5x.yaml index c7ca03589ab8..7dcb822b8b84 100644 --- a/models/yolov5x.yaml +++ b/models/yolov5x.yaml @@ -1,3 +1,5 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + # Parameters nc: 80 # number of classes depth_multiple: 1.33 # model depth multiple diff --git a/train.py b/train.py index 24152f1a1198..0aa7a13628dc 100644 --- a/train.py +++ b/train.py @@ -1,4 +1,6 @@ -"""Train a YOLOv5 model on a custom dataset +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Train a YOLOv5 model on a custom dataset Usage: $ python path/to/train.py --data coco128.yaml --weights yolov5s.pt --img 640 @@ -6,6 +8,7 @@ import argparse import logging +import math import os import random import sys @@ -13,7 +16,6 @@ from copy import deepcopy from pathlib import Path -import math import numpy as np import torch import torch.distributed as dist diff --git a/utils/activations.py b/utils/activations.py index 92a3b5eaa54b..62eb532b3f95 100644 --- a/utils/activations.py +++ b/utils/activations.py @@ -1,4 +1,7 @@ -# Activation functions +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Activation functions +""" import torch import torch.nn as nn diff --git a/utils/augmentations.py b/utils/augmentations.py index cf64f2f9db1f..49f957e6fd62 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -1,10 +1,13 @@ -# YOLOv5 image augmentation functions +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Image augmentation functions +""" import logging +import math import random import cv2 -import math import numpy as np from utils.general import colorstr, segment2box, resample_segments, check_version diff --git a/utils/autoanchor.py b/utils/autoanchor.py index eca1d5be8ebe..66a2712dfd5d 100644 --- a/utils/autoanchor.py +++ b/utils/autoanchor.py @@ -1,4 +1,7 @@ -# Auto-anchor utils +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Auto-anchor utils +""" import random diff --git a/utils/callbacks.py b/utils/callbacks.py index a204ec1ceaaf..19c334430b5d 100644 --- a/utils/callbacks.py +++ b/utils/callbacks.py @@ -1,4 +1,8 @@ -#!/usr/bin/env python +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Callback utils +""" + class Callbacks: """" diff --git a/utils/datasets.py b/utils/datasets.py index b402723f9c49..7d831cd63230 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -1,4 +1,7 @@ -# YOLOv5 dataset utils and dataloaders +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Dataloaders and dataset utils +""" import glob import hashlib diff --git a/utils/downloads.py b/utils/downloads.py index 6b2c37433b5b..27cb899cd606 
100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -1,4 +1,7 @@ -# Download utils +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Download utils +""" import os import platform diff --git a/utils/flask_rest_api/README.md b/utils/flask_rest_api/README.md index 6c835936dde6..a726acbd9204 100644 --- a/utils/flask_rest_api/README.md +++ b/utils/flask_rest_api/README.md @@ -1,9 +1,13 @@ # Flask REST API -[REST](https://en.wikipedia.org/wiki/Representational_state_transfer) [API](https://en.wikipedia.org/wiki/API)s are commonly used to expose Machine Learning (ML) models to other services. This folder contains an example REST API created using Flask to expose the YOLOv5s model from [PyTorch Hub](https://pytorch.org/hub/ultralytics_yolov5/). + +[REST](https://en.wikipedia.org/wiki/Representational_state_transfer) [API](https://en.wikipedia.org/wiki/API)s are +commonly used to expose Machine Learning (ML) models to other services. This folder contains an example REST API +created using Flask to expose the YOLOv5s model from [PyTorch Hub](https://pytorch.org/hub/ultralytics_yolov5/). ## Requirements [Flask](https://palletsprojects.com/p/flask/) is required. Install with: + ```shell $ pip install Flask ``` @@ -65,4 +69,5 @@ The model inference results are returned as a JSON response: ] ``` -An example python script to perform inference using [requests](https://docs.python-requests.org/en/master/) is given in `example_request.py` +An example python script to perform inference using [requests](https://docs.python-requests.org/en/master/) is given +in `example_request.py` diff --git a/utils/general.py b/utils/general.py index 4fc32f5691fc..850ca6ba0b1f 100755 --- a/utils/general.py +++ b/utils/general.py @@ -1,8 +1,12 @@ -# YOLOv5 general utils +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +General utils +""" import contextlib import glob import logging +import math import os import platform import random @@ -16,7 +20,6 @@ from subprocess import check_output import cv2 -import math import numpy as np import pandas as pd import pkg_resources as pkg diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index d40c0c350fde..3d67e9307b4c 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -1,4 +1,8 @@ -# YOLOv5 experiment logging utils +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Logging utils +""" + import warnings from threading import Thread diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index 019aebf094e1..4631e8a1f8fd 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -507,4 +507,4 @@ def all_logging_disabled(highest_level=logging.CRITICAL): try: yield finally: - logging.disable(previous_level) \ No newline at end of file + logging.disable(previous_level) diff --git a/utils/loss.py b/utils/loss.py index 79e8f24359c1..29aac3191c10 100644 --- a/utils/loss.py +++ b/utils/loss.py @@ -1,4 +1,7 @@ -# Loss functions +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Loss functions +""" import torch import torch.nn as nn diff --git a/utils/metrics.py b/utils/metrics.py index c94c4a76a964..ddc425910a75 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -1,9 +1,12 @@ -# Model validation metrics +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Model validation metrics +""" +import math import warnings from pathlib import Path -import math import matplotlib.pyplot as plt import numpy as np import torch diff --git a/utils/plots.py b/utils/plots.py index 71e90b00241d..76c161a13d1a 
100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -1,4 +1,7 @@ -# Plotting utils +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Plotting utils +""" import math from copy import copy diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 628f672a010d..dff0617e87c9 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -1,7 +1,11 @@ -# YOLOv5 PyTorch utils +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +PyTorch utils +""" import datetime import logging +import math import os import platform import subprocess @@ -10,7 +14,6 @@ from copy import deepcopy from pathlib import Path -import math import torch import torch.backends.cudnn as cudnn import torch.distributed as dist diff --git a/val.py b/val.py index 4c1d7d26b0de..cbee8cf1c026 100644 --- a/val.py +++ b/val.py @@ -1,4 +1,6 @@ -"""Validate a trained YOLOv5 model accuracy on a custom dataset +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Validate a trained YOLOv5 model accuracy on a custom dataset Usage: $ python path/to/val.py --data coco128.yaml --weights yolov5s.pt --img 640 From 01cdb7671b82be8dfa9e0bf47af2ab7554825bb0 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 15 Aug 2021 14:28:35 +0200 Subject: [PATCH 252/757] Add `SPPF()` layer (#4420) * Add `SPPF()` layer * Cleanup * Add credit --- models/common.py | 20 +++++++++++++++++++- models/yolo.py | 10 ++++++---- 2 files changed, 25 insertions(+), 5 deletions(-) diff --git a/models/common.py b/models/common.py index fe4319b0f370..e1f5aea3abed 100644 --- a/models/common.py +++ b/models/common.py @@ -161,7 +161,7 @@ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): class SPP(nn.Module): - # Spatial pyramid pooling layer used in YOLOv3-SPP + # Spatial Pyramid Pooling (SPP) layer https://arxiv.org/abs/1406.4729 def __init__(self, c1, c2, k=(5, 9, 13)): super().__init__() c_ = c1 // 2 # hidden channels @@ -176,6 +176,24 @@ def forward(self, x): return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1)) +class SPPF(nn.Module): + # Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher + def __init__(self, c1, c2, k=5): # equivalent to SPP(k=(5, 9, 13)) + super().__init__() + c_ = c1 // 2 # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c_ * 4, c2, 1, 1) + self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2) + + def forward(self, x): + x = self.cv1(x) + with warnings.catch_warnings(): + warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning + y1 = self.m(x) + y2 = self.m(y1) + return self.cv2(torch.cat([x, y1, y2, self.m(y2)], 1)) + + class Focus(nn.Module): # Focus wh information into c-space def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups diff --git a/models/yolo.py b/models/yolo.py index f3c1516f49f7..dee6032d069d 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -237,8 +237,8 @@ def parse_model(d, ch): # model_dict, input_channels(3) pass n = n_ = max(round(n * gd), 1) if n > 1 else n # depth gain - if m in [Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP, - C3, C3TR, C3SPP, C3Ghost]: + if m in [Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, SPPF, DWConv, MixConv2d, Focus, CrossConv, + BottleneckCSP, C3, C3TR, C3SPP, C3Ghost]: c1, c2 = ch[f], args[0] if c2 != no: # if not output c2 = make_divisible(c2 * gw, 8) @@ -279,6 +279,7 @@ def parse_model(d, ch): # model_dict, input_channels(3) parser = argparse.ArgumentParser() parser.add_argument('--cfg', type=str, 
default='yolov5s.yaml', help='model.yaml') parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') + parser.add_argument('--profile', action='store_true', help='profile model speed') opt = parser.parse_args() opt.cfg = check_file(opt.cfg) # check file set_logging() @@ -289,8 +290,9 @@ def parse_model(d, ch): # model_dict, input_channels(3) model.train() # Profile - # img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 320, 320).to(device) - # y = model(img, profile=True) + if opt.profile: + img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 640, 640).to(device) + y = model(img, profile=True) # Tensorboard (not working https://github.com/ultralytics/yolov5/issues/2898) # from torch.utils.tensorboard import SummaryWriter From 4e65052f28b1184b9d463c1e44b3a79b95113904 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 15 Aug 2021 16:41:57 +0200 Subject: [PATCH 253/757] Created using Colaboratory --- tutorial.ipynb | 424 +++++++++++++------------------------------------ 1 file changed, 106 insertions(+), 318 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 7cef01b6b651..ba6d19113a93 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -6,7 +6,6 @@ "name": "YOLOv5 Tutorial", "provenance": [], "collapsed_sections": [], - "toc_visible": true, "include_colab_link": true }, "kernelspec": { @@ -16,7 +15,7 @@ "accelerator": "GPU", "widgets": { "application/vnd.jupyter.widget-state+json": { - "2e915d9016c846e095e382b6a02ee773": { + "484511f272e64eab8b42e68dac5f7a66": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "model_module_version": "1.5.0", @@ -29,15 +28,16 @@ "_view_count": null, "_view_module_version": "1.5.0", "box_style": "", - "layout": "IPY_MODEL_cb7fc3a5c6cc4fde8d2c83e594a7c86e", + "layout": "IPY_MODEL_78cceec059784f2bb36988d3336e4d56", "_model_module": "@jupyter-widgets/controls", "children": [ - "IPY_MODEL_ac3edef4e3434f4587e6cbf8aa048770", - "IPY_MODEL_853ac234cc2a4236946fc516871e10eb" + "IPY_MODEL_ab93d8b65c134605934ff9ec5efb1bb6", + "IPY_MODEL_30df865ded4c434191bce772c9a82f3a", + "IPY_MODEL_20cdc61eb3404f42a12b37901b0d85fb" ] } }, - "cb7fc3a5c6cc4fde8d2c83e594a7c86e": { + "78cceec059784f2bb36988d3336e4d56": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -89,15 +89,36 @@ "left": null } }, - "ac3edef4e3434f4587e6cbf8aa048770": { + "ab93d8b65c134605934ff9ec5efb1bb6": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_view_name": "HTMLView", + "style": "IPY_MODEL_2d7239993a9645b09b221405ac682743", + "_dom_classes": [], + "description": "", + "_model_name": "HTMLModel", + "placeholder": "​", + "_view_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "value": "100%", + "_view_count": null, + "_view_module_version": "1.5.0", + "description_tooltip": null, + "_model_module": "@jupyter-widgets/controls", + "layout": "IPY_MODEL_17b5a87f92104ec7ab96bf507637d0d2" + } + }, + "30df865ded4c434191bce772c9a82f3a": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "model_module_version": "1.5.0", "state": { "_view_name": "ProgressView", - "style": "IPY_MODEL_13842ca90c0047e584b8d68d99dad2b1", + "style": "IPY_MODEL_2358bfb2270247359e94b066b3cc3d1f", "_dom_classes": [], - "description": "100%", + "description": "", "_model_name": "FloatProgressModel", "bar_style": "success", "max": 818322941, @@ -110,99 +131,31 @@ 
"min": 0, "description_tooltip": null, "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_f454999c3a924c7bad0746fb453dec36" + "layout": "IPY_MODEL_3e984405db654b0b83b88b2db08baffd" } }, - "853ac234cc2a4236946fc516871e10eb": { + "20cdc61eb3404f42a12b37901b0d85fb": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_view_name": "HTMLView", - "style": "IPY_MODEL_f94a7ca8c1f04761bf38fdc5f99664b8", + "style": "IPY_MODEL_654d8a19b9f949c6bbdaf8b0875c931e", "_dom_classes": [], "description": "", "_model_name": "HTMLModel", "placeholder": "​", "_view_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", - "value": " 780M/780M [03:59<00:00, 3.42MB/s]", + "value": " 780M/780M [00:33<00:00, 24.4MB/s]", "_view_count": null, "_view_module_version": "1.5.0", "description_tooltip": null, "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_9da1a23b042c41618dd14b0e30aa7cbe" + "layout": "IPY_MODEL_896030c5d13b415aaa05032818d81a6e" } }, - "13842ca90c0047e584b8d68d99dad2b1": { - "model_module": "@jupyter-widgets/controls", - "model_name": "ProgressStyleModel", - "model_module_version": "1.5.0", - "state": { - "_view_name": "StyleView", - "_model_name": "ProgressStyleModel", - "description_width": "initial", - "_view_module": "@jupyter-widgets/base", - "_model_module_version": "1.5.0", - "_view_count": null, - "_view_module_version": "1.2.0", - "bar_color": null, - "_model_module": "@jupyter-widgets/controls" - } - }, - "f454999c3a924c7bad0746fb453dec36": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "model_module_version": "1.2.0", - "state": { - "_view_name": "LayoutView", - "grid_template_rows": null, - "right": null, - "justify_content": null, - "_view_module": "@jupyter-widgets/base", - "overflow": null, - "_model_module_version": "1.2.0", - "_view_count": null, - "flex_flow": null, - "width": null, - "min_width": null, - "border": null, - "align_items": null, - "bottom": null, - "_model_module": "@jupyter-widgets/base", - "top": null, - "grid_column": null, - "overflow_y": null, - "overflow_x": null, - "grid_auto_flow": null, - "grid_area": null, - "grid_template_columns": null, - "flex": null, - "_model_name": "LayoutModel", - "justify_items": null, - "grid_row": null, - "max_height": null, - "align_content": null, - "visibility": null, - "align_self": null, - "height": null, - "min_height": null, - "padding": null, - "grid_auto_rows": null, - "grid_gap": null, - "max_width": null, - "order": null, - "_view_module_version": "1.2.0", - "grid_template_areas": null, - "object_position": null, - "object_fit": null, - "grid_auto_columns": null, - "margin": null, - "display": null, - "left": null - } - }, - "f94a7ca8c1f04761bf38fdc5f99664b8": { + "2d7239993a9645b09b221405ac682743": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", @@ -217,7 +170,7 @@ "_model_module": "@jupyter-widgets/controls" } }, - "9da1a23b042c41618dd14b0e30aa7cbe": { + "17b5a87f92104ec7ab96bf507637d0d2": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -269,132 +222,14 @@ "left": null } }, - "6ff8a710ded44391a624dec5c460b771": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HBoxModel", - "model_module_version": "1.5.0", - "state": { - "_view_name": "HBoxView", - "_dom_classes": [], - "_model_name": "HBoxModel", - "_view_module": 
"@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_view_count": null, - "_view_module_version": "1.5.0", - "box_style": "", - "layout": "IPY_MODEL_3c19729b51cd45d4848035da06e96ff8", - "_model_module": "@jupyter-widgets/controls", - "children": [ - "IPY_MODEL_23b2f0ae3d46438c8de375987c77f580", - "IPY_MODEL_dd9498c321a9422da6faf17a0be026d4" - ] - } - }, - "3c19729b51cd45d4848035da06e96ff8": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "model_module_version": "1.2.0", - "state": { - "_view_name": "LayoutView", - "grid_template_rows": null, - "right": null, - "justify_content": null, - "_view_module": "@jupyter-widgets/base", - "overflow": null, - "_model_module_version": "1.2.0", - "_view_count": null, - "flex_flow": null, - "width": null, - "min_width": null, - "border": null, - "align_items": null, - "bottom": null, - "_model_module": "@jupyter-widgets/base", - "top": null, - "grid_column": null, - "overflow_y": null, - "overflow_x": null, - "grid_auto_flow": null, - "grid_area": null, - "grid_template_columns": null, - "flex": null, - "_model_name": "LayoutModel", - "justify_items": null, - "grid_row": null, - "max_height": null, - "align_content": null, - "visibility": null, - "align_self": null, - "height": null, - "min_height": null, - "padding": null, - "grid_auto_rows": null, - "grid_gap": null, - "max_width": null, - "order": null, - "_view_module_version": "1.2.0", - "grid_template_areas": null, - "object_position": null, - "object_fit": null, - "grid_auto_columns": null, - "margin": null, - "display": null, - "left": null - } - }, - "23b2f0ae3d46438c8de375987c77f580": { - "model_module": "@jupyter-widgets/controls", - "model_name": "FloatProgressModel", - "model_module_version": "1.5.0", - "state": { - "_view_name": "ProgressView", - "style": "IPY_MODEL_d8dda4b2ce864fd682e558b9a48f602e", - "_dom_classes": [], - "description": "100%", - "_model_name": "FloatProgressModel", - "bar_style": "success", - "max": 6984509, - "_view_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "value": 6984509, - "_view_count": null, - "_view_module_version": "1.5.0", - "orientation": "horizontal", - "min": 0, - "description_tooltip": null, - "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_ff8151449e444a14869684212b9ab14e" - } - }, - "dd9498c321a9422da6faf17a0be026d4": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HTMLModel", - "model_module_version": "1.5.0", - "state": { - "_view_name": "HTMLView", - "style": "IPY_MODEL_0f84fe609bcf4aa9afdc32a8cf076909", - "_dom_classes": [], - "description": "", - "_model_name": "HTMLModel", - "placeholder": "​", - "_view_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "value": " 6.66M/6.66M [00:01<00:00, 6.08MB/s]", - "_view_count": null, - "_view_module_version": "1.5.0", - "description_tooltip": null, - "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_8fda673769984e2b928ef820d34c85c3" - } - }, - "d8dda4b2ce864fd682e558b9a48f602e": { + "2358bfb2270247359e94b066b3cc3d1f": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "model_module_version": "1.5.0", "state": { "_view_name": "StyleView", "_model_name": "ProgressStyleModel", - "description_width": "initial", + "description_width": "", "_view_module": "@jupyter-widgets/base", "_model_module_version": "1.5.0", "_view_count": null, @@ -403,7 +238,7 @@ "_model_module": "@jupyter-widgets/controls" } }, - 
"ff8151449e444a14869684212b9ab14e": { + "3e984405db654b0b83b88b2db08baffd": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -455,7 +290,7 @@ "left": null } }, - "0f84fe609bcf4aa9afdc32a8cf076909": { + "654d8a19b9f949c6bbdaf8b0875c931e": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", @@ -470,7 +305,7 @@ "_model_module": "@jupyter-widgets/controls" } }, - "8fda673769984e2b928ef820d34c85c3": { + "896030c5d13b415aaa05032818d81a6e": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -567,7 +402,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "ada1dd8d-e0aa-4858-e893-dc320319ca30" + "outputId": "4d67116a-43e9-4d84-d19e-1edd83f23a04" }, "source": [ "!git clone https://github.com/ultralytics/yolov5 # clone repo\n", @@ -580,7 +415,7 @@ "clear_output()\n", "print(f\"Setup complete. Using torch {torch.__version__} ({torch.cuda.get_device_properties(0).name if torch.cuda.is_available() else 'CPU'})\")" ], - "execution_count": null, + "execution_count": 1, "outputs": [ { "output_type": "stream", @@ -619,25 +454,26 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "a7a37616-a82b-4bdb-a463-6ead850b5615" + "outputId": "8b728908-81ab-4861-edb0-4d0c46c439fb" }, "source": [ + "%rm -rf runs\n", "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images/\n", - "Image(filename='runs/detect/exp/zidane.jpg', width=600)" + "#Image(filename='runs/detect/exp/zidane.jpg', width=600)" ], - "execution_count": null, + "execution_count": 4, "outputs": [ { "output_type": "stream", "text": [ "\u001b[34m\u001b[1mdetect: \u001b[0mweights=['yolov5s.pt'], source=data/images/, imgsz=640, conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/detect, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False\n", - "YOLOv5 🚀 v5.0-330-g18f6ba7 torch 1.9.0+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", + "YOLOv5 🚀 v5.0-367-g01cdb76 torch 1.9.0+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", "\n", "Fusing layers... \n", "Model Summary: 224 layers, 7266973 parameters, 0 gradients\n", - "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 1 fire hydrant, Done. (0.008s)\n", - "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, Done. (0.008s)\n", - "Results saved to runs/detect/exp\n", + "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 1 fire hydrant, Done. (0.007s)\n", + "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, Done. (0.007s)\n", + "Results saved to \u001b[1mruns/detect/exp\u001b[0m\n", "Done. 
(0.091s)\n" ], "name": "stdout" @@ -680,49 +516,45 @@ "id": "WQPtK1QYVaD_", "colab": { "base_uri": "https://localhost:8080/", - "height": 66, + "height": 48, "referenced_widgets": [ - "2e915d9016c846e095e382b6a02ee773", - "cb7fc3a5c6cc4fde8d2c83e594a7c86e", - "ac3edef4e3434f4587e6cbf8aa048770", - "853ac234cc2a4236946fc516871e10eb", - "13842ca90c0047e584b8d68d99dad2b1", - "f454999c3a924c7bad0746fb453dec36", - "f94a7ca8c1f04761bf38fdc5f99664b8", - "9da1a23b042c41618dd14b0e30aa7cbe" + "484511f272e64eab8b42e68dac5f7a66", + "78cceec059784f2bb36988d3336e4d56", + "ab93d8b65c134605934ff9ec5efb1bb6", + "30df865ded4c434191bce772c9a82f3a", + "20cdc61eb3404f42a12b37901b0d85fb", + "2d7239993a9645b09b221405ac682743", + "17b5a87f92104ec7ab96bf507637d0d2", + "2358bfb2270247359e94b066b3cc3d1f", + "3e984405db654b0b83b88b2db08baffd", + "654d8a19b9f949c6bbdaf8b0875c931e", + "896030c5d13b415aaa05032818d81a6e" ] }, - "outputId": "3606f305-aa67-43fd-d5d6-93d1f311768c" + "outputId": "7e6f5c96-c819-43e1-cd03-d3b9878cf8de" }, "source": [ "# Download COCO val2017\n", "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017val.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../datasets && rm tmp.zip" ], - "execution_count": null, + "execution_count": 5, "outputs": [ { "output_type": "display_data", "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "2e915d9016c846e095e382b6a02ee773", + "model_id": "484511f272e64eab8b42e68dac5f7a66", "version_minor": 0, "version_major": 2 }, "text/plain": [ - "HBox(children=(FloatProgress(value=0.0, max=818322941.0), HTML(value='')))" + " 0%| | 0.00/780M [00:00 Date: Sun, 15 Aug 2021 18:32:41 +0200 Subject: [PATCH 254/757] Remove DDP process group timeout (#4422) --- train.py | 2 +- utils/torch_utils.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/train.py b/train.py index 0aa7a13628dc..275e0a4b1a8e 100644 --- a/train.py +++ b/train.py @@ -493,7 +493,7 @@ def main(opt): assert not opt.sync_bn, '--sync-bn known training issue, see https://github.com/ultralytics/yolov5/issues/3998' torch.cuda.set_device(LOCAL_RANK) device = torch.device('cuda', LOCAL_RANK) - dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo", timeout=timedelta(seconds=60)) + dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") # Train if not opt.evolve: diff --git a/utils/torch_utils.py b/utils/torch_utils.py index dff0617e87c9..2eb51d80f34e 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -35,10 +35,10 @@ def torch_distributed_zero_first(local_rank: int): Decorator to make all processes in distributed training wait for each local_master to do something. 
""" if local_rank not in [-1, 0]: - dist.barrier() + dist.barrier(device_ids=[local_rank]) yield if local_rank == 0: - dist.barrier() + dist.barrier(device_ids=[0]) def init_torch_seeds(seed=0): From dbc06ce29298de4a55d73fa37362dd51a03035ed Mon Sep 17 00:00:00 2001 From: Omid Sadeghnezhad <58780720+OmidSa75@users.noreply.github.com> Date: Mon, 16 Aug 2021 13:28:02 +0430 Subject: [PATCH 255/757] Update hubconf.py attempt_load import (#4428) --- hubconf.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hubconf.py b/hubconf.py index 36f3bd86bc11..799c83ec8400 100644 --- a/hubconf.py +++ b/hubconf.py @@ -27,7 +27,8 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo """ from pathlib import Path - from models.yolo import Model, attempt_load + from models.yolo import Model + from models.experimental import attempt_load from utils.general import check_requirements, set_logging from utils.downloads import attempt_download from utils.torch_utils import select_device From f3e3f7603fca56e52f3f055d8bbb5847a73e3e78 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 16 Aug 2021 17:25:06 +0200 Subject: [PATCH 256/757] TFLite prep (#4436) --- detect.py | 3 ++- utils/general.py | 13 ++++++++----- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/detect.py b/detect.py index 49ebbe96c068..cdac4f213790 100644 --- a/detect.py +++ b/detect.py @@ -67,7 +67,8 @@ def run(weights='yolov5s.pt', # model.pt path(s) # Load model w = weights[0] if isinstance(weights, list) else weights - classify, pt, onnx = False, w.endswith('.pt'), w.endswith('.onnx') # inference type + classify, suffix = False, Path(w).suffix.lower() + pt, onnx, tflite, pb, graph_def = (suffix == x for x in ['.pt', '.onnx', '.tflite', '.pb', '']) # backend stride, names = 64, [f'class{i}' for i in range(1000)] # assign defaults if pt: model = attempt_load(weights, map_location=device) # load FP32 model diff --git a/utils/general.py b/utils/general.py index 850ca6ba0b1f..0b6e8fc7fb9a 100755 --- a/utils/general.py +++ b/utils/general.py @@ -203,11 +203,14 @@ def check_requirements(requirements='requirements.txt', exclude=()): print(emojis(s)) -def check_img_size(img_size, s=32, floor=0): - # Verify img_size is a multiple of stride s - new_size = max(make_divisible(img_size, int(s)), floor) # ceil gs-multiple - if new_size != img_size: - print(f'WARNING: --img-size {img_size} must be multiple of max stride {s}, updating to {new_size}') +def check_img_size(imgsz, s=32, floor=0): + # Verify image size is a multiple of stride s in each dimension + if isinstance(imgsz, int): # integer i.e. img_size=640 + new_size = max(make_divisible(imgsz, int(s)), floor) + else: # list i.e. 
img_size=[640, 480] + new_size = [max(make_divisible(x, int(s)), floor) for x in imgsz] + if new_size != imgsz: + print(f'WARNING: --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}') return new_size From 808bcad3bb952f4976aca63f95af8855bc227090 Mon Sep 17 00:00:00 2001 From: Jiacong Fang Date: Tue, 17 Aug 2021 19:18:16 +0800 Subject: [PATCH 257/757] Add TensorFlow and TFLite export (#1127) * Add models/tf.py for TensorFlow and TFLite export * Set auto=False for int8 calibration * Update requirements.txt for TensorFlow and TFLite export * Read anchors directly from PyTorch weights * Add --tf-nms to append NMS in TensorFlow SavedModel and GraphDef export * Remove check_anchor_order, check_file, set_logging from import * Reformat code and optimize imports * Autodownload model and check cfg * update --source path, img-size to 320, single output * Adjust representative_dataset * Put representative dataset in tfl_int8 block * detect.py TF inference * weights to string * weights to string * cleanup tf.py * Add --dynamic-batch-size * Add xywh normalization to reduce calibration error * Update requirements.txt TensorFlow 2.3.1 -> 2.4.0 to avoid int8 quantization error * Fix imports Move C3 from models.experimental to models.common * Add models/tf.py for TensorFlow and TFLite export * Set auto=False for int8 calibration * Update requirements.txt for TensorFlow and TFLite export * Read anchors directly from PyTorch weights * Add --tf-nms to append NMS in TensorFlow SavedModel and GraphDef export * Remove check_anchor_order, check_file, set_logging from import * Reformat code and optimize imports * Autodownload model and check cfg * update --source path, img-size to 320, single output * Adjust representative_dataset * detect.py TF inference * Put representative dataset in tfl_int8 block * weights to string * weights to string * cleanup tf.py * Add --dynamic-batch-size * Add xywh normalization to reduce calibration error * Update requirements.txt TensorFlow 2.3.1 -> 2.4.0 to avoid int8 quantization error * Fix imports Move C3 from models.experimental to models.common * implement C3() and SiLU() * Fix reshape dim to support dynamic batching * Add epsilon argument in tf_BN, which is different between TF and PT * Set stride to None if not using PyTorch, and do not warmup without PyTorch * Add list support in check_img_size() * Add list input support in detect.py * sys.path.append('./') to run from yolov5/ * Add int8 quantization support for TensorFlow 2.5 * Add get_coco128.sh * Remove --no-tfl-detect in models/tf.py (Use tf-android-tfl-detect branch for EdgeTPU) * Update requirements.txt * Replace torch.load() with attempt_load() * Update requirements.txt * Add --tf-raw-resize to set half_pixel_centers=False * Add --agnostic-nms for TF class-agnostic NMS * Cleanup after merge * Cleanup2 after merge * Cleanup3 after merge * Add tf.py docstring with credit and usage * pb saved_model and tflite use only one model in detect.py * Add use cases in docstring of tf.py * Remove redundant `stride` definition * Remove keras direct import * Fix `check_requirements(('tensorflow>=2.4.1',))` Co-authored-by: Glenn Jocher --- detect.py | 64 ++++- models/experimental.py | 8 +- models/tf.py | 558 +++++++++++++++++++++++++++++++++++++++++ requirements.txt | 1 + utils/datasets.py | 12 +- 5 files changed, 626 insertions(+), 17 deletions(-) create mode 100644 models/tf.py diff --git a/detect.py b/detect.py index cdac4f213790..a2331e23b43e 100644 --- a/detect.py +++ b/detect.py @@ -12,6 +12,7 @@ from 
pathlib import Path import cv2 +import numpy as np import torch import torch.backends.cudnn as cudnn @@ -51,6 +52,7 @@ def run(weights='yolov5s.pt', # model.pt path(s) hide_labels=False, # hide labels hide_conf=False, # hide confidences half=False, # use FP16 half-precision inference + tfl_int8=False, # INT8 quantized TFLite model ): save_img = not nosave and not source.endswith('.txt') # save inference images webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith( @@ -68,7 +70,7 @@ def run(weights='yolov5s.pt', # model.pt path(s) # Load model w = weights[0] if isinstance(weights, list) else weights classify, suffix = False, Path(w).suffix.lower() - pt, onnx, tflite, pb, graph_def = (suffix == x for x in ['.pt', '.onnx', '.tflite', '.pb', '']) # backend + pt, onnx, tflite, pb, saved_model = (suffix == x for x in ['.pt', '.onnx', '.tflite', '.pb', '']) # backend stride, names = 64, [f'class{i}' for i in range(1000)] # assign defaults if pt: model = attempt_load(weights, map_location=device) # load FP32 model @@ -83,30 +85,49 @@ def run(weights='yolov5s.pt', # model.pt path(s) check_requirements(('onnx', 'onnxruntime')) import onnxruntime session = onnxruntime.InferenceSession(w, None) + else: # TensorFlow models + check_requirements(('tensorflow>=2.4.1',)) + import tensorflow as tf + if pb: # https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt + def wrap_frozen_graph(gd, inputs, outputs): + x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=""), []) # wrapped import + return x.prune(tf.nest.map_structure(x.graph.as_graph_element, inputs), + tf.nest.map_structure(x.graph.as_graph_element, outputs)) + + graph_def = tf.Graph().as_graph_def() + graph_def.ParseFromString(open(w, 'rb').read()) + frozen_func = wrap_frozen_graph(gd=graph_def, inputs="x:0", outputs="Identity:0") + elif saved_model: + model = tf.keras.models.load_model(w) + elif tflite: + interpreter = tf.lite.Interpreter(model_path=w) # load TFLite model + interpreter.allocate_tensors() # allocate + input_details = interpreter.get_input_details() # inputs + output_details = interpreter.get_output_details() # outputs imgsz = check_img_size(imgsz, s=stride) # check image size # Dataloader if webcam: view_img = check_imshow() cudnn.benchmark = True # set True to speed up constant image size inference - dataset = LoadStreams(source, img_size=imgsz, stride=stride) + dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt) bs = len(dataset) # batch_size else: - dataset = LoadImages(source, img_size=imgsz, stride=stride) + dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt) bs = 1 # batch_size vid_path, vid_writer = [None] * bs, [None] * bs # Run inference if pt and device.type != 'cpu': - model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once + model(torch.zeros(1, 3, *imgsz).to(device).type_as(next(model.parameters()))) # run once t0 = time.time() for path, img, im0s, vid_cap in dataset: - if pt: + if onnx: + img = img.astype('float32') + else: img = torch.from_numpy(img).to(device) img = img.half() if half else img.float() # uint8 to fp16/32 - elif onnx: - img = img.astype('float32') - img /= 255.0 # 0 - 255 to 0.0 - 1.0 + img = img / 255.0 # 0 - 255 to 0.0 - 1.0 if len(img.shape) == 3: img = img[None] # expand for batch dim @@ -117,6 +138,27 @@ def run(weights='yolov5s.pt', # model.pt path(s) pred = model(img, augment=augment, visualize=visualize)[0] elif onnx: pred = 
torch.tensor(session.run([session.get_outputs()[0].name], {session.get_inputs()[0].name: img})) + else: # tensorflow model (tflite, pb, saved_model) + imn = img.permute(0, 2, 3, 1).cpu().numpy() # image in numpy + if pb: + pred = frozen_func(x=tf.constant(imn)).numpy() + elif saved_model: + pred = model(imn, training=False).numpy() + elif tflite: + if tfl_int8: + scale, zero_point = input_details[0]['quantization'] + imn = (imn / scale + zero_point).astype(np.uint8) + interpreter.set_tensor(input_details[0]['index'], imn) + interpreter.invoke() + pred = interpreter.get_tensor(output_details[0]['index']) + if tfl_int8: + scale, zero_point = output_details[0]['quantization'] + pred = (pred.astype(np.float32) - zero_point) * scale + pred[..., 0] *= imgsz[1] # x + pred[..., 1] *= imgsz[0] # y + pred[..., 2] *= imgsz[1] # w + pred[..., 3] *= imgsz[0] # h + pred = torch.tensor(pred) # NMS pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det) @@ -202,9 +244,9 @@ def run(weights='yolov5s.pt', # model.pt path(s) def parse_opt(): parser = argparse.ArgumentParser() - parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)') + parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pb', help='model.pt path(s)') parser.add_argument('--source', type=str, default='data/images', help='file/dir/URL/glob, 0 for webcam') - parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)') + parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w') parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold') parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold') parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image') @@ -226,7 +268,9 @@ def parse_opt(): parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels') parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences') parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') + parser.add_argument('--tfl-int8', action='store_true', help='INT8 quantized TFLite model') opt = parser.parse_args() + opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand return opt diff --git a/models/experimental.py b/models/experimental.py index 7dfaf9611bec..e25a4e1779fa 100644 --- a/models/experimental.py +++ b/models/experimental.py @@ -85,14 +85,18 @@ def forward(self, x, augment=False, profile=False, visualize=False): return y, None # inference, train output -def attempt_load(weights, map_location=None, inplace=True): +def attempt_load(weights, map_location=None, inplace=True, fuse=True): from models.yolo import Detect, Model # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a model = Ensemble() for w in weights if isinstance(weights, list) else [weights]: ckpt = torch.load(attempt_download(w), map_location=map_location) # load - model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval()) # FP32 model + if fuse: + model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval()) # FP32 model + else: + model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().eval()) # without layer fuse + # Compatibility updates for m in model.modules(): diff --git a/models/tf.py b/models/tf.py new file mode 100644 
index 000000000000..40e7d20a9d84 --- /dev/null +++ b/models/tf.py @@ -0,0 +1,558 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +TensorFlow/Keras and TFLite versions of YOLOv5 +Authored by https://github.com/zldrobit in PR https://github.com/ultralytics/yolov5/pull/1127 + +Usage: + $ python models/tf.py --weights yolov5s.pt --cfg yolov5s.yaml + +Export int8 TFLite models: + $ python models/tf.py --weights yolov5s.pt --cfg models/yolov5s.yaml --tfl-int8 \ + --source path/to/images/ --ncalib 100 + +Detection: + $ python detect.py --weights yolov5s.pb --img 320 + $ python detect.py --weights yolov5s_saved_model --img 320 + $ python detect.py --weights yolov5s-fp16.tflite --img 320 + $ python detect.py --weights yolov5s-int8.tflite --img 320 --tfl-int8 + +For TensorFlow.js: + $ python models/tf.py --weights yolov5s.pt --cfg models/yolov5s.yaml --img 320 --tf-nms --agnostic-nms + $ pip install tensorflowjs + $ tensorflowjs_converter \ + --input_format=tf_frozen_model \ + --output_node_names='Identity,Identity_1,Identity_2,Identity_3' \ + yolov5s.pb \ + web_model + $ # Edit web_model/model.json to sort Identity* in ascending order + $ cd .. && git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example + $ npm install + $ ln -s ../../yolov5/web_model public/web_model + $ npm start +""" + +import argparse +import logging +import os +import sys +import traceback +from copy import deepcopy +from pathlib import Path + +sys.path.append('./') # to run '$ python *.py' files in subdirectories + +import numpy as np +import tensorflow as tf +import torch +import torch.nn as nn +import yaml +from tensorflow import keras +from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 + +from models.common import Conv, Bottleneck, SPP, DWConv, Focus, BottleneckCSP, Concat, autopad, C3 +from models.experimental import MixConv2d, CrossConv, attempt_load +from models.yolo import Detect +from utils.datasets import LoadImages +from utils.general import make_divisible, check_file, check_dataset + +logger = logging.getLogger(__name__) + + +class tf_BN(keras.layers.Layer): + # TensorFlow BatchNormalization wrapper + def __init__(self, w=None): + super(tf_BN, self).__init__() + self.bn = keras.layers.BatchNormalization( + beta_initializer=keras.initializers.Constant(w.bias.numpy()), + gamma_initializer=keras.initializers.Constant(w.weight.numpy()), + moving_mean_initializer=keras.initializers.Constant(w.running_mean.numpy()), + moving_variance_initializer=keras.initializers.Constant(w.running_var.numpy()), + epsilon=w.eps) + + def call(self, inputs): + return self.bn(inputs) + + +class tf_Pad(keras.layers.Layer): + def __init__(self, pad): + super(tf_Pad, self).__init__() + self.pad = tf.constant([[0, 0], [pad, pad], [pad, pad], [0, 0]]) + + def call(self, inputs): + return tf.pad(inputs, self.pad, mode='constant', constant_values=0) + + +class tf_Conv(keras.layers.Layer): + # Standard convolution + def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None): + # ch_in, ch_out, weights, kernel, stride, padding, groups + super(tf_Conv, self).__init__() + assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument" + assert isinstance(k, int), "Convolution with multiple kernels are not allowed." + # TensorFlow convolution padding is inconsistent with PyTorch (e.g. 
k=3 s=2 'SAME' padding) + # see https://stackoverflow.com/questions/52975843/comparing-conv2d-with-padding-between-tensorflow-and-pytorch + + conv = keras.layers.Conv2D( + c2, k, s, 'SAME' if s == 1 else 'VALID', use_bias=False, + kernel_initializer=keras.initializers.Constant(w.conv.weight.permute(2, 3, 1, 0).numpy())) + self.conv = conv if s == 1 else keras.Sequential([tf_Pad(autopad(k, p)), conv]) + self.bn = tf_BN(w.bn) if hasattr(w, 'bn') else tf.identity + + # YOLOv5 activations + if isinstance(w.act, nn.LeakyReLU): + self.act = (lambda x: keras.activations.relu(x, alpha=0.1)) if act else tf.identity + elif isinstance(w.act, nn.Hardswish): + self.act = (lambda x: x * tf.nn.relu6(x + 3) * 0.166666667) if act else tf.identity + elif isinstance(w.act, nn.SiLU): + self.act = (lambda x: keras.activations.swish(x)) if act else tf.identity + + def call(self, inputs): + return self.act(self.bn(self.conv(inputs))) + + +class tf_Focus(keras.layers.Layer): + # Focus wh information into c-space + def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None): + # ch_in, ch_out, kernel, stride, padding, groups + super(tf_Focus, self).__init__() + self.conv = tf_Conv(c1 * 4, c2, k, s, p, g, act, w.conv) + + def call(self, inputs): # x(b,w,h,c) -> y(b,w/2,h/2,4c) + # inputs = inputs / 255. # normalize 0-255 to 0-1 + return self.conv(tf.concat([inputs[:, ::2, ::2, :], + inputs[:, 1::2, ::2, :], + inputs[:, ::2, 1::2, :], + inputs[:, 1::2, 1::2, :]], 3)) + + +class tf_Bottleneck(keras.layers.Layer): + # Standard bottleneck + def __init__(self, c1, c2, shortcut=True, g=1, e=0.5, w=None): # ch_in, ch_out, shortcut, groups, expansion + super(tf_Bottleneck, self).__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = tf_Conv(c1, c_, 1, 1, w=w.cv1) + self.cv2 = tf_Conv(c_, c2, 3, 1, g=g, w=w.cv2) + self.add = shortcut and c1 == c2 + + def call(self, inputs): + return inputs + self.cv2(self.cv1(inputs)) if self.add else self.cv2(self.cv1(inputs)) + + +class tf_Conv2d(keras.layers.Layer): + # Substitution for PyTorch nn.Conv2D + def __init__(self, c1, c2, k, s=1, g=1, bias=True, w=None): + super(tf_Conv2d, self).__init__() + assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument" + self.conv = keras.layers.Conv2D( + c2, k, s, 'VALID', use_bias=bias, + kernel_initializer=keras.initializers.Constant(w.weight.permute(2, 3, 1, 0).numpy()), + bias_initializer=keras.initializers.Constant(w.bias.numpy()) if bias else None, ) + + def call(self, inputs): + return self.conv(inputs) + + +class tf_BottleneckCSP(keras.layers.Layer): + # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None): + # ch_in, ch_out, number, shortcut, groups, expansion + super(tf_BottleneckCSP, self).__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = tf_Conv(c1, c_, 1, 1, w=w.cv1) + self.cv2 = tf_Conv2d(c1, c_, 1, 1, bias=False, w=w.cv2) + self.cv3 = tf_Conv2d(c_, c_, 1, 1, bias=False, w=w.cv3) + self.cv4 = tf_Conv(2 * c_, c2, 1, 1, w=w.cv4) + self.bn = tf_BN(w.bn) + self.act = lambda x: keras.activations.relu(x, alpha=0.1) + self.m = keras.Sequential([tf_Bottleneck(c_, c_, shortcut, g, e=1.0, w=w.m[j]) for j in range(n)]) + + def call(self, inputs): + y1 = self.cv3(self.m(self.cv1(inputs))) + y2 = self.cv2(inputs) + return self.cv4(self.act(self.bn(tf.concat((y1, y2), axis=3)))) + + +class tf_C3(keras.layers.Layer): + # CSP Bottleneck with 3 convolutions + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None): + # 
ch_in, ch_out, number, shortcut, groups, expansion + super(tf_C3, self).__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = tf_Conv(c1, c_, 1, 1, w=w.cv1) + self.cv2 = tf_Conv(c1, c_, 1, 1, w=w.cv2) + self.cv3 = tf_Conv(2 * c_, c2, 1, 1, w=w.cv3) + self.m = keras.Sequential([tf_Bottleneck(c_, c_, shortcut, g, e=1.0, w=w.m[j]) for j in range(n)]) + + def call(self, inputs): + return self.cv3(tf.concat((self.m(self.cv1(inputs)), self.cv2(inputs)), axis=3)) + + +class tf_SPP(keras.layers.Layer): + # Spatial pyramid pooling layer used in YOLOv3-SPP + def __init__(self, c1, c2, k=(5, 9, 13), w=None): + super(tf_SPP, self).__init__() + c_ = c1 // 2 # hidden channels + self.cv1 = tf_Conv(c1, c_, 1, 1, w=w.cv1) + self.cv2 = tf_Conv(c_ * (len(k) + 1), c2, 1, 1, w=w.cv2) + self.m = [keras.layers.MaxPool2D(pool_size=x, strides=1, padding='SAME') for x in k] + + def call(self, inputs): + x = self.cv1(inputs) + return self.cv2(tf.concat([x] + [m(x) for m in self.m], 3)) + + +class tf_Detect(keras.layers.Layer): + def __init__(self, nc=80, anchors=(), ch=(), w=None): # detection layer + super(tf_Detect, self).__init__() + self.stride = tf.convert_to_tensor(w.stride.numpy(), dtype=tf.float32) + self.nc = nc # number of classes + self.no = nc + 5 # number of outputs per anchor + self.nl = len(anchors) # number of detection layers + self.na = len(anchors[0]) // 2 # number of anchors + self.grid = [tf.zeros(1)] * self.nl # init grid + self.anchors = tf.convert_to_tensor(w.anchors.numpy(), dtype=tf.float32) + self.anchor_grid = tf.reshape(tf.convert_to_tensor(w.anchor_grid.numpy(), dtype=tf.float32), + [self.nl, 1, -1, 1, 2]) + self.m = [tf_Conv2d(x, self.no * self.na, 1, w=w.m[i]) for i, x in enumerate(ch)] + self.export = False # onnx export + self.training = True # set to False after building model + for i in range(self.nl): + ny, nx = opt.img_size[0] // self.stride[i], opt.img_size[1] // self.stride[i] + self.grid[i] = self._make_grid(nx, ny) + + def call(self, inputs): + # x = x.copy() # for profiling + z = [] # inference output + self.training |= self.export + x = [] + for i in range(self.nl): + x.append(self.m[i](inputs[i])) + # x(bs,20,20,255) to x(bs,3,20,20,85) + ny, nx = opt.img_size[0] // self.stride[i], opt.img_size[1] // self.stride[i] + x[i] = tf.transpose(tf.reshape(x[i], [-1, ny * nx, self.na, self.no]), [0, 2, 1, 3]) + + if not self.training: # inference + y = tf.sigmoid(x[i]) + xy = (y[..., 0:2] * 2. 
- 0.5 + self.grid[i]) * self.stride[i] # xy + wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] + # Normalize xywh to 0-1 to reduce calibration error + xy /= tf.constant([[opt.img_size[1], opt.img_size[0]]], dtype=tf.float32) + wh /= tf.constant([[opt.img_size[1], opt.img_size[0]]], dtype=tf.float32) + y = tf.concat([xy, wh, y[..., 4:]], -1) + z.append(tf.reshape(y, [-1, 3 * ny * nx, self.no])) + + return x if self.training else (tf.concat(z, 1), x) + + @staticmethod + def _make_grid(nx=20, ny=20): + # yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)]) + # return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float() + xv, yv = tf.meshgrid(tf.range(nx), tf.range(ny)) + return tf.cast(tf.reshape(tf.stack([xv, yv], 2), [1, 1, ny * nx, 2]), dtype=tf.float32) + + +class tf_Upsample(keras.layers.Layer): + def __init__(self, size, scale_factor, mode, w=None): + super(tf_Upsample, self).__init__() + assert scale_factor == 2, "scale_factor must be 2" + # self.upsample = keras.layers.UpSampling2D(size=scale_factor, interpolation=mode) + if opt.tf_raw_resize: + # with default arguments: align_corners=False, half_pixel_centers=False + self.upsample = lambda x: tf.raw_ops.ResizeNearestNeighbor(images=x, + size=(x.shape[1] * 2, x.shape[2] * 2)) + else: + self.upsample = lambda x: tf.image.resize(x, (x.shape[1] * 2, x.shape[2] * 2), method=mode) + + def call(self, inputs): + return self.upsample(inputs) + + +class tf_Concat(keras.layers.Layer): + def __init__(self, dimension=1, w=None): + super(tf_Concat, self).__init__() + assert dimension == 1, "convert only NCHW to NHWC concat" + self.d = 3 + + def call(self, inputs): + return tf.concat(inputs, self.d) + + +def parse_model(d, ch, model): # model_dict, input_channels(3) + logger.info('\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments')) + anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'] + na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors + no = na * (nc + 5) # number of outputs = anchors * (classes + 5) + + layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out + for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args + m_str = m + m = eval(m) if isinstance(m, str) else m # eval strings + for j, a in enumerate(args): + try: + args[j] = eval(a) if isinstance(a, str) else a # eval strings + except: + pass + + n = max(round(n * gd), 1) if n > 1 else n # depth gain + if m in [nn.Conv2d, Conv, Bottleneck, SPP, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP, C3]: + c1, c2 = ch[f], args[0] + c2 = make_divisible(c2 * gw, 8) if c2 != no else c2 + + args = [c1, c2, *args[1:]] + if m in [BottleneckCSP, C3]: + args.insert(2, n) + n = 1 + elif m is nn.BatchNorm2d: + args = [ch[f]] + elif m is Concat: + c2 = sum([ch[-1 if x == -1 else x + 1] for x in f]) + elif m is Detect: + args.append([ch[x + 1] for x in f]) + if isinstance(args[1], int): # number of anchors + args[1] = [list(range(args[1] * 2))] * len(f) + else: + c2 = ch[f] + + tf_m = eval('tf_' + m_str.replace('nn.', '')) + m_ = keras.Sequential([tf_m(*args, w=model.model[i][j]) for j in range(n)]) if n > 1 \ + else tf_m(*args, w=model.model[i]) # module + + torch_m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args) # module + t = str(m)[8:-2].replace('__main__.', '') # module type + np = sum([x.numel() for x in torch_m_.parameters()]) # number params + m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, 
type, number params + logger.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n, np, t, args)) # print + save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist + layers.append(m_) + ch.append(c2) + return keras.Sequential(layers), sorted(save) + + +class tf_Model(): + def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, model=None): # model, input channels, number of classes + super(tf_Model, self).__init__() + if isinstance(cfg, dict): + self.yaml = cfg # model dict + else: # is *.yaml + import yaml # for torch hub + self.yaml_file = Path(cfg).name + with open(cfg) as f: + self.yaml = yaml.load(f, Loader=yaml.FullLoader) # model dict + + # Define model + if nc and nc != self.yaml['nc']: + print('Overriding %s nc=%g with nc=%g' % (cfg, self.yaml['nc'], nc)) + self.yaml['nc'] = nc # override yaml value + self.model, self.savelist = parse_model(deepcopy(self.yaml), ch=[ch], model=model) # model, savelist, ch_out + + def predict(self, inputs, profile=False): + y = [] # outputs + x = inputs + for i, m in enumerate(self.model.layers): + if m.f != -1: # if not from previous layer + x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers + + x = m(x) # run + y.append(x if m.i in self.savelist else None) # save output + + # Add TensorFlow NMS + if opt.tf_nms: + boxes = xywh2xyxy(x[0][..., :4]) + probs = x[0][:, :, 4:5] + classes = x[0][:, :, 5:] + scores = probs * classes + if opt.agnostic_nms: + nms = agnostic_nms_layer()((boxes, classes, scores)) + return nms, x[1] + else: + boxes = tf.expand_dims(boxes, 2) + nms = tf.image.combined_non_max_suppression( + boxes, scores, opt.topk_per_class, opt.topk_all, opt.iou_thres, opt.score_thres, clip_boxes=False) + return nms, x[1] + + return x[0] # output only first tensor [1,6300,85] = [xywh, conf, class0, class1, ...] + # x = x[0][0] # [x(1,6300,85), ...] 
to x(6300,85) + # xywh = x[..., :4] # x(6300,4) boxes + # conf = x[..., 4:5] # x(6300,1) confidences + # cls = tf.reshape(tf.cast(tf.argmax(x[..., 5:], axis=1), tf.float32), (-1, 1)) # x(6300,1) classes + # return tf.concat([conf, cls, xywh], 1) + + +class agnostic_nms_layer(keras.layers.Layer): + # wrap map_fn to avoid TypeSpec related error https://stackoverflow.com/a/65809989/3036450 + def call(self, input): + return tf.map_fn(agnostic_nms, input, + fn_output_signature=(tf.float32, tf.float32, tf.float32, tf.int32), + name='agnostic_nms') + + +def agnostic_nms(x): + boxes, classes, scores = x + class_inds = tf.cast(tf.argmax(classes, axis=-1), tf.float32) + scores_inp = tf.reduce_max(scores, -1) + selected_inds = tf.image.non_max_suppression( + boxes, scores_inp, max_output_size=opt.topk_all, iou_threshold=opt.iou_thres, score_threshold=opt.score_thres) + selected_boxes = tf.gather(boxes, selected_inds) + padded_boxes = tf.pad(selected_boxes, + paddings=[[0, opt.topk_all - tf.shape(selected_boxes)[0]], [0, 0]], + mode="CONSTANT", constant_values=0.0) + selected_scores = tf.gather(scores_inp, selected_inds) + padded_scores = tf.pad(selected_scores, + paddings=[[0, opt.topk_all - tf.shape(selected_boxes)[0]]], + mode="CONSTANT", constant_values=-1.0) + selected_classes = tf.gather(class_inds, selected_inds) + padded_classes = tf.pad(selected_classes, + paddings=[[0, opt.topk_all - tf.shape(selected_boxes)[0]]], + mode="CONSTANT", constant_values=-1.0) + valid_detections = tf.shape(selected_inds)[0] + return padded_boxes, padded_scores, padded_classes, valid_detections + + +def xywh2xyxy(xywh): + # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right + x, y, w, h = tf.split(xywh, num_or_size_splits=4, axis=-1) + return tf.concat([x - w / 2, y - h / 2, x + w / 2, y + h / 2], axis=-1) + + +def representative_dataset_gen(): + # Representative dataset for use with converter.representative_dataset + n = 0 + for path, img, im0s, vid_cap in dataset: + # Get sample input data as a numpy array in a method of your choosing. 
+ n += 1 + input = np.transpose(img, [1, 2, 0]) + input = np.expand_dims(input, axis=0).astype(np.float32) + input /= 255.0 + yield [input] + if n >= opt.ncalib: + break + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='cfg path') + parser.add_argument('--weights', type=str, default='yolov5s.pt', help='weights path') + parser.add_argument('--img-size', nargs='+', type=int, default=[320, 320], help='image size') # height, width + parser.add_argument('--batch-size', type=int, default=1, help='batch size') + parser.add_argument('--dynamic-batch-size', action='store_true', help='dynamic batch size') + parser.add_argument('--source', type=str, default='../data/coco128.yaml', help='dir of images or data.yaml file') + parser.add_argument('--ncalib', type=int, default=100, help='number of calibration images') + parser.add_argument('--tfl-int8', action='store_true', dest='tfl_int8', help='export TFLite int8 model') + parser.add_argument('--tf-nms', action='store_true', dest='tf_nms', help='TF NMS (without TFLite export)') + parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') + parser.add_argument('--tf-raw-resize', action='store_true', dest='tf_raw_resize', + help='use tf.raw_ops.ResizeNearestNeighbor for resize') + parser.add_argument('--topk-per-class', type=int, default=100, help='topk per class to keep in NMS') + parser.add_argument('--topk-all', type=int, default=100, help='topk for all classes to keep in NMS') + parser.add_argument('--iou-thres', type=float, default=0.5, help='IOU threshold for NMS') + parser.add_argument('--score-thres', type=float, default=0.4, help='score threshold for NMS') + opt = parser.parse_args() + opt.cfg = check_file(opt.cfg) # check file + opt.img_size *= 2 if len(opt.img_size) == 1 else 1 # expand + print(opt) + + # Input + img = torch.zeros((opt.batch_size, 3, *opt.img_size)) # image size(1,3,320,192) iDetection + + # Load PyTorch model + model = attempt_load(opt.weights, map_location=torch.device('cpu'), inplace=True, fuse=False) + model.model[-1].export = False # set Detect() layer export=True + y = model(img) # dry run + nc = y[0].shape[-1] - 5 + + # TensorFlow saved_model export + try: + print('\nStarting TensorFlow saved_model export with TensorFlow %s...' % tf.__version__) + tf_model = tf_Model(opt.cfg, model=model, nc=nc) + img = tf.zeros((opt.batch_size, *opt.img_size, 3)) # NHWC Input for TensorFlow + + m = tf_model.model.layers[-1] + assert isinstance(m, tf_Detect), "the last layer must be Detect" + m.training = False + y = tf_model.predict(img) + + inputs = keras.Input(shape=(*opt.img_size, 3), batch_size=None if opt.dynamic_batch_size else opt.batch_size) + keras_model = keras.Model(inputs=inputs, outputs=tf_model.predict(inputs)) + keras_model.summary() + path = opt.weights.replace('.pt', '_saved_model') # filename + keras_model.save(path, save_format='tf') + print('TensorFlow saved_model export success, saved as %s' % path) + except Exception as e: + print('TensorFlow saved_model export failure: %s' % e) + traceback.print_exc(file=sys.stdout) + + # TensorFlow GraphDef export + try: + print('\nStarting TensorFlow GraphDef export with TensorFlow %s...' 
% tf.__version__) + + # https://github.com/leimao/Frozen_Graph_TensorFlow + full_model = tf.function(lambda x: keras_model(x)) + full_model = full_model.get_concrete_function( + tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype)) + + frozen_func = convert_variables_to_constants_v2(full_model) + frozen_func.graph.as_graph_def() + f = opt.weights.replace('.pt', '.pb') # filename + tf.io.write_graph(graph_or_graph_def=frozen_func.graph, + logdir=os.path.dirname(f), + name=os.path.basename(f), + as_text=False) + + print('TensorFlow GraphDef export success, saved as %s' % f) + except Exception as e: + print('TensorFlow GraphDef export failure: %s' % e) + traceback.print_exc(file=sys.stdout) + + # TFLite model export + if not opt.tf_nms: + try: + print('\nStarting TFLite export with TensorFlow %s...' % tf.__version__) + + # fp32 TFLite model export --------------------------------------------------------------------------------- + # converter = tf.lite.TFLiteConverter.from_keras_model(keras_model) + # converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS] + # converter.allow_custom_ops = False + # converter.experimental_new_converter = True + # tflite_model = converter.convert() + # f = opt.weights.replace('.pt', '.tflite') # filename + # open(f, "wb").write(tflite_model) + + # fp16 TFLite model export --------------------------------------------------------------------------------- + converter = tf.lite.TFLiteConverter.from_keras_model(keras_model) + converter.optimizations = [tf.lite.Optimize.DEFAULT] + # converter.representative_dataset = representative_dataset_gen + # converter.target_spec.supported_types = [tf.float16] + converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS] + converter.allow_custom_ops = False + converter.experimental_new_converter = True + tflite_model = converter.convert() + f = opt.weights.replace('.pt', '-fp16.tflite') # filename + open(f, "wb").write(tflite_model) + print('\nTFLite export success, saved as %s' % f) + + # int8 TFLite model export --------------------------------------------------------------------------------- + if opt.tfl_int8: + # Representative Dataset + if opt.source.endswith('.yaml'): + with open(check_file(opt.source)) as f: + data = yaml.load(f, Loader=yaml.FullLoader) # data dict + check_dataset(data) # check + opt.source = data['train'] + dataset = LoadImages(opt.source, img_size=opt.img_size, auto=False) + converter = tf.lite.TFLiteConverter.from_keras_model(keras_model) + converter.optimizations = [tf.lite.Optimize.DEFAULT] + converter.representative_dataset = representative_dataset_gen + converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] + converter.inference_input_type = tf.uint8 # or tf.int8 + converter.inference_output_type = tf.uint8 # or tf.int8 + converter.allow_custom_ops = False + converter.experimental_new_converter = True + converter.experimental_new_quantizer = False + tflite_model = converter.convert() + f = opt.weights.replace('.pt', '-int8.tflite') # filename + open(f, "wb").write(tflite_model) + print('\nTFLite (int8) export success, saved as %s' % f) + + except Exception as e: + print('\nTFLite export failure: %s' % e) + traceback.print_exc(file=sys.stdout) diff --git a/requirements.txt b/requirements.txt index f1629eafc65a..f6361d591f1b 100755 --- a/requirements.txt +++ b/requirements.txt @@ -23,6 +23,7 @@ pandas # coremltools>=4.1 # onnx>=1.9.0 # scikit-learn==0.19.2 # for coreml quantization +# tensorflow==2.4.1 # for TFLite export # extras 
-------------------------------------- # Cython # for pycocotools https://github.com/cocodataset/cocoapi/issues/172 diff --git a/utils/datasets.py b/utils/datasets.py index 7d831cd63230..52b028994325 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -155,7 +155,7 @@ def __iter__(self): class LoadImages: # for inference - def __init__(self, path, img_size=640, stride=32): + def __init__(self, path, img_size=640, stride=32, auto=True): p = str(Path(path).absolute()) # os-agnostic absolute path if '*' in p: files = sorted(glob.glob(p, recursive=True)) # glob @@ -176,6 +176,7 @@ def __init__(self, path, img_size=640, stride=32): self.nf = ni + nv # number of files self.video_flag = [False] * ni + [True] * nv self.mode = 'image' + self.auto = auto if any(videos): self.new_video(videos[0]) # new video else: @@ -217,7 +218,7 @@ def __next__(self): print(f'image {self.count}/{self.nf} {path}: ', end='') # Padded resize - img = letterbox(img0, self.img_size, stride=self.stride)[0] + img = letterbox(img0, self.img_size, stride=self.stride, auto=self.auto)[0] # Convert img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB @@ -276,7 +277,7 @@ def __len__(self): class LoadStreams: # multiple IP or RTSP cameras - def __init__(self, sources='streams.txt', img_size=640, stride=32): + def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True): self.mode = 'stream' self.img_size = img_size self.stride = stride @@ -290,6 +291,7 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32): n = len(sources) self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n self.sources = [clean_str(x) for x in sources] # clean source names for later + self.auto = auto for i, s in enumerate(sources): # index, source # Start thread to read frames from video stream print(f'{i + 1}/{n}: {s}... ', end='') @@ -312,7 +314,7 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32): print('') # newline # check for common shapes - s = np.stack([letterbox(x, self.img_size, stride=self.stride)[0].shape for x in self.imgs], 0) # shapes + s = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0].shape for x in self.imgs], 0) # shapes self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal if not self.rect: print('WARNING: Different stream shapes detected. 
For optimal performance supply similarly-shaped streams.') @@ -341,7 +343,7 @@ def __next__(self): # Letterbox img0 = self.imgs.copy() - img = [letterbox(x, self.img_size, auto=self.rect, stride=self.stride)[0] for x in img0] + img = [letterbox(x, self.img_size, stride=self.stride, auto=self.rect and self.auto)[0] for x in img0] # Stack img = np.stack(img, 0) From 6dd7dd8dd3c27ac8b986578f91fa14aab12357d7 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 17 Aug 2021 23:29:07 +0200 Subject: [PATCH 258/757] Fix default `--weights yolov5s.pt` (#4458) --- detect.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/detect.py b/detect.py index a2331e23b43e..601d5daf9852 100644 --- a/detect.py +++ b/detect.py @@ -244,7 +244,7 @@ def wrap_frozen_graph(gd, inputs, outputs): def parse_opt(): parser = argparse.ArgumentParser() - parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pb', help='model.pt path(s)') + parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)') parser.add_argument('--source', type=str, default='data/images', help='file/dir/URL/glob, 0 for webcam') parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w') parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold') From 1d65e8194d183f4e1537cf64a7292f8ab57d1d55 Mon Sep 17 00:00:00 2001 From: "Huu Quan, CAP" Date: Wed, 18 Aug 2021 19:07:09 +0900 Subject: [PATCH 259/757] Fix missing labels after albumentations (#4455) * fix missing labels after augmentation * Update datasets.py Cleanup Co-authored-by: Huu Quan Co-authored-by: Glenn Jocher --- utils/datasets.py | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/datasets.py b/utils/datasets.py index 52b028994325..25a2ba6f9561 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -568,6 +568,7 @@ def __getitem__(self, index): if self.augment: # Albumentations img, labels = self.albumentations(img, labels) + nl = len(labels) # update after albumentations # HSV color-space augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v']) From d1182c4f29e2141be856b85c5d613480a2fffc5d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 18 Aug 2021 21:16:57 +0200 Subject: [PATCH 260/757] `check_requirements(('coremltools',))` (#4478) * `check_requirements(('coremltools',))` * Update ci-testing.yml * Update ci-testing.yml --- .github/workflows/ci-testing.yml | 4 ++-- export.py | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 02e8f74bf56c..ecd6f9bbd625 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -48,7 +48,7 @@ jobs: run: | python -m pip install --upgrade pip pip install -qr requirements.txt -f https://download.pytorch.org/whl/cpu/torch_stable.html - pip install -q onnx + pip install -q onnx onnx-simplifier coremltools # for export python --version pip --version pip list @@ -76,5 +76,5 @@ jobs: python hubconf.py # hub python models/yolo.py --cfg ${{ matrix.model }}.yaml # inspect - python export.py --img 128 --batch 1 --weights ${{ matrix.model }}.pt # export + python export.py --img 128 --batch 1 --weights ${{ matrix.model }}.pt --include onnx torchscript # export shell: bash diff --git a/export.py b/export.py index db805cb45e6e..674609463a9d 100644 --- a/export.py +++ b/export.py @@ -87,6 +87,7 @@ def export_coreml(model, img, file): # CoreML model export prefix = 
colorstr('CoreML:') try: + check_requirements(('coremltools',)) import coremltools as ct print(f'\n{prefix} starting export with coremltools {ct.__version__}...') From 7316b78e36a004bfe1272c4d7fc63e7e76f90cc8 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Mon, 23 Aug 2021 16:40:07 +0530 Subject: [PATCH 261/757] W&B: Refactor the wandb_utils.py file (#4496) * Improve docstrings and run names * default wandb login prompt with timeout * return key * Update api_key check logic * Properly support zipped dataset feature * update docstring * Revert tuorial change * extend changes to log_dataset * add run name * bug fix * bug fix * Update comment * fix import check * remove unused import * Hardcore .yaml file extension * reduce code * Reformat using pycharm * Remove redundant try catch * More refactoring and bug fixes * retry * Reformat using pycharm * respect LOGGERS include list * Fix * fix * refactor constructor * refactor * refactor * refactor * PyCharm reformat Co-authored-by: Glenn Jocher --- utils/loggers/wandb/wandb_utils.py | 77 +++++++++++++++++------------- 1 file changed, 43 insertions(+), 34 deletions(-) diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index 4631e8a1f8fd..8b2095afcb8b 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -38,6 +38,19 @@ def check_wandb_config_file(data_config_file): return data_config_file +def check_wandb_dataset(data_file): + is_wandb_artifact = False + if check_file(data_file) and data_file.endswith('.yaml'): + with open(data_file, errors='ignore') as f: + data_dict = yaml.safe_load(f) + is_wandb_artifact = (data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX) or + data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX)) + if is_wandb_artifact: + return data_dict + else: + return check_dataset(data_file) + + def get_run_info(run_path): run_path = Path(remove_prefix(run_path, WANDB_ARTIFACT_PREFIX)) run_id = run_path.stem @@ -104,7 +117,7 @@ def __init__(self, opt, run_id, job_type='Training'): - Initialize WandbLogger instance - Upload dataset if opt.upload_dataset is True - Setup trainig processes if job_type is 'Training' - + arguments: opt (namespace) -- Commandline arguments for this run run_id (str) -- Run ID of W&B run to be resumed @@ -147,26 +160,24 @@ def __init__(self, opt, run_id, job_type='Training'): allow_val_change=True) if not wandb.run else wandb.run if self.wandb_run: if self.job_type == 'Training': - if not opt.resume: - if opt.upload_dataset: + if opt.upload_dataset: + if not opt.resume: self.wandb_artifact_data_dict = self.check_and_upload_dataset(opt) - elif opt.data.endswith('_wandb.yaml'): # When dataset is W&B artifact - with open(opt.data, errors='ignore') as f: - data_dict = yaml.safe_load(f) - self.data_dict = data_dict - else: # Local .yaml dataset file or .zip file - self.data_dict = check_dataset(opt.data) + if opt.resume: + # resume from artifact + if isinstance(opt.resume, str) and opt.resume.startswith(WANDB_ARTIFACT_PREFIX): + self.data_dict = dict(self.wandb_run.config.data_dict) + else: # local resume + self.data_dict = check_wandb_dataset(opt.data) else: - self.data_dict = check_dataset(opt.data) + self.data_dict = check_wandb_dataset(opt.data) + self.wandb_artifact_data_dict = self.wandb_artifact_data_dict or self.data_dict - self.setup_training(opt) - if not self.wandb_artifact_data_dict: - self.wandb_artifact_data_dict = self.data_dict - # write data_dict to config. useful for resuming from artifacts. Do this only when not resuming. 
-            if not opt.resume:
+                # write data_dict to config. useful for resuming from artifacts. Do this only when not resuming.
                 self.wandb_run.config.update({'data_dict': self.wandb_artifact_data_dict},
                                              allow_val_change=True)
+            self.setup_training(opt)

         if self.job_type == 'Dataset Creation':
             self.data_dict = self.check_and_upload_dataset(opt)
@@ -174,10 +185,10 @@ def __init__(self, opt, run_id, job_type='Training'):
     def check_and_upload_dataset(self, opt):
         """
         Check if the dataset format is compatible and upload it as W&B artifact
-
+
         arguments:
         opt (namespace)-- Commandline arguments for current run
-
+
         returns:
         Updated dataset info dictionary where local dataset paths are replaced by WANDB_ARTIFACT_PREFIX links.
         """
@@ -196,10 +207,10 @@ def setup_training(self, opt):
        - Attempt to download model checkpoint and dataset artifacts if opt.resume starts with WANDB_ARTIFACT_PREFIX
        - Update data_dict, to contain info of previous run if resumed and the paths of dataset artifact if downloaded
        - Setup log_dict, initialize bbox_interval
-
+
        arguments:
        opt (namespace) -- commandline arguments for this run
-
+
        """
        self.log_dict, self.current_epoch = {}, 0
        self.bbox_interval = opt.bbox_interval
@@ -211,9 +222,7 @@ def setup_training(self, opt):
             opt.weights, opt.save_period, opt.batch_size, opt.bbox_interval, opt.epochs, opt.hyp = str(
                 self.weights), config.save_period, config.batch_size, config.bbox_interval, config.epochs, \
                 config.hyp
-            data_dict = dict(self.wandb_run.config.data_dict)  # eliminates the need for config file to resume
-        else:
-            data_dict = self.data_dict
+        data_dict = self.data_dict
         if self.val_artifact is None:  # If --upload_dataset is set, use the existing artifact, don't download
             self.train_artifact_path, self.train_artifact = self.download_dataset_artifact(data_dict.get('train'),
                                                                                            opt.artifact_alias)
@@ -243,11 +252,11 @@ def setup_training(self, opt):
        download the dataset artifact if the path starts with WANDB_ARTIFACT_PREFIX
-
+
        arguments:
        path -- path of the dataset to be used for training
        alias (str)-- alias of the artifact to be downloaded/used for training
-
+
        returns:
        (str, wandb.Artifact) -- path of the downloaded dataset and its corresponding artifact object if dataset
        is found otherwise returns (None, None)
@@ -263,7 +272,7 @@ def download_dataset_artifact(self, path, alias):
         """
        download the model checkpoint artifact if the resume path starts with WANDB_ARTIFACT_PREFIX
-
+
        arguments:
        opt (namespace) -- Commandline arguments for this run
        """
@@ -281,7 +290,7 @@ def download_model_artifact(self, opt):
         """
        Log the model checkpoint as W&B artifact
-
+
        arguments:
        path (Path) -- Path of directory containing the checkpoints
        opt (namespace) -- Command line arguments for this run
@@ -305,14 +314,14 @@ def log_model(self, path, opt, epoch, fitness_score, best_model=False):
         """
        Log the dataset as W&B artifact and return the new data file with W&B links
-
+
        arguments:
        data_file (str) -- the .yaml file with information about the dataset like - path, classes etc.
        single_class (boolean) -- train multi-class data as single-class
        project (str) -- project name. Used to construct the artifact path
        overwrite_config (boolean) -- overwrites the data.yaml file if set to true otherwise creates a new
        file with _wandb postfix.
Eg -> data_wandb.yaml - + returns: the new .yaml file with artifact links. it can be used to start training directly from artifacts """ @@ -359,12 +368,12 @@ def map_val_table_path(self): def create_dataset_table(self, dataset, class_to_id, name='dataset'): """ Create and return W&B artifact containing W&B Table of the dataset. - + arguments: dataset (LoadImagesAndLabels) -- instance of LoadImagesAndLabels class used to iterate over the data to build Table class_to_id (dict(int, str)) -- hash map that maps class ids to labels name (str) -- name of the artifact - + returns: dataset artifact to be logged or used """ @@ -401,7 +410,7 @@ def create_dataset_table(self, dataset, class_to_id, name='dataset'): def log_training_progress(self, predn, path, names): """ Build evaluation Table. Uses reference from validation dataset table. - + arguments: predn (list): list of predictions in the native space in the format - [xmin, ymin, xmax, ymax, confidence, class] path (str): local path of the current evaluation image @@ -431,7 +440,7 @@ def log_training_progress(self, predn, path, names): def val_one_image(self, pred, predn, path, names, im): """ Log validation data for one image. updates the result Table if validation dataset is uploaded and log bbox media panel - + arguments: pred (list): list of scaled predictions in the format - [xmin, ymin, xmax, ymax, confidence, class] predn (list): list of predictions in the native space - [xmin, ymin, xmax, ymax, confidence, class] @@ -453,7 +462,7 @@ def val_one_image(self, pred, predn, path, names, im): def log(self, log_dict): """ save the metrics to the logging dictionary - + arguments: log_dict (Dict) -- metrics/media to be logged in current step """ @@ -464,7 +473,7 @@ def log(self, log_dict): def end_epoch(self, best_result=False): """ commit the log_dict, model artifacts and Tables to W&B and flush the log_dict. 
- + arguments: best_result (boolean): Boolean representing if the result of this evaluation is best or not """ From 7b1643b5b563596440beccf7d8ed066f51e1cf83 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 23 Aug 2021 14:38:30 +0200 Subject: [PATCH 262/757] Add `install=True` argument to `check_requirements` (#4512) * Add `install=True` argument to `check_requirements` * Update general.py --- utils/general.py | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/utils/general.py b/utils/general.py index 0b6e8fc7fb9a..16244903575a 100755 --- a/utils/general.py +++ b/utils/general.py @@ -172,7 +172,7 @@ def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=Fals @try_except -def check_requirements(requirements='requirements.txt', exclude=()): +def check_requirements(requirements='requirements.txt', exclude=(), install=True): # Check installed dependencies meet requirements (pass *.txt file or list of packages) prefix = colorstr('red', 'bold', 'requirements:') check_python() # check python version @@ -188,13 +188,17 @@ def check_requirements(requirements='requirements.txt', exclude=()): try: pkg.require(r) except Exception as e: # DistributionNotFound or VersionConflict if requirements not met - print(f"{prefix} {r} not found and is required by YOLOv5, attempting auto-update...") - try: - assert check_online(), f"'pip install {r}' skipped (offline)" - print(check_output(f"pip install '{r}'", shell=True).decode()) - n += 1 - except Exception as e: - print(f'{prefix} {e}') + s = f"{prefix} {r} not found and is required by YOLOv5" + if install: + print(f"{s}, attempting auto-update...") + try: + assert check_online(), f"'pip install {r}' skipped (offline)" + print(check_output(f"pip install '{r}'", shell=True).decode()) + n += 1 + except Exception as e: + print(f'{prefix} {e}') + else: + print(f'{s}. Please install and rerun your command.') if n: # if packages updated source = file.resolve() if 'file' in locals() else requirements From 79af1144c270ac7169553d450b9170f9c60f92e4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 23 Aug 2021 17:05:53 +0200 Subject: [PATCH 263/757] Automatic TFLite uint8 determination (#4515) * Auto TFLite uint8 detection This PR automatically determines if TFLite models are uint8 quantized rather than accepting a manual argument. 
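In practice the auto-detection is a single dtype check on the interpreter's input details, paired with de-scaling of
uint8 inputs and re-scaling of uint8 outputs, as the detect.py hunk below shows. A minimal standalone sketch of the
same pattern, assuming a generic TFLite file ('model.tflite' and the dummy input are placeholders, not part of this PR):

    import numpy as np
    import tensorflow as tf

    interpreter = tf.lite.Interpreter(model_path='model.tflite')  # placeholder path
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    int8 = input_details[0]['dtype'] == np.uint8  # is TFLite quantized uint8 model?

    im = np.random.rand(1, 320, 320, 3).astype(np.float32)  # dummy NHWC input in 0-1
    if int8:
        scale, zero_point = input_details[0]['quantization']
        im = (im / scale + zero_point).astype(np.uint8)  # de-scale float -> uint8
    interpreter.set_tensor(input_details[0]['index'], im)
    interpreter.invoke()
    pred = interpreter.get_tensor(output_details[0]['index'])
    if int8:
        scale, zero_point = output_details[0]['quantization']
        pred = (pred.astype(np.float32) - zero_point) * scale  # re-scale uint8 -> float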
The quantization determination is based on @zldrobit comment https://github.com/ultralytics/yolov5/pull/1127#issuecomment-901713847 * Cleanup --- detect.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/detect.py b/detect.py index 601d5daf9852..15ddc1ffb6a4 100644 --- a/detect.py +++ b/detect.py @@ -52,7 +52,6 @@ def run(weights='yolov5s.pt', # model.pt path(s) hide_labels=False, # hide labels hide_conf=False, # hide confidences half=False, # use FP16 half-precision inference - tfl_int8=False, # INT8 quantized TFLite model ): save_img = not nosave and not source.endswith('.txt') # save inference images webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith( @@ -104,6 +103,7 @@ def wrap_frozen_graph(gd, inputs, outputs): interpreter.allocate_tensors() # allocate input_details = interpreter.get_input_details() # inputs output_details = interpreter.get_output_details() # outputs + int8 = input_details[0]['dtype'] == np.uint8 # is TFLite quantized uint8 model imgsz = check_img_size(imgsz, s=stride) # check image size # Dataloader @@ -145,15 +145,15 @@ def wrap_frozen_graph(gd, inputs, outputs): elif saved_model: pred = model(imn, training=False).numpy() elif tflite: - if tfl_int8: + if int8: scale, zero_point = input_details[0]['quantization'] - imn = (imn / scale + zero_point).astype(np.uint8) + imn = (imn / scale + zero_point).astype(np.uint8) # de-scale interpreter.set_tensor(input_details[0]['index'], imn) interpreter.invoke() pred = interpreter.get_tensor(output_details[0]['index']) - if tfl_int8: + if int8: scale, zero_point = output_details[0]['quantization'] - pred = (pred.astype(np.float32) - zero_point) * scale + pred = (pred.astype(np.float32) - zero_point) * scale # re-scale pred[..., 0] *= imgsz[1] # x pred[..., 1] *= imgsz[0] # y pred[..., 2] *= imgsz[1] # w @@ -268,7 +268,6 @@ def parse_opt(): parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels') parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences') parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') - parser.add_argument('--tfl-int8', action='store_true', help='INT8 quantized TFLite model') opt = parser.parse_args() opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand return opt From 2da6444c9251f77cfd3e410369cd067245d961b5 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 25 Aug 2021 21:23:28 +0200 Subject: [PATCH 264/757] Fix for `python models/yolo.py --profile` (#4541) Profiling fix copies input to Detect layer to circumvent inplace changes to the feature maps. --- models/yolo.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/models/yolo.py b/models/yolo.py index dee6032d069d..8618401b3455 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -48,7 +48,6 @@ def __init__(self, nc=80, anchors=(), ch=(), inplace=True): # detection layer self.inplace = inplace # use in-place ops (e.g. 
slice assignment) def forward(self, x): - # x = x.copy() # for profiling z = [] # inference output for i in range(self.nl): x[i] = self.m[i](x[i]) # conv @@ -143,10 +142,11 @@ def forward_once(self, x, profile=False, visualize=False): x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers if profile: - o = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPs + c = isinstance(m, Detect) # copy input as inplace fix + o = thop.profile(m, inputs=(x.copy() if c else x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPs t = time_sync() for _ in range(10): - _ = m(x) + m(x.copy() if c else x) dt.append((time_sync() - t) * 100) if m == self.model[0]: LOGGER.info(f"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s} {'module'}") From 11f85e7e71d91810460a2eee22235a2264b458eb Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 26 Aug 2021 15:51:04 +0200 Subject: [PATCH 265/757] Auto-fix corrupt JPEGs (#4548) * Autofix corrupt JPEGs This PR automatically re-saves corrupt JPEGs and trains with the resaved images. WARNING: this will overwrite the existing corrupt JPEGs in a dataset and replace them with correct JPEGs, though the filesize may increase and the image contents may not be exactly the same due to lossy JPEG compression schemes. Results may vary by JPEG decoder and hardware. Current behavior is to exclude corrupt JPEGs from training with a warning to the user, but many users have been complaining about large parts of their dataset being excluded from training. * Clarify re-save reason --- utils/datasets.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index 25a2ba6f9561..eea8ad348452 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -314,7 +314,7 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True): print('') # newline # check for common shapes - s = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0].shape for x in self.imgs], 0) # shapes + s = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0].shape for x in self.imgs]) self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal if not self.rect: print('WARNING: Different stream shapes detected. 
For optimal performance supply similarly-shaped streams.') @@ -568,7 +568,7 @@ def __getitem__(self, index): if self.augment: # Albumentations img, labels = self.albumentations(img, labels) - nl = len(labels) # update after albumentations + nl = len(labels) # update after albumentations # HSV color-space augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v']) @@ -861,7 +861,7 @@ def autosplit(path='../datasets/coco128/images', weights=(0.9, 0.1, 0.0), annota def verify_image_label(args): # Verify one image-label pair im_file, lb_file, prefix = args - nm, nf, ne, nc = 0, 0, 0, 0 # number missing, found, empty, corrupt + nm, nf, ne, nc, msg, segments = 0, 0, 0, 0, '', [] # number (missing, found, empty, corrupt), message, segments try: # verify images im = Image.open(im_file) @@ -872,10 +872,11 @@ def verify_image_label(args): if im.format.lower() in ('jpg', 'jpeg'): with open(im_file, 'rb') as f: f.seek(-2, 2) - assert f.read() == b'\xff\xd9', 'corrupted JPEG' + if f.read() != b'\xff\xd9': # corrupt JPEG + im.save(im_file, format='JPEG', subsampling=0, quality=100) # re-save image + msg = f'{prefix}WARNING: corrupt JPEG restored and saved {im_file}' # verify labels - segments = [] # instance segments if os.path.isfile(lb_file): nf = 1 # label found with open(lb_file, 'r') as f: @@ -896,7 +897,7 @@ def verify_image_label(args): else: nm = 1 # label missing l = np.zeros((0, 5), dtype=np.float32) - return im_file, l, shape, segments, nm, nf, ne, nc, '' + return im_file, l, shape, segments, nm, nf, ne, nc, msg except Exception as e: nc = 1 msg = f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}' From e899d6e8fbfc990f60a822fdd482b350f2d162a4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 27 Aug 2021 13:01:21 +0200 Subject: [PATCH 266/757] Fix for corrupt JPEGs auto-fix PR (#4560) Auto-fix corrupt JPEGs PR introduced a bug whereby the f.seek() operation read all of the bytes in the image, resulting in the PIL image having nothing to read upon the .save() operation. Fix was to re-open the image using PIL before saving. --- utils/datasets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/datasets.py b/utils/datasets.py index eea8ad348452..852bb7c04aa8 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -873,7 +873,7 @@ def verify_image_label(args): with open(im_file, 'rb') as f: f.seek(-2, 2) if f.read() != b'\xff\xd9': # corrupt JPEG - im.save(im_file, format='JPEG', subsampling=0, quality=100) # re-save image + Image.open(im_file).save(im_file, format='JPEG', subsampling=0, quality=100) # re-save image msg = f'{prefix}WARNING: corrupt JPEG restored and saved {im_file}' # verify labels From 8ac96b797538d6a5e882e56f9a48f3d015bcf952 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 27 Aug 2021 13:23:29 +0200 Subject: [PATCH 267/757] Fix for AP calculation limits 0.0 - 1.0 (#4563) This PR brings alignment in AP computation practices with Detectron2 and MMDetection. 
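Concretely, the old code padded recall with recall[-1] + 0.01, letting the integration run past recall 1.0; the patch
below pins the sentinels to the closed interval [0.0, 1.0]. A condensed, runnable sketch of the patched computation
(the recall/precision arrays are fabricated for illustration, and the real function also returns mpre and mrec):

    import numpy as np

    def compute_ap(recall, precision):
        # Patched sentinels: recall padded to [0.0, 1.0], precision to [1.0, 0.0]
        mrec = np.concatenate(([0.0], recall, [1.0]))
        mpre = np.concatenate(([1.0], precision, [0.0]))
        mpre = np.flip(np.maximum.accumulate(np.flip(mpre)))  # precision envelope
        x = np.linspace(0, 1, 101)  # 101-point interp (COCO)
        return np.trapz(np.interp(x, mrec, mpre), x)  # area under curve

    print(compute_ap(np.array([0.2, 0.6, 0.8]), np.array([1.0, 0.8, 0.5])))  # ~0.74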
Problem first noted by @yusiyoh in https://github.com/ultralytics/yolov5/issues/4546
---
 utils/metrics.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/utils/metrics.py b/utils/metrics.py
index ddc425910a75..44b9a3c16488 100644
--- a/utils/metrics.py
+++ b/utils/metrics.py
@@ -91,8 +91,8 @@ def compute_ap(recall, precision):
     """

     # Append sentinel values to beginning and end
-    mrec = np.concatenate(([0.], recall, [recall[-1] + 0.01]))
-    mpre = np.concatenate(([1.], precision, [0.]))
+    mrec = np.concatenate(([0.0], recall, [1.0]))
+    mpre = np.concatenate(([1.0], precision, [0.0]))

     # Compute the precision envelope
     mpre = np.flip(np.maximum.accumulate(np.flip(mpre)))

From 8b18b66304317276f4bfc7cc7741bd535dc5fa7a Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Fri, 27 Aug 2021 16:00:39 +0200
Subject: [PATCH 268/757] ONNX opset 13 (#4566)

---
 export.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/export.py b/export.py
index 674609463a9d..5db09884bae8 100644
--- a/export.py
+++ b/export.py
@@ -176,7 +176,7 @@ def parse_opt():
     parser.add_argument('--optimize', action='store_true', help='TorchScript: optimize for mobile')
     parser.add_argument('--dynamic', action='store_true', help='ONNX: dynamic axes')
     parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model')
-    parser.add_argument('--opset', type=int, default=12, help='ONNX: opset version')
+    parser.add_argument('--opset', type=int, default=13, help='ONNX: opset version')
     opt = parser.parse_args()
     return opt

From 93cc0157483bf206d23797a4326ce4e1aaab9bea Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Sat, 28 Aug 2021 19:03:52 +0200
Subject: [PATCH 269/757] Add EarlyStopping feature (#4576)

* Add EarlyStopping feature
* Add comment
* Cleanup
* Cleanup2
* debug
* debug2
* debug3
* debug3
* debug4
* debug5
* debug6
* debug7
* debug8
* debug9
* debug10
* debug11
* debug12
* Cleanup
* Add TODO for known DDP issue

---
 train.py             | 19 ++++++++++++++++++-
 utils/torch_utils.py | 17 +++++++++++++++++
 2 files changed, 35 insertions(+), 1 deletion(-)

diff --git a/train.py b/train.py
index 275e0a4b1a8e..a6c34cbc466c 100644
--- a/train.py
+++ b/train.py
@@ -40,7 +40,8 @@
 from utils.downloads import attempt_download
 from utils.loss import ComputeLoss
 from utils.plots import plot_labels, plot_evolve
-from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, de_parallel
+from utils.torch_utils import EarlyStopping, ModelEMA, de_parallel, intersect_dicts, select_device, \
+    torch_distributed_zero_first
 from utils.loggers.wandb.wandb_utils import check_wandb_resume
 from utils.metrics import fitness
 from utils.loggers import Loggers
@@ -255,6 +256,7 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
     results = (0, 0, 0, 0, 0, 0, 0)  # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
     scheduler.last_epoch = start_epoch - 1  # do not move
     scaler = amp.GradScaler(enabled=cuda)
+    stopper = EarlyStopping(patience=opt.patience)
     compute_loss = ComputeLoss(model)  # init loss class
     LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n'
                 f'Using {train_loader.num_workers} dataloader workers\n'
@@ -389,6 +391,20 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
                 del ckpt
                 callbacks.on_model_save(last, epoch, final_epoch, best_fitness, fi)

+            # Stop Single-GPU
+            if stopper(epoch=epoch, fitness=fi):
+                break
+
+            # Stop DDP TODO: known issues https://github.com/ultralytics/yolov5/pull/4576
+            # stop = stopper(epoch=epoch, fitness=fi)
+            # if RANK == 0:
+            #    dist.broadcast_object_list([stop], 0)  # broadcast 'stop' to all ranks
+
+            # Stop DDP
+            # with torch_distributed_zero_first(RANK):
+            # if stop:
+            #    break  # must break all DDP ranks
+
         # end epoch ----------------------------------------------------------------------------------------------------
     # end training -----------------------------------------------------------------------------------------------------
     if RANK in [-1, 0]:
@@ -454,6 +470,7 @@ def parse_opt(known=False):
     parser.add_argument('--artifact_alias', type=str, default="latest", help='version of dataset artifact to be used')
     parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify')
     parser.add_argument('--freeze', type=int, default=0, help='Number of layers to freeze. backbone=10, all=24')
+    parser.add_argument('--patience', type=int, default=30, help='EarlyStopping patience (epochs)')
     opt = parser.parse_known_args()[0] if known else parser.parse_args()
     return opt

diff --git a/utils/torch_utils.py b/utils/torch_utils.py
index 2eb51d80f34e..2e153921eb10 100644
--- a/utils/torch_utils.py
+++ b/utils/torch_utils.py
@@ -293,6 +293,23 @@ def copy_attr(a, b, include=(), exclude=()):
             setattr(a, k, v)


+class EarlyStopping:
+    # YOLOv5 simple early stopper
+    def __init__(self, patience=30):
+        self.best_fitness = 0.0  # i.e. mAP
+        self.best_epoch = 0
+        self.patience = patience  # epochs to wait after fitness stops improving to stop
+
+    def __call__(self, epoch, fitness):
+        if fitness >= self.best_fitness:  # >= 0 to allow for early zero-fitness stage of training
+            self.best_epoch = epoch
+            self.best_fitness = fitness
+        stop = (epoch - self.best_epoch) >= self.patience  # stop training if patience exceeded
+        if stop:
+            LOGGER.info(f'EarlyStopping patience {self.patience} exceeded, stopping training.')
+        return stop
+
+
 class ModelEMA:
     """ Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models
     Keep a moving average of everything in the model state_dict (parameters and buffers).
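Read together, the hunks above add a --patience flag, construct stopper = EarlyStopping(patience=opt.patience) before
the epoch loop, and break out of single-GPU training once fitness has not improved for patience epochs (DDP support is
left as a TODO). A runnable distillation of the stopping rule, with a fabricated fitness sequence for illustration:

    class EarlyStopping:
        # same contract as the class added to utils/torch_utils.py above
        def __init__(self, patience=30):
            self.best_fitness = 0.0  # i.e. mAP
            self.best_epoch = 0
            self.patience = patience

        def __call__(self, epoch, fitness):
            if fitness >= self.best_fitness:  # >= tolerates the early zero-fitness stage
                self.best_epoch, self.best_fitness = epoch, fitness
            return (epoch - self.best_epoch) >= self.patience  # True -> stop training

    stopper = EarlyStopping(patience=3)
    for epoch, fi in enumerate([0.00, 0.20, 0.50, 0.50, 0.49, 0.48, 0.47]):  # fabricated fitness
        if stopper(epoch=epoch, fitness=fi):
            print(f'stop at epoch {epoch}: best fitness {stopper.best_fitness} at epoch {stopper.best_epoch}')
            break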
From d7aa3f153d049b89267b1b594a481a1a27fe27e6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 28 Aug 2021 19:17:21 +0200 Subject: [PATCH 270/757] Remove `image_weights` DDP code (#4579) * Initial commit * Update --- train.py | 20 ++++++-------------- 1 file changed, 6 insertions(+), 14 deletions(-) diff --git a/train.py b/train.py index a6c34cbc466c..aaee41a499e4 100644 --- a/train.py +++ b/train.py @@ -265,21 +265,13 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------ model.train() - # Update image weights (optional) + # Update image weights (optional, single-GPU only) if opt.image_weights: - # Generate indices - if RANK in [-1, 0]: - cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc # class weights - iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights - dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx - # Broadcast if DDP - if RANK != -1: - indices = (torch.tensor(dataset.indices) if RANK == 0 else torch.zeros(dataset.n)).int() - dist.broadcast(indices, 0) - if RANK != 0: - dataset.indices = indices.cpu().numpy() - - # Update mosaic border + cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc # class weights + iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights + dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx + + # Update mosaic border (optional) # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs) # dataset.mosaic_border = [b - imgsz, -b] # height, width borders From bbfafeabdbf7785f8da5e4f9880df27869a71218 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 29 Aug 2021 13:49:04 +0200 Subject: [PATCH 271/757] Add `Profile()` profiler (#4587) * Add `Profile()` profiler * CamelCase Timeout --- utils/general.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/utils/general.py b/utils/general.py index 16244903575a..c74d8bb299de 100755 --- a/utils/general.py +++ b/utils/general.py @@ -39,8 +39,17 @@ os.environ['NUMEXPR_MAX_THREADS'] = str(min(os.cpu_count(), 8)) # NumExpr max threads -class timeout(contextlib.ContextDecorator): - # Usage: @timeout(seconds) decorator or 'with timeout(seconds):' context manager +class Profile(contextlib.ContextDecorator): + # Usage: @Profile() decorator or 'with Profile():' context manager + def __enter__(self): + self.start = time.time() + + def __exit__(self, type, value, traceback): + print(f'Profile results: {time.time() - self.start:.5f}s') + + +class Timeout(contextlib.ContextDecorator): + # Usage: @Timeout(seconds) decorator or 'with Timeout(seconds):' context manager def __init__(self, seconds, *, timeout_msg='', suppress_timeout_errors=True): self.seconds = int(seconds) self.timeout_message = timeout_msg From 7b35971ba5942aea0ad81d2c2663629d3e733cf8 Mon Sep 17 00:00:00 2001 From: Takumi Karasawa Date: Sun, 29 Aug 2021 22:08:27 +0900 Subject: [PATCH 272/757] Fix bug in `plot_one_box` when label is `None` (#4588) --- utils/plots.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/plots.py b/utils/plots.py index 76c161a13d1a..25d70dbabc75 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -72,7 +72,7 @@ def plot_one_box(box, im, color=(128, 128, 128), txt_color=(255, 255, 255), labe assert im.data.contiguous, 'Image not contiguous. 
Apply np.ascontiguousarray(im) to plot_on_box() input image.' lw = line_width or max(int(min(im.size) / 200), 2) # line width - if use_pil or not is_ascii(label): # use PIL + if use_pil or (label is not None and not is_ascii(label)): # use PIL im = Image.fromarray(im) draw = ImageDraw.Draw(im) draw.rectangle(box, width=lw + 1, outline=color) # plot From de44376d1b0a091a5970c52864a6555978e2ff79 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 29 Aug 2021 16:46:13 +0200 Subject: [PATCH 273/757] Create `Annotator()` class (#4591) * Add Annotator() class * Download Arial * 2x for loop * Cleanup * tuple 2 list * max_size=1920 * bold logging results to * tolist() * im = annotator.im * PIL save in detect.py * Smart asarray in detect.py * revert to cv2.imwrite * Cleanup * Return result asarray * Add `Profile()` profiler * CamelCase Timeout * Resize after mosaic * pillow>=8.0.0 * daemon imwrite * Add cv2 support * Remove plot_wh_methods and plot_one_box * pil=False for hubconf.py annotations * im.shape bug fix * colorstr common.py * join daemons * Update t.daemon * Removed daemon saving --- detect.py | 6 +- models/common.py | 11 ++- requirements.txt | 2 +- train.py | 2 +- utils/general.py | 5 +- utils/plots.py | 189 ++++++++++++++++++++++------------------------- 6 files changed, 106 insertions(+), 109 deletions(-) diff --git a/detect.py b/detect.py index 15ddc1ffb6a4..77502b0c5bee 100644 --- a/detect.py +++ b/detect.py @@ -23,7 +23,7 @@ from utils.datasets import LoadStreams, LoadImages from utils.general import check_img_size, check_requirements, check_imshow, colorstr, non_max_suppression, \ apply_classifier, scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path, save_one_box -from utils.plots import colors, plot_one_box +from utils.plots import colors, Annotator from utils.torch_utils import select_device, load_classifier, time_sync @@ -181,6 +181,7 @@ def wrap_frozen_graph(gd, inputs, outputs): s += '%gx%g ' % img.shape[2:] # print string gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh imc = im0.copy() if save_crop else im0 # for save_crop + annotator = Annotator(im0, line_width=line_thickness, pil=False) if len(det): # Rescale boxes from img_size to im0 size det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round() @@ -201,7 +202,7 @@ def wrap_frozen_graph(gd, inputs, outputs): if save_img or save_crop or view_img: # Add bbox to image c = int(cls) # integer class label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') - im0 = plot_one_box(xyxy, im0, label=label, color=colors(c, True), line_width=line_thickness) + annotator.box_label(xyxy, label, color=colors(c, True)) if save_crop: save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) @@ -209,6 +210,7 @@ def wrap_frozen_graph(gd, inputs, outputs): print(f'{s}Done. 
({t2 - t1:.3f}s)') # Stream results + im0 = annotator.result() if view_img: cv2.imshow(str(p), im0) cv2.waitKey(1) # 1 millisecond diff --git a/models/common.py b/models/common.py index e1f5aea3abed..0c60b39a483d 100644 --- a/models/common.py +++ b/models/common.py @@ -18,8 +18,9 @@ from torch.cuda import amp from utils.datasets import exif_transpose, letterbox -from utils.general import non_max_suppression, make_divisible, scale_coords, increment_path, xyxy2xywh, save_one_box -from utils.plots import colors, plot_one_box +from utils.general import colorstr, non_max_suppression, make_divisible, scale_coords, increment_path, xyxy2xywh, \ + save_one_box +from utils.plots import colors, Annotator from utils.torch_utils import time_sync LOGGER = logging.getLogger(__name__) @@ -370,12 +371,14 @@ def display(self, pprint=False, show=False, save=False, crop=False, render=False n = (pred[:, -1] == c).sum() # detections per class str += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string if show or save or render or crop: + annotator = Annotator(im, pil=False) for *box, conf, cls in reversed(pred): # xyxy, confidence, class label = f'{self.names[int(cls)]} {conf:.2f}' if crop: save_one_box(box, im, file=save_dir / 'crops' / self.names[int(cls)] / self.files[i]) else: # all others - im = plot_one_box(box, im, label=label, color=colors(cls)) + annotator.box_label(box, label, color=colors(cls)) + im = annotator.im else: str += '(no detections)' @@ -388,7 +391,7 @@ def display(self, pprint=False, show=False, save=False, crop=False, render=False f = self.files[i] im.save(save_dir / f) # save if i == self.n - 1: - LOGGER.info(f"Saved {self.n} image{'s' * (self.n > 1)} to '{save_dir}'") + LOGGER.info(f"Saved {self.n} image{'s' * (self.n > 1)} to {colorstr('bold', save_dir)}") if render: self.imgs[i] = np.asarray(im) diff --git a/requirements.txt b/requirements.txt index f6361d591f1b..2ad65ba53e29 100755 --- a/requirements.txt +++ b/requirements.txt @@ -4,7 +4,7 @@ matplotlib>=3.2.2 numpy>=1.18.5 opencv-python>=4.1.2 -Pillow +Pillow>=8.0.0 PyYAML>=5.3.1 scipy>=1.4.1 torch>=1.7.0 diff --git a/train.py b/train.py index aaee41a499e4..2fe38ef043d0 100644 --- a/train.py +++ b/train.py @@ -260,7 +260,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary compute_loss = ComputeLoss(model) # init loss class LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n' f'Using {train_loader.num_workers} dataloader workers\n' - f'Logging results to {save_dir}\n' + f"Logging results to {colorstr('bold', save_dir)}\n" f'Starting training for {epochs} epochs...') for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------ model.train() diff --git a/utils/general.py b/utils/general.py index c74d8bb299de..fe9a8ac537fb 100755 --- a/utils/general.py +++ b/utils/general.py @@ -122,9 +122,10 @@ def is_pip(): return 'site-packages' in Path(__file__).absolute().parts -def is_ascii(str=''): +def is_ascii(s=''): # Is string composed of all ASCII (no UTF) characters? - return len(str.encode().decode('ascii', 'ignore')) == len(str) + s = str(s) # convert to str() in case of None, etc. 
+ return len(s.encode().decode('ascii', 'ignore')) == len(s) def emojis(str=''): diff --git a/utils/plots.py b/utils/plots.py index 25d70dbabc75..696d32345dd5 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -67,51 +67,59 @@ def butter_lowpass(cutoff, fs, order): return filtfilt(b, a, data) # forward-backward filter -def plot_one_box(box, im, color=(128, 128, 128), txt_color=(255, 255, 255), label=None, line_width=3, use_pil=False): - # Plots one xyxy box on image im with label - assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to plot_on_box() input image.' - lw = line_width or max(int(min(im.size) / 200), 2) # line width - - if use_pil or (label is not None and not is_ascii(label)): # use PIL - im = Image.fromarray(im) - draw = ImageDraw.Draw(im) - draw.rectangle(box, width=lw + 1, outline=color) # plot - if label: - font = ImageFont.truetype("Arial.ttf", size=max(round(max(im.size) / 40), 12)) - txt_width, txt_height = font.getsize(label) - draw.rectangle([box[0], box[1] - txt_height + 4, box[0] + txt_width, box[1]], fill=color) - draw.text((box[0], box[1] - txt_height + 1), label, fill=txt_color, font=font) - return np.asarray(im) - else: # use OpenCV - c1, c2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3])) - cv2.rectangle(im, c1, c2, color, thickness=lw, lineType=cv2.LINE_AA) - if label: - tf = max(lw - 1, 1) # font thickness - txt_width, txt_height = cv2.getTextSize(label, 0, fontScale=lw / 3, thickness=tf)[0] - c2 = c1[0] + txt_width, c1[1] - txt_height - 3 - cv2.rectangle(im, c1, c2, color, -1, cv2.LINE_AA) # filled - cv2.putText(im, label, (c1[0], c1[1] - 2), 0, lw / 3, txt_color, thickness=tf, lineType=cv2.LINE_AA) - return im - - -def plot_wh_methods(): # from utils.plots import *; plot_wh_methods() - # Compares the two methods for width-height anchor multiplication - # https://github.com/ultralytics/yolov3/issues/168 - x = np.arange(-4.0, 4.0, .1) - ya = np.exp(x) - yb = torch.sigmoid(torch.from_numpy(x)).numpy() * 2 - - fig = plt.figure(figsize=(6, 3), tight_layout=True) - plt.plot(x, ya, '.-', label='YOLOv3') - plt.plot(x, yb ** 2, '.-', label='YOLOv5 ^2') - plt.plot(x, yb ** 1.6, '.-', label='YOLOv5 ^1.6') - plt.xlim(left=-4, right=4) - plt.ylim(bottom=0, top=6) - plt.xlabel('input') - plt.ylabel('output') - plt.grid() - plt.legend() - fig.savefig('comparison.png', dpi=200) +class Annotator: + # YOLOv5 PIL Annotator class + def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=True): + assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to plot_on_box() input image.' 
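The contiguity assert above guards the drawing code that follows: both `Image.fromarray()` and the cv2 drawing calls expect C-contiguous memory, and common numpy idioms silently produce views that are not. A hedged illustration of the failure mode and its fix (array shape is an assumption, not from the patch):

    import numpy as np

    im = np.zeros((480, 640, 3), dtype=np.uint8)[..., ::-1]  # channel flip yields a strided view
    print(im.data.contiguous)      # False: this is what would trip the assert
    im = np.ascontiguousarray(im)  # copy into contiguous memory
    print(im.data.contiguous)      # True: now safe to hand to the annotator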
+ self.pil = pil + if self.pil: # use PIL + self.im = im if isinstance(im, Image.Image) else Image.fromarray(im) + self.draw = ImageDraw.Draw(self.im) + s = sum(self.im.size) / 2 # mean shape + f = font_size or max(round(s * 0.035), 12) + try: + self.font = ImageFont.truetype(font, size=f) + except: # download TTF + url = "https://github.com/ultralytics/yolov5/releases/download/v1.0/" + font + torch.hub.download_url_to_file(url, font) + self.font = ImageFont.truetype(font, size=f) + self.fh = self.font.getsize('a')[1] - 3 # font height + else: # use cv2 + self.im = im + s = sum(im.shape) / 2 # mean shape + self.lw = line_width or max(round(s * 0.003), 2) # line width + + def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)): + # Add one xyxy box to image with label + if self.pil or not is_ascii(label): + self.draw.rectangle(box, width=self.lw, outline=color) # box + if label: + w = self.font.getsize(label)[0] # text width + self.draw.rectangle([box[0], box[1] - self.fh, box[0] + w + 1, box[1] + 1], fill=color) + self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls') + else: # cv2 + c1, c2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3])) + cv2.rectangle(self.im, c1, c2, color, thickness=self.lw, lineType=cv2.LINE_AA) + if label: + tf = max(self.lw - 1, 1) # font thickness + w, h = cv2.getTextSize(label, 0, fontScale=self.lw / 3, thickness=tf)[0] + c2 = c1[0] + w, c1[1] - h - 3 + cv2.rectangle(self.im, c1, c2, color, -1, cv2.LINE_AA) # filled + cv2.putText(self.im, label, (c1[0], c1[1] - 2), 0, self.lw / 3, txt_color, thickness=tf, + lineType=cv2.LINE_AA) + + def rectangle(self, xy, fill=None, outline=None, width=1): + # Add rectangle to image (PIL-only) + self.draw.rectangle(xy, fill, outline, width) + + def text(self, xy, text, txt_color=(255, 255, 255)): + # Add text to image (PIL-only) + w, h = self.font.getsize(text) # text width, height + self.draw.text((xy[0], xy[1] - h + 1), text, fill=txt_color, font=self.font) + + def result(self): + # Return annotated image as array + return np.asarray(self.im) def output_to_target(output): @@ -123,82 +131,65 @@ def output_to_target(output): return np.array(targets) -def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=640, max_subplots=16): +def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=1920, max_subplots=16): # Plot image grid with labels - if isinstance(images, torch.Tensor): images = images.cpu().float().numpy() if isinstance(targets, torch.Tensor): targets = targets.cpu().numpy() - - # un-normalise if np.max(images[0]) <= 1: - images *= 255 - - tl = 3 # line thickness - tf = max(tl - 1, 1) # font thickness + images *= 255.0 # de-normalise (optional) bs, _, h, w = images.shape # batch size, _, height, width bs = min(bs, max_subplots) # limit plot images ns = np.ceil(bs ** 0.5) # number of subplots (square) - # Check if we should resize - scale_factor = max_size / max(h, w) - if scale_factor < 1: - h = math.ceil(scale_factor * h) - w = math.ceil(scale_factor * w) - + # Build Image mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init - for i, img in enumerate(images): + for i, im in enumerate(images): if i == max_subplots: # if last batch has fewer images than we expect break - - block_x = int(w * (i // ns)) - block_y = int(h * (i % ns)) - - img = img.transpose(1, 2, 0) - if scale_factor < 1: - img = cv2.resize(img, (w, h)) - - mosaic[block_y:block_y + h, block_x:block_x + w, :] = 
img + x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin + im = im.transpose(1, 2, 0) + mosaic[y:y + h, x:x + w, :] = im + + # Resize (optional) + scale = max_size / ns / max(h, w) + if scale < 1: + h = math.ceil(scale * h) + w = math.ceil(scale * w) + mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h))) + + # Annotate + fs = int(h * ns * 0.02) # font size + annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs) + for i in range(i + 1): + x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin + annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders + if paths: + annotator.text((x + 5, y + 5 + h), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames if len(targets) > 0: - image_targets = targets[targets[:, 0] == i] - boxes = xywh2xyxy(image_targets[:, 2:6]).T - classes = image_targets[:, 1].astype('int') - labels = image_targets.shape[1] == 6 # labels if no conf column - conf = None if labels else image_targets[:, 6] # check for confidence presence (label vs pred) + ti = targets[targets[:, 0] == i] # image targets + boxes = xywh2xyxy(ti[:, 2:6]).T + classes = ti[:, 1].astype('int') + labels = ti.shape[1] == 6 # labels if no conf column + conf = None if labels else ti[:, 6] # check for confidence presence (label vs pred) if boxes.shape[1]: if boxes.max() <= 1.01: # if normalized with tolerance 0.01 boxes[[0, 2]] *= w # scale to pixels boxes[[1, 3]] *= h - elif scale_factor < 1: # absolute coords need scale if image scales - boxes *= scale_factor - boxes[[0, 2]] += block_x - boxes[[1, 3]] += block_y - for j, box in enumerate(boxes.T): - cls = int(classes[j]) + elif scale < 1: # absolute coords need scale if image scales + boxes *= scale + boxes[[0, 2]] += x + boxes[[1, 3]] += y + for j, box in enumerate(boxes.T.tolist()): + cls = classes[j] color = colors(cls) cls = names[cls] if names else cls if labels or conf[j] > 0.25: # 0.25 conf thresh - label = '%s' % cls if labels else '%s %.1f' % (cls, conf[j]) - mosaic = plot_one_box(box, mosaic, label=label, color=color, line_width=tl) - - # Draw image filename labels - if paths: - label = Path(paths[i]).name[:40] # trim to 40 char - t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0] - cv2.putText(mosaic, label, (block_x + 5, block_y + t_size[1] + 5), 0, tl / 3, [220, 220, 220], thickness=tf, - lineType=cv2.LINE_AA) - - # Image border - cv2.rectangle(mosaic, (block_x, block_y), (block_x + w, block_y + h), (255, 255, 255), thickness=3) - - if fname: - r = min(1280. 
/ max(h, w) / ns, 1.0) # ratio to limit image size - mosaic = cv2.resize(mosaic, (int(ns * w * r), int(ns * h * r)), interpolation=cv2.INTER_AREA) - # cv2.imwrite(fname, cv2.cvtColor(mosaic, cv2.COLOR_BGR2RGB)) # cv2 save - Image.fromarray(mosaic).save(fname) # PIL save - return mosaic + label = f'{cls}' if labels else f'{cls} {conf[j]:.1f}' + annotator.box_label(box, label, color=color) + annotator.im.save(fname) # save def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''): From e5e5ebc7999e26ec0d5f96bb6d12de25e412d98e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 29 Aug 2021 17:15:18 +0200 Subject: [PATCH 274/757] Auto-UTF handling (#4594) --- detect.py | 7 ++++--- models/common.py | 9 +++++---- utils/general.py | 2 +- 3 files changed, 10 insertions(+), 8 deletions(-) diff --git a/detect.py b/detect.py index 77502b0c5bee..0b1d93897d4c 100644 --- a/detect.py +++ b/detect.py @@ -21,9 +21,9 @@ from models.experimental import attempt_load from utils.datasets import LoadStreams, LoadImages -from utils.general import check_img_size, check_requirements, check_imshow, colorstr, non_max_suppression, \ +from utils.general import check_img_size, check_requirements, check_imshow, colorstr, is_ascii, non_max_suppression, \ apply_classifier, scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path, save_one_box -from utils.plots import colors, Annotator +from utils.plots import Annotator, colors from utils.torch_utils import select_device, load_classifier, time_sync @@ -105,6 +105,7 @@ def wrap_frozen_graph(gd, inputs, outputs): output_details = interpreter.get_output_details() # outputs int8 = input_details[0]['dtype'] == np.uint8 # is TFLite quantized uint8 model imgsz = check_img_size(imgsz, s=stride) # check image size + ascii = is_ascii(names) # names are ascii (use PIL for UTF-8) # Dataloader if webcam: @@ -181,7 +182,7 @@ def wrap_frozen_graph(gd, inputs, outputs): s += '%gx%g ' % img.shape[2:] # print string gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh imc = im0.copy() if save_crop else im0 # for save_crop - annotator = Annotator(im0, line_width=line_thickness, pil=False) + annotator = Annotator(im0, line_width=line_thickness, pil=not ascii) if len(det): # Rescale boxes from img_size to im0 size det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round() diff --git a/models/common.py b/models/common.py index 0c60b39a483d..90bfef5124b3 100644 --- a/models/common.py +++ b/models/common.py @@ -18,9 +18,9 @@ from torch.cuda import amp from utils.datasets import exif_transpose, letterbox -from utils.general import colorstr, non_max_suppression, make_divisible, scale_coords, increment_path, xyxy2xywh, \ - save_one_box -from utils.plots import colors, Annotator +from utils.general import colorstr, increment_path, is_ascii, make_divisible, non_max_suppression, save_one_box, \ + scale_coords, xyxy2xywh +from utils.plots import Annotator, colors from utils.torch_utils import time_sync LOGGER = logging.getLogger(__name__) @@ -354,6 +354,7 @@ def __init__(self, imgs, pred, files, times=None, names=None, shape=None): self.imgs = imgs # list of images as numpy arrays self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls) self.names = names # class names + self.ascii = is_ascii(names) # names are ascii (use PIL for UTF-8) self.files = files # image filenames self.xyxy = pred # xyxy pixels self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels @@ -371,7 +372,7 @@ def display(self, pprint=False, show=False, save=False, crop=False, 
render=False n = (pred[:, -1] == c).sum() # detections per class str += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string if show or save or render or crop: - annotator = Annotator(im, pil=False) + annotator = Annotator(im, pil=not self.ascii) for *box, conf, cls in reversed(pred): # xyxy, confidence, class label = f'{self.names[int(cls)]} {conf:.2f}' if crop: diff --git a/utils/general.py b/utils/general.py index fe9a8ac537fb..ba1e4f58cd86 100755 --- a/utils/general.py +++ b/utils/general.py @@ -124,7 +124,7 @@ def is_pip(): def is_ascii(s=''): # Is string composed of all ASCII (no UTF) characters? - s = str(s) # convert to str() in case of None, etc. + s = str(s) # convert list, tuple, None, etc. to str return len(s.encode().decode('ascii', 'ignore')) == len(s) From dbbc6b5c48c2f2ff75501e5fec7fd78bcbb632fb Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 29 Aug 2021 17:44:51 +0200 Subject: [PATCH 275/757] Re-order `plots.py` to class-first (#4595) --- utils/plots.py | 50 ++++++++++++++++++++++++++------------------------ 1 file changed, 26 insertions(+), 24 deletions(-) diff --git a/utils/plots.py b/utils/plots.py index 696d32345dd5..99c8cc2f7044 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -45,30 +45,8 @@ def hex2rgb(h): # rgb order (PIL) colors = Colors() # create instance for 'from utils.plots import colors' -def hist2d(x, y, n=100): - # 2d histogram used in labels.png and evolve.png - xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n) - hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges)) - xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1) - yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1) - return np.log(hist[xidx, yidx]) - - -def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5): - from scipy.signal import butter, filtfilt - - # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy - def butter_lowpass(cutoff, fs, order): - nyq = 0.5 * fs - normal_cutoff = cutoff / nyq - return butter(order, normal_cutoff, btype='low', analog=False) - - b, a = butter_lowpass(cutoff, fs, order=order) - return filtfilt(b, a, data) # forward-backward filter - - class Annotator: - # YOLOv5 PIL Annotator class + # YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=True): assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to plot_on_box() input image.' 
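The reordered class below is functionally unchanged; what the `pil=not ascii` switch from the previous patch buys is automatic backend selection, so callers no longer choose a drawing path by hand: ASCII-only class names keep the fast cv2 path, while any UTF-8 name falls back to PIL so labels render correctly. A hedged usage sketch (image array and names are illustrative):

    import numpy as np
    from utils.general import is_ascii
    from utils.plots import Annotator, colors

    im = np.ascontiguousarray(np.zeros((640, 640, 3), dtype=np.uint8))
    names = ['person', 'bicycle', 'car']
    annotator = Annotator(im, line_width=3, pil=not is_ascii(names))  # cv2 backend here
    annotator.box_label([50, 50, 200, 200], f'{names[0]} 0.91', color=colors(0, True))
    im = annotator.result()  # annotated image back as a numpy array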
self.pil = pil @@ -79,9 +57,11 @@ def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=Tr f = font_size or max(round(s * 0.035), 12) try: self.font = ImageFont.truetype(font, size=f) - except: # download TTF + except Exception as e: # download TTF if missing + print(f'WARNING: Annotator font {font} not found: {e}') url = "https://github.com/ultralytics/yolov5/releases/download/v1.0/" + font torch.hub.download_url_to_file(url, font) + print(f'Annotator font successfully downloaded from {url} to {font}') self.font = ImageFont.truetype(font, size=f) self.fh = self.font.getsize('a')[1] - 3 # font height else: # use cv2 @@ -122,6 +102,28 @@ def result(self): return np.asarray(self.im) +def hist2d(x, y, n=100): + # 2d histogram used in labels.png and evolve.png + xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n) + hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges)) + xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1) + yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1) + return np.log(hist[xidx, yidx]) + + +def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5): + from scipy.signal import butter, filtfilt + + # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy + def butter_lowpass(cutoff, fs, order): + nyq = 0.5 * fs + normal_cutoff = cutoff / nyq + return butter(order, normal_cutoff, btype='low', analog=False) + + b, a = butter_lowpass(cutoff, fs, order=order) + return filtfilt(b, a, data) # forward-backward filter + + def output_to_target(output): # Convert model output to target format [batch_id, class_id, x, y, w, h, conf] targets = [] From bfad3644555c2e8dd82d194ca11842e5d7723798 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 29 Aug 2021 17:56:35 +0200 Subject: [PATCH 276/757] Created using Colaboratory --- tutorial.ipynb | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index ba6d19113a93..d3388670f56c 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -415,7 +415,7 @@ "clear_output()\n", "print(f\"Setup complete. 
Using torch {torch.__version__} ({torch.cuda.get_device_properties(0).name if torch.cuda.is_available() else 'CPU'})\")" ], - "execution_count": 1, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -461,7 +461,7 @@ "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images/\n", "#Image(filename='runs/detect/exp/zidane.jpg', width=600)" ], - "execution_count": 4, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -538,7 +538,7 @@ "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017val.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../datasets && rm tmp.zip" ], - "execution_count": 5, + "execution_count": null, "outputs": [ { "output_type": "display_data", @@ -571,7 +571,7 @@ "# Run YOLOv5x on COCO val2017\n", "!python val.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65 --half" ], - "execution_count": 6, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -734,7 +734,7 @@ "# Train YOLOv5s on COCO128 for 3 epochs\n", "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache" ], - "execution_count": 8, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -853,13 +853,13 @@ "\n", "All results are logged by default to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc. View train and val jpgs to see mosaics, labels, predictions and augmentation effects. Note an Ultralytics **Mosaic Dataloader** is used for training (shown below), which combines 4 images into 1 mosaic during training.\n", "\n", - "> \n", + "> \n", "`train_batch0.jpg` shows train batch 0 mosaics and labels\n", "\n", - "> \n", + "> \n", "`test_batch0_labels.jpg` shows val batch 0 labels\n", "\n", - "> \n", + "> \n", "`test_batch0_pred.jpg` shows val batch 0 _predictions_\n", "\n", "Training results are automatically logged to [Tensorboard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) as `results.csv`, which is plotted as `results.png` (below) after training completes. 
You can also plot any `results.csv` file manually:\n", From b894e69dfc341fcbfe4a307a15d6af90d90367df Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 29 Aug 2021 18:05:49 +0200 Subject: [PATCH 277/757] Update mosaic plots font size (#4596) --- utils/plots.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/plots.py b/utils/plots.py index 99c8cc2f7044..ddfdb42201ee 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -162,7 +162,7 @@ def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h))) # Annotate - fs = int(h * ns * 0.02) # font size + fs = int((h + w) * ns * 0.01) # font size annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs) for i in range(i + 1): x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin From dc13820c9d561bf112e773795cd75d7c40dbbff7 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 30 Aug 2021 13:01:41 +0200 Subject: [PATCH 278/757] TensorBoard `on_train_end()` speed improvements (#4605) --- utils/loggers/__init__.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 3d67e9307b4c..16d0348d86f3 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -131,10 +131,9 @@ def on_train_end(self, last, best, plots, epoch): files = [(self.save_dir / f) for f in files if (self.save_dir / f).exists()] # filter if self.tb: - from PIL import Image - import numpy as np + import cv2 for f in files: - self.tb.add_image(f.stem, np.asarray(Image.open(f)), epoch, dataformats='HWC') + self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats='HWC') if self.wandb: self.wandb.log({"Results": [wandb.Image(str(f), caption=f.name) for f in files]}) From bb4da083d1b2d19fbe482ed91064498aa8f942e6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 30 Aug 2021 14:33:53 +0200 Subject: [PATCH 279/757] Created using Colaboratory --- tutorial.ipynb | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index d3388670f56c..38e8fd4389ea 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -457,9 +457,8 @@ "outputId": "8b728908-81ab-4861-edb0-4d0c46c439fb" }, "source": [ - "%rm -rf runs\n", "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images/\n", - "#Image(filename='runs/detect/exp/zidane.jpg', width=600)" + "Image(filename='runs/detect/exp/zidane.jpg', width=600)" ], "execution_count": null, "outputs": [ From e7d1842a983b0cd98ea22f2f2d5a2b362bd7ebfd Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 30 Aug 2021 14:58:22 +0200 Subject: [PATCH 280/757] Auto-download Arial.ttf on init (#4606) * Auto-download Arial.ttf on init * Fix ROOT --- utils/__init__.py | 16 ++++++++++++++++ utils/plots.py | 9 ++++----- 2 files changed, 20 insertions(+), 5 deletions(-) diff --git a/utils/__init__.py b/utils/__init__.py index e69de29bb2d1..649b288b3588 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -0,0 +1,16 @@ +from pathlib import Path + +import torch +from PIL import ImageFont + +FILE = Path(__file__).absolute() +ROOT = FILE.parents[1] # yolov5/ dir + +# Check YOLOv5 Annotator font +font = 'Arial.ttf' +try: + ImageFont.truetype(font) +except Exception as e: # download if missing + url = "https://ultralytics.com/assets/" + font + print(f'Downloading {url} to {ROOT / font}...') + torch.hub.download_url_to_file(url, str(ROOT / font)) diff --git a/utils/plots.py 
b/utils/plots.py index ddfdb42201ee..eae295e09bed 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -48,7 +48,7 @@ def hex2rgb(h): # rgb order (PIL) class Annotator: # YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=True): - assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to plot_on_box() input image.' + assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.' self.pil = pil if self.pil: # use PIL self.im = im if isinstance(im, Image.Image) else Image.fromarray(im) @@ -57,11 +57,10 @@ def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=Tr f = font_size or max(round(s * 0.035), 12) try: self.font = ImageFont.truetype(font, size=f) - except Exception as e: # download TTF if missing - print(f'WARNING: Annotator font {font} not found: {e}') - url = "https://github.com/ultralytics/yolov5/releases/download/v1.0/" + font + except Exception as e: # download if missing + url = "https://ultralytics.com/assets/" + font + print(f'Downloading {url} to {font}...') torch.hub.download_url_to_file(url, font) - print(f'Annotator font successfully downloaded from {url} to {font}') self.font = ImageFont.truetype(font, size=f) self.fh = self.font.getsize('a')[1] - 3 # font height else: # use cv2 From 35fe03146187cd8b1c09dd3e72ae678bb9ec5b86 Mon Sep 17 00:00:00 2001 From: Yukun Xia Date: Mon, 30 Aug 2021 09:46:33 -0400 Subject: [PATCH 281/757] Fix: add P2 layer 21 to yolov5-p2.yaml `Detect()` inputs (#4608) Layer 21 includes the information of xsmall objects --- models/hub/yolov5-p2.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/hub/yolov5-p2.yaml b/models/hub/yolov5-p2.yaml index 44d8da55dafb..759e9f92fb29 100644 --- a/models/hub/yolov5-p2.yaml +++ b/models/hub/yolov5-p2.yaml @@ -50,5 +50,5 @@ head: [[-1, 10], 1, Concat, [1]], # cat head P5 [-1, 3, C3, [1024, False]], # 30 (P5/32-large) - [[24, 27, 30], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + [[21, 24, 27, 30], 1, Detect, [nc, anchors]], # Detect(P2, P3, P4, P5) ] From 3a72d4a7e33e55a3a505832eb44fd7f7b630fffc Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 30 Aug 2021 17:05:45 +0200 Subject: [PATCH 282/757] Update `check_git_status()` warning (#4610) --- utils/general.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/utils/general.py b/utils/general.py index ba1e4f58cd86..cc316cd103aa 100755 --- a/utils/general.py +++ b/utils/general.py @@ -162,8 +162,7 @@ def check_git_status(): branch = check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out n = int(check_output(f'git rev-list {branch}..origin/master --count', shell=True)) # commits behind if n > 0: - s = f"⚠️ WARNING: code is out of date by {n} commit{'s' * (n > 1)}. " \ - f"Use 'git pull' to update or 'git clone {url}' to download latest." + s = f"⚠️ YOLOv5 is out of date by {n} commit{'s' * (n > 1)}. Use `git pull` or `git clone {url}` to update." 
else: s = f'up to date with {url} ✅' print(emojis(s)) # emoji-safe From 11e7c7b48d7e94a45c0bf46d35efa7da1581f8e9 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Mon, 30 Aug 2021 20:37:20 +0530 Subject: [PATCH 283/757] W&B: Don't log models in evolve operation (#4611) --- utils/loggers/__init__.py | 12 ++++++++---- utils/loggers/wandb/wandb_utils.py | 2 +- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 16d0348d86f3..775803abf068 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -138,7 +138,11 @@ def on_train_end(self, last, best, plots, epoch): if self.wandb: self.wandb.log({"Results": [wandb.Image(str(f), caption=f.name) for f in files]}) # Calling wandb.log. TODO: Refactor this into WandbLogger.log_model - wandb.log_artifact(str(best if best.exists() else last), type='model', - name='run_' + self.wandb.wandb_run.id + '_model', - aliases=['latest', 'best', 'stripped']) - self.wandb.finish_run() + if not self.opt.evolve: + wandb.log_artifact(str(best if best.exists() else last), type='model', + name='run_' + self.wandb.wandb_run.id + '_model', + aliases=['latest', 'best', 'stripped']) + self.wandb.finish_run() + else: + self.wandb.finish_run() + self.wandb = WandbLogger(self.opt) diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index 8b2095afcb8b..5d495c70517b 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -112,7 +112,7 @@ class WandbLogger(): https://docs.wandb.com/guides/integrations/yolov5 """ - def __init__(self, opt, run_id, job_type='Training'): + def __init__(self, opt, run_id=None, job_type='Training'): """ - Initialize WandbLogger instance - Upload dataset if opt.upload_dataset is True From bb5ebc290e5d630a081d7cbc5a9725ed8cea0a24 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 30 Aug 2021 17:22:21 +0200 Subject: [PATCH 284/757] Close `matplotlib` plots after opening (#4612) * Close plots * Replace fig.close() for plt.close() --- utils/metrics.py | 3 +++ utils/plots.py | 4 +++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/utils/metrics.py b/utils/metrics.py index 44b9a3c16488..4f1b5e2d2c2d 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -178,6 +178,7 @@ def plot(self, normalize=True, save_dir='', names=()): fig.axes[0].set_xlabel('True') fig.axes[0].set_ylabel('Predicted') fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250) + plt.close() except Exception as e: print(f'WARNING: ConfusionMatrix plot failure: {e}') @@ -308,6 +309,7 @@ def plot_pr_curve(px, py, ap, save_dir='pr_curve.png', names=()): ax.set_ylim(0, 1) plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left") fig.savefig(Path(save_dir), dpi=250) + plt.close() def plot_mc_curve(px, py, save_dir='mc_curve.png', names=(), xlabel='Confidence', ylabel='Metric'): @@ -328,3 +330,4 @@ def plot_mc_curve(px, py, save_dir='mc_curve.png', names=(), xlabel='Confidence' ax.set_ylim(0, 1) plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left") fig.savefig(Path(save_dir), dpi=250) + plt.close() diff --git a/utils/plots.py b/utils/plots.py index eae295e09bed..e470329f5473 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -345,7 +345,6 @@ def profile_idetection(start=0, stop=0, labels=(), save_dir=''): a.remove() except Exception as e: print('Warning: Plotting error for %s; %s' % (f, e)) - ax[1].legend() plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200) @@ -371,6 +370,7 @@ def 
plot_evolve(evolve_csv=Path('path/to/evolve.csv')): # from utils.plots impo print('%15s: %.3g' % (k, mu)) f = evolve_csv.with_suffix('.png') # filename plt.savefig(f, dpi=200) + plt.close() print(f'Saved {f}') @@ -397,6 +397,7 @@ def plot_results(file='path/to/results.csv', dir=''): print(f'Warning: Plotting error for {f}: {e}') ax[1].legend() fig.savefig(save_dir / 'results.png', dpi=200) + plt.close() def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')): @@ -423,3 +424,4 @@ def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detec print(f'Saving {save_dir / f}... ({n}/{channels})') plt.savefig(save_dir / f, dpi=300, bbox_inches='tight') + plt.close() From 50a9828679d075772a0875a5b2488fb9febb1082 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 30 Aug 2021 18:35:07 +0200 Subject: [PATCH 285/757] DDP `torch.jit.trace()` `--sync-bn` fix (#4615) * Remove assert * debug0 * trace=not opt.sync * sync to sync_bn fix * Cleanup --- train.py | 3 +-- utils/loggers/__init__.py | 9 +++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/train.py b/train.py index 2fe38ef043d0..36492edb8f0b 100644 --- a/train.py +++ b/train.py @@ -333,7 +333,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G' # (GB) pbar.set_description(('%10s' * 2 + '%10.4g' * 5) % ( f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1])) - callbacks.on_train_batch_end(ni, model, imgs, targets, paths, plots) + callbacks.on_train_batch_end(ni, model, imgs, targets, paths, plots, opt.sync_bn) # end batch ------------------------------------------------------------------------------------------------ # Scheduler @@ -499,7 +499,6 @@ def main(opt): assert opt.batch_size % WORLD_SIZE == 0, '--batch-size must be multiple of CUDA device count' assert not opt.image_weights, '--image-weights argument is not compatible with DDP training' assert not opt.evolve, '--evolve argument is not compatible with DDP training' - assert not opt.sync_bn, '--sync-bn known training issue, see https://github.com/ultralytics/yolov5/issues/3998' torch.cuda.set_device(LOCAL_RANK) device = torch.device('cuda', LOCAL_RANK) dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 775803abf068..0750be6c8828 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -69,13 +69,14 @@ def on_pretrain_routine_end(self): if self.wandb: self.wandb.log({"Labels": [wandb.Image(str(x), caption=x.name) for x in paths]}) - def on_train_batch_end(self, ni, model, imgs, targets, paths, plots): + def on_train_batch_end(self, ni, model, imgs, targets, paths, plots, sync_bn): # Callback runs on train batch end if plots: if ni == 0: - with warnings.catch_warnings(): - warnings.simplefilter('ignore') # suppress jit trace warning - self.tb.add_graph(torch.jit.trace(de_parallel(model), imgs[0:1], strict=False), []) + if not sync_bn: # tb.add_graph() --sync known issue https://github.com/ultralytics/yolov5/issues/3754 + with warnings.catch_warnings(): + warnings.simplefilter('ignore') # suppress jit trace warning + self.tb.add_graph(torch.jit.trace(de_parallel(model), imgs[0:1], strict=False), []) if ni < 3: f = self.save_dir / f'train_batch{ni}.jpg' # filename Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start() From ba0f80874fc5c515fa31a3b0d384a65dd2efdce0 Mon Sep 17 
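Stepping back to the DDP fix just above: the `sync_bn` guard exists because `tb.add_graph()` on a SyncBatchNorm model is a known issue (ultralytics/yolov5#3754), so graph logging is simply skipped when `--sync-bn` is active. A hedged sketch of the protected TensorBoard call in isolation (the model and input shape are stand-ins, not the real detector):

    import warnings
    import torch
    from torch.utils.tensorboard import SummaryWriter

    tb = SummaryWriter('runs/exp')
    model = torch.nn.Sequential(torch.nn.Conv2d(3, 16, 3))  # stand-in for the YOLOv5 model
    imgs = torch.zeros(1, 3, 640, 640)                      # one dummy image batch
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')  # suppress jit trace warnings
        tb.add_graph(torch.jit.trace(model, imgs, strict=False), [])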
00:00:00 2001 From: Glenn Jocher Date: Tue, 31 Aug 2021 15:01:41 +0200 Subject: [PATCH 286/757] Fix for Arial.ttf redownloads with hub inference (#4627) --- utils/__init__.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/utils/__init__.py b/utils/__init__.py index 649b288b3588..2af1466f1f1d 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -1,3 +1,4 @@ +import sys from pathlib import Path import torch @@ -5,6 +6,8 @@ FILE = Path(__file__).absolute() ROOT = FILE.parents[1] # yolov5/ dir +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH # Check YOLOv5 Annotator font font = 'Arial.ttf' From a4e8f78c5eba7500ba36f70c805ce76de5b4b0a9 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 31 Aug 2021 20:05:17 +0200 Subject: [PATCH 287/757] Fix 2 for Arial.ttf redownloads with hub inference (#4628) --- utils/__init__.py | 38 +++++++++++++++++++------------------- utils/plots.py | 11 ++++++++--- 2 files changed, 27 insertions(+), 22 deletions(-) diff --git a/utils/__init__.py b/utils/__init__.py index 2af1466f1f1d..4a61057e8083 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -1,19 +1,19 @@ -import sys -from pathlib import Path - -import torch -from PIL import ImageFont - -FILE = Path(__file__).absolute() -ROOT = FILE.parents[1] # yolov5/ dir -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH - -# Check YOLOv5 Annotator font -font = 'Arial.ttf' -try: - ImageFont.truetype(font) -except Exception as e: # download if missing - url = "https://ultralytics.com/assets/" + font - print(f'Downloading {url} to {ROOT / font}...') - torch.hub.download_url_to_file(url, str(ROOT / font)) +# import sys +# from pathlib import Path +# +# import torch +# from PIL import ImageFont +# +# FILE = Path(__file__).absolute() +# ROOT = FILE.parents[1] # yolov5/ dir +# if str(ROOT) not in sys.path: +# sys.path.append(str(ROOT)) # add ROOT to PATH +# +# # Check YOLOv5 Annotator font +# font = 'Arial.ttf' +# try: +# ImageFont.truetype(font) +# except Exception as e: # download if missing +# url = "https://ultralytics.com/assets/" + font +# print(f'Downloading {url} to {ROOT / font}...') +# torch.hub.download_url_to_file(url, str(ROOT / font)) diff --git a/utils/plots.py b/utils/plots.py index e470329f5473..9e14e765a647 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -23,6 +23,9 @@ matplotlib.rc('font', **{'size': 11}) matplotlib.use('Agg') # for writing to files only +FILE = Path(__file__).absolute() +ROOT = FILE.parents[1] # yolov5/ dir + class Colors: # Ultralytics color palette https://ultralytics.com/ @@ -55,12 +58,14 @@ def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=Tr self.draw = ImageDraw.Draw(self.im) s = sum(self.im.size) / 2 # mean shape f = font_size or max(round(s * 0.035), 12) + font = Path(font) # font handling + font = font if font.exists() else (ROOT / font.name) try: - self.font = ImageFont.truetype(font, size=f) + self.font = ImageFont.truetype(str(font) if font.exists() else font.name, size=f) except Exception as e: # download if missing - url = "https://ultralytics.com/assets/" + font + url = "https://ultralytics.com/assets/" + font.name print(f'Downloading {url} to {font}...') - torch.hub.download_url_to_file(url, font) + torch.hub.download_url_to_file(url, str(font)) self.font = ImageFont.truetype(font, size=f) self.fh = self.font.getsize('a')[1] - 3 # font height else: # use cv2 From de534e922120b2da876e8214b976af1f82019e28 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 31 Aug 2021 
20:54:10 +0200 Subject: [PATCH 288/757] Fix 3 for Arial.ttf redownloads with hub inference (#4629) Fix 3 for Arial.ttf redownloads with hub inference, follow-on to #4628. --- utils/plots.py | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/utils/plots.py b/utils/plots.py index 9e14e765a647..fd120b1d427f 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -48,7 +48,22 @@ def hex2rgb(h): # rgb order (PIL) colors = Colors() # create instance for 'from utils.plots import colors' +def check_font(font='Arial.ttf', size=10): + # Return a PIL TrueType Font, downloading to ROOT dir if necessary + font = Path(font) + font = font if font.exists() else (ROOT / font.name) + try: + return ImageFont.truetype(str(font) if font.exists() else font.name, size) + except Exception as e: # download if missing + url = "https://ultralytics.com/assets/" + font.name + print(f'Downloading {url} to {font}...') + torch.hub.download_url_to_file(url, str(font)) + return ImageFont.truetype(str(font), size) + + class Annotator: + check_font() # download TTF if necessary + # YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=True): assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.' @@ -56,22 +71,11 @@ def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=Tr if self.pil: # use PIL self.im = im if isinstance(im, Image.Image) else Image.fromarray(im) self.draw = ImageDraw.Draw(self.im) - s = sum(self.im.size) / 2 # mean shape - f = font_size or max(round(s * 0.035), 12) - font = Path(font) # font handling - font = font if font.exists() else (ROOT / font.name) - try: - self.font = ImageFont.truetype(str(font) if font.exists() else font.name, size=f) - except Exception as e: # download if missing - url = "https://ultralytics.com/assets/" + font.name - print(f'Downloading {url} to {font}...') - torch.hub.download_url_to_file(url, str(font)) - self.font = ImageFont.truetype(font, size=f) + self.font = check_font(font, size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12)) self.fh = self.font.getsize('a')[1] - 3 # font height else: # use cv2 self.im = im - s = sum(im.shape) / 2 # mean shape - self.lw = line_width or max(round(s * 0.003), 2) # line width + self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2) # line width def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)): # Add one xyxy box to image with label From 234e8ae6fabc827ef41aec42d05aceedaf228ebc Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 1 Sep 2021 15:00:13 +0200 Subject: [PATCH 289/757] Fix for `plot_evolve()` string argument (#4639) --- utils/plots.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/plots.py b/utils/plots.py index fd120b1d427f..d8a561a71dcf 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -358,8 +358,9 @@ def profile_idetection(start=0, stop=0, labels=(), save_dir=''): plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200) -def plot_evolve(evolve_csv=Path('path/to/evolve.csv')): # from utils.plots import *; plot_evolve() +def plot_evolve(evolve_csv='path/to/evolve.csv'): # from utils.plots import *; plot_evolve() # Plot evolve.csv hyp evolution results + evolve_csv = Path(evolve_csv) data = pd.read_csv(evolve_csv) keys = [x.strip() for x in data.columns] x = data.values From 
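`check_font()` above turns the earlier import-time download into a lazy one: the TTF is fetched only when `ImageFont.truetype()` first misses, and the class-level `check_font()` call primes it as soon as `plots.py` is imported. A hedged usage sketch (font name and size match the defaults shown in the patch):

    from utils.plots import check_font

    font = check_font('Arial.ttf', size=12)  # PIL ImageFont, downloaded to ROOT on first miss
    w, h = font.getsize('person 0.91')       # the same text measurement box_label() relies on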
fad57c29cd27c0fcbc0038b7b7312b9b6ef922a8 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Wed, 1 Sep 2021 16:30:14 +0200
Subject: [PATCH 290/757] Fix `is_coco` on missing `data['val']` key (#4642)

---
 val.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/val.py b/val.py
index cbee8cf1c026..1aa37d12dfac 100644
--- a/val.py
+++ b/val.py
@@ -134,7 +134,7 @@ def run(data,

     # Configure
     model.eval()
-    is_coco = type(data['val']) is str and data['val'].endswith('coco/val2017.txt')  # COCO dataset
+    is_coco = isinstance(data.get('val'), str) and data['val'].endswith('coco/val2017.txt')  # COCO dataset
     nc = 1 if single_cls else int(data['nc'])  # number of classes
     iouv = torch.linspace(0.5, 0.95, 10).to(device)  # iou vector for mAP@0.5:0.95
     niou = iouv.numel()

From f64fab58251c58d8aac2772dbc005d569bf72edc Mon Sep 17 00:00:00 2001
From: imyhxy
Date: Sun, 5 Sep 2021 23:43:09 +0800
Subject: [PATCH 291/757] Fixed 'meta' and 'hyp' may be out of order when using evolve (#4657)

* Fixed 'meta' and 'hyp' may be out of order when using evolve

* Update gitignore
---
 .gitignore | 2 +-
 train.py   | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/.gitignore b/.gitignore
index e5d02af960af..9c270c7dabe7 100755
--- a/.gitignore
+++ b/.gitignore
@@ -76,7 +76,7 @@ sdist/
 var/
 wheels/
 *.egg-info/
-wandb/
+/wandb/
 .installed.cfg
 *.egg

diff --git a/train.py b/train.py
index 36492edb8f0b..89f86401c187 100644
--- a/train.py
+++ b/train.py
@@ -570,7 +570,7 @@ def main(opt):
             mp, s = 0.8, 0.2  # mutation probability, sigma
             npr = np.random
             npr.seed(int(time.time()))
-            g = np.array([x[0] for x in meta.values()])  # gains 0-1
+            g = np.array([meta[k][0] for k in hyp.keys()])  # gains 0-1
             ng = len(meta)
             v = np.ones(ng)
             while all(v == 1):  # mutate until a change occurs (prevent duplicates)

From 548745181a4d30db3d4fe81a952ca6dbb54c7578 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Sun, 5 Sep 2021 19:09:53 +0200
Subject: [PATCH 292/757] EarlyStopper updates (#4679)

---
 train.py             | 6 +++---
 utils/torch_utils.py | 7 +++++--
 2 files changed, 8 insertions(+), 5 deletions(-)

diff --git a/train.py b/train.py
index 89f86401c187..72aee2cb8883 100644
--- a/train.py
+++ b/train.py
@@ -344,7 +344,7 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
             # mAP
             callbacks.on_train_epoch_end(epoch=epoch)
             ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'names', 'stride', 'class_weights'])
-            final_epoch = epoch + 1 == epochs
+            final_epoch = (epoch + 1 == epochs) or stopper.possible_stop
             if not noval or final_epoch:  # Calculate mAP
                 results, maps, _ = val.run(data_dict,
                                            batch_size=batch_size // WORLD_SIZE * 2,
@@ -384,7 +384,7 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
                 callbacks.on_model_save(last, epoch, final_epoch, best_fitness, fi)

             # Stop Single-GPU
-            if stopper(epoch=epoch, fitness=fi):
+            if RANK == -1 and stopper(epoch=epoch, fitness=fi):
                 break

             # Stop DDP TODO: known issues, see https://github.com/ultralytics/yolov5/pull/4576
@@ -462,7 +462,7 @@ def parse_opt(known=False):
     parser.add_argument('--artifact_alias', type=str, default="latest", help='version of dataset artifact to be used')
     parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify')
     parser.add_argument('--freeze', type=int, default=0, help='Number of layers to freeze. 
backbone=10, all=24') - parser.add_argument('--patience', type=int, default=30, help='EarlyStopping patience (epochs)') + parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)') opt = parser.parse_known_args()[0] if known else parser.parse_args() return opt diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 2e153921eb10..04e1446bb908 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -298,13 +298,16 @@ class EarlyStopping: def __init__(self, patience=30): self.best_fitness = 0.0 # i.e. mAP self.best_epoch = 0 - self.patience = patience # epochs to wait after fitness stops improving to stop + self.patience = patience or float('inf') # epochs to wait after fitness stops improving to stop + self.possible_stop = False # possible stop may occur next epoch def __call__(self, epoch, fitness): if fitness >= self.best_fitness: # >= 0 to allow for early zero-fitness stage of training self.best_epoch = epoch self.best_fitness = fitness - stop = (epoch - self.best_epoch) >= self.patience # stop training if patience exceeded + delta = epoch - self.best_epoch # epochs without improvement + self.possible_stop = delta >= (self.patience - 1) # possible stop may occur next epoch + stop = delta >= self.patience # stop training if patience exceeded if stop: LOGGER.info(f'EarlyStopping patience {self.patience} exceeded, stopping training.') return stop From 2317f86ca4ee140ed6a50fc0cc9857383f755ecd Mon Sep 17 00:00:00 2001 From: Kalen Michael Date: Tue, 7 Sep 2021 18:32:15 +0200 Subject: [PATCH 293/757] Optimised Callback Class to Reduce Code and Fix Errors (#4688) * added callbacks * added back callback to main * added save_dir to callback output * reduced code count * updated callbacks * added default callback class to main, added missing parameters to on_model_save * Glenn updates Co-authored-by: Glenn Jocher --- train.py | 20 ++++---- utils/callbacks.py | 123 ++++----------------------------------------- val.py | 4 +- 3 files changed, 22 insertions(+), 125 deletions(-) diff --git a/train.py b/train.py index 72aee2cb8883..f9aa3d4b5f69 100644 --- a/train.py +++ b/train.py @@ -56,7 +56,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary opt, device, - callbacks=Callbacks() + callbacks ): save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze, = \ Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \ @@ -231,7 +231,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz) model.half().float() # pre-reduce anchor precision - callbacks.on_pretrain_routine_end() + callbacks.run('on_pretrain_routine_end') # DDP mode if cuda and RANK != -1: @@ -333,7 +333,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G' # (GB) pbar.set_description(('%10s' * 2 + '%10.4g' * 5) % ( f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1])) - callbacks.on_train_batch_end(ni, model, imgs, targets, paths, plots, opt.sync_bn) + callbacks.run('on_train_batch_end', ni, model, imgs, targets, paths, plots, opt.sync_bn) # end batch ------------------------------------------------------------------------------------------------ # Scheduler @@ -342,7 +342,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary if RANK in [-1, 0]: # mAP - 
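The updated stopper above adds two behaviours worth noting: a falsy `patience` now means never stop, and `possible_stop` flags the epoch before a stop so the final mAP pass and checkpoint are not skipped even with `--noval`. A hedged sketch of the training-loop contract (the random fitness is a stand-in for the real weighted-mAP score from `val.run()`):

    import random
    from utils.torch_utils import EarlyStopping

    stopper = EarlyStopping(patience=100)
    for epoch in range(300):
        fi = random.random()  # stand-in for the real fitness value
        final_epoch = (epoch + 1 == 300) or stopper.possible_stop  # mirrors train.py
        if stopper(epoch=epoch, fitness=fi):
            break  # patience epochs passed with no improvement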
callbacks.on_train_epoch_end(epoch=epoch) + callbacks.run('on_train_epoch_end', epoch=epoch) ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'names', 'stride', 'class_weights']) final_epoch = (epoch + 1 == epochs) or stopper.possible_stop if not noval or final_epoch: # Calculate mAP @@ -364,7 +364,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary if fi > best_fitness: best_fitness = fi log_vals = list(mloss) + list(results) + lr - callbacks.on_fit_epoch_end(log_vals, epoch, best_fitness, fi) + callbacks.run('on_fit_epoch_end', log_vals, epoch, best_fitness, fi) # Save model if (not nosave) or (final_epoch and not evolve): # if save @@ -381,7 +381,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary if best_fitness == fi: torch.save(ckpt, best) del ckpt - callbacks.on_model_save(last, epoch, final_epoch, best_fitness, fi) + callbacks.run('on_model_save', last, epoch, final_epoch, best_fitness, fi) # Stop Single-GPU if RANK == -1 and stopper(epoch=epoch, fitness=fi): @@ -418,7 +418,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary for f in last, best: if f.exists(): strip_optimizer(f) # strip optimizers - callbacks.on_train_end(last, best, plots, epoch) + callbacks.run('on_train_end', last, best, plots, epoch) LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}") torch.cuda.empty_cache() @@ -467,7 +467,7 @@ def parse_opt(known=False): return opt -def main(opt): +def main(opt, callbacks=Callbacks()): # Checks set_logging(RANK) if RANK in [-1, 0]: @@ -505,7 +505,7 @@ def main(opt): # Train if not opt.evolve: - train(opt.hyp, opt, device) + train(opt.hyp, opt, device, callbacks) if WORLD_SIZE > 1 and RANK == 0: _ = [print('Destroying process group... ', end=''), dist.destroy_process_group(), print('Done.')] @@ -585,7 +585,7 @@ def main(opt): hyp[k] = round(hyp[k], 5) # significant digits # Train mutation - results = train(hyp.copy(), opt, device) + results = train(hyp.copy(), opt, device, callbacks) # Write mutation results print_mutation(results, hyp.copy(), save_dir, opt.bucket) diff --git a/utils/callbacks.py b/utils/callbacks.py index 19c334430b5d..327b8639b60c 100644 --- a/utils/callbacks.py +++ b/utils/callbacks.py @@ -9,6 +9,7 @@ class Callbacks: Handles all registered callbacks for YOLOv5 Hooks """ + # Define the available callbacks _callbacks = { 'on_pretrain_routine_start': [], 'on_pretrain_routine_end': [], @@ -34,16 +35,13 @@ class Callbacks: 'teardown': [], } - def __init__(self): - return - def register_action(self, hook, name='', callback=None): """ Register a new action to a callback hook Args: hook The callback hook name to register the action to - name The name of the action + name The name of the action for later reference callback The callback to fire """ assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}" @@ -62,118 +60,17 @@ def get_registered_actions(self, hook=None): else: return self._callbacks - def run_callbacks(self, hook, *args, **kwargs): + def run(self, hook, *args, **kwargs): """ Loop through the registered actions and fire all callbacks - """ - for logger in self._callbacks[hook]: - # print(f"Running callbacks.{logger['callback'].__name__}()") - logger['callback'](*args, **kwargs) - - def on_pretrain_routine_start(self, *args, **kwargs): - """ - Fires all registered callbacks at the start of each pretraining routine - """ - self.run_callbacks('on_pretrain_routine_start', *args, **kwargs) - - def on_pretrain_routine_end(self, *args, **kwargs): - """ - Fires all registered callbacks at the end of 
each pretraining routine - """ - self.run_callbacks('on_pretrain_routine_end', *args, **kwargs) - - def on_train_start(self, *args, **kwargs): - """ - Fires all registered callbacks at the start of each training - """ - self.run_callbacks('on_train_start', *args, **kwargs) - - def on_train_epoch_start(self, *args, **kwargs): - """ - Fires all registered callbacks at the start of each training epoch - """ - self.run_callbacks('on_train_epoch_start', *args, **kwargs) - - def on_train_batch_start(self, *args, **kwargs): - """ - Fires all registered callbacks at the start of each training batch - """ - self.run_callbacks('on_train_batch_start', *args, **kwargs) - def optimizer_step(self, *args, **kwargs): - """ - Fires all registered callbacks on each optimizer step - """ - self.run_callbacks('optimizer_step', *args, **kwargs) - - def on_before_zero_grad(self, *args, **kwargs): - """ - Fires all registered callbacks before zero grad - """ - self.run_callbacks('on_before_zero_grad', *args, **kwargs) - - def on_train_batch_end(self, *args, **kwargs): - """ - Fires all registered callbacks at the end of each training batch - """ - self.run_callbacks('on_train_batch_end', *args, **kwargs) - - def on_train_epoch_end(self, *args, **kwargs): - """ - Fires all registered callbacks at the end of each training epoch - """ - self.run_callbacks('on_train_epoch_end', *args, **kwargs) - - def on_val_start(self, *args, **kwargs): - """ - Fires all registered callbacks at the start of the validation - """ - self.run_callbacks('on_val_start', *args, **kwargs) - - def on_val_batch_start(self, *args, **kwargs): - """ - Fires all registered callbacks at the start of each validation batch - """ - self.run_callbacks('on_val_batch_start', *args, **kwargs) - - def on_val_image_end(self, *args, **kwargs): - """ - Fires all registered callbacks at the end of each val image - """ - self.run_callbacks('on_val_image_end', *args, **kwargs) - - def on_val_batch_end(self, *args, **kwargs): - """ - Fires all registered callbacks at the end of each validation batch - """ - self.run_callbacks('on_val_batch_end', *args, **kwargs) - - def on_val_end(self, *args, **kwargs): - """ - Fires all registered callbacks at the end of the validation - """ - self.run_callbacks('on_val_end', *args, **kwargs) - - def on_fit_epoch_end(self, *args, **kwargs): - """ - Fires all registered callbacks at the end of each fit (train+val) epoch - """ - self.run_callbacks('on_fit_epoch_end', *args, **kwargs) - - def on_model_save(self, *args, **kwargs): - """ - Fires all registered callbacks after each model save + Args: + hook The name of the hook to check, defaults to all + args Arguments to receive from YOLOv5 + kwargs Keyword Arguments to receive from YOLOv5 """ - self.run_callbacks('on_model_save', *args, **kwargs) - def on_train_end(self, *args, **kwargs): - """ - Fires all registered callbacks at the end of training - """ - self.run_callbacks('on_train_end', *args, **kwargs) + assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}" - def teardown(self, *args, **kwargs): - """ - Fires all registered callbacks before teardown - """ - self.run_callbacks('teardown', *args, **kwargs) + for logger in self._callbacks[hook]: + logger['callback'](*args, **kwargs) diff --git a/val.py b/val.py index 1aa37d12dfac..947cd78f7e1f 100644 --- a/val.py +++ b/val.py @@ -216,7 +216,7 @@ def run(data, save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / (path.stem + '.txt')) if save_json: save_one_json(predn, jdict, 
path, class_map) # append to COCO-JSON dictionary - callbacks.on_val_image_end(pred, predn, path, names, img[si]) + callbacks.run('on_val_image_end', pred, predn, path, names, img[si]) # Plot images if plots and batch_i < 3: @@ -253,7 +253,7 @@ def run(data, # Plots if plots: confusion_matrix.plot(save_dir=save_dir, names=list(names.values())) - callbacks.on_val_end() + callbacks.run('on_val_end') # Save JSON if save_json and len(jdict): From 8e5f9ddbdb3375d00483db1616ce891886072055 Mon Sep 17 00:00:00 2001 From: Zhiqiang Wang Date: Wed, 8 Sep 2021 18:48:33 +0800 Subject: [PATCH 294/757] Remove redundant `ComputeLoss` code (#4701) --- utils/loss.py | 1 - 1 file changed, 1 deletion(-) diff --git a/utils/loss.py b/utils/loss.py index 29aac3191c10..fac432d0edc3 100644 --- a/utils/loss.py +++ b/utils/loss.py @@ -91,7 +91,6 @@ def forward(self, pred, true): class ComputeLoss: # Compute losses def __init__(self, model, autobalance=False): - super(ComputeLoss, self).__init__() self.sort_obj_iou = False device = next(model.parameters()).device # get model device h = model.hyp # hyperparameters From a2b3c71636c41141c244ec43f70adbd7387b15d6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 8 Sep 2021 14:36:12 +0200 Subject: [PATCH 295/757] Add suffix checks (#4711) * Add suffix checks * Cleanup * Cleanup2 * Cleanup3 --- detect.py | 10 ++++++---- models/tf.py | 6 +++--- models/yolo.py | 8 ++++---- train.py | 7 ++++--- utils/datasets.py | 6 +++--- utils/general.py | 17 ++++++++++++++++- val.py | 8 +++++--- 7 files changed, 41 insertions(+), 21 deletions(-) diff --git a/detect.py b/detect.py index 0b1d93897d4c..8acd5bf71593 100644 --- a/detect.py +++ b/detect.py @@ -21,8 +21,9 @@ from models.experimental import attempt_load from utils.datasets import LoadStreams, LoadImages -from utils.general import check_img_size, check_requirements, check_imshow, colorstr, is_ascii, non_max_suppression, \ - apply_classifier, scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path, save_one_box +from utils.general import check_img_size, check_imshow, check_requirements, check_suffix, colorstr, is_ascii, \ + non_max_suppression, apply_classifier, scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path, \ + save_one_box from utils.plots import Annotator, colors from utils.torch_utils import select_device, load_classifier, time_sync @@ -68,8 +69,9 @@ def run(weights='yolov5s.pt', # model.pt path(s) # Load model w = weights[0] if isinstance(weights, list) else weights - classify, suffix = False, Path(w).suffix.lower() - pt, onnx, tflite, pb, saved_model = (suffix == x for x in ['.pt', '.onnx', '.tflite', '.pb', '']) # backend + classify, suffix, suffixes = False, Path(w).suffix.lower(), ['.pt', '.onnx', '.tflite', '.pb', ''] + check_suffix(w, suffixes) # check weights have acceptable suffix + pt, onnx, tflite, pb, saved_model = (suffix == x for x in suffixes) # backend booleans stride, names = 64, [f'class{i}' for i in range(1000)] # assign defaults if pt: model = attempt_load(weights, map_location=device) # load FP32 model diff --git a/models/tf.py b/models/tf.py index 40e7d20a9d84..d6d0f26210b2 100644 --- a/models/tf.py +++ b/models/tf.py @@ -53,7 +53,7 @@ from models.experimental import MixConv2d, CrossConv, attempt_load from models.yolo import Detect from utils.datasets import LoadImages -from utils.general import make_divisible, check_file, check_dataset +from utils.general import check_dataset, check_yaml, make_divisible logger = logging.getLogger(__name__) @@ -447,7 +447,7 @@ def 
representative_dataset_gen(): parser.add_argument('--iou-thres', type=float, default=0.5, help='IOU threshold for NMS') parser.add_argument('--score-thres', type=float, default=0.4, help='score threshold for NMS') opt = parser.parse_args() - opt.cfg = check_file(opt.cfg) # check file + opt.cfg = check_yaml(opt.cfg) # check YAML opt.img_size *= 2 if len(opt.img_size) == 1 else 1 # expand print(opt) @@ -534,7 +534,7 @@ def representative_dataset_gen(): if opt.tfl_int8: # Representative Dataset if opt.source.endswith('.yaml'): - with open(check_file(opt.source)) as f: + with open(check_yaml(opt.source)) as f: data = yaml.load(f, Loader=yaml.FullLoader) # data dict check_dataset(data) # check opt.source = data['train'] diff --git a/models/yolo.py b/models/yolo.py index 8618401b3455..25118a92bb2d 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -17,10 +17,10 @@ from models.common import * from models.experimental import * from utils.autoanchor import check_anchor_order -from utils.general import make_divisible, check_file, set_logging +from utils.general import check_yaml, make_divisible, set_logging from utils.plots import feature_visualization -from utils.torch_utils import time_sync, fuse_conv_and_bn, model_info, scale_img, initialize_weights, \ - select_device, copy_attr +from utils.torch_utils import copy_attr, fuse_conv_and_bn, initialize_weights, model_info, scale_img, \ + select_device, time_sync try: import thop # for FLOPs computation @@ -281,7 +281,7 @@ def parse_model(d, ch): # model_dict, input_channels(3) parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') parser.add_argument('--profile', action='store_true', help='profile model speed') opt = parser.parse_args() - opt.cfg = check_file(opt.cfg) # check file + opt.cfg = check_yaml(opt.cfg) # check YAML set_logging() device = select_device(opt.device) diff --git a/train.py b/train.py index f9aa3d4b5f69..c32664832d8b 100644 --- a/train.py +++ b/train.py @@ -35,8 +35,8 @@ from utils.autoanchor import check_anchors from utils.datasets import create_dataloader from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \ - strip_optimizer, get_latest_run, check_dataset, check_file, check_git_status, check_img_size, \ - check_requirements, print_mutation, set_logging, one_cycle, colorstr, methods + strip_optimizer, get_latest_run, check_dataset, check_git_status, check_img_size, check_requirements, \ + check_yaml, check_suffix, print_mutation, set_logging, one_cycle, colorstr, methods from utils.downloads import attempt_download from utils.loss import ComputeLoss from utils.plots import plot_labels, plot_evolve @@ -484,7 +484,8 @@ def main(opt, callbacks=Callbacks()): opt.cfg, opt.weights, opt.resume = '', ckpt, True # reinstate LOGGER.info(f'Resuming training from {ckpt}') else: - opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_file(opt.cfg), check_file(opt.hyp) # check files + check_suffix(opt.weights, '.pt') # check weights + opt.data, opt.cfg, opt.hyp = check_yaml(opt.data), check_yaml(opt.cfg), check_yaml(opt.hyp) # check YAMLs assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified' if opt.evolve: opt.project = 'runs/evolve' diff --git a/utils/datasets.py b/utils/datasets.py index 852bb7c04aa8..0c6b9b5e2893 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -26,8 +26,8 @@ from tqdm import tqdm from utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective 
-from utils.general import check_requirements, check_file, check_dataset, xywh2xyxy, xywhn2xyxy, xyxy2xywhn, \ - xyn2xy, segments2boxes, clean_str +from utils.general import check_dataset, check_requirements, check_yaml, clean_str, segments2boxes, \ + xywh2xyxy, xywhn2xyxy, xyxy2xywhn, xyn2xy from utils.torch_utils import torch_distributed_zero_first # Parameters @@ -938,7 +938,7 @@ def hub_ops(f, max_dim=1920): im.save(im_dir / Path(f).name, quality=75) # save zipped, data_dir, yaml_path = unzip(Path(path)) - with open(check_file(yaml_path), errors='ignore') as f: + with open(check_yaml(yaml_path), errors='ignore') as f: data = yaml.safe_load(f) # data dict if zipped: data['path'] = data_dir # TODO: should this be dir.resolve()? diff --git a/utils/general.py b/utils/general.py index cc316cd103aa..06c62daa32f1 100755 --- a/utils/general.py +++ b/utils/general.py @@ -242,8 +242,23 @@ def check_imshow(): return False -def check_file(file): +def check_suffix(file='yolov5s.pt', suffix=('.pt',), msg=''): + # Check file(s) for acceptable suffixes + if any(suffix): + if isinstance(suffix, str): + suffix = [suffix] + for f in file if isinstance(file, (list, tuple)) else [file]: + assert Path(f).suffix.lower() in suffix, f"{msg}{f} acceptable suffix is {suffix}" + + +def check_yaml(file, suffix=('.yaml', '.yml')): + # Check YAML file(s) for acceptable suffixes + return check_file(file, suffix) + + +def check_file(file, suffix=''): # Search/download file (if necessary) and return path + check_suffix(file, suffix) file = str(file) # convert to str() if Path(file).is_file() or file == '': # exists return file diff --git a/val.py b/val.py index 947cd78f7e1f..b7068e041e57 100644 --- a/val.py +++ b/val.py @@ -22,8 +22,9 @@ from models.experimental import attempt_load from utils.datasets import create_dataloader -from utils.general import coco80_to_coco91_class, check_dataset, check_file, check_img_size, check_requirements, \ - box_iou, non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, set_logging, increment_path, colorstr +from utils.general import coco80_to_coco91_class, check_dataset, check_img_size, check_requirements, \ + check_suffix, check_yaml, box_iou, non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, set_logging, \ + increment_path, colorstr from utils.metrics import ap_per_class, ConfusionMatrix from utils.plots import plot_images, output_to_target, plot_study_txt from utils.torch_utils import select_device, time_sync @@ -116,6 +117,7 @@ def run(data, (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model + check_suffix(weights, '.pt') model = attempt_load(weights, map_location=device) # load FP32 model gs = max(int(model.stride.max()), 32) # grid size (max stride) imgsz = check_img_size(imgsz, s=gs) # check image size @@ -316,7 +318,7 @@ def parse_opt(): opt = parser.parse_args() opt.save_json |= opt.data.endswith('coco.yaml') opt.save_txt |= opt.save_hybrid - opt.data = check_file(opt.data) # check file + opt.data = check_yaml(opt.data) # check YAML return opt From f984cce52a465f7377f2d9188e728496a83821af Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 8 Sep 2021 15:06:31 +0200 Subject: [PATCH 296/757] Fix `check_suffix()` (#4712) Fix a bug when `file=''` --- utils/general.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/general.py b/utils/general.py index 06c62daa32f1..2033f76126f8 100755 --- a/utils/general.py +++ b/utils/general.py @@ -244,7 +244,7 @@ def check_imshow(): def 
check_suffix(file='yolov5s.pt', suffix=('.pt',), msg=''): # Check file(s) for acceptable suffixes - if any(suffix): + if file and suffix: if isinstance(suffix, str): suffix = [suffix] for f in file if isinstance(file, (list, tuple)) else [file]: @@ -258,7 +258,7 @@ def check_yaml(file, suffix=('.yaml', '.yml')): def check_file(file, suffix=''): # Search/download file (if necessary) and return path - check_suffix(file, suffix) + check_suffix(file, suffix) # optional file = str(file) # convert to str() if Path(file).is_file() or file == '': # exists return file From 25a7e1dae59e75d2c401a49cd2c7b76a7cf07139 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 8 Sep 2021 16:01:03 +0200 Subject: [PATCH 297/757] Update `check_yaml()` comment (#4713) * Update `check_yaml()` comment * Cleanup --- utils/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index 2033f76126f8..f2af386a7d93 100755 --- a/utils/general.py +++ b/utils/general.py @@ -252,7 +252,7 @@ def check_suffix(file='yolov5s.pt', suffix=('.pt',), msg=''): def check_yaml(file, suffix=('.yaml', '.yml')): - # Check YAML file(s) for acceptable suffixes + # Search/download YAML file (if necessary) and return path, checking suffix return check_file(file, suffix) From 8e94bf62d9aa588982daec58e89dd2bb682a1f0e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 8 Sep 2021 18:13:59 +0200 Subject: [PATCH 298/757] Add `user_config_dir('Ultralytics')` (#4715) * Add `user_config_dir` * Linux to .config --- utils/general.py | 9 +++++++++ utils/plots.py | 10 ++++------ 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/utils/general.py b/utils/general.py index f2af386a7d93..06bf088582dc 100755 --- a/utils/general.py +++ b/utils/general.py @@ -103,6 +103,15 @@ def get_latest_run(search_dir='.'): return max(last_list, key=os.path.getctime) if last_list else '' +def user_config_dir(dir='Ultralytics'): + # Return path of user configuration directory (make if necessary) + settings = {'Windows': 'AppData/Roaming', 'Linux': '.config', 'Darwin': 'Library/Application Support'} + path = Path.home() / settings.get(platform.system(), '') / dir + if not path.is_dir(): + path.mkdir() # make dir if required + return path + + def is_docker(): # Is environment a Docker container? 
return Path('/workspace').exists() # or Path('/.dockerenv').exists() diff --git a/utils/plots.py b/utils/plots.py index d8a561a71dcf..1ed88ea7c832 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -16,16 +16,14 @@ import torch from PIL import Image, ImageDraw, ImageFont -from utils.general import is_ascii, xyxy2xywh, xywh2xyxy +from utils.general import user_config_dir, is_ascii, xywh2xyxy, xyxy2xywh from utils.metrics import fitness # Settings +CONFIG_DIR = user_config_dir() # Ultralytics settings dir matplotlib.rc('font', **{'size': 11}) matplotlib.use('Agg') # for writing to files only -FILE = Path(__file__).absolute() -ROOT = FILE.parents[1] # yolov5/ dir - class Colors: # Ultralytics color palette https://ultralytics.com/ @@ -49,9 +47,9 @@ def hex2rgb(h): # rgb order (PIL) def check_font(font='Arial.ttf', size=10): - # Return a PIL TrueType Font, downloading to ROOT dir if necessary + # Return a PIL TrueType Font, downloading to CONFIG_DIR if necessary font = Path(font) - font = font if font.exists() else (ROOT / font.name) + font = font if font.exists() else (CONFIG_DIR / font.name) try: return ImageFont.truetype(str(font) if font.exists() else font.name, size) except Exception as e: # download if missing From 0d8a1842373e55f8f639adede0c3d378f1ffbea5 Mon Sep 17 00:00:00 2001 From: ELHoussineT Date: Wed, 8 Sep 2021 19:42:28 +0200 Subject: [PATCH 299/757] Add `crops = results.crops()` dictionary (#4676) * adding get cropped functionality * Add target logic in existing functions * Crops cleanup * Add dictionary keys: conf, cls, box * Bug fixes - avoid return after first image Co-authored-by: Glenn Jocher --- models/common.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/models/common.py b/models/common.py index 90bfef5124b3..e79b8a9d2644 100644 --- a/models/common.py +++ b/models/common.py @@ -365,6 +365,7 @@ def __init__(self, imgs, pred, files, times=None, names=None, shape=None): self.s = shape # inference BCHW shape def display(self, pprint=False, show=False, save=False, crop=False, render=False, save_dir=Path('')): + crops = [] for i, (im, pred) in enumerate(zip(self.imgs, self.pred)): str = f'image {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} ' if pred.shape[0]: @@ -376,7 +377,9 @@ def display(self, pprint=False, show=False, save=False, crop=False, render=False for *box, conf, cls in reversed(pred): # xyxy, confidence, class label = f'{self.names[int(cls)]} {conf:.2f}' if crop: - save_one_box(box, im, file=save_dir / 'crops' / self.names[int(cls)] / self.files[i]) + file = save_dir / 'crops' / self.names[int(cls)] / self.files[i] if save else None + crops.append({'box': box, 'conf': conf, 'cls': cls, 'label': label, + 'im': save_one_box(box, im, file=file, save=save)}) else: # all others annotator.box_label(box, label, color=colors(cls)) im = annotator.im @@ -395,6 +398,10 @@ def display(self, pprint=False, show=False, save=False, crop=False, render=False LOGGER.info(f"Saved {self.n} image{'s' * (self.n > 1)} to {colorstr('bold', save_dir)}") if render: self.imgs[i] = np.asarray(im) + if crop: + if save: + LOGGER.info(f'Saved results to {save_dir}\n') + return crops def print(self): self.display(pprint=True) # print results @@ -408,10 +415,9 @@ def save(self, save_dir='runs/detect/exp'): save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) # increment save_dir self.display(save=True, save_dir=save_dir) # save results - def crop(self, save_dir='runs/detect/exp'): - save_dir = 
increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True)  # increment save_dir
-        self.display(crop=True, save_dir=save_dir)  # crop results
-        LOGGER.info(f'Saved results to {save_dir}\n')
+    def crop(self, save=True, save_dir='runs/detect/exp'):
+        save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) if save else None
+        return self.display(crop=True, save=save, save_dir=save_dir)  # crop results
 
     def render(self):
         self.display(render=True)  # render results

From deb434aefad43be38aa0252bbdece501919108ea Mon Sep 17 00:00:00 2001
From: JD Costa
Date: Thu, 9 Sep 2021 14:29:18 +0100
Subject: [PATCH 300/757] Make CONFIG_DIR configurable per environment variable
 (#4727)

---
 utils/plots.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/utils/plots.py b/utils/plots.py
index 1ed88ea7c832..141a9ac2b09e 100644
--- a/utils/plots.py
+++ b/utils/plots.py
@@ -4,6 +4,7 @@
 """
 
 import math
+import os
 from copy import copy
 from pathlib import Path
 
@@ -20,7 +21,7 @@
 from utils.metrics import fitness
 
 # Settings
-CONFIG_DIR = user_config_dir()  # Ultralytics settings dir
+CONFIG_DIR = Path(os.getenv('YOLOV5_CONFIG_DIR') or user_config_dir())  # Ultralytics settings dir
 matplotlib.rc('font', **{'size': 11})
 matplotlib.use('Agg')  # for writing to files only

From 1cad0ce2c7d3fb0917b4e392be377b5a370c26ef Mon Sep 17 00:00:00 2001
From: Jean-Baptiste Martin
Date: Thu, 9 Sep 2021 15:32:04 +0200
Subject: [PATCH 301/757] Allow `multi_label` option for NMS with PyTorch Hub
 (#4728)

* Allow specifying multi_label option for NMS when using torch hub

* Reformat

Co-authored-by: Glenn Jocher
---
 models/common.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/models/common.py b/models/common.py
index e79b8a9d2644..5305b03d5389 100644
--- a/models/common.py
+++ b/models/common.py
@@ -278,6 +278,7 @@ class AutoShape(nn.Module):
     conf = 0.25  # NMS confidence threshold
     iou = 0.45  # NMS IoU threshold
     classes = None  # (optional list) filter by class
+    multi_label = False  # NMS multiple labels per box
     max_det = 1000  # maximum number of detections per image
 
     def __init__(self, model):
@@ -337,7 +338,8 @@ def forward(self, imgs, size=640, augment=False, profile=False):
             t.append(time_sync())
 
             # Post-process
-            y = non_max_suppression(y, self.conf, iou_thres=self.iou, classes=self.classes, max_det=self.max_det)  # NMS
+            y = non_max_suppression(y, self.conf, iou_thres=self.iou, classes=self.classes,
+                                    multi_label=self.multi_label, max_det=self.max_det)  # NMS
             for i in range(n):
                 scale_coords(shape1, y[i][:, :4], shape0[i])

From 2d9411dbb85ae63b8ca9913726844767898eb021 Mon Sep 17 00:00:00 2001
From: Zegorax
Date: Thu, 9 Sep 2021 16:49:10 +0200
Subject: [PATCH 302/757] Scope `onnx-simplifier` requirements check (#4730)

* Changed onnx-simplifier check behavior

Export.py has been updated to check for the onnx-simplifier requirement only when the --simplify argument is passed. This allows for better flexibility and one less requirement when simplification is not needed.
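
A minimal sketch of the resulting scoped-check pattern (illustration only; `check_requirements` is the helper from utils.general used in the diff below, and `simplify` stands in for the parsed --simplify flag):

    from utils.general import check_requirements

    simplify = True  # stands in for the parsed --simplify CLI flag

    check_requirements(('onnx',))  # onnx is always required for ONNX export
    import onnx

    if simplify:
        check_requirements(('onnx-simplifier',))  # only pulled in when simplification is requested
        import onnxsim
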
* Fix single-element tuples

Co-authored-by: Glenn Jocher
---
 export.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/export.py b/export.py
index 5db09884bae8..b9b32b55ac7f 100644
--- a/export.py
+++ b/export.py
@@ -44,7 +44,7 @@ def export_onnx(model, img, file, opset, train, dynamic, simplify):
     # ONNX model export
     prefix = colorstr('ONNX:')
     try:
-        check_requirements(('onnx', 'onnx-simplifier'))
+        check_requirements(('onnx',))
         import onnx
 
         print(f'\n{prefix} starting export with onnx {onnx.__version__}...')
@@ -66,6 +66,7 @@ def export_onnx(model, img, file, opset, train, dynamic, simplify):
         # Simplify
         if simplify:
             try:
+                check_requirements(('onnx-simplifier',))
                 import onnxsim
 
                 print(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...')

From 4a025ae97f0ae274fa25699c6e3a050a82e5bb08 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Thu, 9 Sep 2021 17:57:46 +0200
Subject: [PATCH 303/757] Fix `user_config_dir()` for GCP/AWS functions (#4726)

* Fix `user_config_dir()` for GCP/AWS functions

Compatibility fix for GCP functions and AWS lambda for the user config directory in https://github.com/ultralytics/yolov5/pull/4628

* Windows skip check
---
 utils/general.py | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/utils/general.py b/utils/general.py
index 06bf088582dc..e3fc31e0bd81 100755
--- a/utils/general.py
+++ b/utils/general.py
@@ -105,13 +105,21 @@ def get_latest_run(search_dir='.'):
 
 def user_config_dir(dir='Ultralytics'):
     # Return path of user configuration directory (make if necessary)
-    settings = {'Windows': 'AppData/Roaming', 'Linux': '.config', 'Darwin': 'Library/Application Support'}
-    path = Path.home() / settings.get(platform.system(), '') / dir
-    if not path.is_dir():
-        path.mkdir()  # make dir if required
+    system = platform.system()
+    cfg = {'Windows': 'AppData/Roaming', 'Linux': '.config', 'Darwin': 'Library/Application Support'}
+    path = Path.home() / cfg.get(system, '') / dir
+    if system == 'Linux' and not is_writeable(path):  # GCP functions and AWS lambda solution, only /tmp is writeable
+        path = Path('/tmp') / dir
+    if not path.is_dir():
+        path.mkdir()  # make dir if required
     return path
 
 
+def is_writeable(path):
+    # Return True if path has write permissions (Warning: known issue on Windows)
+    return os.access(path, os.R_OK)
+
+
 def is_docker():
     # Is environment a Docker container?
return Path('/workspace').exists() # or Path('/.dockerenv').exists() From c5360f6e7009eb4d05f14d1cc9dae0963e949213 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 9 Sep 2021 18:01:59 +0200 Subject: [PATCH 304/757] Fix `--data from_HUB.zip` (#4732) @KalenMike --- train.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/train.py b/train.py index c32664832d8b..e5410eeeba9f 100644 --- a/train.py +++ b/train.py @@ -36,7 +36,7 @@ from utils.datasets import create_dataloader from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \ strip_optimizer, get_latest_run, check_dataset, check_git_status, check_img_size, check_requirements, \ - check_yaml, check_suffix, print_mutation, set_logging, one_cycle, colorstr, methods + check_file, check_yaml, check_suffix, print_mutation, set_logging, one_cycle, colorstr, methods from utils.downloads import attempt_download from utils.loss import ComputeLoss from utils.plots import plot_labels, plot_evolve @@ -105,6 +105,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary is_coco = data.endswith('coco.yaml') and nc == 80 # COCO dataset # Model + check_suffix(weights, '.pt') # check weights pretrained = weights.endswith('.pt') if pretrained: with torch_distributed_zero_first(RANK): @@ -484,8 +485,7 @@ def main(opt, callbacks=Callbacks()): opt.cfg, opt.weights, opt.resume = '', ckpt, True # reinstate LOGGER.info(f'Resuming training from {ckpt}') else: - check_suffix(opt.weights, '.pt') # check weights - opt.data, opt.cfg, opt.hyp = check_yaml(opt.data), check_yaml(opt.cfg), check_yaml(opt.hyp) # check YAMLs + opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_yaml(opt.cfg), check_yaml(opt.hyp) # check YAMLs assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified' if opt.evolve: opt.project = 'runs/evolve' From 7af1b4c266fef1a0554c2077509b3be16d972e1b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 10 Sep 2021 14:34:09 +0200 Subject: [PATCH 305/757] Improved `detect.py` timing (#4741) * Improved detect.py timing * Eliminate 1 time_sync() call * Inference-only time * dash * #Save section * Cleanup --- detect.py | 24 ++++++++++++++---------- val.py | 16 ++++++++-------- 2 files changed, 22 insertions(+), 18 deletions(-) diff --git a/detect.py b/detect.py index 8acd5bf71593..5cb131220e89 100644 --- a/detect.py +++ b/detect.py @@ -8,7 +8,6 @@ import argparse import sys -import time from pathlib import Path import cv2 @@ -123,8 +122,9 @@ def wrap_frozen_graph(gd, inputs, outputs): # Run inference if pt and device.type != 'cpu': model(torch.zeros(1, 3, *imgsz).to(device).type_as(next(model.parameters()))) # run once - t0 = time.time() + dt, seen = [0.0, 0.0, 0.0], 0 for path, img, im0s, vid_cap in dataset: + t1 = time_sync() if onnx: img = img.astype('float32') else: @@ -133,9 +133,10 @@ def wrap_frozen_graph(gd, inputs, outputs): img = img / 255.0 # 0 - 255 to 0.0 - 1.0 if len(img.shape) == 3: img = img[None] # expand for batch dim + t2 = time_sync() + dt[0] += t2 - t1 # Inference - t1 = time_sync() if pt: visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False pred = model(img, augment=augment, visualize=visualize)[0] @@ -162,17 +163,20 @@ def wrap_frozen_graph(gd, inputs, outputs): pred[..., 2] *= imgsz[1] # w pred[..., 3] *= imgsz[0] # h pred = torch.tensor(pred) + t3 = time_sync() + dt[1] += t3 - t2 # NMS pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det) - t2 = 
time_sync() + dt[2] += time_sync() - t3 # Second-stage classifier (optional) if classify: pred = apply_classifier(pred, modelc, img, im0s) # Process predictions - for i, det in enumerate(pred): # detections per image + for i, det in enumerate(pred): # per image + seen += 1 if webcam: # batch_size >= 1 p, s, im0, frame = path[i], f'{i}: ', im0s[i].copy(), dataset.count else: @@ -209,8 +213,8 @@ def wrap_frozen_graph(gd, inputs, outputs): if save_crop: save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) - # Print time (inference + NMS) - print(f'{s}Done. ({t2 - t1:.3f}s)') + # Print time (inference-only) + print(f'{s}Done. ({t3 - t2:.3f}s)') # Stream results im0 = annotator.result() @@ -237,15 +241,15 @@ def wrap_frozen_graph(gd, inputs, outputs): vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) vid_writer[i].write(im0) + # Print results + t = tuple(x / seen * 1E3 for x in dt) # speeds per image + print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) if save_txt or save_img: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' print(f"Results saved to {colorstr('bold', save_dir)}{s}") - if update: strip_optimizer(weights) # update model (to fix SourceChangeWarning) - print(f'Done. ({time.time() - t0:.3f}s)') - def parse_opt(): parser = argparse.ArgumentParser() diff --git a/val.py b/val.py index b7068e041e57..c8f503351ad9 100644 --- a/val.py +++ b/val.py @@ -154,22 +154,22 @@ def run(data, names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)} class_map = coco80_to_coco91_class() if is_coco else list(range(1000)) s = ('%20s' + '%11s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95') - p, r, f1, mp, mr, map50, map, t0, t1, t2 = 0., 0., 0., 0., 0., 0., 0., 0., 0., 0. 
+ dt, p, r, f1, mp, mr, map50, map = [0.0, 0.0, 0.0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 loss = torch.zeros(3, device=device) jdict, stats, ap, ap_class = [], [], [], [] for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)): - t_ = time_sync() + t1 = time_sync() img = img.to(device, non_blocking=True) img = img.half() if half else img.float() # uint8 to fp16/32 img /= 255.0 # 0 - 255 to 0.0 - 1.0 targets = targets.to(device) nb, _, height, width = img.shape # batch size, channels, height, width - t = time_sync() - t0 += t - t_ + t2 = time_sync() + dt[0] += t2 - t1 # Run model out, train_out = model(img, augment=augment) # inference and training outputs - t1 += time_sync() - t + dt[1] += time_sync() - t2 # Compute loss if compute_loss: @@ -178,9 +178,9 @@ def run(data, # Run NMS targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling - t = time_sync() + t3 = time_sync() out = non_max_suppression(out, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls) - t2 += time_sync() - t + dt[2] += time_sync() - t3 # Statistics per image for si, pred in enumerate(out): @@ -247,7 +247,7 @@ def run(data, print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i])) # Print speeds - t = tuple(x / seen * 1E3 for x in (t0, t1, t2)) # speeds per image + t = tuple(x / seen * 1E3 for x in dt) # speeds per image if not training: shape = (batch_size, 3, imgsz, imgsz) print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t) From 19e28e3bfe29c3570313bd069a214edae293c7ea Mon Sep 17 00:00:00 2001 From: Josh Veitch-Michaelis Date: Sat, 11 Sep 2021 02:28:52 +1200 Subject: [PATCH 306/757] Add `callbacks` to train function in W&B sweep (#4742) * add callbacks to train function in wandb sweep Fix following https://github.com/ultralytics/yolov5/pull/4688 which modified the function signature to `train` * Cleanup Co-authored-by: Glenn Jocher --- utils/loggers/wandb/sweep.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/loggers/wandb/sweep.py b/utils/loggers/wandb/sweep.py index 2dcda508eb50..0ca704dd28dd 100644 --- a/utils/loggers/wandb/sweep.py +++ b/utils/loggers/wandb/sweep.py @@ -9,6 +9,7 @@ from train import train, parse_opt from utils.general import increment_path from utils.torch_utils import select_device +from utils.callbacks import Callbacks def sweep(): @@ -26,7 +27,7 @@ def sweep(): device = select_device(opt.device, batch_size=opt.batch_size) # train - train(hyp_dict, opt, device) + train(hyp_dict, opt, device, callbacks=Callbacks()) if __name__ == "__main__": From a144536f881b0ba36ed865ceaed74f11949ca93c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 10 Sep 2021 17:06:22 +0200 Subject: [PATCH 307/757] Fix `is_writeable()` for 3 OS support (#4743) * Fix `is_writeable()` for 3 OS support * Update general.py --- utils/general.py | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/utils/general.py b/utils/general.py index e3fc31e0bd81..6201320d3c63 100755 --- a/utils/general.py +++ b/utils/general.py @@ -105,19 +105,24 @@ def get_latest_run(search_dir='.'): def user_config_dir(dir='Ultralytics'): # Return path of user configuration directory (make if necessary) - system = platform.system() - cfg = {'Windows': 'AppData/Roaming', 'Linux': '.config', 'Darwin': 'Library/Application Support'} - path = Path.home() / 
cfg.get(system, '') / dir - if system == 'Linux' and not is_writeable(path): # GCP functions and AWS lambda solution, only /tmp is writeable - path = Path('/tmp') / dir - if not path.is_dir(): - path.mkdir() # make dir if required + cfg = {'Windows': 'AppData/Roaming', 'Linux': '.config', 'Darwin': 'Library/Application Support'} # 3 config dirs + path = Path.home() / cfg.get(platform.system(), '') # OS-specific config dir + path = (path if is_writeable(path) else Path('/tmp')) / dir # GCP and AWS lambda fix, only /tmp is writeable + path.mkdir(exist_ok=True) # make if required return path -def is_writeable(path): - # Return True if path has write permissions (Warning: known issue on Windows) - return os.access(path, os.R_OK) +def is_writeable(dir): + # Return True if directory has write permissions + # return os.access(path, os.R_OK) # known issue on Windows + file = Path(dir) / 'tmp.txt' + try: + with open(file, 'w'): + pass + file.unlink() # remove file + return True + except IOError: + return False def is_docker(): From 6c554b75eaa77e26cec8759335df3a6bf24175c5 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 10 Sep 2021 17:51:22 +0200 Subject: [PATCH 308/757] Add TF and TFLite models to `.gitignore` (#4747) --- .gitignore | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.gitignore b/.gitignore index 9c270c7dabe7..f8a2437973f0 100755 --- a/.gitignore +++ b/.gitignore @@ -45,9 +45,13 @@ VOC/ # Neural Network weights ----------------------------------------------------------------------------------------------- *.weights *.pt +*.pb *.onnx *.mlmodel *.torchscript +*.tflite +*.h5 +*_saved_model/ darknet53.conv.74 yolov3-tiny.conv.15 From ff3529252077310bf51604294797fe8d3e973d11 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 10 Sep 2021 17:52:21 +0200 Subject: [PATCH 309/757] Add TF and TFLite models to `.dockerignore` (#4748) --- .dockerignore | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.dockerignore b/.dockerignore index 4248cb098cf4..8d60b462e7d1 100644 --- a/.dockerignore +++ b/.dockerignore @@ -18,7 +18,10 @@ data/samples/* **/*.mlmodel **/*.torchscript **/*.torchscript.pt - +**/*.tflite +**/*.h5 +**/*.pb +*_saved_model/ # Below Copied From .gitignore ----------------------------------------------------------------------------------------- # Below Copied From .gitignore ----------------------------------------------------------------------------------------- From 22ee6fb7c186853710ebc57a0dbd716d45b7eef0 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 10 Sep 2021 17:52:33 +0200 Subject: [PATCH 310/757] Update `is_writeable()` for 2 methods (#4744) * Writeable test * Fix * Cleanup --- utils/general.py | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/utils/general.py b/utils/general.py index 6201320d3c63..e8b158a773d4 100755 --- a/utils/general.py +++ b/utils/general.py @@ -112,17 +112,19 @@ def user_config_dir(dir='Ultralytics'): return path -def is_writeable(dir): - # Return True if directory has write permissions - # return os.access(path, os.R_OK) # known issue on Windows - file = Path(dir) / 'tmp.txt' - try: - with open(file, 'w'): - pass - file.unlink() # remove file - return True - except IOError: - return False +def is_writeable(dir, test=False): + # Return True if directory has write permissions, test opening a file with write permissions if test=True + if test: # method 1 + file = Path(dir) / 'tmp.txt' + try: + with open(file, 'w'): # open file with write permissions + pass + 
file.unlink() # remove file + return True + except IOError: + return False + else: # method 2 + return os.access(dir, os.R_OK) # possible issues on Windows def is_docker(): From cd810c82866006c1bbe5de797532dcd469641813 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 11 Sep 2021 16:32:08 +0200 Subject: [PATCH 311/757] Centralize `user_config_dir()` decision making (#4755) --- utils/general.py | 14 +++++++++----- utils/plots.py | 3 +-- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/utils/general.py b/utils/general.py index e8b158a773d4..229b7a7db168 100755 --- a/utils/general.py +++ b/utils/general.py @@ -103,11 +103,15 @@ def get_latest_run(search_dir='.'): return max(last_list, key=os.path.getctime) if last_list else '' -def user_config_dir(dir='Ultralytics'): - # Return path of user configuration directory (make if necessary) - cfg = {'Windows': 'AppData/Roaming', 'Linux': '.config', 'Darwin': 'Library/Application Support'} # 3 config dirs - path = Path.home() / cfg.get(platform.system(), '') # OS-specific config dir - path = (path if is_writeable(path) else Path('/tmp')) / dir # GCP and AWS lambda fix, only /tmp is writeable +def user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'): + # Return path of user configuration directory. Prefer environment variable if exists. Make dir if required. + env = os.getenv(env_var) + if env: + path = Path(env) # use environment variable + else: + cfg = {'Windows': 'AppData/Roaming', 'Linux': '.config', 'Darwin': 'Library/Application Support'} # 3 OS dirs + path = Path.home() / cfg.get(platform.system(), '') # OS-specific config dir + path = (path if is_writeable(path) else Path('/tmp')) / dir # GCP and AWS lambda fix, only /tmp is writeable path.mkdir(exist_ok=True) # make if required return path diff --git a/utils/plots.py b/utils/plots.py index 141a9ac2b09e..1ed88ea7c832 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -4,7 +4,6 @@ """ import math -import os from copy import copy from pathlib import Path @@ -21,7 +20,7 @@ from utils.metrics import fitness # Settings -CONFIG_DIR = Path(os.getenv('YOLOV5_CONFIG_DIR') or user_config_dir()) # Ultralytics settings dir +CONFIG_DIR = user_config_dir() # Ultralytics settings dir matplotlib.rc('font', **{'size': 11}) matplotlib.use('Agg') # for writing to files only From c47be26f34327e667ad13e5bfc45389bdf21b593 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 11 Sep 2021 22:46:33 +0200 Subject: [PATCH 312/757] Replace `path.absolute()` with `path.resolve()` (#4763) --- detect.py | 2 +- export.py | 2 +- hubconf.py | 2 +- models/yolo.py | 2 +- train.py | 2 +- utils/__init__.py | 2 +- utils/datasets.py | 2 +- utils/general.py | 2 +- utils/loggers/wandb/sweep.py | 2 +- utils/loggers/wandb/wandb_utils.py | 2 +- val.py | 2 +- 11 files changed, 11 insertions(+), 11 deletions(-) diff --git a/detect.py b/detect.py index 5cb131220e89..b6597c1662f9 100644 --- a/detect.py +++ b/detect.py @@ -15,7 +15,7 @@ import torch import torch.backends.cudnn as cudnn -FILE = Path(__file__).absolute() +FILE = Path(__file__).resolve() sys.path.append(FILE.parents[0].as_posix()) # add yolov5/ to path from models.experimental import attempt_load diff --git a/export.py b/export.py index b9b32b55ac7f..935bdb40bc9b 100644 --- a/export.py +++ b/export.py @@ -15,7 +15,7 @@ import torch.nn as nn from torch.utils.mobile_optimizer import optimize_for_mobile -FILE = Path(__file__).absolute() +FILE = Path(__file__).resolve() sys.path.append(FILE.parents[0].as_posix()) # add yolov5/ to path from 
models.common import Conv diff --git a/hubconf.py b/hubconf.py index 799c83ec8400..9c5fa63809d1 100644 --- a/hubconf.py +++ b/hubconf.py @@ -33,7 +33,7 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo from utils.downloads import attempt_download from utils.torch_utils import select_device - file = Path(__file__).absolute() + file = Path(__file__).resolve() check_requirements(requirements=file.parent / 'requirements.txt', exclude=('tensorboard', 'thop', 'opencv-python')) set_logging(verbose=verbose) diff --git a/models/yolo.py b/models/yolo.py index 25118a92bb2d..9eddf4a08e49 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -11,7 +11,7 @@ from copy import deepcopy from pathlib import Path -FILE = Path(__file__).absolute() +FILE = Path(__file__).resolve() sys.path.append(FILE.parents[1].as_posix()) # add yolov5/ to path from models.common import * diff --git a/train.py b/train.py index e5410eeeba9f..d243a9cb010f 100644 --- a/train.py +++ b/train.py @@ -26,7 +26,7 @@ from torch.optim import Adam, SGD, lr_scheduler from tqdm import tqdm -FILE = Path(__file__).absolute() +FILE = Path(__file__).resolve() sys.path.append(FILE.parents[0].as_posix()) # add yolov5/ to path import val # for end-of-epoch mAP diff --git a/utils/__init__.py b/utils/__init__.py index 4a61057e8083..74260ad1e5b4 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -4,7 +4,7 @@ # import torch # from PIL import ImageFont # -# FILE = Path(__file__).absolute() +# FILE = Path(__file__).resolve() # ROOT = FILE.parents[1] # yolov5/ dir # if str(ROOT) not in sys.path: # sys.path.append(str(ROOT)) # add ROOT to PATH diff --git a/utils/datasets.py b/utils/datasets.py index 0c6b9b5e2893..cb6ad29e4652 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -156,7 +156,7 @@ def __iter__(self): class LoadImages: # for inference def __init__(self, path, img_size=640, stride=32, auto=True): - p = str(Path(path).absolute()) # os-agnostic absolute path + p = str(Path(path).resolve()) # os-agnostic absolute path if '*' in p: files = sorted(glob.glob(p, recursive=True)) # glob elif os.path.isdir(p): diff --git a/utils/general.py b/utils/general.py index 229b7a7db168..5c3d8d117dc3 100755 --- a/utils/general.py +++ b/utils/general.py @@ -147,7 +147,7 @@ def is_colab(): def is_pip(): # Is file in a pip package? 
- return 'site-packages' in Path(__file__).absolute().parts + return 'site-packages' in Path(__file__).resolve().parts def is_ascii(s=''): diff --git a/utils/loggers/wandb/sweep.py b/utils/loggers/wandb/sweep.py index 0ca704dd28dd..4d5df5c8e00a 100644 --- a/utils/loggers/wandb/sweep.py +++ b/utils/loggers/wandb/sweep.py @@ -3,7 +3,7 @@ import wandb -FILE = Path(__file__).absolute() +FILE = Path(__file__).resolve() sys.path.append(FILE.parents[3].as_posix()) # add utils/ to path from train import train, parse_opt diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index 5d495c70517b..504a518f75ea 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -9,7 +9,7 @@ import yaml from tqdm import tqdm -FILE = Path(__file__).absolute() +FILE = Path(__file__).resolve() sys.path.append(FILE.parents[3].as_posix()) # add yolov5/ to path from utils.datasets import LoadImagesAndLabels diff --git a/val.py b/val.py index c8f503351ad9..00eb92bb096a 100644 --- a/val.py +++ b/val.py @@ -17,7 +17,7 @@ import torch from tqdm import tqdm -FILE = Path(__file__).absolute() +FILE = Path(__file__).resolve() sys.path.append(FILE.parents[0].as_posix()) # add yolov5/ to path from models.experimental import attempt_load From c3a93d783d1a1e920d346f62b5de9f500e4540e4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 12 Sep 2021 15:52:24 +0200 Subject: [PATCH 313/757] Add TensorFlow formats to `export.py` (#4479) * Initial commit * Remove unused export_torchscript return * ROOT variable * Add prefix to fcn arg * fix ROOT * check_yaml into run() * interim fixes * imgsz=(320, 320) * Hardcode tf_raw_resize False * Finish opt elimination * Update representative_dataset_gen() * Update export.py with TF methods * SiLU and GraphDef fixes * file_size() directory handling feature * export fixes * add lambda: to representative_dataset * Detect training False default * Fuse false for TF models * Embed agnostic NMS arguments * Remove lambda * TensorFlow.js export success * Add pb to Usage * Add *_tfjs_model/ to ignore files * prepend YOLOv5 to function headers * Remove end --- comments * parameterize tfjs export pb file * update run() data default /ROOT * update --include help * update imports * return ct_model * Consolidate TFLite export * pb prerequisite to tfjs * TF modules CamelCase * Remove exports from tf.py and cleanup * pass agnostic NMS arguments * CI * CI * ignore *_web_model/ * Add tensorflow to CI dependencies * CI tensorflow-cpu * Update requirements.txt * Remove tensorflow check_requirement * CI coreml tfjs * export only onnx torchscript * reorder exports torchscript first --- .dockerignore | 1 + .github/workflows/ci-testing.yml | 7 +- .gitignore | 1 + detect.py | 2 +- export.py | 219 +++++++++++++--- models/tf.py | 433 ++++++++++++------------------- requirements.txt | 20 +- utils/general.py | 12 +- 8 files changed, 366 insertions(+), 329 deletions(-) diff --git a/.dockerignore b/.dockerignore index 8d60b462e7d1..6c2f2b9b7725 100644 --- a/.dockerignore +++ b/.dockerignore @@ -22,6 +22,7 @@ data/samples/* **/*.h5 **/*.pb *_saved_model/ +*_web_model/ # Below Copied From .gitignore ----------------------------------------------------------------------------------------- # Below Copied From .gitignore ----------------------------------------------------------------------------------------- diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index ecd6f9bbd625..54b230a13e6b 100644 --- a/.github/workflows/ci-testing.yml 
+++ b/.github/workflows/ci-testing.yml @@ -48,7 +48,7 @@ jobs: run: | python -m pip install --upgrade pip pip install -qr requirements.txt -f https://download.pytorch.org/whl/cpu/torch_stable.html - pip install -q onnx onnx-simplifier coremltools # for export + pip install -q onnx tensorflow-cpu # for export python --version pip --version pip list @@ -75,6 +75,7 @@ jobs: python val.py --img 128 --batch 16 --weights runs/train/exp/weights/last.pt --device $di python hubconf.py # hub - python models/yolo.py --cfg ${{ matrix.model }}.yaml # inspect - python export.py --img 128 --batch 1 --weights ${{ matrix.model }}.pt --include onnx torchscript # export + python models/yolo.py --cfg ${{ matrix.model }}.yaml # build PyTorch model + python models/tf.py --weights ${{ matrix.model }}.pt # build TensorFlow model + python export.py --img 128 --batch 1 --weights ${{ matrix.model }}.pt --include torchscript onnx # export shell: bash diff --git a/.gitignore b/.gitignore index f8a2437973f0..375b71807588 100755 --- a/.gitignore +++ b/.gitignore @@ -52,6 +52,7 @@ VOC/ *.tflite *.h5 *_saved_model/ +*_web_model/ darknet53.conv.74 yolov3-tiny.conv.15 diff --git a/detect.py b/detect.py index b6597c1662f9..ef7458d52db3 100644 --- a/detect.py +++ b/detect.py @@ -253,7 +253,7 @@ def wrap_frozen_graph(gd, inputs, outputs): def parse_opt(): parser = argparse.ArgumentParser() - parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)') + parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model path(s)') parser.add_argument('--source', type=str, default='data/images', help='file/dir/URL/glob, 0 for webcam') parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w') parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold') diff --git a/export.py b/export.py index 935bdb40bc9b..8d6805893d1e 100644 --- a/export.py +++ b/export.py @@ -1,12 +1,28 @@ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ -Export a PyTorch model to TorchScript, ONNX, CoreML formats +Export a YOLOv5 PyTorch model to TorchScript, ONNX, CoreML, TensorFlow (saved_model, pb, TFLite, TF.js,) formats +TensorFlow exports authored by https://github.com/zldrobit Usage: - $ python path/to/export.py --weights yolov5s.pt --img 640 --batch 1 + $ python path/to/export.py --weights yolov5s.pt --include torchscript onnx coreml saved_model pb tflite tfjs + +Inference: + $ python path/to/detect.py --weights yolov5s.pt + yolov5s.onnx (must export with --dynamic) + yolov5s_saved_model + yolov5s.pb + yolov5s.tflite + +TensorFlow.js: + $ # Edit yolov5s_web_model/model.json to sort Identity* in ascending order + $ cd .. 
&& git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example + $ npm install + $ ln -s ../../yolov5/yolov5s_web_model public/yolov5s_web_model + $ npm start """ import argparse +import subprocess import sys import time from pathlib import Path @@ -16,40 +32,42 @@ from torch.utils.mobile_optimizer import optimize_for_mobile FILE = Path(__file__).resolve() -sys.path.append(FILE.parents[0].as_posix()) # add yolov5/ to path +ROOT = FILE.parents[0] # yolov5/ dir +sys.path.append(ROOT.as_posix()) # add yolov5/ to path from models.common import Conv -from models.yolo import Detect from models.experimental import attempt_load -from utils.activations import Hardswish, SiLU -from utils.general import colorstr, check_img_size, check_requirements, file_size, set_logging +from models.yolo import Detect +from utils.activations import SiLU +from utils.datasets import LoadImages +from utils.general import colorstr, check_dataset, check_img_size, check_requirements, file_size, set_logging from utils.torch_utils import select_device -def export_torchscript(model, img, file, optimize): - # TorchScript model export - prefix = colorstr('TorchScript:') +def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:')): + # YOLOv5 TorchScript model export try: print(f'\n{prefix} starting export with torch {torch.__version__}...') f = file.with_suffix('.torchscript.pt') - ts = torch.jit.trace(model, img, strict=False) + + ts = torch.jit.trace(model, im, strict=False) (optimize_for_mobile(ts) if optimize else ts).save(f) + print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - return ts except Exception as e: print(f'{prefix} export failure: {e}') -def export_onnx(model, img, file, opset, train, dynamic, simplify): - # ONNX model export - prefix = colorstr('ONNX:') +def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorstr('ONNX:')): + # YOLOv5 ONNX export try: check_requirements(('onnx',)) import onnx print(f'\n{prefix} starting export with onnx {onnx.__version__}...') f = file.with_suffix('.onnx') - torch.onnx.export(model, img, f, verbose=False, opset_version=opset, + + torch.onnx.export(model, im, f, verbose=False, opset_version=opset, training=torch.onnx.TrainingMode.TRAINING if train else torch.onnx.TrainingMode.EVAL, do_constant_folding=not train, input_names=['images'], @@ -73,7 +91,7 @@ def export_onnx(model, img, file, opset, train, dynamic, simplify): model_onnx, check = onnxsim.simplify( model_onnx, dynamic_input_shape=dynamic, - input_shapes={'images': list(img.shape)} if dynamic else None) + input_shapes={'images': list(im.shape)} if dynamic else None) assert check, 'assert check failed' onnx.save(model_onnx, f) except Exception as e: @@ -84,26 +102,131 @@ def export_onnx(model, img, file, opset, train, dynamic, simplify): print(f'{prefix} export failure: {e}') -def export_coreml(model, img, file): - # CoreML model export - prefix = colorstr('CoreML:') +def export_coreml(model, im, file, prefix=colorstr('CoreML:')): + # YOLOv5 CoreML export + ct_model = None try: check_requirements(('coremltools',)) import coremltools as ct print(f'\n{prefix} starting export with coremltools {ct.__version__}...') f = file.with_suffix('.mlmodel') + model.train() # CoreML exports should be placed in model.train() mode - ts = torch.jit.trace(model, img, strict=False) # TorchScript model - model = ct.convert(ts, inputs=[ct.ImageType('image', shape=img.shape, scale=1 / 255.0, bias=[0, 0, 0])]) - model.save(f) + ts = 
torch.jit.trace(model, im, strict=False) # TorchScript model + ct_model = ct.convert(ts, inputs=[ct.ImageType('image', shape=im.shape, scale=1 / 255.0, bias=[0, 0, 0])]) + ct_model.save(f) + print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') except Exception as e: print(f'\n{prefix} export failure: {e}') + return ct_model + + +def export_saved_model(model, im, file, dynamic, + tf_nms=False, agnostic_nms=False, topk_per_class=100, topk_all=100, iou_thres=0.45, + conf_thres=0.25, prefix=colorstr('TensorFlow saved_model:')): + # YOLOv5 TensorFlow saved_model export + keras_model = None + try: + import tensorflow as tf + from tensorflow import keras + from models.tf import TFModel, TFDetect + + print(f'\n{prefix} starting export with tensorflow {tf.__version__}...') + f = str(file).replace('.pt', '_saved_model') + batch_size, ch, *imgsz = list(im.shape) # BCHW -def run(weights='./yolov5s.pt', # weights path - img_size=(640, 640), # image (height, width) + tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz) + im = tf.zeros((batch_size, *imgsz, 3)) # BHWC order for TensorFlow + y = tf_model.predict(im, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres) + inputs = keras.Input(shape=(*imgsz, 3), batch_size=None if dynamic else batch_size) + outputs = tf_model.predict(inputs, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres) + keras_model = keras.Model(inputs=inputs, outputs=outputs) + keras_model.summary() + keras_model.save(f, save_format='tf') + + print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + except Exception as e: + print(f'\n{prefix} export failure: {e}') + + return keras_model + + +def export_pb(keras_model, im, file, prefix=colorstr('TensorFlow GraphDef:')): + # YOLOv5 TensorFlow GraphDef *.pb export https://github.com/leimao/Frozen_Graph_TensorFlow + try: + import tensorflow as tf + from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 + + print(f'\n{prefix} starting export with tensorflow {tf.__version__}...') + f = file.with_suffix('.pb') + + m = tf.function(lambda x: keras_model(x)) # full model + m = m.get_concrete_function(tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype)) + frozen_func = convert_variables_to_constants_v2(m) + frozen_func.graph.as_graph_def() + tf.io.write_graph(graph_or_graph_def=frozen_func.graph, logdir=str(f.parent), name=f.name, as_text=False) + + print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + except Exception as e: + print(f'\n{prefix} export failure: {e}') + + +def export_tflite(keras_model, im, file, tfl_int8, data, ncalib, prefix=colorstr('TensorFlow Lite:')): + # YOLOv5 TensorFlow Lite export + try: + import tensorflow as tf + from models.tf import representative_dataset_gen + + print(f'\n{prefix} starting export with tensorflow {tf.__version__}...') + batch_size, ch, *imgsz = list(im.shape) # BCHW + f = file.with_suffix('.tflite') + + converter = tf.lite.TFLiteConverter.from_keras_model(keras_model) + converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS] + converter.optimizations = [tf.lite.Optimize.DEFAULT] + if tfl_int8: + dataset = LoadImages(check_dataset(data)['train'], img_size=imgsz, auto=False) # representative data + converter.representative_dataset = lambda: representative_dataset_gen(dataset, ncalib) + converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] + converter.inference_input_type = tf.uint8 # or tf.int8 
+ converter.inference_output_type = tf.uint8 # or tf.int8 + converter.experimental_new_quantizer = False + f = str(file).replace('.pt', '-int8.tflite') + + tflite_model = converter.convert() + open(f, "wb").write(tflite_model) + print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + + except Exception as e: + print(f'\n{prefix} export failure: {e}') + + +def export_tfjs(keras_model, im, file, prefix=colorstr('TensorFlow.js:')): + # YOLOv5 TensorFlow.js export + try: + check_requirements(('tensorflowjs',)) + import tensorflowjs as tfjs + + print(f'\n{prefix} starting export with tensorflowjs {tfjs.__version__}...') + f = str(file).replace('.pt', '_web_model') # js dir + f_pb = file.with_suffix('.pb') # *.pb path + + cmd = f"tensorflowjs_converter --input_format=tf_frozen_model " \ + f"--output_node_names='Identity,Identity_1,Identity_2,Identity_3' {f_pb} {f}" + subprocess.run(cmd, shell=True) + + print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + except Exception as e: + print(f'\n{prefix} export failure: {e}') + + +@torch.no_grad() +def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' + weights=ROOT / 'yolov5s.pt', # weights path + imgsz=(640, 640), # image (height, width) batch_size=1, # batch size device='cpu', # cuda device, i.e. 0 or 0,1,2,3 or cpu include=('torchscript', 'onnx', 'coreml'), # include formats @@ -117,29 +240,28 @@ def run(weights='./yolov5s.pt', # weights path ): t = time.time() include = [x.lower() for x in include] - img_size *= 2 if len(img_size) == 1 else 1 # expand + tf_exports = list(x in include for x in ('saved_model', 'pb', 'tflite', 'tfjs')) # TensorFlow exports + imgsz *= 2 if len(imgsz) == 1 else 1 # expand file = Path(weights) # Load PyTorch model device = select_device(device) assert not (device.type == 'cpu' and half), '--half only compatible with GPU export, i.e. 
use --device 0' - model = attempt_load(weights, map_location=device) # load FP32 model - names = model.names + model = attempt_load(weights, map_location=device, inplace=True, fuse=not any(tf_exports)) # load FP32 model + nc, names = model.nc, model.names # number of classes, class names # Input gs = int(max(model.stride)) # grid size (max stride) - img_size = [check_img_size(x, gs) for x in img_size] # verify img_size are gs-multiples - img = torch.zeros(batch_size, 3, *img_size).to(device) # image size(1,3,320,192) iDetection + imgsz = [check_img_size(x, gs) for x in imgsz] # verify img_size are gs-multiples + im = torch.zeros(batch_size, 3, *imgsz).to(device) # image size(1,3,320,192) BCHW iDetection # Update model if half: - img, model = img.half(), model.half() # to FP16 + im, model = im.half(), model.half() # to FP16 model.train() if train else model.eval() # training mode = no Detect() layer grid construction for k, m in model.named_modules(): if isinstance(m, Conv): # assign export-friendly activations - if isinstance(m.act, nn.Hardswish): - m.act = Hardswish() - elif isinstance(m.act, nn.SiLU): + if isinstance(m.act, nn.SiLU): m.act = SiLU() elif isinstance(m, Detect): m.inplace = inplace @@ -147,16 +269,28 @@ def run(weights='./yolov5s.pt', # weights path # m.forward = m.forward_export # assign forward (optional) for _ in range(2): - y = model(img) # dry runs + y = model(im) # dry runs print(f"\n{colorstr('PyTorch:')} starting from {weights} ({file_size(weights):.1f} MB)") # Exports if 'torchscript' in include: - export_torchscript(model, img, file, optimize) + export_torchscript(model, im, file, optimize) if 'onnx' in include: - export_onnx(model, img, file, opset, train, dynamic, simplify) + export_onnx(model, im, file, opset, train, dynamic, simplify) if 'coreml' in include: - export_coreml(model, img, file) + export_coreml(model, im, file) + + # TensorFlow Exports + if any(tf_exports): + pb, tflite, tfjs = tf_exports[1:] + assert not (tflite and tfjs), 'TFLite and TF.js models must be exported separately, please pass only one type.' + model = export_saved_model(model, im, file, dynamic, tf_nms=tfjs, agnostic_nms=tfjs) # keras model + if pb or tfjs: # pb prerequisite to tfjs + export_pb(model, im, file) + if tflite: + export_tflite(model, im, file, tfl_int8=False, data=data, ncalib=100) + if tfjs: + export_tfjs(model, im, file) # Finish print(f'\nExport complete ({time.time() - t:.2f}s)' @@ -166,18 +300,21 @@ def run(weights='./yolov5s.pt', # weights path def parse_opt(): parser = argparse.ArgumentParser() - parser.add_argument('--weights', type=str, default='./yolov5s.pt', help='weights path') - parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='image (height, width)') + parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') + parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='weights path') + parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640, 640], help='image (h, w)') parser.add_argument('--batch-size', type=int, default=1, help='batch size') parser.add_argument('--device', default='cpu', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') - parser.add_argument('--include', nargs='+', default=['torchscript', 'onnx', 'coreml'], help='include formats') parser.add_argument('--half', action='store_true', help='FP16 half-precision export') parser.add_argument('--inplace', action='store_true', help='set YOLOv5 Detect() inplace=True') parser.add_argument('--train', action='store_true', help='model.train() mode') parser.add_argument('--optimize', action='store_true', help='TorchScript: optimize for mobile') - parser.add_argument('--dynamic', action='store_true', help='ONNX: dynamic axes') + parser.add_argument('--dynamic', action='store_true', help='ONNX/TF: dynamic axes') parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model') parser.add_argument('--opset', type=int, default=13, help='ONNX: opset version') + parser.add_argument('--include', nargs='+', + default=['torchscript', 'onnx'], + help='available formats are (torchscript, onnx, coreml, saved_model, pb, tflite, tfjs)') opt = parser.parse_args() return opt diff --git a/models/tf.py b/models/tf.py index d6d0f26210b2..621236240f10 100644 --- a/models/tf.py +++ b/models/tf.py @@ -1,67 +1,44 @@ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ -TensorFlow/Keras and TFLite versions of YOLOv5 +TensorFlow, Keras and TFLite versions of YOLOv5 Authored by https://github.com/zldrobit in PR https://github.com/ultralytics/yolov5/pull/1127 Usage: - $ python models/tf.py --weights yolov5s.pt --cfg yolov5s.yaml - -Export int8 TFLite models: - $ python models/tf.py --weights yolov5s.pt --cfg models/yolov5s.yaml --tfl-int8 \ - --source path/to/images/ --ncalib 100 - -Detection: - $ python detect.py --weights yolov5s.pb --img 320 - $ python detect.py --weights yolov5s_saved_model --img 320 - $ python detect.py --weights yolov5s-fp16.tflite --img 320 - $ python detect.py --weights yolov5s-int8.tflite --img 320 --tfl-int8 - -For TensorFlow.js: - $ python models/tf.py --weights yolov5s.pt --cfg models/yolov5s.yaml --img 320 --tf-nms --agnostic-nms - $ pip install tensorflowjs - $ tensorflowjs_converter \ - --input_format=tf_frozen_model \ - --output_node_names='Identity,Identity_1,Identity_2,Identity_3' \ - yolov5s.pb \ - web_model - $ # Edit web_model/model.json to sort Identity* in ascending order - $ cd .. 
&& git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example - $ npm install - $ ln -s ../../yolov5/web_model public/web_model - $ npm start + $ python models/tf.py --weights yolov5s.pt + +Export: + $ python path/to/export.py --weights yolov5s.pt --include saved_model pb tflite tfjs """ import argparse import logging -import os import sys -import traceback from copy import deepcopy from pathlib import Path -sys.path.append('./') # to run '$ python *.py' files in subdirectories +FILE = Path(__file__).resolve() +ROOT = FILE.parents[1] # yolov5/ dir +sys.path.append(ROOT.as_posix()) # add yolov5/ to path import numpy as np import tensorflow as tf import torch import torch.nn as nn -import yaml from tensorflow import keras -from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 from models.common import Conv, Bottleneck, SPP, DWConv, Focus, BottleneckCSP, Concat, autopad, C3 from models.experimental import MixConv2d, CrossConv, attempt_load from models.yolo import Detect -from utils.datasets import LoadImages -from utils.general import check_dataset, check_yaml, make_divisible +from utils.general import colorstr, make_divisible, set_logging +from utils.activations import SiLU -logger = logging.getLogger(__name__) +LOGGER = logging.getLogger(__name__) -class tf_BN(keras.layers.Layer): +class TFBN(keras.layers.Layer): # TensorFlow BatchNormalization wrapper def __init__(self, w=None): - super(tf_BN, self).__init__() + super(TFBN, self).__init__() self.bn = keras.layers.BatchNormalization( beta_initializer=keras.initializers.Constant(w.bias.numpy()), gamma_initializer=keras.initializers.Constant(w.weight.numpy()), @@ -73,20 +50,20 @@ def call(self, inputs): return self.bn(inputs) -class tf_Pad(keras.layers.Layer): +class TFPad(keras.layers.Layer): def __init__(self, pad): - super(tf_Pad, self).__init__() + super(TFPad, self).__init__() self.pad = tf.constant([[0, 0], [pad, pad], [pad, pad], [0, 0]]) def call(self, inputs): return tf.pad(inputs, self.pad, mode='constant', constant_values=0) -class tf_Conv(keras.layers.Layer): +class TFConv(keras.layers.Layer): # Standard convolution def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None): # ch_in, ch_out, weights, kernel, stride, padding, groups - super(tf_Conv, self).__init__() + super(TFConv, self).__init__() assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument" assert isinstance(k, int), "Convolution with multiple kernels are not allowed." # TensorFlow convolution padding is inconsistent with PyTorch (e.g. 
k=3 s=2 'SAME' padding) @@ -95,27 +72,29 @@ def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None): conv = keras.layers.Conv2D( c2, k, s, 'SAME' if s == 1 else 'VALID', use_bias=False, kernel_initializer=keras.initializers.Constant(w.conv.weight.permute(2, 3, 1, 0).numpy())) - self.conv = conv if s == 1 else keras.Sequential([tf_Pad(autopad(k, p)), conv]) - self.bn = tf_BN(w.bn) if hasattr(w, 'bn') else tf.identity + self.conv = conv if s == 1 else keras.Sequential([TFPad(autopad(k, p)), conv]) + self.bn = TFBN(w.bn) if hasattr(w, 'bn') else tf.identity # YOLOv5 activations if isinstance(w.act, nn.LeakyReLU): self.act = (lambda x: keras.activations.relu(x, alpha=0.1)) if act else tf.identity elif isinstance(w.act, nn.Hardswish): self.act = (lambda x: x * tf.nn.relu6(x + 3) * 0.166666667) if act else tf.identity - elif isinstance(w.act, nn.SiLU): + elif isinstance(w.act, (nn.SiLU, SiLU)): self.act = (lambda x: keras.activations.swish(x)) if act else tf.identity + else: + raise Exception(f'no matching TensorFlow activation found for {w.act}') def call(self, inputs): return self.act(self.bn(self.conv(inputs))) -class tf_Focus(keras.layers.Layer): +class TFFocus(keras.layers.Layer): # Focus wh information into c-space def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None): # ch_in, ch_out, kernel, stride, padding, groups - super(tf_Focus, self).__init__() - self.conv = tf_Conv(c1 * 4, c2, k, s, p, g, act, w.conv) + super(TFFocus, self).__init__() + self.conv = TFConv(c1 * 4, c2, k, s, p, g, act, w.conv) def call(self, inputs): # x(b,w,h,c) -> y(b,w/2,h/2,4c) # inputs = inputs / 255. # normalize 0-255 to 0-1 @@ -125,23 +104,23 @@ def call(self, inputs): # x(b,w,h,c) -> y(b,w/2,h/2,4c) inputs[:, 1::2, 1::2, :]], 3)) -class tf_Bottleneck(keras.layers.Layer): +class TFBottleneck(keras.layers.Layer): # Standard bottleneck def __init__(self, c1, c2, shortcut=True, g=1, e=0.5, w=None): # ch_in, ch_out, shortcut, groups, expansion - super(tf_Bottleneck, self).__init__() + super(TFBottleneck, self).__init__() c_ = int(c2 * e) # hidden channels - self.cv1 = tf_Conv(c1, c_, 1, 1, w=w.cv1) - self.cv2 = tf_Conv(c_, c2, 3, 1, g=g, w=w.cv2) + self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) + self.cv2 = TFConv(c_, c2, 3, 1, g=g, w=w.cv2) self.add = shortcut and c1 == c2 def call(self, inputs): return inputs + self.cv2(self.cv1(inputs)) if self.add else self.cv2(self.cv1(inputs)) -class tf_Conv2d(keras.layers.Layer): +class TFConv2d(keras.layers.Layer): # Substitution for PyTorch nn.Conv2D def __init__(self, c1, c2, k, s=1, g=1, bias=True, w=None): - super(tf_Conv2d, self).__init__() + super(TFConv2d, self).__init__() assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument" self.conv = keras.layers.Conv2D( c2, k, s, 'VALID', use_bias=bias, @@ -152,19 +131,19 @@ def call(self, inputs): return self.conv(inputs) -class tf_BottleneckCSP(keras.layers.Layer): +class TFBottleneckCSP(keras.layers.Layer): # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None): # ch_in, ch_out, number, shortcut, groups, expansion - super(tf_BottleneckCSP, self).__init__() + super(TFBottleneckCSP, self).__init__() c_ = int(c2 * e) # hidden channels - self.cv1 = tf_Conv(c1, c_, 1, 1, w=w.cv1) - self.cv2 = tf_Conv2d(c1, c_, 1, 1, bias=False, w=w.cv2) - self.cv3 = tf_Conv2d(c_, c_, 1, 1, bias=False, w=w.cv3) - self.cv4 = tf_Conv(2 * c_, c2, 1, 1, w=w.cv4) - self.bn = tf_BN(w.bn) + self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) + 
self.cv2 = TFConv2d(c1, c_, 1, 1, bias=False, w=w.cv2) + self.cv3 = TFConv2d(c_, c_, 1, 1, bias=False, w=w.cv3) + self.cv4 = TFConv(2 * c_, c2, 1, 1, w=w.cv4) + self.bn = TFBN(w.bn) self.act = lambda x: keras.activations.relu(x, alpha=0.1) - self.m = keras.Sequential([tf_Bottleneck(c_, c_, shortcut, g, e=1.0, w=w.m[j]) for j in range(n)]) + self.m = keras.Sequential([TFBottleneck(c_, c_, shortcut, g, e=1.0, w=w.m[j]) for j in range(n)]) def call(self, inputs): y1 = self.cv3(self.m(self.cv1(inputs))) @@ -172,28 +151,28 @@ def call(self, inputs): return self.cv4(self.act(self.bn(tf.concat((y1, y2), axis=3)))) -class tf_C3(keras.layers.Layer): +class TFC3(keras.layers.Layer): # CSP Bottleneck with 3 convolutions def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None): # ch_in, ch_out, number, shortcut, groups, expansion - super(tf_C3, self).__init__() + super(TFC3, self).__init__() c_ = int(c2 * e) # hidden channels - self.cv1 = tf_Conv(c1, c_, 1, 1, w=w.cv1) - self.cv2 = tf_Conv(c1, c_, 1, 1, w=w.cv2) - self.cv3 = tf_Conv(2 * c_, c2, 1, 1, w=w.cv3) - self.m = keras.Sequential([tf_Bottleneck(c_, c_, shortcut, g, e=1.0, w=w.m[j]) for j in range(n)]) + self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) + self.cv2 = TFConv(c1, c_, 1, 1, w=w.cv2) + self.cv3 = TFConv(2 * c_, c2, 1, 1, w=w.cv3) + self.m = keras.Sequential([TFBottleneck(c_, c_, shortcut, g, e=1.0, w=w.m[j]) for j in range(n)]) def call(self, inputs): return self.cv3(tf.concat((self.m(self.cv1(inputs)), self.cv2(inputs)), axis=3)) -class tf_SPP(keras.layers.Layer): +class TFSPP(keras.layers.Layer): # Spatial pyramid pooling layer used in YOLOv3-SPP def __init__(self, c1, c2, k=(5, 9, 13), w=None): - super(tf_SPP, self).__init__() + super(TFSPP, self).__init__() c_ = c1 // 2 # hidden channels - self.cv1 = tf_Conv(c1, c_, 1, 1, w=w.cv1) - self.cv2 = tf_Conv(c_ * (len(k) + 1), c2, 1, 1, w=w.cv2) + self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) + self.cv2 = TFConv(c_ * (len(k) + 1), c2, 1, 1, w=w.cv2) self.m = [keras.layers.MaxPool2D(pool_size=x, strides=1, padding='SAME') for x in k] def call(self, inputs): @@ -201,9 +180,9 @@ def call(self, inputs): return self.cv2(tf.concat([x] + [m(x) for m in self.m], 3)) -class tf_Detect(keras.layers.Layer): - def __init__(self, nc=80, anchors=(), ch=(), w=None): # detection layer - super(tf_Detect, self).__init__() +class TFDetect(keras.layers.Layer): + def __init__(self, nc=80, anchors=(), ch=(), imgsz=(640, 640), w=None): # detection layer + super(TFDetect, self).__init__() self.stride = tf.convert_to_tensor(w.stride.numpy(), dtype=tf.float32) self.nc = nc # number of classes self.no = nc + 5 # number of outputs per anchor @@ -213,22 +192,20 @@ def __init__(self, nc=80, anchors=(), ch=(), w=None): # detection layer self.anchors = tf.convert_to_tensor(w.anchors.numpy(), dtype=tf.float32) self.anchor_grid = tf.reshape(tf.convert_to_tensor(w.anchor_grid.numpy(), dtype=tf.float32), [self.nl, 1, -1, 1, 2]) - self.m = [tf_Conv2d(x, self.no * self.na, 1, w=w.m[i]) for i, x in enumerate(ch)] - self.export = False # onnx export - self.training = True # set to False after building model + self.m = [TFConv2d(x, self.no * self.na, 1, w=w.m[i]) for i, x in enumerate(ch)] + self.training = False # set to False after building model + self.imgsz = imgsz for i in range(self.nl): - ny, nx = opt.img_size[0] // self.stride[i], opt.img_size[1] // self.stride[i] + ny, nx = self.imgsz[0] // self.stride[i], self.imgsz[1] // self.stride[i] self.grid[i] = self._make_grid(nx, ny) def call(self, inputs): - # x = x.copy() # for 
profiling z = [] # inference output - self.training |= self.export x = [] for i in range(self.nl): x.append(self.m[i](inputs[i])) # x(bs,20,20,255) to x(bs,3,20,20,85) - ny, nx = opt.img_size[0] // self.stride[i], opt.img_size[1] // self.stride[i] + ny, nx = self.imgsz[0] // self.stride[i], self.imgsz[1] // self.stride[i] x[i] = tf.transpose(tf.reshape(x[i], [-1, ny * nx, self.na, self.no]), [0, 2, 1, 3]) if not self.training: # inference @@ -236,8 +213,8 @@ def call(self, inputs): xy = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # Normalize xywh to 0-1 to reduce calibration error - xy /= tf.constant([[opt.img_size[1], opt.img_size[0]]], dtype=tf.float32) - wh /= tf.constant([[opt.img_size[1], opt.img_size[0]]], dtype=tf.float32) + xy /= tf.constant([[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32) + wh /= tf.constant([[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32) y = tf.concat([xy, wh, y[..., 4:]], -1) z.append(tf.reshape(y, [-1, 3 * ny * nx, self.no])) @@ -251,25 +228,23 @@ def _make_grid(nx=20, ny=20): return tf.cast(tf.reshape(tf.stack([xv, yv], 2), [1, 1, ny * nx, 2]), dtype=tf.float32) -class tf_Upsample(keras.layers.Layer): - def __init__(self, size, scale_factor, mode, w=None): - super(tf_Upsample, self).__init__() +class TFUpsample(keras.layers.Layer): + def __init__(self, size, scale_factor, mode, w=None): # warning: all arguments needed including 'w' + super(TFUpsample, self).__init__() assert scale_factor == 2, "scale_factor must be 2" + self.upsample = lambda x: tf.image.resize(x, (x.shape[1] * 2, x.shape[2] * 2), method=mode) # self.upsample = keras.layers.UpSampling2D(size=scale_factor, interpolation=mode) - if opt.tf_raw_resize: - # with default arguments: align_corners=False, half_pixel_centers=False - self.upsample = lambda x: tf.raw_ops.ResizeNearestNeighbor(images=x, - size=(x.shape[1] * 2, x.shape[2] * 2)) - else: - self.upsample = lambda x: tf.image.resize(x, (x.shape[1] * 2, x.shape[2] * 2), method=mode) + # with default arguments: align_corners=False, half_pixel_centers=False + # self.upsample = lambda x: tf.raw_ops.ResizeNearestNeighbor(images=x, + # size=(x.shape[1] * 2, x.shape[2] * 2)) def call(self, inputs): return self.upsample(inputs) -class tf_Concat(keras.layers.Layer): +class TFConcat(keras.layers.Layer): def __init__(self, dimension=1, w=None): - super(tf_Concat, self).__init__() + super(TFConcat, self).__init__() assert dimension == 1, "convert only NCHW to NHWC concat" self.d = 3 @@ -277,8 +252,8 @@ def call(self, inputs): return tf.concat(inputs, self.d) -def parse_model(d, ch, model): # model_dict, input_channels(3) - logger.info('\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments')) +def parse_model(d, ch, model, imgsz): # model_dict, input_channels(3) + LOGGER.info('\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments')) anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'] na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors no = na * (nc + 5) # number of outputs = anchors * (classes + 5) @@ -310,10 +285,11 @@ def parse_model(d, ch, model): # model_dict, input_channels(3) args.append([ch[x + 1] for x in f]) if isinstance(args[1], int): # number of anchors args[1] = [list(range(args[1] * 2))] * len(f) + args.append(imgsz) else: c2 = ch[f] - tf_m = eval('tf_' + m_str.replace('nn.', '')) + tf_m = eval('TF' + m_str.replace('nn.', '')) m_ = 
keras.Sequential([tf_m(*args, w=model.model[i][j]) for j in range(n)]) if n > 1 \ else tf_m(*args, w=model.model[i]) # module @@ -321,16 +297,16 @@ def parse_model(d, ch, model): # model_dict, input_channels(3) t = str(m)[8:-2].replace('__main__.', '') # module type np = sum([x.numel() for x in torch_m_.parameters()]) # number params m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params - logger.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n, np, t, args)) # print + LOGGER.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n, np, t, args)) # print save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist layers.append(m_) ch.append(c2) return keras.Sequential(layers), sorted(save) -class tf_Model(): - def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, model=None): # model, input channels, number of classes - super(tf_Model, self).__init__() +class TFModel: + def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, model=None, imgsz=(640, 640)): # model, channels, classes + super(TFModel, self).__init__() if isinstance(cfg, dict): self.yaml = cfg # model dict else: # is *.yaml @@ -343,9 +319,10 @@ def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, model=None): # model, inp if nc and nc != self.yaml['nc']: print('Overriding %s nc=%g with nc=%g' % (cfg, self.yaml['nc'], nc)) self.yaml['nc'] = nc # override yaml value - self.model, self.savelist = parse_model(deepcopy(self.yaml), ch=[ch], model=model) # model, savelist, ch_out + self.model, self.savelist = parse_model(deepcopy(self.yaml), ch=[ch], model=model, imgsz=imgsz) - def predict(self, inputs, profile=False): + def predict(self, inputs, tf_nms=False, agnostic_nms=False, topk_per_class=100, topk_all=100, iou_thres=0.45, + conf_thres=0.25): y = [] # outputs x = inputs for i, m in enumerate(self.model.layers): @@ -356,18 +333,18 @@ def predict(self, inputs, profile=False): y.append(x if m.i in self.savelist else None) # save output # Add TensorFlow NMS - if opt.tf_nms: - boxes = xywh2xyxy(x[0][..., :4]) + if tf_nms: + boxes = self._xywh2xyxy(x[0][..., :4]) probs = x[0][:, :, 4:5] classes = x[0][:, :, 5:] scores = probs * classes - if opt.agnostic_nms: - nms = agnostic_nms_layer()((boxes, classes, scores)) + if agnostic_nms: + nms = AgnosticNMS()((boxes, classes, scores), topk_all, iou_thres, conf_thres) return nms, x[1] else: boxes = tf.expand_dims(boxes, 2) nms = tf.image.combined_non_max_suppression( - boxes, scores, opt.topk_per_class, opt.topk_all, opt.iou_thres, opt.score_thres, clip_boxes=False) + boxes, scores, topk_per_class, topk_all, iou_thres, conf_thres, clip_boxes=False) return nms, x[1] return x[0] # output only first tensor [1,6300,85] = [xywh, conf, class0, class1, ...] 
@@ -377,182 +354,94 @@ def predict(self, inputs, profile=False): # cls = tf.reshape(tf.cast(tf.argmax(x[..., 5:], axis=1), tf.float32), (-1, 1)) # x(6300,1) classes # return tf.concat([conf, cls, xywh], 1) + @staticmethod + def _xywh2xyxy(xywh): + # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right + x, y, w, h = tf.split(xywh, num_or_size_splits=4, axis=-1) + return tf.concat([x - w / 2, y - h / 2, x + w / 2, y + h / 2], axis=-1) + -class agnostic_nms_layer(keras.layers.Layer): - # wrap map_fn to avoid TypeSpec related error https://stackoverflow.com/a/65809989/3036450 - def call(self, input): - return tf.map_fn(agnostic_nms, input, +class AgnosticNMS(keras.layers.Layer): + # TF Agnostic NMS + def call(self, input, topk_all, iou_thres, conf_thres): + # wrap map_fn to avoid TypeSpec related error https://stackoverflow.com/a/65809989/3036450 + return tf.map_fn(self._nms, input, fn_output_signature=(tf.float32, tf.float32, tf.float32, tf.int32), name='agnostic_nms') - -def agnostic_nms(x): - boxes, classes, scores = x - class_inds = tf.cast(tf.argmax(classes, axis=-1), tf.float32) - scores_inp = tf.reduce_max(scores, -1) - selected_inds = tf.image.non_max_suppression( - boxes, scores_inp, max_output_size=opt.topk_all, iou_threshold=opt.iou_thres, score_threshold=opt.score_thres) - selected_boxes = tf.gather(boxes, selected_inds) - padded_boxes = tf.pad(selected_boxes, - paddings=[[0, opt.topk_all - tf.shape(selected_boxes)[0]], [0, 0]], - mode="CONSTANT", constant_values=0.0) - selected_scores = tf.gather(scores_inp, selected_inds) - padded_scores = tf.pad(selected_scores, - paddings=[[0, opt.topk_all - tf.shape(selected_boxes)[0]]], - mode="CONSTANT", constant_values=-1.0) - selected_classes = tf.gather(class_inds, selected_inds) - padded_classes = tf.pad(selected_classes, - paddings=[[0, opt.topk_all - tf.shape(selected_boxes)[0]]], - mode="CONSTANT", constant_values=-1.0) - valid_detections = tf.shape(selected_inds)[0] - return padded_boxes, padded_scores, padded_classes, valid_detections - - -def xywh2xyxy(xywh): - # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right - x, y, w, h = tf.split(xywh, num_or_size_splits=4, axis=-1) - return tf.concat([x - w / 2, y - h / 2, x + w / 2, y + h / 2], axis=-1) - - -def representative_dataset_gen(): - # Representative dataset for use with converter.representative_dataset - n = 0 - for path, img, im0s, vid_cap in dataset: - # Get sample input data as a numpy array in a method of your choosing. 
- n += 1 + @staticmethod + def _nms(x, topk_all=100, iou_thres=0.45, conf_thres=0.25): # agnostic NMS + boxes, classes, scores = x + class_inds = tf.cast(tf.argmax(classes, axis=-1), tf.float32) + scores_inp = tf.reduce_max(scores, -1) + selected_inds = tf.image.non_max_suppression( + boxes, scores_inp, max_output_size=topk_all, iou_threshold=iou_thres, score_threshold=conf_thres) + selected_boxes = tf.gather(boxes, selected_inds) + padded_boxes = tf.pad(selected_boxes, + paddings=[[0, topk_all - tf.shape(selected_boxes)[0]], [0, 0]], + mode="CONSTANT", constant_values=0.0) + selected_scores = tf.gather(scores_inp, selected_inds) + padded_scores = tf.pad(selected_scores, + paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]], + mode="CONSTANT", constant_values=-1.0) + selected_classes = tf.gather(class_inds, selected_inds) + padded_classes = tf.pad(selected_classes, + paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]], + mode="CONSTANT", constant_values=-1.0) + valid_detections = tf.shape(selected_inds)[0] + return padded_boxes, padded_scores, padded_classes, valid_detections + + +def representative_dataset_gen(dataset, ncalib=100): + # Representative dataset generator for use with converter.representative_dataset, returns a generator of np arrays + for n, (path, img, im0s, vid_cap) in enumerate(dataset): input = np.transpose(img, [1, 2, 0]) input = np.expand_dims(input, axis=0).astype(np.float32) input /= 255.0 yield [input] - if n >= opt.ncalib: + if n >= ncalib: break -if __name__ == "__main__": +def run(weights=ROOT / 'yolov5s.pt', # weights path + imgsz=(640, 640), # inference size h,w + batch_size=1, # batch size + dynamic=False, # dynamic batch size + ): + # PyTorch model + im = torch.zeros((batch_size, 3, *imgsz)) # BCHW image + model = attempt_load(weights, map_location=torch.device('cpu'), inplace=True, fuse=False) + y = model(im) # inference + model.info() + + # TensorFlow model + im = tf.zeros((batch_size, *imgsz, 3)) # BHWC image + tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz) + y = tf_model.predict(im) # inference + + # Keras model + im = keras.Input(shape=(*imgsz, 3), batch_size=None if dynamic else batch_size) + keras_model = keras.Model(inputs=im, outputs=tf_model.predict(im)) + keras_model.summary() + + +def parse_opt(): parser = argparse.ArgumentParser() - parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='cfg path') - parser.add_argument('--weights', type=str, default='yolov5s.pt', help='weights path') - parser.add_argument('--img-size', nargs='+', type=int, default=[320, 320], help='image size') # height, width + parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='weights path') + parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w') parser.add_argument('--batch-size', type=int, default=1, help='batch size') - parser.add_argument('--dynamic-batch-size', action='store_true', help='dynamic batch size') - parser.add_argument('--source', type=str, default='../data/coco128.yaml', help='dir of images or data.yaml file') - parser.add_argument('--ncalib', type=int, default=100, help='number of calibration images') - parser.add_argument('--tfl-int8', action='store_true', dest='tfl_int8', help='export TFLite int8 model') - parser.add_argument('--tf-nms', action='store_true', dest='tf_nms', help='TF NMS (without TFLite export)') - parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') - 
parser.add_argument('--tf-raw-resize', action='store_true', dest='tf_raw_resize', - help='use tf.raw_ops.ResizeNearestNeighbor for resize') - parser.add_argument('--topk-per-class', type=int, default=100, help='topk per class to keep in NMS') - parser.add_argument('--topk-all', type=int, default=100, help='topk for all classes to keep in NMS') - parser.add_argument('--iou-thres', type=float, default=0.5, help='IOU threshold for NMS') - parser.add_argument('--score-thres', type=float, default=0.4, help='score threshold for NMS') + parser.add_argument('--dynamic', action='store_true', help='dynamic batch size') opt = parser.parse_args() - opt.cfg = check_yaml(opt.cfg) # check YAML - opt.img_size *= 2 if len(opt.img_size) == 1 else 1 # expand - print(opt) - - # Input - img = torch.zeros((opt.batch_size, 3, *opt.img_size)) # image size(1,3,320,192) iDetection - - # Load PyTorch model - model = attempt_load(opt.weights, map_location=torch.device('cpu'), inplace=True, fuse=False) - model.model[-1].export = False # set Detect() layer export=True - y = model(img) # dry run - nc = y[0].shape[-1] - 5 - - # TensorFlow saved_model export - try: - print('\nStarting TensorFlow saved_model export with TensorFlow %s...' % tf.__version__) - tf_model = tf_Model(opt.cfg, model=model, nc=nc) - img = tf.zeros((opt.batch_size, *opt.img_size, 3)) # NHWC Input for TensorFlow - - m = tf_model.model.layers[-1] - assert isinstance(m, tf_Detect), "the last layer must be Detect" - m.training = False - y = tf_model.predict(img) - - inputs = keras.Input(shape=(*opt.img_size, 3), batch_size=None if opt.dynamic_batch_size else opt.batch_size) - keras_model = keras.Model(inputs=inputs, outputs=tf_model.predict(inputs)) - keras_model.summary() - path = opt.weights.replace('.pt', '_saved_model') # filename - keras_model.save(path, save_format='tf') - print('TensorFlow saved_model export success, saved as %s' % path) - except Exception as e: - print('TensorFlow saved_model export failure: %s' % e) - traceback.print_exc(file=sys.stdout) - - # TensorFlow GraphDef export - try: - print('\nStarting TensorFlow GraphDef export with TensorFlow %s...' % tf.__version__) - - # https://github.com/leimao/Frozen_Graph_TensorFlow - full_model = tf.function(lambda x: keras_model(x)) - full_model = full_model.get_concrete_function( - tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype)) - - frozen_func = convert_variables_to_constants_v2(full_model) - frozen_func.graph.as_graph_def() - f = opt.weights.replace('.pt', '.pb') # filename - tf.io.write_graph(graph_or_graph_def=frozen_func.graph, - logdir=os.path.dirname(f), - name=os.path.basename(f), - as_text=False) - - print('TensorFlow GraphDef export success, saved as %s' % f) - except Exception as e: - print('TensorFlow GraphDef export failure: %s' % e) - traceback.print_exc(file=sys.stdout) - - # TFLite model export - if not opt.tf_nms: - try: - print('\nStarting TFLite export with TensorFlow %s...' 
% tf.__version__) - - # fp32 TFLite model export --------------------------------------------------------------------------------- - # converter = tf.lite.TFLiteConverter.from_keras_model(keras_model) - # converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS] - # converter.allow_custom_ops = False - # converter.experimental_new_converter = True - # tflite_model = converter.convert() - # f = opt.weights.replace('.pt', '.tflite') # filename - # open(f, "wb").write(tflite_model) - - # fp16 TFLite model export --------------------------------------------------------------------------------- - converter = tf.lite.TFLiteConverter.from_keras_model(keras_model) - converter.optimizations = [tf.lite.Optimize.DEFAULT] - # converter.representative_dataset = representative_dataset_gen - # converter.target_spec.supported_types = [tf.float16] - converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS] - converter.allow_custom_ops = False - converter.experimental_new_converter = True - tflite_model = converter.convert() - f = opt.weights.replace('.pt', '-fp16.tflite') # filename - open(f, "wb").write(tflite_model) - print('\nTFLite export success, saved as %s' % f) - - # int8 TFLite model export --------------------------------------------------------------------------------- - if opt.tfl_int8: - # Representative Dataset - if opt.source.endswith('.yaml'): - with open(check_yaml(opt.source)) as f: - data = yaml.load(f, Loader=yaml.FullLoader) # data dict - check_dataset(data) # check - opt.source = data['train'] - dataset = LoadImages(opt.source, img_size=opt.img_size, auto=False) - converter = tf.lite.TFLiteConverter.from_keras_model(keras_model) - converter.optimizations = [tf.lite.Optimize.DEFAULT] - converter.representative_dataset = representative_dataset_gen - converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] - converter.inference_input_type = tf.uint8 # or tf.int8 - converter.inference_output_type = tf.uint8 # or tf.int8 - converter.allow_custom_ops = False - converter.experimental_new_converter = True - converter.experimental_new_quantizer = False - tflite_model = converter.convert() - f = opt.weights.replace('.pt', '-int8.tflite') # filename - open(f, "wb").write(tflite_model) - print('\nTFLite (int8) export success, saved as %s' % f) - - except Exception as e: - print('\nTFLite export failure: %s' % e) - traceback.print_exc(file=sys.stdout) + opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand + return opt + + +def main(opt): + set_logging() + print(colorstr('tf.py: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items())) + run(**vars(opt)) + + +if __name__ == "__main__": + opt = parse_opt() + main(opt) diff --git a/requirements.txt b/requirements.txt index 2ad65ba53e29..b84b353f75f3 100755 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ # pip install -r requirements.txt -# base ---------------------------------------- +# Base ---------------------------------------- matplotlib>=3.2.2 numpy>=1.18.5 opencv-python>=4.1.2 @@ -11,21 +11,23 @@ torch>=1.7.0 torchvision>=0.8.1 tqdm>=4.41.0 -# logging ------------------------------------- +# Logging ------------------------------------- tensorboard>=2.4.1 # wandb -# plotting ------------------------------------ +# Plotting ------------------------------------ seaborn>=0.11.0 pandas -# export -------------------------------------- -# coremltools>=4.1 -# onnx>=1.9.0 -# scikit-learn==0.19.2 # for coreml quantization -# tensorflow==2.4.1 # for TFLite export +# Export 
--------------------------------------
+# coremltools>=4.1  # CoreML export
+# onnx>=1.9.0  # ONNX export
+# onnx-simplifier>=0.3.6  # ONNX simplifier
+# scikit-learn==0.19.2  # CoreML quantization
+# tensorflow>=2.4.1  # TFLite export
+# tensorflowjs>=3.9.0  # TF.js export

-# extras --------------------------------------
+# Extras --------------------------------------
 # Cython  # for pycocotools https://github.com/cocodataset/cocoapi/issues/172
 # pycocotools>=2.0  # COCO mAP
 # albumentations>=1.0.3
diff --git a/utils/general.py b/utils/general.py
index 5c3d8d117dc3..7a80b2ea81bc 100755
--- a/utils/general.py
+++ b/utils/general.py
@@ -161,9 +161,15 @@ def emojis(str=''):
     return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str


-def file_size(file):
-    # Return file size in MB
-    return Path(file).stat().st_size / 1e6
+def file_size(path):
+    # Return file/dir size (MB)
+    path = Path(path)
+    if path.is_file():
+        return path.stat().st_size / 1E6
+    elif path.is_dir():
+        return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file()) / 1E6
+    else:
+        return 0.0


 def check_online():

From b161edf8738c4020ca4ffb6f73ce2d881cc47d59 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Sun, 12 Sep 2021 17:55:41 +0200
Subject: [PATCH 314/757] Update ci-testing.yml (#4770)

---
 .github/workflows/ci-testing.yml | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml
index 54b230a13e6b..3272c0316113 100644
--- a/.github/workflows/ci-testing.yml
+++ b/.github/workflows/ci-testing.yml
@@ -8,6 +8,8 @@ on:  # https://help.github.com/en/actions/reference/events-that-trigger-workflows
   pull_request:
     # The branches below must be a subset of the branches above
     branches: [master, develop]
+  schedule:
+    - cron: '0 0 * * *'  # Runs at 00:00 UTC every day

 jobs:
   cpu-tests:

From aa1859909c96d5e1fc839b2746b45038ee8465c9 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Sun, 12 Sep 2021 23:40:28 +0200
Subject: [PATCH 315/757] Update ci-testing.yml (#4772)

---
 .github/workflows/ci-testing.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml
index 3272c0316113..71f39c16c4ed 100644
--- a/.github/workflows/ci-testing.yml
+++ b/.github/workflows/ci-testing.yml
@@ -4,10 +4,10 @@ name: CI CPU testing
 on:  # https://help.github.com/en/actions/reference/events-that-trigger-workflows
   push:
-    branches: [master, develop]
+    branches: [master]
   pull_request:
     # The branches below must be a subset of the branches above
-    branches: [master, develop]
+    branches: [master]
   schedule:
     - cron: '0 0 * * *'  # Runs at 00:00 UTC every day

 jobs:

From fcb225c1c81a17fbaeff027b1f4be5300049e8a8 Mon Sep 17 00:00:00 2001
From: Jihoon Kim <41357160+kimnamu@users.noreply.github.com>
Date: Wed, 15 Sep 2021 07:57:06 +0900
Subject: [PATCH 316/757] Shuffle all 4(or 9) images in mosaic augmentation
 (#4787)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Thank you for sharing this nice open-source code 👍
This change shuffles the order of all 4 (or 9) images in mosaic augmentation. Currently the order is not completely random: the first image always occupies the top-left position, and only the remaining images are randomly arranged. Shuffling all of them increases the diversity of the mosaic compositions.
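A minimal sketch of the selection-and-shuffle logic (using a hypothetical standalone helper, not part of the diff; the actual change is the one-line random.shuffle() shown below):

    import random

    def pick_mosaic_indices(index, all_indices, k=3):
        # Anchor image plus k random companions (k=3 for 4-image, k=8 for 9-image mosaics),
        # then shuffle all of them so the anchor can land in any mosaic position
        indices = [index] + random.choices(all_indices, k=k)
        random.shuffle(indices)
        return indices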
--- utils/datasets.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/utils/datasets.py b/utils/datasets.py index cb6ad29e4652..4a4b187da345 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -661,6 +661,7 @@ def load_mosaic(self, index): s = self.img_size yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices + random.shuffle(indices) for i, index in enumerate(indices): # Load image img, _, (h, w) = load_image(self, index) @@ -717,6 +718,7 @@ def load_mosaic9(self, index): labels9, segments9 = [], [] s = self.img_size indices = [index] + random.choices(self.indices, k=8) # 8 additional image indices + random.shuffle(indices) for i, index in enumerate(indices): # Load image img, _, (h, w) = load_image(self, index) From b74dd4ba4f295eaacc8cc3ac75270ba40a2d9ef6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 15 Sep 2021 11:33:46 +0200 Subject: [PATCH 317/757] Add `--int8` argument (#4799) * Add `--int8` argument * parents[0] bug fix * Fix order --- export.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/export.py b/export.py index 8d6805893d1e..ea7f1ebd0b1f 100644 --- a/export.py +++ b/export.py @@ -33,7 +33,8 @@ FILE = Path(__file__).resolve() ROOT = FILE.parents[0] # yolov5/ dir -sys.path.append(ROOT.as_posix()) # add yolov5/ to path +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH from models.common import Conv from models.experimental import attempt_load @@ -174,7 +175,7 @@ def export_pb(keras_model, im, file, prefix=colorstr('TensorFlow GraphDef:')): print(f'\n{prefix} export failure: {e}') -def export_tflite(keras_model, im, file, tfl_int8, data, ncalib, prefix=colorstr('TensorFlow Lite:')): +def export_tflite(keras_model, im, file, int8, data, ncalib, prefix=colorstr('TensorFlow Lite:')): # YOLOv5 TensorFlow Lite export try: import tensorflow as tf @@ -187,7 +188,7 @@ def export_tflite(keras_model, im, file, tfl_int8, data, ncalib, prefix=colorstr converter = tf.lite.TFLiteConverter.from_keras_model(keras_model) converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS] converter.optimizations = [tf.lite.Optimize.DEFAULT] - if tfl_int8: + if int8: dataset = LoadImages(check_dataset(data)['train'], img_size=imgsz, auto=False) # representative data converter.representative_dataset = lambda: representative_dataset_gen(dataset, ncalib) converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] @@ -234,7 +235,8 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' inplace=False, # set YOLOv5 Detect() inplace=True train=False, # model.train() mode optimize=False, # TorchScript: optimize for mobile - dynamic=False, # ONNX: dynamic axes + int8=False, # CoreML/TF INT8 quantization + dynamic=False, # ONNX/TF: dynamic axes simplify=False, # ONNX: simplify model opset=12, # ONNX: opset version ): @@ -288,7 +290,7 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' if pb or tfjs: # pb prerequisite to tfjs export_pb(model, im, file) if tflite: - export_tflite(model, im, file, tfl_int8=False, data=data, ncalib=100) + export_tflite(model, im, file, int8=int8, data=data, ncalib=100) if tfjs: export_tfjs(model, im, file) @@ -309,6 +311,7 @@ def parse_opt(): parser.add_argument('--inplace', action='store_true', help='set YOLOv5 Detect() inplace=True') parser.add_argument('--train', action='store_true', help='model.train() mode') 
parser.add_argument('--optimize', action='store_true', help='TorchScript: optimize for mobile') + parser.add_argument('--int8', action='store_true', help='CoreML/TF INT8 quantization') parser.add_argument('--dynamic', action='store_true', help='ONNX/TF: dynamic axes') parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model') parser.add_argument('--opset', type=int, default=13, help='ONNX: opset version') From 621b6d5ba80707ca98242dd7c71d738e5594b41e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 15 Sep 2021 13:13:33 +0200 Subject: [PATCH 318/757] Evolution `--resume` fix (#4802) Also disable `/weights` dir creation when evolving as no weights are saved and empty folder causes user expectations of weights. --- train.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/train.py b/train.py index d243a9cb010f..1d0c2c608878 100644 --- a/train.py +++ b/train.py @@ -64,7 +64,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Directories w = save_dir / 'weights' # weights dir - w.mkdir(parents=True, exist_ok=True) # make dir + (w.parent if evolve else w).mkdir(parents=True, exist_ok=True) # make dir last, best = w / 'last.pt', w / 'best.pt' # Hyperparameters @@ -489,7 +489,7 @@ def main(opt, callbacks=Callbacks()): assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified' if opt.evolve: opt.project = 'runs/evolve' - opt.exist_ok = opt.resume + opt.exist_ok, opt.resume = opt.resume, False # pass resume to exist_ok and disable resume opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # DDP mode From 0dc725e3dc36283ef657088bbc9e05461311c921 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 16 Sep 2021 09:47:34 +0200 Subject: [PATCH 319/757] Refactor `forward()` method profiling (#4816) --- models/yolo.py | 41 +++++++++++++++++++---------------------- 1 file changed, 19 insertions(+), 22 deletions(-) diff --git a/models/yolo.py b/models/yolo.py index 9eddf4a08e49..0a27b24dede7 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -98,7 +98,6 @@ def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, i self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]) # model, savelist self.names = [str(i) for i in range(self.yaml['nc'])] # default names self.inplace = self.yaml.get('inplace', True) - # LOGGER.info([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))]) # Build strides, anchors m = self.model[-1] # Detect() @@ -110,7 +109,6 @@ def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, i check_anchor_order(m) self.stride = m.stride self._initialize_biases() # only run once - # LOGGER.info('Strides: %s' % m.stride.tolist()) # Init weights, biases initialize_weights(self) @@ -119,47 +117,33 @@ def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, i def forward(self, x, augment=False, profile=False, visualize=False): if augment: - return self.forward_augment(x) # augmented inference, None - return self.forward_once(x, profile, visualize) # single-scale inference, train + return self._forward_augment(x) # augmented inference, None + return self._forward_once(x, profile, visualize) # single-scale inference, train - def forward_augment(self, x): + def _forward_augment(self, x): img_size = x.shape[-2:] # height, width s = [1, 0.83, 0.67] # scales f = [None, 3, None] # flips (2-ud, 3-lr) y = [] # outputs for si, fi in zip(s, f): xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max())) 
- yi = self.forward_once(xi)[0] # forward + yi = self._forward_once(xi)[0] # forward # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1]) # save yi = self._descale_pred(yi, fi, si, img_size) y.append(yi) return torch.cat(y, 1), None # augmented inference, train - def forward_once(self, x, profile=False, visualize=False): + def _forward_once(self, x, profile=False, visualize=False): y, dt = [], [] # outputs for m in self.model: if m.f != -1: # if not from previous layer x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers - if profile: - c = isinstance(m, Detect) # copy input as inplace fix - o = thop.profile(m, inputs=(x.copy() if c else x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPs - t = time_sync() - for _ in range(10): - m(x.copy() if c else x) - dt.append((time_sync() - t) * 100) - if m == self.model[0]: - LOGGER.info(f"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s} {'module'}") - LOGGER.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f} {m.type}') - + self._profile_one_layer(m, x, dt) x = m(x) # run y.append(x if m.i in self.save else None) # save output - if visualize: feature_visualization(x, m.type, m.i, save_dir=visualize) - - if profile: - LOGGER.info('%.1fms total' % sum(dt)) return x def _descale_pred(self, p, flips, scale, img_size): @@ -179,6 +163,19 @@ def _descale_pred(self, p, flips, scale, img_size): p = torch.cat((x, y, wh, p[..., 4:]), -1) return p + def _profile_one_layer(self, m, x, dt): + c = isinstance(m, Detect) # is final layer, copy input as inplace fix + o = thop.profile(m, inputs=(x.copy() if c else x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPs + t = time_sync() + for _ in range(10): + m(x.copy() if c else x) + dt.append((time_sync() - t) * 100) + if m == self.model[0]: + LOGGER.info(f"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s} {'module'}") + LOGGER.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f} {m.type}') + if c: + LOGGER.info(f"{sum(dt):10.2f} {'-':>10s} {'-':>10s} Total") + def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency # https://arxiv.org/abs/1708.02002 section 3.3 # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1. 
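Note on the profiling refactor above: `_profile_one_layer()` pairs a 10-run timing loop with an optional thop FLOPs estimate. A minimal standalone sketch of the same pattern follows (assumptions: plain `time.time()` instead of the repo's CUDA-synchronized `time_sync()`, and `profile_layer` is a hypothetical name):

    import time

    import torch
    import torch.nn as nn

    try:
        import thop  # optional dependency: pip install thop
    except ImportError:
        thop = None

    def profile_layer(m, x, runs=10):
        # thop returns MACs, so multiply by 2 for FLOPs and divide by 1E9 for GFLOPs
        flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 if thop else 0
        t = time.time()
        for _ in range(runs):
            m(x)
        dt = (time.time() - t) * 1000 / runs  # average ms per forward pass
        return dt, flops

    m = nn.Conv2d(3, 16, 3, padding=1)
    dt, flops = profile_layer(m, torch.zeros(1, 3, 64, 64))
    print(f'{dt:.2f} ms/pass, {flops:.2f} GFLOPs')
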
From 43b2817f6e615497a10a0921a8df8b0e3d286210 Mon Sep 17 00:00:00 2001 From: Kalen Michael Date: Thu, 16 Sep 2021 13:33:54 +0200 Subject: [PATCH 320/757] Feature/fix export on url (#4823) * added callbacks * added back callback to main * added save_dir to callback output * merged in upstream * removed ghost code * added url check * Add url2file() * Update file-only Co-authored-by: Glenn Jocher --- export.py | 4 ++-- utils/general.py | 7 +++++++ 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/export.py b/export.py index ea7f1ebd0b1f..4ec3c3e0c711 100644 --- a/export.py +++ b/export.py @@ -41,7 +41,7 @@ from models.yolo import Detect from utils.activations import SiLU from utils.datasets import LoadImages -from utils.general import colorstr, check_dataset, check_img_size, check_requirements, file_size, set_logging +from utils.general import colorstr, check_dataset, check_img_size, check_requirements, file_size, set_logging, url2file from utils.torch_utils import select_device @@ -244,7 +244,7 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' include = [x.lower() for x in include] tf_exports = list(x in include for x in ('saved_model', 'pb', 'tflite', 'tfjs')) # TensorFlow exports imgsz *= 2 if len(imgsz) == 1 else 1 # expand - file = Path(weights) + file = Path(url2file(weights) if str(weights).startswith(('http:/', 'https:/')) else weights) # Load PyTorch model device = select_device(device) diff --git a/utils/general.py b/utils/general.py index 7a80b2ea81bc..dc9a10fe8617 100755 --- a/utils/general.py +++ b/utils/general.py @@ -360,6 +360,13 @@ def check_dataset(data, autodownload=True): return data # dictionary +def url2file(url): + # Convert URL to filename, i.e. https://url.com/file.txt?auth -> file.txt + url = str(Path(url)).replace(':/', '://') # Pathlib turns :// -> :/ + file = Path(urllib.parse.unquote(url)).name.split('?')[0] # '%2F' to '/', split https://url.com/file.txt?auth + return file + + def download(url, dir='.', unzip=True, delete=True, curl=False, threads=1): # Multi-threaded file download and unzip function, used in data.yaml for autodownload def download_one(url, dir): From 6b44ecd53dd299ccaa54cff4194e0f4e323bbc40 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 16 Sep 2021 14:38:35 +0200 Subject: [PATCH 321/757] Fix 'PyTorch starting from' for URL weights (#4828) Follows #4823 --- export.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/export.py b/export.py index 4ec3c3e0c711..a5139c0a965e 100644 --- a/export.py +++ b/export.py @@ -272,7 +272,7 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' for _ in range(2): y = model(im) # dry runs - print(f"\n{colorstr('PyTorch:')} starting from {weights} ({file_size(weights):.1f} MB)") + print(f"\n{colorstr('PyTorch:')} starting from {file} ({file_size(file):.1f} MB)") # Exports if 'torchscript' in include: From 3beb871ba4558c9e720388e6632798c4eb267d4f Mon Sep 17 00:00:00 2001 From: Jiacong Fang Date: Thu, 16 Sep 2021 21:27:22 +0800 Subject: [PATCH 322/757] Multiple TF export improvements (#4824) * Add fused conv support * Set all saved_model values to non trainable * Fix TFLite fp16 model export * Fix int8 TFLite conversion --- export.py | 7 +++++-- models/tf.py | 5 +++-- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/export.py b/export.py index a5139c0a965e..dd7eefc51702 100644 --- a/export.py +++ b/export.py @@ -145,6 +145,7 @@ def export_saved_model(model, im, file, dynamic, inputs = keras.Input(shape=(*imgsz, 3), batch_size=None if dynamic 
else batch_size) outputs = tf_model.predict(inputs, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres) keras_model = keras.Model(inputs=inputs, outputs=outputs) + keras_model.trainable = False keras_model.summary() keras_model.save(f, save_format='tf') @@ -183,15 +184,17 @@ def export_tflite(keras_model, im, file, int8, data, ncalib, prefix=colorstr('Te print(f'\n{prefix} starting export with tensorflow {tf.__version__}...') batch_size, ch, *imgsz = list(im.shape) # BCHW - f = file.with_suffix('.tflite') + f = str(file).replace('.pt', '-fp16.tflite') converter = tf.lite.TFLiteConverter.from_keras_model(keras_model) converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS] + converter.target_spec.supported_types = [tf.float16] converter.optimizations = [tf.lite.Optimize.DEFAULT] if int8: dataset = LoadImages(check_dataset(data)['train'], img_size=imgsz, auto=False) # representative data converter.representative_dataset = lambda: representative_dataset_gen(dataset, ncalib) converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] + converter.target_spec.supported_types = [] converter.inference_input_type = tf.uint8 # or tf.int8 converter.inference_output_type = tf.uint8 # or tf.int8 converter.experimental_new_quantizer = False @@ -249,7 +252,7 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' # Load PyTorch model device = select_device(device) assert not (device.type == 'cpu' and half), '--half only compatible with GPU export, i.e. use --device 0' - model = attempt_load(weights, map_location=device, inplace=True, fuse=not any(tf_exports)) # load FP32 model + model = attempt_load(weights, map_location=device, inplace=True, fuse=True) # load FP32 model nc, names = model.nc, model.names # number of classes, class names # Input diff --git a/models/tf.py b/models/tf.py index 621236240f10..5d7153f246eb 100644 --- a/models/tf.py +++ b/models/tf.py @@ -70,8 +70,9 @@ def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None): # see https://stackoverflow.com/questions/52975843/comparing-conv2d-with-padding-between-tensorflow-and-pytorch conv = keras.layers.Conv2D( - c2, k, s, 'SAME' if s == 1 else 'VALID', use_bias=False, - kernel_initializer=keras.initializers.Constant(w.conv.weight.permute(2, 3, 1, 0).numpy())) + c2, k, s, 'SAME' if s == 1 else 'VALID', use_bias=False if hasattr(w, 'bn') else True, + kernel_initializer=keras.initializers.Constant(w.conv.weight.permute(2, 3, 1, 0).numpy()), + bias_initializer='zeros' if hasattr(w, 'bn') else keras.initializers.Constant(w.conv.bias.numpy())) self.conv = conv if s == 1 else keras.Sequential([TFPad(autopad(k, p)), conv]) self.bn = TFBN(w.bn) if hasattr(w, 'bn') else tf.identity From 27a4736e968158063a87024be74534a560fc8e84 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 16 Sep 2021 17:55:58 +0200 Subject: [PATCH 323/757] Fix val.py study plot (#4831) * Fix val.py study plot * call plot_val_study * Rename plot_study_txt to plot_val_study --- utils/plots.py | 11 +++++++---- val.py | 4 ++-- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/utils/plots.py b/utils/plots.py index 1ed88ea7c832..9570fdf27a63 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -247,15 +247,16 @@ def plot_targets_txt(): # from utils.plots import *; plot_targets_txt() plt.savefig('targets.jpg', dpi=200) -def plot_study_txt(path='', x=None): # from utils.plots import *; plot_study_txt() - # Plot study.txt generated by val.py +def plot_val_study(file='', dir='', x=None): # from utils.plots 
import *; plot_val_study() + # Plot file=study.txt generated by val.py (or plot all study*.txt in dir) + save_dir = Path(file).parent if file else Path(dir) plot2 = False # plot additional results if plot2: ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)[1].ravel() fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True) # for f in [Path(path) / f'study_coco_{x}.txt' for x in ['yolov5s6', 'yolov5m6', 'yolov5l6', 'yolov5x6']]: - for f in sorted(Path(path).glob('study*.txt')): + for f in sorted(save_dir.glob('study*.txt')): y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T x = np.arange(y.shape[1]) if x is None else np.array(x) if plot2: @@ -278,7 +279,9 @@ def plot_study_txt(path='', x=None): # from utils.plots import *; plot_study_tx ax2.set_xlabel('GPU Speed (ms/img)') ax2.set_ylabel('COCO AP val') ax2.legend(loc='lower right') - plt.savefig(str(Path(path).name) + '.png', dpi=300) + f = save_dir / 'study.png' + print(f'Saving {f}...') + plt.savefig(f, dpi=300) def plot_labels(labels, names=(), save_dir=Path('')): diff --git a/val.py b/val.py index 00eb92bb096a..16dd76d680f7 100644 --- a/val.py +++ b/val.py @@ -26,7 +26,7 @@ check_suffix, check_yaml, box_iou, non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, set_logging, \ increment_path, colorstr from utils.metrics import ap_per_class, ConfusionMatrix -from utils.plots import plot_images, output_to_target, plot_study_txt +from utils.plots import output_to_target, plot_images, plot_val_study from utils.torch_utils import select_device, time_sync from utils.callbacks import Callbacks @@ -348,7 +348,7 @@ def main(opt): y.append(r + t) # results and times np.savetxt(f, y, fmt='%10.4g') # save os.system('zip -r study.zip study_*.txt') - plot_study_txt(x=x) # plot + plot_val_study(x=x) # plot if __name__ == "__main__": From 850f98f5085a7c3425ab91412fbd136b407ab2d0 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 17 Sep 2021 11:39:00 +0200 Subject: [PATCH 324/757] Created using Colaboratory --- tutorial.ipynb | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 38e8fd4389ea..6d6a1e77dc30 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -957,7 +957,6 @@ "# Unit tests\n", "%%shell\n", "export PYTHONPATH=\"$PWD\" # to run *.py. 
files in subdirectories\n", - "\n", "rm -rf runs # remove runs/\n", "for m in yolov5s; do # models\n", " python train.py --weights $m.pt --epochs 3 --img 320 --device 0 # train pretrained\n", @@ -968,9 +967,10 @@ " python val.py --weights $m.pt --device $d # val official\n", " python val.py --weights runs/train/exp/weights/best.pt --device $d # val custom\n", " done\n", - " python hubconf.py # hub\n", - " python models/yolo.py --cfg $m.yaml # inspect\n", - " python export.py --weights $m.pt --img 640 --batch 1 # export\n", + "python hubconf.py # hub\n", + "python models/yolo.py --cfg $m.yaml # build PyTorch model\n", + "python models/tf.py --weights $m.pt # build TensorFlow model\n", + "python export.py --img 128 --batch 1 --weights $m.pt --include torchscript onnx # export\n", "done" ], "execution_count": null, From e83792e65ce8b2ac51c29733e111302680dbceb7 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 17 Sep 2021 11:40:47 +0200 Subject: [PATCH 325/757] Created using Colaboratory --- tutorial.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 6d6a1e77dc30..cfa96914c713 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -954,7 +954,7 @@ "id": "FGH0ZjkGjejy" }, "source": [ - "# Unit tests\n", + "# CI Checks\n", "%%shell\n", "export PYTHONPATH=\"$PWD\" # to run *.py. files in subdirectories\n", "rm -rf runs # remove runs/\n", From 3a822a22ceedaca367f7a790c7d1091c0b426758 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 17 Sep 2021 13:02:37 +0200 Subject: [PATCH 326/757] `PIL.ImageDraw.text(anchor=...)` removal, reduce to `>=7.1.2` (#4842) * Unpin Pillow * Update requirements.txt * Update plots.py --- requirements.txt | 2 +- utils/plots.py | 7 ++++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/requirements.txt b/requirements.txt index b84b353f75f3..42d5dfc49354 100755 --- a/requirements.txt +++ b/requirements.txt @@ -4,7 +4,7 @@ matplotlib>=3.2.2 numpy>=1.18.5 opencv-python>=4.1.2 -Pillow>=8.0.0 +Pillow>=7.1.2 PyYAML>=5.3.1 scipy>=1.4.1 torch>=1.7.0 diff --git a/utils/plots.py b/utils/plots.py index 9570fdf27a63..5ff72cb144e2 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -3,11 +3,11 @@ Plotting utils """ -import math from copy import copy from pathlib import Path import cv2 +import math import matplotlib import matplotlib.pyplot as plt import numpy as np @@ -80,9 +80,10 @@ def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 2 if self.pil or not is_ascii(label): self.draw.rectangle(box, width=self.lw, outline=color) # box if label: - w = self.font.getsize(label)[0] # text width + w, h = self.font.getsize(label) # text width self.draw.rectangle([box[0], box[1] - self.fh, box[0] + w + 1, box[1] + 1], fill=color) - self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls') + # self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls') # for PIL>8.0 + self.draw.text((box[0], box[1] - h), label, fill=txt_color, font=self.font) else: # cv2 c1, c2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3])) cv2.rectangle(self.im, c1, c2, color, thickness=self.lw, lineType=cv2.LINE_AA) From fe39562a5f4009c59dde343502b4dd20a6aff823 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 17 Sep 2021 17:35:45 +0200 Subject: [PATCH 327/757] Sorted datasets update to `cache_labels()` (#4845) PR should produce datasets sorted alphabetically by filename. Cache version incremented to 0.5. 
Note: will force a one-time re-caching of existing datasets on first-use. --- utils/datasets.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index 4a4b187da345..adcdafe69df7 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -487,7 +487,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupt, messages desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels..." with Pool(NUM_THREADS) as pool: - pbar = tqdm(pool.imap_unordered(verify_image_label, zip(self.img_files, self.label_files, repeat(prefix))), + pbar = tqdm(pool.imap(verify_image_label, zip(self.img_files, self.label_files, repeat(prefix))), desc=desc, total=len(self.img_files)) for im_file, l, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar: nm += nm_f @@ -508,7 +508,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): x['hash'] = get_hash(self.label_files + self.img_files) x['results'] = nf, nm, ne, nc, len(self.img_files) x['msgs'] = msgs # warnings - x['version'] = 0.4 # cache version + x['version'] = 0.5 # cache version try: np.save(path, x) # save cache for next time path.with_suffix('.cache.npy').rename(path) # remove .npy suffix From 4fdaec0449817000803d6ab7e7512b0e58c4d7d4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 17 Sep 2021 18:01:52 +0200 Subject: [PATCH 328/757] Single `cache_version` definition (#4846) Defines dataset labels *.cache version in a single place, fixing a bug introduced in #4845. --- utils/datasets.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index adcdafe69df7..d253cb177b82 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -365,6 +365,8 @@ def img2label_paths(img_paths): class LoadImagesAndLabels(Dataset): # for training/testing + cache_version = 0.5 # dataset labels *.cache version + def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False, cache_images=False, single_cls=False, stride=32, pad=0.0, prefix=''): self.img_size = img_size @@ -404,7 +406,8 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') try: cache, exists = np.load(cache_path, allow_pickle=True).item(), True # load dict - assert cache['version'] == 0.4 and cache['hash'] == get_hash(self.label_files + self.img_files) + assert cache['version'] == self.cache_version # same version + assert cache['hash'] == get_hash(self.label_files + self.img_files) # same hash except: cache, exists = self.cache_labels(cache_path, prefix), False # cache @@ -508,7 +511,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): x['hash'] = get_hash(self.label_files + self.img_files) x['results'] = nf, nm, ne, nc, len(self.img_files) x['msgs'] = msgs # warnings - x['version'] = 0.5 # cache version + x['version'] = self.cache_version # cache version try: np.save(path, x) # save cache for next time path.with_suffix('.cache.npy').rename(path) # remove .npy suffix From 302a1b0bb03b3dbae0cb41f43b6c6c998070ff49 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Fri, 17 Sep 2021 22:29:34 +0530 Subject: [PATCH 329/757] W&B: Enable login timeout (#4843) * evolve fix * Enable login timeout * fix pkg --- Dockerfile | 2 +- utils/loggers/wandb/wandb_utils.py | 3 +++ 2 files changed, 4 insertions(+), 1 
deletion(-) diff --git a/Dockerfile b/Dockerfile index 858b22bc6383..e9cd304376ed 100644 --- a/Dockerfile +++ b/Dockerfile @@ -10,7 +10,7 @@ RUN apt update && apt install -y zip htop screen libgl1-mesa-glx COPY requirements.txt . RUN python -m pip install --upgrade pip RUN pip uninstall -y nvidia-tensorboard nvidia-tensorboard-plugin-dlprof -RUN pip install --no-cache -r requirements.txt coremltools onnx gsutil notebook +RUN pip install --no-cache -r requirements.txt coremltools onnx gsutil notebook wandb>=0.12.2 RUN pip install --no-cache -U torch torchvision numpy # RUN pip install --no-cache torch==1.9.0+cu111 torchvision==0.10.0+cu111 -f https://download.pytorch.org/whl/torch_stable.html diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index 504a518f75ea..e7b0d82213f0 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -5,6 +5,7 @@ import sys from contextlib import contextmanager from pathlib import Path +import pkg_resources as pkg import yaml from tqdm import tqdm @@ -20,6 +21,8 @@ import wandb assert hasattr(wandb, '__version__') # verify package import not local dir + if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.2'): + wandb.login(timeout=30) except (ImportError, AssertionError): wandb = None From 84bfa892365cd9d5938ea78494727783482dcad4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 18 Sep 2021 13:28:42 +0200 Subject: [PATCH 330/757] Consolidate `init_seeds()` (#4849) --- utils/general.py | 8 +++++--- utils/torch_utils.py | 10 ---------- 2 files changed, 5 insertions(+), 13 deletions(-) diff --git a/utils/general.py b/utils/general.py index dc9a10fe8617..561602323ab2 100755 --- a/utils/general.py +++ b/utils/general.py @@ -29,7 +29,6 @@ from utils.downloads import gsutil_getsize from utils.metrics import box_iou, fitness -from utils.torch_utils import init_torch_seeds # Settings torch.set_printoptions(linewidth=320, precision=5, profile='long') @@ -91,10 +90,13 @@ def set_logging(rank=-1, verbose=True): def init_seeds(seed=0): - # Initialize random number generator (RNG) seeds + # Initialize random number generator (RNG) seeds https://pytorch.org/docs/stable/notes/randomness.html + # cudnn seed 0 settings are slower and more reproducible, else faster and less reproducible + import torch.backends.cudnn as cudnn random.seed(seed) np.random.seed(seed) - init_torch_seeds(seed) + torch.manual_seed(seed) + cudnn.benchmark, cudnn.deterministic = (False, True) if seed == 0 else (True, False) def get_latest_run(search_dir='.'): diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 04e1446bb908..352ecf572c9f 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -15,7 +15,6 @@ from pathlib import Path import torch -import torch.backends.cudnn as cudnn import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F @@ -41,15 +40,6 @@ def torch_distributed_zero_first(local_rank: int): dist.barrier(device_ids=[0]) -def init_torch_seeds(seed=0): - # Speed-reproducibility tradeoff https://pytorch.org/docs/stable/notes/randomness.html - torch.manual_seed(seed) - if seed == 0: # slower, more reproducible - cudnn.benchmark, cudnn.deterministic = False, True - else: # faster, less reproducible - cudnn.benchmark, cudnn.deterministic = True, False - - def date_modified(path=__file__): # return human-readable file modification date, i.e. 
'2021-3-26' t = datetime.datetime.fromtimestamp(Path(path).stat().st_mtime) From 3732f9ac8a73eeae6ca80795c0ce435a56a5a18d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 18 Sep 2021 14:16:19 +0200 Subject: [PATCH 331/757] Refactor argparser printing to `print_args()` (#4850) * Refactor argparser printing to `print_args()` * Cleanup --- detect.py | 12 ++++++------ export.py | 5 +++-- models/tf.py | 6 +++--- train.py | 7 +++---- utils/general.py | 5 +++++ val.py | 6 +++--- 6 files changed, 23 insertions(+), 18 deletions(-) diff --git a/detect.py b/detect.py index ef7458d52db3..57bd6eea9ec4 100644 --- a/detect.py +++ b/detect.py @@ -19,12 +19,12 @@ sys.path.append(FILE.parents[0].as_posix()) # add yolov5/ to path from models.experimental import attempt_load -from utils.datasets import LoadStreams, LoadImages -from utils.general import check_img_size, check_imshow, check_requirements, check_suffix, colorstr, is_ascii, \ - non_max_suppression, apply_classifier, scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path, \ - save_one_box +from utils.datasets import LoadImages, LoadStreams +from utils.general import apply_classifier, check_img_size, check_imshow, check_requirements, check_suffix, colorstr, \ + increment_path, is_ascii, non_max_suppression, print_args, save_one_box, scale_coords, set_logging, \ + strip_optimizer, xyxy2xywh from utils.plots import Annotator, colors -from utils.torch_utils import select_device, load_classifier, time_sync +from utils.torch_utils import load_classifier, select_device, time_sync @torch.no_grad() @@ -279,11 +279,11 @@ def parse_opt(): parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') opt = parser.parse_args() opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand + print_args(FILE.stem, opt) return opt def main(opt): - print(colorstr('detect: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items())) check_requirements(exclude=('tensorboard', 'thop')) run(**vars(opt)) diff --git a/export.py b/export.py index dd7eefc51702..9d7576bf383a 100644 --- a/export.py +++ b/export.py @@ -41,7 +41,8 @@ from models.yolo import Detect from utils.activations import SiLU from utils.datasets import LoadImages -from utils.general import colorstr, check_dataset, check_img_size, check_requirements, file_size, set_logging, url2file +from utils.general import colorstr, check_dataset, check_img_size, check_requirements, file_size, print_args, \ + set_logging, url2file from utils.torch_utils import select_device @@ -322,12 +323,12 @@ def parse_opt(): default=['torchscript', 'onnx'], help='available formats are (torchscript, onnx, coreml, saved_model, pb, tflite, tfjs)') opt = parser.parse_args() + print_args(FILE.stem, opt) return opt def main(opt): set_logging() - print(colorstr('export: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items())) run(**vars(opt)) diff --git a/models/tf.py b/models/tf.py index 5d7153f246eb..5b918ee3c34a 100644 --- a/models/tf.py +++ b/models/tf.py @@ -27,9 +27,9 @@ from tensorflow import keras from models.common import Conv, Bottleneck, SPP, DWConv, Focus, BottleneckCSP, Concat, autopad, C3 -from models.experimental import MixConv2d, CrossConv, attempt_load +from models.experimental import CrossConv, MixConv2d, attempt_load from models.yolo import Detect -from utils.general import colorstr, make_divisible, set_logging +from utils.general import make_divisible, print_args, set_logging from utils.activations import SiLU LOGGER = logging.getLogger(__name__) @@ -434,12 +434,12 @@ def 
parse_opt(): parser.add_argument('--dynamic', action='store_true', help='dynamic batch size') opt = parser.parse_args() opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand + print_args(FILE.stem, opt) return opt def main(opt): set_logging() - print(colorstr('tf.py: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items())) run(**vars(opt)) diff --git a/train.py b/train.py index 1d0c2c608878..281a3c0bad7a 100644 --- a/train.py +++ b/train.py @@ -36,7 +36,7 @@ from utils.datasets import create_dataloader from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \ strip_optimizer, get_latest_run, check_dataset, check_git_status, check_img_size, check_requirements, \ - check_file, check_yaml, check_suffix, print_mutation, set_logging, one_cycle, colorstr, methods + check_file, check_yaml, check_suffix, print_args, print_mutation, set_logging, one_cycle, colorstr, methods from utils.downloads import attempt_download from utils.loss import ComputeLoss from utils.plots import plot_labels, plot_evolve @@ -470,9 +470,8 @@ def parse_opt(known=False): def main(opt, callbacks=Callbacks()): # Checks - set_logging(RANK) if RANK in [-1, 0]: - print(colorstr('train: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items())) + print_args(FILE.stem, opt) check_git_status() check_requirements(requirements=FILE.parent / 'requirements.txt', exclude=['thop']) @@ -508,7 +507,7 @@ def main(opt, callbacks=Callbacks()): if not opt.evolve: train(opt.hyp, opt, device, callbacks) if WORLD_SIZE > 1 and RANK == 0: - _ = [print('Destroying process group... ', end=''), dist.destroy_process_group(), print('Done.')] + _ = LOGGER.info('Destroying process group... ', end=''), dist.destroy_process_group(), LOGGER.info('Done.') # Evolve hyperparameters (optional) else: diff --git a/utils/general.py b/utils/general.py index 561602323ab2..d4d8e2064d08 100755 --- a/utils/general.py +++ b/utils/general.py @@ -89,6 +89,11 @@ def set_logging(rank=-1, verbose=True): level=logging.INFO if (verbose and rank in [-1, 0]) else logging.WARN) +def print_args(name, opt): + # Print argparser arguments + print(colorstr(f'{name}: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items())) + + def init_seeds(seed=0): # Initialize random number generator (RNG) seeds https://pytorch.org/docs/stable/notes/randomness.html # cudnn seed 0 settings are slower and more reproducible, else faster and less reproducible diff --git a/val.py b/val.py index 16dd76d680f7..3574fb085c07 100644 --- a/val.py +++ b/val.py @@ -24,7 +24,7 @@ from utils.datasets import create_dataloader from utils.general import coco80_to_coco91_class, check_dataset, check_img_size, check_requirements, \ check_suffix, check_yaml, box_iou, non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, set_logging, \ - increment_path, colorstr + increment_path, colorstr, print_args from utils.metrics import ap_per_class, ConfusionMatrix from utils.plots import output_to_target, plot_images, plot_val_study from utils.torch_utils import select_device, time_sync @@ -295,7 +295,7 @@ def run(data, def parse_opt(): - parser = argparse.ArgumentParser(prog='val.py') + parser = argparse.ArgumentParser() parser.add_argument('--data', type=str, default='data/coco128.yaml', help='dataset.yaml path') parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)') parser.add_argument('--batch-size', type=int, default=32, help='batch size') @@ -319,12 +319,12 @@ def parse_opt(): opt.save_json |= opt.data.endswith('coco.yaml') 
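    # note (editor's aside, not part of the patch): the |= idiom force-enables
    # dependent flags: evaluating on coco.yaml implies a COCO-JSON results
    # file, and --save-hybrid is only meaningful if label *.txt files are saved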
opt.save_txt |= opt.save_hybrid opt.data = check_yaml(opt.data) # check YAML + print_args(FILE.stem, opt) return opt def main(opt): set_logging() - print(colorstr('val: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items())) check_requirements(requirements=FILE.parent / 'requirements.txt', exclude=('tensorboard', 'thop')) if opt.task in ('train', 'val', 'test'): # run normally From 4d1a2ac87eb2c9c37978584f4f93af2af0260738 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 18 Sep 2021 15:02:08 +0200 Subject: [PATCH 332/757] Update `sys.path.append(str(ROOT))` (#4852) * Update `sys.path.append(str(ROOT))` * Cleanup --- detect.py | 6 ++++-- export.py | 2 +- models/tf.py | 5 +++-- models/yolo.py | 4 +++- train.py | 7 +++++-- utils/__init__.py | 19 ------------------- utils/aws/resume.py | 5 ++++- utils/loggers/wandb/sweep.py | 4 +++- utils/loggers/wandb/wandb_utils.py | 4 +++- val.py | 6 ++++-- 10 files changed, 30 insertions(+), 32 deletions(-) diff --git a/detect.py b/detect.py index 57bd6eea9ec4..92fcd064d53d 100644 --- a/detect.py +++ b/detect.py @@ -16,7 +16,9 @@ import torch.backends.cudnn as cudnn FILE = Path(__file__).resolve() -sys.path.append(FILE.parents[0].as_posix()) # add yolov5/ to path +ROOT = FILE.parents[0] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH from models.experimental import attempt_load from utils.datasets import LoadImages, LoadStreams @@ -284,7 +286,7 @@ def parse_opt(): def main(opt): - check_requirements(exclude=('tensorboard', 'thop')) + check_requirements(requirements=ROOT / 'requirements.txt', exclude=('tensorboard', 'thop')) run(**vars(opt)) diff --git a/export.py b/export.py index 9d7576bf383a..e876af234592 100644 --- a/export.py +++ b/export.py @@ -32,7 +32,7 @@ from torch.utils.mobile_optimizer import optimize_for_mobile FILE = Path(__file__).resolve() -ROOT = FILE.parents[0] # yolov5/ dir +ROOT = FILE.parents[0] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH diff --git a/models/tf.py b/models/tf.py index 5b918ee3c34a..3265b7b75f55 100644 --- a/models/tf.py +++ b/models/tf.py @@ -17,8 +17,9 @@ from pathlib import Path FILE = Path(__file__).resolve() -ROOT = FILE.parents[1] # yolov5/ dir -sys.path.append(ROOT.as_posix()) # add yolov5/ to path +ROOT = FILE.parents[1] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH import numpy as np import tensorflow as tf diff --git a/models/yolo.py b/models/yolo.py index 0a27b24dede7..a7590c57816c 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -12,7 +12,9 @@ from pathlib import Path FILE = Path(__file__).resolve() -sys.path.append(FILE.parents[1].as_posix()) # add yolov5/ to path +ROOT = FILE.parents[1] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH from models.common import * from models.experimental import * diff --git a/train.py b/train.py index 281a3c0bad7a..89c0c507b8bf 100644 --- a/train.py +++ b/train.py @@ -27,7 +27,9 @@ from tqdm import tqdm FILE = Path(__file__).resolve() -sys.path.append(FILE.parents[0].as_posix()) # add yolov5/ to path +ROOT = FILE.parents[0] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH import val # for end-of-epoch mAP from models.experimental import attempt_load @@ -470,10 +472,11 @@ def parse_opt(known=False): def main(opt, callbacks=Callbacks()): # Checks + set_logging(RANK) if RANK in [-1, 0]: 
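        # note (editor's aside, not part of the patch): RANK is -1 in plain
        # single-process runs and 0 on the DDP master, so the argument dump,
        # git status check and requirements check run once, not once per GPU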
print_args(FILE.stem, opt) check_git_status() - check_requirements(requirements=FILE.parent / 'requirements.txt', exclude=['thop']) + check_requirements(requirements=ROOT / 'requirements.txt', exclude=['thop']) # Resume if opt.resume and not check_wandb_resume(opt) and not opt.evolve: # resume an interrupted run diff --git a/utils/__init__.py b/utils/__init__.py index 74260ad1e5b4..e69de29bb2d1 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -1,19 +0,0 @@ -# import sys -# from pathlib import Path -# -# import torch -# from PIL import ImageFont -# -# FILE = Path(__file__).resolve() -# ROOT = FILE.parents[1] # yolov5/ dir -# if str(ROOT) not in sys.path: -# sys.path.append(str(ROOT)) # add ROOT to PATH -# -# # Check YOLOv5 Annotator font -# font = 'Arial.ttf' -# try: -# ImageFont.truetype(font) -# except Exception as e: # download if missing -# url = "https://ultralytics.com/assets/" + font -# print(f'Downloading {url} to {ROOT / font}...') -# torch.hub.download_url_to_file(url, str(ROOT / font)) diff --git a/utils/aws/resume.py b/utils/aws/resume.py index e869834e96e7..e1a8bd896a58 100644 --- a/utils/aws/resume.py +++ b/utils/aws/resume.py @@ -8,7 +8,10 @@ import torch import yaml -sys.path.append('./') # to run '$ python *.py' files in subdirectories +FILE = Path(__file__).resolve() +ROOT = FILE.parents[2] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH port = 0 # --master_port path = Path('').resolve() diff --git a/utils/loggers/wandb/sweep.py b/utils/loggers/wandb/sweep.py index 4d5df5c8e00a..fdabec4eb63b 100644 --- a/utils/loggers/wandb/sweep.py +++ b/utils/loggers/wandb/sweep.py @@ -4,7 +4,9 @@ import wandb FILE = Path(__file__).resolve() -sys.path.append(FILE.parents[3].as_posix()) # add utils/ to path +ROOT = FILE.parents[3] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH from train import train, parse_opt from utils.general import increment_path diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index e7b0d82213f0..ab2c20d520b0 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -11,7 +11,9 @@ from tqdm import tqdm FILE = Path(__file__).resolve() -sys.path.append(FILE.parents[3].as_posix()) # add yolov5/ to path +ROOT = FILE.parents[3] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH from utils.datasets import LoadImagesAndLabels from utils.datasets import img2label_paths diff --git a/val.py b/val.py index 3574fb085c07..2dbf570f1e6e 100644 --- a/val.py +++ b/val.py @@ -18,7 +18,9 @@ from tqdm import tqdm FILE = Path(__file__).resolve() -sys.path.append(FILE.parents[0].as_posix()) # add yolov5/ to path +ROOT = FILE.parents[0] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH from models.experimental import attempt_load from utils.datasets import create_dataloader @@ -325,7 +327,7 @@ def parse_opt(): def main(opt): set_logging() - check_requirements(requirements=FILE.parent / 'requirements.txt', exclude=('tensorboard', 'thop')) + check_requirements(requirements=ROOT / 'requirements.txt', exclude=('tensorboard', 'thop')) if opt.task in ('train', 'val', 'test'): # run normally run(**vars(opt)) From 4c839eeb1030e0d5c77609de646c1361e4dfdd61 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 18 Sep 2021 18:34:30 +0200 Subject: [PATCH 333/757] Simplify `check_requirements()` usage (#4855) * 
Simplify `check_requirements()` usage * remove assert, print() --- detect.py | 2 +- hubconf.py | 2 +- train.py | 2 +- utils/autoanchor.py | 2 +- utils/general.py | 5 ++++- val.py | 2 +- 6 files changed, 9 insertions(+), 6 deletions(-) diff --git a/detect.py b/detect.py index 92fcd064d53d..9b9e7c74644b 100644 --- a/detect.py +++ b/detect.py @@ -286,7 +286,7 @@ def parse_opt(): def main(opt): - check_requirements(requirements=ROOT / 'requirements.txt', exclude=('tensorboard', 'thop')) + check_requirements(exclude=('tensorboard', 'thop')) run(**vars(opt)) diff --git a/hubconf.py b/hubconf.py index 9c5fa63809d1..3a89cf9763da 100644 --- a/hubconf.py +++ b/hubconf.py @@ -34,7 +34,7 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo from utils.torch_utils import select_device file = Path(__file__).resolve() - check_requirements(requirements=file.parent / 'requirements.txt', exclude=('tensorboard', 'thop', 'opencv-python')) + check_requirements(exclude=('tensorboard', 'thop', 'opencv-python')) set_logging(verbose=verbose) save_dir = Path('') if str(name).endswith('.pt') else file.parent diff --git a/train.py b/train.py index 89c0c507b8bf..6f98a64124d7 100644 --- a/train.py +++ b/train.py @@ -476,7 +476,7 @@ def main(opt, callbacks=Callbacks()): if RANK in [-1, 0]: print_args(FILE.stem, opt) check_git_status() - check_requirements(requirements=ROOT / 'requirements.txt', exclude=['thop']) + check_requirements(exclude=['thop']) # Resume if opt.resume and not check_wandb_resume(opt) and not opt.evolve: # resume an interrupted run diff --git a/utils/autoanchor.py b/utils/autoanchor.py index 66a2712dfd5d..1706fcb8e735 100644 --- a/utils/autoanchor.py +++ b/utils/autoanchor.py @@ -127,7 +127,7 @@ def print_results(k): print(f'{prefix}Running kmeans for {n} anchors on {len(wh)} points...') s = wh.std(0) # sigmas for whitening k, dist = kmeans(wh / s, n, iter=30) # points, mean distance - assert len(k) == n, print(f'{prefix}ERROR: scipy.cluster.vq.kmeans requested {n} points but returned only {len(k)}') + assert len(k) == n, f'{prefix}ERROR: scipy.cluster.vq.kmeans requested {n} points but returned only {len(k)}' k *= s wh = torch.tensor(wh, dtype=torch.float32) # filtered wh0 = torch.tensor(wh0, dtype=torch.float32) # unfiltered diff --git a/utils/general.py b/utils/general.py index d4d8e2064d08..dcaa3c71b3f5 100755 --- a/utils/general.py +++ b/utils/general.py @@ -37,6 +37,9 @@ cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader) os.environ['NUMEXPR_MAX_THREADS'] = str(min(os.cpu_count(), 8)) # NumExpr max threads +FILE = Path(__file__).resolve() +ROOT = FILE.parents[1] # YOLOv5 root directory + class Profile(contextlib.ContextDecorator): # Usage: @Profile() decorator or 'with Profile():' context manager @@ -222,7 +225,7 @@ def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=Fals @try_except -def check_requirements(requirements='requirements.txt', exclude=(), install=True): +def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True): # Check installed dependencies meet requirements (pass *.txt file or list of packages) prefix = colorstr('red', 'bold', 'requirements:') check_python() # check python version diff --git a/val.py b/val.py index 2dbf570f1e6e..f8c4f9e1cdd5 100644 --- a/val.py +++ b/val.py @@ -327,7 +327,7 @@ def parse_opt(): def main(opt): set_logging() - check_requirements(requirements=ROOT / 'requirements.txt', exclude=('tensorboard', 'thop')) + 
check_requirements(exclude=('tensorboard', 'thop')) if opt.task in ('train', 'val', 'test'): # run normally run(**vars(opt)) From 9ef94940aa5e9618e7e804f0758f9a6cebfc63a9 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 18 Sep 2021 18:43:26 +0200 Subject: [PATCH 334/757] Update greetings.yml (#4856) --- .github/workflows/greetings.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index c557e77f3b70..a40d0a50c8ac 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -18,7 +18,7 @@ jobs: git remote add upstream https://github.com/ultralytics/yolov5.git git fetch upstream git checkout feature # <----- replace 'feature' with local branch name - git rebase upstream/master + git merge upstream/master git push -u origin -f ``` - ✅ Verify all Continuous Integration (CI) **checks are passing**. From 40d1c805031c4feba8ea9c0c1d5cb4eb8170afcc Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 20 Sep 2021 12:22:24 +0200 Subject: [PATCH 335/757] Update Dockerfile (#4861) --- Dockerfile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Dockerfile b/Dockerfile index e9cd304376ed..543e02e34124 100644 --- a/Dockerfile +++ b/Dockerfile @@ -50,3 +50,6 @@ ENV HOME=/usr/src/app # Clean up # docker system prune -a --volumes + +# Update Ubuntu drivers +# https://www.maketecheasier.com/install-nvidia-drivers-ubuntu/ From 28096163451a7bf3dc964228c557c3b8d010de2d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 20 Sep 2021 13:10:41 +0200 Subject: [PATCH 336/757] Update Dockerfile (#4862) --- Dockerfile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Dockerfile b/Dockerfile index 543e02e34124..95c098f9f513 100644 --- a/Dockerfile +++ b/Dockerfile @@ -53,3 +53,6 @@ ENV HOME=/usr/src/app # Update Ubuntu drivers # https://www.maketecheasier.com/install-nvidia-drivers-ubuntu/ + +# DDP test +# python -m torch.distributed.run --nproc_per_node 2 --master_port 1 train.py --epochs 3 From 2b3109eeb05a609538e4b378f0e00a4fa78d11c0 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 20 Sep 2021 13:12:48 +0200 Subject: [PATCH 337/757] Fix DDP destruction `LOGGER.info()` (#4863) --- train.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/train.py b/train.py index 6f98a64124d7..51a0e127486a 100644 --- a/train.py +++ b/train.py @@ -510,7 +510,8 @@ def main(opt, callbacks=Callbacks()): if not opt.evolve: train(opt.hyp, opt, device, callbacks) if WORLD_SIZE > 1 and RANK == 0: - _ = LOGGER.info('Destroying process group... ', end=''), dist.destroy_process_group(), LOGGER.info('Done.') + LOGGER.info('Destroying process group... 
') + dist.destroy_process_group() # Evolve hyperparameters (optional) else: From 8ad9e4ed5be3cba275bcd624b14d53fe4985f262 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 20 Sep 2021 13:57:23 +0200 Subject: [PATCH 338/757] Annotator `check_font()` RANK -1 remove progress (#4864) * Annotator `check_font()` RANK -1 remove progress * Cleanup --- utils/plots.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/utils/plots.py b/utils/plots.py index 5ff72cb144e2..1e6ee516387a 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -3,11 +3,12 @@ Plotting utils """ +import math +import os from copy import copy from pathlib import Path import cv2 -import math import matplotlib import matplotlib.pyplot as plt import numpy as np @@ -21,6 +22,7 @@ # Settings CONFIG_DIR = user_config_dir() # Ultralytics settings dir +RANK = int(os.getenv('RANK', -1)) matplotlib.rc('font', **{'size': 11}) matplotlib.use('Agg') # for writing to files only @@ -55,12 +57,13 @@ def check_font(font='Arial.ttf', size=10): except Exception as e: # download if missing url = "https://ultralytics.com/assets/" + font.name print(f'Downloading {url} to {font}...') - torch.hub.download_url_to_file(url, str(font)) + torch.hub.download_url_to_file(url, str(font), progress=False) return ImageFont.truetype(str(font), size) class Annotator: - check_font() # download TTF if necessary + if RANK in (-1, 0): + check_font() # download TTF if necessary # YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=True): From 9febea79de895191bd7a375e5c5a61bfa2886c89 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Mon, 20 Sep 2021 19:49:34 +0530 Subject: [PATCH 339/757] W&B: Login only in master processes (#4866) * evolve fix * Enable login timeout * fix pkg * check rank * don't relogin --- utils/loggers/wandb/wandb_utils.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index ab2c20d520b0..f520fbba8850 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -19,16 +19,17 @@ from utils.datasets import img2label_paths from utils.general import check_dataset, check_file +RANK = int(os.getenv('RANK', -1)) + try: import wandb assert hasattr(wandb, '__version__') # verify package import not local dir - if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.2'): + if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.2') and RANK in [0, -1]: wandb.login(timeout=30) except (ImportError, AssertionError): wandb = None -RANK = int(os.getenv('RANK', -1)) WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' From 59aae85a7e40701bb872df673a6ef288e99a4ae3 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Tue, 21 Sep 2021 23:32:32 +0530 Subject: [PATCH 340/757] W&B: Fix dataset check (#4879) * evolve fix * Enable login timeout * fix pkg * check rank * don't relogin * fix * reformat --- utils/loggers/wandb/wandb_utils.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index f520fbba8850..9a80dc42ca95 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -5,8 +5,8 @@ import sys from contextlib import contextmanager from pathlib import Path -import pkg_resources as pkg +import pkg_resources as pkg import yaml from tqdm import tqdm @@ -49,9 +49,11 
@@ def check_wandb_dataset(data_file): if check_file(data_file) and data_file.endswith('.yaml'): with open(data_file, errors='ignore') as f: data_dict = yaml.safe_load(f) - is_wandb_artifact = (data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX) or - data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX)) - if is_wandb_artifact: + is_trainset_wandb_artifact = (isinstance(data_dict['train'], str) and + data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX)) + is_valset_wandb_artifact = (isinstance(data_dict['val'], str) and + data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX)) + if is_trainset_wandb_artifact or is_valset_wandb_artifact: return data_dict else: return check_dataset(data_file) From dad8660540e47d3331a2ae1c78bda8670ecdd737 Mon Sep 17 00:00:00 2001 From: NauchtanRobotics Date: Fri, 24 Sep 2021 23:44:01 +1000 Subject: [PATCH 341/757] Fix arg help string to match 'classes' arg name (#4893) --- detect.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/detect.py b/detect.py index 9b9e7c74644b..08e78bf64a3b 100644 --- a/detect.py +++ b/detect.py @@ -267,7 +267,7 @@ def parse_opt(): parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes') parser.add_argument('--nosave', action='store_true', help='do not save images/videos') - parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3') + parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3') parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') parser.add_argument('--augment', action='store_true', help='augmented inference') parser.add_argument('--visualize', action='store_true', help='visualize features') From ce7fa81d4e342ca97f7459f6dd10036b3449321b Mon Sep 17 00:00:00 2001 From: Jiacong Fang Date: Sat, 25 Sep 2021 00:23:28 +0800 Subject: [PATCH 342/757] Avoid out-of-image class labels (#4902) * Avoid out-of-image class labels * Update plots.py * Cleanup Co-authored-by: Glenn Jocher --- utils/plots.py | 26 +++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/utils/plots.py b/utils/plots.py index 1e6ee516387a..d8e7c07f39b1 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -73,7 +73,6 @@ def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=Tr self.im = im if isinstance(im, Image.Image) else Image.fromarray(im) self.draw = ImageDraw.Draw(self.im) self.font = check_font(font, size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12)) - self.fh = self.font.getsize('a')[1] - 3 # font height else: # use cv2 self.im = im self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2) # line width @@ -83,20 +82,25 @@ def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 2 if self.pil or not is_ascii(label): self.draw.rectangle(box, width=self.lw, outline=color) # box if label: - w, h = self.font.getsize(label) # text width - self.draw.rectangle([box[0], box[1] - self.fh, box[0] + w + 1, box[1] + 1], fill=color) + w, h = self.font.getsize(label) # text width, height + outside = box[1] - h >= 0 # label fits outside box + self.draw.rectangle([box[0], + box[1] - h if outside else box[1], + box[0] + w + 1, + box[1] + 1 if outside else box[1] + h + 1], fill=color) # self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls') # for PIL>8.0 - 
self.draw.text((box[0], box[1] - h), label, fill=txt_color, font=self.font) + self.draw.text((box[0], box[1] - h if outside else box[1]), label, fill=txt_color, font=self.font) else: # cv2 - c1, c2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3])) - cv2.rectangle(self.im, c1, c2, color, thickness=self.lw, lineType=cv2.LINE_AA) + p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3])) + cv2.rectangle(self.im, p1, p2, color, thickness=self.lw, lineType=cv2.LINE_AA) if label: tf = max(self.lw - 1, 1) # font thickness - w, h = cv2.getTextSize(label, 0, fontScale=self.lw / 3, thickness=tf)[0] - c2 = c1[0] + w, c1[1] - h - 3 - cv2.rectangle(self.im, c1, c2, color, -1, cv2.LINE_AA) # filled - cv2.putText(self.im, label, (c1[0], c1[1] - 2), 0, self.lw / 3, txt_color, thickness=tf, - lineType=cv2.LINE_AA) + w, h = cv2.getTextSize(label, 0, fontScale=self.lw / 3, thickness=tf)[0] # text width, height + outside = p1[1] - h - 3 >= 0 # label fits outside box + p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3 + cv2.rectangle(self.im, p1, p2, color, -1, cv2.LINE_AA) # filled + cv2.putText(self.im, label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2), 0, self.lw / 3, txt_color, + thickness=tf, lineType=cv2.LINE_AA) def rectangle(self, xy, fill=None, outline=None, width=1): # Add rectangle to image (PIL-only) From 2c2ef25f8bb351b34aef89f8fce75742c698e847 Mon Sep 17 00:00:00 2001 From: Jiacong Fang Date: Sat, 25 Sep 2021 05:18:15 +0800 Subject: [PATCH 343/757] TensorFlow.js export enhancements (#4905) * Add arguments to TensorFlow NMS call * Add regex substitution to reorder Identity_* * Delete reorder in docstring * Cleanup * Cleanup2 * Removed `+ \` on string ends (not needed) Co-authored-by: Glenn Jocher --- export.py | 29 +++++++++++++++++++++++++++-- models/tf.py | 2 +- 2 files changed, 28 insertions(+), 3 deletions(-) diff --git a/export.py b/export.py index e876af234592..d5b63c410af8 100644 --- a/export.py +++ b/export.py @@ -14,7 +14,6 @@ yolov5s.tflite TensorFlow.js: - $ # Edit yolov5s_web_model/model.json to sort Identity* in ascending order $ cd .. 
&& git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example $ npm install $ ln -s ../../yolov5/yolov5s_web_model public/yolov5s_web_model @@ -213,16 +212,32 @@ def export_tfjs(keras_model, im, file, prefix=colorstr('TensorFlow.js:')): # YOLOv5 TensorFlow.js export try: check_requirements(('tensorflowjs',)) + import re import tensorflowjs as tfjs print(f'\n{prefix} starting export with tensorflowjs {tfjs.__version__}...') f = str(file).replace('.pt', '_web_model') # js dir f_pb = file.with_suffix('.pb') # *.pb path + f_json = f + '/model.json' # *.json path cmd = f"tensorflowjs_converter --input_format=tf_frozen_model " \ f"--output_node_names='Identity,Identity_1,Identity_2,Identity_3' {f_pb} {f}" subprocess.run(cmd, shell=True) + json = open(f_json).read() + with open(f_json, 'w') as j: # sort JSON Identity_* in ascending order + subst = re.sub( + r'{"outputs": {"Identity.?.?": {"name": "Identity.?.?"}, ' + r'"Identity.?.?": {"name": "Identity.?.?"}, ' + r'"Identity.?.?": {"name": "Identity.?.?"}, ' + r'"Identity.?.?": {"name": "Identity.?.?"}}}', + r'{"outputs": {"Identity": {"name": "Identity"}, ' + r'"Identity_1": {"name": "Identity_1"}, ' + r'"Identity_2": {"name": "Identity_2"}, ' + r'"Identity_3": {"name": "Identity_3"}}}', + json) + j.write(subst) + print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') except Exception as e: print(f'\n{prefix} export failure: {e}') @@ -243,6 +258,10 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' dynamic=False, # ONNX/TF: dynamic axes simplify=False, # ONNX: simplify model opset=12, # ONNX: opset version + topk_per_class=100, # TF.js NMS: topk per class to keep + topk_all=100, # TF.js NMS: topk for all classes to keep + iou_thres=0.45, # TF.js NMS: IoU threshold + conf_thres=0.25 # TF.js NMS: confidence threshold ): t = time.time() include = [x.lower() for x in include] @@ -290,7 +309,9 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' if any(tf_exports): pb, tflite, tfjs = tf_exports[1:] assert not (tflite and tfjs), 'TFLite and TF.js models must be exported separately, please pass only one type.' 
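        # note (editor's aside, not part of the patch): the four new NMS
        # arguments are threaded into the Keras model below, so thresholds
        # chosen at export time are baked into the resulting TF.js graph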
- model = export_saved_model(model, im, file, dynamic, tf_nms=tfjs, agnostic_nms=tfjs) # keras model + model = export_saved_model(model, im, file, dynamic, tf_nms=tfjs, agnostic_nms=tfjs, + topk_per_class=topk_per_class, topk_all=topk_all, conf_thres=conf_thres, + iou_thres=iou_thres) # keras model if pb or tfjs: # pb prerequisite to tfjs export_pb(model, im, file) if tflite: @@ -319,6 +340,10 @@ def parse_opt(): parser.add_argument('--dynamic', action='store_true', help='ONNX/TF: dynamic axes') parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model') parser.add_argument('--opset', type=int, default=13, help='ONNX: opset version') + parser.add_argument('--topk-per-class', type=int, default=100, help='TF.js NMS: topk per class to keep') + parser.add_argument('--topk-all', type=int, default=100, help='TF.js NMS: topk for all classes to keep') + parser.add_argument('--iou-thres', type=float, default=0.45, help='TF.js NMS: IoU threshold') + parser.add_argument('--conf-thres', type=float, default=0.25, help='TF.js NMS: confidence threshold') parser.add_argument('--include', nargs='+', default=['torchscript', 'onnx'], help='available formats are (torchscript, onnx, coreml, saved_model, pb, tflite, tfjs)') diff --git a/models/tf.py b/models/tf.py index 3265b7b75f55..b7d99359c863 100644 --- a/models/tf.py +++ b/models/tf.py @@ -367,7 +367,7 @@ class AgnosticNMS(keras.layers.Layer): # TF Agnostic NMS def call(self, input, topk_all, iou_thres, conf_thres): # wrap map_fn to avoid TypeSpec related error https://stackoverflow.com/a/65809989/3036450 - return tf.map_fn(self._nms, input, + return tf.map_fn(lambda x: self._nms(x, topk_all, iou_thres, conf_thres), input, fn_output_signature=(tf.float32, tf.float32, tf.float32, tf.int32), name='agnostic_nms') From 149263279655d26dc3c74192773de1c9c5a772c9 Mon Sep 17 00:00:00 2001 From: Jebastin Nadar Date: Sat, 25 Sep 2021 19:13:24 +0530 Subject: [PATCH 344/757] Fix zipfile name for coco128-segments (#4914) --- data/scripts/get_coco128.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/scripts/get_coco128.sh b/data/scripts/get_coco128.sh index 4238e3634dbb..ee05a867e564 100644 --- a/data/scripts/get_coco128.sh +++ b/data/scripts/get_coco128.sh @@ -10,7 +10,7 @@ # Download/unzip images and labels d='../datasets' # unzip directory url=https://github.com/ultralytics/yolov5/releases/download/v1.0/ -f='coco128.zip' # or 'coco2017labels-segments.zip', 68 MB +f='coco128.zip' # or 'coco128-segments.zip', 68 MB echo 'Downloading' $url$f ' ...' 
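# note (editor's aside, not part of the patch): curl -L follows the redirect
# GitHub issues for release assets, and the trailing '&' backgrounds the job
# so the script can continue while the download completes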
curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & From a64a4c839fa15baf5ea2be933e7034a1607878b2 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 25 Sep 2021 08:52:36 -0700 Subject: [PATCH 345/757] Replace `os.system('unzip file.zip')` -> `ZipFile.extractall()` (#4919) * Replace `os.system('unzip file.zip')` -> `ZipFile.extractall()` * Cleanup --- utils/datasets.py | 5 +++-- utils/downloads.py | 5 +++-- utils/general.py | 18 ++++++++++-------- 3 files changed, 16 insertions(+), 12 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index d253cb177b82..a54e29fd2908 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -15,6 +15,7 @@ from multiprocessing.pool import ThreadPool, Pool from pathlib import Path from threading import Thread +from zipfile import ZipFile import cv2 import numpy as np @@ -928,8 +929,8 @@ def unzip(path): # Unzip data.zip TODO: CONSTRAINT: path/to/abc.zip MUST unzip to 'path/to/abc/' if str(path).endswith('.zip'): # path is data.zip assert Path(path).is_file(), f'Error unzipping {path}, file not found' - assert os.system(f'unzip -q {path} -d {path.parent}') == 0, f'Error unzipping {path}' - dir = path.with_suffix('') # dataset directory + ZipFile(path).extractall(path=path.parent) # unzip + dir = path.with_suffix('') # dataset directory == zip name return True, str(dir), next(dir.rglob('*.yaml')) # zipped, data_dir, yaml_path else: # path is data.yaml return False, None, path diff --git a/utils/downloads.py b/utils/downloads.py index 27cb899cd606..eafa3b7ac309 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -9,6 +9,7 @@ import time import urllib from pathlib import Path +from zipfile import ZipFile import requests import torch @@ -104,8 +105,8 @@ def gdrive_download(id='16TiPfZj7htmTyhntwcZyEEAejOUxuT6m', file='tmp.zip'): # Unzip if archive if file.suffix == '.zip': print('unzipping... ', end='') - os.system(f'unzip -q {file}') # unzip - file.unlink() # remove zip to free space + ZipFile(file).extractall(path=file.parent) # unzip + file.unlink() # remove zip print(f'Done ({time.time() - t:.1f}s)') return r diff --git a/utils/general.py b/utils/general.py index dcaa3c71b3f5..2e2cdf389075 100755 --- a/utils/general.py +++ b/utils/general.py @@ -18,6 +18,7 @@ from multiprocessing.pool import ThreadPool from pathlib import Path from subprocess import check_output +from zipfile import ZipFile import cv2 import numpy as np @@ -353,17 +354,19 @@ def check_dataset(data, autodownload=True): if s and autodownload: # download script if s.startswith('http') and s.endswith('.zip'): # URL f = Path(s).name # filename - print(f'Downloading {s} ...') + print(f'Downloading {s} to {f}...') torch.hub.download_url_to_file(s, f) root = path.parent if 'path' in data else '..' # unzip directory i.e. 
'../' Path(root).mkdir(parents=True, exist_ok=True) # create root - r = os.system(f'unzip -q {f} -d {root} && rm {f}') # unzip + ZipFile(f).extractall(path=root) # unzip + Path(f).unlink() # remove zip + r = None # success elif s.startswith('bash '): # bash script print(f'Running {s} ...') r = os.system(s) else: # python script r = exec(s, {'yaml': data}) # return None - print('Dataset autodownload %s\n' % ('success' if r in (0, None) else 'failure')) # print result + print(f"Dataset autodownload {f'success, saved to {root}' if r in (0, None) else 'failure'}") else: raise Exception('Dataset not found.') @@ -393,12 +396,11 @@ def download_one(url, dir): if unzip and f.suffix in ('.zip', '.gz'): print(f'Unzipping {f}...') if f.suffix == '.zip': - s = f'unzip -qo {f} -d {dir}' # unzip -quiet -overwrite + ZipFile(f).extractall(path=dir) # unzip elif f.suffix == '.gz': - s = f'tar xfz {f} --directory {f.parent}' # unzip - if delete: # delete zip file after unzip - s += f' && rm {f}' - os.system(s) + os.system(f'tar xfz {f} --directory {f.parent}') # unzip + if delete: + f.unlink() # remove zip dir = Path(dir) dir.mkdir(parents=True, exist_ok=True) # make directory From 39c17ce0b922bbafcf9f8da64f286fef01040727 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 25 Sep 2021 09:16:14 -0700 Subject: [PATCH 346/757] Fix `root` referenced before assignment (#4920) * Fix `root` referenced before assignment Fix for bug introduced by #4919 discovered on VOC autodownload: ``` python train.py --data VOC.yaml ``` * Cleanup --- utils/general.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/general.py b/utils/general.py index 2e2cdf389075..3c5cbff13d55 100755 --- a/utils/general.py +++ b/utils/general.py @@ -352,11 +352,11 @@ def check_dataset(data, autodownload=True): if not all(x.exists() for x in val): print('\nWARNING: Dataset not found, nonexistent paths: %s' % [str(x) for x in val if not x.exists()]) if s and autodownload: # download script + root = path.parent if 'path' in data else '..' # unzip directory i.e. '../' if s.startswith('http') and s.endswith('.zip'): # URL f = Path(s).name # filename print(f'Downloading {s} to {f}...') torch.hub.download_url_to_file(s, f) - root = path.parent if 'path' in data else '..' # unzip directory i.e. '../' Path(root).mkdir(parents=True, exist_ok=True) # create root ZipFile(f).extractall(path=root) # unzip Path(f).unlink() # remove zip @@ -366,7 +366,7 @@ def check_dataset(data, autodownload=True): r = os.system(s) else: # python script r = exec(s, {'yaml': data}) # return None - print(f"Dataset autodownload {f'success, saved to {root}' if r in (0, None) else 'failure'}") + print(f"Dataset autodownload {f'success, saved to {root}' if r in (0, None) else 'failure'}\n") else: raise Exception('Dataset not found.') From e687873436eb7d4259c46387993fb0d50034c18b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 26 Sep 2021 10:54:55 -0700 Subject: [PATCH 347/757] Add Slack Forum badge to README (#4930) Add badge with link to join the new YOLOv5 Slack Forum! https://join.slack.com/t/ultralytics/shared_invite/zt-w29ei8bp-jczz7QYUmDtgo6r6KcMIAg --- README.md | 67 ++++++++++++++++++++++++++++--------------------------- 1 file changed, 34 insertions(+), 33 deletions(-) diff --git a/README.md b/README.md index febf4bff9b40..a01c2c688aa4 100644 --- a/README.md +++ b/README.md @@ -1,42 +1,43 @@

[badge markup elided: the <a href>/<img> HTML in this README hunk did not survive extraction. The change regroups the header shields, with CI CPU testing, YOLOv5 Citation and Docker Pulls on the first row and Open In Colab, Open In Kaggle plus the new Join Forum (Slack) badge on the second.]
From d856c4829837dd6ef004e1defc789e44d24f7b6c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 26 Sep 2021 11:54:53 -0700 Subject: [PATCH 348/757] Validate `best.pt` on train end (#4889) * Validate best.pt on train end * 0.7 iou for COCO only * pass callbacks * active model.float() if not half * print Validating best.pt... * add newline --- train.py | 27 +++++++++++++-------------- val.py | 3 +-- 2 files changed, 14 insertions(+), 16 deletions(-) diff --git a/train.py b/train.py index 51a0e127486a..40f58bfafb4a 100644 --- a/train.py +++ b/train.py @@ -356,9 +356,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary single_cls=single_cls, dataloader=val_loader, save_dir=save_dir, - save_json=is_coco and final_epoch, - verbose=nc < 50 and final_epoch, - plots=plots and final_epoch, + plots=False, callbacks=callbacks, compute_loss=compute_loss) @@ -404,23 +402,24 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # end training ----------------------------------------------------------------------------------------------------- if RANK in [-1, 0]: LOGGER.info(f'\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.') - if not evolve: - if is_coco: # COCO dataset - for m in [last, best] if best.exists() else [last]: # speed, mAP tests + for f in last, best: + if f.exists(): + strip_optimizer(f) # strip optimizers + if f is best: + LOGGER.info(f'\nValidating {f}...') results, _, _ = val.run(data_dict, batch_size=batch_size // WORLD_SIZE * 2, imgsz=imgsz, - model=attempt_load(m, device).half(), - iou_thres=0.7, # NMS IoU threshold for best pycocotools results + model=attempt_load(f, device).half(), + iou_thres=0.7 if is_coco else 0.6, # best pycocotools results at 0.7 single_cls=single_cls, dataloader=val_loader, save_dir=save_dir, - save_json=True, - plots=False) - # Strip optimizers - for f in last, best: - if f.exists(): - strip_optimizer(f) # strip optimizers + save_json=is_coco, + verbose=True, + plots=True, + callbacks=callbacks) # val best model with plots + callbacks.run('on_train_end', last, best, plots, epoch) LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}") diff --git a/val.py b/val.py index f8c4f9e1cdd5..92e0e3b13ae9 100644 --- a/val.py +++ b/val.py @@ -133,8 +133,7 @@ def run(data, # Half half &= device.type != 'cpu' # half precision only supported on CUDA - if half: - model.half() + model.half() if half else model.float() # Configure model.eval() From 793383232fd52382d7bbd2a1ce771516afc15fe5 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 26 Sep 2021 12:10:33 -0700 Subject: [PATCH 349/757] Update default Albumentations (#4931) --- utils/augmentations.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/utils/augmentations.py b/utils/augmentations.py index 49f957e6fd62..04192d1ec5cd 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -23,9 +23,13 @@ def __init__(self): check_version(A.__version__, '1.0.3') # version requirement self.transform = A.Compose([ - A.Blur(p=0.1), - A.MedianBlur(p=0.1), - A.ToGray(p=0.01)], + A.Blur(p=0.01), + A.MedianBlur(p=0.01), + A.ToGray(p=0.01), + A.CLAHE(p=0.01), + A.RandomBrightnessContrast(p=0.0), + A.RandomGamma(p=0.0), + A.ImageCompression(quality_lower=75, p=0.0)], bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels'])) logging.info(colorstr('albumentations: ') + ', '.join(f'{x}' for x in self.transform.transforms if x.p)) From 5a8e4343d80de4ece38cdb5807a7187ec937c57c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 
26 Sep 2021 17:11:46 -0700 Subject: [PATCH 350/757] Scope `check_file()` search space (#4933) `check_file()` is now limited to searching opt-in directories: /data, /models, /utils. This prevents large non-project directories like /.git and /venv from being searched, which may cause `check_file()` to slow significantly. --- utils/general.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index 3c5cbff13d55..00bafb1e9537 100755 --- a/utils/general.py +++ b/utils/general.py @@ -315,7 +315,9 @@ def check_file(file, suffix=''): assert Path(file).exists() and Path(file).stat().st_size > 0, f'File download failed: {url}' # check return file else: # search - files = glob.glob('./**/' + file, recursive=True) # find file + files = [] + for d in 'data', 'models', 'utils': # search directories + files.extend(glob.glob(str(ROOT / d / '**' / file), recursive=True)) # find file assert len(files), f'File not found: {file}' # assert file was found assert len(files) == 1, f"Multiple files match '{file}', specify exact path: {files}" # assert unique return files[0] # return file From c5ba2abb4afb9fe8c671f14eb5200647893efe30 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 26 Sep 2021 19:16:14 -0700 Subject: [PATCH 351/757] Update Dockerfile (#4935) --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 95c098f9f513..e0653e0f9b3a 100644 --- a/Dockerfile +++ b/Dockerfile @@ -12,7 +12,7 @@ RUN python -m pip install --upgrade pip RUN pip uninstall -y nvidia-tensorboard nvidia-tensorboard-plugin-dlprof RUN pip install --no-cache -r requirements.txt coremltools onnx gsutil notebook wandb>=0.12.2 RUN pip install --no-cache -U torch torchvision numpy -# RUN pip install --no-cache torch==1.9.0+cu111 torchvision==0.10.0+cu111 -f https://download.pytorch.org/whl/torch_stable.html +# RUN pip install --no-cache torch==1.9.1+cu111 torchvision==0.10.1+cu111 -f https://download.pytorch.org/whl/torch_stable.html # Create working directory RUN mkdir -p /usr/src/app From a820b43aca3816c9552e9beaf14a77955742b0ec Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 27 Sep 2021 13:48:15 -0700 Subject: [PATCH 352/757] Automatic Chinese fonts plotting (#4951) * Automatic Chinese fonts plotting * Default PIL=False --- detect.py | 5 ++--- models/common.py | 13 ++++++------- utils/general.py | 7 +++---- utils/plots.py | 13 +++++++------ 4 files changed, 18 insertions(+), 20 deletions(-) diff --git a/detect.py b/detect.py index 08e78bf64a3b..fae82833c5f6 100644 --- a/detect.py +++ b/detect.py @@ -23,7 +23,7 @@ from models.experimental import attempt_load from utils.datasets import LoadImages, LoadStreams from utils.general import apply_classifier, check_img_size, check_imshow, check_requirements, check_suffix, colorstr, \ - increment_path, is_ascii, non_max_suppression, print_args, save_one_box, scale_coords, set_logging, \ + increment_path, non_max_suppression, print_args, save_one_box, scale_coords, set_logging, \ strip_optimizer, xyxy2xywh from utils.plots import Annotator, colors from utils.torch_utils import load_classifier, select_device, time_sync @@ -108,7 +108,6 @@ def wrap_frozen_graph(gd, inputs, outputs): output_details = interpreter.get_output_details() # outputs int8 = input_details[0]['dtype'] == np.uint8 # is TFLite quantized uint8 model imgsz = check_img_size(imgsz, s=stride) # check image size - ascii = is_ascii(names) # names are ascii (use PIL for UTF-8) # Dataloader if webcam: @@ -190,7 +189,7 @@ 
def wrap_frozen_graph(gd, inputs, outputs): s += '%gx%g ' % img.shape[2:] # print string gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh imc = im0.copy() if save_crop else im0 # for save_crop - annotator = Annotator(im0, line_width=line_thickness, pil=not ascii) + annotator = Annotator(im0, line_width=line_thickness, example=str(names)) if len(det): # Rescale boxes from img_size to im0 size det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round() diff --git a/models/common.py b/models/common.py index 5305b03d5389..2acf6281f475 100644 --- a/models/common.py +++ b/models/common.py @@ -18,7 +18,7 @@ from torch.cuda import amp from utils.datasets import exif_transpose, letterbox -from utils.general import colorstr, increment_path, is_ascii, make_divisible, non_max_suppression, save_one_box, \ +from utils.general import colorstr, increment_path, make_divisible, non_max_suppression, save_one_box, \ scale_coords, xyxy2xywh from utils.plots import Annotator, colors from utils.torch_utils import time_sync @@ -356,7 +356,6 @@ def __init__(self, imgs, pred, files, times=None, names=None, shape=None): self.imgs = imgs # list of images as numpy arrays self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls) self.names = names # class names - self.ascii = is_ascii(names) # names are ascii (use PIL for UTF-8) self.files = files # image filenames self.xyxy = pred # xyxy pixels self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels @@ -369,13 +368,13 @@ def __init__(self, imgs, pred, files, times=None, names=None, shape=None): def display(self, pprint=False, show=False, save=False, crop=False, render=False, save_dir=Path('')): crops = [] for i, (im, pred) in enumerate(zip(self.imgs, self.pred)): - str = f'image {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} ' + s = f'image {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} ' # string if pred.shape[0]: for c in pred[:, -1].unique(): n = (pred[:, -1] == c).sum() # detections per class - str += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string + s += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string if show or save or render or crop: - annotator = Annotator(im, pil=not self.ascii) + annotator = Annotator(im, example=str(self.names)) for *box, conf, cls in reversed(pred): # xyxy, confidence, class label = f'{self.names[int(cls)]} {conf:.2f}' if crop: @@ -386,11 +385,11 @@ def display(self, pprint=False, show=False, save=False, crop=False, render=False annotator.box_label(box, label, color=colors(cls)) im = annotator.im else: - str += '(no detections)' + s += '(no detections)' im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np if pprint: - LOGGER.info(str.rstrip(', ')) + LOGGER.info(s.rstrip(', ')) if show: im.show(self.files[i]) # show if save: diff --git a/utils/general.py b/utils/general.py index 00bafb1e9537..8421981147f7 100755 --- a/utils/general.py +++ b/utils/general.py @@ -161,10 +161,9 @@ def is_pip(): return 'site-packages' in Path(__file__).resolve().parts -def is_ascii(s=''): - # Is string composed of all ASCII (no UTF) characters? - s = str(s) # convert list, tuple, None, etc. to str - return len(s.encode().decode('ascii', 'ignore')) == len(s) +def is_chinese(s='人工智能'): + # Is string composed of any Chinese characters? 
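    # note (editor's aside, not part of the patch): \u4e00-\u9fff is the basic
    # CJK Unified Ideographs block; characters from Extension A (\u3400-\u4dbf)
    # and beyond would not match, so this is a pragmatic rather than exhaustive test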
+ return re.search('[\u4e00-\u9fff]', s) def emojis(str=''): diff --git a/utils/plots.py b/utils/plots.py index d8e7c07f39b1..491c5704d67b 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -17,7 +17,7 @@ import torch from PIL import Image, ImageDraw, ImageFont -from utils.general import user_config_dir, is_ascii, xywh2xyxy, xyxy2xywh +from utils.general import user_config_dir, is_chinese, xywh2xyxy, xyxy2xywh from utils.metrics import fitness # Settings @@ -66,20 +66,21 @@ class Annotator: check_font() # download TTF if necessary # YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations - def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=True): + def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'): assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.' - self.pil = pil + self.pil = pil or not example.isascii() or is_chinese(example) if self.pil: # use PIL self.im = im if isinstance(im, Image.Image) else Image.fromarray(im) self.draw = ImageDraw.Draw(self.im) - self.font = check_font(font, size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12)) + self.font = check_font(font='Arial.Unicode.ttf' if is_chinese(example) else font, + size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12)) else: # use cv2 self.im = im self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2) # line width def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)): # Add one xyxy box to image with label - if self.pil or not is_ascii(label): + if self.pil or not label.isascii(): self.draw.rectangle(box, width=self.lw, outline=color) # box if label: w, h = self.font.getsize(label) # text width, height @@ -177,7 +178,7 @@ def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max # Annotate fs = int((h + w) * ns * 0.01) # font size - annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs) + annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True) for i in range(i + 1): x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders From 4e04cb0dc8cae357ad14be1b1507b88fe08c453a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 27 Sep 2021 17:40:20 -0700 Subject: [PATCH 353/757] Allow YOLOv5 execution from arbitrary `cwd` (#4954) * Allow YOLOv5 execution from arbitrary `cwd` * Fix str bugs --- detect.py | 14 ++++++++------ export.py | 1 + train.py | 14 ++++++++------ val.py | 11 ++++++----- 4 files changed, 23 insertions(+), 17 deletions(-) diff --git a/detect.py b/detect.py index fae82833c5f6..75ec3ecc5ff3 100644 --- a/detect.py +++ b/detect.py @@ -19,6 +19,7 @@ ROOT = FILE.parents[0] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = ROOT.relative_to(Path.cwd()) # relative from models.experimental import attempt_load from utils.datasets import LoadImages, LoadStreams @@ -30,8 +31,8 @@ @torch.no_grad() -def run(weights='yolov5s.pt', # model.pt path(s) - source='data/images', # file/dir/URL/glob, 0 for webcam +def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s) + source=ROOT / 'data/images', # file/dir/URL/glob, 0 for webcam imgsz=640, # inference size (pixels) conf_thres=0.25, # confidence threshold iou_thres=0.45, # NMS IOU threshold @@ -47,7 +48,7 @@ def run(weights='yolov5s.pt', # model.pt path(s) 
augment=False, # augmented inference visualize=False, # visualize features update=False, # update all models - project='runs/detect', # save results to project/name + project=ROOT / 'runs/detect', # save results to project/name name='exp', # save results to project/name exist_ok=False, # existing project/name ok, do not increment line_thickness=3, # bounding box thickness (pixels) @@ -55,6 +56,7 @@ def run(weights='yolov5s.pt', # model.pt path(s) hide_conf=False, # hide confidences half=False, # use FP16 half-precision inference ): + source = str(source) save_img = not nosave and not source.endswith('.txt') # save inference images webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith( ('rtsp://', 'rtmp://', 'http://', 'https://')) @@ -254,8 +256,8 @@ def wrap_frozen_graph(gd, inputs, outputs): def parse_opt(): parser = argparse.ArgumentParser() - parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model path(s)') - parser.add_argument('--source', type=str, default='data/images', help='file/dir/URL/glob, 0 for webcam') + parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model path(s)') + parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob, 0 for webcam') parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w') parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold') parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold') @@ -271,7 +273,7 @@ def parse_opt(): parser.add_argument('--augment', action='store_true', help='augmented inference') parser.add_argument('--visualize', action='store_true', help='visualize features') parser.add_argument('--update', action='store_true', help='update all models') - parser.add_argument('--project', default='runs/detect', help='save results to project/name') + parser.add_argument('--project', default=ROOT / 'runs/detect', help='save results to project/name') parser.add_argument('--name', default='exp', help='save results to project/name') parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)') diff --git a/export.py b/export.py index d5b63c410af8..74aca4b6c30a 100644 --- a/export.py +++ b/export.py @@ -34,6 +34,7 @@ ROOT = FILE.parents[0] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = ROOT.relative_to(Path.cwd()) # relative from models.common import Conv from models.experimental import attempt_load diff --git a/train.py b/train.py index 40f58bfafb4a..39fe1a0cb14b 100644 --- a/train.py +++ b/train.py @@ -30,6 +30,7 @@ ROOT = FILE.parents[0] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = ROOT.relative_to(Path.cwd()) # relative import val # for end-of-epoch mAP from models.experimental import attempt_load @@ -429,10 +430,10 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary def parse_opt(known=False): parser = argparse.ArgumentParser() - parser.add_argument('--weights', type=str, default='yolov5s.pt', help='initial weights path') + parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='initial weights path') parser.add_argument('--cfg', type=str, default='', help='model.yaml path') - 
parser.add_argument('--data', type=str, default='data/coco128.yaml', help='dataset.yaml path') - parser.add_argument('--hyp', type=str, default='data/hyps/hyp.scratch.yaml', help='hyperparameters path') + parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') + parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch.yaml', help='hyperparameters path') parser.add_argument('--epochs', type=int, default=300) parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs') parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)') @@ -451,8 +452,8 @@ def parse_opt(known=False): parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer') parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode') parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers') - parser.add_argument('--project', default='runs/train', help='save to project/name') parser.add_argument('--entity', default=None, help='W&B entity') + parser.add_argument('--project', default=ROOT / 'runs/train', help='save to project/name') parser.add_argument('--name', default='exp', help='save to project/name') parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') parser.add_argument('--quad', action='store_true', help='quad dataloader') @@ -486,10 +487,11 @@ def main(opt, callbacks=Callbacks()): opt.cfg, opt.weights, opt.resume = '', ckpt, True # reinstate LOGGER.info(f'Resuming training from {ckpt}') else: - opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_yaml(opt.cfg), check_yaml(opt.hyp) # check YAMLs + opt.data, opt.cfg, opt.hyp, opt.weights, opt.project = \ + check_file(opt.data), check_yaml(opt.cfg), check_yaml(opt.hyp), str(opt.weights), str(opt.project) # checks assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified' if opt.evolve: - opt.project = 'runs/evolve' + opt.project = str(ROOT / 'runs/evolve') opt.exist_ok, opt.resume = opt.resume, False # pass resume to exist_ok and disable resume opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) diff --git a/val.py b/val.py index 92e0e3b13ae9..4f0b49ae2ca7 100644 --- a/val.py +++ b/val.py @@ -21,6 +21,7 @@ ROOT = FILE.parents[0] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = ROOT.relative_to(Path.cwd()) # relative from models.experimental import attempt_load from utils.datasets import create_dataloader @@ -95,7 +96,7 @@ def run(data, save_hybrid=False, # save label+prediction hybrid results to *.txt save_conf=False, # save confidences in --save-txt labels save_json=False, # save a COCO-JSON results file - project='runs/val', # save to project/name + project=ROOT / 'runs/val', # save to project/name name='exp', # save to project/name exist_ok=False, # existing project/name ok, do not increment half=True, # use FP16 half-precision inference @@ -297,8 +298,8 @@ def run(data, def parse_opt(): parser = argparse.ArgumentParser() - parser.add_argument('--data', type=str, default='data/coco128.yaml', help='dataset.yaml path') - parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)') + parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') + 
parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model.pt path(s)') parser.add_argument('--batch-size', type=int, default=32, help='batch size') parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)') parser.add_argument('--conf-thres', type=float, default=0.001, help='confidence threshold') @@ -312,14 +313,14 @@ def parse_opt(): parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt') parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') parser.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file') - parser.add_argument('--project', default='runs/val', help='save to project/name') + parser.add_argument('--project', default=ROOT / 'runs/val', help='save to project/name') parser.add_argument('--name', default='exp', help='save to project/name') parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') opt = parser.parse_args() + opt.data = check_yaml(opt.data) # check YAML opt.save_json |= opt.data.endswith('coco.yaml') opt.save_txt |= opt.save_hybrid - opt.data = check_yaml(opt.data) # check YAML print_args(FILE.stem, opt) return opt From 5ed28603cf94185c28da02b3d8bb433118ac33d4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 27 Sep 2021 18:26:41 -0700 Subject: [PATCH 354/757] Update relative `ROOT` logic (#4955) * Update relative `ROOT` logic * python 3.9 Path().is_relative_to() removal --- models/tf.py | 1 + models/yolo.py | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/models/tf.py b/models/tf.py index b7d99359c863..4c082cb8a15e 100644 --- a/models/tf.py +++ b/models/tf.py @@ -20,6 +20,7 @@ ROOT = FILE.parents[1] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = ROOT.relative_to(Path.cwd()) # relative import numpy as np import tensorflow as tf diff --git a/models/yolo.py b/models/yolo.py index a7590c57816c..b4ec1eda8376 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -15,11 +15,12 @@ ROOT = FILE.parents[1] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = ROOT.relative_to(Path.cwd()) # relative from models.common import * from models.experimental import * from utils.autoanchor import check_anchor_order -from utils.general import check_yaml, make_divisible, set_logging +from utils.general import check_yaml, make_divisible, print_args, set_logging from utils.plots import feature_visualization from utils.torch_utils import copy_attr, fuse_conv_and_bn, initialize_weights, model_info, scale_img, \ select_device, time_sync @@ -281,6 +282,7 @@ def parse_model(d, ch): # model_dict, input_channels(3) parser.add_argument('--profile', action='store_true', help='profile model speed') opt = parser.parse_args() opt.cfg = check_yaml(opt.cfg) # check YAML + print_args(FILE.stem, opt) set_logging() device = select_device(opt.device) From 3aeb57d66ff2297d20aab4b5cd3e954fa3ef3e19 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 27 Sep 2021 18:32:22 -0700 Subject: [PATCH 355/757] Created using Colaboratory --- tutorial.ipynb | 31 +++++++------------------------ 1 file changed, 7 insertions(+), 24 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index cfa96914c713..5663f151cef8 100644 
--- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -666,31 +666,14 @@ "source": [ "# 3. Train\n", "\n", - "Download [COCO128](https://www.kaggle.com/ultralytics/coco128), a small 128-image tutorial dataset, start tensorboard and train YOLOv5s from a pretrained checkpoint for 3 epochs (note actual training is typically much longer, around **300-1000 epochs**, depending on your dataset)." - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "Knxi2ncxWffW" - }, - "source": [ - "# Download COCO128\n", - "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip', 'tmp.zip')\n", - "!unzip -q tmp.zip -d ../datasets && rm tmp.zip" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "_pOkGLv1dMqh" - }, - "source": [ - "Train a YOLOv5s model on [COCO128](https://www.kaggle.com/ultralytics/coco128) with `--data coco128.yaml`, starting from pretrained `--weights yolov5s.pt`, or from randomly initialized `--weights '' --cfg yolov5s.yaml`. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and **COCO, COCO128, and VOC datasets are downloaded automatically** on first use.\n", + "Train a YOLOv5s model on the [COCO128](https://www.kaggle.com/ultralytics/coco128) dataset with `--data coco128.yaml`, starting from pretrained `--weights yolov5s.pt`, or from randomly initialized `--weights '' --cfg yolov5s.yaml`. \n", + "\n", + "* **Pretrained [Models](https://github.com/ultralytics/yolov5/tree/master/models)** are downloaded \n", + "automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\n", + "\n", + "* **[Datasets](https://github.com/ultralytics/yolov5/tree/master/data)** available for autodownload include: [COCO](https://github.com/ultralytics/yolov5/blob/master/data/coco.yaml), [COCO128](https://github.com/ultralytics/yolov5/blob/master/data/coco128.yaml), [VOC](https://github.com/ultralytics/yolov5/blob/master/data/VOC.yaml), [Argoverse](https://github.com/ultralytics/yolov5/blob/master/data/Argoverse.yaml), [VisDrone](https://github.com/ultralytics/yolov5/blob/master/data/VisDrone.yaml), [GlobalWheat](https://github.com/ultralytics/yolov5/blob/master/data/GlobalWheat2020.yaml), [xView](https://github.com/ultralytics/yolov5/blob/master/data/xView.yaml), [Objects365](https://github.com/ultralytics/yolov5/blob/master/data/Objects365.yaml), [SKU-110K](https://github.com/ultralytics/yolov5/blob/master/data/SKU-110K.yaml).\n", "\n", - "All training results are saved to `runs/train/` with incrementing run directories, i.e. `runs/train/exp2`, `runs/train/exp3` etc.\n" + "* **Training Results** are saved to `runs/train/` with incrementing run directories, i.e. `runs/train/exp2`, `runs/train/exp3` etc." 
] }, { From 2993c3fa7af7a76dd82349e3cf85e35e4254576b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 27 Sep 2021 19:00:05 -0700 Subject: [PATCH 356/757] Add `roboflow` (#4956) --- requirements.txt | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index 42d5dfc49354..a80d46789943 100755 --- a/requirements.txt +++ b/requirements.txt @@ -16,8 +16,8 @@ tensorboard>=2.4.1 # wandb # Plotting ------------------------------------ -seaborn>=0.11.0 pandas +seaborn>=0.11.0 # Export -------------------------------------- # coremltools>=4.1 # CoreML export @@ -28,7 +28,8 @@ pandas # tensorflowjs>=3.9.0 # TF.js export # Extras -------------------------------------- +# albumentations>=1.0.3 # Cython # for pycocotools https://github.com/cocodataset/cocoapi/issues/172 # pycocotools>=2.0 # COCO mAP -# albumentations>=1.0.3 +# roboflow thop # FLOPs computation From c1bed601e9b9a3f5fa8fb529cfa40df7a3a0b903 Mon Sep 17 00:00:00 2001 From: Diego Montes <54745152+d57montes@users.noreply.github.com> Date: Mon, 27 Sep 2021 23:16:23 -0400 Subject: [PATCH 357/757] Fix `isascii()` method calls for python 3.6 (#4958) * fix isascii for python3.6 * update comment with python 3.7 note Co-authored-by: Glenn Jocher --- utils/general.py | 5 +++++ utils/plots.py | 6 +++--- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/utils/general.py b/utils/general.py index 8421981147f7..28301f8573bb 100755 --- a/utils/general.py +++ b/utils/general.py @@ -160,6 +160,11 @@ def is_pip(): # Is file in a pip package? return 'site-packages' in Path(__file__).resolve().parts +def is_ascii(s=''): + # Is string composed of all ASCII (no UTF) characters? (note str().isascii() introduced in python 3.7) + s = str(s) # convert list, tuple, None, etc. to str + return len(s.encode().decode('ascii', 'ignore')) == len(s) + def is_chinese(s='人工智能'): # Is string composed of any Chinese characters? diff --git a/utils/plots.py b/utils/plots.py index 491c5704d67b..2f98d5b7e630 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -17,7 +17,7 @@ import torch from PIL import Image, ImageDraw, ImageFont -from utils.general import user_config_dir, is_chinese, xywh2xyxy, xyxy2xywh +from utils.general import user_config_dir, is_ascii, is_chinese, xywh2xyxy, xyxy2xywh from utils.metrics import fitness # Settings @@ -68,7 +68,7 @@ class Annotator: # YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'): assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.' 
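        # note (added comment): str.isascii() only exists on Python >= 3.7; the is_ascii() helper re-implements the check with an encode/decode round-trip so 3.6 keeps working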
- self.pil = pil or not example.isascii() or is_chinese(example) + self.pil = pil or not is_ascii(example) or is_chinese(example) if self.pil: # use PIL self.im = im if isinstance(im, Image.Image) else Image.fromarray(im) self.draw = ImageDraw.Draw(self.im) @@ -80,7 +80,7 @@ def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=Fa def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)): # Add one xyxy box to image with label - if self.pil or not label.isascii(): + if self.pil or not is_ascii(label): self.draw.rectangle(box, width=self.lw, outline=color) # box if label: w, h = self.font.getsize(label) # text width, height From fb982d6030a700703649311937d9d08e68006b58 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 28 Sep 2021 06:36:12 -0700 Subject: [PATCH 358/757] Fix relative `ROOT` Pytorch Hub custom model bug (#4974) * Fix relative `ROOT` Pytorch Hub custom model bug * Update yolo.py --- models/tf.py | 2 +- models/yolo.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/models/tf.py b/models/tf.py index 4c082cb8a15e..bc6134291aca 100644 --- a/models/tf.py +++ b/models/tf.py @@ -20,7 +20,7 @@ ROOT = FILE.parents[1] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH -ROOT = ROOT.relative_to(Path.cwd()) # relative +# ROOT = ROOT.relative_to(Path.cwd()) # relative import numpy as np import tensorflow as tf diff --git a/models/yolo.py b/models/yolo.py index b4ec1eda8376..5d19aad5369f 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -15,7 +15,7 @@ ROOT = FILE.parents[1] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH -ROOT = ROOT.relative_to(Path.cwd()) # relative +# ROOT = ROOT.relative_to(Path.cwd()) # relative from models.common import * from models.experimental import * From 29acedf7dd4b805ef29aff08746d4cf75c1b5eb7 Mon Sep 17 00:00:00 2001 From: Kalen Michael Date: Tue, 28 Sep 2021 18:35:46 +0200 Subject: [PATCH 359/757] Add Roboflow to README (#4972) * added callbacks * added back callback to main * added save_dir to callback output * merged in upstream * removed ghost code * added integrations section removed competition * attempt column build * attempt column build * moved new to header * removed center column * utm * Update README.md * Update README.md * updated logo sizes * resized logos * fixed links * Update README.md * Update README.md * Update README.md Co-authored-by: Glenn Jocher --- README.md | 28 +++++++++++++++++++--------- 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index a01c2c688aa4..8314aea0849a 100644 --- a/README.md +++ b/README.md @@ -145,7 +145,7 @@ $ python train.py --data coco.yaml --cfg yolov5s.yaml --weights '' --batch-size * [Tips for Best Training Results](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results)  ☘️ RECOMMENDED * [Weights & Biases Logging](https://github.com/ultralytics/yolov5/issues/1289)  🌟 NEW -* [Supervisely Ecosystem](https://github.com/ultralytics/yolov5/issues/2518)  🌟 NEW +* [Roboflow for Datasets, Labeling, and Active Learning](https://github.com/ultralytics/yolov5/issues/4975)  🌟 NEW * [Multi-GPU Training](https://github.com/ultralytics/yolov5/issues/475) * [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36)  ⭐ NEW * [TorchScript, ONNX, CoreML Export](https://github.com/ultralytics/yolov5/issues/251) 🚀 @@ -158,11 +158,9 @@ $ python train.py --data coco.yaml --cfg yolov5s.yaml --weights 
'' --batch-size
 
-## <div align="center">Environments and Integrations</div>
+## <div align="center">Environments</div>
 
-Get started in seconds with our verified environments and integrations,
-including [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme) for automatic YOLOv5 experiment
-logging. Click each icon below for details.
+Get started in seconds with our verified environments. Click each icon below for details.
+
+## <div align="center">Integrations</div>
+
+<div align="center">
+
+
+
+</div>
+
+|Weights and Biases|Roboflow - ⭐ NEW|
+|:-:|:-:|
+|Automatically track and visualize all your YOLOv5 training runs in the cloud with [Weights & Biases.](https://wandb.ai/site?utm_campaign=repo_yolo_readme)|Label and automatically export your custom datasets directly to YOLOv5 for training using [Roboflow](https://roboflow.com/?ref=ultralytics) |
 
-## <div align="center">Compete and Win</div>
+
 
 ## <div align="center">Why YOLOv5</div>
From 38c779b09950a7a8349d1d0891d414ced176dd4e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 28 Sep 2021 10:04:56 -0700 Subject: [PATCH 360/757] Created using Colaboratory --- tutorial.ipynb | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 5663f151cef8..17cf192e4832 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -665,15 +665,31 @@ }, "source": [ "# 3. Train\n", + "[](https://roboflow.com/?ref=ultralytics)\n", + "*Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package*\n", "\n", - "Train a YOLOv5s model on the [COCO128](https://www.kaggle.com/ultralytics/coco128) dataset with `--data coco128.yaml`, starting from pretrained `--weights yolov5s.pt`, or from randomly initialized `--weights '' --cfg yolov5s.yaml`. \n", + "
\n", "\n", - "* **Pretrained [Models](https://github.com/ultralytics/yolov5/tree/master/models)** are downloaded \n", + "Train a YOLOv5s model on the [COCO128](https://www.kaggle.com/ultralytics/coco128) dataset with `--data coco128.yaml`, starting from pretrained `--weights yolov5s.pt`, or from randomly initialized `--weights '' --cfg yolov5s.yaml`.\n", + "\n", + "- **Pretrained [Models](https://github.com/ultralytics/yolov5/tree/master/models)** are downloaded\n", "automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\n", + "- **[Datasets](https://github.com/ultralytics/yolov5/tree/master/data)** available for autodownload include: [COCO](https://github.com/ultralytics/yolov5/blob/master/data/coco.yaml), [COCO128](https://github.com/ultralytics/yolov5/blob/master/data/coco128.yaml), [VOC](https://github.com/ultralytics/yolov5/blob/master/data/VOC.yaml), [Argoverse](https://github.com/ultralytics/yolov5/blob/master/data/Argoverse.yaml), [VisDrone](https://github.com/ultralytics/yolov5/blob/master/data/VisDrone.yaml), [GlobalWheat](https://github.com/ultralytics/yolov5/blob/master/data/GlobalWheat2020.yaml), [xView](https://github.com/ultralytics/yolov5/blob/master/data/xView.yaml), [Objects365](https://github.com/ultralytics/yolov5/blob/master/data/Objects365.yaml), [SKU-110K](https://github.com/ultralytics/yolov5/blob/master/data/SKU-110K.yaml).\n", + "- **Training Results** are saved to `runs/train/` with incrementing run directories, i.e. `runs/train/exp2`, `runs/train/exp3` etc.\n", + "
\n", + "\n", + "## Train on Custom Data with Roboflow 🌟 NEW\n", + "\n", + "[Roboflow](https://roboflow.com/?ref=ultralytics) enables you to easily **organize, label, and prepare** a high quality dataset with your own custom data. Roboflow also makes it easy to establish an active learning pipeline, collaborate with your team on dataset improvement, and integrate directly into your model building workflow with the `roboflow` pip package.\n", + "\n", + "- Custom Training Example: [https://blog.roboflow.com/how-to-train-yolov5-on-a-custom-dataset/](https://blog.roboflow.com/how-to-train-yolov5-on-a-custom-dataset/?ref=ultralytics)\n", + "- Custom Training Notebook: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/roboflow-ai/yolov5-custom-training-tutorial/blob/main/yolov5-custom-training.ipynb)\n", + "\n", + "
\n", "\n", - "* **[Datasets](https://github.com/ultralytics/yolov5/tree/master/data)** available for autodownload include: [COCO](https://github.com/ultralytics/yolov5/blob/master/data/coco.yaml), [COCO128](https://github.com/ultralytics/yolov5/blob/master/data/coco128.yaml), [VOC](https://github.com/ultralytics/yolov5/blob/master/data/VOC.yaml), [Argoverse](https://github.com/ultralytics/yolov5/blob/master/data/Argoverse.yaml), [VisDrone](https://github.com/ultralytics/yolov5/blob/master/data/VisDrone.yaml), [GlobalWheat](https://github.com/ultralytics/yolov5/blob/master/data/GlobalWheat2020.yaml), [xView](https://github.com/ultralytics/yolov5/blob/master/data/xView.yaml), [Objects365](https://github.com/ultralytics/yolov5/blob/master/data/Objects365.yaml), [SKU-110K](https://github.com/ultralytics/yolov5/blob/master/data/SKU-110K.yaml).\n", + "[](https://roboflow.com/?ref=ultralytics)\n", "\n", - "* **Training Results** are saved to `runs/train/` with incrementing run directories, i.e. `runs/train/exp2`, `runs/train/exp3` etc." + "*Label images lightning fast (including with model-assisted labeling)*" ] }, { From 0c87478713b3db7583da0ae950587e3316291004 Mon Sep 17 00:00:00 2001 From: Diego Montes <54745152+d57montes@users.noreply.github.com> Date: Tue, 28 Sep 2021 15:22:31 -0400 Subject: [PATCH 361/757] Update wandb_utils.py (#4953) `is_valset_wandb_artifact` and `is_trainset_wandb_artifact` were referenced before assignment causing wandb to be unusable. --- utils/loggers/wandb/wandb_utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index 9a80dc42ca95..92fdd27bb004 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -45,7 +45,8 @@ def check_wandb_config_file(data_config_file): def check_wandb_dataset(data_file): - is_wandb_artifact = False + is_trainset_wandb_artifact = False + is_valset_wandb_artifact = False if check_file(data_file) and data_file.endswith('.yaml'): with open(data_file, errors='ignore') as f: data_dict = yaml.safe_load(f) From 94705a952861d8a70ec8be2fb90f3375150a873d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 28 Sep 2021 12:28:18 -0700 Subject: [PATCH 362/757] Add Hub custom models to CI tests (#4978) * Update ci-testing.yml for Hub custom model tests * Update ci-testing.yml --- .github/workflows/ci-testing.yml | 30 ++++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 71f39c16c4ed..da695395fe69 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -4,10 +4,10 @@ name: CI CPU testing on: # https://help.github.com/en/actions/reference/events-that-trigger-workflows push: - branches: [master] + branches: [ master ] pull_request: # The branches below must be a subset of the branches above - branches: [master] + branches: [ master ] schedule: - cron: '0 0 * * *' # Runs at 00:00 UTC every day @@ -18,9 +18,9 @@ jobs: strategy: fail-fast: false matrix: - os: [ubuntu-latest, macos-latest, windows-latest] - python-version: [3.8] - model: ['yolov5s'] # models to test + os: [ ubuntu-latest, macos-latest, windows-latest ] + python-version: [ 3.8 ] + model: [ 'yolov5s' ] # models to test # Timeout: https://stackoverflow.com/a/59076067/4521646 timeout-minutes: 50 @@ -65,19 +65,25 @@ jobs: - name: Tests workflow run: | # export PYTHONPATH="$PWD" # to run '$ python *.py' files in 
subdirectories - di=cpu # inference devices # define device + di=cpu # device - # train + # Train python train.py --img 128 --batch 16 --weights ${{ matrix.model }}.pt --cfg ${{ matrix.model }}.yaml --epochs 1 --device $di - # detect - python detect.py --weights ${{ matrix.model }}.pt --device $di - python detect.py --weights runs/train/exp/weights/last.pt --device $di - # val + # Val python val.py --img 128 --batch 16 --weights ${{ matrix.model }}.pt --device $di python val.py --img 128 --batch 16 --weights runs/train/exp/weights/last.pt --device $di - + # Detect + python detect.py --weights ${{ matrix.model }}.pt --device $di + python detect.py --weights runs/train/exp/weights/last.pt --device $di python hubconf.py # hub + # Export python models/yolo.py --cfg ${{ matrix.model }}.yaml # build PyTorch model python models/tf.py --weights ${{ matrix.model }}.pt # build TensorFlow model python export.py --img 128 --batch 1 --weights ${{ matrix.model }}.pt --include torchscript onnx # export + # Python + python - < Date: Tue, 28 Sep 2021 12:41:28 -0700 Subject: [PATCH 363/757] Faster `--img 64` CI tests (#4979) --- .github/workflows/ci-testing.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index da695395fe69..c44e23995c3b 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -68,10 +68,10 @@ jobs: di=cpu # device # Train - python train.py --img 128 --batch 16 --weights ${{ matrix.model }}.pt --cfg ${{ matrix.model }}.yaml --epochs 1 --device $di + python train.py --img 64 --batch 32 --weights ${{ matrix.model }}.pt --cfg ${{ matrix.model }}.yaml --epochs 1 --device $di # Val - python val.py --img 128 --batch 16 --weights ${{ matrix.model }}.pt --device $di - python val.py --img 128 --batch 16 --weights runs/train/exp/weights/last.pt --device $di + python val.py --img 64 --batch 32 --weights ${{ matrix.model }}.pt --device $di + python val.py --img 64 --batch 32 --weights runs/train/exp/weights/last.pt --device $di # Detect python detect.py --weights ${{ matrix.model }}.pt --device $di python detect.py --weights runs/train/exp/weights/last.pt --device $di @@ -79,7 +79,7 @@ jobs: # Export python models/yolo.py --cfg ${{ matrix.model }}.yaml # build PyTorch model python models/tf.py --weights ${{ matrix.model }}.pt # build TensorFlow model - python export.py --img 128 --batch 1 --weights ${{ matrix.model }}.pt --include torchscript onnx # export + python export.py --img 64 --batch 1 --weights ${{ matrix.model }}.pt --include torchscript onnx # export # Python python - < Date: Tue, 28 Sep 2021 17:29:05 -0700 Subject: [PATCH 364/757] Created using Colaboratory --- tutorial.ipynb | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 17cf192e4832..9cfb61b0e682 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -661,14 +661,14 @@ { "cell_type": "markdown", "metadata": { - "id": "VUOiNLtMP5aG" + "id": "ZY2VXXXu74w5" }, "source": [ "# 3. Train\n", - "[](https://roboflow.com/?ref=ultralytics)\n", - "*Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package*\n", "\n", - "
\n", + "

\n", + "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", + "

\n", "\n", "Train a YOLOv5s model on the [COCO128](https://www.kaggle.com/ultralytics/coco128) dataset with `--data coco128.yaml`, starting from pretrained `--weights yolov5s.pt`, or from randomly initialized `--weights '' --cfg yolov5s.yaml`.\n", "\n", @@ -676,7 +676,7 @@ "automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\n", "- **[Datasets](https://github.com/ultralytics/yolov5/tree/master/data)** available for autodownload include: [COCO](https://github.com/ultralytics/yolov5/blob/master/data/coco.yaml), [COCO128](https://github.com/ultralytics/yolov5/blob/master/data/coco128.yaml), [VOC](https://github.com/ultralytics/yolov5/blob/master/data/VOC.yaml), [Argoverse](https://github.com/ultralytics/yolov5/blob/master/data/Argoverse.yaml), [VisDrone](https://github.com/ultralytics/yolov5/blob/master/data/VisDrone.yaml), [GlobalWheat](https://github.com/ultralytics/yolov5/blob/master/data/GlobalWheat2020.yaml), [xView](https://github.com/ultralytics/yolov5/blob/master/data/xView.yaml), [Objects365](https://github.com/ultralytics/yolov5/blob/master/data/Objects365.yaml), [SKU-110K](https://github.com/ultralytics/yolov5/blob/master/data/SKU-110K.yaml).\n", "- **Training Results** are saved to `runs/train/` with incrementing run directories, i.e. `runs/train/exp2`, `runs/train/exp3` etc.\n", - "
\n", + "

\n", "\n", "## Train on Custom Data with Roboflow 🌟 NEW\n", "\n", @@ -684,12 +684,9 @@ "\n", "- Custom Training Example: [https://blog.roboflow.com/how-to-train-yolov5-on-a-custom-dataset/](https://blog.roboflow.com/how-to-train-yolov5-on-a-custom-dataset/?ref=ultralytics)\n", "- Custom Training Notebook: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/roboflow-ai/yolov5-custom-training-tutorial/blob/main/yolov5-custom-training.ipynb)\n", - "\n", "
\n", "\n", - "[](https://roboflow.com/?ref=ultralytics)\n", - "\n", - "*Label images lightning fast (including with model-assisted labeling)*" + "

Label images lightning fast (including with model-assisted labeling)" ] }, { From 6b19f728a1d422a721d3094f746caacbc24c3fed Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 28 Sep 2021 17:33:48 -0700 Subject: [PATCH 365/757] Created using Colaboratory --- tutorial.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 9cfb61b0e682..b7a06845ea99 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -378,7 +378,7 @@ }, "source": [ "\n", - "\n", + "\n", "\n", "This is the **official YOLOv5 🚀 notebook** by **Ultralytics**, and is freely available for redistribution under the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/). \n", "For more information please visit https://github.com/ultralytics/yolov5 and https://ultralytics.com. Thank you!" From 9988059b1063a8375de76179ff31a273f58b53bc Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 28 Sep 2021 17:46:14 -0700 Subject: [PATCH 366/757] Clickable CI badge (#4985) --- .github/workflows/greetings.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index a40d0a50c8ac..0bbc49ba2508 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -54,7 +54,7 @@ jobs: ## Status - ![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg) + CI CPU testing If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), validation ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on MacOS, Windows, and Ubuntu every 24 hours and on every commit. From cd35a009ba964331abccd30f6fa0614224105d39 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 28 Sep 2021 20:11:20 -0700 Subject: [PATCH 367/757] Revert `torch.hub.load()` test (#4986) Temporarily reverts https://github.com/ultralytics/yolov5/pull/4978 until torch 1.10 is released, which should resolve `urllib.error.HTTPError: HTTP Error 403: rate limit exceeded` errors generated by torch hub from GitHub actions runners. 
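
A minimal sketch of the kind of Hub call that avoids the GitHub API entirely (assumptions: it runs inside a yolov5 checkout containing hubconf.py, 'path/to/best.pt' is a placeholder checkpoint, and the installed torch supports source='local'):

    import torch

    # source='local' makes torch.hub read hubconf.py from the given directory
    # instead of cloning from github.com, so no rate-limited API requests occur
    model = torch.hub.load('.', 'custom', path='path/to/best.pt', source='local')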
--- .github/workflows/ci-testing.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index c44e23995c3b..23a742fc08dd 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -83,7 +83,8 @@ jobs: # Python python - < Date: Wed, 29 Sep 2021 19:48:45 +0200 Subject: [PATCH 368/757] Fix URL parsing bug (#4998) * added callbacks * added back callback to main * added save_dir to callback output * merged in upstream * removed ghost code * fixed parsing error for google temp links Co-authored-by: Glenn Jocher --- utils/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index 28301f8573bb..f2afb480cc63 100755 --- a/utils/general.py +++ b/utils/general.py @@ -313,7 +313,7 @@ def check_file(file, suffix=''): return file elif file.startswith(('http:/', 'https:/')): # download url = str(Path(file)).replace(':/', '://') # Pathlib turns :// -> :/ - file = Path(urllib.parse.unquote(file)).name.split('?')[0] # '%2F' to '/', split https://url.com/file.txt?auth + file = Path(urllib.parse.unquote(file).split('?')[0]).name # '%2F' to '/', split https://url.com/file.txt?auth print(f'Downloading {url} to {file}...') torch.hub.download_url_to_file(url, file) assert Path(file).exists() and Path(file).stat().st_size > 0, f'File download failed: {url}' # check From 42354d70252be81dedf0b889eaf5f4b071eb7694 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 29 Sep 2021 16:56:10 -0700 Subject: [PATCH 369/757] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 8314aea0849a..9510fb278cd7 100644 --- a/README.md +++ b/README.md @@ -192,7 +192,7 @@ Get started in seconds with our verified environments. 
Click each icon below for |Weights and Biases|Roboflow - ⭐ NEW| |:-:|:-:| -|Automatically track and visualize all your YOLOv5 training runs in the cloud with [Weights & Biases.](https://wandb.ai/site?utm_campaign=repo_yolo_readme)|Label and automatically export your custom datasets directly to YOLOv5 for training using [Roboflow](https://roboflow.com/?ref=ultralytics) | +|Automatically track and visualize all your YOLOv5 training runs in the cloud with [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme)|Label and automatically export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) | ## Pitch diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 6d1603880f4d..8ebfdeca8d74 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -83,7 +83,7 @@ jobs: # Python python - <> $GITHUB_ENV + - uses: actions/cache@v2 + with: + path: ~/.cache/pre-commit + key: pre-commit|${{ env.PY }}|${{ hashFiles('.pre-commit-config.yaml') }} + + - uses: pre-commit/action@v2.0.3 + # this action also provides an additional behaviour when used in private repositories + # when configured with a github token, the action will push back fixes to the pull request branch + with: + token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 2305ea07e902..67f51f0e8bce 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -1,4 +1,4 @@ -# This action runs GitHub's industry-leading static analysis engine, CodeQL, against a repository's source code to find security vulnerabilities. +# This action runs GitHub's industry-leading static analysis engine, CodeQL, against a repository's source code to find security vulnerabilities. # https://github.com/github/codeql-action name: "CodeQL" diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index a00ee8da66e1..0daf9514d3c5 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -57,4 +57,3 @@ jobs: CI CPU testing If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), validation ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on MacOS, Windows, and Ubuntu every 24 hours and on every commit. 
- diff --git a/.gitignore b/.gitignore index 375b71807588..5f8cab550021 100755 --- a/.gitignore +++ b/.gitignore @@ -20,6 +20,7 @@ *.data *.json *.cfg +!setup.cfg !cfg/yolov3*.cfg storage.googleapis.com diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 000000000000..2eb78aa17ef4 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,67 @@ +# Define hooks for code formations +# Will be applied on any updated commit files if a user has installed and linked commit hook + +default_language_version: + python: python3.8 + +# Define bot property if installed via https://github.com/marketplace/pre-commit-ci +ci: + autofix_prs: true + autoupdate_commit_msg: '[pre-commit.ci] pre-commit suggestions' + autoupdate_schedule: quarterly + # submodules: true + +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.0.1 + hooks: + - id: end-of-file-fixer + - id: trailing-whitespace + - id: check-case-conflict + - id: check-yaml + - id: check-toml + - id: pretty-format-json + - id: check-docstring-first + + - repo: https://github.com/asottile/pyupgrade + rev: v2.23.1 + hooks: + - id: pyupgrade + args: [--py36-plus] + name: Upgrade code + + # TODO + #- repo: https://github.com/PyCQA/isort + # rev: 5.9.3 + # hooks: + # - id: isort + # name: imports + + # TODO + #- repo: https://github.com/pre-commit/mirrors-yapf + # rev: v0.31.0 + # hooks: + # - id: yapf + # name: formatting + + # TODO + #- repo: https://github.com/executablebooks/mdformat + # rev: 0.7.7 + # hooks: + # - id: mdformat + # additional_dependencies: + # - mdformat-gfm + # - mdformat-black + # - mdformat_frontmatter + + # TODO + #- repo: https://github.com/asottile/yesqa + # rev: v1.2.3 + # hooks: + # - id: yesqa + + - repo: https://github.com/PyCQA/flake8 + rev: 3.9.2 + hooks: + - id: flake8 + name: PEP8 diff --git a/LICENSE b/LICENSE index 9e419e042146..92b370f0e0e1 100644 --- a/LICENSE +++ b/LICENSE @@ -671,4 +671,4 @@ into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read -. \ No newline at end of file +. diff --git a/README.md b/README.md index 0d474cb4a09b..d3fd7e9a92f5 100644 --- a/README.md +++ b/README.md @@ -46,7 +46,7 @@ YOLOv5 🚀 is a family of object detection architectures and models pretrained open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development.

- @@ -109,7 +109,7 @@ the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases) and ```bash $ python detect.py --source 0 # webcam - file.jpg # image + file.jpg # image file.mp4 # video path/ # directory path/*.jpg # glob @@ -136,7 +136,7 @@ $ python train.py --data coco.yaml --cfg yolov5s.yaml --weights '' --batch-size - +
Tutorials @@ -178,7 +178,7 @@ Get started in seconds with our verified environments. Click each icon below for -
+ ##
Integrations
@@ -239,7 +239,7 @@ We are super excited about our first-ever Ultralytics YOLOv5 🚀 EXPORT Competi
 |[YOLOv5s6][assets] |1280 |44.5 |63.0 |385 |8.2 |3.6 |16.8 |12.6
 |[YOLOv5m6][assets] |1280 |51.0 |69.0 |887 |11.1 |6.8 |35.7 |50.0
 |[YOLOv5l6][assets] |1280 |53.6 |71.6 |1784 |15.8 |10.5 |76.8 |111.4
-|[YOLOv5x6][assets]<br>+ [TTA][TTA]|1280<br>1536 |54.7<br>**55.4** |**72.4**<br>72.3 |3136<br>- |26.2<br>- |19.4<br>- |140.7<br>- |209.8<br>- 
+|[YOLOv5x6][assets]<br>+ [TTA][TTA]|1280<br>1536 |54.7<br>**55.4** |**72.4**<br>72.3 |3136<br>- |26.2<br>- |19.4<br>- |140.7<br>- |209.8<br>-
 
Table Notes (click to expand) diff --git a/data/Objects365.yaml b/data/Objects365.yaml index 97a424fd03a0..b10c28e764c1 100644 --- a/data/Objects365.yaml +++ b/data/Objects365.yaml @@ -62,21 +62,21 @@ names: ['Person', 'Sneakers', 'Chair', 'Other Shoes', 'Hat', 'Car', 'Lamp', 'Gla download: | from pycocotools.coco import COCO from tqdm import tqdm - + from utils.general import Path, download, np, xyxy2xywhn - + # Make Directories dir = Path(yaml['path']) # dataset root dir for p in 'images', 'labels': (dir / p).mkdir(parents=True, exist_ok=True) for q in 'train', 'val': (dir / p / q).mkdir(parents=True, exist_ok=True) - + # Train, Val Splits for split, patches in [('train', 50 + 1), ('val', 43 + 1)]: print(f"Processing {split} in {patches} patches ...") images, labels = dir / 'images' / split, dir / 'labels' / split - + # Download url = f"https://dorc.ks3-cn-beijing.ksyun.com/data-set/2020Objects365%E6%95%B0%E6%8D%AE%E9%9B%86/{split}/" if split == 'train': @@ -86,11 +86,11 @@ download: | download([f'{url}zhiyuan_objv2_{split}.json'], dir=dir, delete=False) # annotations json download([f'{url}images/v1/patch{i}.tar.gz' for i in range(15 + 1)], dir=images, curl=True, delete=False, threads=8) download([f'{url}images/v2/patch{i}.tar.gz' for i in range(16, patches)], dir=images, curl=True, delete=False, threads=8) - + # Move for f in tqdm(images.rglob('*.jpg'), desc=f'Moving {split} images'): f.rename(images / f.name) # move to /images/{split} - + # Labels coco = COCO(dir / f'zhiyuan_objv2_{split}.json') names = [x["name"] for x in coco.loadCats(coco.getCatIds())] diff --git a/data/coco128.yaml b/data/coco128.yaml index 70cf52c397af..b1dfb004afa1 100644 --- a/data/coco128.yaml +++ b/data/coco128.yaml @@ -27,4 +27,4 @@ names: ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 't # Download script/URL (optional) -download: https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip \ No newline at end of file +download: https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip diff --git a/data/hyps/hyp.scratch-high.yaml b/data/hyps/hyp.scratch-high.yaml index 519c82687e09..5a586cc63fae 100644 --- a/data/hyps/hyp.scratch-high.yaml +++ b/data/hyps/hyp.scratch-high.yaml @@ -31,4 +31,4 @@ flipud: 0.0 # image flip up-down (probability) fliplr: 0.5 # image flip left-right (probability) mosaic: 1.0 # image mosaic (probability) mixup: 0.1 # image mixup (probability) -copy_paste: 0.1 # segment copy-paste (probability) \ No newline at end of file +copy_paste: 0.1 # segment copy-paste (probability) diff --git a/data/hyps/hyp.scratch-low.yaml b/data/hyps/hyp.scratch-low.yaml index b093a95ac53b..b9ef1d55a3b6 100644 --- a/data/hyps/hyp.scratch-low.yaml +++ b/data/hyps/hyp.scratch-low.yaml @@ -31,4 +31,4 @@ flipud: 0.0 # image flip up-down (probability) fliplr: 0.5 # image flip left-right (probability) mosaic: 1.0 # image mosaic (probability) mixup: 0.0 # image mixup (probability) -copy_paste: 0.0 # segment copy-paste (probability) \ No newline at end of file +copy_paste: 0.0 # segment copy-paste (probability) diff --git a/models/common.py b/models/common.py index 5da35690a4ec..d0fb0e8596ed 100644 --- a/models/common.py +++ b/models/common.py @@ -79,7 +79,7 @@ def __init__(self, c1, c2, num_heads, num_layers): if c1 != c2: self.conv = Conv(c1, c2) self.linear = nn.Linear(c2, c2) # learnable position embedding - self.tr = nn.Sequential(*[TransformerLayer(c2, num_heads) for _ in range(num_layers)]) + self.tr = nn.Sequential(*(TransformerLayer(c2, num_heads) for _ 
in range(num_layers))) self.c2 = c2 def forward(self, x): @@ -114,7 +114,7 @@ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, nu self.cv4 = Conv(2 * c_, c2, 1, 1) self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3) self.act = nn.LeakyReLU(0.1, inplace=True) - self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) + self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n))) def forward(self, x): y1 = self.cv3(self.m(self.cv1(x))) @@ -130,7 +130,7 @@ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, nu self.cv1 = Conv(c1, c_, 1, 1) self.cv2 = Conv(c1, c_, 1, 1) self.cv3 = Conv(2 * c_, c2, 1) # act=FReLU(c2) - self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) + self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n))) # self.m = nn.Sequential(*[CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)]) def forward(self, x): @@ -158,7 +158,7 @@ class C3Ghost(C3): def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): super().__init__(c1, c2, n, shortcut, g, e) c_ = int(c2 * e) # hidden channels - self.m = nn.Sequential(*[GhostBottleneck(c_, c_) for _ in range(n)]) + self.m = nn.Sequential(*(GhostBottleneck(c_, c_) for _ in range(n))) class SPP(nn.Module): @@ -362,7 +362,7 @@ class Detections: def __init__(self, imgs, pred, files, times=None, names=None, shape=None): super().__init__() d = pred[0].device # device - gn = [torch.tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.], device=d) for im in imgs] # normalizations + gn = [torch.tensor([*(im.shape[i] for i in [1, 0, 1, 0]), 1., 1.], device=d) for im in imgs] # normalizations self.imgs = imgs # list of images as numpy arrays self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls) self.names = names # class names diff --git a/models/experimental.py b/models/experimental.py index edccc9632fb5..adb86c81fc06 100644 --- a/models/experimental.py +++ b/models/experimental.py @@ -97,7 +97,6 @@ def attempt_load(weights, map_location=None, inplace=True, fuse=True): else: model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().eval()) # without layer fuse - # Compatibility updates for m in model.modules(): if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model]: diff --git a/models/hub/yolov5-bifpn.yaml b/models/hub/yolov5-bifpn.yaml index 119aebb1523a..2f2c82c70122 100644 --- a/models/hub/yolov5-bifpn.yaml +++ b/models/hub/yolov5-bifpn.yaml @@ -18,7 +18,7 @@ backbone: [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 [-1, 9, C3, [256]], [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 - [-1, 9, C3, [512]] + [-1, 9, C3, [512]], [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 [-1, 1, SPP, [1024, [5, 9, 13]]], [-1, 3, C3, [1024, False]], # 9 diff --git a/models/tf.py b/models/tf.py index 1c6da43adaac..5599ff5cce91 100644 --- a/models/tf.py +++ b/models/tf.py @@ -40,7 +40,7 @@ class TFBN(keras.layers.Layer): # TensorFlow BatchNormalization wrapper def __init__(self, w=None): - super(TFBN, self).__init__() + super().__init__() self.bn = keras.layers.BatchNormalization( beta_initializer=keras.initializers.Constant(w.bias.numpy()), gamma_initializer=keras.initializers.Constant(w.weight.numpy()), @@ -54,7 +54,7 @@ def call(self, inputs): class TFPad(keras.layers.Layer): def __init__(self, pad): - super(TFPad, self).__init__() + super().__init__() self.pad = tf.constant([[0, 0], [pad, pad], [pad, pad], [0, 0]]) def call(self, inputs): @@ -65,7 +65,7 @@ class 
TFConv(keras.layers.Layer): # Standard convolution def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None): # ch_in, ch_out, weights, kernel, stride, padding, groups - super(TFConv, self).__init__() + super().__init__() assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument" assert isinstance(k, int), "Convolution with multiple kernels are not allowed." # TensorFlow convolution padding is inconsistent with PyTorch (e.g. k=3 s=2 'SAME' padding) @@ -96,7 +96,7 @@ class TFFocus(keras.layers.Layer): # Focus wh information into c-space def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None): # ch_in, ch_out, kernel, stride, padding, groups - super(TFFocus, self).__init__() + super().__init__() self.conv = TFConv(c1 * 4, c2, k, s, p, g, act, w.conv) def call(self, inputs): # x(b,w,h,c) -> y(b,w/2,h/2,4c) @@ -110,7 +110,7 @@ def call(self, inputs): # x(b,w,h,c) -> y(b,w/2,h/2,4c) class TFBottleneck(keras.layers.Layer): # Standard bottleneck def __init__(self, c1, c2, shortcut=True, g=1, e=0.5, w=None): # ch_in, ch_out, shortcut, groups, expansion - super(TFBottleneck, self).__init__() + super().__init__() c_ = int(c2 * e) # hidden channels self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) self.cv2 = TFConv(c_, c2, 3, 1, g=g, w=w.cv2) @@ -123,7 +123,7 @@ def call(self, inputs): class TFConv2d(keras.layers.Layer): # Substitution for PyTorch nn.Conv2D def __init__(self, c1, c2, k, s=1, g=1, bias=True, w=None): - super(TFConv2d, self).__init__() + super().__init__() assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument" self.conv = keras.layers.Conv2D( c2, k, s, 'VALID', use_bias=bias, @@ -138,7 +138,7 @@ class TFBottleneckCSP(keras.layers.Layer): # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None): # ch_in, ch_out, number, shortcut, groups, expansion - super(TFBottleneckCSP, self).__init__() + super().__init__() c_ = int(c2 * e) # hidden channels self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) self.cv2 = TFConv2d(c1, c_, 1, 1, bias=False, w=w.cv2) @@ -158,7 +158,7 @@ class TFC3(keras.layers.Layer): # CSP Bottleneck with 3 convolutions def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None): # ch_in, ch_out, number, shortcut, groups, expansion - super(TFC3, self).__init__() + super().__init__() c_ = int(c2 * e) # hidden channels self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) self.cv2 = TFConv(c1, c_, 1, 1, w=w.cv2) @@ -172,7 +172,7 @@ def call(self, inputs): class TFSPP(keras.layers.Layer): # Spatial pyramid pooling layer used in YOLOv3-SPP def __init__(self, c1, c2, k=(5, 9, 13), w=None): - super(TFSPP, self).__init__() + super().__init__() c_ = c1 // 2 # hidden channels self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) self.cv2 = TFConv(c_ * (len(k) + 1), c2, 1, 1, w=w.cv2) @@ -186,7 +186,7 @@ def call(self, inputs): class TFSPPF(keras.layers.Layer): # Spatial pyramid pooling-Fast layer def __init__(self, c1, c2, k=5, w=None): - super(TFSPPF, self).__init__() + super().__init__() c_ = c1 // 2 # hidden channels self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) self.cv2 = TFConv(c_ * 4, c2, 1, 1, w=w.cv2) @@ -201,7 +201,7 @@ def call(self, inputs): class TFDetect(keras.layers.Layer): def __init__(self, nc=80, anchors=(), ch=(), imgsz=(640, 640), w=None): # detection layer - super(TFDetect, self).__init__() + super().__init__() self.stride = tf.convert_to_tensor(w.stride.numpy(), dtype=tf.float32) self.nc = nc # number of classes self.no = nc + 5 # number of outputs per anchor @@ -249,7 +249,7 @@ 
def _make_grid(nx=20, ny=20): class TFUpsample(keras.layers.Layer): def __init__(self, size, scale_factor, mode, w=None): # warning: all arguments needed including 'w' - super(TFUpsample, self).__init__() + super().__init__() assert scale_factor == 2, "scale_factor must be 2" self.upsample = lambda x: tf.image.resize(x, (x.shape[1] * 2, x.shape[2] * 2), method=mode) # self.upsample = keras.layers.UpSampling2D(size=scale_factor, interpolation=mode) @@ -263,7 +263,7 @@ def call(self, inputs): class TFConcat(keras.layers.Layer): def __init__(self, dimension=1, w=None): - super(TFConcat, self).__init__() + super().__init__() assert dimension == 1, "convert only NCHW to NHWC concat" self.d = 3 @@ -272,7 +272,7 @@ def call(self, inputs): def parse_model(d, ch, model, imgsz): # model_dict, input_channels(3) - LOGGER.info('\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments')) + LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}") anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'] na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors no = na * (nc + 5) # number of outputs = anchors * (classes + 5) @@ -299,7 +299,7 @@ def parse_model(d, ch, model, imgsz): # model_dict, input_channels(3) elif m is nn.BatchNorm2d: args = [ch[f]] elif m is Concat: - c2 = sum([ch[-1 if x == -1 else x + 1] for x in f]) + c2 = sum(ch[-1 if x == -1 else x + 1] for x in f) elif m is Detect: args.append([ch[x + 1] for x in f]) if isinstance(args[1], int): # number of anchors @@ -312,11 +312,11 @@ def parse_model(d, ch, model, imgsz): # model_dict, input_channels(3) m_ = keras.Sequential([tf_m(*args, w=model.model[i][j]) for j in range(n)]) if n > 1 \ else tf_m(*args, w=model.model[i]) # module - torch_m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args) # module + torch_m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args) # module t = str(m)[8:-2].replace('__main__.', '') # module type - np = sum([x.numel() for x in torch_m_.parameters()]) # number params + np = sum(x.numel() for x in torch_m_.parameters()) # number params m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params - LOGGER.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n, np, t, args)) # print + LOGGER.info(f'{i:>3}{str(f):>18}{str(n):>3}{np:>10} {t:<40}{str(args):<30}') # print save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist layers.append(m_) ch.append(c2) @@ -325,7 +325,7 @@ def parse_model(d, ch, model, imgsz): # model_dict, input_channels(3) class TFModel: def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, model=None, imgsz=(640, 640)): # model, channels, classes - super(TFModel, self).__init__() + super().__init__() if isinstance(cfg, dict): self.yaml = cfg # model dict else: # is *.yaml @@ -336,7 +336,7 @@ def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, model=None, imgsz=(640, 64 # Define model if nc and nc != self.yaml['nc']: - print('Overriding %s nc=%g with nc=%g' % (cfg, self.yaml['nc'], nc)) + print(f"Overriding {cfg} nc={self.yaml['nc']} with nc={nc}") self.yaml['nc'] = nc # override yaml value self.model, self.savelist = parse_model(deepcopy(self.yaml), ch=[ch], model=model, imgsz=imgsz) diff --git a/models/yolo.py b/models/yolo.py index 497a0e9c24e6..0fa2db91e82b 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -247,7 +247,7 @@ def _apply(self, fn): def parse_model(d, ch): # 
model_dict, input_channels(3) - LOGGER.info('\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments')) + LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}") anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'] na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors no = na * (nc + 5) # number of outputs = anchors * (classes + 5) @@ -275,7 +275,7 @@ def parse_model(d, ch): # model_dict, input_channels(3) elif m is nn.BatchNorm2d: args = [ch[f]] elif m is Concat: - c2 = sum([ch[x] for x in f]) + c2 = sum(ch[x] for x in f) elif m is Detect: args.append([ch[x] for x in f]) if isinstance(args[1], int): # number of anchors @@ -287,11 +287,11 @@ def parse_model(d, ch): # model_dict, input_channels(3) else: c2 = ch[f] - m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args) # module + m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args) # module t = str(m)[8:-2].replace('__main__.', '') # module type - np = sum([x.numel() for x in m_.parameters()]) # number params + np = sum(x.numel() for x in m_.parameters()) # number params m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params - LOGGER.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n_, np, t, args)) # print + LOGGER.info(f'{i:>3}{str(f):>18}{n_:>3}{np:10.0f} {t:<40}{str(args):<30}') # print save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist layers.append(m_) if i == 0: diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 000000000000..7d25200cdb33 --- /dev/null +++ b/setup.cfg @@ -0,0 +1,45 @@ +# Project-wide configuration file, can be used for package metadata and other toll configurations +# Example usage: global configuration for PEP8 (via flake8) setting or default pytest arguments + +[metadata] +license_file = LICENSE +description-file = README.md + + +[tool:pytest] +norecursedirs = + .git + dist + build +addopts = + --doctest-modules + --durations=25 + --color=yes + + +[flake8] +max-line-length = 120 +exclude = .tox,*.egg,build,temp +select = E,W,F +doctests = True +verbose = 2 +# https://pep8.readthedocs.io/en/latest/intro.html#error-codes +format = pylint +# see: https://www.flake8rules.com/ +ignore = + E731 # Do not assign a lambda expression, use a def + F405 + E402 + F841 + E741 + F821 + E722 + F401 + W504 + E127 + W504 + E231 + E501 + F403 + E302 + F541 diff --git a/tutorial.ipynb b/tutorial.ipynb index 47c44251b5ab..115d767a70bf 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -1014,4 +1014,4 @@ "outputs": [] } ] -} \ No newline at end of file +} diff --git a/utils/datasets.py b/utils/datasets.py index 3997a5df6331..fce005bd597c 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -140,7 +140,7 @@ def __iter__(self): yield next(self.iterator) -class _RepeatSampler(object): +class _RepeatSampler: """ Sampler that repeats forever Args: @@ -287,7 +287,7 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True): self.stride = stride if os.path.isfile(sources): - with open(sources, 'r') as f: + with open(sources) as f: sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())] else: sources = [sources] @@ -398,14 +398,14 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r f += glob.glob(str(p / '**' / '*.*'), recursive=True) # f = list(p.rglob('*.*')) # pathlib elif p.is_file(): # file - with open(p, 
'r') as t: + with open(p) as t: t = t.read().strip().splitlines() parent = str(p.parent) + os.sep f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path # f += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib) else: raise Exception(f'{prefix}{p} does not exist') - self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS]) + self.img_files = sorted(x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS) # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS]) # pathlib assert self.img_files, f'{prefix}No images found' except Exception as e: @@ -681,7 +681,7 @@ def load_mosaic(self, index): # YOLOv5 4-mosaic loader. Loads 1 image + 3 random images into a 4-image mosaic labels4, segments4 = [], [] s = self.img_size - yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y + yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border) # mosaic center x, y indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices random.shuffle(indices) for i, index in enumerate(indices): @@ -767,7 +767,7 @@ def load_mosaic9(self, index): c = s - w, s + h0 - hp - h, s, s + h0 - hp padx, pady = c[:2] - x1, y1, x2, y2 = [max(x, 0) for x in c] # allocate coords + x1, y1, x2, y2 = (max(x, 0) for x in c) # allocate coords # Labels labels, segments = self.labels[index].copy(), self.segments[index].copy() @@ -782,7 +782,7 @@ def load_mosaic9(self, index): hp, wp = h, w # height, width previous # Offset - yc, xc = [int(random.uniform(0, s)) for _ in self.mosaic_border] # mosaic center x, y + yc, xc = (int(random.uniform(0, s)) for _ in self.mosaic_border) # mosaic center x, y img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s] # Concat/clip labels @@ -838,7 +838,7 @@ def extract_boxes(path='../datasets/coco128'): # from utils.datasets import *; # labels lb_file = Path(img2label_paths([str(im_file)])[0]) if Path(lb_file).exists(): - with open(lb_file, 'r') as f: + with open(lb_file) as f: lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels for j, x in enumerate(lb): @@ -866,7 +866,7 @@ def autosplit(path='../datasets/coco128/images', weights=(0.9, 0.1, 0.0), annota annotated_only: Only use images with an annotated txt file """ path = Path(path) # images dir - files = sorted([x for x in path.rglob('*.*') if x.suffix[1:].lower() in IMG_FORMATS]) # image files only + files = sorted(x for x in path.rglob('*.*') if x.suffix[1:].lower() in IMG_FORMATS) # image files only n = len(files) # number of files random.seed(0) # for reproducibility indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split @@ -902,7 +902,7 @@ def verify_image_label(args): # verify labels if os.path.isfile(lb_file): nf = 1 # label found - with open(lb_file, 'r') as f: + with open(lb_file) as f: l = [x.split() for x in f.read().strip().splitlines() if len(x)] if any([len(x) > 8 for x in l]): # is segment classes = np.array([x[0] for x in l], dtype=np.float32) @@ -944,7 +944,7 @@ def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False, profil def round_labels(labels): # Update labels to integer class and 6 decimal place floats - return [[int(c), *[round(x, 4) for x in points]] for c, *points in labels] + return [[int(c), *(round(x, 4) for x in points)] for c, *points in labels] def unzip(path): # Unzip data.zip TODO: CONSTRAINT: path/to/abc.zip MUST 
unzip to 'path/to/abc/' @@ -1019,7 +1019,7 @@ def hub_ops(f, max_dim=1920): with open(file, 'w') as f: json.dump(stats, f) # save stats *.json t2 = time.time() - with open(file, 'r') as f: + with open(file) as f: x = json.load(f) # load hyps dict print(f'stats.json times: {time.time() - t2:.3f}s read, {t2 - t1:.3f}s write') diff --git a/utils/general.py b/utils/general.py index 02bc741ca3ba..f22908907fd0 100755 --- a/utils/general.py +++ b/utils/general.py @@ -136,7 +136,7 @@ def is_writeable(dir, test=False): pass file.unlink() # remove file return True - except IOError: + except OSError: return False else: # method 2 return os.access(dir, os.R_OK) # possible issues on Windows @@ -355,7 +355,7 @@ def check_dataset(data, autodownload=True): assert 'nc' in data, "Dataset 'nc' key missing." if 'names' not in data: data['names'] = [f'class{i}' for i in range(data['nc'])] # assign class names if missing - train, val, test, s = [data.get(x) for x in ('train', 'val', 'test', 'download')] + train, val, test, s = (data.get(x) for x in ('train', 'val', 'test', 'download')) if val: val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path if not all(x.exists() for x in val): diff --git a/utils/google_app_engine/app.yaml b/utils/google_app_engine/app.yaml index ac29d104b144..5056b7c1186d 100644 --- a/utils/google_app_engine/app.yaml +++ b/utils/google_app_engine/app.yaml @@ -11,4 +11,4 @@ manual_scaling: resources: cpu: 1 memory_gb: 4 - disk_size_gb: 20 \ No newline at end of file + disk_size_gb: 20 diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 0b457df63c93..ae2d98bdc36d 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -135,7 +135,7 @@ def on_train_end(self, last, best, plots, epoch, results): # Callback runs on training end if plots: plot_results(file=self.save_dir / 'results.csv') # save results.png - files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]] + files = ['results.png', 'confusion_matrix.png', *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))] files = [(self.save_dir / f) for f in files if (self.save_dir / f).exists()] # filter if self.tb: diff --git a/utils/loggers/wandb/README.md b/utils/loggers/wandb/README.md index dd7dc1e46d45..d787fb7a5a0e 100644 --- a/utils/loggers/wandb/README.md +++ b/utils/loggers/wandb/README.md @@ -61,10 +61,10 @@ You can leverage W&B artifacts and Tables integration to easily visualize and ma
Usage Code $ python utils/loggers/wandb/log_dataset.py --project ... --name ... --data .. - + ![Screenshot (64)](https://user-images.githubusercontent.com/15766192/128486078-d8433890-98a3-4d12-8986-b6c0e3fc64b9.png)
- +

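For orientation, the artifact upload that `log_dataset.py` performs boils down to a few calls against the public wandb API. A minimal sketch (the project name and dataset path are hypothetical, not this patch's exact code):

```python
import wandb

# Minimal sketch of dataset versioning (illustrative values):
# a dataset-type Artifact is created and the local dataset directory attached.
run = wandb.init(project='yolov5-demo', job_type='dataset-upload')  # hypothetical project
artifact = wandb.Artifact('coco128', type='dataset')
artifact.add_dir('../datasets/coco128')  # hypothetical local dataset path
run.log_artifact(artifact)
run.finish()
```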
2: Train and Log Evaluation simultaneously

This is an extension of the previous section, but it'll also start training after uploading the dataset. It also logs an Evaluation Table, which compares your predictions and ground truths across the validation set for each epoch. It uses the references to the already uploaded datasets, @@ -72,31 +72,31 @@ You can leverage W&B artifacts and Tables integration to easily visualize and ma
Usage Code $ python utils/loggers/wandb/log_dataset.py --data .. --upload_data - + ![Screenshot (72)](https://user-images.githubusercontent.com/15766192/128979739-4cf63aeb-a76f-483f-8861-1c0100b938a5.png)
- +

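The Evaluation Table is, at bottom, a `wandb.Table` of per-image rows with bounding-box overlays. A self-contained sketch of that format, with hypothetical boxes and scores (the `box_data` keys follow those used in `utils/loggers/wandb/wandb_utils.py`):

```python
import wandb

# Sketch of the per-epoch Evaluation Table format (all values hypothetical).
run = wandb.init(project='yolov5-demo', job_type='Validation')
class_set = wandb.Classes([{'id': 0, 'name': 'person'}])
box = {'position': {'minX': 50, 'minY': 40, 'maxX': 320, 'maxY': 600},
       'class_id': 0,
       'box_caption': 'person 0.913',
       'scores': {'class_score': 0.913},
       'domain': 'pixel'}
img = wandb.Image('data/images/zidane.jpg',  # sample image shipped with the repo
                  boxes={'predictions': {'box_data': [box], 'class_labels': {0: 'person'}}},
                  classes=class_set)
table = wandb.Table(columns=['epoch', 'image', 'avg confidence'])
table.add_data(0, img, 0.913)
run.log({'evaluation': table})
run.finish()
```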
3: Train using dataset artifact

- When you upload a dataset as described in the first section, you get a new config file with an added `_wandb` to its name. This file contains the information that + When you upload a dataset as described in the first section, you get a new config file with an added `_wandb` to its name. This file contains the information that can be used to train a model directly from the dataset artifact. This also logs evaluation
Usage Code $ python utils/loggers/wandb/log_dataset.py --data {data}_wandb.yaml - + ![Screenshot (72)](https://user-images.githubusercontent.com/15766192/128979739-4cf63aeb-a76f-483f-8861-1c0100b938a5.png)
- +

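As an aside, once the `{data}_wandb.yaml` file exists, the same training can also be launched from Python through `train.py`'s `run()` helper, which mirrors its CLI flags. A hypothetical call (weights and epoch count are illustrative):

```python
# Hypothetical: train directly from the artifact-linked config via train.run()
# (assumes the repo root as the working directory).
import train

train.run(data='coco128_wandb.yaml', weights='yolov5s.pt', epochs=3)
```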
4: Save model checkpoints as artifacts

- To enable saving and versioning checkpoints of your experiment, pass `--save_period n` with the base cammand, where `n` represents checkpoint interval. + To enable saving and versioning checkpoints of your experiment, pass `--save_period n` with the base command, where `n` represents the checkpoint interval. You can also log both the dataset and model checkpoints simultaneously. If not passed, only the final model will be logged
Usage Code $ python train.py --save_period 1 - + ![Screenshot (68)](https://user-images.githubusercontent.com/15766192/128726138-ec6c1f60-639d-437d-b4ee-3acd9de47ef3.png)
- +

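Internally, each saved checkpoint becomes a model-type artifact, as in `log_model()` of `utils/loggers/wandb/wandb_utils.py`. A trimmed sketch of that call, with illustrative epoch, fitness and path values:

```python
import wandb

# Trimmed sketch of log_model(): checkpoint file -> versioned model artifact.
run = wandb.init(project='yolov5-demo', job_type='Training')  # hypothetical project
art = wandb.Artifact(f'run_{run.id}_model', type='model',
                     metadata={'epoch': 10, 'fitness_score': 0.62})  # illustrative metadata
art.add_file('runs/train/exp/weights/last.pt', name='last.pt')  # illustrative checkpoint path
run.log_artifact(art, aliases=['latest', 'epoch 10'])
run.finish()
```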
5: Resume runs from checkpoint artifacts.

@@ -105,28 +105,28 @@ Any run can be resumed using artifacts if the --resume argument sta
Usage Code $ python train.py --resume wandb-artifact://{run_path} - + ![Screenshot (70)](https://user-images.githubusercontent.com/15766192/128728988-4e84b355-6c87-41ae-a591-14aecf45343e.png)
- +

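Here `{run_path}` takes the form `entity/project/run_id`. A sketch of how the `wandb-artifact://` prefix is peeled off, mirroring `get_run_info()` in `wandb_utils.py` (the run path below is hypothetical):

```python
from pathlib import Path

WANDB_ARTIFACT_PREFIX = 'wandb-artifact://'  # as defined in wandb_utils.py
resume = 'wandb-artifact://my-entity/yolov5-demo/3abc1v5x'  # hypothetical run path
run_path = Path(resume[len(WANDB_ARTIFACT_PREFIX):])
entity, project, run_id = run_path.parts
model_artifact_name = f'run_{run_id}_model'
print(entity, project, run_id, model_artifact_name)
```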
6: Resume runs from dataset artifact & checkpoint artifacts.

Local dataset or model checkpoints are not required. This can be used to resume runs directly on a different device - The syntax is same as the previous section, but you'll need to lof both the dataset and model checkpoints as artifacts, i.e, set bot --upload_dataset or + The syntax is the same as the previous section, but you'll need to log both the dataset and model checkpoints as artifacts, i.e., either set --upload_dataset or train from the _wandb.yaml file, and set --save_period
Usage Code $ python train.py --resume wandb-artifact://{run_path} - + ![Screenshot (70)](https://user-images.githubusercontent.com/15766192/128728988-4e84b355-6c87-41ae-a591-14aecf45343e.png)
- +

Reports

W&B Reports can be created from your saved runs for sharing online. Once a report is created you will receive a link you can use to publically share your results. Here is an example report created from the COCO128 tutorial trainings of all four YOLOv5 models ([link](https://wandb.ai/glenn-jocher/yolov5_tutorial/reports/YOLOv5-COCO128-Tutorial-Results--VmlldzozMDI5OTY)). - + Weights & Biases Reports diff --git a/utils/loggers/wandb/sweep.yaml b/utils/loggers/wandb/sweep.yaml index c3727de82d4a..c7790d75f6b2 100644 --- a/utils/loggers/wandb/sweep.yaml +++ b/utils/loggers/wandb/sweep.yaml @@ -1,17 +1,17 @@ # Hyperparameters for training -# To set range- +# To set range- # Provide min and max values as: # parameter: -# +# # min: scalar # max: scalar # OR # # Set a specific list of search space- -# parameter: +# parameter: # values: [scalar1, scalar2, scalar3...] -# -# You can use grid, bayesian and hyperopt search strategy +# +# You can use grid, bayesian and hyperopt search strategy # For more info on configuring sweeps visit - https://docs.wandb.ai/guides/sweeps/configuration program: utils/loggers/wandb/sweep.py diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index 7fb76b05e987..8546ec6c63cb 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -5,6 +5,7 @@ import sys from contextlib import contextmanager from pathlib import Path +from typing import Dict import pkg_resources as pkg import yaml @@ -25,7 +26,7 @@ assert hasattr(wandb, '__version__') # verify package import not local dir except (ImportError, AssertionError): wandb = None - + RANK = int(os.getenv('RANK', -1)) WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' @@ -127,7 +128,7 @@ def __init__(self, opt, run_id=None, job_type='Training'): arguments: opt (namespace) -- Commandline arguments for this run run_id (str) -- Run ID of W&B run to be resumed - job_type (str) -- To set the job_type for this run + job_type (str) -- To set the job_type for this run """ # Pre-training routine -- @@ -142,7 +143,8 @@ def __init__(self, opt, run_id=None, job_type='Training'): self.max_imgs_to_log = 16 self.wandb_artifact_data_dict = None self.data_dict = None - # It's more elegant to stick to 1 wandb.init call, but useful config data is overwritten in the WandbLogger's wandb.init call + # It's more elegant to stick to 1 wandb.init call, + # but useful config data is overwritten in the WandbLogger's wandb.init call if isinstance(opt.resume, str): # checks resume from artifact if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): entity, project, run_id, model_artifact_name = get_run_info(opt.resume) @@ -212,7 +214,7 @@ def setup_training(self, opt): Setup the necessary processes for training YOLO models: - Attempt to download model checkpoint and dataset artifacts if opt.resume stats with WANDB_ARTIFACT_PREFIX - Update data_dict, to contain info of previous run if resumed and the paths of dataset artifact if downloaded - - Setup log_dict, initialize bbox_interval + - Setup log_dict, initialize bbox_interval arguments: opt (namespace) -- commandline arguments for this run @@ -301,7 +303,7 @@ def log_model(self, path, opt, epoch, fitness_score, best_model=False): path (Path) -- Path of directory containing the checkpoints opt (namespace) -- Command line arguments for this run epoch (int) -- Current epoch number - fitness_score (float) -- fitness score for current epoch + fitness_score (float) -- fitness score for current epoch best_model (boolean) -- Boolean representing if the current 
checkpoint is the best yet. """ model_artifact = wandb.Artifact('run_' + wandb.run.id + '_model', type='model', metadata={ @@ -325,7 +327,7 @@ def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config= data_file (str) -- the .yaml file with information about the dataset like - path, classes etc. single_class (boolean) -- train multi-class data as single-class project (str) -- project name. Used to construct the artifact path - overwrite_config (boolean) -- overwrites the data.yaml file if set to true otherwise creates a new + overwrite_config (boolean) -- overwrites the data.yaml file if set to true otherwise creates a new file with _wandb postfix. Eg -> data_wandb.yaml returns: @@ -371,14 +373,14 @@ def map_val_table_path(self): for i, data in enumerate(tqdm(self.val_table.data)): self.val_table_path_map[data[3]] = data[0] - def create_dataset_table(self, dataset, class_to_id, name='dataset'): + def create_dataset_table(self, dataset: LoadImagesAndLabels, class_to_id: Dict[int,str], name: str = 'dataset'): """ Create and return W&B artifact containing W&B Table of the dataset. arguments: - dataset (LoadImagesAndLabels) -- instance of LoadImagesAndLabels class used to iterate over the data to build Table - class_to_id (dict(int, str)) -- hash map that maps class ids to labels - name (str) -- name of the artifact + dataset -- instance of LoadImagesAndLabels class used to iterate over the data to build Table + class_to_id -- hash map that maps class ids to labels + name -- name of the artifact returns: dataset artifact to be logged or used @@ -419,7 +421,7 @@ def log_training_progress(self, predn, path, names): arguments: predn (list): list of predictions in the native space in the format - [xmin, ymin, xmax, ymax, confidence, class] - path (str): local path of the current evaluation image + path (str): local path of the current evaluation image names (dict(int, str)): hash map that maps class ids to labels """ class_set = wandb.Classes([{'id': id, 'name': name} for id, name in names.items()]) @@ -430,7 +432,7 @@ def log_training_progress(self, predn, path, names): box_data.append( {"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, "class_id": int(cls), - "box_caption": "%s %.3f" % (names[cls], conf), + "box_caption": f"{names[cls]} {conf:.3f}", "scores": {"class_score": conf}, "domain": "pixel"}) total_conf += conf @@ -450,7 +452,7 @@ def val_one_image(self, pred, predn, path, names, im): arguments: pred (list): list of scaled predictions in the format - [xmin, ymin, xmax, ymax, confidence, class] predn (list): list of predictions in the native space - [xmin, ymin, xmax, ymax, confidence, class] - path (str): local path of the current evaluation image + path (str): local path of the current evaluation image """ if self.val_table and self.result_table: # Log Table if Val dataset is uploaded as artifact self.log_training_progress(predn, path, names) @@ -459,7 +461,7 @@ def val_one_image(self, pred, predn, path, names, im): if self.current_epoch % self.bbox_interval == 0: box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, "class_id": int(cls), - "box_caption": "%s %.3f" % (names[cls], conf), + "box_caption": f"{names[cls]} {conf:.3f}", "scores": {"class_score": conf}, "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()] boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space diff --git a/utils/loss.py b/utils/loss.py index fac432d0edc3..e8ce42ad994a 100644 --- 
a/utils/loss.py +++ b/utils/loss.py @@ -18,7 +18,7 @@ def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#iss class BCEBlurWithLogitsLoss(nn.Module): # BCEwithLogitLoss() with reduced missing label effects. def __init__(self, alpha=0.05): - super(BCEBlurWithLogitsLoss, self).__init__() + super().__init__() self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none') # must be nn.BCEWithLogitsLoss() self.alpha = alpha @@ -35,7 +35,7 @@ def forward(self, pred, true): class FocalLoss(nn.Module): # Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): - super(FocalLoss, self).__init__() + super().__init__() self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() self.gamma = gamma self.alpha = alpha @@ -65,7 +65,7 @@ def forward(self, pred, true): class QFocalLoss(nn.Module): # Wraps Quality focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): - super(QFocalLoss, self).__init__() + super().__init__() self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() self.gamma = gamma self.alpha = alpha diff --git a/utils/plots.py b/utils/plots.py index 00b8f88811e2..00cda6d8d986 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -250,7 +250,7 @@ def plot_targets_txt(): # from utils.plots import *; plot_targets_txt() fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True) ax = ax.ravel() for i in range(4): - ax[i].hist(x[i], bins=100, label='%.3g +/- %.3g' % (x[i].mean(), x[i].std())) + ax[i].hist(x[i], bins=100, label=f'{x[i].mean():.3g} +/- {x[i].std():.3g}') ax[i].legend() ax[i].set_title(s[i]) plt.savefig('targets.jpg', dpi=200) @@ -363,7 +363,7 @@ def profile_idetection(start=0, stop=0, labels=(), save_dir=''): else: a.remove() except Exception as e: - print('Warning: Plotting error for %s; %s' % (f, e)) + print(f'Warning: Plotting error for {f}; {e}') ax[1].legend() plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200) @@ -384,10 +384,10 @@ def plot_evolve(evolve_csv='path/to/evolve.csv'): # from utils.plots import *; plt.subplot(6, 5, i + 1) plt.scatter(v, f, c=hist2d(v, f, 20), cmap='viridis', alpha=.8, edgecolors='none') plt.plot(mu, f.max(), 'k+', markersize=15) - plt.title('%s = %.3g' % (k, mu), fontdict={'size': 9}) # limit to 40 characters + plt.title(f'{k} = {mu:.3g}', fontdict={'size': 9}) # limit to 40 characters if i % 5 != 0: plt.yticks([]) - print('%15s: %.3g' % (k, mu)) + print(f'{k:>15}: {mu:.3g}') f = evolve_csv.with_suffix('.png') # filename plt.savefig(f, dpi=200) plt.close() diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 6f52f9a3728d..e6d8ebd743bf 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -123,7 +123,7 @@ def profile(input, ops, n=10, device=None): y = m(x) t[1] = time_sync() try: - _ = (sum([yi.sum() for yi in y]) if isinstance(y, list) else y).sum().backward() + _ = (sum(yi.sum() for yi in y) if isinstance(y, list) else y).sum().backward() t[2] = time_sync() except Exception as e: # no backward method # print(e) # for debug @@ -223,7 +223,7 @@ def model_info(model, verbose=False, img_size=640): n_p = sum(x.numel() for x in model.parameters()) # number parameters n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients if verbose: - print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma')) + print(f"{'layer':>5} 
{'name':>40} {'gradient':>9} {'parameters':>12} {'shape':>20} {'mu':>10} {'sigma':>10}") for i, (name, p) in enumerate(model.named_parameters()): name = name.replace('module_list.', '') print('%5g %40s %9s %12g %20s %10.3g %10.3g' % @@ -270,7 +270,7 @@ def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416) s = (int(h * ratio), int(w * ratio)) # new size img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize if not same_shape: # pad/crop img - h, w = [math.ceil(x * ratio / gs) * gs for x in (h, w)] + h, w = (math.ceil(x * ratio / gs) * gs for x in (h, w)) return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean From 620b535f850728d63d81793aa9e4577f7b844078 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Fri, 29 Oct 2021 22:51:59 +0530 Subject: [PATCH 434/757] Update sweep.py (#5402) --- utils/loggers/wandb/sweep.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/utils/loggers/wandb/sweep.py b/utils/loggers/wandb/sweep.py index fdabec4eb63b..6029f6b8039d 100644 --- a/utils/loggers/wandb/sweep.py +++ b/utils/loggers/wandb/sweep.py @@ -26,6 +26,11 @@ def sweep(): opt.epochs = hyp_dict.get("epochs") opt.nosave = True opt.data = hyp_dict.get("data") + opt.weights = str(opt.weights) + opt.cfg = str(opt.cfg) + opt.data = str(opt.data) + opt.hyp = str(opt.hyp) + opt.project = str(opt.project) device = select_device(opt.device, batch_size=opt.batch_size) # train From 7f9bbf0268317ace43f59174efbfecff60023c84 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 29 Oct 2021 23:16:04 +0200 Subject: [PATCH 435/757] Update GitHub issues templates (#5404) * Update GitHub issues templates * pre-commit fixes Co-authored-by: pre-commit --- .github/ISSUE_TEMPLATE/bug-report.md | 59 --------------- .github/ISSUE_TEMPLATE/bug-report.yml | 83 ++++++++++++++++++++++ .github/ISSUE_TEMPLATE/config.yml | 8 +++ .github/ISSUE_TEMPLATE/feature-request.md | 29 -------- .github/ISSUE_TEMPLATE/feature-request.yml | 50 +++++++++++++ .github/ISSUE_TEMPLATE/question.md | 12 ---- .github/ISSUE_TEMPLATE/question.yml | 33 +++++++++ 7 files changed, 174 insertions(+), 100 deletions(-) delete mode 100644 .github/ISSUE_TEMPLATE/bug-report.md create mode 100644 .github/ISSUE_TEMPLATE/bug-report.yml create mode 100644 .github/ISSUE_TEMPLATE/config.yml delete mode 100644 .github/ISSUE_TEMPLATE/feature-request.md create mode 100644 .github/ISSUE_TEMPLATE/feature-request.yml delete mode 100644 .github/ISSUE_TEMPLATE/question.md create mode 100644 .github/ISSUE_TEMPLATE/question.yml diff --git a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md deleted file mode 100644 index 62a02a3a6948..000000000000 --- a/.github/ISSUE_TEMPLATE/bug-report.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -name: "🐛 Bug report" -about: Create a report to help us improve -title: '' -labels: bug -assignees: '' - ---- - -Before submitting a bug report, please be aware that your issue **must be reproducible** with all of the following, -otherwise it is non-actionable, and we can not help you: - -- **Current repo**: run `git fetch && git status -uno` to check and `git pull` to update repo -- **Common dataset**: coco.yaml or coco128.yaml -- **Common environment**: Colab, Google Cloud, or Docker image. See https://github.com/ultralytics/yolov5#environments - -If this is a custom dataset/training question you **must include** your `train*.jpg`, `val*.jpg` and `results.png` -figures, or we can not help you. You can generate these with `utils.plot_results()`. 
- -## 🐛 Bug - -A clear and concise description of what the bug is. - -## To Reproduce (REQUIRED) - -Input: - -``` -import torch - -a = torch.tensor([5]) -c = a / 0 -``` - -Output: - -``` -Traceback (most recent call last): - File "/Users/glennjocher/opt/anaconda3/envs/env1/lib/python3.7/site-packages/IPython/core/interactiveshell.py", line 3331, in run_code - exec(code_obj, self.user_global_ns, self.user_ns) - File "", line 5, in - c = a / 0 -RuntimeError: ZeroDivisionError -``` - -## Expected behavior - -A clear and concise description of what you expected to happen. - -## Environment - -If applicable, add screenshots to help explain your problem. - -- OS: [e.g. Ubuntu] -- GPU [e.g. 2080 Ti] - -## Additional context - -Add any other context about the problem here. diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml new file mode 100644 index 000000000000..a20f15c20c93 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug-report.yml @@ -0,0 +1,83 @@ +name: 🐛 Bug Report +# title: " " +description: Problems with YOLOv5 +labels: [bug, triage] +body: + - type: markdown + attributes: + value: | + Thank you for submitting a YOLOv5 🐛 Bug Report! + + - type: checkboxes + attributes: + label: Search before asking + description: > + Please search the [issues](https://github.com/ultralytics/yolov5/issues) to see if a similar bug report already exists. + options: + - label: > + I have searched the YOLOv5 [issues](https://github.com/ultralytics/yolov5/issues) and found no similar bug report. + required: true + + - type: dropdown + attributes: + label: YOLOv5 Component + description: | + Please select the part of YOLOv5 where you found the bug. + multiple: true + options: + - "Training" + - "Validation" + - "Detection" + - "Export" + - "PyTorch Hub" + - "Multi-GPU" + - "Evolution" + - "Integrations" + - "Other" + validations: + required: false + + - type: textarea + attributes: + label: Bug + description: Provide console output with error messages and/or screenshots of the bug. + placeholder: > + TIP: Include as much information as possible (screenshots, logs, tracebacks etc.) to receive the most helpful response. + validations: + required: true + + - type: textarea + attributes: + label: Environment + description: Please specify the software and hardware you used to produce the bug. + placeholder: | + - YOLO: YOLOv5 🚀 v6.0-37-g620b535 torch 1.9.0+cu111 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB) + - OS: Ubuntu 20.04 + - Python: 3.9.0 + validations: + required: false + + - type: textarea + attributes: + label: Minimal Reproducible Example + description: > + When asking a question, people will be better able to provide help if you provide code that they can easily understand and use to **reproduce** the problem. + This is referred to by community members as creating a [minimal reproducible example](https://stackoverflow.com/help/minimal-reproducible-example). + placeholder: | + # code to reproduce your issue here + validations: + required: false + + - type: textarea + attributes: + label: Additional + description: Anything else you would like to share? + + - type: checkboxes + attributes: + label: Are you willing to submit a PR? + description: > + (Optional) We encourage you to submit a [Pull Request](https://github.com/ultralytics/yolov5/pulls) (PR) to help improve YOLOv5 for everyone, especially if you have a good understanding of how to implement a fix or feature. 
+ See the YOLOv5 [Contributing Guide](https://github.com/ultralytics/yolov5/blob/master/CONTRIBUTING.md) to get started. + options: + - label: Yes I'd like to help by submitting a PR! diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 000000000000..f388d7bacf66 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,8 @@ +blank_issues_enabled: true +contact_links: + - name: Slack + url: https://join.slack.com/t/ultralytics/shared_invite/zt-w29ei8bp-jczz7QYUmDtgo6r6KcMIAg + about: Ask on Ultralytics Slack Forum + - name: Stack Overflow + url: https://stackoverflow.com/search?q=YOLOv5 + about: Ask on Stack Overflow with 'YOLOv5' tag diff --git a/.github/ISSUE_TEMPLATE/feature-request.md b/.github/ISSUE_TEMPLATE/feature-request.md deleted file mode 100644 index 994f506e0f09..000000000000 --- a/.github/ISSUE_TEMPLATE/feature-request.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -name: "🚀 Feature request" -about: Suggest an idea for this project -title: '' -labels: enhancement -assignees: '' - ---- - -## 🚀 Feature - - - -## Motivation - - - -## Pitch - - - -## Alternatives - - - -## Additional context - - diff --git a/.github/ISSUE_TEMPLATE/feature-request.yml b/.github/ISSUE_TEMPLATE/feature-request.yml new file mode 100644 index 000000000000..68ef985186ef --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature-request.yml @@ -0,0 +1,50 @@ +name: 🚀 Feature Request +description: Suggest a YOLOv5 idea +# title: " " +labels: [enhancement] +body: + - type: markdown + attributes: + value: | + Thank you for submitting a YOLOv5 🚀 Feature Request! + + - type: checkboxes + attributes: + label: Search before asking + description: > + Please search the [issues](https://github.com/ultralytics/yolov5/issues) to see if a similar feature request already exists. + options: + - label: > + I have searched the YOLOv5 [issues](https://github.com/ultralytics/yolov5/issues) and found no similar feature requests. + required: true + + - type: textarea + attributes: + label: Description + description: A short description of your feature. + placeholder: | + What new feature would you like to see in YOLOv5? + validations: + required: true + + - type: textarea + attributes: + label: Use case + description: | + Describe the use case of your feature request. It will help us understand and prioritize the feature request. + placeholder: | + How would this feature be used, and who would use it? + + - type: textarea + attributes: + label: Additional + description: Anything else you would like to share? + + - type: checkboxes + attributes: + label: Are you willing to submit a PR? + description: > + (Optional) We encourage you to submit a [Pull Request](https://github.com/ultralytics/yolov5/pulls) (PR) to help improve YOLOv5 for everyone, especially if you have a good understanding of how to implement a fix or feature. + See the YOLOv5 [Contributing Guide](https://github.com/ultralytics/yolov5/blob/master/CONTRIBUTING.md) to get started. + options: + - label: Yes I'd like to help by submitting a PR! 
diff --git a/.github/ISSUE_TEMPLATE/question.md b/.github/ISSUE_TEMPLATE/question.md deleted file mode 100644 index 2892cfe262fb..000000000000 --- a/.github/ISSUE_TEMPLATE/question.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -name: "❓Question" -about: Ask a general question -title: '' -labels: question -assignees: '' - ---- - -## ❔Question - -## Additional context diff --git a/.github/ISSUE_TEMPLATE/question.yml b/.github/ISSUE_TEMPLATE/question.yml new file mode 100644 index 000000000000..9ae5dd57c608 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/question.yml @@ -0,0 +1,33 @@ +name: ❓ Question +description: Ask a YOLOv5 question +# title: " " +labels: [question] +body: + - type: markdown + attributes: + value: | + Thank you for asking a YOLOv5 ❓ Question! + + - type: checkboxes + attributes: + label: Search before asking + description: > + Please search the [issues](https://github.com/ultralytics/yolov5/issues) and [discussions](https://github.com/ultralytics/yolov5/discussions) to see if a similar question already exists. + options: + - label: > + I have searched the YOLOv5 [issues](https://github.com/ultralytics/yolov5/issues) and [discussions](https://github.com/ultralytics/yolov5/discussions) and found no similar questions. + required: true + + - type: textarea + attributes: + label: Question + description: What is your question? + placeholder: > + TIP: Include as much information as possible (screenshots, links, reference etc.) to receive the most helpful response. + validations: + required: true + + - type: textarea + attributes: + label: Additional + description: Anything else you would like to share? From 5d4258fac5e6ceaa9c897f841cb737c56717a996 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 30 Oct 2021 13:38:51 +0200 Subject: [PATCH 436/757] Fix MixConv2d() remove shortcut + apply depthwise (#5410) --- models/common.py | 2 +- models/experimental.py | 21 +++++++++++---------- utils/torch_utils.py | 2 +- 3 files changed, 13 insertions(+), 12 deletions(-) diff --git a/models/common.py b/models/common.py index d0fb0e8596ed..8b70a6fea595 100644 --- a/models/common.py +++ b/models/common.py @@ -113,7 +113,7 @@ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, nu self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False) self.cv4 = Conv(2 * c_, c2, 1, 1) self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3) - self.act = nn.LeakyReLU(0.1, inplace=True) + self.act = nn.SiLU() self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n))) def forward(self, x): diff --git a/models/experimental.py b/models/experimental.py index adb86c81fc06..2e92ccb36faf 100644 --- a/models/experimental.py +++ b/models/experimental.py @@ -2,7 +2,7 @@ """ Experimental modules """ - +import math import numpy as np import torch import torch.nn as nn @@ -48,26 +48,27 @@ def forward(self, x): class MixConv2d(nn.Module): # Mixed Depth-wise Conv https://arxiv.org/abs/1907.09595 - def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True): + def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True): # ch_in, ch_out, kernel, stride, ch_strategy super().__init__() - groups = len(k) + n = len(k) # number of convolutions if equal_ch: # equal c_ per group - i = torch.linspace(0, groups - 1E-6, c2).floor() # c2 indices - c_ = [(i == g).sum() for g in range(groups)] # intermediate channels + i = torch.linspace(0, n - 1E-6, c2).floor() # c2 indices + c_ = [(i == g).sum() for g in range(n)] # intermediate channels else: # equal weight.numel() per group - b = [c2] + [0] * groups - a = 
np.eye(groups + 1, groups, k=-1) + b = [c2] + [0] * n + a = np.eye(n + 1, n, k=-1) a -= np.roll(a, 1, axis=1) a *= np.array(k) ** 2 a[0] = 1 c_ = np.linalg.lstsq(a, b, rcond=None)[0].round() # solve for equal weight indices, ax = b - self.m = nn.ModuleList([nn.Conv2d(c1, int(c_[g]), k[g], s, k[g] // 2, bias=False) for g in range(groups)]) + self.m = nn.ModuleList( + [nn.Conv2d(c1, int(c_), k, s, k // 2, groups=math.gcd(c1, int(c_)), bias=False) for k, c_ in zip(k, c_)]) self.bn = nn.BatchNorm2d(c2) - self.act = nn.LeakyReLU(0.1, inplace=True) + self.act = nn.SiLU() def forward(self, x): - return x + self.act(self.bn(torch.cat([m(x) for m in self.m], 1))) + return self.act(self.bn(torch.cat([m(x) for m in self.m], 1))) class Ensemble(nn.ModuleList): diff --git a/utils/torch_utils.py b/utils/torch_utils.py index e6d8ebd743bf..fc214147da72 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -166,7 +166,7 @@ def initialize_weights(model): elif t is nn.BatchNorm2d: m.eps = 1e-3 m.momentum = 0.03 - elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6]: + elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]: m.inplace = True From 8c326a1edfa5565c6ee81b6a1c669f7849875717 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 1 Nov 2021 14:33:08 +0100 Subject: [PATCH 437/757] Meshgrid `indexing='ij'` for PyTorch 1.10 (#5309) * Meshgrid `indexing='ij'` for PyTorch 1.10 Will not merge currently as breaks backwards compatibility. * Meshgrid `indexing='ij'` for PyTorch 1.10 Will not merge currently as breaks backwards compatibility. * Add check_version hard argument * Update comment --- models/yolo.py | 7 +++++-- utils/augmentations.py | 2 +- utils/general.py | 11 +++++++---- 3 files changed, 13 insertions(+), 7 deletions(-) diff --git a/models/yolo.py b/models/yolo.py index 0fa2db91e82b..80ff83e16085 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -20,7 +20,7 @@ from models.common import * from models.experimental import * from utils.autoanchor import check_anchor_order -from utils.general import check_yaml, make_divisible, print_args, set_logging +from utils.general import check_yaml, make_divisible, print_args, set_logging, check_version from utils.plots import feature_visualization from utils.torch_utils import copy_attr, fuse_conv_and_bn, initialize_weights, model_info, scale_img, \ select_device, time_sync @@ -74,7 +74,10 @@ def forward(self, x): def _make_grid(self, nx=20, ny=20, i=0): d = self.anchors[i].device - yv, xv = torch.meshgrid([torch.arange(ny).to(d), torch.arange(nx).to(d)]) + if check_version(torch.__version__, '1.10.0'): # torch>=1.10.0 meshgrid workaround for torch>=0.7 compatibility + yv, xv = torch.meshgrid([torch.arange(ny).to(d), torch.arange(nx).to(d)], indexing='ij') + else: + yv, xv = torch.meshgrid([torch.arange(ny).to(d), torch.arange(nx).to(d)]) grid = torch.stack((xv, yv), 2).expand((1, self.na, ny, nx, 2)).float() anchor_grid = (self.anchors[i].clone() * self.stride[i]) \ .view((1, self.na, 1, 1, 2)).expand((1, self.na, ny, nx, 2)).float() diff --git a/utils/augmentations.py b/utils/augmentations.py index 04192d1ec5cd..b3cbbf913b65 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -20,7 +20,7 @@ def __init__(self): self.transform = None try: import albumentations as A - check_version(A.__version__, '1.0.3') # version requirement + check_version(A.__version__, '1.0.3', hard=True) # version requirement self.transform = A.Compose([ A.Blur(p=0.01), diff --git a/utils/general.py b/utils/general.py index 
f22908907fd0..667af63e4044 100755 --- a/utils/general.py +++ b/utils/general.py @@ -220,14 +220,17 @@ def check_git_status(): def check_python(minimum='3.6.2'): # Check current python version vs. required python version - check_version(platform.python_version(), minimum, name='Python ') + check_version(platform.python_version(), minimum, name='Python ', hard=True) -def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False): +def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False): # Check version vs. required version current, minimum = (pkg.parse_version(x) for x in (current, minimum)) - result = (current == minimum) if pinned else (current >= minimum) - assert result, f'{name}{minimum} required by YOLOv5, but {name}{current} is currently installed' + result = (current == minimum) if pinned else (current >= minimum) # bool + if hard: # assert min requirements met + assert result, f'{name}{minimum} required by YOLOv5, but {name}{current} is currently installed' + else: + return result @try_except From 7b1f7aec4632d7aa0f04442ef21df0b31ec6390a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 1 Nov 2021 18:22:13 +0100 Subject: [PATCH 438/757] Update `get_loggers()` (#4854) * Update `set_logging()` * Update export.py * pre-commit fixes * Update LoadImages * Update LoadStreams * Update print_args * Single LOGGER definition * yolo.py fix Co-authored-by: pre-commit --- detect.py | 17 ++++++------- export.py | 63 +++++++++++++++++++++++------------------------ models/tf.py | 7 ++---- models/yolo.py | 5 +--- train.py | 12 ++++----- utils/datasets.py | 30 +++++++++++----------- utils/general.py | 18 ++++++++------ val.py | 23 +++++++++-------- 8 files changed, 84 insertions(+), 91 deletions(-) diff --git a/detect.py b/detect.py index 70c52dc5214b..c57edba67c6c 100644 --- a/detect.py +++ b/detect.py @@ -25,8 +25,7 @@ from models.experimental import attempt_load from utils.datasets import LoadImages, LoadStreams from utils.general import apply_classifier, check_img_size, check_imshow, check_requirements, check_suffix, colorstr, \ - increment_path, non_max_suppression, print_args, save_one_box, scale_coords, set_logging, \ - strip_optimizer, xyxy2xywh + increment_path, non_max_suppression, print_args, save_one_box, scale_coords, strip_optimizer, xyxy2xywh, LOGGER from utils.plots import Annotator, colors from utils.torch_utils import load_classifier, select_device, time_sync @@ -68,7 +67,6 @@ def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s) (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Initialize - set_logging() device = select_device(device) half &= device.type != 'cpu' # half precision only supported on CUDA @@ -132,7 +130,7 @@ def wrap_frozen_graph(gd, inputs, outputs): if pt and device.type != 'cpu': model(torch.zeros(1, 3, *imgsz).to(device).type_as(next(model.parameters()))) # run once dt, seen = [0.0, 0.0, 0.0], 0 - for path, img, im0s, vid_cap in dataset: + for path, img, im0s, vid_cap, s in dataset: t1 = time_sync() if onnx: img = img.astype('float32') @@ -191,9 +189,10 @@ def wrap_frozen_graph(gd, inputs, outputs): for i, det in enumerate(pred): # per image seen += 1 if webcam: # batch_size >= 1 - p, s, im0, frame = path[i], f'{i}: ', im0s[i].copy(), dataset.count + p, im0, frame = path[i], im0s[i].copy(), dataset.count + s += f'{i}: ' else: - p, s, im0, frame = path, '', im0s.copy(), getattr(dataset, 'frame', 0) + p, im0, frame = path, im0s.copy(), getattr(dataset, 
'frame', 0) p = Path(p) # to Path save_path = str(save_dir / p.name) # img.jpg @@ -227,7 +226,7 @@ def wrap_frozen_graph(gd, inputs, outputs): save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) # Print time (inference-only) - print(f'{s}Done. ({t3 - t2:.3f}s)') + LOGGER.info(f'{s}Done. ({t3 - t2:.3f}s)') # Stream results im0 = annotator.result() @@ -256,10 +255,10 @@ def wrap_frozen_graph(gd, inputs, outputs): # Print results t = tuple(x / seen * 1E3 for x in dt) # speeds per image - print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) + LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) if save_txt or save_img: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' - print(f"Results saved to {colorstr('bold', save_dir)}{s}") + LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") if update: strip_optimizer(weights) # update model (to fix SourceChangeWarning) diff --git a/export.py b/export.py index 2aca0f341dbd..47dbcab50144 100644 --- a/export.py +++ b/export.py @@ -42,23 +42,23 @@ from models.yolo import Detect from utils.activations import SiLU from utils.datasets import LoadImages -from utils.general import colorstr, check_dataset, check_img_size, check_requirements, file_size, print_args, \ - set_logging, url2file +from utils.general import check_dataset, check_img_size, check_requirements, colorstr, file_size, print_args, \ + url2file, LOGGER from utils.torch_utils import select_device def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:')): # YOLOv5 TorchScript model export try: - print(f'\n{prefix} starting export with torch {torch.__version__}...') + LOGGER.info(f'\n{prefix} starting export with torch {torch.__version__}...') f = file.with_suffix('.torchscript.pt') ts = torch.jit.trace(model, im, strict=False) (optimize_for_mobile(ts) if optimize else ts).save(f) - print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') except Exception as e: - print(f'{prefix} export failure: {e}') + LOGGER.info(f'{prefix} export failure: {e}') def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorstr('ONNX:')): @@ -67,7 +67,7 @@ def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorst check_requirements(('onnx',)) import onnx - print(f'\n{prefix} starting export with onnx {onnx.__version__}...') + LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...') f = file.with_suffix('.onnx') torch.onnx.export(model, im, f, verbose=False, opset_version=opset, @@ -82,7 +82,7 @@ def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorst # Checks model_onnx = onnx.load(f) # load onnx model onnx.checker.check_model(model_onnx) # check onnx model - # print(onnx.helper.printable_graph(model_onnx.graph)) # print + # LOGGER.info(onnx.helper.printable_graph(model_onnx.graph)) # print # Simplify if simplify: @@ -90,7 +90,7 @@ def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorst check_requirements(('onnx-simplifier',)) import onnxsim - print(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...') + LOGGER.info(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...') model_onnx, check = onnxsim.simplify( model_onnx, 
dynamic_input_shape=dynamic, @@ -98,11 +98,11 @@ def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorst assert check, 'assert check failed' onnx.save(model_onnx, f) except Exception as e: - print(f'{prefix} simplifier failure: {e}') - print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - print(f"{prefix} run --dynamic ONNX model inference with: 'python detect.py --weights {f}'") + LOGGER.info(f'{prefix} simplifier failure: {e}') + LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + LOGGER.info(f"{prefix} run --dynamic ONNX model inference with: 'python detect.py --weights {f}'") except Exception as e: - print(f'{prefix} export failure: {e}') + LOGGER.info(f'{prefix} export failure: {e}') def export_coreml(model, im, file, prefix=colorstr('CoreML:')): @@ -112,7 +112,7 @@ def export_coreml(model, im, file, prefix=colorstr('CoreML:')): check_requirements(('coremltools',)) import coremltools as ct - print(f'\n{prefix} starting export with coremltools {ct.__version__}...') + LOGGER.info(f'\n{prefix} starting export with coremltools {ct.__version__}...') f = file.with_suffix('.mlmodel') model.train() # CoreML exports should be placed in model.train() mode @@ -120,9 +120,9 @@ def export_coreml(model, im, file, prefix=colorstr('CoreML:')): ct_model = ct.convert(ts, inputs=[ct.ImageType('image', shape=im.shape, scale=1 / 255.0, bias=[0, 0, 0])]) ct_model.save(f) - print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') except Exception as e: - print(f'\n{prefix} export failure: {e}') + LOGGER.info(f'\n{prefix} export failure: {e}') return ct_model @@ -137,7 +137,7 @@ def export_saved_model(model, im, file, dynamic, from tensorflow import keras from models.tf import TFModel, TFDetect - print(f'\n{prefix} starting export with tensorflow {tf.__version__}...') + LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') f = str(file).replace('.pt', '_saved_model') batch_size, ch, *imgsz = list(im.shape) # BCHW @@ -151,9 +151,9 @@ def export_saved_model(model, im, file, dynamic, keras_model.summary() keras_model.save(f, save_format='tf') - print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') except Exception as e: - print(f'\n{prefix} export failure: {e}') + LOGGER.info(f'\n{prefix} export failure: {e}') return keras_model @@ -164,7 +164,7 @@ def export_pb(keras_model, im, file, prefix=colorstr('TensorFlow GraphDef:')): import tensorflow as tf from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 - print(f'\n{prefix} starting export with tensorflow {tf.__version__}...') + LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') f = file.with_suffix('.pb') m = tf.function(lambda x: keras_model(x)) # full model @@ -173,9 +173,9 @@ def export_pb(keras_model, im, file, prefix=colorstr('TensorFlow GraphDef:')): frozen_func.graph.as_graph_def() tf.io.write_graph(graph_or_graph_def=frozen_func.graph, logdir=str(f.parent), name=f.name, as_text=False) - print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') except Exception as e: - print(f'\n{prefix} export failure: {e}') + LOGGER.info(f'\n{prefix} export failure: {e}') def export_tflite(keras_model, im, file, int8, data, 
ncalib, prefix=colorstr('TensorFlow Lite:')): @@ -184,7 +184,7 @@ def export_tflite(keras_model, im, file, int8, data, ncalib, prefix=colorstr('Te import tensorflow as tf from models.tf import representative_dataset_gen - print(f'\n{prefix} starting export with tensorflow {tf.__version__}...') + LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') batch_size, ch, *imgsz = list(im.shape) # BCHW f = str(file).replace('.pt', '-fp16.tflite') @@ -204,10 +204,10 @@ def export_tflite(keras_model, im, file, int8, data, ncalib, prefix=colorstr('Te tflite_model = converter.convert() open(f, "wb").write(tflite_model) - print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') except Exception as e: - print(f'\n{prefix} export failure: {e}') + LOGGER.info(f'\n{prefix} export failure: {e}') def export_tfjs(keras_model, im, file, prefix=colorstr('TensorFlow.js:')): @@ -217,7 +217,7 @@ def export_tfjs(keras_model, im, file, prefix=colorstr('TensorFlow.js:')): import re import tensorflowjs as tfjs - print(f'\n{prefix} starting export with tensorflowjs {tfjs.__version__}...') + LOGGER.info(f'\n{prefix} starting export with tensorflowjs {tfjs.__version__}...') f = str(file).replace('.pt', '_web_model') # js dir f_pb = file.with_suffix('.pb') # *.pb path f_json = f + '/model.json' # *.json path @@ -240,9 +240,9 @@ def export_tfjs(keras_model, im, file, prefix=colorstr('TensorFlow.js:')): json) j.write(subst) - print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') except Exception as e: - print(f'\n{prefix} export failure: {e}') + LOGGER.info(f'\n{prefix} export failure: {e}') @torch.no_grad() @@ -297,7 +297,7 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' for _ in range(2): y = model(im) # dry runs - print(f"\n{colorstr('PyTorch:')} starting from {file} ({file_size(file):.1f} MB)") + LOGGER.info(f"\n{colorstr('PyTorch:')} starting from {file} ({file_size(file):.1f} MB)") # Exports if 'torchscript' in include: @@ -322,9 +322,9 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' export_tfjs(model, im, file) # Finish - print(f'\nExport complete ({time.time() - t:.2f}s)' - f"\nResults saved to {colorstr('bold', file.parent.resolve())}" - f'\nVisualize with https://netron.app') + LOGGER.info(f'\nExport complete ({time.time() - t:.2f}s)' + f"\nResults saved to {colorstr('bold', file.parent.resolve())}" + f'\nVisualize with https://netron.app') def parse_opt(): @@ -355,7 +355,6 @@ def parse_opt(): def main(opt): - set_logging() run(**vars(opt)) diff --git a/models/tf.py b/models/tf.py index 5599ff5cce91..531c8cc5a29f 100644 --- a/models/tf.py +++ b/models/tf.py @@ -31,11 +31,9 @@ from models.common import Bottleneck, BottleneckCSP, Concat, Conv, C3, DWConv, Focus, SPP, SPPF, autopad from models.experimental import CrossConv, MixConv2d, attempt_load from models.yolo import Detect -from utils.general import make_divisible, print_args, set_logging +from utils.general import make_divisible, print_args, LOGGER from utils.activations import SiLU -LOGGER = logging.getLogger(__name__) - class TFBN(keras.layers.Layer): # TensorFlow BatchNormalization wrapper @@ -336,7 +334,7 @@ def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, model=None, imgsz=(640, 64 # Define model if nc and nc != self.yaml['nc']: - print(f"Overriding {cfg} nc={self.yaml['nc']} with nc={nc}") + 
LOGGER.info(f"Overriding {cfg} nc={self.yaml['nc']} with nc={nc}") self.yaml['nc'] = nc # override yaml value self.model, self.savelist = parse_model(deepcopy(self.yaml), ch=[ch], model=model, imgsz=imgsz) @@ -457,7 +455,6 @@ def parse_opt(): def main(opt): - set_logging() run(**vars(opt)) diff --git a/models/yolo.py b/models/yolo.py index 80ff83e16085..38a17d9e7ba4 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -20,7 +20,7 @@ from models.common import * from models.experimental import * from utils.autoanchor import check_anchor_order -from utils.general import check_yaml, make_divisible, print_args, set_logging, check_version +from utils.general import check_version, check_yaml, make_divisible, print_args, LOGGER from utils.plots import feature_visualization from utils.torch_utils import copy_attr, fuse_conv_and_bn, initialize_weights, model_info, scale_img, \ select_device, time_sync @@ -30,8 +30,6 @@ except ImportError: thop = None -LOGGER = logging.getLogger(__name__) - class Detect(nn.Module): stride = None # strides computed during build @@ -311,7 +309,6 @@ def parse_model(d, ch): # model_dict, input_channels(3) opt = parser.parse_args() opt.cfg = check_yaml(opt.cfg) # check YAML print_args(FILE.stem, opt) - set_logging() device = select_device(opt.device) # Create model diff --git a/train.py b/train.py index 292f2da965f0..4886034d811f 100644 --- a/train.py +++ b/train.py @@ -40,7 +40,7 @@ from utils.datasets import create_dataloader from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \ strip_optimizer, get_latest_run, check_dataset, check_git_status, check_img_size, check_requirements, \ - check_file, check_yaml, check_suffix, print_args, print_mutation, set_logging, one_cycle, colorstr, methods + check_file, check_yaml, check_suffix, print_args, print_mutation, one_cycle, colorstr, methods, LOGGER from utils.downloads import attempt_download from utils.loss import ComputeLoss from utils.plots import plot_labels, plot_evolve @@ -51,7 +51,6 @@ from utils.loggers import Loggers from utils.callbacks import Callbacks -LOGGER = logging.getLogger(__name__) LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html RANK = int(os.getenv('RANK', -1)) WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) @@ -129,7 +128,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary for k, v in model.named_parameters(): v.requires_grad = True # train all layers if any(x in k for x in freeze): - print(f'freezing {k}') + LOGGER.info(f'freezing {k}') v.requires_grad = False # Image size @@ -485,7 +484,6 @@ def parse_opt(known=False): def main(opt, callbacks=Callbacks()): # Checks - set_logging(RANK) if RANK in [-1, 0]: print_args(FILE.stem, opt) check_git_status() @@ -609,9 +607,9 @@ def main(opt, callbacks=Callbacks()): # Plot results plot_evolve(evolve_csv) - print(f'Hyperparameter evolution finished\n' - f"Results saved to {colorstr('bold', save_dir)}\n" - f'Use best hyperparameters example: $ python train.py --hyp {evolve_yaml}') + LOGGER.info(f'Hyperparameter evolution finished\n' + f"Results saved to {colorstr('bold', save_dir)}\n" + f'Use best hyperparameters example: $ python train.py --hyp {evolve_yaml}') def run(**kwargs): diff --git a/utils/datasets.py b/utils/datasets.py index fce005bd597c..7fce122942f7 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -28,7 +28,7 @@ from utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective from utils.general 
import check_dataset, check_requirements, check_yaml, clean_str, segments2boxes, \ - xywh2xyxy, xywhn2xyxy, xyxy2xywhn, xyn2xy + xywh2xyxy, xywhn2xyxy, xyxy2xywhn, xyn2xy, LOGGER from utils.torch_utils import torch_distributed_zero_first # Parameters @@ -210,14 +210,14 @@ def __next__(self): ret_val, img0 = self.cap.read() self.frame += 1 - print(f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: ', end='') + s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: ' else: # Read image self.count += 1 img0 = cv2.imread(path) # BGR - assert img0 is not None, 'Image Not Found ' + path - print(f'image {self.count}/{self.nf} {path}: ', end='') + assert img0 is not None, f'Image Not Found {path}' + s = f'image {self.count}/{self.nf} {path}: ' # Padded resize img = letterbox(img0, self.img_size, stride=self.stride, auto=self.auto)[0] @@ -226,7 +226,7 @@ def __next__(self): img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB img = np.ascontiguousarray(img) - return path, img, img0, self.cap + return path, img, img0, self.cap, s def new_video(self, path): self.frame = 0 @@ -264,7 +264,7 @@ def __next__(self): # Print assert ret_val, f'Camera Error {self.pipe}' img_path = 'webcam.jpg' - print(f'webcam {self.count}: ', end='') + s = f'webcam {self.count}: ' # Padded resize img = letterbox(img0, self.img_size, stride=self.stride)[0] @@ -273,7 +273,7 @@ def __next__(self): img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB img = np.ascontiguousarray(img) - return img_path, img, img0, None + return img_path, img, img0, None, s def __len__(self): return 0 @@ -298,14 +298,14 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True): self.auto = auto for i, s in enumerate(sources): # index, source # Start thread to read frames from video stream - print(f'{i + 1}/{n}: {s}... ', end='') + st = f'{i + 1}/{n}: {s}... ' if 'youtube.com/' in s or 'youtu.be/' in s: # if source is YouTube video check_requirements(('pafy', 'youtube_dl')) import pafy s = pafy.new(s).getbest(preftype="mp4").url # YouTube URL s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam cap = cv2.VideoCapture(s) - assert cap.isOpened(), f'Failed to open {s}' + assert cap.isOpened(), f'{st}Failed to open {s}' w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) self.fps[i] = max(cap.get(cv2.CAP_PROP_FPS) % 100, 0) or 30.0 # 30 FPS fallback @@ -313,15 +313,15 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True): _, self.imgs[i] = cap.read() # guarantee first frame self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True) - print(f" success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)") + LOGGER.info(f"{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)") self.threads[i].start() - print('') # newline + LOGGER.info('') # newline # check for common shapes s = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0].shape for x in self.imgs]) self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal if not self.rect: - print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.') + LOGGER.warning('WARNING: Stream shapes differ. 
For optimal performance supply similarly-shaped streams.') def update(self, i, cap, stream): # Read stream `i` frames in daemon thread @@ -335,7 +335,7 @@ def update(self, i, cap, stream): if success: self.imgs[i] = im else: - print('WARNING: Video stream unresponsive, please check your IP camera connection.') + LOGGER.warn('WARNING: Video stream unresponsive, please check your IP camera connection.') self.imgs[i] *= 0 cap.open(stream) # re-open stream if signal was lost time.sleep(1 / self.fps[i]) # wait time @@ -361,7 +361,7 @@ def __next__(self): img = img[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW img = np.ascontiguousarray(img) - return self.sources, img, img0, None + return self.sources, img, img0, None, '' def __len__(self): return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years @@ -666,7 +666,7 @@ def load_image(self, i): else: # read image path = self.img_files[i] im = cv2.imread(path) # BGR - assert im is not None, 'Image Not Found ' + path + assert im is not None, f'Image Not Found {path}' h0, w0 = im.shape[:2] # orig hw r = self.img_size / max(h0, w0) # ratio if r != 1: # if sizes are not equal diff --git a/utils/general.py b/utils/general.py index 667af63e4044..872d5ce57c81 100755 --- a/utils/general.py +++ b/utils/general.py @@ -42,6 +42,16 @@ ROOT = FILE.parents[1] # YOLOv5 root directory +def set_logging(name=None, verbose=True): + # Sets level and returns logger + rank = int(os.getenv('RANK', -1)) # rank in world for Multi-GPU trainings + logging.basicConfig(format="%(message)s", level=logging.INFO if (verbose and rank in (-1, 0)) else logging.WARN) + return logging.getLogger(name) + + +LOGGER = set_logging(__name__) # define globally (used in train.py, val.py, detect.py, etc.) + + class Profile(contextlib.ContextDecorator): # Usage: @Profile() decorator or 'with Profile():' context manager def __enter__(self): @@ -87,15 +97,9 @@ def methods(instance): return [f for f in dir(instance) if callable(getattr(instance, f)) and not f.startswith("__")] -def set_logging(rank=-1, verbose=True): - logging.basicConfig( - format="%(message)s", - level=logging.INFO if (verbose and rank in [-1, 0]) else logging.WARN) - - def print_args(name, opt): # Print argparser arguments - print(colorstr(f'{name}: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items())) + LOGGER.info(colorstr(f'{name}: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items())) def init_seeds(seed=0): diff --git a/val.py b/val.py index 2fc547322a0a..1fc98c71198b 100644 --- a/val.py +++ b/val.py @@ -25,9 +25,9 @@ from models.experimental import attempt_load from utils.datasets import create_dataloader -from utils.general import coco80_to_coco91_class, check_dataset, check_img_size, check_requirements, \ - check_suffix, check_yaml, box_iou, non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, set_logging, \ - increment_path, colorstr, print_args +from utils.general import box_iou, coco80_to_coco91_class, colorstr, check_dataset, check_img_size, \ + check_requirements, check_suffix, check_yaml, increment_path, non_max_suppression, print_args, scale_coords, \ + xyxy2xywh, xywh2xyxy, LOGGER from utils.metrics import ap_per_class, ConfusionMatrix from utils.plots import output_to_target, plot_images, plot_val_study from utils.torch_utils import select_device, time_sync @@ -242,18 +242,18 @@ def run(data, # Print results pf = '%20s' + '%11i' * 2 + '%11.3g' * 4 # print format - print(pf % ('all', seen, nt.sum(), mp, mr, map50, map)) + LOGGER.info(pf % ('all', seen, nt.sum(), mp, mr, 
map50, map)) # Print results per class if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats): for i, c in enumerate(ap_class): - print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i])) + LOGGER.info(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i])) # Print speeds t = tuple(x / seen * 1E3 for x in dt) # speeds per image if not training: shape = (batch_size, 3, imgsz, imgsz) - print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t) + LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t) # Plots if plots: @@ -265,7 +265,7 @@ def run(data, w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights anno_json = str(Path(data.get('path', '../coco')) / 'annotations/instances_val2017.json') # annotations json pred_json = str(save_dir / f"{w}_predictions.json") # predictions json - print(f'\nEvaluating pycocotools mAP... saving {pred_json}...') + LOGGER.info(f'\nEvaluating pycocotools mAP... saving {pred_json}...') with open(pred_json, 'w') as f: json.dump(jdict, f) @@ -284,13 +284,13 @@ def run(data, eval.summarize() map, map50 = eval.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5) except Exception as e: - print(f'pycocotools unable to run: {e}') + LOGGER.info(f'pycocotools unable to run: {e}') # Return results model.float() # for training if not training: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' - print(f"Results saved to {colorstr('bold', save_dir)}{s}") + LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") maps = np.zeros(nc) + map for i, c in enumerate(ap_class): maps[c] = ap[i] @@ -327,8 +327,7 @@ def parse_opt(): def main(opt): - set_logging() - check_requirements(exclude=('tensorboard', 'thop')) + check_requirements(requirements=ROOT / 'requirements.txt', exclude=('tensorboard', 'thop')) if opt.task in ('train', 'val', 'test'): # run normally run(**vars(opt)) @@ -346,7 +345,7 @@ def main(opt): f = f'study_{Path(opt.data).stem}_{Path(w).stem}.txt' # filename to save to y = [] # y axis for i in x: # img-size - print(f'\nRunning {f} point {i}...') + LOGGER.info(f'\nRunning {f} point {i}...') r, _, t = run(opt.data, weights=w, batch_size=opt.batch_size, imgsz=i, conf_thres=opt.conf_thres, iou_thres=opt.iou_thres, device=opt.device, save_json=opt.save_json, plots=False) y.append(r + t) # results and times From 4c0982a243aac3969345fc61e10eb7ea4d78e104 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 1 Nov 2021 18:28:14 +0100 Subject: [PATCH 439/757] Update README.md (#5438) 2-line update --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index d3fd7e9a92f5..3e2f5b656cde 100644 --- a/README.md +++ b/README.md @@ -193,7 +193,7 @@ Get started in seconds with our verified environments. 
Click each icon below for details. |Weights and Biases|Roboflow ⭐ NEW| |:-:|:-:| -|Automatically track and visualize all your YOLOv5 training runs in the cloud with [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme)|Label and automatically export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) | +|Automatically track and visualize all your YOLOv5 training runs in the cloud with [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme)|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) | ```python def increment_path(path, exist_ok=False, sep='', mkdir=False): # Increment file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc. path = Path(path) # os-agnostic if path.exists() and not exist_ok: path, suffix = (path.with_suffix(''), path.suffix) if path.is_file() else (path, '') dirs = glob.glob(f"{path}{sep}*") # similar paths matches = [re.search(rf"%s{sep}(\d+)" % path.stem, d) for d in dirs] i = [int(m.groups()[0]) for m in matches if m] # indices n = max(i) + 1 if i else 2 # increment number path = Path(f"{path}{sep}{n}{suffix}") # increment path if mkdir: path.mkdir(parents=True, exist_ok=True) # make directory return path print(increment_path('runs')) print(increment_path('export.py')) print(increment_path('abc.def.dir')) print(increment_path('abc.def.file')) ``` --- utils/general.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/utils/general.py b/utils/general.py index adbf1bd48c5f..fc05c691afa2 100755 --- a/utils/general.py +++ b/utils/general.py @@ -830,13 +830,12 @@ def increment_path(path, exist_ok=False, sep='', mkdir=False): # Increment file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc. path = Path(path) # os-agnostic if path.exists() and not exist_ok: - suffix = path.suffix - path = path.with_suffix('') + path, suffix = (path.with_suffix(''), path.suffix) if path.is_file() else (path, '') dirs = glob.glob(f"{path}{sep}*") # similar paths matches = [re.search(rf"%s{sep}(\d+)" % path.stem, d) for d in dirs] i = [int(m.groups()[0]) for m in matches if m] # indices n = max(i) + 1 if i else 2 # increment number - path = Path(f"{path}{sep}{n}{suffix}") # update path + path = Path(f"{path}{sep}{n}{suffix}") # increment path if mkdir: path.mkdir(parents=True, exist_ok=True) # make directory return path From 5f603a9dbaeae3fa052b09e6fff7903a7355e8b5 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 5 Nov 2021 18:43:03 +0100 Subject: [PATCH 459/757] Fix detect.py URL inference (#5525) * Fix detect.py URL inference Allows detect.py to run inference on remote URL sources, i.e.: ```python !python detect.py --weights yolov5s.pt --source https://ultralytics.com/assets/zidane.jpg # image URL !python detect.py --weights yolov5s.pt --source https://ultralytics.com/assets/decelera_landscape.mov # video URL ``` * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
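Editorial illustration of the source-routing checks this patch adds (a standalone sketch mirroring the diff below; `IMG_FORMATS`/`VID_FORMATS` are abbreviated stand-ins for the lists detect.py imports from utils.datasets):

```python
from pathlib import Path

IMG_FORMATS = ['jpg', 'jpeg', 'png']  # abbreviated stand-ins for the utils.datasets lists
VID_FORMATS = ['mov', 'mp4', 'avi']

def classify_source(source: str):
    # Mirrors the new detect.py logic: media-file URLs are downloaded and read as files,
    # while non-file sources (webcams, stream lists) are routed to the streaming loader
    is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
    is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))
    webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file)
    return is_file, is_url, webcam

print(classify_source('https://ultralytics.com/assets/zidane.jpg'))  # (True, True, False) -> downloaded, LoadImages
print(classify_source('0'))                                          # (False, False, True) -> webcam, LoadStreams
print(classify_source('streams.txt'))                                # (False, False, True) -> stream list, LoadStreams
```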
--- detect.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/detect.py b/detect.py index 46141ed4da3c..61044914e16b 100644 --- a/detect.py +++ b/detect.py @@ -24,10 +24,10 @@ ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative from models.experimental import attempt_load -from utils.datasets import LoadImages, LoadStreams -from utils.general import (LOGGER, apply_classifier, check_img_size, check_imshow, check_requirements, check_suffix, - colorstr, increment_path, non_max_suppression, print_args, save_one_box, scale_coords, - strip_optimizer, xyxy2xywh) +from utils.datasets import IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams +from utils.general import (LOGGER, apply_classifier, check_file, check_img_size, check_imshow, check_requirements, + check_suffix, colorstr, increment_path, non_max_suppression, print_args, save_one_box, + scale_coords, strip_optimizer, xyxy2xywh) from utils.plots import Annotator, colors from utils.torch_utils import load_classifier, select_device, time_sync @@ -61,8 +61,11 @@ def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s) ): source = str(source) save_img = not nosave and not source.endswith('.txt') # save inference images - webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith( - ('rtsp://', 'rtmp://', 'http://', 'https://')) + is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) + is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) + webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file) + if is_url and is_file: + source = check_file(source) # download # Directories save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run From 32b8738735339207b00f79b43987f1c3755a9039 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 5 Nov 2021 19:22:47 +0100 Subject: [PATCH 460/757] Update `check_file()` avoid repeat URL downloads (#5526) --- utils/general.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/utils/general.py b/utils/general.py index fc05c691afa2..15b58257eabb 100755 --- a/utils/general.py +++ b/utils/general.py @@ -338,9 +338,12 @@ def check_file(file, suffix=''): elif file.startswith(('http:/', 'https:/')): # download url = str(Path(file)).replace(':/', '://') # Pathlib turns :// -> :/ file = Path(urllib.parse.unquote(file).split('?')[0]).name # '%2F' to '/', split https://url.com/file.txt?auth - print(f'Downloading {url} to {file}...') - torch.hub.download_url_to_file(url, file) - assert Path(file).exists() and Path(file).stat().st_size > 0, f'File download failed: {url}' # check + if Path(file).is_file(): + print(f'Found {url} locally at {file}') # file already exists + else: + print(f'Downloading {url} to {file}...') + torch.hub.download_url_to_file(url, file) + assert Path(file).exists() and Path(file).stat().st_size > 0, f'File download failed: {url}' # check return file else: # search files = [] From 98a3fd7e8fd82d63aa00200c08a9da40959f7217 Mon Sep 17 00:00:00 2001 From: nanmi <37356276+nanmi@users.noreply.github.com> Date: Sat, 6 Nov 2021 02:26:45 +0800 Subject: [PATCH 461/757] Update export.py (#5471) * fix export onnx bug * Update export.py * Update export.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update yolo.py Co-authored-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> From d895a7f70df7dcda4ce19e904e9d2b3a97705a3a Mon Sep 17 00:00:00 2001 From: Wonbeom Jang Date: Sat, 6 Nov 2021 03:28:53 +0900 Subject: [PATCH 462/757] Update train.py (#5451) * correct --resume True error * delete temp file * Update train.py Co-authored-by: Glenn Jocher From 336437998f4ff5facb94b4e36ef2d941456d2d8f Mon Sep 17 00:00:00 2001 From: Deep Patel <35742688+deepsworld@users.noreply.github.com> Date: Fri, 5 Nov 2021 14:31:53 -0400 Subject: [PATCH 463/757] Suppress ONNX export trace warning (#5437) Checking for `onnx_dynamic` first should suppress the warning: ```log TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! if self.grid[i].shape[2:4] != x[i].shape[2:4] or self.onnx_dynamic ```
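A minimal standalone demonstration of the `or` short-circuit the reordered condition relies on; `grid_shape_mismatch()` here is a toy stand-in for the tensor comparison, not YOLOv5 code:

```python
def grid_shape_mismatch():
    # stand-in for `self.grid[i].shape[2:4] != x[i].shape[2:4]`, the tensor -> bool
    # conversion that emits the TracerWarning during ONNX export
    print('tensor comparison evaluated')
    return False

onnx_dynamic = True
if onnx_dynamic or grid_shape_mismatch():  # `or` short-circuits: the comparison is never evaluated
    print('rebuilding grid')
```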
--- models/yolo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/yolo.py b/models/yolo.py index 85c8d43258e3..510f8e58d9a3 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -55,7 +55,7 @@ def forward(self, x): x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() if not self.training: # inference - if self.grid[i].shape[2:4] != x[i].shape[2:4] or self.onnx_dynamic: + if self.onnx_dynamic or self.grid[i].shape[2:4] != x[i].shape[2:4]: self.grid[i], self.anchor_grid[i] = self._make_grid(nx, ny, i) y = x[i].sigmoid() From 60e42e16c2bc51b303e680afccd72351af7a7a69 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 6 Nov 2021 12:21:17 +0100 Subject: [PATCH 464/757] Update autobatch.py (#5536) --- utils/autobatch.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/autobatch.py b/utils/autobatch.py index 168b16f691ab..1632e9bc6a5a 100644 --- a/utils/autobatch.py +++ b/utils/autobatch.py @@ -52,5 +52,5 @@ def autobatch(model, imgsz=640, fraction=0.9, batch_size=16): batch_sizes = batch_sizes[:len(y)] p = np.polyfit(batch_sizes, y, deg=1) # first degree polynomial fit b = int((f * fraction - p[1]) / p[0]) # y intercept (optimal batch size) - print(f'{prefix}Using colorstr(batch-size {b}) for {d} {t * fraction:.3g}G/{t:.3g}G ({fraction * 100:.0f}%)') + print(f'{prefix}Using batch-size {b} for {d} {t * fraction:.3g}G/{t:.3g}G ({fraction * 100:.0f}%)') return b From cb18cac33d7161ed938c1c4056e17c653df69ad0 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 6 Nov 2021 13:49:00 +0100 Subject: [PATCH 465/757] Update autobatch.py (#5538) * Update autobatch.py * Update autobatch.py * Update autobatch.py --- utils/autobatch.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/utils/autobatch.py b/utils/autobatch.py index 1632e9bc6a5a..3f2b4d1a4c38 100644 --- a/utils/autobatch.py +++ b/utils/autobatch.py @@ -35,11 +35,12 @@ def autobatch(model, imgsz=640, fraction=0.9, batch_size=16): return batch_size d = str(device).upper() # 'CUDA:0' - t = torch.cuda.get_device_properties(device).total_memory / 1024 ** 3 # (GB) - r = torch.cuda.memory_reserved(device) / 1024 ** 3 # (GB) - a = torch.cuda.memory_allocated(device) / 1024 ** 3 # (GB) + properties = torch.cuda.get_device_properties(device) # device properties + t = properties.total_memory / 1024 ** 3 # (GiB) + r = torch.cuda.memory_reserved(device) / 1024 ** 3 # (GiB) + a = torch.cuda.memory_allocated(device) / 1024 ** 3 # (GiB) f = t - (r + a) # free inside reserved - print(f'{prefix}{d} {t:.3g}G total, {r:.3g}G reserved, {a:.3g}G allocated, {f:.3g}G free') + print(f'{prefix}{d} ({properties.name}) {t:.2f}G total, {r:.2f}G reserved, {a:.2f}G allocated, {f:.2f}G free') batch_sizes = [1, 2, 4, 8, 16] try: @@ -52,5 +53,5 @@ def autobatch(model, imgsz=640, fraction=0.9, batch_size=16): batch_sizes = batch_sizes[:len(y)] p = np.polyfit(batch_sizes, y, deg=1) # first degree polynomial fit b = int((f * fraction - p[1]) / p[0]) # y intercept (optimal batch size) - print(f'{prefix}Using batch-size {b} for {d} {t * fraction:.3g}G/{t:.3g}G ({fraction * 100:.0f}%)') + print(f'{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%)') return b
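For illustration, a self-contained sketch of the linear-fit logic autobatch uses to pick a batch size, with synthetic memory numbers standing in for the real CUDA measurements:

```python
import numpy as np

batch_sizes = [1, 2, 4, 8, 16]          # profiled batch sizes
y = [1.2, 1.9, 3.4, 6.5, 12.6]          # hypothetical memory use per batch size (GiB)
f, fraction = 14.0, 0.9                 # assumed free GPU memory (GiB) and target utilization

p = np.polyfit(batch_sizes, y, deg=1)   # linear fit: memory ~= p[0] * b + p[1]
b = int((f * fraction - p[1]) / p[0])   # solve p[0] * b + p[1] = f * fraction for b
print(f'estimated optimal batch size: {b}')  # -> 16 with these synthetic numbers
```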
From 76d90d899a80f52246143edb7a683129b4359396 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 6 Nov 2021 13:58:12 +0100 Subject: [PATCH 466/757] Update Issue Templates with 💡 ProTip! (#5539) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Update bug-report.yml * Update question.yml * Update bug-report.yml --- .github/ISSUE_TEMPLATE/bug-report.yml | 10 ++++++---- .github/ISSUE_TEMPLATE/question.yml | 4 ++-- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml index a20f15c20c93..fcb64138b088 100644 --- a/.github/ISSUE_TEMPLATE/bug-report.yml +++ b/.github/ISSUE_TEMPLATE/bug-report.yml @@ -41,8 +41,8 @@ body: attributes: label: Bug description: Provide console output with error messages and/or screenshots of the bug. - placeholder: > - TIP: Include as much information as possible (screenshots, logs, tracebacks etc.) to receive the most helpful response. + placeholder: | + 💡 ProTip! Include as much information as possible (screenshots, logs, tracebacks etc.) to receive the most helpful response. validations: required: true @@ -51,7 +51,7 @@ body: label: Environment description: Please specify the software and hardware you used to produce the bug. placeholder: | - - YOLO: YOLOv5 🚀 v6.0-37-g620b535 torch 1.9.0+cu111 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB) + - YOLO: YOLOv5 🚀 v6.0-67-g60e42e1 torch 1.9.0+cu111 CUDA:0 (A100-SXM4-40GB, 40536MiB) - OS: Ubuntu 20.04 - Python: 3.9.0 validations: @@ -64,7 +64,9 @@ body: When asking a question, people will be better able to provide help if you provide code that they can easily understand and use to **reproduce** the problem. This is referred to by community members as creating a [minimal reproducible example](https://stackoverflow.com/help/minimal-reproducible-example). placeholder: | - # code to reproduce your issue here + ``` + # Code to reproduce your issue here + ``` validations: required: false diff --git a/.github/ISSUE_TEMPLATE/question.yml b/.github/ISSUE_TEMPLATE/question.yml index 9ae5dd57c608..8e0993c68bab 100644 --- a/.github/ISSUE_TEMPLATE/question.yml +++ b/.github/ISSUE_TEMPLATE/question.yml @@ -22,8 +22,8 @@ body: attributes: label: Question description: What is your question? - placeholder: > - TIP: Include as much information as possible (screenshots, links, reference etc.) to receive the most helpful response. + placeholder: | + 💡 ProTip! Include as much information as possible (screenshots, logs, tracebacks etc.) to receive the most helpful response.
validations: required: true From fa2344cdd8814ac7901b844f9e80d3db8bdc1c32 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 6 Nov 2021 15:07:45 +0100 Subject: [PATCH 467/757] Update `models/hub/*.yaml` files for v6.0n release (#5540) * Update model yamls for v6.0 * Add python models/yolo.py --test * Ghost fix --- models/hub/yolov5-bifpn.yaml | 14 +++++++------- models/hub/yolov5-fpn.yaml | 22 +++++++++++----------- models/hub/yolov5-p2.yaml | 14 +++++++------- models/hub/yolov5-p6.yaml | 16 ++++++++-------- models/hub/yolov5-p7.yaml | 12 ++++++------ models/hub/yolov5-panet.yaml | 24 ++++++++++++------------ models/hub/yolov5s-ghost.yaml | 12 ++++++------ models/hub/yolov5s-transformer.yaml | 12 ++++++------ models/yolo.py | 9 +++++++++ 9 files changed, 72 insertions(+), 63 deletions(-) diff --git a/models/hub/yolov5-bifpn.yaml b/models/hub/yolov5-bifpn.yaml index 2f2c82c70122..504815f5cfa0 100644 --- a/models/hub/yolov5-bifpn.yaml +++ b/models/hub/yolov5-bifpn.yaml @@ -9,22 +9,22 @@ anchors: - [30,61, 62,45, 59,119] # P4/16 - [116,90, 156,198, 373,326] # P5/32 -# YOLOv5 backbone +# YOLOv5 v6.0 backbone backbone: # [from, number, module, args] - [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 [-1, 3, C3, [128]], [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 - [-1, 9, C3, [256]], + [-1, 6, C3, [256]], [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 [-1, 9, C3, [512]], [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 - [-1, 1, SPP, [1024, [5, 9, 13]]], - [-1, 3, C3, [1024, False]], # 9 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 ] -# YOLOv5 BiFPN head +# YOLOv5 v6.0 BiFPN head head: [[-1, 1, Conv, [512, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], @@ -37,7 +37,7 @@ head: [-1, 3, C3, [256, False]], # 17 (P3/8-small) [-1, 1, Conv, [256, 3, 2]], - [[-1, 14, 6], 1, Concat, [1]], # cat P4 + [[-1, 14, 6], 1, Concat, [1]], # cat P4 <--- BiFPN change [-1, 3, C3, [512, False]], # 20 (P4/16-medium) [-1, 1, Conv, [512, 3, 2]], diff --git a/models/hub/yolov5-fpn.yaml b/models/hub/yolov5-fpn.yaml index 707b2136cee1..a23e9c6fbf9f 100644 --- a/models/hub/yolov5-fpn.yaml +++ b/models/hub/yolov5-fpn.yaml @@ -9,34 +9,34 @@ anchors: - [30,61, 62,45, 59,119] # P4/16 - [116,90, 156,198, 373,326] # P5/32 -# YOLOv5 backbone +# YOLOv5 v6.0 backbone backbone: # [from, number, module, args] - [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 - [-1, 3, Bottleneck, [128]], + [-1, 3, C3, [128]], [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 - [-1, 9, BottleneckCSP, [256]], + [-1, 6, C3, [256]], [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 - [-1, 9, BottleneckCSP, [512]], + [-1, 9, C3, [512]], [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 - [-1, 1, SPP, [1024, [5, 9, 13]]], - [-1, 6, BottleneckCSP, [1024]], # 9 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 ] -# YOLOv5 FPN head +# YOLOv5 v6.0 FPN head head: - [[-1, 3, BottleneckCSP, [1024, False]], # 10 (P5/32-large) + [[-1, 3, C3, [1024, False]], # 10 (P5/32-large) [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 6], 1, Concat, [1]], # cat backbone P4 [-1, 1, Conv, [512, 1, 1]], - [-1, 3, BottleneckCSP, [512, False]], # 14 (P4/16-medium) + [-1, 3, C3, [512, False]], # 14 (P4/16-medium) [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 4], 1, Concat, [1]], # cat backbone P3 [-1, 1, Conv, [256, 1, 1]], - [-1, 3, BottleneckCSP, [256, False]], # 18 (P3/8-small) + [-1, 3, C3, [256, False]], # 18 (P3/8-small) [[18, 14, 10], 1, Detect, [nc, anchors]], # Detect(P3, P4, 
P5) ] diff --git a/models/hub/yolov5-p2.yaml b/models/hub/yolov5-p2.yaml index 759e9f92fb29..ffe26ebad182 100644 --- a/models/hub/yolov5-p2.yaml +++ b/models/hub/yolov5-p2.yaml @@ -4,24 +4,24 @@ nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple -anchors: 3 +anchors: 3 # auto-anchor evolves 3 anchors per P output layer -# YOLOv5 backbone +# YOLOv5 v6.0 backbone backbone: # [from, number, module, args] - [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 [-1, 3, C3, [128]], [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 - [-1, 9, C3, [256]], + [-1, 6, C3, [256]], [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 [-1, 9, C3, [512]], [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 - [-1, 1, SPP, [1024, [5, 9, 13]]], - [-1, 3, C3, [1024, False]], # 9 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 ] -# YOLOv5 head +# YOLOv5 v6.0 head head: [[-1, 1, Conv, [512, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], diff --git a/models/hub/yolov5-p6.yaml b/models/hub/yolov5-p6.yaml index 85e142539ce3..28f3e439cccd 100644 --- a/models/hub/yolov5-p6.yaml +++ b/models/hub/yolov5-p6.yaml @@ -4,26 +4,26 @@ nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple -anchors: 3 +anchors: 3 # auto-anchor 3 anchors per P output layer -# YOLOv5 backbone +# YOLOv5 v6.0 backbone backbone: # [from, number, module, args] - [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 [-1, 3, C3, [128]], [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 - [-1, 9, C3, [256]], + [-1, 6, C3, [256]], [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 [-1, 9, C3, [512]], [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 [-1, 3, C3, [768]], [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 - [-1, 1, SPP, [1024, [3, 5, 7]]], - [-1, 3, C3, [1024, False]], # 11 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 11 ] -# YOLOv5 head +# YOLOv5 v6.0 head head: [[-1, 1, Conv, [768, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], @@ -50,7 +50,7 @@ head: [-1, 1, Conv, [768, 3, 2]], [[-1, 12], 1, Concat, [1]], # cat head P6 - [-1, 3, C3, [1024, False]], # 32 (P5/64-xlarge) + [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) ] diff --git a/models/hub/yolov5-p7.yaml b/models/hub/yolov5-p7.yaml index 88a7a95cbbd1..bd2f5845f884 100644 --- a/models/hub/yolov5-p7.yaml +++ b/models/hub/yolov5-p7.yaml @@ -4,16 +4,16 @@ nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple -anchors: 3 +anchors: 3 # auto-anchor 3 anchors per P output layer -# YOLOv5 backbone +# YOLOv5 v6.0 backbone backbone: # [from, number, module, args] - [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 [-1, 3, C3, [128]], [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 - [-1, 9, C3, [256]], + [-1, 6, C3, [256]], [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 [-1, 9, C3, [512]], [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 @@ -21,8 +21,8 @@ backbone: [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 [-1, 3, C3, [1024]], [-1, 1, Conv, [1280, 3, 2]], # 11-P7/128 - [-1, 1, SPP, [1280, [3, 5]]], - [-1, 3, C3, [1280, False]], # 13 + [-1, 3, C3, [1280]], + [-1, 1, SPPF, [1280, 5]], # 13 ] # YOLOv5 head diff --git a/models/hub/yolov5-panet.yaml b/models/hub/yolov5-panet.yaml index 76b9b7e74e33..ccfbf900691c 100644 --- a/models/hub/yolov5-panet.yaml +++ 
b/models/hub/yolov5-panet.yaml @@ -9,40 +9,40 @@ anchors: - [30,61, 62,45, 59,119] # P4/16 - [116,90, 156,198, 373,326] # P5/32 -# YOLOv5 backbone +# YOLOv5 v6.0 backbone backbone: # [from, number, module, args] - [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 - [-1, 3, BottleneckCSP, [128]], + [-1, 3, C3, [128]], [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 - [-1, 9, BottleneckCSP, [256]], + [-1, 6, C3, [256]], [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 - [-1, 9, BottleneckCSP, [512]], + [-1, 9, C3, [512]], [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 - [-1, 1, SPP, [1024, [5, 9, 13]]], - [-1, 3, BottleneckCSP, [1024, False]], # 9 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 ] -# YOLOv5 PANet head +# YOLOv5 v6.0 PANet head head: [[-1, 1, Conv, [512, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 6], 1, Concat, [1]], # cat backbone P4 - [-1, 3, BottleneckCSP, [512, False]], # 13 + [-1, 3, C3, [512, False]], # 13 [-1, 1, Conv, [256, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 4], 1, Concat, [1]], # cat backbone P3 - [-1, 3, BottleneckCSP, [256, False]], # 17 (P3/8-small) + [-1, 3, C3, [256, False]], # 17 (P3/8-small) [-1, 1, Conv, [256, 3, 2]], [[-1, 14], 1, Concat, [1]], # cat head P4 - [-1, 3, BottleneckCSP, [512, False]], # 20 (P4/16-medium) + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) [-1, 1, Conv, [512, 3, 2]], [[-1, 10], 1, Concat, [1]], # cat head P5 - [-1, 3, BottleneckCSP, [1024, False]], # 23 (P5/32-large) + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) ] diff --git a/models/hub/yolov5s-ghost.yaml b/models/hub/yolov5s-ghost.yaml index dbf2c8e03489..ff9519c3f1aa 100644 --- a/models/hub/yolov5s-ghost.yaml +++ b/models/hub/yolov5s-ghost.yaml @@ -9,22 +9,22 @@ anchors: - [30,61, 62,45, 59,119] # P4/16 - [116,90, 156,198, 373,326] # P5/32 -# YOLOv5 backbone +# YOLOv5 v6.0 backbone backbone: # [from, number, module, args] - [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 [-1, 1, GhostConv, [128, 3, 2]], # 1-P2/4 [-1, 3, C3Ghost, [128]], [-1, 1, GhostConv, [256, 3, 2]], # 3-P3/8 - [-1, 9, C3Ghost, [256]], + [-1, 6, C3Ghost, [256]], [-1, 1, GhostConv, [512, 3, 2]], # 5-P4/16 [-1, 9, C3Ghost, [512]], [-1, 1, GhostConv, [1024, 3, 2]], # 7-P5/32 - [-1, 1, SPP, [1024, [5, 9, 13]]], - [-1, 3, C3Ghost, [1024, False]], # 9 + [-1, 3, C3Ghost, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 ] -# YOLOv5 head +# YOLOv5 v6.0 head head: [[-1, 1, GhostConv, [512, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], diff --git a/models/hub/yolov5s-transformer.yaml b/models/hub/yolov5s-transformer.yaml index aeac1acb0582..100d7c447527 100644 --- a/models/hub/yolov5s-transformer.yaml +++ b/models/hub/yolov5s-transformer.yaml @@ -9,22 +9,22 @@ anchors: - [30,61, 62,45, 59,119] # P4/16 - [116,90, 156,198, 373,326] # P5/32 -# YOLOv5 backbone +# YOLOv5 v6.0 backbone backbone: # [from, number, module, args] - [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 [-1, 3, C3, [128]], [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 - [-1, 9, C3, [256]], + [-1, 6, C3, [256]], [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 [-1, 9, C3, [512]], [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 - [-1, 1, SPP, [1024, [5, 9, 13]]], - [-1, 3, C3TR, [1024, False]], # 9 <-------- C3TR() Transformer module + [-1, 3, C3TR, [1024]], # 9 <--- C3TR() Transformer module + [-1, 1, SPPF, [1024, 5]], # 9 ] -# YOLOv5 head +# YOLOv5 
v6.0 head head: [[-1, 1, Conv, [512, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], diff --git a/models/yolo.py b/models/yolo.py index 510f8e58d9a3..c196d46f9efa 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -306,6 +306,7 @@ def parse_model(d, ch): # model_dict, input_channels(3) parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml') parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') parser.add_argument('--profile', action='store_true', help='profile model speed') + parser.add_argument('--test', action='store_true', help='test all yolo*.yaml') opt = parser.parse_args() opt.cfg = check_yaml(opt.cfg) # check YAML print_args(FILE.stem, opt) @@ -320,6 +321,14 @@ def parse_model(d, ch): # model_dict, input_channels(3) img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 640, 640).to(device) y = model(img, profile=True) + # Test all models + if opt.test: + for cfg in Path(ROOT / 'models').rglob('yolo*.yaml'): + try: + _ = Model(cfg) + except Exception as e: + print(f'Error in {cfg}: {e}') + # Tensorboard (not working https://github.com/ultralytics/yolov5/issues/2898) # from torch.utils.tensorboard import SummaryWriter # tb_writer = SummaryWriter('.') From e189fa15eab4866a5f55c8b58d873dacebfb2f74 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 6 Nov 2021 15:41:17 +0100 Subject: [PATCH 468/757] `intersect_dicts()` in hubconf.py fix (#5542) --- hubconf.py | 5 ++--- train.py | 7 +++---- utils/general.py | 5 +++++ utils/torch_utils.py | 5 ----- 4 files changed, 10 insertions(+), 12 deletions(-) diff --git a/hubconf.py b/hubconf.py index 51f658a532ff..3488fef76ac5 100644 --- a/hubconf.py +++ b/hubconf.py @@ -30,7 +30,7 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo from models.experimental import attempt_load from models.yolo import Model from utils.downloads import attempt_download - from utils.general import check_requirements, set_logging + from utils.general import check_requirements, intersect_dicts, set_logging from utils.torch_utils import select_device file = Path(__file__).resolve() @@ -49,9 +49,8 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo model = Model(cfg, channels, classes) # create model if pretrained: ckpt = torch.load(attempt_download(path), map_location=device) # load - msd = model.state_dict() # model state_dict csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 - csd = {k: v for k, v in csd.items() if msd[k].shape == v.shape} # filter + csd = intersect_dicts(csd, model.state_dict(), exclude=['anchors']) # intersect model.load_state_dict(csd, strict=False) # load if len(ckpt['model'].names) == classes: model.names = ckpt['model'].names # set class names attribute diff --git a/train.py b/train.py index 75f3b7cb36a7..90abdc59db88 100644 --- a/train.py +++ b/train.py @@ -43,15 +43,14 @@ from utils.downloads import attempt_download from utils.general import (LOGGER, check_dataset, check_file, check_git_status, check_img_size, check_requirements, check_suffix, check_yaml, colorstr, get_latest_run, increment_path, init_seeds, - labels_to_class_weights, labels_to_image_weights, methods, one_cycle, print_args, - print_mutation, strip_optimizer) + intersect_dicts, labels_to_class_weights, labels_to_image_weights, methods, one_cycle, + print_args, print_mutation, strip_optimizer) from utils.loggers import Loggers from utils.loggers.wandb.wandb_utils import check_wandb_resume from utils.loss import 
ComputeLoss from utils.metrics import fitness from utils.plots import plot_evolve, plot_labels -from utils.torch_utils import (EarlyStopping, ModelEMA, de_parallel, intersect_dicts, select_device, - torch_distributed_zero_first) +from utils.torch_utils import EarlyStopping, ModelEMA, de_parallel, select_device, torch_distributed_zero_first LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html RANK = int(os.getenv('RANK', -1)) diff --git a/utils/general.py b/utils/general.py index 15b58257eabb..46cb1ddef983 100755 --- a/utils/general.py +++ b/utils/general.py @@ -125,6 +125,11 @@ def init_seeds(seed=0): cudnn.benchmark, cudnn.deterministic = (False, True) if seed == 0 else (True, False) +def intersect_dicts(da, db, exclude=()): + # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values + return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape} + + def get_latest_run(search_dir='.'): # Return path to most recent 'last.pt' in /runs (i.e. to --resume from) last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 793e8d8ffd3e..b36e98d0b656 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -153,11 +153,6 @@ def de_parallel(model): return model.module if is_parallel(model) else model -def intersect_dicts(da, db, exclude=()): - # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values - return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape} - - def initialize_weights(model): for m in model.modules(): t = type(m)
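A toy demonstration of what `intersect_dicts()` filters when loading pretrained weights into a model with a different head (synthetic tensors, not a real YOLOv5 checkpoint):

```python
import torch

def intersect_dicts(da, db, exclude=()):
    # same logic as the utils.general version shown in the diff above
    return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape}

csd = {'conv.weight': torch.zeros(16, 3, 3, 3), 'head.weight': torch.zeros(85, 16), 'anchors': torch.zeros(3, 2)}  # checkpoint
msd = {'conv.weight': torch.zeros(16, 3, 3, 3), 'head.weight': torch.zeros(10, 16), 'anchors': torch.zeros(3, 2)}  # new model

print(list(intersect_dicts(csd, msd, exclude=['anchors'])))  # ['conv.weight']
# the shape-mismatched head and the excluded anchors are dropped, so a subsequent
# model.load_state_dict(..., strict=False) loads only the compatible layers
```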
From 60c8a4f6965cd16c22ee425f58f63eb903e40ee0 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 6 Nov 2021 16:03:14 +0100 Subject: [PATCH 469/757] Fix for *.yaml emojis on load (#5543) Fix for Colab hub error: ```python import yaml with open('yolov5s.yaml', errors='ignore') as f: d = yaml.safe_load(f) # model dict print(d) --------------------------------------------------------------------------- ReaderError Traceback (most recent call last) in () 2 3 with open('yolov5s.yaml', errors='ignore') as f: ----> 4 d = yaml.safe_load(f) # model dict 5 6 print(d) 6 frames /usr/local/lib/python3.7/dist-packages/yaml/reader.py in check_printable(self, data) 142 position = self.index+(len(self.buffer)-self.pointer)+match.start() 143 raise ReaderError(self.name, position, ord(character), --> 144 'unicode', "special characters are not allowed") 145 146 def update(self, length): ReaderError: unacceptable character #x1f680: special characters are not allowed in "yolov5s.yaml", position 9 ``` --- models/yolo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/yolo.py b/models/yolo.py index c196d46f9efa..305f0ca0cc88 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -90,7 +90,7 @@ def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, i else: # is *.yaml import yaml # for torch hub self.yaml_file = Path(cfg).name - with open(cfg, errors='ignore') as f: + with open(cfg, encoding='ascii', errors='ignore') as f: self.yaml = yaml.safe_load(f) # model dict From 3f64ad176068ba5f840eefe943cdafbcd9a7753b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 6 Nov 2021 19:28:03 +0100 Subject: [PATCH 470/757] Fix `save_one_box()` (#5545) * Fix `save_one_box()` * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- detect.py | 6 +-- models/common.py | 5 +- utils/general.py | 15 ------ utils/plots.py | 126 ++++++++++++++++++++++++++--------------------- 4 files changed, 76 insertions(+), 76 deletions(-) diff --git a/detect.py b/detect.py index 61044914e16b..9527ae2b57f4 100644 --- a/detect.py +++ b/detect.py @@ -26,9 +26,9 @@ from models.experimental import attempt_load from utils.datasets import IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams from utils.general import (LOGGER, apply_classifier, check_file, check_img_size, check_imshow, check_requirements, - check_suffix, colorstr, increment_path, non_max_suppression, print_args, save_one_box, - scale_coords, strip_optimizer, xyxy2xywh) + check_suffix, colorstr, increment_path, non_max_suppression, print_args, scale_coords, + strip_optimizer, xyxy2xywh) -from utils.plots import Annotator, colors +from utils.plots import Annotator, colors, save_one_box from utils.torch_utils import load_classifier, select_device, time_sync diff --git a/models/common.py b/models/common.py index 04aa2e4749f4..8035ef11a791 100644 --- a/models/common.py +++ b/models/common.py @@ -18,9 +18,8 @@ from torch.cuda import amp from utils.datasets import exif_transpose, letterbox -from utils.general import (colorstr, increment_path, make_divisible, non_max_suppression, save_one_box, scale_coords, - xyxy2xywh) -from utils.plots import Annotator, colors +from utils.general import colorstr, increment_path, make_divisible, non_max_suppression, scale_coords, xyxy2xywh +from utils.plots import Annotator, colors, save_one_box from utils.torch_utils import time_sync diff --git a/utils/general.py b/utils/general.py index 46cb1ddef983..0f45d72498fe 100755 --- a/utils/general.py +++ b/utils/general.py @@ -819,21 +819,6 @@ def apply_classifier(x, model, img, im0): return x -def save_one_box(xyxy, im, file='image.jpg', gain=1.02, pad=10, square=False, BGR=False, save=True): - # Save image crop as {file} with crop size multiple {gain} and {pad} pixels. Save and/or return crop - xyxy = torch.tensor(xyxy).view(-1, 4) - b = xyxy2xywh(xyxy) # boxes - if square: - b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # attempt rectangle to square - b[:, 2:] = b[:, 2:] * gain + pad # box wh * gain + pad - xyxy = xywh2xyxy(b).long() - clip_coords(xyxy, im.shape) - crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2]), ::(1 if BGR else -1)] - if save: - cv2.imwrite(str(increment_path(file, mkdir=True).with_suffix('.jpg')), crop) - return crop - - def increment_path(path, exist_ok=False, sep='', mkdir=False): # Increment file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc.
path = Path(path) # os-agnostic diff --git a/utils/plots.py b/utils/plots.py index 94487b4f5b85..b5e25d668d22 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -17,7 +17,7 @@ import torch from PIL import Image, ImageDraw, ImageFont -from utils.general import is_ascii, is_chinese, user_config_dir, xywh2xyxy, xyxy2xywh +from utils.general import clip_coords, increment_path, is_ascii, is_chinese, user_config_dir, xywh2xyxy, xyxy2xywh from utils.metrics import fitness # Settings @@ -117,6 +117,33 @@ def result(self): return np.asarray(self.im) +def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')): + """ + x: Features to be visualized + module_type: Module type + stage: Module stage within model + n: Maximum number of feature maps to plot + save_dir: Directory to save results + """ + if 'Detect' not in module_type: + batch, channels, height, width = x.shape # batch, channels, height, width + if height > 1 and width > 1: + f = f"stage{stage}_{module_type.split('.')[-1]}_features.png" # filename + + blocks = torch.chunk(x[0].cpu(), channels, dim=0) # select batch index 0, block by channels + n = min(n, channels) # number of plots + fig, ax = plt.subplots(math.ceil(n / 8), 8, tight_layout=True) # 8 rows x n/8 cols + ax = ax.ravel() + plt.subplots_adjust(wspace=0.05, hspace=0.05) + for i in range(n): + ax[i].imshow(blocks[i].squeeze()) # cmap='gray' + ax[i].axis('off') + + print(f'Saving {save_dir / f}... ({n}/{channels})') + plt.savefig(save_dir / f, dpi=300, bbox_inches='tight') + plt.close() + + def hist2d(x, y, n=100): # 2d histogram used in labels.png and evolve.png xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n) @@ -337,37 +364,6 @@ def plot_labels(labels, names=(), save_dir=Path('')): plt.close() -def profile_idetection(start=0, stop=0, labels=(), save_dir=''): - # Plot iDetection '*.txt' per-image logs. 
from utils.plots import *; profile_idetection() - ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel() - s = ['Images', 'Free Storage (GB)', 'RAM Usage (GB)', 'Battery', 'dt_raw (ms)', 'dt_smooth (ms)', 'real-world FPS'] - files = list(Path(save_dir).glob('frames*.txt')) - for fi, f in enumerate(files): - try: - results = np.loadtxt(f, ndmin=2).T[:, 90:-30] # clip first and last rows - n = results.shape[1] # number of rows - x = np.arange(start, min(stop, n) if stop else n) - results = results[:, x] - t = (results[0] - results[0].min()) # set t0=0s - results[0] = x - for i, a in enumerate(ax): - if i < len(results): - label = labels[fi] if len(labels) else f.stem.replace('frames_', '') - a.plot(t, results[i], marker='.', label=label, linewidth=1, markersize=5) - a.set_title(s[i]) - a.set_xlabel('time (s)') - # if fi == len(files) - 1: - # a.set_ylim(bottom=0) - for side in ['top', 'right']: - a.spines[side].set_visible(False) - else: - a.remove() - except Exception as e: - print(f'Warning: Plotting error for {f}; {e}') - ax[1].legend() - plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200) - - def plot_evolve(evolve_csv='path/to/evolve.csv'): # from utils.plots import *; plot_evolve() # Plot evolve.csv hyp evolution results evolve_csv = Path(evolve_csv) @@ -420,28 +416,48 @@ def plot_results(file='path/to/results.csv', dir=''): plt.close() -def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')): - """ - x: Features to be visualized - module_type: Module type - stage: Module stage within model - n: Maximum number of feature maps to plot - save_dir: Directory to save results - """ - if 'Detect' not in module_type: - batch, channels, height, width = x.shape # batch, channels, height, width - if height > 1 and width > 1: - f = f"stage{stage}_{module_type.split('.')[-1]}_features.png" # filename +def profile_idetection(start=0, stop=0, labels=(), save_dir=''): + # Plot iDetection '*.txt' per-image logs. from utils.plots import *; profile_idetection() + ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel() + s = ['Images', 'Free Storage (GB)', 'RAM Usage (GB)', 'Battery', 'dt_raw (ms)', 'dt_smooth (ms)', 'real-world FPS'] + files = list(Path(save_dir).glob('frames*.txt')) + for fi, f in enumerate(files): + try: + results = np.loadtxt(f, ndmin=2).T[:, 90:-30] # clip first and last rows + n = results.shape[1] # number of rows + x = np.arange(start, min(stop, n) if stop else n) + results = results[:, x] + t = (results[0] - results[0].min()) # set t0=0s + results[0] = x + for i, a in enumerate(ax): + if i < len(results): + label = labels[fi] if len(labels) else f.stem.replace('frames_', '') + a.plot(t, results[i], marker='.', label=label, linewidth=1, markersize=5) + a.set_title(s[i]) + a.set_xlabel('time (s)') + # if fi == len(files) - 1: + # a.set_ylim(bottom=0) + for side in ['top', 'right']: + a.spines[side].set_visible(False) + else: + a.remove() + except Exception as e: + print(f'Warning: Plotting error for {f}; {e}') + ax[1].legend() + plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200) - blocks = torch.chunk(x[0].cpu(), channels, dim=0) # select batch index 0, block by channels - n = min(n, channels) # number of plots - fig, ax = plt.subplots(math.ceil(n / 8), 8, tight_layout=True) # 8 rows x n/8 cols - ax = ax.ravel() - plt.subplots_adjust(wspace=0.05, hspace=0.05) - for i in range(n): - ax[i].imshow(blocks[i].squeeze()) # cmap='gray' - ax[i].axis('off') - print(f'Saving {save_dir / f}... 
({n}/{channels})') - plt.savefig(save_dir / f, dpi=300, bbox_inches='tight') - plt.close() +def save_one_box(xyxy, im, file='image.jpg', gain=1.02, pad=10, square=False, BGR=False, save=True): + # Save image crop as {file} with crop size multiple {gain} and {pad} pixels. Save and/or return crop + xyxy = torch.tensor(xyxy).view(-1, 4) + b = xyxy2xywh(xyxy) # boxes + if square: + b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # attempt rectangle to square + b[:, 2:] = b[:, 2:] * gain + pad # box wh * gain + pad + xyxy = xywh2xyxy(b).long() + clip_coords(xyxy, im.shape) + crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2]), ::(1 if BGR else -1)] + if save: + file.parent.mkdir(parents=True, exist_ok=True) # make directory + cv2.imwrite(str(increment_path(file).with_suffix('.jpg')), crop) + return crop From b8f979bafab6db020d86779b4b40619cd4d77d57 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 6 Nov 2021 20:34:54 +0100 Subject: [PATCH 471/757] Inside Ultralytics video https://youtu.be/Zgi9g1ksQHc (#5546) * Update detect.py Usage examples * Inside Ultralytics at https://youtu.be/Zgi9g1ksQHc --- README.md | 6 +++--- detect.py | 8 +++++++- tutorial.ipynb | 6 +++--- 3 files changed, 13 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 3e2f5b656cde..6e72d85da7ee 100644 --- a/README.md +++ b/README.md @@ -109,11 +109,11 @@ the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases) and ```bash $ python detect.py --source 0 # webcam - file.jpg # image - file.mp4 # video + img.jpg # image + vid.mp4 # video path/ # directory path/*.jpg # glob - 'https://youtu.be/NUsoVlDFqZg' # YouTube + 'https://youtu.be/Zgi9g1ksQHc' # YouTube 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream ``` diff --git a/detect.py b/detect.py index 9527ae2b57f4..661a0b86bc99 100644 --- a/detect.py +++ b/detect.py @@ -3,7 +3,13 @@ Run inference on images, videos, directories, streams, etc. Usage: - $ python path/to/detect.py --source path/to/img.jpg --weights yolov5s.pt --img 640 + $ python path/to/detect.py --weights yolov5s.pt --source 0 # webcam + img.jpg # image + vid.mp4 # video + path/ # directory + path/*.jpg # glob + 'https://youtu.be/Zgi9g1ksQHc' # YouTube + 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream """ import argparse diff --git a/tutorial.ipynb b/tutorial.ipynb index 9184a66d3f42..b013fe694ba4 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -438,11 +438,11 @@ "\n", "```shell\n", "python detect.py --source 0 # webcam\n", - " file.jpg # image \n", - " file.mp4 # video\n", + " img.jpg # image \n", + " vid.mp4 # video\n", " path/ # directory\n", " path/*.jpg # glob\n", - " 'https://youtu.be/NUsoVlDFqZg' # YouTube\n", + " 'https://youtu.be/Zgi9g1ksQHc' # YouTube\n", " 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream\n", "```" ] From 0de4a9c35d0ab2a204eeb1eab879106c799b28bd Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 8 Nov 2021 16:04:31 +0100 Subject: [PATCH 472/757] Add `--conf-thres` >> 0.001 warning (#5567) Partially addresses invalid mAPs at higher confidence threshold issue https://github.com/ultralytics/yolov5/issues/1466. 
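A toy illustration (hypothetical scores) of why a high threshold corrupts mAP: detections below `--conf-thres` are discarded before evaluation, so recall is capped and the area under the precision-recall curve shrinks.

```python
import numpy as np

scores = np.array([0.9, 0.7, 0.4, 0.2, 0.05])  # hypothetical true-positive confidences
total_gt = 5                                   # ground-truth objects

for conf_thres in (0.001, 0.25):
    recall_cap = (scores >= conf_thres).sum() / total_gt
    print(f'conf_thres={conf_thres}: max attainable recall = {recall_cap:.2f}')
# 0.001 -> 1.00, 0.25 -> 0.60: the PR curve is truncated at 0.60 recall, lowering mAP
```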
--- val.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/val.py b/val.py index 2118ad400ac7..d2797f1189ec 100644 --- a/val.py +++ b/val.py @@ -330,6 +330,8 @@ def main(opt): check_requirements(requirements=ROOT / 'requirements.txt', exclude=('tensorboard', 'thop')) if opt.task in ('train', 'val', 'test'): # run normally + if opt.conf_thres > 0.001: # https://github.com/ultralytics/yolov5/issues/1466 + LOGGER.info(f'WARNING: confidence threshold {opt.conf_thres} >> 0.001 will produce invalid mAP values.') run(**vars(opt)) elif opt.task == 'speed': # speed benchmarks From 79bca2bf64da04e7e1e74a132eb54171f41638cc Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 8 Nov 2021 16:32:15 +0100 Subject: [PATCH 473/757] `LOGGER` consolidation (#5569) * Logger consolidation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- models/common.py | 5 +---- train.py | 5 ++--- utils/augmentations.py | 7 +++---- utils/datasets.py | 13 ++++++------- utils/general.py | 2 +- utils/torch_utils.py | 2 -- 6 files changed, 13 insertions(+), 21 deletions(-) diff --git a/models/common.py b/models/common.py index 8035ef11a791..f9e4fc69f006 100644 --- a/models/common.py +++ b/models/common.py @@ -3,7 +3,6 @@ Common modules """ -import logging import math import warnings from copy import copy @@ -18,12 +17,10 @@ from torch.cuda import amp from utils.datasets import exif_transpose, letterbox -from utils.general import colorstr, increment_path, make_divisible, non_max_suppression, scale_coords, xyxy2xywh +from utils.general import LOGGER, colorstr, increment_path, make_divisible, non_max_suppression, scale_coords, xyxy2xywh from utils.plots import Annotator, colors, save_one_box from utils.torch_utils import time_sync -LOGGER = logging.getLogger(__name__) - def autopad(k, p=None): # kernel, padding # Pad to 'same' diff --git a/train.py b/train.py index 90abdc59db88..fedc55d8be5c 100644 --- a/train.py +++ b/train.py @@ -7,7 +7,6 @@ """ import argparse -import logging import math import os import random @@ -201,8 +200,8 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # DP mode if cuda and RANK == -1 and torch.cuda.device_count() > 1: - logging.warning('DP not recommended, instead use torch.distributed.run for best DDP Multi-GPU results.\n' - 'See Multi-GPU Tutorial at https://github.com/ultralytics/yolov5/issues/475 to get started.') + LOGGER.warning('WARNING: DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n' + 'See Multi-GPU Tutorial at https://github.com/ultralytics/yolov5/issues/475 to get started.') model = torch.nn.DataParallel(model) # SyncBatchNorm diff --git a/utils/augmentations.py b/utils/augmentations.py index 1c3e66fb87ab..5dcfd49fdd05 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -3,14 +3,13 @@ Image augmentation functions """ -import logging import math import random import cv2 import numpy as np -from utils.general import check_version, colorstr, resample_segments, segment2box +from utils.general import LOGGER, check_version, colorstr, resample_segments, segment2box from utils.metrics import bbox_ioa @@ -32,11 +31,11 @@ def __init__(self): A.ImageCompression(quality_lower=75, p=0.0)], bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels'])) - logging.info(colorstr('albumentations: ') + ', '.join(f'{x}' for x in self.transform.transforms if x.p)) + 
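The diff is truncated here; purely as a hypothetical sketch of the unified-backend idea the commit message describes (not the actual `DetectMultiBackend` implementation), dispatching on the weights suffix looks roughly like:

```python
from pathlib import Path

class MultiBackendSketch:
    # hypothetical skeleton: one wrapper picks a runtime from the weights suffix,
    # then exposes a single inference API regardless of export format
    def __init__(self, weights='yolov5s.pt'):
        self.suffix = Path(weights).suffix.lower()

    def forward(self, im):
        if self.suffix == '.pt':
            return f'PyTorch/TorchScript inference on {im}'  # would call the torch model
        if self.suffix == '.onnx':
            return f'ONNX Runtime inference on {im}'         # would call an InferenceSession
        if self.suffix in ('.pb', '.tflite'):
            return f'TensorFlow inference on {im}'           # would call a TF/TFLite interpreter
        raise ValueError(f'unsupported weights: {self.suffix}')

print(MultiBackendSketch('yolov5s.onnx').forward('im.jpg'))  # ONNX Runtime inference on im.jpg
```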
LOGGER.info(colorstr('albumentations: ') + ', '.join(f'{x}' for x in self.transform.transforms if x.p)) except ImportError: # package not installed, skip pass except Exception as e: - logging.info(colorstr('albumentations: ') + f'{e}') + LOGGER.info(colorstr('albumentations: ') + f'{e}') def __call__(self, im, labels, p=1.0): if self.transform and random.random() < p: diff --git a/utils/datasets.py b/utils/datasets.py index 15fca1775849..94acaaa92cd7 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -6,7 +6,6 @@ import glob import hashlib import json -import logging import os import random import shutil @@ -335,7 +334,7 @@ def update(self, i, cap, stream): if success: self.imgs[i] = im else: - LOGGER.warn('WARNING: Video stream unresponsive, please check your IP camera connection.') + LOGGER.warning('WARNING: Video stream unresponsive, please check your IP camera connection.') self.imgs[i] *= 0 cap.open(stream) # re-open stream if signal was lost time.sleep(1 / self.fps[i]) # wait time @@ -427,7 +426,7 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted" tqdm(None, desc=prefix + d, total=n, initial=n) # display cache results if cache['msgs']: - logging.info('\n'.join(cache['msgs'])) # display warnings + LOGGER.info('\n'.join(cache['msgs'])) # display warnings assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {HELP_URL}' # Read cache @@ -525,9 +524,9 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): pbar.close() if msgs: - logging.info('\n'.join(msgs)) + LOGGER.info('\n'.join(msgs)) if nf == 0: - logging.info(f'{prefix}WARNING: No labels found in {path}. See {HELP_URL}') + LOGGER.warning(f'{prefix}WARNING: No labels found in {path}. 
See {HELP_URL}') x['hash'] = get_hash(self.label_files + self.img_files) x['results'] = nf, nm, ne, nc, len(self.img_files) x['msgs'] = msgs # warnings @@ -535,9 +534,9 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): try: np.save(path, x) # save cache for next time path.with_suffix('.cache.npy').rename(path) # remove .npy suffix - logging.info(f'{prefix}New cache created: {path}') + LOGGER.info(f'{prefix}New cache created: {path}') except Exception as e: - logging.info(f'{prefix}WARNING: Cache directory {path.parent} is not writeable: {e}') # path not writeable + LOGGER.warning(f'{prefix}WARNING: Cache directory {path.parent} is not writeable: {e}') # not writeable return x def __len__(self):
diff --git a/utils/general.py b/utils/general.py index 0f45d72498fe..b0ea1527129a 100755 --- a/utils/general.py +++ b/utils/general.py @@ -45,7 +45,7 @@ def set_logging(name=None, verbose=True): # Sets level and returns logger rank = int(os.getenv('RANK', -1)) # rank in world for Multi-GPU trainings - logging.basicConfig(format="%(message)s", level=logging.INFO if (verbose and rank in (-1, 0)) else logging.WARN) + logging.basicConfig(format="%(message)s", level=logging.INFO if (verbose and rank in (-1, 0)) else logging.WARNING) return logging.getLogger(name)
diff --git a/utils/torch_utils.py b/utils/torch_utils.py index b36e98d0b656..73acec8e819c 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -4,7 +4,6 @@ """ import datetime -import logging import math import os import platform @@ -100,7 +99,6 @@ def profile(input, ops, n=10, device=None): # profile(input, [m1, m2], n=100) # profile over 100 iterations results = [] - logging.basicConfig(format="%(message)s", level=logging.INFO) device = device or select_device() print(f"{'Params':>12s}{'GFLOPs':>12s}{'GPU_mem (GB)':>14s}{'forward (ms)':>14s}{'backward (ms)':>14s}" f"{'input':>24s}{'output':>24s}")
From 3883261143c56a7eca035f94f2bcb3e4023e72bc Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 9 Nov 2021 16:45:02 +0100 Subject: [PATCH 474/757] New `DetectMultiBackend()` class (#5549)
* New `DetectMultiBackend()` class * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * pb to pt fix * Cleanup * explicit apply_classifier path * Cleanup2 * Cleanup3 * Cleanup4 * Cleanup5 * Cleanup6 * val.py MultiBackend inference * warmup fix * to device fix * pt fix * device fix * Val cleanup * COCO128 URL to assets * half fix * detect fix * detect fix 2 * remove half from DetectMultiBackend * training half handling * training half handling 2 * training half handling 3 * Cleanup * Fix CI error * Add torchscript _extra_files * Add TorchScript * Add CoreML * CoreML cleanup * revert default to pt * Add Usage examples * Cleanup val Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
--- data/coco128.yaml | 2 +- detect.py | 133
+++++++++---------------------------------- export.py | 5 +- models/common.py | 128 ++++++++++++++++++++++++++++++++++++++++- utils/general.py | 3 +- utils/torch_utils.py | 20 ------- val.py | 74 ++++++++++++------------ 7 files changed, 200 insertions(+), 165 deletions(-) diff --git a/data/coco128.yaml b/data/coco128.yaml index b1dfb004afa1..84a91b18359d 100644 --- a/data/coco128.yaml +++ b/data/coco128.yaml @@ -27,4 +27,4 @@ names: ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 't # Download script/URL (optional) -download: https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip +download: https://ultralytics.com/assets/coco128.zip diff --git a/detect.py b/detect.py index 661a0b86bc99..108f8f138052 100644 --- a/detect.py +++ b/detect.py @@ -14,12 +14,10 @@ import argparse import os -import platform import sys from pathlib import Path import cv2 -import numpy as np import torch import torch.backends.cudnn as cudnn @@ -29,13 +27,12 @@ sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative -from models.experimental import attempt_load +from models.common import DetectMultiBackend from utils.datasets import IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams -from utils.general import (LOGGER, apply_classifier, check_file, check_img_size, check_imshow, check_requirements, - check_suffix, colorstr, increment_path, non_max_suppression, print_args, scale_coords, - strip_optimizer, xyxy2xywh) +from utils.general import (LOGGER, check_file, check_img_size, check_imshow, check_requirements, colorstr, + increment_path, non_max_suppression, print_args, scale_coords, strip_optimizer, xyxy2xywh) from utils.plots import Annotator, colors, save_one_box -from utils.torch_utils import load_classifier, select_device, time_sync +from utils.torch_utils import select_device, time_sync @torch.no_grad() @@ -77,120 +74,45 @@ def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s) save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir - # Initialize + # Load model device = select_device(device) - half &= device.type != 'cpu' # half precision only supported on CUDA + model = DetectMultiBackend(weights, device=device, dnn=dnn) + stride, names, pt, jit, onnx = model.stride, model.names, model.pt, model.jit, model.onnx + imgsz = check_img_size(imgsz, s=stride) # check image size - # Load model - w = str(weights[0] if isinstance(weights, list) else weights) - classify, suffix, suffixes = False, Path(w).suffix.lower(), ['.pt', '.onnx', '.tflite', '.pb', ''] - check_suffix(w, suffixes) # check weights have acceptable suffix - pt, onnx, tflite, pb, saved_model = (suffix == x for x in suffixes) # backend booleans - stride, names = 64, [f'class{i}' for i in range(1000)] # assign defaults + # Half + half &= pt and device.type != 'cpu' # half precision only supported by PyTorch on CUDA if pt: - model = torch.jit.load(w) if 'torchscript' in w else attempt_load(weights, map_location=device) - stride = int(model.stride.max()) # model stride - names = model.module.names if hasattr(model, 'module') else model.names # get class names - if half: - model.half() # to FP16 - if classify: # second-stage classifier - modelc = load_classifier(name='resnet50', n=2) # initialize - modelc.load_state_dict(torch.load('resnet50.pt', map_location=device)['model']).to(device).eval() - elif onnx: - if dnn: - 
check_requirements(('opencv-python>=4.5.4',)) - net = cv2.dnn.readNetFromONNX(w) - else: - check_requirements(('onnx', 'onnxruntime-gpu' if torch.has_cuda else 'onnxruntime')) - import onnxruntime - session = onnxruntime.InferenceSession(w, None) - else: # TensorFlow models - import tensorflow as tf - if pb: # https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt - def wrap_frozen_graph(gd, inputs, outputs): - x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=""), []) # wrapped import - return x.prune(tf.nest.map_structure(x.graph.as_graph_element, inputs), - tf.nest.map_structure(x.graph.as_graph_element, outputs)) - - graph_def = tf.Graph().as_graph_def() - graph_def.ParseFromString(open(w, 'rb').read()) - frozen_func = wrap_frozen_graph(gd=graph_def, inputs="x:0", outputs="Identity:0") - elif saved_model: - model = tf.keras.models.load_model(w) - elif tflite: - if "edgetpu" in w: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python - import tflite_runtime.interpreter as tflri - delegate = {'Linux': 'libedgetpu.so.1', # install libedgetpu https://coral.ai/software/#edgetpu-runtime - 'Darwin': 'libedgetpu.1.dylib', - 'Windows': 'edgetpu.dll'}[platform.system()] - interpreter = tflri.Interpreter(model_path=w, experimental_delegates=[tflri.load_delegate(delegate)]) - else: - interpreter = tf.lite.Interpreter(model_path=w) # load TFLite model - interpreter.allocate_tensors() # allocate - input_details = interpreter.get_input_details() # inputs - output_details = interpreter.get_output_details() # outputs - int8 = input_details[0]['dtype'] == np.uint8 # is TFLite quantized uint8 model - imgsz = check_img_size(imgsz, s=stride) # check image size + model.model.half() if half else model.model.float() # Dataloader if webcam: view_img = check_imshow() cudnn.benchmark = True # set True to speed up constant image size inference - dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt) + dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt and not jit) bs = len(dataset) # batch_size else: - dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt) + dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt and not jit) bs = 1 # batch_size vid_path, vid_writer = [None] * bs, [None] * bs # Run inference if pt and device.type != 'cpu': - model(torch.zeros(1, 3, *imgsz).to(device).type_as(next(model.parameters()))) # run once + model(torch.zeros(1, 3, *imgsz).to(device).type_as(next(model.model.parameters()))) # warmup dt, seen = [0.0, 0.0, 0.0], 0 - for path, img, im0s, vid_cap, s in dataset: + for path, im, im0s, vid_cap, s in dataset: t1 = time_sync() - if onnx: - img = img.astype('float32') - else: - img = torch.from_numpy(img).to(device) - img = img.half() if half else img.float() # uint8 to fp16/32 - img /= 255 # 0 - 255 to 0.0 - 1.0 - if len(img.shape) == 3: - img = img[None] # expand for batch dim + im = torch.from_numpy(im).to(device) + im = im.half() if half else im.float() # uint8 to fp16/32 + im /= 255 # 0 - 255 to 0.0 - 1.0 + if len(im.shape) == 3: + im = im[None] # expand for batch dim t2 = time_sync() dt[0] += t2 - t1 # Inference - if pt: - visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False - pred = model(img, augment=augment, visualize=visualize)[0] - elif onnx: - if dnn: - net.setInput(img) - pred = torch.tensor(net.forward()) - else: - pred = torch.tensor(session.run([session.get_outputs()[0].name], {session.get_inputs()[0].name: 
img})) - else: # tensorflow model (tflite, pb, saved_model) - imn = img.permute(0, 2, 3, 1).cpu().numpy() # image in numpy - if pb: - pred = frozen_func(x=tf.constant(imn)).numpy() - elif saved_model: - pred = model(imn, training=False).numpy() - elif tflite: - if int8: - scale, zero_point = input_details[0]['quantization'] - imn = (imn / scale + zero_point).astype(np.uint8) # de-scale - interpreter.set_tensor(input_details[0]['index'], imn) - interpreter.invoke() - pred = interpreter.get_tensor(output_details[0]['index']) - if int8: - scale, zero_point = output_details[0]['quantization'] - pred = (pred.astype(np.float32) - zero_point) * scale # re-scale - pred[..., 0] *= imgsz[1] # x - pred[..., 1] *= imgsz[0] # y - pred[..., 2] *= imgsz[1] # w - pred[..., 3] *= imgsz[0] # h - pred = torch.tensor(pred) + visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False + pred = model(im, augment=augment, visualize=visualize) t3 = time_sync() dt[1] += t3 - t2 @@ -199,8 +121,7 @@ def wrap_frozen_graph(gd, inputs, outputs): dt[2] += time_sync() - t3 # Second-stage classifier (optional) - if classify: - pred = apply_classifier(pred, modelc, img, im0s) + # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s) # Process predictions for i, det in enumerate(pred): # per image @@ -212,15 +133,15 @@ def wrap_frozen_graph(gd, inputs, outputs): p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0) p = Path(p) # to Path - save_path = str(save_dir / p.name) # img.jpg - txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # img.txt - s += '%gx%g ' % img.shape[2:] # print string + save_path = str(save_dir / p.name) # im.jpg + txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt + s += '%gx%g ' % im.shape[2:] # print string gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh imc = im0.copy() if save_crop else im0 # for save_crop annotator = Annotator(im0, line_width=line_thickness, example=str(names)) if len(det): # Rescale boxes from img_size to im0 size - det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round() + det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round() # Print results for c in det[:, -1].unique(): diff --git a/export.py b/export.py index f5eb487045b0..4cf30e34fc7b 100644 --- a/export.py +++ b/export.py @@ -21,6 +21,7 @@ """ import argparse +import json import os import subprocess import sys @@ -54,7 +55,9 @@ def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:' f = file.with_suffix('.torchscript.pt') ts = torch.jit.trace(model, im, strict=False) - (optimize_for_mobile(ts) if optimize else ts).save(f) + d = {"shape": im.shape, "stride": int(max(model.stride)), "names": model.names} + extra_files = {'config.txt': json.dumps(d)} # torch._C.ExtraFilesMap() + (optimize_for_mobile(ts) if optimize else ts).save(f, _extra_files=extra_files) LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') except Exception as e: diff --git a/models/common.py b/models/common.py index f9e4fc69f006..3ea7ba5477a6 100644 --- a/models/common.py +++ b/models/common.py @@ -3,11 +3,14 @@ Common modules """ +import json import math +import platform import warnings from copy import copy from pathlib import Path +import cv2 import numpy as np import pandas as pd import requests @@ -17,7 +20,8 @@ from torch.cuda import amp from utils.datasets import exif_transpose, letterbox 
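The `_extra_files` addition in export.py above pairs with the TorchScript branch of `DetectMultiBackend` below: stride and class names travel inside the TorchScript archive itself, so nothing extra is needed at load time. A minimal round-trip sketch of that mechanism (the filename is illustrative):

import json
import torch

extra_files = {'config.txt': ''}  # keys to extract, filled in-place by torch.jit.load
model = torch.jit.load('yolov5s.torchscript.pt', _extra_files=extra_files)
if extra_files['config.txt']:  # empty string if the model was exported without metadata
    d = json.loads(extra_files['config.txt'])
    stride, names = int(d['stride']), d['names']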
-from utils.general import LOGGER, colorstr, increment_path, make_divisible, non_max_suppression, scale_coords, xyxy2xywh +from utils.general import (LOGGER, check_requirements, check_suffix, colorstr, increment_path, make_divisible, + non_max_suppression, scale_coords, xywh2xyxy, xyxy2xywh) from utils.plots import Annotator, colors, save_one_box from utils.torch_utils import time_sync @@ -269,6 +273,128 @@ def forward(self, x): return torch.cat(x, self.d) +class DetectMultiBackend(nn.Module): + # YOLOv5 MultiBackend class for python inference on various backends + def __init__(self, weights='yolov5s.pt', device=None, dnn=True): + # Usage: + # PyTorch: weights = *.pt + # TorchScript: *.torchscript.pt + # CoreML: *.mlmodel + # TensorFlow: *_saved_model + # TensorFlow: *.pb + # TensorFlow Lite: *.tflite + # ONNX Runtime: *.onnx + # OpenCV DNN: *.onnx with dnn=True + super().__init__() + w = str(weights[0] if isinstance(weights, list) else weights) + suffix, suffixes = Path(w).suffix.lower(), ['.pt', '.onnx', '.tflite', '.pb', '', '.mlmodel'] + check_suffix(w, suffixes) # check weights have acceptable suffix + pt, onnx, tflite, pb, saved_model, coreml = (suffix == x for x in suffixes) # backend booleans + jit = pt and 'torchscript' in w.lower() + stride, names = 64, [f'class{i}' for i in range(1000)] # assign defaults + + if jit: # TorchScript + LOGGER.info(f'Loading {w} for TorchScript inference...') + extra_files = {'config.txt': ''} # model metadata + model = torch.jit.load(w, _extra_files=extra_files) + if extra_files['config.txt']: + d = json.loads(extra_files['config.txt']) # extra_files dict + stride, names = int(d['stride']), d['names'] + elif pt: # PyTorch + from models.experimental import attempt_load # scoped to avoid circular import + model = torch.jit.load(w) if 'torchscript' in w else attempt_load(weights, map_location=device) + stride = int(model.stride.max()) # model stride + names = model.module.names if hasattr(model, 'module') else model.names # get class names + elif coreml: # CoreML *.mlmodel + import coremltools as ct + model = ct.models.MLModel(w) + elif dnn: # ONNX OpenCV DNN + LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...') + check_requirements(('opencv-python>=4.5.4',)) + net = cv2.dnn.readNetFromONNX(w) + elif onnx: # ONNX Runtime + LOGGER.info(f'Loading {w} for ONNX Runtime inference...') + check_requirements(('onnx', 'onnxruntime-gpu' if torch.has_cuda else 'onnxruntime')) + import onnxruntime + session = onnxruntime.InferenceSession(w, None) + else: # TensorFlow model (TFLite, pb, saved_model) + import tensorflow as tf + if pb: # https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt + def wrap_frozen_graph(gd, inputs, outputs): + x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=""), []) # wrapped + return x.prune(tf.nest.map_structure(x.graph.as_graph_element, inputs), + tf.nest.map_structure(x.graph.as_graph_element, outputs)) + + LOGGER.info(f'Loading {w} for TensorFlow *.pb inference...') + graph_def = tf.Graph().as_graph_def() + graph_def.ParseFromString(open(w, 'rb').read()) + frozen_func = wrap_frozen_graph(gd=graph_def, inputs="x:0", outputs="Identity:0") + elif saved_model: + LOGGER.info(f'Loading {w} for TensorFlow saved_model inference...') + model = tf.keras.models.load_model(w) + elif tflite: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python + if 'edgetpu' in w.lower(): + LOGGER.info(f'Loading {w} for TensorFlow Edge TPU inference...') + import 
tflite_runtime.interpreter as tfli + delegate = {'Linux': 'libedgetpu.so.1', # install https://coral.ai/software/#edgetpu-runtime + 'Darwin': 'libedgetpu.1.dylib', + 'Windows': 'edgetpu.dll'}[platform.system()] + interpreter = tfli.Interpreter(model_path=w, experimental_delegates=[tfli.load_delegate(delegate)]) + else: + LOGGER.info(f'Loading {w} for TensorFlow Lite inference...') + interpreter = tf.lite.Interpreter(model_path=w) # load TFLite model + interpreter.allocate_tensors() # allocate + input_details = interpreter.get_input_details() # inputs + output_details = interpreter.get_output_details() # outputs + self.__dict__.update(locals()) # assign all variables to self + + def forward(self, im, augment=False, visualize=False, val=False): + # YOLOv5 MultiBackend inference + b, ch, h, w = im.shape # batch, channel, height, width + if self.pt: # PyTorch + y = self.model(im) if self.jit else self.model(im, augment=augment, visualize=visualize) + return y if val else y[0] + elif self.coreml: # CoreML *.mlmodel + im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3) + im = Image.fromarray((im[0] * 255).astype('uint8')) + # im = im.resize((192, 320), Image.ANTIALIAS) + y = self.model.predict({'image': im}) # coordinates are xywh normalized + box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]]) # xyxy pixels + conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float) + y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1) + elif self.onnx: # ONNX + im = im.cpu().numpy() # torch to numpy + if self.dnn: # ONNX OpenCV DNN + self.net.setInput(im) + y = self.net.forward() + else: # ONNX Runtime + y = self.session.run([self.session.get_outputs()[0].name], {self.session.get_inputs()[0].name: im})[0] + else: # TensorFlow model (TFLite, pb, saved_model) + im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3) + if self.pb: + y = self.frozen_func(x=self.tf.constant(im)).numpy() + elif self.saved_model: + y = self.model(im, training=False).numpy() + elif self.tflite: + input, output = self.input_details[0], self.output_details[0] + int8 = input['dtype'] == np.uint8 # is TFLite quantized uint8 model + if int8: + scale, zero_point = input['quantization'] + im = (im / scale + zero_point).astype(np.uint8) # de-scale + self.interpreter.set_tensor(input['index'], im) + self.interpreter.invoke() + y = self.interpreter.get_tensor(output['index']) + if int8: + scale, zero_point = output['quantization'] + y = (y.astype(np.float32) - zero_point) * scale # re-scale + y[..., 0] *= w # x + y[..., 1] *= h # y + y[..., 2] *= w # w + y[..., 3] *= h # h + y = torch.tensor(y) + return (y, []) if val else y + + class AutoShape(nn.Module): # YOLOv5 input-robust model wrapper for passing cv2/np/PIL/torch inputs. 
Includes preprocessing, inference and NMS conf = 0.25 # NMS confidence threshold diff --git a/utils/general.py b/utils/general.py index b0ea1527129a..a6fe603850c8 100755 --- a/utils/general.py +++ b/utils/general.py @@ -785,7 +785,8 @@ def print_mutation(results, hyp, save_dir, bucket): def apply_classifier(x, model, img, im0): - # Apply a second stage classifier to yolo outputs + # Apply a second stage classifier to YOLO outputs + # Example model = torchvision.models.__dict__['efficientnet_b0'](pretrained=True).to(device).eval() im0 = [im0] if isinstance(im0, np.ndarray) else im0 for i, d in enumerate(x): # per image if d is not None and len(d): diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 73acec8e819c..b65b69fe1559 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -17,7 +17,6 @@ import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F -import torchvision from utils.general import LOGGER @@ -235,25 +234,6 @@ def model_info(model, verbose=False, img_size=640): LOGGER.info(f"Model Summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}") -def load_classifier(name='resnet101', n=2): - # Loads a pretrained model reshaped to n-class output - model = torchvision.models.__dict__[name](pretrained=True) - - # ResNet model properties - # input_size = [3, 224, 224] - # input_space = 'RGB' - # input_range = [0, 1] - # mean = [0.485, 0.456, 0.406] - # std = [0.229, 0.224, 0.225] - - # Reshape output to n classes - filters = model.fc.weight.shape[1] - model.fc.bias = nn.Parameter(torch.zeros(n), requires_grad=True) - model.fc.weight = nn.Parameter(torch.zeros(n, filters), requires_grad=True) - model.fc.out_features = n - return model - - def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416) # scales img(bs,3,y,x) by ratio constrained to gs-multiple if ratio == 1.0: diff --git a/val.py b/val.py index d2797f1189ec..2bcbc582a500 100644 --- a/val.py +++ b/val.py @@ -23,10 +23,10 @@ sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative -from models.experimental import attempt_load +from models.common import DetectMultiBackend from utils.callbacks import Callbacks from utils.datasets import create_dataloader -from utils.general import (LOGGER, box_iou, check_dataset, check_img_size, check_requirements, check_suffix, check_yaml, +from utils.general import (LOGGER, box_iou, check_dataset, check_img_size, check_requirements, check_yaml, coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, print_args, scale_coords, xywh2xyxy, xyxy2xywh) from utils.metrics import ConfusionMatrix, ap_per_class @@ -100,6 +100,7 @@ def run(data, name='exp', # save to project/name exist_ok=False, # existing project/name ok, do not increment half=True, # use FP16 half-precision inference + dnn=False, # use OpenCV DNN for ONNX inference model=None, dataloader=None, save_dir=Path(''), @@ -110,8 +111,10 @@ def run(data, # Initialize/load model and set device training = model is not None if training: # called by train.py - device = next(model.parameters()).device # get model device + device, pt = next(model.parameters()).device, True # get model device, PyTorch model + half &= device.type != 'cpu' # half precision only supported on CUDA + model.half() if half else model.float() else: # called directly device = select_device(device, batch_size=batch_size) @@ -120,22 +123,21 @@ def run(data, (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, 
exist_ok=True) # make dir # Load model - check_suffix(weights, '.pt') - model = attempt_load(weights, map_location=device) # load FP32 model - gs = max(int(model.stride.max()), 32) # grid size (max stride) - imgsz = check_img_size(imgsz, s=gs) # check image size - - # Multi-GPU disabled, incompatible with .half() https://github.com/ultralytics/yolov5/issues/99 - # if device.type != 'cpu' and torch.cuda.device_count() > 1: - # model = nn.DataParallel(model) + model = DetectMultiBackend(weights, device=device, dnn=dnn) + stride, pt = model.stride, model.pt + imgsz = check_img_size(imgsz, s=stride) # check image size + half &= pt and device.type != 'cpu' # half precision only supported by PyTorch on CUDA + if pt: + model.model.half() if half else model.model.float() + else: + half = False + batch_size = 1 # export.py models default to batch-size 1 + device = torch.device('cpu') + LOGGER.info(f'Forcing --batch-size 1 square inference shape(1,3,{imgsz},{imgsz}) for non-PyTorch backends') # Data data = check_dataset(data) # check - # Half - half &= device.type != 'cpu' # half precision only supported on CUDA - model.half() if half else model.float() - # Configure model.eval() is_coco = isinstance(data.get('val'), str) and data['val'].endswith('coco/val2017.txt') # COCO dataset @@ -145,11 +147,11 @@ def run(data, # Dataloader if not training: - if device.type != 'cpu': - model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once + if pt and device.type != 'cpu': + model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.model.parameters()))) # warmup pad = 0.0 if task == 'speed' else 0.5 task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images - dataloader = create_dataloader(data[task], imgsz, batch_size, gs, single_cls, pad=pad, rect=True, + dataloader = create_dataloader(data[task], imgsz, batch_size, stride, single_cls, pad=pad, rect=pt, prefix=colorstr(f'{task}: '))[0] seen = 0 @@ -160,32 +162,33 @@ def run(data, dt, p, r, f1, mp, mr, map50, map = [0.0, 0.0, 0.0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 loss = torch.zeros(3, device=device) jdict, stats, ap, ap_class = [], [], [], [] - for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)): + for batch_i, (im, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)): t1 = time_sync() - img = img.to(device, non_blocking=True) - img = img.half() if half else img.float() # uint8 to fp16/32 - img /= 255 # 0 - 255 to 0.0 - 1.0 - targets = targets.to(device) - nb, _, height, width = img.shape # batch size, channels, height, width + if pt: + im = im.to(device, non_blocking=True) + targets = targets.to(device) + im = im.half() if half else im.float() # uint8 to fp16/32 + im /= 255 # 0 - 255 to 0.0 - 1.0 + nb, _, height, width = im.shape # batch size, channels, height, width t2 = time_sync() dt[0] += t2 - t1 - # Run model - out, train_out = model(img, augment=augment) # inference and training outputs + # Inference + out, train_out = model(im) if training else model(im, augment=augment, val=True) # inference, loss outputs dt[1] += time_sync() - t2 - # Compute loss + # Loss if compute_loss: loss += compute_loss([x.float() for x in train_out], targets)[1] # box, obj, cls - # Run NMS + # NMS targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling t3 = time_sync() out = non_max_suppression(out, conf_thres, iou_thres, 
labels=lb, multi_label=True, agnostic=single_cls) dt[2] += time_sync() - t3 - # Statistics per image + # Metrics for si, pred in enumerate(out): labels = targets[targets[:, 0] == si, 1:] nl = len(labels) @@ -202,12 +205,12 @@ def run(data, if single_cls: pred[:, 5] = 0 predn = pred.clone() - scale_coords(img[si].shape[1:], predn[:, :4], shape, shapes[si][1]) # native-space pred + scale_coords(im[si].shape[1:], predn[:, :4], shape, shapes[si][1]) # native-space pred # Evaluate if nl: tbox = xywh2xyxy(labels[:, 1:5]) # target boxes - scale_coords(img[si].shape[1:], tbox, shape, shapes[si][1]) # native-space labels + scale_coords(im[si].shape[1:], tbox, shape, shapes[si][1]) # native-space labels labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels correct = process_batch(predn, labelsn, iouv) if plots: @@ -221,16 +224,16 @@ def run(data, save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / (path.stem + '.txt')) if save_json: save_one_json(predn, jdict, path, class_map) # append to COCO-JSON dictionary - callbacks.run('on_val_image_end', pred, predn, path, names, img[si]) + callbacks.run('on_val_image_end', pred, predn, path, names, im[si]) # Plot images if plots and batch_i < 3: f = save_dir / f'val_batch{batch_i}_labels.jpg' # labels - Thread(target=plot_images, args=(img, targets, paths, f, names), daemon=True).start() + Thread(target=plot_images, args=(im, targets, paths, f, names), daemon=True).start() f = save_dir / f'val_batch{batch_i}_pred.jpg' # predictions - Thread(target=plot_images, args=(img, output_to_target(out), paths, f, names), daemon=True).start() + Thread(target=plot_images, args=(im, output_to_target(out), paths, f, names), daemon=True).start() - # Compute statistics + # Compute metrics stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy if len(stats) and stats[0].any(): p, r, ap, f1, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names) @@ -318,6 +321,7 @@ def parse_opt(): parser.add_argument('--name', default='exp', help='save to project/name') parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') + parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') opt = parser.parse_args() opt.data = check_yaml(opt.data) # check YAML opt.save_json |= opt.data.endswith('coco.yaml') From 7207fe95e5dc368e4402134148c5d0c35361ad88 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 9 Nov 2021 17:55:57 +0100 Subject: [PATCH 475/757] FROM nvcr.io/nvidia/pytorch:21.10-py3 (#5592) --- Dockerfile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Dockerfile b/Dockerfile index 0ee89b432b8f..fe1acb0a6540 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,7 +1,7 @@ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license # Start FROM Nvidia PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch -FROM nvcr.io/nvidia/pytorch:21.05-py3 +FROM nvcr.io/nvidia/pytorch:21.10-py3 # Install linux packages RUN apt update && apt install -y zip htop screen libgl1-mesa-glx @@ -11,8 +11,8 @@ COPY requirements.txt . 
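The base image bump above moves from the 21.05 to the 21.10 NGC PyTorch release. A quick sanity check one might run inside the rebuilt container (illustrative, not part of the patch):

import torch

print(torch.__version__)               # 1.10.x in the 21.10 image
print(torch.cuda.is_available())       # True when the container is started with --gpus all
print(torch.backends.cudnn.version())  # cuDNN version bundled with the image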
RUN python -m pip install --upgrade pip RUN pip uninstall -y nvidia-tensorboard nvidia-tensorboard-plugin-dlprof RUN pip install --no-cache -r requirements.txt coremltools onnx gsutil notebook wandb>=0.12.2 -RUN pip install --no-cache -U torch torchvision numpy -# RUN pip install --no-cache torch==1.9.1+cu111 torchvision==0.10.1+cu111 -f https://download.pytorch.org/whl/torch_stable.html +RUN pip install --no-cache -U torch torchvision numpy Pillow +# RUN pip install --no-cache torch==1.10.0+cu113 torchvision==0.11.1+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html # Create working directory RUN mkdir -p /usr/src/app From 7ebb5e5da673350d4c168cf60d01986a5e0f00cc Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 9 Nov 2021 23:03:19 +0100 Subject: [PATCH 476/757] Add `notebook_init()` to utils/__init__.py (#5488) * Update __init__.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * notebook_init * notebook_init * notebook_init * notebook_init * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * notebook_init * Created using Colaboratory * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- tutorial.ipynb | 30 ++++++++++++++---------------- utils/__init__.py | 18 ++++++++++++++++++ utils/torch_utils.py | 4 +++- 3 files changed, 35 insertions(+), 17 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index b013fe694ba4..7763a26066e2 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -368,7 +368,7 @@ "colab_type": "text" }, "source": [ - "\"Open" + "\"Open" ] }, { @@ -402,26 +402,24 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "e2e839d5-d6fc-409c-e44c-0b0b6aa9319d" + "outputId": "3809e5a9-dd41-4577-fe62-5531abf7cca2" }, "source": [ - "!git clone https://github.com/ultralytics/yolov5 # clone repo\n", + "!git clone https://github.com/ultralytics/yolov5 # clone\n", "%cd yolov5\n", - "%pip install -qr requirements.txt # install dependencies\n", + "%pip install -qr requirements.txt # install\n", "\n", - "import torch\n", - "from IPython.display import Image, clear_output # to display images\n", - "\n", - "clear_output()\n", - "print(f\"Setup complete. Using torch {torch.__version__} ({torch.cuda.get_device_properties(0).name if torch.cuda.is_available() else 'CPU'})\")" + "from yolov5 import utils\n", + "display = utils.notebook_init() # checks" ], - "execution_count": 11, + "execution_count": 2, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ - "Setup complete. 
Using torch 1.10.0+cu102 (Tesla V100-SXM2-16GB)\n" + "YOLOv5 🚀 v6.0-48-g84a8099 torch 1.10.0+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", + "Setup complete ✅\n" ] } ] @@ -458,9 +456,9 @@ }, "source": [ "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images\n", - "Image(filename='runs/detect/exp/zidane.jpg', width=600)" + "display.Image(filename='runs/detect/exp/zidane.jpg', width=600)" ], - "execution_count": 17, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -537,7 +535,7 @@ "torch.hub.download_url_to_file('https://ultralytics.com/assets/coco2017val.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../datasets && rm tmp.zip" ], - "execution_count": 18, + "execution_count": null, "outputs": [ { "output_type": "display_data", @@ -568,7 +566,7 @@ "# Run YOLOv5x on COCO val\n", "!python val.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65 --half" ], - "execution_count": 19, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -726,7 +724,7 @@ "# Train YOLOv5s on COCO128 for 3 epochs\n", "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache" ], - "execution_count": 24, + "execution_count": null, "outputs": [ { "output_type": "stream", diff --git a/utils/__init__.py b/utils/__init__.py index e69de29bb2d1..2b0c896364a2 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -0,0 +1,18 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +utils/initialization +""" + + +def notebook_init(): + # For YOLOv5 notebooks + print('Checking setup...') + from IPython import display # to display images and clear console output + + from utils.general import emojis + from utils.torch_utils import select_device # YOLOv5 imports + + display.clear_output() + select_device(newline=False) + print(emojis('Setup complete ✅')) + return display diff --git a/utils/torch_utils.py b/utils/torch_utils.py index b65b69fe1559..16289104eb48 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -53,7 +53,7 @@ def git_describe(path=Path(__file__).parent): # path must be a directory return '' # not a git repository -def select_device(device='', batch_size=None): +def select_device(device='', batch_size=None, newline=True): # device = 'cpu' or '0' or '0,1,2,3' s = f'YOLOv5 🚀 {git_describe() or date_modified()} torch {torch.__version__} ' # string device = str(device).strip().lower().replace('cuda:', '') # to string, 'cuda:0' to '0' @@ -77,6 +77,8 @@ def select_device(device='', batch_size=None): else: s += 'CPU\n' + if not newline: + s = s.rstrip() LOGGER.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe return torch.device('cuda:0' if cuda else 'cpu') From 27bf4282d3d5879f0f4f7492400675ba93a3db1b Mon Sep 17 00:00:00 2001 From: Ayman Saleh <30412615+ayman-saleh@users.noreply.github.com> Date: Wed, 10 Nov 2021 06:51:30 -0500 Subject: [PATCH 477/757] Fix `check_requirements()` resource warning allocation open file (#5602) * Fix to resource warning allocation; utilize file.open within a context manager * rename fh to f in keeping with naming convention Co-authored-by: Ayman Saleh Co-authored-by: Glenn Jocher --- utils/general.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index a6fe603850c8..8f59d487edfb 100755 --- a/utils/general.py +++ b/utils/general.py @@ -264,7 +264,8 @@ def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), insta if isinstance(requirements, (str, 
Path)): # requirements.txt file file = Path(requirements) assert file.exists(), f"{prefix} {file.resolve()} not found, check failed." - requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(file.open()) if x.name not in exclude] + with file.open() as f: + requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(f) if x.name not in exclude] else: # list or tuple of packages requirements = [x for x in requirements if x not in exclude] From 61c50199a234e950ee16fff199bba0915ab9951d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 10 Nov 2021 15:47:38 +0100 Subject: [PATCH 478/757] Update train, val `tqdm` to fixed width (#5367) * Update tqdm for fixed width * Update val.py * Update val.py * Try ncols= in train.py * NCOLS * NCOLS * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * bar_format * position 0 leave true * exp0 * auto * auto * Cleanup * Cleanup * Cleanup Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- train.py | 11 +++++------ utils/general.py | 5 +++++ val.py | 5 +++-- 3 files changed, 13 insertions(+), 8 deletions(-) diff --git a/train.py b/train.py index fedc55d8be5c..4193365d5a09 100644 --- a/train.py +++ b/train.py @@ -5,7 +5,6 @@ Usage: $ python path/to/train.py --data coco128.yaml --weights yolov5s.pt --img 640 """ - import argparse import math import os @@ -40,10 +39,10 @@ from utils.callbacks import Callbacks from utils.datasets import create_dataloader from utils.downloads import attempt_download -from utils.general import (LOGGER, check_dataset, check_file, check_git_status, check_img_size, check_requirements, - check_suffix, check_yaml, colorstr, get_latest_run, increment_path, init_seeds, - intersect_dicts, labels_to_class_weights, labels_to_image_weights, methods, one_cycle, - print_args, print_mutation, strip_optimizer) +from utils.general import (LOGGER, NCOLS, check_dataset, check_file, check_git_status, check_img_size, + check_requirements, check_suffix, check_yaml, colorstr, get_latest_run, increment_path, + init_seeds, intersect_dicts, labels_to_class_weights, labels_to_image_weights, methods, + one_cycle, print_args, print_mutation, strip_optimizer) from utils.loggers import Loggers from utils.loggers.wandb.wandb_utils import check_wandb_resume from utils.loss import ComputeLoss @@ -289,7 +288,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary pbar = enumerate(train_loader) LOGGER.info(('\n' + '%10s' * 7) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'labels', 'img_size')) if RANK in [-1, 0]: - pbar = tqdm(pbar, total=nb) # progress bar + pbar = tqdm(pbar, total=nb, ncols=NCOLS, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar optimizer.zero_grad() for i, (imgs, targets, paths, _) in pbar: # batch ------------------------------------------------------------- ni = i + nb * epoch # number integrated batches (since train start) diff --git a/utils/general.py b/utils/general.py index 8f59d487edfb..fa56ed49aba8 100755 --- a/utils/general.py +++ b/utils/general.py @@ -11,6 +11,7 @@ import platform import random import re +import shutil import signal import time import urllib @@ -834,3 +835,7 @@ def increment_path(path, exist_ok=False, sep='', mkdir=False): if mkdir: path.mkdir(parents=True, exist_ok=True) # make directory return path + + +# Variables +NCOLS = 0 if is_docker() else shutil.get_terminal_size().columns # terminal window size diff --git a/val.py b/val.py index 2bcbc582a500..62a30ac09d39 100644 --- 
a/val.py +++ b/val.py @@ -26,7 +26,7 @@ from models.common import DetectMultiBackend from utils.callbacks import Callbacks from utils.datasets import create_dataloader -from utils.general import (LOGGER, box_iou, check_dataset, check_img_size, check_requirements, check_yaml, +from utils.general import (LOGGER, NCOLS, box_iou, check_dataset, check_img_size, check_requirements, check_yaml, coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, print_args, scale_coords, xywh2xyxy, xyxy2xywh) from utils.metrics import ConfusionMatrix, ap_per_class @@ -162,7 +162,8 @@ def run(data, dt, p, r, f1, mp, mr, map50, map = [0.0, 0.0, 0.0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 loss = torch.zeros(3, device=device) jdict, stats, ap, ap_class = [], [], [], [] - for batch_i, (im, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)): + pbar = tqdm(dataloader, desc=s, ncols=NCOLS, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar + for batch_i, (im, targets, paths, shapes) in enumerate(pbar): t1 = time_sync() if pt: im = im.to(device, non_blocking=True) From 30bc089cbbe0c38bb09883f01b85ca31afca653b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 10 Nov 2021 16:48:38 +0100 Subject: [PATCH 479/757] Update val.py `speed` and `study` tasks (#5608) Accepts all arguments now by default resolving https://github.com/ultralytics/yolov5/issues/5600 --- val.py | 41 +++++++++++++++++++++-------------------- 1 file changed, 21 insertions(+), 20 deletions(-) diff --git a/val.py b/val.py index 62a30ac09d39..dfabb65b979c 100644 --- a/val.py +++ b/val.py @@ -339,26 +339,27 @@ def main(opt): LOGGER.info(f'WARNING: confidence threshold {opt.conf_thres} >> 0.001 will produce invalid mAP values.') run(**vars(opt)) - elif opt.task == 'speed': # speed benchmarks - # python val.py --task speed --data coco.yaml --batch 1 --weights yolov5n.pt yolov5s.pt... - for w in opt.weights if isinstance(opt.weights, list) else [opt.weights]: - run(opt.data, weights=w, batch_size=opt.batch_size, imgsz=opt.imgsz, conf_thres=.25, iou_thres=.45, - device=opt.device, save_json=False, plots=False) - - elif opt.task == 'study': # run over a range of settings and save/plot - # python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n.pt yolov5s.pt... - x = list(range(256, 1536 + 128, 128)) # x axis (image sizes) - for w in opt.weights if isinstance(opt.weights, list) else [opt.weights]: - f = f'study_{Path(opt.data).stem}_{Path(w).stem}.txt' # filename to save to - y = [] # y axis - for i in x: # img-size - LOGGER.info(f'\nRunning {f} point {i}...') - r, _, t = run(opt.data, weights=w, batch_size=opt.batch_size, imgsz=i, conf_thres=opt.conf_thres, - iou_thres=opt.iou_thres, device=opt.device, save_json=opt.save_json, plots=False) - y.append(r + t) # results and times - np.savetxt(f, y, fmt='%10.4g') # save - os.system('zip -r study.zip study_*.txt') - plot_val_study(x=x) # plot + else: + weights = opt.weights if isinstance(opt.weights, list) else [opt.weights] + opt.half = True # FP16 for fastest results + if opt.task == 'speed': # speed benchmarks + # python val.py --task speed --data coco.yaml --batch 1 --weights yolov5n.pt yolov5s.pt... + opt.conf_thres, opt.iou_thres, opt.save_json = 0.25, 0.45, False + for opt.weights in weights: + run(**vars(opt), plots=False) + + elif opt.task == 'study': # speed vs mAP benchmarks + # python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n.pt yolov5s.pt... 
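Both branches now forward the parsed argparse namespace wholesale, which is what lets `--task speed` and `--task study` accept every val.py argument without re-listing them. A minimal sketch of the `run(**vars(opt))` pattern this relies on (toy function and values, not from the patch):

import argparse

def run(weights='yolov5s.pt', imgsz=640, conf_thres=0.001, plots=True):
    print(weights, imgsz, conf_thres, plots)

opt = argparse.Namespace(weights='yolov5n.pt', imgsz=640, conf_thres=0.25)
run(**vars(opt), plots=False)  # vars() turns the Namespace into a kwargs dict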
+ for opt.weights in weights: + f = f'study_{Path(opt.data).stem}_{Path(opt.weights).stem}.txt' # filename to save to + x, y = list(range(256, 1536 + 128, 128)), [] # x axis (image sizes), y axis + for opt.imgsz in x: # img-size + LOGGER.info(f'\nRunning {f} --imgsz {opt.imgsz}...') + r, _, t = run(**vars(opt), plots=False) + y.append(r + t) # results and times + np.savetxt(f, y, fmt='%10.4g') # save + os.system('zip -r study.zip study_*.txt') + plot_val_study(x=x) # plot if __name__ == "__main__": From 69032519bc575ef6c2033ab0f7d9bc1f9651b251 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 11 Nov 2021 00:15:17 +0100 Subject: [PATCH 480/757] `np.unique()` sort fix for segments (#5609) * `np.unique()` sort fix for segments * Update datasets.py --- utils/datasets.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index 94acaaa92cd7..1ecc7440119f 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -913,10 +913,12 @@ def verify_image_label(args): assert l.shape[1] == 5, f'labels require 5 columns, {l.shape[1]} columns detected' assert (l >= 0).all(), f'negative label values {l[l < 0]}' assert (l[:, 1:] <= 1).all(), f'non-normalized or out of bounds coordinates {l[:, 1:][l[:, 1:] > 1]}' - l = np.unique(l, axis=0) # remove duplicate rows - if len(l) < nl: - segments = np.unique(segments, axis=0) - msg = f'{prefix}WARNING: {im_file}: {nl - len(l)} duplicate labels removed' + _, i = np.unique(l, axis=0, return_index=True) + if len(i) < nl: # duplicate row check + l = l[i] # remove duplicates + if segments: + segments = segments[i] + msg = f'{prefix}WARNING: {im_file}: {nl - len(i)} duplicate labels removed' else: ne = 1 # label empty l = np.zeros((0, 5), dtype=np.float32) From def7a0fd19c1629903c3b073b4df265407719a07 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 11 Nov 2021 12:56:38 +0100 Subject: [PATCH 481/757] Improve plots.py robustness (#5616) * Improve plots.py robustness Addresses issues #5374, #5395, #5611 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/plots.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/utils/plots.py b/utils/plots.py index b5e25d668d22..a5b20803c7be 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -17,7 +17,8 @@ import torch from PIL import Image, ImageDraw, ImageFont -from utils.general import clip_coords, increment_path, is_ascii, is_chinese, user_config_dir, xywh2xyxy, xyxy2xywh +from utils.general import (Timeout, check_requirements, clip_coords, increment_path, is_ascii, is_chinese, try_except, + user_config_dir, xywh2xyxy, xyxy2xywh) from utils.metrics import fitness # Settings @@ -58,7 +59,10 @@ def check_font(font='Arial.ttf', size=10): url = "https://ultralytics.com/assets/" + font.name print(f'Downloading {url} to {font}...') torch.hub.download_url_to_file(url, str(font), progress=False) - return ImageFont.truetype(str(font), size) + try: + return ImageFont.truetype(str(font), size) + except TypeError: + check_requirements('Pillow>=8.4.0') # known issue https://github.com/ultralytics/yolov5/issues/5374 class Annotator: @@ -320,6 +324,8 @@ def plot_val_study(file='', dir='', x=None): # from utils.plots import *; plot_ plt.savefig(f, dpi=300) +@try_except # known issue https://github.com/ultralytics/yolov5/issues/5395 +@Timeout(30) # known issue 
https://github.com/ultralytics/yolov5/issues/5611 def plot_labels(labels, names=(), save_dir=Path('')): # plot dataset labels print('Plotting labels... ')
From d5b21b1ecb66b35af937ac12364aa80733222bd1 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 12 Nov 2021 12:05:25 +0100 Subject: [PATCH 482/757] HUB dataset previews to JPEG (#5627) @kalenmike per our convo yesterday. --- utils/datasets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/utils/datasets.py b/utils/datasets.py index 1ecc7440119f..2a6653bfc02c 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -965,7 +965,7 @@ def hub_ops(f, max_dim=1920): r = max_dim / max(im.height, im.width) # ratio if r < 1.0: # image too large im = im.resize((int(im.width * r), int(im.height * r))) - im.save(f_new, quality=75) # save + im.save(f_new, 'JPEG', quality=75, optimize=True) # save except Exception as e: # use OpenCV print(f'WARNING: HUB ops PIL failure {f}: {e}') im = cv2.imread(f)
From 7473f0f95dbc9ef9dd1706274906c99eac2ee2f9 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 12 Nov 2021 14:48:55 +0100 Subject: [PATCH 483/757] DDP `WORLD_SIZE`-safe dataloader workers (#5631) * WORLD_SIZE-safe workers * Update with DDP comment --- train.py | 4 ++-- utils/datasets.py | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-)
diff --git a/train.py b/train.py index 4193365d5a09..96b3c2fdc516 100644 --- a/train.py +++ b/train.py @@ -266,7 +266,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary stopper = EarlyStopping(patience=opt.patience) compute_loss = ComputeLoss(model) # init loss class LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n' - f'Using {train_loader.num_workers} dataloader workers\n' + f'Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n' f"Logging results to {colorstr('bold', save_dir)}\n" f'Starting training for {epochs} epochs...') for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------ @@ -460,7 +460,7 @@ def parse_opt(known=False): parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class') parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer') parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode') - parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers') + parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') parser.add_argument('--project', default=ROOT / 'runs/train', help='save to project/name') parser.add_argument('--name', default='exp', help='save to project/name') parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
diff --git a/utils/datasets.py b/utils/datasets.py index 2a6653bfc02c..f153db0d7104 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -34,6 +34,7 @@ HELP_URL = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' IMG_FORMATS = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo'] # acceptable image suffixes VID_FORMATS = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes +WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) # DDP NUM_THREADS = min(8, os.cpu_count()) # number of multiprocessing threads # Get orientation exif tag @@ -107,7 +108,7 @@ def create_dataloader(path, imgsz, batch_size, stride, single_cls=False, hyp=Non prefix=prefix) batch_size
= min(batch_size, len(dataset)) - nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, workers]) # number of workers + nw = min([os.cpu_count() // WORLD_SIZE, batch_size if batch_size > 1 else 0, workers]) # number of workers sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None loader = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader # Use torch.utils.data.DataLoader() if dataset.properties will update during training else InfiniteDataLoader() From 09d170381c67032f2daaeeb9defc5a67e59265aa Mon Sep 17 00:00:00 2001 From: Werner Duvaud <40442230+werner-duvaud@users.noreply.github.com> Date: Sat, 13 Nov 2021 12:07:32 +0000 Subject: [PATCH 484/757] Default DataLoader `shuffle=True` for training (#5623) * Fix shuffle DataLoader argument * Add shuffle argument * Disable shuffle when rect * Cleanup, add rect warning * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Cleanup2 * Cleanup3 Co-authored-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- train.py | 2 +- utils/datasets.py | 41 +++++++++++++++++++++-------------------- 2 files changed, 22 insertions(+), 21 deletions(-) diff --git a/train.py b/train.py index 96b3c2fdc516..91bcd1e1e2e8 100644 --- a/train.py +++ b/train.py @@ -212,7 +212,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary train_loader, dataset = create_dataloader(train_path, imgsz, batch_size // WORLD_SIZE, gs, single_cls, hyp=hyp, augment=True, cache=opt.cache, rect=opt.rect, rank=LOCAL_RANK, workers=workers, image_weights=opt.image_weights, quad=opt.quad, - prefix=colorstr('train: ')) + prefix=colorstr('train: '), shuffle=True) mlc = int(np.concatenate(dataset.labels, 0)[:, 0].max()) # max label class nb = len(train_loader) # number of batches assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. 
Possible class labels are 0-{nc - 1}' diff --git a/utils/datasets.py b/utils/datasets.py index f153db0d7104..3504998b125d 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -22,7 +22,7 @@ import torch.nn.functional as F import yaml from PIL import ExifTags, Image, ImageOps -from torch.utils.data import Dataset +from torch.utils.data import DataLoader, Dataset, dataloader, distributed from tqdm import tqdm from utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective @@ -93,13 +93,15 @@ def exif_transpose(image): def create_dataloader(path, imgsz, batch_size, stride, single_cls=False, hyp=None, augment=False, cache=False, pad=0.0, - rect=False, rank=-1, workers=8, image_weights=False, quad=False, prefix=''): - # Make sure only the first process in DDP process the dataset first, and the following others can use the cache - with torch_distributed_zero_first(rank): + rect=False, rank=-1, workers=8, image_weights=False, quad=False, prefix='', shuffle=False): + if rect and shuffle: + LOGGER.warning('WARNING: --rect is incompatible with DataLoader shuffle, setting shuffle=False') + shuffle = False + with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP dataset = LoadImagesAndLabels(path, imgsz, batch_size, - augment=augment, # augment images - hyp=hyp, # augmentation hyperparameters - rect=rect, # rectangular training + augment=augment, # augmentation + hyp=hyp, # hyperparameters + rect=rect, # rectangular batches cache_images=cache, single_cls=single_cls, stride=int(stride), @@ -109,19 +111,18 @@ def create_dataloader(path, imgsz, batch_size, stride, single_cls=False, hyp=Non batch_size = min(batch_size, len(dataset)) nw = min([os.cpu_count() // WORLD_SIZE, batch_size if batch_size > 1 else 0, workers]) # number of workers - sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None - loader = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader - # Use torch.utils.data.DataLoader() if dataset.properties will update during training else InfiniteDataLoader() - dataloader = loader(dataset, - batch_size=batch_size, - num_workers=nw, - sampler=sampler, - pin_memory=True, - collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn) - return dataloader, dataset - - -class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader): + sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) + loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates + return loader(dataset, + batch_size=batch_size, + shuffle=shuffle and sampler is None, + num_workers=nw, + sampler=sampler, + pin_memory=True, + collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn), dataset + + +class InfiniteDataLoader(dataloader.DataLoader): """ Dataloader that reuses workers Uses same syntax as vanilla DataLoader From 80cfaf40ef1923183820a2d88d33b7c3a6217c54 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 13 Nov 2021 15:40:18 +0100 Subject: [PATCH 485/757] AutoAnchor and AutoBatch `LOGGER` (#5635) * AutoBatch, AutoAnchor `LOGGER` * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update autoanchor.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/autoanchor.py | 50 +++++++++++++++++++++++---------------------- utils/autobatch.py | 14 ++++++------- 
utils/plots.py | 6 +++--- 3 files changed, 36 insertions(+), 34 deletions(-) diff --git a/utils/autoanchor.py b/utils/autoanchor.py index af0aa7de65ac..eef8f6499194 100644 --- a/utils/autoanchor.py +++ b/utils/autoanchor.py @@ -10,7 +10,9 @@ import yaml from tqdm import tqdm -from utils.general import colorstr +from utils.general import LOGGER, colorstr, emojis + +PREFIX = colorstr('AutoAnchor: ') def check_anchor_order(m): @@ -19,14 +21,12 @@ def check_anchor_order(m): da = a[-1] - a[0] # delta a ds = m.stride[-1] - m.stride[0] # delta s if da.sign() != ds.sign(): # same order - print('Reversing anchor order') + LOGGER.info(f'{PREFIX}Reversing anchor order') m.anchors[:] = m.anchors.flip(0) def check_anchors(dataset, model, thr=4.0, imgsz=640): # Check anchor fit to data, recompute if necessary - prefix = colorstr('autoanchor: ') - print(f'\n{prefix}Analyzing anchors... ', end='') m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1] # Detect() shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True) scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1)) # augment scale @@ -42,23 +42,24 @@ def metric(k): # compute metric anchors = m.anchors.clone() * m.stride.to(m.anchors.device).view(-1, 1, 1) # current anchors bpr, aat = metric(anchors.cpu().view(-1, 2)) - print(f'anchors/target = {aat:.2f}, Best Possible Recall (BPR) = {bpr:.4f}', end='') - if bpr < 0.98: # threshold to recompute - print('. Attempting to improve anchors, please wait...') + s = f'\n{PREFIX}{aat:.2f} anchors/target, {bpr:.3f} Best Possible Recall (BPR). ' + if bpr > 0.98: # threshold to recompute + LOGGER.info(emojis(f'{s}Current anchors are a good fit to dataset ✅')) + else: + LOGGER.info(emojis(f'{s}Anchors are a poor fit to dataset ⚠️, attempting to improve...')) na = m.anchors.numel() // 2 # number of anchors try: anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False) except Exception as e: - print(f'{prefix}ERROR: {e}') + LOGGER.info(f'{PREFIX}ERROR: {e}') new_bpr = metric(anchors)[0] if new_bpr > bpr: # replace anchors anchors = torch.tensor(anchors, device=m.anchors.device).type_as(m.anchors) m.anchors[:] = anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1) # loss check_anchor_order(m) - print(f'{prefix}New anchors saved to model. Update model *.yaml to use these anchors in the future.') + LOGGER.info(f'{PREFIX}New anchors saved to model. Update model *.yaml to use these anchors in the future.') else: - print(f'{prefix}Original anchors better than new anchors. Proceeding with original anchors.') - print('') # newline + LOGGER.info(f'{PREFIX}Original anchors better than new anchors. 
Proceeding with original anchors.') def kmean_anchors(dataset='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True): @@ -81,7 +82,6 @@ def kmean_anchors(dataset='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen from scipy.cluster.vq import kmeans thr = 1 / thr - prefix = colorstr('autoanchor: ') def metric(k, wh): # compute metrics r = wh[:, None] / k[None] @@ -93,15 +93,17 @@ def anchor_fitness(k): # mutation fitness _, best = metric(torch.tensor(k, dtype=torch.float32), wh) return (best * (best > thr).float()).mean() # fitness - def print_results(k): + def print_results(k, verbose=True): k = k[np.argsort(k.prod(1))] # sort small to large x, best = metric(k, wh0) bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n # best possible recall, anch > thr - print(f'{prefix}thr={thr:.2f}: {bpr:.4f} best possible recall, {aat:.2f} anchors past thr') - print(f'{prefix}n={n}, img_size={img_size}, metric_all={x.mean():.3f}/{best.mean():.3f}-mean/best, ' - f'past_thr={x[x > thr].mean():.3f}-mean: ', end='') + s = f'{PREFIX}thr={thr:.2f}: {bpr:.4f} best possible recall, {aat:.2f} anchors past thr\n' \ + f'{PREFIX}n={n}, img_size={img_size}, metric_all={x.mean():.3f}/{best.mean():.3f}-mean/best, ' \ + f'past_thr={x[x > thr].mean():.3f}-mean: ' for i, x in enumerate(k): - print('%i,%i' % (round(x[0]), round(x[1])), end=', ' if i < len(k) - 1 else '\n') # use in *.cfg + s += '%i,%i, ' % (round(x[0]), round(x[1])) + if verbose: + LOGGER.info(s[:-2]) return k if isinstance(dataset, str): # *.yaml file @@ -117,19 +119,19 @@ def print_results(k): # Filter i = (wh0 < 3.0).any(1).sum() if i: - print(f'{prefix}WARNING: Extremely small objects found. {i} of {len(wh0)} labels are < 3 pixels in size.') + LOGGER.info(f'{PREFIX}WARNING: Extremely small objects found. 
{i} of {len(wh0)} labels are < 3 pixels in size.') wh = wh0[(wh0 >= 2.0).any(1)] # filter > 2 pixels # wh = wh * (np.random.rand(wh.shape[0], 1) * 0.9 + 0.1) # multiply by random scale 0-1 # Kmeans calculation - print(f'{prefix}Running kmeans for {n} anchors on {len(wh)} points...') + LOGGER.info(f'{PREFIX}Running kmeans for {n} anchors on {len(wh)} points...') s = wh.std(0) # sigmas for whitening k, dist = kmeans(wh / s, n, iter=30) # points, mean distance - assert len(k) == n, f'{prefix}ERROR: scipy.cluster.vq.kmeans requested {n} points but returned only {len(k)}' + assert len(k) == n, f'{PREFIX}ERROR: scipy.cluster.vq.kmeans requested {n} points but returned only {len(k)}' k *= s wh = torch.tensor(wh, dtype=torch.float32) # filtered wh0 = torch.tensor(wh0, dtype=torch.float32) # unfiltered - k = print_results(k) + k = print_results(k, verbose=False) # Plot # k, d = [None] * 20, [None] * 20 @@ -146,7 +148,7 @@ def print_results(k): # Evolve npr = np.random f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1 # fitness, generations, mutation prob, sigma - pbar = tqdm(range(gen), desc=f'{prefix}Evolving anchors with Genetic Algorithm:') # progress bar + pbar = tqdm(range(gen), desc=f'{PREFIX}Evolving anchors with Genetic Algorithm:') # progress bar for _ in pbar: v = np.ones(sh) while (v == 1).all(): # mutate until a change occurs (prevent duplicates) @@ -155,8 +157,8 @@ def print_results(k): fg = anchor_fitness(kg) if fg > f: f, k = fg, kg.copy() - pbar.desc = f'{prefix}Evolving anchors with Genetic Algorithm: fitness = {f:.4f}' + pbar.desc = f'{PREFIX}Evolving anchors with Genetic Algorithm: fitness = {f:.4f}' if verbose: - print_results(k) + print_results(k, verbose) return print_results(k) diff --git a/utils/autobatch.py b/utils/autobatch.py index 3f2b4d1a4c38..cb94f041e95d 100644 --- a/utils/autobatch.py +++ b/utils/autobatch.py @@ -9,7 +9,7 @@ import torch from torch.cuda import amp -from utils.general import colorstr +from utils.general import LOGGER, colorstr from utils.torch_utils import profile @@ -27,11 +27,11 @@ def autobatch(model, imgsz=640, fraction=0.9, batch_size=16): # model = torch.hub.load('ultralytics/yolov5', 'yolov5s', autoshape=False) # print(autobatch(model)) - prefix = colorstr('autobatch: ') - print(f'{prefix}Computing optimal batch size for --imgsz {imgsz}') + prefix = colorstr('AutoBatch: ') + LOGGER.info(f'{prefix}Computing optimal batch size for --imgsz {imgsz}') device = next(model.parameters()).device # get model device if device.type == 'cpu': - print(f'{prefix}CUDA not detected, using default CPU batch-size {batch_size}') + LOGGER.info(f'{prefix}CUDA not detected, using default CPU batch-size {batch_size}') return batch_size d = str(device).upper() # 'CUDA:0' @@ -40,18 +40,18 @@ def autobatch(model, imgsz=640, fraction=0.9, batch_size=16): r = torch.cuda.memory_reserved(device) / 1024 ** 3 # (GiB) a = torch.cuda.memory_allocated(device) / 1024 ** 3 # (GiB) f = t - (r + a) # free inside reserved - print(f'{prefix}{d} ({properties.name}) {t:.2f}G total, {r:.2f}G reserved, {a:.2f}G allocated, {f:.2f}G free') + LOGGER.info(f'{prefix}{d} ({properties.name}) {t:.2f}G total, {r:.2f}G reserved, {a:.2f}G allocated, {f:.2f}G free') batch_sizes = [1, 2, 4, 8, 16] try: img = [torch.zeros(b, 3, imgsz, imgsz) for b in batch_sizes] y = profile(img, model, n=3, device=device) except Exception as e: - print(f'{prefix}{e}') + LOGGER.warning(f'{prefix}{e}') y = [x[2] for x in y if x] # memory [2] batch_sizes = batch_sizes[:len(y)] p = np.polyfit(batch_sizes, y, deg=1) # 
first degree polynomial fit b = int((f * fraction - p[1]) / p[0]) # y intercept (optimal batch size) - print(f'{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%)') + LOGGER.info(f'{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%)') return b diff --git a/utils/plots.py b/utils/plots.py index a5b20803c7be..9919e4d9d88f 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -17,8 +17,8 @@ import torch from PIL import Image, ImageDraw, ImageFont -from utils.general import (Timeout, check_requirements, clip_coords, increment_path, is_ascii, is_chinese, try_except, - user_config_dir, xywh2xyxy, xyxy2xywh) +from utils.general import (LOGGER, Timeout, check_requirements, clip_coords, increment_path, is_ascii, is_chinese, + try_except, user_config_dir, xywh2xyxy, xyxy2xywh) from utils.metrics import fitness # Settings @@ -328,7 +328,7 @@ def plot_val_study(file='', dir='', x=None): # from utils.plots import *; plot_ @Timeout(30) # known issue https://github.com/ultralytics/yolov5/issues/5611 def plot_labels(labels, names=(), save_dir=Path('')): # plot dataset labels - print('Plotting labels... ') + LOGGER.info(f"Plotting labels to {save_dir / 'labels.jpg'}... ") c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes nc = int(c.max() + 1) # number of classes x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height']) From 540ef0dd30be9bcf6882c9625c49f61c5c764f52 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Sun, 14 Nov 2021 17:56:53 +0530 Subject: [PATCH 486/757] W&B refactor, handle exceptions, CI example (#5618) * handle exceptions| attempt CI * update * Pre-commit manual run * yaml one-liner * Update ci-testing.yml * Comment W&B CI Leave as example for future separate CI * Update ci-testing.yml Co-authored-by: Glenn Jocher --- .github/workflows/ci-testing.yml | 5 ++++- utils/loggers/wandb/log_dataset.py | 4 ++++ utils/loggers/wandb/wandb_utils.py | 16 +++++++++++----- 3 files changed, 19 insertions(+), 6 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index abfe21ef8726..5db6d41f4bcc 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -51,12 +51,15 @@ jobs: run: | python -m pip install --upgrade pip pip install -qr requirements.txt -f https://download.pytorch.org/whl/cpu/torch_stable.html - pip install -q onnx tensorflow-cpu keras==2.6.0 # for export + pip install -q onnx tensorflow-cpu keras==2.6.0 # wandb # extras python --version pip --version pip list shell: bash + # - name: W&B login + # run: wandb login 345011b3fb26dc8337fd9b20e53857c1d403f2aa + - name: Download data run: | # curl -L -o tmp.zip https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip diff --git a/utils/loggers/wandb/log_dataset.py b/utils/loggers/wandb/log_dataset.py index 8447272cdb48..06e81fb69307 100644 --- a/utils/loggers/wandb/log_dataset.py +++ b/utils/loggers/wandb/log_dataset.py @@ -2,11 +2,15 @@ from wandb_utils import WandbLogger +from utils.general import LOGGER + WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' def create_dataset_artifact(opt): logger = WandbLogger(opt, None, job_type='Dataset Creation') # TODO: return value unused + if not logger.wandb: + LOGGER.info("install wandb using `pip install wandb` to log the dataset") if __name__ == '__main__': diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index a71bc6ce96d2..47757dd1a74e 100644 --- a/utils/loggers/wandb/wandb_utils.py 
+++ b/utils/loggers/wandb/wandb_utils.py @@ -17,7 +17,7 @@ sys.path.append(str(ROOT)) # add ROOT to PATH from utils.datasets import LoadImagesAndLabels, img2label_paths -from utils.general import check_dataset, check_file +from utils.general import LOGGER, check_dataset, check_file try: import wandb @@ -203,7 +203,7 @@ def check_and_upload_dataset(self, opt): config_path = self.log_dataset_artifact(opt.data, opt.single_cls, 'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem) - print("Created dataset config file ", config_path) + LOGGER.info(f"Created dataset config file {config_path}") with open(config_path, errors='ignore') as f: wandb_data_dict = yaml.safe_load(f) return wandb_data_dict @@ -316,7 +316,7 @@ def log_model(self, path, opt, epoch, fitness_score, best_model=False): model_artifact.add_file(str(path / 'last.pt'), name='last.pt') wandb.log_artifact(model_artifact, aliases=['latest', 'last', 'epoch ' + str(self.current_epoch), 'best' if best_model else '']) - print("Saving model artifact on epoch ", epoch + 1) + LOGGER.info(f"Saving model artifact on epoch {epoch + 1}") def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=False): """ @@ -368,7 +368,7 @@ def map_val_table_path(self): Useful for - referencing artifacts for evaluation. """ self.val_table_path_map = {} - print("Mapping dataset") + LOGGER.info("Mapping dataset") for i, data in enumerate(tqdm(self.val_table.data)): self.val_table_path_map[data[3]] = data[0] @@ -488,7 +488,13 @@ def end_epoch(self, best_result=False): with all_logging_disabled(): if self.bbox_media_panel_images: self.log_dict["BoundingBoxDebugger"] = self.bbox_media_panel_images - wandb.log(self.log_dict) + try: + wandb.log(self.log_dict) + except BaseException as e: + LOGGER.info(f"An error occurred in wandb logger. The training will proceed without interruption. More info\n{e}") + self.wandb_run.finish() + self.wandb_run = None + self.log_dict = {} self.bbox_media_panel_images = [] if self.result_artifact: From c2523be634a94da2b1b2a43c11b25827a0de990d Mon Sep 17 00:00:00 2001 From: Ding Yiwei <16083536+dingyiwei@users.noreply.github.com> Date: Mon, 15 Nov 2021 17:06:18 +0800 Subject: [PATCH 487/757] Replace 2 `transpose()` with 1 `permute` in TransformerBlock()` (#5645) --- models/common.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/models/common.py b/models/common.py index 3ea7ba5477a6..3930c8e7b2df 100644 --- a/models/common.py +++ b/models/common.py @@ -86,8 +86,8 @@ def forward(self, x): if self.conv is not None: x = self.conv(x) b, _, w, h = x.shape - p = x.flatten(2).unsqueeze(0).transpose(0, 3).squeeze(3) - return self.tr(p + self.linear(p)).unsqueeze(3).transpose(0, 3).reshape(b, self.c2, w, h) + p = x.flatten(2).permute(2, 0, 1) + return self.tr(p + self.linear(p)).permute(1, 2, 0).reshape(b, self.c2, w, h) class Bottleneck(nn.Module): From fb19561f9869714cd639c7ce58281ea0d5592dff Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 15 Nov 2021 20:10:29 +0100 Subject: [PATCH 488/757] Bump pip from 19.2 to 21.1 in /utils/google_app_engine (#5661) Bumps [pip](https://github.com/pypa/pip) from 19.2 to 21.1. - [Release notes](https://github.com/pypa/pip/releases) - [Changelog](https://github.com/pypa/pip/blob/main/NEWS.rst) - [Commits](https://github.com/pypa/pip/compare/19.2...21.1) --- updated-dependencies: - dependency-name: pip dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- utils/google_app_engine/additional_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/google_app_engine/additional_requirements.txt b/utils/google_app_engine/additional_requirements.txt index 2f81c8b40056..42d7ffc0eed8 100644 --- a/utils/google_app_engine/additional_requirements.txt +++ b/utils/google_app_engine/additional_requirements.txt @@ -1,4 +1,4 @@ # add these requirements in your app on top of the existing ones -pip==19.2 +pip==21.1 Flask==1.0.2 gunicorn==19.9.0 From e80a09bbfa1ddb1097fdc7164d84dedeb3d95388 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 15 Nov 2021 20:15:50 +0100 Subject: [PATCH 489/757] Update ci-testing.yml to Python 3.9 (#5660) --- .github/workflows/ci-testing.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 5db6d41f4bcc..b2bc040191e7 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -19,7 +19,7 @@ jobs: fail-fast: false matrix: os: [ ubuntu-latest, macos-latest, windows-latest ] - python-version: [ 3.8 ] + python-version: [ 3.9 ] model: [ 'yolov5n' ] # models to test # Timeout: https://stackoverflow.com/a/59076067/4521646 From 0453b758e7ff645528ae52aa85228f3672ff7594 Mon Sep 17 00:00:00 2001 From: Nrupatunga Date: Tue, 16 Nov 2021 17:06:00 +0530 Subject: [PATCH 490/757] TFDetect dynamic anchor count assignment fix (#5668) * fix tf.py when anchors not equal to 3 * revert the isort fix * update the fix to use anchor attribute available already --- models/tf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/tf.py b/models/tf.py index 6de0245cfe50..96482dd37bea 100644 --- a/models/tf.py +++ b/models/tf.py @@ -233,7 +233,7 @@ def call(self, inputs): xy /= tf.constant([[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32) wh /= tf.constant([[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32) y = tf.concat([xy, wh, y[..., 4:]], -1) - z.append(tf.reshape(y, [-1, 3 * ny * nx, self.no])) + z.append(tf.reshape(y, [-1, self.na * ny * nx, self.no])) return x if self.training else (tf.concat(z, 1), x) From 47fac9ff73aceedd267db1e734a98de122fc9430 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 16 Nov 2021 13:58:15 +0100 Subject: [PATCH 491/757] Update train.py comment to 'Model attributes' (#5670) --- train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/train.py b/train.py index 91bcd1e1e2e8..2838936d2d78 100644 --- a/train.py +++ b/train.py @@ -243,7 +243,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary if cuda and RANK != -1: model = DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK) - # Model parameters + # Model attributes nl = de_parallel(model).model[-1].nl # number of detection layers (to scale hyps) hyp['box'] *= 3 / nl # scale to layers hyp['cls'] *= nc / 80 * 3 / nl # scale to classes and layers From 562191f5756273aca54225903f5933f7683daade Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 17 Nov 2021 15:18:50 +0100 Subject: [PATCH 492/757] Update export.py docstring (#5689) --- export.py | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/export.py b/export.py index 4cf30e34fc7b..b3ab4df25ae3 100644 --- a/export.py +++ b/export.py @@ -1,14 +1,26 @@ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ -Export a YOLOv5 PyTorch model to TorchScript, ONNX, CoreML, TensorFlow 
(saved_model, pb, TFLite, TF.js,) formats -TensorFlow exports authored by https://github.com/zldrobit +Export a YOLOv5 PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit + +Format | Example | Export `include=(...)` argument +--- | --- | --- +PyTorch | yolov5s.pt | - +TorchScript | yolov5s.torchscript.pt | 'torchscript' +ONNX | yolov5s.onnx | 'onnx' +CoreML | yolov5s.mlmodel | 'coreml' +TensorFlow SavedModel | yolov5s_saved_model/ | 'saved_model' +TensorFlow GraphDef | yolov5s.pb | 'pb' +TensorFlow Lite | yolov5s.tflite | 'tflite' +TensorFlow.js | yolov5s_web_model/ | 'tfjs' Usage: $ python path/to/export.py --weights yolov5s.pt --include torchscript onnx coreml saved_model pb tflite tfjs Inference: $ python path/to/detect.py --weights yolov5s.pt - yolov5s.onnx (must export with --dynamic) + yolov5s.torchscript.pt + yolov5s.onnx + yolov5s.mlmodel (under development) yolov5s_saved_model yolov5s.pb yolov5s.tflite From 8df64a912274ea3a82df2f96f0e3c3ab95713502 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 18 Nov 2021 14:53:42 +0100 Subject: [PATCH 493/757] `NUM_THREADS` leave at least 1 CPU free (#5706) Updated strategy leaves at least 1 cpu free to avoid system overloads. Partially addresses https://github.com/ultralytics/yolov5/issues/5685 --- utils/datasets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/datasets.py b/utils/datasets.py index 3504998b125d..68b1e634bebf 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -35,7 +35,7 @@ IMG_FORMATS = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo'] # acceptable image suffixes VID_FORMATS = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) # DPP -NUM_THREADS = min(8, os.cpu_count()) # number of multiprocessing threads +NUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of multiprocessing threads # Get orientation exif tag for orientation in ExifTags.TAGS.keys(): From eb51ffdcac466e553607c470b0e8f19d5a61da67 Mon Sep 17 00:00:00 2001 From: Jirka Borovec Date: Fri, 19 Nov 2021 13:32:53 +0100 Subject: [PATCH 494/757] Prune unused imports (#5711) * prune unused imports * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- models/tf.py | 1 - utils/loggers/wandb/wandb_utils.py | 1 - 2 files changed, 2 deletions(-) diff --git a/models/tf.py b/models/tf.py index 96482dd37bea..84359c445797 100644 --- a/models/tf.py +++ b/models/tf.py @@ -11,7 +11,6 @@ """ import argparse -import logging import sys from copy import deepcopy from pathlib import Path diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index 47757dd1a74e..a4cbaee240d5 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -7,7 +7,6 @@ from pathlib import Path from typing import Dict -import pkg_resources as pkg import yaml from tqdm import tqdm From 36d12a500eae4561d09d4955e1b50b12e57bf6c6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 20 Nov 2021 01:04:56 +0100 Subject: [PATCH 495/757] Explicitly compute TP, FP in val.py (#5727) --- utils/metrics.py | 21 +++++++++++++++------ val.py | 2 +- 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/utils/metrics.py b/utils/metrics.py index 2e0e0c65e63d..3f1dc559c75a 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -18,7 +18,7 @@ 
def fitness(x): return (x[:, :4] * w).sum(1) -def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=()): +def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=(), eps=1e-16): """ Compute the average precision, given the recall and precision curves. Source: https://github.com/rafaelpadilla/Object-Detection-Metrics. # Arguments @@ -37,7 +37,7 @@ def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names tp, conf, pred_cls = tp[i], conf[i], pred_cls[i] # Find unique classes - unique_classes = np.unique(target_cls) + unique_classes, nt = np.unique(target_cls, return_counts=True) nc = unique_classes.shape[0] # number of classes, number of detections # Create Precision-Recall curve and compute AP for each class @@ -45,7 +45,7 @@ def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000)) for ci, c in enumerate(unique_classes): i = pred_cls == c - n_l = (target_cls == c).sum() # number of labels + n_l = nt[ci] # number of labels n_p = i.sum() # number of predictions if n_p == 0 or n_l == 0: @@ -56,7 +56,7 @@ def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names tpc = tp[i].cumsum(0) # Recall - recall = tpc / (n_l + 1e-16) # recall curve + recall = tpc / (n_l + eps) # recall curve r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0) # negative x, xp because xp decreases # Precision @@ -70,7 +70,7 @@ def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names py.append(np.interp(px, mrec, mpre)) # precision at mAP@0.5 # Compute F1 (harmonic mean of precision and recall) - f1 = 2 * p * r / (p + r + 1e-16) + f1 = 2 * p * r / (p + r + eps) names = [v for k, v in names.items() if k in unique_classes] # list: only classes that have data names = {i: v for i, v in enumerate(names)} # to dict if plot: @@ -80,7 +80,10 @@ def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names plot_mc_curve(px, r, Path(save_dir) / 'R_curve.png', names, ylabel='Recall') i = f1.mean(0).argmax() # max F1 index - return p[:, i], r[:, i], ap, f1[:, i], unique_classes.astype('int32') + p, r, f1 = p[:, i], r[:, i], f1[:, i] + tp = (r * nt).round() # true positives + fp = (tp / (p + eps) - tp).round() # false positives + return tp, fp, p, r, f1, ap, unique_classes.astype('int32') def compute_ap(recall, precision): @@ -162,6 +165,12 @@ def process_batch(self, detections, labels): def matrix(self): return self.matrix + def tp_fp(self): + tp = self.matrix.diagonal() # true positives + fp = self.matrix.sum(1) - tp # false positives + # fn = self.matrix.sum(0) - tp # false negatives (missed detections) + return tp[:-1], fp[:-1] # remove background class + def plot(self, normalize=True, save_dir='', names=()): try: import seaborn as sn diff --git a/val.py b/val.py index dfabb65b979c..cc6ff027b070 100644 --- a/val.py +++ b/val.py @@ -237,7 +237,7 @@ def run(data, # Compute metrics stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy if len(stats) and stats[0].any(): - p, r, ap, f1, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names) + tp, fp, p, r, f1, ap, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names) ap50, ap = ap[:, 0], ap.mean(1) # AP@0.5, AP@0.5:0.95 mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean() nt = np.bincount(stats[3].astype(np.int64), minlength=nc) # number of targets per class From 
46daa7b78d281f0bf5ab512d170654259e4009e4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 20 Nov 2021 01:11:36 +0100 Subject: [PATCH 496/757] Remove `.autoshape()` method (#5694) --- hubconf.py | 3 ++- models/common.py | 8 +++----- models/yolo.py | 9 +-------- 3 files changed, 6 insertions(+), 14 deletions(-) diff --git a/hubconf.py b/hubconf.py index 3488fef76ac5..03335f7906f0 100644 --- a/hubconf.py +++ b/hubconf.py @@ -27,6 +27,7 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo """ from pathlib import Path + from models.common import AutoShape from models.experimental import attempt_load from models.yolo import Model from utils.downloads import attempt_download @@ -55,7 +56,7 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo if len(ckpt['model'].names) == classes: model.names = ckpt['model'].names # set class names attribute if autoshape: - model = model.autoshape() # for file/URI/PIL/cv2/np inputs and NMS + model = AutoShape(model) # for file/URI/PIL/cv2/np inputs and NMS return model.to(device) except Exception as e: diff --git a/models/common.py b/models/common.py index 3930c8e7b2df..b9604f3c1cbd 100644 --- a/models/common.py +++ b/models/common.py @@ -23,7 +23,7 @@ from utils.general import (LOGGER, check_requirements, check_suffix, colorstr, increment_path, make_divisible, non_max_suppression, scale_coords, xywh2xyxy, xyxy2xywh) from utils.plots import Annotator, colors, save_one_box -from utils.torch_utils import time_sync +from utils.torch_utils import copy_attr, time_sync def autopad(k, p=None): # kernel, padding @@ -405,12 +405,10 @@ class AutoShape(nn.Module): def __init__(self, model): super().__init__() + LOGGER.info('Adding AutoShape... ') + copy_attr(self, model, include=('yaml', 'nc', 'hyp', 'names', 'stride', 'abc'), exclude=()) # copy attributes self.model = model.eval() - def autoshape(self): - LOGGER.info('AutoShape already enabled, skipping... ') # model already converted to model.autoshape() - return self - def _apply(self, fn): # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers self = super()._apply(fn) diff --git a/models/yolo.py b/models/yolo.py index 305f0ca0cc88..db3d711a81fa 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -22,8 +22,7 @@ from utils.autoanchor import check_anchor_order from utils.general import LOGGER, check_version, check_yaml, make_divisible, print_args from utils.plots import feature_visualization -from utils.torch_utils import (copy_attr, fuse_conv_and_bn, initialize_weights, model_info, scale_img, select_device, - time_sync) +from utils.torch_utils import fuse_conv_and_bn, initialize_weights, model_info, scale_img, select_device, time_sync try: import thop # for FLOPs computation @@ -226,12 +225,6 @@ def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers self.info() return self - def autoshape(self): # add AutoShape module - LOGGER.info('Adding AutoShape... 
') - m = AutoShape(self) # wrap model - copy_attr(m, self, include=('yaml', 'nc', 'hyp', 'names', 'stride'), exclude=()) # copy attributes - return m - def info(self, verbose=False, img_size=640): # print model information model_info(self, verbose, img_size) From 5185981993737861575adb07f2817a74fa4b2baa Mon Sep 17 00:00:00 2001 From: IL2006 <94582889+IL2006@users.noreply.github.com> Date: Sat, 20 Nov 2021 08:26:48 +0800 Subject: [PATCH 497/757] SECURITY.md (#5695) * SECURITY_1.md * Delete SECURITY.md Co-authored-by: Glenn Jocher From d6ae1c835a0ea61268a29cb4de3dbd061828d386 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 21 Nov 2021 19:26:07 +0100 Subject: [PATCH 498/757] Created using Colaboratory --- tutorial.ipynb | 27 ++++++++++++++++++++++++--- 1 file changed, 24 insertions(+), 3 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 7763a26066e2..9440ca8b1788 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -368,7 +368,7 @@ "colab_type": "text" }, "source": [ - "\"Open" + "\"Open" ] }, { @@ -412,7 +412,7 @@ "from yolov5 import utils\n", "display = utils.notebook_init() # checks" ], - "execution_count": 2, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -1081,6 +1081,27 @@ ], "execution_count": null, "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "VTRwsvA9u7ln" + }, + "source": [ + "# TensorRT \n", + "# https://developer.nvidia.com/nvidia-tensorrt-download\n", + "!lsb_release -a # check system\n", + "%ls /usr/local | grep cuda # check CUDA\n", + "!wget https://ultralytics.com/assets/TensorRT-8.2.0.6.Linux.x86_64-gnu.cuda-11.4.cudnn8.2.tar.gz # download\n", + "![ -d /content/TensorRT-8.2.0.6/ ] || tar -C /content/ -zxf ./TensorRT-8.2.0.6.Linux.x86_64-gnu.cuda-11.4.cudnn8.2.tar.gz # unzip\n", + "%pip list | grep tensorrt || pip install /content/TensorRT-8.2.0.6/python/tensorrt-8.2.0.6-cp37-none-linux_x86_64.whl # install\n", + "%env LD_LIBRARY_PATH=/usr/local/cuda-11.1/lib64:/content/cuda-11.1/lib64:/content/TensorRT-8.2.0.6/lib:/usr/local/nvidia/lib:/usr/local/nvidia/lib64 # add to path\n", + "\n", + "!python export.py --weights yolov5s.pt --include engine --imgsz 640 640 --device 0\n", + "!python detect.py --weights yolov5s.engine --imgsz 640 640 --device 0" + ], + "execution_count": null, + "outputs": [] } ] -} +} \ No newline at end of file From f17c86b7f0d2038288d7292cb82dec2433cc91e5 Mon Sep 17 00:00:00 2001 From: Zengyf-CVer <41098760+Zengyf-CVer@users.noreply.github.com> Date: Mon, 22 Nov 2021 03:21:44 +0800 Subject: [PATCH 499/757] Save *.npy features on detect.py `--visualize` (#5701) * Add feature map to save npy files Add feature map to save npy files,export npy files with 32 feature maps per layer. 
* [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update plots.py * Update plots.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update plots.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- tutorial.ipynb | 2 +- utils/plots.py | 7 ++++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 9440ca8b1788..4ce87c75aa64 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -1104,4 +1104,4 @@ "outputs": [] } ] -} \ No newline at end of file +} diff --git a/utils/plots.py b/utils/plots.py index 9919e4d9d88f..69037ee9af70 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -132,7 +132,7 @@ def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detec if 'Detect' not in module_type: batch, channels, height, width = x.shape # batch, channels, height, width if height > 1 and width > 1: - f = f"stage{stage}_{module_type.split('.')[-1]}_features.png" # filename + f = save_dir / f"stage{stage}_{module_type.split('.')[-1]}_features.png" # filename blocks = torch.chunk(x[0].cpu(), channels, dim=0) # select batch index 0, block by channels n = min(n, channels) # number of plots @@ -143,9 +143,10 @@ def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detec ax[i].imshow(blocks[i].squeeze()) # cmap='gray' ax[i].axis('off') - print(f'Saving {save_dir / f}... ({n}/{channels})') - plt.savefig(save_dir / f, dpi=300, bbox_inches='tight') + print(f'Saving {f}... ({n}/{channels})') + plt.savefig(f, dpi=300, bbox_inches='tight') plt.close() + np.save(str(f.with_suffix('.npy')), x[0].cpu().numpy()) # npy save def hist2d(x, y, n=100): From 7a39803476f8ae55fb25ed93a400a3bba998d5e7 Mon Sep 17 00:00:00 2001 From: imyhxy Date: Mon, 22 Nov 2021 21:58:07 +0800 Subject: [PATCH 500/757] Export, detect and validation with TensorRT engine file (#5699) * Export and detect with TensorRT engine file * Resolve `isort` * Make validation works with TensorRT engine * feat: update export docstring * feat: change suffix from *.trt to *.engine * feat: get rid of pycuda * feat: make compatiable with val.py * feat: support detect with fp16 engine * Add Lite to Edge TPU string * Remove *.trt comment * Revert to standard success logger.info string * Fix Deprecation Warning ``` export.py:310: DeprecationWarning: Use build_serialized_network instead. with builder.build_engine(network, config) as engine, open(f, 'wb') as t: ``` * Revert deprecation warning fix @imyhxy it seems we can't apply the deprecation warning fix because then export fails, so I'm reverting my previous change here. 
* Update export.py * Update export.py * Update common.py * export onnx to file before building TensorRT engine file * feat: triger ONNX export failed early * feat: load ONNX model from file Co-authored-by: Glenn Jocher --- detect.py | 4 ++-- export.py | 55 +++++++++++++++++++++++++++++++++++++++++++++++- models/common.py | 32 ++++++++++++++++++++++++---- val.py | 10 +++++---- 4 files changed, 90 insertions(+), 11 deletions(-) diff --git a/detect.py b/detect.py index 108f8f138052..29904f310200 100644 --- a/detect.py +++ b/detect.py @@ -77,11 +77,11 @@ def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s) # Load model device = select_device(device) model = DetectMultiBackend(weights, device=device, dnn=dnn) - stride, names, pt, jit, onnx = model.stride, model.names, model.pt, model.jit, model.onnx + stride, names, pt, jit, onnx, engine = model.stride, model.names, model.pt, model.jit, model.onnx, model.engine imgsz = check_img_size(imgsz, s=stride) # check image size # Half - half &= pt and device.type != 'cpu' # half precision only supported by PyTorch on CUDA + half &= (pt or engine) and device.type != 'cpu' # half precision only supported by PyTorch on CUDA if pt: model.model.half() if half else model.model.float() diff --git a/export.py b/export.py index b3ab4df25ae3..35875f1fb0d3 100644 --- a/export.py +++ b/export.py @@ -12,6 +12,7 @@ TensorFlow GraphDef | yolov5s.pb | 'pb' TensorFlow Lite | yolov5s.tflite | 'tflite' TensorFlow.js | yolov5s_web_model/ | 'tfjs' +TensorRT | yolov5s.engine | 'engine' Usage: $ python path/to/export.py --weights yolov5s.pt --include torchscript onnx coreml saved_model pb tflite tfjs @@ -24,6 +25,7 @@ yolov5s_saved_model yolov5s.pb yolov5s.tflite + yolov5s.engine TensorFlow.js: $ cd .. && git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example @@ -263,6 +265,51 @@ def export_tfjs(keras_model, im, file, prefix=colorstr('TensorFlow.js:')): LOGGER.info(f'\n{prefix} export failure: {e}') +def export_engine(model, im, file, train, half, simplify, workspace=4, verbose=False, prefix=colorstr('TensorRT:')): + try: + check_requirements(('tensorrt',)) + import tensorrt as trt + + opset = (12, 13)[trt.__version__[0] == '8'] # test on TensorRT 7.x and 8.x + export_onnx(model, im, file, opset, train, False, simplify) + onnx = file.with_suffix('.onnx') + assert onnx.exists(), f'failed to export ONNX file: {onnx}' + + LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...') + f = str(file).replace('.pt', '.engine') # TensorRT engine file + logger = trt.Logger(trt.Logger.INFO) + if verbose: + logger.min_severity = trt.Logger.Severity.VERBOSE + + builder = trt.Builder(logger) + config = builder.create_builder_config() + config.max_workspace_size = workspace * 1 << 30 + + flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)) + network = builder.create_network(flag) + parser = trt.OnnxParser(network, logger) + if not parser.parse_from_file(str(onnx)): + raise RuntimeError(f'failed to load ONNX file: {onnx}') + + inputs = [network.get_input(i) for i in range(network.num_inputs)] + outputs = [network.get_output(i) for i in range(network.num_outputs)] + LOGGER.info(f'{prefix} Network Description:') + for inp in inputs: + LOGGER.info(f'{prefix}\tinput "{inp.name}" with shape {inp.shape} and dtype {inp.dtype}') + for out in outputs: + LOGGER.info(f'{prefix}\toutput "{out.name}" with shape {out.shape} and dtype {out.dtype}') + + half &= builder.platform_has_fast_fp16 + LOGGER.info(f'{prefix} building FP{16 if half 
else 32} engine in {f}') + if half: + config.set_flag(trt.BuilderFlag.FP16) + with builder.build_engine(network, config) as engine, open(f, 'wb') as t: + t.write(engine.serialize()) + LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + + except Exception as e: + LOGGER.info(f'\n{prefix} export failure: {e}') + @torch.no_grad() def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' weights=ROOT / 'yolov5s.pt', # weights path @@ -278,6 +325,8 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' dynamic=False, # ONNX/TF: dynamic axes simplify=False, # ONNX: simplify model opset=12, # ONNX: opset version + verbose=False, # TensorRT: verbose log + workspace=4, # TensorRT: workspace size (GB) topk_per_class=100, # TF.js NMS: topk per class to keep topk_all=100, # TF.js NMS: topk for all classes to keep iou_thres=0.45, # TF.js NMS: IoU threshold @@ -322,6 +371,8 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' export_torchscript(model, im, file, optimize) if 'onnx' in include: export_onnx(model, im, file, opset, train, dynamic, simplify) + if 'engine' in include: + export_engine(model, im, file, train, half, simplify, workspace, verbose) if 'coreml' in include: export_coreml(model, im, file) @@ -360,13 +411,15 @@ def parse_opt(): parser.add_argument('--dynamic', action='store_true', help='ONNX/TF: dynamic axes') parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model') parser.add_argument('--opset', type=int, default=13, help='ONNX: opset version') + parser.add_argument('--verbose', action='store_true', help='TensorRT: verbose log') + parser.add_argument('--workspace', type=int, default=4, help='TensorRT: workspace size (GB)') parser.add_argument('--topk-per-class', type=int, default=100, help='TF.js NMS: topk per class to keep') parser.add_argument('--topk-all', type=int, default=100, help='TF.js NMS: topk for all classes to keep') parser.add_argument('--iou-thres', type=float, default=0.45, help='TF.js NMS: IoU threshold') parser.add_argument('--conf-thres', type=float, default=0.25, help='TF.js NMS: confidence threshold') parser.add_argument('--include', nargs='+', default=['torchscript', 'onnx'], - help='available formats are (torchscript, onnx, coreml, saved_model, pb, tflite, tfjs)') + help='available formats are (torchscript, onnx, engine, coreml, saved_model, pb, tflite, tfjs)') opt = parser.parse_args() print_args(FILE.stem, opt) return opt diff --git a/models/common.py b/models/common.py index b9604f3c1cbd..8836a655986a 100644 --- a/models/common.py +++ b/models/common.py @@ -7,6 +7,7 @@ import math import platform import warnings +from collections import namedtuple from copy import copy from pathlib import Path @@ -285,11 +286,12 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=True): # TensorFlow Lite: *.tflite # ONNX Runtime: *.onnx # OpenCV DNN: *.onnx with dnn=True + # TensorRT: *.engine super().__init__() w = str(weights[0] if isinstance(weights, list) else weights) - suffix, suffixes = Path(w).suffix.lower(), ['.pt', '.onnx', '.tflite', '.pb', '', '.mlmodel'] + suffix, suffixes = Path(w).suffix.lower(), ['.pt', '.onnx', '.engine', '.tflite', '.pb', '', '.mlmodel'] check_suffix(w, suffixes) # check weights have acceptable suffix - pt, onnx, tflite, pb, saved_model, coreml = (suffix == x for x in suffixes) # backend booleans + pt, onnx, engine, tflite, pb, saved_model, coreml = (suffix == x for x in suffixes) # backend booleans jit = pt and 'torchscript' in w.lower() stride, 
names = 64, [f'class{i}' for i in range(1000)] # assign defaults @@ -317,6 +319,23 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=True): check_requirements(('onnx', 'onnxruntime-gpu' if torch.has_cuda else 'onnxruntime')) import onnxruntime session = onnxruntime.InferenceSession(w, None) + elif engine: # TensorRT + LOGGER.info(f'Loading {w} for TensorRT inference...') + import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download + Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr')) + logger = trt.Logger(trt.Logger.INFO) + with open(w, 'rb') as f, trt.Runtime(logger) as runtime: + model = runtime.deserialize_cuda_engine(f.read()) + bindings = dict() + for index in range(model.num_bindings): + name = model.get_binding_name(index) + dtype = trt.nptype(model.get_binding_dtype(index)) + shape = tuple(model.get_binding_shape(index)) + data = torch.from_numpy(np.empty(shape, dtype=np.dtype(dtype))).to(device) + bindings[name] = Binding(name, dtype, shape, data, int(data.data_ptr())) + binding_addrs = {n: d.ptr for n, d in bindings.items()} + context = model.create_execution_context() + batch_size = bindings['images'].shape[0] else: # TensorFlow model (TFLite, pb, saved_model) import tensorflow as tf if pb: # https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt @@ -334,7 +353,7 @@ def wrap_frozen_graph(gd, inputs, outputs): model = tf.keras.models.load_model(w) elif tflite: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python if 'edgetpu' in w.lower(): - LOGGER.info(f'Loading {w} for TensorFlow Edge TPU inference...') + LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...') import tflite_runtime.interpreter as tfli delegate = {'Linux': 'libedgetpu.so.1', # install https://coral.ai/software/#edgetpu-runtime 'Darwin': 'libedgetpu.1.dylib', @@ -369,6 +388,11 @@ def forward(self, im, augment=False, visualize=False, val=False): y = self.net.forward() else: # ONNX Runtime y = self.session.run([self.session.get_outputs()[0].name], {self.session.get_inputs()[0].name: im})[0] + elif self.engine: # TensorRT + assert im.shape == self.bindings['images'].shape, (im.shape, self.bindings['images'].shape) + self.binding_addrs['images'] = int(im.data_ptr()) + self.context.execute_v2(list(self.binding_addrs.values())) + y = self.bindings['output'].data else: # TensorFlow model (TFLite, pb, saved_model) im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3) if self.pb: @@ -391,7 +415,7 @@ def forward(self, im, augment=False, visualize=False, val=False): y[..., 1] *= h # y y[..., 2] *= w # w y[..., 3] *= h # h - y = torch.tensor(y) + y = torch.tensor(y) if isinstance(y, np.ndarray) else y return (y, []) if val else y diff --git a/val.py b/val.py index cc6ff027b070..64a7e4dffeb0 100644 --- a/val.py +++ b/val.py @@ -111,7 +111,7 @@ def run(data, # Initialize/load model and set device training = model is not None if training: # called by train.py - device, pt = next(model.parameters()).device, True # get model device, PyTorch model + device, pt, engine = next(model.parameters()).device, True, False # get model device, PyTorch model half &= device.type != 'cpu' # half precision only supported on CUDA model.half() if half else model.float() @@ -124,11 +124,13 @@ def run(data, # Load model model = DetectMultiBackend(weights, device=device, dnn=dnn) - stride, pt = model.stride, model.pt + stride, pt, engine = model.stride, model.pt, model.engine imgsz = check_img_size(imgsz, 
s=stride) # check image size - half &= pt and device.type != 'cpu' # half precision only supported by PyTorch on CUDA + half &= (pt or engine) and device.type != 'cpu' # half precision only supported by PyTorch on CUDA if pt: model.model.half() if half else model.model.float() + elif engine: + batch_size = model.batch_size else: half = False batch_size = 1 # export.py models default to batch-size 1 @@ -165,7 +167,7 @@ def run(data, pbar = tqdm(dataloader, desc=s, ncols=NCOLS, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar for batch_i, (im, targets, paths, shapes) in enumerate(pbar): t1 = time_sync() - if pt: + if pt or engine: im = im.to(device, non_blocking=True) targets = targets.to(device) im = im.half() if half else im.float() # uint8 to fp16/32 From 4ca4aec46fa3ed89e5a16f09f6c85d40380ebb0f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 24 Nov 2021 23:25:51 +0100 Subject: [PATCH 501/757] Do not save hyp.yaml and opt.yaml on evolve (#5775) * Do not save hyp.yaml and opt.yaml on evolve * Update general.py --- train.py | 11 ++++++----- utils/general.py | 2 +- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/train.py b/train.py index 2838936d2d78..8d35f50afb11 100644 --- a/train.py +++ b/train.py @@ -76,13 +76,14 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary LOGGER.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items())) # Save run settings - with open(save_dir / 'hyp.yaml', 'w') as f: - yaml.safe_dump(hyp, f, sort_keys=False) - with open(save_dir / 'opt.yaml', 'w') as f: - yaml.safe_dump(vars(opt), f, sort_keys=False) - data_dict = None + if not evolve: + with open(save_dir / 'hyp.yaml', 'w') as f: + yaml.safe_dump(hyp, f, sort_keys=False) + with open(save_dir / 'opt.yaml', 'w') as f: + yaml.safe_dump(vars(opt), f, sort_keys=False) # Loggers + data_dict = None if RANK in [-1, 0]: loggers = Loggers(save_dir, weights, opt, hyp, LOGGER) # loggers instance if loggers.wandb: diff --git a/utils/general.py b/utils/general.py index fa56ed49aba8..5a2bcc3660f6 100755 --- a/utils/general.py +++ b/utils/general.py @@ -777,7 +777,7 @@ def print_mutation(results, hyp, save_dir, bucket): i = np.argmax(fitness(data.values[:, :7])) # f.write('# YOLOv5 Hyperparameter Evolution Results\n' + f'# Best generation: {i}\n' + - f'# Last generation: {len(data)}\n' + + f'# Last generation: {len(data) - 1}\n' + '# ' + ', '.join(f'{x.strip():>20s}' for x in keys[:7]) + '\n' + '# ' + ', '.join(f'{x:>20.5g}' for x in data.values[i, :7]) + '\n\n') yaml.safe_dump(hyp, f, sort_keys=False) From c55e674ac3ffa641cbbd465760701c64d3a3dcb9 Mon Sep 17 00:00:00 2001 From: rockstarr <41538890+miknyko@users.noreply.github.com> Date: Thu, 25 Nov 2021 15:42:15 +0800 Subject: [PATCH 502/757] fix the path error in export.py (#5778) * fix the path error in export.py * Update export.py Co-authored-by: Glenn Jocher From 4c7b2bdc30657354afcbc255385a163662e66c8b Mon Sep 17 00:00:00 2001 From: rockstarr <41538890+miknyko@users.noreply.github.com> Date: Thu, 25 Nov 2021 18:18:30 +0800 Subject: [PATCH 503/757] TorchScript `torch==1.7.0` Path support (#5781) * fix path error in export.py * Update export.py updated! 
* Update export.py oops forget something --- export.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/export.py b/export.py index 35875f1fb0d3..9d6d04967c80 100644 --- a/export.py +++ b/export.py @@ -71,7 +71,7 @@ def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:' ts = torch.jit.trace(model, im, strict=False) d = {"shape": im.shape, "stride": int(max(model.stride)), "names": model.names} extra_files = {'config.txt': json.dumps(d)} # torch._C.ExtraFilesMap() - (optimize_for_mobile(ts) if optimize else ts).save(f, _extra_files=extra_files) + (optimize_for_mobile(ts) if optimize else ts).save(str(f), _extra_files=extra_files) LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') except Exception as e: From f2ca30a407b00eb54999e9f350906e0c6eead906 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Thu, 25 Nov 2021 21:49:21 +0530 Subject: [PATCH 504/757] W&B: refactor W&B tables (#5737) * update * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * reformat * Single-line argparser argument * Update README.md * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update README.md * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- train.py | 2 +- utils/loggers/wandb/README.md | 33 +++++++++------ utils/loggers/wandb/wandb_utils.py | 68 +++++++++++++++++++++--------- 3 files changed, 68 insertions(+), 35 deletions(-) diff --git a/train.py b/train.py index 8d35f50afb11..9a5f402c3501 100644 --- a/train.py +++ b/train.py @@ -475,7 +475,7 @@ def parse_opt(known=False): # Weights & Biases arguments parser.add_argument('--entity', default=None, help='W&B: Entity') - parser.add_argument('--upload_dataset', action='store_true', help='W&B: Upload dataset as artifact table') + parser.add_argument('--upload_dataset', nargs='?', const=True, default=False, help='W&B: Upload data, "val" option') parser.add_argument('--bbox_interval', type=int, default=-1, help='W&B: Set bounding-box image logging interval') parser.add_argument('--artifact_alias', type=str, default='latest', help='W&B: Version of dataset artifact to use') diff --git a/utils/loggers/wandb/README.md b/utils/loggers/wandb/README.md index d787fb7a5a0e..63d999859e6d 100644 --- a/utils/loggers/wandb/README.md +++ b/utils/loggers/wandb/README.md @@ -2,6 +2,7 @@ * [About Weights & Biases](#about-weights-&-biases) * [First-Time Setup](#first-time-setup) * [Viewing runs](#viewing-runs) +* [Disabling wandb](#disabling-wandb) * [Advanced Usage: Dataset Versioning and Evaluation](#advanced-usage) * [Reports: Share your work with the world!](#reports) @@ -49,31 +50,36 @@ Run information streams from your environment to the W&B cloud console as you tr * Environment: OS and Python types, Git repository and state, **training command**

 [image: Weights & Biases dashboard]

+## Disabling wandb
+* Training after running `wandb disabled` inside that directory creates no wandb run.
+![Screenshot (84)](https://user-images.githubusercontent.com/15766192/143441777-c780bdd7-7cb4-4404-9559-b4316030a985.png)
+
+* To enable wandb again, run `wandb online`.
+![Screenshot (85)](https://user-images.githubusercontent.com/15766192/143441866-7191b2cb-22f0-4e0f-ae64-2dc47dc13078.png)
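A minimal sketch of the toggle described above (run from the training directory; the train.py arguments are illustrative, not part of this patch):

  $ wandb disabled     # runs launched from this directory create no wandb run
  $ python train.py --data coco128.yaml --weights yolov5s.pt --img 640
  $ wandb online       # re-enable W&B logging for future runs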

 ## Advanced Usage
 You can leverage W&B artifacts and Tables integration to easily visualize and manage your datasets, models and training evaluations. Here are some quick examples to get you started.

-1. Visualize and Version Datasets
-  Log, visualize, dynamically query, and understand your data with W&B Tables. You can use the following command to log your dataset as a W&B Table. This will generate a {dataset}_wandb.yaml file which can be used to train from dataset artifact.
+1: Train and Log Evaluation simultaneously
+  This is an extension of the previous section, but it will also start training after uploading the dataset, and it logs the evaluation Table. The evaluation Table compares your predictions and ground truths across the validation set for each epoch; it uses references to the already-uploaded datasets, so no images will be uploaded from your system more than once.
+
 Usage
- Code $ python utils/logger/wandb/log_dataset.py --project ... --name ... --data ..
+ Code $ python train.py --upload_data val

+![Screenshot from 2021-11-21 17-40-06](https://user-images.githubusercontent.com/15766192/142761183-c1696d8c-3f38-45ab-991a-bb0dfd98ae7d.png)
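For example, the new flag composes with a standard training command (dataset, weights and epoch count below are illustrative):

  $ python train.py --data coco128.yaml --weights yolov5s.pt --epochs 3 --upload_data val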

-2: Train and Log Evaluation simultaneousy
-  This is an extension of the previous section, but it'll also training after uploading the dataset. This also evaluation Table
-  Evaluation table compares your predictions and ground truths across the validation set for each epoch. It uses the references to the already uploaded datasets,
-  so no images will be uploaded from your system more than once.

+2. Visualize and Version Datasets
+  Log, visualize, dynamically query, and understand your data with W&B Tables. You can use the following command to log your dataset as a W&B Table. This will generate a {dataset}_wandb.yaml file which can be used to train from dataset artifact.

 Usage
- Code $ python utils/logger/wandb/log_dataset.py --data .. --upload_data
+ Code $ python utils/loggers/wandb/log_dataset.py --project ... --name ... --data ..

 ![Screenshot (64)](https://user-images.githubusercontent.com/15766192/128486078-d8433890-98a3-4d12-8986-b6c0e3fc64b9.png)
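Concretely, a dataset-logging call might look like the following sketch (project and run names are placeholders, and the script path assumes the utils/loggers/wandb/ location used elsewhere in this patch series):

  $ python utils/loggers/wandb/log_dataset.py --project YOLOv5 --name log-coco128 --data data/coco128.yaml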

 3: Train using dataset artifact

@@ -81,7 +87,7 @@ You can leverage W&B artifacts and Tables integration to easily visualize and ma
 can be used to train a model directly from the dataset artifact. This also logs evaluation
 Usage
- Code $ python utils/logger/wandb/log_dataset.py --data {data}_wandb.yaml
+ Code $ python train.py --data {data}_wandb.yaml

 ![Screenshot (72)](https://user-images.githubusercontent.com/15766192/128979739-4cf63aeb-a76f-483f-8861-1c0100b938a5.png)
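Assuming the upload step above produced a coco128_wandb.yaml config, training directly from the dataset artifact could then look like (weights choice illustrative):

  $ python train.py --data coco128_wandb.yaml --weights yolov5s.pt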
@@ -123,7 +129,6 @@ Any run can be resumed using artifacts if the --resume argument sta
-

Reports

W&B Reports can be created from your saved runs for sharing online. Once a report is created you will receive a link you can use to publically share your results. Here is an example report created from the COCO128 tutorial trainings of all four YOLOv5 models ([link](https://wandb.ai/glenn-jocher/yolov5_tutorial/reports/YOLOv5-COCO128-Tutorial-Results--VmlldzozMDI5OTY)). diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index a4cbaee240d5..2d6133ab94c5 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -202,7 +202,6 @@ def check_and_upload_dataset(self, opt): config_path = self.log_dataset_artifact(opt.data, opt.single_cls, 'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem) - LOGGER.info(f"Created dataset config file {config_path}") with open(config_path, errors='ignore') as f: wandb_data_dict = yaml.safe_load(f) return wandb_data_dict @@ -244,7 +243,9 @@ def setup_training(self, opt): if self.val_artifact is not None: self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") - self.result_table = wandb.Table(["epoch", "id", "ground truth", "prediction", "avg_confidence"]) + columns = ["epoch", "id", "ground truth", "prediction"] + columns.extend(self.data_dict['names']) + self.result_table = wandb.Table(columns) self.val_table = self.val_artifact.get("val") if self.val_table_path_map is None: self.map_val_table_path() @@ -331,28 +332,41 @@ def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config= returns: the new .yaml file with artifact links. it can be used to start training directly from artifacts """ + upload_dataset = self.wandb_run.config.upload_dataset + log_val_only = isinstance(upload_dataset, str) and upload_dataset == 'val' self.data_dict = check_dataset(data_file) # parse and check data = dict(self.data_dict) nc, names = (1, ['item']) if single_cls else (int(data['nc']), data['names']) names = {k: v for k, v in enumerate(names)} # to index dictionary - self.train_artifact = self.create_dataset_table(LoadImagesAndLabels( - data['train'], rect=True, batch_size=1), names, name='train') if data.get('train') else None + + # log train set + if not log_val_only: + self.train_artifact = self.create_dataset_table(LoadImagesAndLabels( + data['train'], rect=True, batch_size=1), names, name='train') if data.get('train') else None + if data.get('train'): + data['train'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'train') + self.val_artifact = self.create_dataset_table(LoadImagesAndLabels( data['val'], rect=True, batch_size=1), names, name='val') if data.get('val') else None - if data.get('train'): - data['train'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'train') if data.get('val'): data['val'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'val') - path = Path(data_file).stem - path = (path if overwrite_config else path + '_wandb') + '.yaml' # updated data.yaml path - data.pop('download', None) - data.pop('path', None) - with open(path, 'w') as f: - yaml.safe_dump(data, f) + + path = Path(data_file) + # create a _wandb.yaml file with artifacts links if both train and test set are logged + if not log_val_only: + path = (path.stem if overwrite_config else path.stem + '_wandb') + '.yaml' # updated data.yaml path + path = Path('data') / path + data.pop('download', None) + data.pop('path', None) + with open(path, 'w') as f: + yaml.safe_dump(data, f) + LOGGER.info(f"Created dataset config file {path}") if self.job_type == 'Training': # builds 
correct artifact pipeline graph + if not log_val_only: + self.wandb_run.log_artifact( + self.train_artifact) # calling use_artifact downloads the dataset. NOT NEEDED! self.wandb_run.use_artifact(self.val_artifact) - self.wandb_run.use_artifact(self.train_artifact) self.val_artifact.wait() self.val_table = self.val_artifact.get('val') self.map_val_table_path() @@ -371,7 +385,7 @@ def map_val_table_path(self): for i, data in enumerate(tqdm(self.val_table.data)): self.val_table_path_map[data[3]] = data[0] - def create_dataset_table(self, dataset: LoadImagesAndLabels, class_to_id: Dict[int,str], name: str = 'dataset'): + def create_dataset_table(self, dataset: LoadImagesAndLabels, class_to_id: Dict[int, str], name: str = 'dataset'): """ Create and return W&B artifact containing W&B Table of the dataset. @@ -424,23 +438,34 @@ def log_training_progress(self, predn, path, names): """ class_set = wandb.Classes([{'id': id, 'name': name} for id, name in names.items()]) box_data = [] - total_conf = 0 + avg_conf_per_class = [0] * len(self.data_dict['names']) + pred_class_count = {} for *xyxy, conf, cls in predn.tolist(): if conf >= 0.25: + cls = int(cls) box_data.append( {"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, - "class_id": int(cls), + "class_id": cls, "box_caption": f"{names[cls]} {conf:.3f}", "scores": {"class_score": conf}, "domain": "pixel"}) - total_conf += conf + avg_conf_per_class[cls] += conf + + if cls in pred_class_count: + pred_class_count[cls] += 1 + else: + pred_class_count[cls] = 1 + + for pred_class in pred_class_count.keys(): + avg_conf_per_class[pred_class] = avg_conf_per_class[pred_class] / pred_class_count[pred_class] + boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space id = self.val_table_path_map[Path(path).name] self.result_table.add_data(self.current_epoch, id, self.val_table.data[id][1], wandb.Image(self.val_table.data[id][1], boxes=boxes, classes=class_set), - total_conf / max(1, len(box_data)) + *avg_conf_per_class ) def val_one_image(self, pred, predn, path, names, im): @@ -490,7 +515,8 @@ def end_epoch(self, best_result=False): try: wandb.log(self.log_dict) except BaseException as e: - LOGGER.info(f"An error occurred in wandb logger. The training will proceed without interruption. More info\n{e}") + LOGGER.info( + f"An error occurred in wandb logger. The training will proceed without interruption. More info\n{e}") self.wandb_run.finish() self.wandb_run = None @@ -502,7 +528,9 @@ def end_epoch(self, best_result=False): ('best' if best_result else '')]) wandb.log({"evaluation": self.result_table}) - self.result_table = wandb.Table(["epoch", "id", "ground truth", "prediction", "avg_confidence"]) + columns = ["epoch", "id", "ground truth", "prediction"] + columns.extend(self.data_dict['names']) + self.result_table = wandb.Table(columns) self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") def finish_run(self): From 53349dac8e9fb447bb43319811699bb72d1c2470 Mon Sep 17 00:00:00 2001 From: Phil2020 <35833843+phodgers@users.noreply.github.com> Date: Thu, 25 Nov 2021 16:54:00 +0000 Subject: [PATCH 505/757] Scope TF imports in `DetectMultiBackend()` (#5792) * tensorflow or tflite exclusively as interpreter As per bug report https://github.com/ultralytics/yolov5/issues/5709 I think there should be only one attempt to assign interpreter, and it appears tflite is only ever needed for the case of edgetpu model. 
* Scope imports * Nested definition line fix * Update common.py Co-authored-by: Glenn Jocher --- models/common.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/models/common.py b/models/common.py index 8836a655986a..284f03e6de20 100644 --- a/models/common.py +++ b/models/common.py @@ -337,19 +337,21 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=True): context = model.create_execution_context() batch_size = bindings['images'].shape[0] else: # TensorFlow model (TFLite, pb, saved_model) - import tensorflow as tf if pb: # https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt + LOGGER.info(f'Loading {w} for TensorFlow *.pb inference...') + import tensorflow as tf + def wrap_frozen_graph(gd, inputs, outputs): x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=""), []) # wrapped return x.prune(tf.nest.map_structure(x.graph.as_graph_element, inputs), tf.nest.map_structure(x.graph.as_graph_element, outputs)) - LOGGER.info(f'Loading {w} for TensorFlow *.pb inference...') graph_def = tf.Graph().as_graph_def() graph_def.ParseFromString(open(w, 'rb').read()) frozen_func = wrap_frozen_graph(gd=graph_def, inputs="x:0", outputs="Identity:0") elif saved_model: LOGGER.info(f'Loading {w} for TensorFlow saved_model inference...') + import tensorflow as tf model = tf.keras.models.load_model(w) elif tflite: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python if 'edgetpu' in w.lower(): @@ -361,6 +363,7 @@ def wrap_frozen_graph(gd, inputs, outputs): interpreter = tfli.Interpreter(model_path=w, experimental_delegates=[tfli.load_delegate(delegate)]) else: LOGGER.info(f'Loading {w} for TensorFlow Lite inference...') + import tensorflow as tf interpreter = tf.lite.Interpreter(model_path=w) # load TFLite model interpreter.allocate_tensors() # allocate input_details = interpreter.get_input_details() # inputs From 7c6bae0ae6711b470ace2587ff7cf313a90cfed0 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 26 Nov 2021 13:37:28 +0100 Subject: [PATCH 506/757] Remove NCOLS from tqdm (#5804) * Remove NCOLS from tqdm * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- train.py | 10 +++++----- utils/general.py | 2 +- val.py | 4 ++-- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/train.py b/train.py index 9a5f402c3501..8cb68fc0748e 100644 --- a/train.py +++ b/train.py @@ -39,10 +39,10 @@ from utils.callbacks import Callbacks from utils.datasets import create_dataloader from utils.downloads import attempt_download -from utils.general import (LOGGER, NCOLS, check_dataset, check_file, check_git_status, check_img_size, - check_requirements, check_suffix, check_yaml, colorstr, get_latest_run, increment_path, - init_seeds, intersect_dicts, labels_to_class_weights, labels_to_image_weights, methods, - one_cycle, print_args, print_mutation, strip_optimizer) +from utils.general import (LOGGER, check_dataset, check_file, check_git_status, check_img_size, check_requirements, + check_suffix, check_yaml, colorstr, get_latest_run, increment_path, init_seeds, + intersect_dicts, labels_to_class_weights, labels_to_image_weights, methods, one_cycle, + print_args, print_mutation, strip_optimizer) from utils.loggers import Loggers from utils.loggers.wandb.wandb_utils import check_wandb_resume from utils.loss import ComputeLoss @@ -289,7 +289,7 @@ def train(hyp, # path/to/hyp.yaml 
or hyp dictionary pbar = enumerate(train_loader) LOGGER.info(('\n' + '%10s' * 7) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'labels', 'img_size')) if RANK in [-1, 0]: - pbar = tqdm(pbar, total=nb, ncols=NCOLS, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar + pbar = tqdm(pbar, total=nb, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar optimizer.zero_grad() for i, (imgs, targets, paths, _) in pbar: # batch ------------------------------------------------------------- ni = i + nb * epoch # number integrated batches (since train start) diff --git a/utils/general.py b/utils/general.py index 5a2bcc3660f6..8aa76fbdb6ad 100755 --- a/utils/general.py +++ b/utils/general.py @@ -838,4 +838,4 @@ def increment_path(path, exist_ok=False, sep='', mkdir=False): # Variables -NCOLS = 0 if is_docker() else shutil.get_terminal_size().columns # terminal window size +NCOLS = 0 if is_docker() else shutil.get_terminal_size().columns # terminal window size for tqdm diff --git a/val.py b/val.py index 64a7e4dffeb0..165cab1d6259 100644 --- a/val.py +++ b/val.py @@ -26,7 +26,7 @@ from models.common import DetectMultiBackend from utils.callbacks import Callbacks from utils.datasets import create_dataloader -from utils.general import (LOGGER, NCOLS, box_iou, check_dataset, check_img_size, check_requirements, check_yaml, +from utils.general import (LOGGER, box_iou, check_dataset, check_img_size, check_requirements, check_yaml, coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, print_args, scale_coords, xywh2xyxy, xyxy2xywh) from utils.metrics import ConfusionMatrix, ap_per_class @@ -164,7 +164,7 @@ def run(data, dt, p, r, f1, mp, mr, map50, map = [0.0, 0.0, 0.0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 loss = torch.zeros(3, device=device) jdict, stats, ap, ap_class = [], [], [], [] - pbar = tqdm(dataloader, desc=s, ncols=NCOLS, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar + pbar = tqdm(dataloader, desc=s, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar for batch_i, (im, targets, paths, shapes) in enumerate(pbar): t1 = time_sync() if pt or engine: From fcd180d33697848ea7acb96d7485c58110704d5e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 27 Nov 2021 12:29:45 +0100 Subject: [PATCH 507/757] Refactor new `model.warmup()` method (#5810) * Refactor new `model.warmup()` method * Add half --- detect.py | 3 +-- models/common.py | 7 +++++++ val.py | 3 +-- 3 files changed, 9 insertions(+), 4 deletions(-) diff --git a/detect.py b/detect.py index 29904f310200..ecf868b5eaf4 100644 --- a/detect.py +++ b/detect.py @@ -97,8 +97,7 @@ def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s) vid_path, vid_writer = [None] * bs, [None] * bs # Run inference - if pt and device.type != 'cpu': - model(torch.zeros(1, 3, *imgsz).to(device).type_as(next(model.model.parameters()))) # warmup + model.warmup(imgsz=(1, 3, *imgsz), half=half) # warmup dt, seen = [0.0, 0.0, 0.0], 0 for path, im, im0s, vid_cap, s in dataset: t1 = time_sync() diff --git a/models/common.py b/models/common.py index 284f03e6de20..72549809c8c3 100644 --- a/models/common.py +++ b/models/common.py @@ -421,6 +421,13 @@ def forward(self, im, augment=False, visualize=False, val=False): y = torch.tensor(y) if isinstance(y, np.ndarray) else y return (y, []) if val else y + def warmup(self, imgsz=(1, 3, 640, 640), half=False): + # Warmup model by running inference once + if self.pt or self.engine or self.onnx: # warmup types + if isinstance(self.device, torch.device) and self.device.type != 'cpu': # only 
warmup GPU models + im = torch.zeros(*imgsz).to(self.device).type(torch.half if half else torch.float) # input image + self.forward(im) # warmup + class AutoShape(nn.Module): # YOLOv5 input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS diff --git a/val.py b/val.py index 165cab1d6259..bd0ce9a7861d 100644 --- a/val.py +++ b/val.py @@ -149,8 +149,7 @@ def run(data, # Dataloader if not training: - if pt and device.type != 'cpu': - model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.model.parameters()))) # warmup + model.warmup(imgsz=(1, 3, imgsz, imgsz), half=half) # warmup pad = 0.0 if task == 'speed' else 0.5 task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images dataloader = create_dataloader(data[task], imgsz, batch_size, stride, single_cls, pad=pad, rect=pt, From 94d8fec6d846313fed5530f9d18d2f93f89e9e97 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 28 Nov 2021 18:12:46 +0100 Subject: [PATCH 508/757] GCP VM from Image example (#5814) --- Dockerfile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Dockerfile b/Dockerfile index fe1acb0a6540..9a55005a95c5 100644 --- a/Dockerfile +++ b/Dockerfile @@ -59,3 +59,6 @@ ADD https://ultralytics.com/assets/Arial.ttf /root/.config/Ultralytics/ # DDP test # python -m torch.distributed.run --nproc_per_node 2 --master_port 1 train.py --epochs 3 + +# GCP VM from Image +# docker.io/ultralytics/yolov5:latest From 8277033b65fe81d2b48178b335b2d91bcb41a98b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 29 Nov 2021 12:04:36 +0100 Subject: [PATCH 509/757] Bump actions/cache from 2.1.6 to 2.1.7 (#5816) Bumps [actions/cache](https://github.com/actions/cache) from 2.1.6 to 2.1.7. - [Release notes](https://github.com/actions/cache/releases) - [Commits](https://github.com/actions/cache/compare/v2.1.6...v2.1.7) --- updated-dependencies: - dependency-name: actions/cache dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci-testing.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index b2bc040191e7..9085b2b7e6dd 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -39,7 +39,7 @@ jobs: python -c "from pip._internal.locations import USER_CACHE_DIR; print('::set-output name=dir::' + USER_CACHE_DIR)" - name: Cache pip - uses: actions/cache@v2.1.6 + uses: actions/cache@v2.1.7 with: path: ${{ steps.pip-cache.outputs.dir }} key: ${{ runner.os }}-${{ matrix.python-version }}-pip-${{ hashFiles('requirements.txt') }} From 5ca5dd4c87fcc62491173b393fd51cf244805313 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 29 Nov 2021 13:15:36 +0100 Subject: [PATCH 510/757] Update `dataset_stats()` to `cv2.INTER_AREA` (#5821) --- utils/datasets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/datasets.py b/utils/datasets.py index 68b1e634bebf..ac81603c7d34 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -974,7 +974,7 @@ def hub_ops(f, max_dim=1920): im_height, im_width = im.shape[:2] r = max_dim / max(im_height, im_width) # ratio if r < 1.0: # image too large - im = cv2.resize(im, (int(im_width * r), int(im_height * r)), interpolation=cv2.INTER_LINEAR) + im = cv2.resize(im, (int(im_width * r), int(im_height * r)), interpolation=cv2.INTER_AREA) cv2.imwrite(str(f_new), im) zipped, data_dir, yaml_path = unzip(Path(path)) From a4207a202d6801df4586a8e044f60c496d94aeb4 Mon Sep 17 00:00:00 2001 From: imyhxy Date: Tue, 30 Nov 2021 20:52:22 +0800 Subject: [PATCH 511/757] Fix TensorRT potential unordered binding addresses (#5826) * feat: change file suffix in pythonic way * fix: enforce binding addresses order * fix: enforce binding addresses order --- export.py | 3 ++- models/common.py | 6 +++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/export.py b/export.py index 9d6d04967c80..b2f42142e16c 100644 --- a/export.py +++ b/export.py @@ -276,7 +276,7 @@ def export_engine(model, im, file, train, half, simplify, workspace=4, verbose=F assert onnx.exists(), f'failed to export ONNX file: {onnx}' LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...') - f = str(file).replace('.pt', '.engine') # TensorRT engine file + f = file.with_suffix('.engine') # TensorRT engine file logger = trt.Logger(trt.Logger.INFO) if verbose: logger.min_severity = trt.Logger.Severity.VERBOSE @@ -310,6 +310,7 @@ def export_engine(model, im, file, train, half, simplify, workspace=4, verbose=F except Exception as e: LOGGER.info(f'\n{prefix} export failure: {e}') + @torch.no_grad() def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' weights=ROOT / 'yolov5s.pt', # weights path diff --git a/models/common.py b/models/common.py index 72549809c8c3..cbd4ff479885 100644 --- a/models/common.py +++ b/models/common.py @@ -7,7 +7,7 @@ import math import platform import warnings -from collections import namedtuple +from collections import OrderedDict, namedtuple from copy import copy from pathlib import Path @@ -326,14 +326,14 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=True): logger = trt.Logger(trt.Logger.INFO) with open(w, 'rb') as f, trt.Runtime(logger) as runtime: model = runtime.deserialize_cuda_engine(f.read()) - bindings = dict() + bindings = OrderedDict() for index in range(model.num_bindings): name = 
model.get_binding_name(index) dtype = trt.nptype(model.get_binding_dtype(index)) shape = tuple(model.get_binding_shape(index)) data = torch.from_numpy(np.empty(shape, dtype=np.dtype(dtype))).to(device) bindings[name] = Binding(name, dtype, shape, data, int(data.data_ptr())) - binding_addrs = {n: d.ptr for n, d in bindings.items()} + binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items()) context = model.create_execution_context() batch_size = bindings['images'].shape[0] else: # TensorFlow model (TFLite, pb, saved_model) From bc484579d7be481ffb5fba95020c515afd89be9b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 1 Dec 2021 15:38:02 +0100 Subject: [PATCH 512/757] Handle non-TTY `wandb.errors.UsageError` (#5839) * `try: except (..., wandb.errors.UsageError)` * bug fix --- utils/loggers/__init__.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index ae2d98bdc36d..2a68d9785071 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -24,7 +24,10 @@ assert hasattr(wandb, '__version__') # verify package import not local dir if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.2') and RANK in [0, -1]: - wandb_login_success = wandb.login(timeout=30) + try: + wandb_login_success = wandb.login(timeout=30) + except wandb.errors.UsageError: # known non-TTY terminal issue + wandb_login_success = False if not wandb_login_success: wandb = None except (ImportError, AssertionError): From e8f8f2b9039e7879262675c56d21148398bf9aae Mon Sep 17 00:00:00 2001 From: Yu Zhang Date: Thu, 2 Dec 2021 17:51:19 +0800 Subject: [PATCH 513/757] Avoid inplace modifying `imgs` in `LoadStreams` (#5850) When OpenCV fails to retrieve an image, the original code would modify the source images **inplace**, which may result in plotting bounding boxes on a black image. That is, before inference the source image `im0s[i]` is OK, but after inference and before `Process predictions`, `im0s[i]` may have been changed. --- utils/datasets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/datasets.py b/utils/datasets.py index ac81603c7d34..f3abfb1f6f90 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -337,7 +337,7 @@ def update(self, i, cap, stream): self.imgs[i] = im else: LOGGER.warning('WARNING: Video stream unresponsive, please check your IP camera connection.') - self.imgs[i] *= 0 + self.imgs[i] = np.zeros_like(self.imgs[i]) cap.open(stream) # re-open stream if signal was lost time.sleep(1 / self.fps[i]) # wait time From 1679aacdc7f08b55df0ebf985688e01ec1f2d9b6 Mon Sep 17 00:00:00 2001 From: gggmt <1065504814@qq.com> Date: Thu, 2 Dec 2021 17:57:39 +0800 Subject: [PATCH 514/757] Update `LoadImages` `ret_val=False` handling (#5852) Video errors may occur.
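The diff that follows changes a one-shot `if not ret_val:` check into a `while not ret_val:` loop, so every consecutive unreadable or exhausted video is skipped rather than only the first. A minimal, hypothetical sketch of that pattern using OpenCV directly (not the actual `LoadImages` code):

```python
import cv2

def iter_frames(paths):  # illustrative stand-in for LoadImages' video handling
    i = 0
    cap = cv2.VideoCapture(paths[i])
    while True:
        ret_val, frame = cap.read()
        while not ret_val:  # `while`, not `if`: keep skipping failed reads
            cap.release()
            i += 1
            if i == len(paths):  # no videos left
                return
            cap = cv2.VideoCapture(paths[i])  # open the next video
            ret_val, frame = cap.read()
        yield frame
```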
--- utils/datasets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/datasets.py b/utils/datasets.py index f3abfb1f6f90..6ce7a81b69e7 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -200,7 +200,7 @@ def __next__(self): # Read video self.mode = 'video' ret_val, img0 = self.cap.read() - if not ret_val: + while not ret_val: self.count += 1 self.cap.release() if self.count == self.nf: # last video From 30db14fea8646aa3cbd2381b72e1bd45731e1d24 Mon Sep 17 00:00:00 2001 From: Vishnu Pradeep <61411495+pradeep-vishnu@users.noreply.github.com> Date: Thu, 2 Dec 2021 13:20:58 +0100 Subject: [PATCH 515/757] Update val.py (#5838) * Update val.py Solving Non-ASCII character '\xf0' error during runtime * Update val.py Co-authored-by: Glenn Jocher From 00e308f7be3b4152fa8c90efc38d8df3a9f0d4c2 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 2 Dec 2021 16:06:45 +0100 Subject: [PATCH 516/757] Update TorchScript suffix to `*.torchscript` (#5856) --- detect.py | 8 ++++---- export.py | 6 +++--- models/common.py | 12 ++++++------ utils/activations.py | 4 ++-- val.py | 10 +++++----- 5 files changed, 20 insertions(+), 20 deletions(-) diff --git a/detect.py b/detect.py index ecf868b5eaf4..0b6875e5564c 100644 --- a/detect.py +++ b/detect.py @@ -81,18 +81,18 @@ def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s) imgsz = check_img_size(imgsz, s=stride) # check image size # Half - half &= (pt or engine) and device.type != 'cpu' # half precision only supported by PyTorch on CUDA - if pt: + half &= (pt or jit or engine) and device.type != 'cpu' # half precision only supported by PyTorch on CUDA + if pt or jit: model.model.half() if half else model.model.float() # Dataloader if webcam: view_img = check_imshow() cudnn.benchmark = True # set True to speed up constant image size inference - dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt and not jit) + dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt) bs = len(dataset) # batch_size else: - dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt and not jit) + dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt) bs = 1 # batch_size vid_path, vid_writer = [None] * bs, [None] * bs diff --git a/export.py b/export.py index b2f42142e16c..437616a9890d 100644 --- a/export.py +++ b/export.py @@ -5,7 +5,7 @@ Format | Example | Export `include=(...)` argument --- | --- | --- PyTorch | yolov5s.pt | - -TorchScript | yolov5s.torchscript.pt | 'torchscript' +TorchScript | yolov5s.torchscript | 'torchscript' ONNX | yolov5s.onnx | 'onnx' CoreML | yolov5s.mlmodel | 'coreml' TensorFlow SavedModel | yolov5s_saved_model/ | 'saved_model' @@ -19,7 +19,7 @@ Inference: $ python path/to/detect.py --weights yolov5s.pt - yolov5s.torchscript.pt + yolov5s.torchscript yolov5s.onnx yolov5s.mlmodel (under development) yolov5s_saved_model @@ -66,7 +66,7 @@ def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:' # YOLOv5 TorchScript model export try: LOGGER.info(f'\n{prefix} starting export with torch {torch.__version__}...') - f = file.with_suffix('.torchscript.pt') + f = file.with_suffix('.torchscript') ts = torch.jit.trace(model, im, strict=False) d = {"shape": im.shape, "stride": int(max(model.stride)), "names": model.names} diff --git a/models/common.py b/models/common.py index cbd4ff479885..73f21729fa85 100644 --- a/models/common.py +++ b/models/common.py @@ -279,7 +279,7 @@ class DetectMultiBackend(nn.Module): def __init__(self, weights='yolov5s.pt', device=None, 
dnn=True): # Usage: # PyTorch: weights = *.pt - # TorchScript: *.torchscript.pt + # TorchScript: *.torchscript # CoreML: *.mlmodel # TensorFlow: *_saved_model # TensorFlow: *.pb @@ -289,10 +289,10 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=True): # TensorRT: *.engine super().__init__() w = str(weights[0] if isinstance(weights, list) else weights) - suffix, suffixes = Path(w).suffix.lower(), ['.pt', '.onnx', '.engine', '.tflite', '.pb', '', '.mlmodel'] + suffix = Path(w).suffix.lower() + suffixes = ['.pt', '.torchscript', '.onnx', '.engine', '.tflite', '.pb', '', '.mlmodel'] check_suffix(w, suffixes) # check weights have acceptable suffix - pt, onnx, engine, tflite, pb, saved_model, coreml = (suffix == x for x in suffixes) # backend booleans - jit = pt and 'torchscript' in w.lower() + pt, jit, onnx, engine, tflite, pb, saved_model, coreml = (suffix == x for x in suffixes) # backend booleans stride, names = 64, [f'class{i}' for i in range(1000)] # assign defaults if jit: # TorchScript @@ -304,10 +304,10 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=True): stride, names = int(d['stride']), d['names'] elif pt: # PyTorch from models.experimental import attempt_load # scoped to avoid circular import - model = torch.jit.load(w) if 'torchscript' in w else attempt_load(weights, map_location=device) + model = attempt_load(weights, map_location=device) stride = int(model.stride.max()) # model stride names = model.module.names if hasattr(model, 'module') else model.names # get class names - elif coreml: # CoreML *.mlmodel + elif coreml: # CoreML import coremltools as ct model = ct.models.MLModel(w) elif dnn: # ONNX OpenCV DNN diff --git a/utils/activations.py b/utils/activations.py index 4c7d46c32104..a4ff789cf336 100644 --- a/utils/activations.py +++ b/utils/activations.py @@ -18,8 +18,8 @@ def forward(x): class Hardswish(nn.Module): # export-friendly version of nn.Hardswish() @staticmethod def forward(x): - # return x * F.hardsigmoid(x) # for torchscript and CoreML - return x * F.hardtanh(x + 3, 0.0, 6.0) / 6.0 # for torchscript, CoreML and ONNX + # return x * F.hardsigmoid(x) # for TorchScript and CoreML + return x * F.hardtanh(x + 3, 0.0, 6.0) / 6.0 # for TorchScript, CoreML and ONNX # Mish https://github.com/digantamisra98/Mish -------------------------------------------------------------------------- diff --git a/val.py b/val.py index bd0ce9a7861d..27edd158a2f6 100644 --- a/val.py +++ b/val.py @@ -111,7 +111,7 @@ def run(data, # Initialize/load model and set device training = model is not None if training: # called by train.py - device, pt, engine = next(model.parameters()).device, True, False # get model device, PyTorch model + device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model half &= device.type != 'cpu' # half precision only supported on CUDA model.half() if half else model.float() @@ -124,10 +124,10 @@ def run(data, # Load model model = DetectMultiBackend(weights, device=device, dnn=dnn) - stride, pt, engine = model.stride, model.pt, model.engine + stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine imgsz = check_img_size(imgsz, s=stride) # check image size - half &= (pt or engine) and device.type != 'cpu' # half precision only supported by PyTorch on CUDA - if pt: + half &= (pt or jit or engine) and device.type != 'cpu' # half precision only supported by PyTorch on CUDA + if pt or jit: model.model.half() if half else model.model.float() elif engine: batch_size = 
model.batch_size @@ -166,7 +166,7 @@ def run(data, pbar = tqdm(dataloader, desc=s, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar for batch_i, (im, targets, paths, shapes) in enumerate(pbar): t1 = time_sync() - if pt or engine: + if pt or jit or engine: im = im.to(device, non_blocking=True) targets = targets.to(device) im = im.half() if half else im.float() # uint8 to fp16/32 From 92a7391039110b93e0028eeda5e370f2ec5a2f74 Mon Sep 17 00:00:00 2001 From: iumyx2612 <69593462+iumyx2612@users.noreply.github.com> Date: Thu, 2 Dec 2021 22:49:50 +0700 Subject: [PATCH 517/757] Add `--workers 8` argument to val.py (#5857) * Update val.py Add an option to choose number of workers if not called by train.py * Update comment * 120 char line width Co-authored-by: Glenn Jocher --- val.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/val.py b/val.py index 27edd158a2f6..4eec499d3029 100644 --- a/val.py +++ b/val.py @@ -89,6 +89,7 @@ def run(data, iou_thres=0.6, # NMS IoU threshold task='val', # train, val, test, speed or study device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu + workers=8, # max dataloader workers (per RANK in DDP mode) single_cls=False, # treat as single-class dataset augment=False, # augmented inference verbose=False, # verbose output @@ -153,7 +154,7 @@ def run(data, pad = 0.0 if task == 'speed' else 0.5 task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images dataloader = create_dataloader(data[task], imgsz, batch_size, stride, single_cls, pad=pad, rect=pt, - prefix=colorstr(f'{task}: '))[0] + workers=workers, prefix=colorstr(f'{task}: '))[0] seen = 0 confusion_matrix = ConfusionMatrix(nc=nc) @@ -312,6 +313,7 @@ def parse_opt(): parser.add_argument('--iou-thres', type=float, default=0.6, help='NMS IoU threshold') parser.add_argument('--task', default='val', help='train, val, test, speed or study') parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') + parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset') parser.add_argument('--augment', action='store_true', help='augmented inference') parser.add_argument('--verbose', action='store_true', help='report mAP by class') From 360eec69101ad9ffada78326b715b724b9b7eb0d Mon Sep 17 00:00:00 2001 From: Can Date: Fri, 3 Dec 2021 20:37:45 +0800 Subject: [PATCH 518/757] Update `plot_lr_scheduler()` (#5864) A shallow copy would modify the originals. --- utils/plots.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/plots.py b/utils/plots.py index 69037ee9af70..5742d050fdf5 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -5,7 +5,7 @@ import math import os -from copy import copy +from copy import deepcopy from pathlib import Path import cv2 @@ -243,7 +243,7 @@ def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''): # Plot LR simulating training for full epochs - optimizer, scheduler = copy(optimizer), copy(scheduler) # do not modify originals + optimizer, scheduler = deepcopy(optimizer), deepcopy(scheduler) # do not modify originals y = [] for _ in range(epochs): scheduler.step() From d885799c713e578082704c103c3a0b3796f7d10a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 3 Dec 2021 15:28:14 +0100 Subject: [PATCH 519/757] Update `nl` after `cutout()` (#5873) --- utils/datasets.py | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/datasets.py b/utils/datasets.py index 6ce7a81b69e7..5a3b2110b2e0 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -611,6 +611,7 @@ def __getitem__(self, index): # Cutouts # labels = cutout(img, labels, p=0.5) + # nl = len(labels) # update after cutout labels_out = torch.zeros((nl, 6)) if nl: From 7bf04d9bbfffa6d88b018e11f431b971db2a7034 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 4 Dec 2021 15:00:07 +0100 Subject: [PATCH 520/757] `AutoShape()` models as `DetectMultiBackend()` instances (#5845) * Update AutoShape() * autodownload ONNX * Cleanup * Finish updates * Add Usage * Update * Update * Update * Update * Update * Update * Update * Update * Update * Update * Update * Update * Update * Update * Update * Update * Update * fix device * Update hubconf.py * Update common.py * smart param selection * autodownload all formats * autopad only pytorch models * new_shape edits * stride tensor fix * Cleanup --- export.py | 2 +- hubconf.py | 14 +++++++------- models/common.py | 40 ++++++++++++++++++++++++---------------- utils/general.py | 4 +++- 4 files changed, 35 insertions(+), 25 deletions(-) diff --git a/export.py b/export.py index 437616a9890d..21c83c697b4d 100644 --- a/export.py +++ b/export.py @@ -411,7 +411,7 @@ def parse_opt(): parser.add_argument('--int8', action='store_true', help='CoreML/TF INT8 quantization') parser.add_argument('--dynamic', action='store_true', help='ONNX/TF: dynamic axes') parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model') - parser.add_argument('--opset', type=int, default=13, help='ONNX: opset version') + parser.add_argument('--opset', type=int, default=14, help='ONNX: opset version') parser.add_argument('--verbose', action='store_true', help='TensorRT: verbose log') parser.add_argument('--workspace', type=int, default=4, help='TensorRT: workspace size (GB)') parser.add_argument('--topk-per-class', type=int,
default=100, help='TF.js NMS: topk per class to keep') diff --git a/hubconf.py b/hubconf.py index 03335f7906f0..e407677b3233 100644 --- a/hubconf.py +++ b/hubconf.py @@ -5,6 +5,7 @@ Usage: import torch model = torch.hub.load('ultralytics/yolov5', 'yolov5s') + model = torch.hub.load('ultralytics/yolov5:master', 'custom', 'path/to/yolov5s.onnx') # file from branch """ import torch @@ -27,26 +28,25 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo """ from pathlib import Path - from models.common import AutoShape - from models.experimental import attempt_load + from models.common import AutoShape, DetectMultiBackend from models.yolo import Model from utils.downloads import attempt_download from utils.general import check_requirements, intersect_dicts, set_logging from utils.torch_utils import select_device - file = Path(__file__).resolve() check_requirements(exclude=('tensorboard', 'thop', 'opencv-python')) set_logging(verbose=verbose) - save_dir = Path('') if str(name).endswith('.pt') else file.parent - path = (save_dir / name).with_suffix('.pt') # checkpoint path + name = Path(name) + path = name.with_suffix('.pt') if name.suffix == '' else name # checkpoint path try: device = select_device(('0' if torch.cuda.is_available() else 'cpu') if device is None else device) if pretrained and channels == 3 and classes == 80: - model = attempt_load(path, map_location=device) # download/load FP32 model + model = DetectMultiBackend(path, device=device) # download/load FP32 model + # model = models.experimental.attempt_load(path, map_location=device) # download/load FP32 model else: - cfg = list((Path(__file__).parent / 'models').rglob(f'{name}.yaml'))[0] # model.yaml path + cfg = list((Path(__file__).parent / 'models').rglob(f'{path.name}.yaml'))[0] # model.yaml path model = Model(cfg, channels, classes) # create model if pretrained: ckpt = torch.load(attempt_download(path), map_location=device) # load diff --git a/models/common.py b/models/common.py index 73f21729fa85..6a5303ba8c42 100644 --- a/models/common.py +++ b/models/common.py @@ -276,7 +276,7 @@ def forward(self, x): class DetectMultiBackend(nn.Module): # YOLOv5 MultiBackend class for python inference on various backends - def __init__(self, weights='yolov5s.pt', device=None, dnn=True): + def __init__(self, weights='yolov5s.pt', device=None, dnn=False): # Usage: # PyTorch: weights = *.pt # TorchScript: *.torchscript @@ -287,6 +287,8 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=True): # ONNX Runtime: *.onnx # OpenCV DNN: *.onnx with dnn=True # TensorRT: *.engine + from models.experimental import attempt_download, attempt_load # scoped to avoid circular import + super().__init__() w = str(weights[0] if isinstance(weights, list) else weights) suffix = Path(w).suffix.lower() @@ -294,6 +296,7 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=True): check_suffix(w, suffixes) # check weights have acceptable suffix pt, jit, onnx, engine, tflite, pb, saved_model, coreml = (suffix == x for x in suffixes) # backend booleans stride, names = 64, [f'class{i}' for i in range(1000)] # assign defaults + attempt_download(w) # download if not local if jit: # TorchScript LOGGER.info(f'Loading {w} for TorchScript inference...') @@ -303,11 +306,12 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=True): d = json.loads(extra_files['config.txt']) # extra_files dict stride, names = int(d['stride']), d['names'] elif pt: # PyTorch - from models.experimental import attempt_load # scoped to avoid 
circular import model = attempt_load(weights, map_location=device) stride = int(model.stride.max()) # model stride names = model.module.names if hasattr(model, 'module') else model.names # get class names + self.model = model # explicitly assign for to(), cpu(), cuda(), half() elif coreml: # CoreML + LOGGER.info(f'Loading {w} for CoreML inference...') import coremltools as ct model = ct.models.MLModel(w) elif dnn: # ONNX OpenCV DNN @@ -316,7 +320,7 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=True): net = cv2.dnn.readNetFromONNX(w) elif onnx: # ONNX Runtime LOGGER.info(f'Loading {w} for ONNX Runtime inference...') - check_requirements(('onnx', 'onnxruntime-gpu' if torch.has_cuda else 'onnxruntime')) + check_requirements(('onnx', 'onnxruntime-gpu' if torch.cuda.is_available() else 'onnxruntime')) import onnxruntime session = onnxruntime.InferenceSession(w, None) elif engine: # TensorRT @@ -376,7 +380,7 @@ def forward(self, im, augment=False, visualize=False, val=False): if self.pt: # PyTorch y = self.model(im) if self.jit else self.model(im, augment=augment, visualize=visualize) return y if val else y[0] - elif self.coreml: # CoreML *.mlmodel + elif self.coreml: # CoreML im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3) im = Image.fromarray((im[0] * 255).astype('uint8')) # im = im.resize((192, 320), Image.ANTIALIAS) @@ -433,24 +437,28 @@ class AutoShape(nn.Module): # YOLOv5 input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS conf = 0.25 # NMS confidence threshold iou = 0.45 # NMS IoU threshold - classes = None # (optional list) filter by class, i.e. = [0, 15, 16] for COCO persons, cats and dogs + agnostic = False # NMS class-agnostic multi_label = False # NMS multiple labels per box + classes = None # (optional list) filter by class, i.e. = [0, 15, 16] for COCO persons, cats and dogs max_det = 1000 # maximum number of detections per image def __init__(self, model): super().__init__() LOGGER.info('Adding AutoShape... ') copy_attr(self, model, include=('yaml', 'nc', 'hyp', 'names', 'stride', 'abc'), exclude=()) # copy attributes + self.dmb = isinstance(model, DetectMultiBackend) # DetectMultiBackend() instance + self.pt = not self.dmb or model.pt # PyTorch model self.model = model.eval() def _apply(self, fn): # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers self = super()._apply(fn) - m = self.model.model[-1] # Detect() - m.stride = fn(m.stride) - m.grid = list(map(fn, m.grid)) - if isinstance(m.anchor_grid, list): - m.anchor_grid = list(map(fn, m.anchor_grid)) + if self.pt: + m = self.model.model.model[-1] if self.dmb else self.model.model[-1] # Detect() + m.stride = fn(m.stride) + m.grid = list(map(fn, m.grid)) + if isinstance(m.anchor_grid, list): + m.anchor_grid = list(map(fn, m.anchor_grid)) return self @torch.no_grad() @@ -465,7 +473,7 @@ def forward(self, imgs, size=640, augment=False, profile=False): # multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] 
# list of images t = [time_sync()] - p = next(self.model.parameters()) # for device and type + p = next(self.model.parameters()) if self.pt else torch.zeros(1) # for device and type if isinstance(imgs, torch.Tensor): # torch with amp.autocast(enabled=p.device.type != 'cpu'): return self.model(imgs.to(p.device).type_as(p), augment, profile) # inference @@ -489,8 +497,8 @@ def forward(self, imgs, size=640, augment=False, profile=False): g = (size / max(s)) # gain shape1.append([y * g for y in s]) imgs[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update - shape1 = [make_divisible(x, int(self.stride.max())) for x in np.stack(shape1, 0).max(0)] # inference shape - x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs] # pad + shape1 = [make_divisible(x, self.stride) for x in np.stack(shape1, 0).max(0)] # inference shape + x = [letterbox(im, new_shape=shape1 if self.pt else size, auto=False)[0] for im in imgs] # pad x = np.stack(x, 0) if n > 1 else x[0][None] # stack x = np.ascontiguousarray(x.transpose((0, 3, 1, 2))) # BHWC to BCHW x = torch.from_numpy(x).to(p.device).type_as(p) / 255 # uint8 to fp16/32 @@ -498,12 +506,12 @@ def forward(self, imgs, size=640, augment=False, profile=False): with amp.autocast(enabled=p.device.type != 'cpu'): # Inference - y = self.model(x, augment, profile)[0] # forward + y = self.model(x, augment, profile) # forward t.append(time_sync()) # Post-process - y = non_max_suppression(y, self.conf, iou_thres=self.iou, classes=self.classes, - multi_label=self.multi_label, max_det=self.max_det) # NMS + y = non_max_suppression(y if self.dmb else y[0], self.conf, iou_thres=self.iou, classes=self.classes, + agnostic=self.agnostic, multi_label=self.multi_label, max_det=self.max_det) # NMS for i in range(n): scale_coords(shape1, y[i][:, :4], shape0[i]) diff --git a/utils/general.py b/utils/general.py index 8aa76fbdb6ad..bbb9054a7235 100755 --- a/utils/general.py +++ b/utils/general.py @@ -455,7 +455,9 @@ def download_one(url, dir): def make_divisible(x, divisor): - # Returns x evenly divisible by divisor + # Returns nearest x divisible by divisor + if isinstance(divisor, torch.Tensor): + divisor = int(divisor.max()) # to int return math.ceil(x / divisor) * divisor From 1075488d893f2167737d89549c3f675b0713aa5a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 4 Dec 2021 16:28:40 +0100 Subject: [PATCH 521/757] Single-command multiple-model export (#5882) * Export multiple models in series Export multiple models in series by adding additional `*.pt` files to the `--weights` argument, i.e.: ```bash python export.py --include tflite --weights yolov5n.pt # export 1 model python export.py --include tflite --weights yolov5n.pt yolov5s.pt yolov5m.pt yolov5l.pt yolov5x.pt # export 5 models ``` * Update export.py * Update README.md --- README.md | 2 +- export.py | 23 ++++++++++++----------- 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index 6e72d85da7ee..3074330e5505 100644 --- a/README.md +++ b/README.md @@ -148,7 +148,7 @@ $ python train.py --data coco.yaml --cfg yolov5s.yaml --weights '' --batch-size * [Roboflow for Datasets, Labeling, and Active Learning](https://github.com/ultralytics/yolov5/issues/4975)  🌟 NEW * [Multi-GPU Training](https://github.com/ultralytics/yolov5/issues/475) * [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36)  ⭐ NEW -* [TorchScript, ONNX, CoreML Export](https://github.com/ultralytics/yolov5/issues/251) 🚀 +* [TFLite, ONNX, CoreML, TensorRT 
Export](https://github.com/ultralytics/yolov5/issues/251) 🚀 * [Test-Time Augmentation (TTA)](https://github.com/ultralytics/yolov5/issues/303) * [Model Ensembling](https://github.com/ultralytics/yolov5/issues/318) * [Model Pruning/Sparsity](https://github.com/ultralytics/yolov5/issues/304) diff --git a/export.py b/export.py index 21c83c697b4d..88d03a2c9475 100644 --- a/export.py +++ b/export.py @@ -2,17 +2,17 @@ """ Export a YOLOv5 PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit -Format | Example | Export `include=(...)` argument +Format | Example | `--include ...` argument --- | --- | --- PyTorch | yolov5s.pt | - -TorchScript | yolov5s.torchscript | 'torchscript' -ONNX | yolov5s.onnx | 'onnx' -CoreML | yolov5s.mlmodel | 'coreml' -TensorFlow SavedModel | yolov5s_saved_model/ | 'saved_model' -TensorFlow GraphDef | yolov5s.pb | 'pb' -TensorFlow Lite | yolov5s.tflite | 'tflite' -TensorFlow.js | yolov5s_web_model/ | 'tfjs' -TensorRT | yolov5s.engine | 'engine' +TorchScript | yolov5s.torchscript | `torchscript` +ONNX | yolov5s.onnx | `onnx` +CoreML | yolov5s.mlmodel | `coreml` +TensorFlow SavedModel | yolov5s_saved_model/ | `saved_model` +TensorFlow GraphDef | yolov5s.pb | `pb` +TensorFlow Lite | yolov5s.tflite | `tflite` +TensorFlow.js | yolov5s_web_model/ | `tfjs` +TensorRT | yolov5s.engine | `engine` Usage: $ python path/to/export.py --weights yolov5s.pt --include torchscript onnx coreml saved_model pb tflite tfjs @@ -400,7 +400,7 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' def parse_opt(): parser = argparse.ArgumentParser() parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') - parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='weights path') + parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model.pt path(s)') parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640, 640], help='image (h, w)') parser.add_argument('--batch-size', type=int, default=1, help='batch size') parser.add_argument('--device', default='cpu', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') @@ -427,7 +427,8 @@ def parse_opt(): def main(opt): - run(**vars(opt)) + for opt.weights in (opt.weights if isinstance(opt.weights, list) else [opt.weights]): + run(**vars(opt)) if __name__ == "__main__": From fa05f8c97798b228b79a61ae5d8d5251bbe34758 Mon Sep 17 00:00:00 2001 From: Li Zeng Date: Tue, 7 Dec 2021 16:01:41 +0100 Subject: [PATCH 522/757] `Detections().tolist()` explicit argument fix (#5907) debugged for misassigned Detections attributes --- models/common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/common.py b/models/common.py index 6a5303ba8c42..ec5fbfaec4ca 100644 --- a/models/common.py +++ b/models/common.py @@ -608,7 +608,7 @@ def pandas(self): def tolist(self): # return a list of Detections objects, i.e.
'for result in results.tolist():' - x = [Detections([self.imgs[i]], [self.pred[i]], self.names, self.s) for i in range(self.n)] + x = [Detections([self.imgs[i]], [self.pred[i]], names=self.names, shape=self.s) for i in range(self.n)] for d in x: for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']: setattr(d, k, getattr(d, k)[0]) # pop out of list From 3f152e58074514b2531cb43fb57db380e085cd09 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Tue, 7 Dec 2021 20:39:11 +0530 Subject: [PATCH 523/757] Update wandb_utils.py (#5908) --- utils/loggers/wandb/wandb_utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index 2d6133ab94c5..221d3c88c56e 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -186,6 +186,7 @@ def __init__(self, opt, run_id=None, job_type='Training'): self.setup_training(opt) if self.job_type == 'Dataset Creation': + self.wandb_run.config.update({"upload_dataset": True}) self.data_dict = self.check_and_upload_dataset(opt) def check_and_upload_dataset(self, opt): From 554f782537b9af336c02c013468b78fe16ce092d Mon Sep 17 00:00:00 2001 From: greg2451 <51173502+greg2451@users.noreply.github.com> Date: Tue, 7 Dec 2021 18:20:16 +0100 Subject: [PATCH 524/757] Add *.engine (TensorRT extensions) to .gitignore (#5911) * Add *.engine (TensorRT extensions) to .gitignore * Update .dockerignore Co-authored-by: Glenn Jocher --- .dockerignore | 1 + .gitignore | 1 + 2 files changed, 2 insertions(+) diff --git a/.dockerignore b/.dockerignore index 6c2f2b9b7725..4be8d4108e78 100644 --- a/.dockerignore +++ b/.dockerignore @@ -15,6 +15,7 @@ data/samples/* **/*.pt **/*.pth **/*.onnx +**/*.engine **/*.mlmodel **/*.torchscript **/*.torchscript.pt diff --git a/.gitignore b/.gitignore index 5f8cab550021..8bb082b0355a 100755 --- a/.gitignore +++ b/.gitignore @@ -48,6 +48,7 @@ VOC/ *.pt *.pb *.onnx +*.engine *.mlmodel *.torchscript *.tflite From 581dc301a70bef6d3e768adfe1a87b85e50e6268 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 8 Dec 2021 13:37:33 +0100 Subject: [PATCH 525/757] Add ONNX inference providers (#5918) * Add ONNX inference providers Fix for https://github.com/ultralytics/yolov5/issues/5916 * Update common.py --- models/common.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/models/common.py b/models/common.py index ec5fbfaec4ca..c269cfef9a6c 100644 --- a/models/common.py +++ b/models/common.py @@ -320,9 +320,11 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=False): net = cv2.dnn.readNetFromONNX(w) elif onnx: # ONNX Runtime LOGGER.info(f'Loading {w} for ONNX Runtime inference...') - check_requirements(('onnx', 'onnxruntime-gpu' if torch.cuda.is_available() else 'onnxruntime')) + cuda = torch.cuda.is_available() + check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime')) import onnxruntime - session = onnxruntime.InferenceSession(w, None) + providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider'] + session = onnxruntime.InferenceSession(w, providers=providers) elif engine: # TensorRT LOGGER.info(f'Loading {w} for TensorRT inference...') import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download From 7d56d451241e94cd9dbe4fcb9bfba0e92c6e0e23 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 8 Dec 2021 14:57:03 +0100 Subject: [PATCH 526/757] Add hardware checks to `notebook_init()` (#5919) * Update notebook * Update notebook * update 
string * update string * Updates * Updates * Updates * check both ipython and psutil * remove sample_data if is_colab * cleanup * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- tutorial.ipynb | 3 ++- utils/__init__.py | 31 +++++++++++++++++++++++++------ 2 files changed, 27 insertions(+), 7 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 4ce87c75aa64..45b27b7ab2cc 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -409,6 +409,7 @@ "%cd yolov5\n", "%pip install -qr requirements.txt # install\n", "\n", + "import torch\n", "from yolov5 import utils\n", "display = utils.notebook_init() # checks" ], @@ -983,7 +984,7 @@ "source": [ "# Reproduce\n", "for x in 'yolov5s', 'yolov5m', 'yolov5l', 'yolov5x':\n", - " !python val.py --weights {x}.pt --data coco.yaml --img 640 --conf 0.25 --iou 0.45 # speed\n", + " !python val.py --weights {x}.pt --data coco.yaml --img 640 --task speed # speed\n", " !python val.py --weights {x}.pt --data coco.yaml --img 640 --conf 0.001 --iou 0.65 # mAP" ], "execution_count": null, diff --git a/utils/__init__.py b/utils/__init__.py index 2b0c896364a2..ff93fd760059 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -4,15 +4,34 @@ """ -def notebook_init(): - # For YOLOv5 notebooks +def notebook_init(verbose=True): + # Check system software and hardware print('Checking setup...') + + import os + import shutil + + from utils.general import check_requirements, emojis, is_colab + from utils.torch_utils import select_device # imports + + check_requirements(('psutil', 'IPython')) + import psutil from IPython import display # to display images and clear console output - from utils.general import emojis - from utils.torch_utils import select_device # YOLOv5 imports + if is_colab(): + shutil.rmtree('sample_data', ignore_errors=True) # remove colab /sample_data directory + + if verbose: + # System info + # gb = 1 / 1000 ** 3 # bytes to GB + gib = 1 / 1024 ** 3 # bytes to GiB + ram = psutil.virtual_memory().total + total, used, free = shutil.disk_usage("/") + display.clear_output() + s = f'({os.cpu_count()} CPUs, {ram * gib:.1f} GB RAM, {(total - free) * gib:.1f}/{total * gib:.1f} GB disk)' + else: + s = '' - display.clear_output() select_device(newline=False) - print(emojis('Setup complete ✅')) + print(emojis(f'Setup complete ✅ {s}')) return display From a3d5f1d3e36d8e023806da0f0c744eef02591c9b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 8 Dec 2021 16:46:24 +0100 Subject: [PATCH 527/757] Revert "Update `plot_lr_scheduler()` (#5864)" (#5920) This reverts commit 360eec69101ad9ffada78326b715b724b9b7eb0d. 
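For context on the revert below: PATCH 518 replaced `copy()` with `deepcopy()` in `plot_lr_scheduler()` because a shallow copy shares nested state with the original optimizer and scheduler; this commit restores the shallow `copy()`. A minimal, hypothetical illustration of the difference (not YOLOv5 code):

```python
from copy import copy, deepcopy

opt = {'lr': 0.01, 'history': []}
shallow, deep = copy(opt), deepcopy(opt)
shallow['history'].append('step')  # shared inner list: mutates opt too
deep['history'].append('step')     # independent deep copy: opt unaffected
print(opt['history'])  # ['step'], leaked through the shallow copy only
```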
--- utils/plots.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/plots.py b/utils/plots.py index 5742d050fdf5..69037ee9af70 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -5,7 +5,7 @@ import math import os -from copy import deepcopy +from copy import copy from pathlib import Path import cv2 @@ -243,7 +243,7 @@ def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''): # Plot LR simulating training for full epochs - optimizer, scheduler = deepcopy(optimizer), deepcopy(scheduler) # do not modify originals + optimizer, scheduler = copy(optimizer), copy(scheduler) # do not modify originals y = [] for _ in range(epochs): scheduler.step() From c77a5a84e3c6083d4e707d2e252c1499e294495c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 8 Dec 2021 17:31:31 +0100 Subject: [PATCH 528/757] Absolute '/content/sample_data' (#5922) --- utils/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/__init__.py b/utils/__init__.py index ff93fd760059..4658ed6473cd 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -19,7 +19,7 @@ def notebook_init(verbose=True): from IPython import display # to display images and clear console output if is_colab(): - shutil.rmtree('sample_data', ignore_errors=True) # remove colab /sample_data directory + shutil.rmtree('/content/sample_data', ignore_errors=True) # remove colab /sample_data directory if verbose: # System info From 5bdb28ed1083c63a7837dfc2ef7bf00402a02dd1 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 8 Dec 2021 23:15:14 +0100 Subject: [PATCH 529/757] Default PyTorch Hub to `autocast(False)` (#5926) --- models/common.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/models/common.py b/models/common.py index c269cfef9a6c..b39017378577 100644 --- a/models/common.py +++ b/models/common.py @@ -443,6 +443,7 @@ class AutoShape(nn.Module): multi_label = False # NMS multiple labels per box classes = None # (optional list) filter by class, i.e. 
= [0, 15, 16] for COCO persons, cats and dogs max_det = 1000 # maximum number of detections per image + amp = False # Automatic Mixed Precision (AMP) inference def __init__(self, model): super().__init__() @@ -476,8 +477,9 @@ def forward(self, imgs, size=640, augment=False, profile=False): t = [time_sync()] p = next(self.model.parameters()) if self.pt else torch.zeros(1) # for device and type + autocast = self.amp and (p.device.type != 'cpu') # Automatic Mixed Precision (AMP) inference if isinstance(imgs, torch.Tensor): # torch - with amp.autocast(enabled=p.device.type != 'cpu'): + with amp.autocast(enabled=autocast): return self.model(imgs.to(p.device).type_as(p), augment, profile) # inference # Pre-process @@ -506,7 +508,7 @@ def forward(self, imgs, size=640, augment=False, profile=False): x = torch.from_numpy(x).to(p.device).type_as(p) / 255 # uint8 to fp16/32 t.append(time_sync()) - with amp.autocast(enabled=p.device.type != 'cpu'): + with amp.autocast(enabled=autocast): # Inference y = self.model(x, augment, profile) # forward t.append(time_sync()) From 4fb6dd4b26a4d1c39a1e2565999be62f53fb7c71 Mon Sep 17 00:00:00 2001 From: Diego Montes <54745152+d57montes@users.noreply.github.com> Date: Thu, 9 Dec 2021 17:10:16 -0500 Subject: [PATCH 530/757] Fix ONNX opset inconsistency with parseargs and run args (#5937) --- export.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/export.py b/export.py index 88d03a2c9475..4f83c75c89a0 100644 --- a/export.py +++ b/export.py @@ -325,7 +325,7 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' int8=False, # CoreML/TF INT8 quantization dynamic=False, # ONNX/TF: dynamic axes simplify=False, # ONNX: simplify model - opset=12, # ONNX: opset version + opset=14, # ONNX: opset version verbose=False, # TensorRT: verbose log workspace=4, # TensorRT: workspace size (GB) topk_per_class=100, # TF.js NMS: topk per class to keep From c45f9f678d7a17d37aaac50c324a82509d9c3cde Mon Sep 17 00:00:00 2001 From: Felix You <35478566+youyuxiansen@users.noreply.github.com> Date: Fri, 10 Dec 2021 22:27:20 +0800 Subject: [PATCH 531/757] Make `select_device()` robust to `batch_size=-1` (#5940) * Found a bug when setting batch_size = -1 to use autobatch; to reproduce: * Fix type conflict Co-authored-by: Glenn Jocher --- utils/torch_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 16289104eb48..cddb173948fb 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -53,7 +53,7 @@ def git_describe(path=Path(__file__).parent): # path must be a directory return '' # not a git repository -def select_device(device='', batch_size=None, newline=True): +def select_device(device='', batch_size=0, newline=True): # device = 'cpu' or '0' or '0,1,2,3' s = f'YOLOv5 🚀 {git_describe() or date_modified()} torch {torch.__version__} ' # string device = str(device).strip().lower().replace('cuda:', '') # to string, 'cuda:0' to '0' @@ -68,7 +68,7 @@ def select_device(device='', batch_size=None, newline=True): if cuda: devices = device.split(',') if device else '0' # range(torch.cuda.device_count()) # i.e.
0,1,6,7 n = len(devices) # device count - if n > 1 and batch_size: # check batch_size is divisible by device_count + if n > 1 and batch_size > 0: # check batch_size is divisible by device_count assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}' space = ' ' * (len(s) + 1) for i, d in enumerate(devices): From 922fbd82094e051553400cd4de8b63b9d202eee2 Mon Sep 17 00:00:00 2001 From: Pascal Maillard Date: Fri, 10 Dec 2021 17:32:09 +0100 Subject: [PATCH 532/757] fix .gitignore not tracking existing folders (#5946) * fix .gitignore not tracking existing folders fix .gitignore so that the files that are in the repository are actually being tracked. Everything in the data/ folder is ignored, which also means the subdirectories are ignored. Fix so that the subdirectories and their contents are still tracked. * Remove data/trainings Co-authored-by: Glenn Jocher --- .gitignore | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 8bb082b0355a..327dc8566681 100755 --- a/.gitignore +++ b/.gitignore @@ -26,7 +26,11 @@ storage.googleapis.com runs/* data/* -!data/hyps/* +data/images/* +!data/*.yaml +!data/hyps +!data/scripts +!data/images !data/images/zidane.jpg !data/images/bus.jpg !data/*.sh From a42af30d8a011fb84dde6aaba89723e2c5d7a3d3 Mon Sep 17 00:00:00 2001 From: iumyx2612 <69593462+iumyx2612@users.noreply.github.com> Date: Sat, 11 Dec 2021 00:06:27 +0700 Subject: [PATCH 533/757] Update `strip_optimizer()` (#5949) Replace 'training_result' with 'best_fitness' in strip_optimizer() to match key with ckpt from train.py --- utils/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index bbb9054a7235..6cc658cc3150 100755 --- a/utils/general.py +++ b/utils/general.py @@ -738,7 +738,7 @@ def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_op x = torch.load(f, map_location=torch.device('cpu')) if x.get('ema'): x['model'] = x['ema'] # replace model with ema - for k in 'optimizer', 'training_results', 'wandb_id', 'ema', 'updates': # keys + for k in 'optimizer', 'best_fitness', 'wandb_id', 'ema', 'updates': # keys x[k] = None x['epoch'] = -1 x['model'].half() # to FP16 From 2c6317547a46a2dfc414fe1a5886fb9f63c14bf4 Mon Sep 17 00:00:00 2001 From: Diego Montes <54745152+d57montes@users.noreply.github.com> Date: Fri, 10 Dec 2021 12:24:32 -0500 Subject: [PATCH 534/757] Add nms and agnostic nms to export.py (#5938) * add nms and agnostic nms to export.py * fix agnostic implies nms * reorder args to group TF args * PEP8 120 char Co-authored-by: Glenn Jocher --- export.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/export.py b/export.py index 4f83c75c89a0..7feb525711e8 100644 --- a/export.py +++ b/export.py @@ -328,6 +328,8 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' opset=14, # ONNX: opset version verbose=False, # TensorRT: verbose log workspace=4, # TensorRT: workspace size (GB) + nms=False, # TF: add NMS to model + agnostic_nms=False, # TF: add agnostic NMS to model topk_per_class=100, # TF.js NMS: topk per class to keep topk_all=100, # TF.js NMS: topk for all classes to keep iou_thres=0.45, # TF.js NMS: IoU threshold @@ -381,9 +383,9 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' if any(tf_exports): pb, tflite, tfjs = tf_exports[1:] assert not (tflite and tfjs), 'TFLite and TF.js models must be exported separately, please pass only one type.' 
- model = export_saved_model(model, im, file, dynamic, tf_nms=tfjs, agnostic_nms=tfjs, - topk_per_class=topk_per_class, topk_all=topk_all, conf_thres=conf_thres, - iou_thres=iou_thres) # keras model + model = export_saved_model(model, im, file, dynamic, tf_nms=nms or agnostic_nms or tfjs, + agnostic_nms=agnostic_nms or tfjs, topk_per_class=topk_per_class, topk_all=topk_all, + conf_thres=conf_thres, iou_thres=iou_thres) # keras model if pb or tfjs: # pb prerequisite to tfjs export_pb(model, im, file) if tflite: @@ -414,6 +416,8 @@ def parse_opt(): parser.add_argument('--opset', type=int, default=14, help='ONNX: opset version') parser.add_argument('--verbose', action='store_true', help='TensorRT: verbose log') parser.add_argument('--workspace', type=int, default=4, help='TensorRT: workspace size (GB)') + parser.add_argument('--nms', action='store_true', help='TF: add NMS to model') + parser.add_argument('--agnostic-nms', action='store_true', help='TF: add agnostic NMS to model') parser.add_argument('--topk-per-class', type=int, default=100, help='TF.js NMS: topk per class to keep') parser.add_argument('--topk-all', type=int, default=100, help='TF.js NMS: topk for all classes to keep') parser.add_argument('--iou-thres', type=float, default=0.45, help='TF.js NMS: IoU threshold') From 8f875d93a258d2b8b27a19499058f755af8aec4f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 11 Dec 2021 14:32:18 +0100 Subject: [PATCH 535/757] Refactor NUM_THREADS (#5954) --- utils/datasets.py | 5 ++--- utils/general.py | 9 +++++---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index 5a3b2110b2e0..79b871c9294b 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -26,8 +26,8 @@ from tqdm import tqdm from utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective -from utils.general import (LOGGER, check_dataset, check_requirements, check_yaml, clean_str, segments2boxes, xyn2xy, - xywh2xyxy, xywhn2xyxy, xyxy2xywhn) +from utils.general import (LOGGER, NUM_THREADS, check_dataset, check_requirements, check_yaml, clean_str, + segments2boxes, xyn2xy, xywh2xyxy, xywhn2xyxy, xyxy2xywhn) from utils.torch_utils import torch_distributed_zero_first # Parameters @@ -35,7 +35,6 @@ IMG_FORMATS = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo'] # acceptable image suffixes VID_FORMATS = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) # DPP -NUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of multiprocessing threads # Get orientation exif tag for orientation in ExifTags.TAGS.keys(): diff --git a/utils/general.py b/utils/general.py index 6cc658cc3150..1da8a147510e 100755 --- a/utils/general.py +++ b/utils/general.py @@ -33,14 +33,15 @@ from utils.metrics import box_iou, fitness # Settings +FILE = Path(__file__).resolve() +ROOT = FILE.parents[1] # YOLOv5 root directory +NUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads + torch.set_printoptions(linewidth=320, precision=5, profile='long') np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5 pd.options.display.max_columns = 10 cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader) -os.environ['NUMEXPR_MAX_THREADS'] = str(min(os.cpu_count(), 8)) # NumExpr max threads - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[1] # 
YOLOv5 root directory +os.environ['NUMEXPR_MAX_THREADS'] = str(NUM_THREADS) # NumExpr max threads def set_logging(name=None, verbose=True): From 8f354362cd94c70908bf6168951b07bd32715ebe Mon Sep 17 00:00:00 2001 From: Yono Mittlefehldt Date: Sat, 11 Dec 2021 18:40:37 +0100 Subject: [PATCH 536/757] Fix Detections class `tolist()` method (#5945) * Fix tolist() to add the file for each Detection * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix PEP8 requirement for 2 spaces before an inline comment * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Cleanup Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- models/common.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/models/common.py b/models/common.py index b39017378577..c2edff4d3021 100644 --- a/models/common.py +++ b/models/common.py @@ -525,7 +525,7 @@ def forward(self, imgs, size=640, augment=False, profile=False): class Detections: # YOLOv5 detections class for inference results - def __init__(self, imgs, pred, files, times=None, names=None, shape=None): + def __init__(self, imgs, pred, files, times=(0, 0, 0, 0), names=None, shape=None): super().__init__() d = pred[0].device # device gn = [torch.tensor([*(im.shape[i] for i in [1, 0, 1, 0]), 1, 1], device=d) for im in imgs] # normalizations @@ -533,6 +533,7 @@ def __init__(self, imgs, pred, files, times=None, names=None, shape=None): self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls) self.names = names # class names self.files = files # image filenames + self.times = times # profiling times self.xyxy = pred # xyxy pixels self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized @@ -612,10 +613,11 @@ def pandas(self): def tolist(self): # return a list of Detections objects, i.e. 
'for result in results.tolist():' - x = [Detections([self.imgs[i]], [self.pred[i]], names=self.names, shape=self.s) for i in range(self.n)] - for d in x: - for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']: - setattr(d, k, getattr(d, k)[0]) # pop out of list + r = range(self.n) # iterable + x = [Detections([self.imgs[i]], [self.pred[i]], [self.files[i]], self.times, self.names, self.s) for i in r] + # for d in x: + # for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']: + # setattr(d, k, getattr(d, k)[0]) # pop out of list return x def __len__(self): From 19c56e60b100cf8ff9af65b4347de69e0cff76ae Mon Sep 17 00:00:00 2001 From: Diego Montes <54745152+d57montes@users.noreply.github.com> Date: Sun, 12 Dec 2021 17:39:14 -0500 Subject: [PATCH 537/757] Fix `imgsz` bug (#5948) * fix imgsz bug * Update detect.py Co-authored-by: Glenn Jocher --- detect.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/detect.py b/detect.py index 0b6875e5564c..14cdf96ca9db 100644 --- a/detect.py +++ b/detect.py @@ -38,7 +38,7 @@ @torch.no_grad() def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s) source=ROOT / 'data/images', # file/dir/URL/glob, 0 for webcam - imgsz=640, # inference size (pixels) + imgsz=(640, 640), # inference size (height, width) conf_thres=0.25, # confidence threshold iou_thres=0.45, # NMS IOU threshold max_det=1000, # maximum detections per image From e8ef8fb1ca34436577cf6d1f3933b0c30e19992c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 13 Dec 2021 13:32:27 +0100 Subject: [PATCH 538/757] `pretrained=False` fix (#5966) * `pretriained=False` fix Fix for https://github.com/ultralytics/yolov5/issues/5964 * CI speed improvement --- hubconf.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hubconf.py b/hubconf.py index e407677b3233..6bf4b0b0265f 100644 --- a/hubconf.py +++ b/hubconf.py @@ -46,7 +46,7 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo model = DetectMultiBackend(path, device=device) # download/load FP32 model # model = models.experimental.attempt_load(path, map_location=device) # download/load FP32 model else: - cfg = list((Path(__file__).parent / 'models').rglob(f'{path.name}.yaml'))[0] # model.yaml path + cfg = list((Path(__file__).parent / 'models').rglob(f'{path.stem}.yaml'))[0] # model.yaml path model = Model(cfg, channels, classes) # create model if pretrained: ckpt = torch.load(attempt_download(path), map_location=device) # load @@ -138,6 +138,6 @@ def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=Tr Image.open('data/images/bus.jpg'), # PIL np.zeros((320, 640, 3))] # numpy - results = model(imgs) # batched inference + results = model(imgs, size=320) # batched inference results.print() results.save() From 2d0c6afbfea3c844ebdd30388ae668efa3243f7f Mon Sep 17 00:00:00 2001 From: jinmc Date: Tue, 14 Dec 2021 19:18:34 +0900 Subject: [PATCH 539/757] make parameter ignore epochs (#5972) * make parameter ignore epochs ignore epochs functionality add to prevent spikes at the beginning when fitness spikes and decreases after. 
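The diff body for this patch is not reproduced above. As a minimal sketch of the idea (ignore an initial warm-up window so that a transient early fitness spike cannot freeze the best checkpoint or trigger stopping), with all names assumed for illustration rather than taken from the patch:

```python
class EarlyStopping:
    # Sketch only: ignore the first `ignore` epochs, then stop after
    # `patience` epochs without a fitness improvement.
    def __init__(self, patience=30, ignore=5):
        self.best_fitness = 0.0
        self.best_epoch = 0
        self.patience = patience or float('inf')  # epochs to wait after fitness stops improving
        self.ignore = ignore  # assumed warm-up window whose fitness spikes are discounted

    def __call__(self, epoch, fitness):
        if epoch >= self.ignore and fitness >= self.best_fitness:
            self.best_epoch = epoch
            self.best_fitness = fitness
        return (epoch - self.best_epoch) >= self.patience  # True -> stop training
```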
Discussed at https://github.com/ultralytics/yolov5/issues/5971 * Update train.py Co-authored-by: Glenn Jocher From d699c21c752a9d9ca26232fabb91d55e4daea5f8 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 14 Dec 2021 11:24:39 +0100 Subject: [PATCH 540/757] YOLOv5s6 params and FLOPs fix (#5977) --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 3074330e5505..fa0645d4fd2c 100644 --- a/README.md +++ b/README.md @@ -236,9 +236,9 @@ We are super excited about our first-ever Ultralytics YOLOv5 🚀 EXPORT Competi |[YOLOv5x][assets] |640 |50.7 |68.9 |766 |12.1 |4.8 |86.7 |205.7 | | | | | | | | | |[YOLOv5n6][assets] |1280 |34.0 |50.7 |153 |8.1 |2.1 |3.2 |4.6 -|[YOLOv5s6][assets] |1280 |44.5 |63.0 |385 |8.2 |3.6 |16.8 |12.6 +|[YOLOv5s6][assets] |1280 |44.5 |63.0 |385 |8.2 |3.6 |12.6 |16.8 |[YOLOv5m6][assets] |1280 |51.0 |69.0 |887 |11.1 |6.8 |35.7 |50.0 -|[YOLOv5l6][assets] |1280 |53.6 |71.6 |1784 |15.8 |10.5 |76.8 |111.4 +|[YOLOv5l6][assets] |1280 |53.6 |71.6 |1784 |15.8 |10.5 |76.7 |111.4 |[YOLOv5x6][assets]
<br>+ [TTA][TTA]|1280<br>1536 |54.7<br>**55.4** |**72.4**<br>72.3 |3136<br>- |26.2<br>- |19.4<br>- |140.7<br>- |209.8<br>-
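The [TTA] entry in the row above denotes Test-Time Augmentation, which runs inference over augmented versions of each image and merges the detections. A minimal sketch of TTA inference through PyTorch Hub (illustrative, not part of this patch; downloads yolov5x6.pt on first use):

```python
import torch

model = torch.hub.load('ultralytics/yolov5', 'yolov5x6')  # load AutoShape model from hubconf.py
results = model('data/images/bus.jpg', augment=True)  # augment=True enables TTA at inference
results.print()  # print detections; .show(), .save(), .pandas() also available
```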
From c9a46a60e09ab94009754ca71bde23e91aab33fe Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 14 Dec 2021 15:47:49 +0100 Subject: [PATCH 541/757] Update callbacks.py with `__init__()` (#5979) Add __init__() function. --- utils/callbacks.py | 43 ++++++++++++++++++++++--------------------- 1 file changed, 22 insertions(+), 21 deletions(-) diff --git a/utils/callbacks.py b/utils/callbacks.py index 327b8639b60c..c9d936ef082d 100644 --- a/utils/callbacks.py +++ b/utils/callbacks.py @@ -9,31 +9,32 @@ class Callbacks: Handles all registered callbacks for YOLOv5 Hooks """ - # Define the available callbacks - _callbacks = { - 'on_pretrain_routine_start': [], - 'on_pretrain_routine_end': [], + def __init__(self): + # Define the available callbacks + self._callbacks = { + 'on_pretrain_routine_start': [], + 'on_pretrain_routine_end': [], - 'on_train_start': [], - 'on_train_epoch_start': [], - 'on_train_batch_start': [], - 'optimizer_step': [], - 'on_before_zero_grad': [], - 'on_train_batch_end': [], - 'on_train_epoch_end': [], + 'on_train_start': [], + 'on_train_epoch_start': [], + 'on_train_batch_start': [], + 'optimizer_step': [], + 'on_before_zero_grad': [], + 'on_train_batch_end': [], + 'on_train_epoch_end': [], - 'on_val_start': [], - 'on_val_batch_start': [], - 'on_val_image_end': [], - 'on_val_batch_end': [], - 'on_val_end': [], + 'on_val_start': [], + 'on_val_batch_start': [], + 'on_val_image_end': [], + 'on_val_batch_end': [], + 'on_val_end': [], - 'on_fit_epoch_end': [], # fit = train + val - 'on_model_save': [], - 'on_train_end': [], + 'on_fit_epoch_end': [], # fit = train + val + 'on_model_save': [], + 'on_train_end': [], - 'teardown': [], - } + 'teardown': [], + } def register_action(self, hook, name='', callback=None): """ From b7d18f363665791bfaf58cd110dc162ebb5df3b5 Mon Sep 17 00:00:00 2001 From: Mrinal Jain Date: Wed, 15 Dec 2021 19:12:23 +0530 Subject: [PATCH 542/757] Increase `ar_thr` from 20 to 100 for better detection on slender (high aspect ratio) objects (#5556) * Making `ar_thr` available as a hyperparameter * Disabling ar_thr as hyperparameter and computing from the dataset instead * Fixing bug in ar_thr computation * Fix `ar_thr` to 100 --- utils/augmentations.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/augmentations.py b/utils/augmentations.py index 5dcfd49fdd05..0311b97b63db 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -269,7 +269,7 @@ def mixup(im, labels, im2, labels2): return im, labels -def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n) +def box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n) # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio w1, h1 = box1[2] - box1[0], box1[3] - box1[1] w2, h2 = box2[2] - box2[0], box2[3] - box2[1] From da9a1b719ba7d10e209ff89efe28b074fb9a5f16 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 15 Dec 2021 15:27:08 +0100 Subject: [PATCH 543/757] Allow `--weights URL` (#5991) --- models/common.py | 4 ++-- utils/downloads.py | 9 ++++++--- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/models/common.py b/models/common.py index c2edff4d3021..4f1afa13396c 100644 --- a/models/common.py +++ b/models/common.py @@ -296,7 +296,7 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=False): check_suffix(w, suffixes) # check weights have acceptable suffix pt, jit, onnx, engine, tflite, pb, saved_model, 
coreml = (suffix == x for x in suffixes) # backend booleans stride, names = 64, [f'class{i}' for i in range(1000)] # assign defaults - attempt_download(w) # download if not local + w = attempt_download(w) # download if not local if jit: # TorchScript LOGGER.info(f'Loading {w} for TorchScript inference...') @@ -306,7 +306,7 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=False): d = json.loads(extra_files['config.txt']) # extra_files dict stride, names = int(d['stride']), d['names'] elif pt: # PyTorch - model = attempt_load(weights, map_location=device) + model = attempt_load(weights if isinstance(weights, list) else w, map_location=device) stride = int(model.stride.max()) # model stride names = model.module.names if hasattr(model, 'module') else model.names # get class names self.model = model # explicitly assign for to(), cpu(), cuda(), half() diff --git a/utils/downloads.py b/utils/downloads.py index 998a7a582a33..a8bacae4420f 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -49,9 +49,12 @@ def attempt_download(file, repo='ultralytics/yolov5'): # from utils.downloads i name = Path(urllib.parse.unquote(str(file))).name # decode '%2F' to '/' etc. if str(file).startswith(('http:/', 'https:/')): # download url = str(file).replace(':/', '://') # Pathlib turns :// -> :/ - name = name.split('?')[0] # parse authentication https://url.com/file.txt?auth... - safe_download(file=name, url=url, min_bytes=1E5) - return name + file = name.split('?')[0] # parse authentication https://url.com/file.txt?auth... + if Path(file).is_file(): + print(f'Found {url} locally at {file}') # file already exists + else: + safe_download(file=file, url=url, min_bytes=1E5) + return file # GitHub assets file.parent.mkdir(parents=True, exist_ok=True) # make parent dir (if required) From 628817dfae670302a69f83a7c44431877f90eb3f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 15 Dec 2021 17:19:19 +0100 Subject: [PATCH 544/757] Recommend `jar xf file.zip` for zips (#5993) --- data/xView.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/xView.yaml b/data/xView.yaml index fabcdb0bdd13..5fcb7479d0af 100644 --- a/data/xView.yaml +++ b/data/xView.yaml @@ -1,6 +1,6 @@ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license # xView 2018 dataset https://challenge.xviewdataset.org -# -------- DOWNLOAD DATA MANUALLY from URL above and unzip to 'datasets/xView' before running train command! -------- +# -------- DOWNLOAD DATA MANUALLY and jar xf val_images.zip to 'datasets/xView' before running train command! 
-------- # Example usage: python train.py --data xView.yaml # parent # ├── yolov5 From c1249a47c7fe19e2067cb25ed8347e67d26ff1f1 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 16 Dec 2021 14:10:54 +0100 Subject: [PATCH 545/757] *.torchscript inference `self.jit` fix (#6007) --- models/common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/common.py b/models/common.py index 4f1afa13396c..cfecb20d2141 100644 --- a/models/common.py +++ b/models/common.py @@ -379,7 +379,7 @@ def wrap_frozen_graph(gd, inputs, outputs): def forward(self, im, augment=False, visualize=False, val=False): # YOLOv5 MultiBackend inference b, ch, h, w = im.shape # batch, channel, height, width - if self.pt: # PyTorch + if self.pt or self.jit: # PyTorch y = self.model(im) if self.jit else self.model(im, augment=augment, visualize=visualize) return y if val else y[0] elif self.coreml: # CoreML From 407a9057478d3deea0a9984af42162d21afa2bd2 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 17 Dec 2021 14:59:46 +0100 Subject: [PATCH 546/757] Check TensorRT>=8.0.0 version (#6021) * Check TensorRT>=8.0.0 version * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- models/common.py | 5 +++-- utils/general.py | 12 +++++++----- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/models/common.py b/models/common.py index cfecb20d2141..4fd608f4b3e2 100644 --- a/models/common.py +++ b/models/common.py @@ -21,8 +21,8 @@ from torch.cuda import amp from utils.datasets import exif_transpose, letterbox -from utils.general import (LOGGER, check_requirements, check_suffix, colorstr, increment_path, make_divisible, - non_max_suppression, scale_coords, xywh2xyxy, xyxy2xywh) +from utils.general import (LOGGER, check_requirements, check_suffix, check_version, colorstr, increment_path, + make_divisible, non_max_suppression, scale_coords, xywh2xyxy, xyxy2xywh) from utils.plots import Annotator, colors, save_one_box from utils.torch_utils import copy_attr, time_sync @@ -328,6 +328,7 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=False): elif engine: # TensorRT LOGGER.info(f'Loading {w} for TensorRT inference...') import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download + check_version(trt.__version__, '8.0.0', verbose=True) # version requirement Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr')) logger = trt.Logger(trt.Logger.INFO) with open(w, 'rb') as f, trt.Runtime(logger) as runtime: diff --git a/utils/general.py b/utils/general.py index 1da8a147510e..7ff397fb4caa 100755 --- a/utils/general.py +++ b/utils/general.py @@ -248,14 +248,16 @@ def check_python(minimum='3.6.2'): check_version(platform.python_version(), minimum, name='Python ', hard=True) -def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False): +def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False, verbose=False): # Check version vs. 
required version current, minimum = (pkg.parse_version(x) for x in (current, minimum)) result = (current == minimum) if pinned else (current >= minimum) # bool - if hard: # assert min requirements met - assert result, f'{name}{minimum} required by YOLOv5, but {name}{current} is currently installed' - else: - return result + s = f'{name}{minimum} required by YOLOv5, but {name}{current} is currently installed' # string + if hard: + assert result, s # assert min requirements met + if verbose and not result: + LOGGER.warning(s) + return result @try_except From 361705d9be532cfff592b8a89db40c8218ed1df2 Mon Sep 17 00:00:00 2001 From: Felix You <35478566+youyuxiansen@users.noreply.github.com> Date: Fri, 17 Dec 2021 22:42:26 +0800 Subject: [PATCH 547/757] Multi-layer capable `--freeze` argument (#6019) * support specfiy multiple frozen layers * fix bug * Cleanup Freeze section * Cleanup argument Co-authored-by: Glenn Jocher --- train.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/train.py b/train.py index 8cb68fc0748e..ae19c1851d62 100644 --- a/train.py +++ b/train.py @@ -124,7 +124,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary model = Model(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create # Freeze - freeze = [f'model.{x}.' for x in range(freeze)] # layers to freeze + freeze = [f'model.{x}.' for x in (freeze if isinstance(freeze, list) else range(freeze))] # layers to freeze for k, v in model.named_parameters(): v.requires_grad = True # train all layers if any(x in k for x in freeze): @@ -469,7 +469,7 @@ def parse_opt(known=False): parser.add_argument('--linear-lr', action='store_true', help='linear LR') parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon') parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)') - parser.add_argument('--freeze', type=int, default=0, help='Number of layers to freeze. backbone=10, all=24') + parser.add_argument('--freeze', nargs='+', type=int, default=0, help='Freeze layers: backbone=10, first3=0 1 2') parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)') parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify') From abbdd4802ea1f01c9e8c723d3792e0e0a92d604e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 17 Dec 2021 16:43:00 +0100 Subject: [PATCH 548/757] train -> val comment fix (#6024) --- data/coco.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/coco.yaml b/data/coco.yaml index 2ccc6478b620..348a3d48c412 100644 --- a/data/coco.yaml +++ b/data/coco.yaml @@ -10,7 +10,7 @@ # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
path: ../datasets/coco # dataset root dir train: train2017.txt # train images (relative to 'path') 118287 images -val: val2017.txt # train images (relative to 'path') 5000 images +val: val2017.txt # val images (relative to 'path') 5000 images test: test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794 # Classes From 26f0415287b7fa333f559a8300cedc2274943ab6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 19 Dec 2021 15:19:04 +0100 Subject: [PATCH 549/757] Add dataset source citations (#6032) --- data/Argoverse.yaml | 2 +- data/GlobalWheat2020.yaml | 2 +- data/Objects365.yaml | 2 +- data/SKU-110K.yaml | 2 +- data/VOC.yaml | 2 +- data/VisDrone.yaml | 2 +- data/coco.yaml | 2 +- data/coco128.yaml | 2 +- data/xView.yaml | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/data/Argoverse.yaml b/data/Argoverse.yaml index 1625dd1b9d2b..312791b33a2d 100644 --- a/data/Argoverse.yaml +++ b/data/Argoverse.yaml @@ -1,5 +1,5 @@ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license -# Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ +# Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ by Argo AI # Example usage: python train.py --data Argoverse.yaml # parent # ├── yolov5 diff --git a/data/GlobalWheat2020.yaml b/data/GlobalWheat2020.yaml index 75b3bfdff43e..869dace0be2b 100644 --- a/data/GlobalWheat2020.yaml +++ b/data/GlobalWheat2020.yaml @@ -1,5 +1,5 @@ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license -# Global Wheat 2020 dataset http://www.global-wheat.com/ +# Global Wheat 2020 dataset http://www.global-wheat.com/ by University of Saskatchewan # Example usage: python train.py --data GlobalWheat2020.yaml # parent # ├── yolov5 diff --git a/data/Objects365.yaml b/data/Objects365.yaml index b10c28e764c1..4c7cf3fdb2c8 100644 --- a/data/Objects365.yaml +++ b/data/Objects365.yaml @@ -1,5 +1,5 @@ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license -# Objects365 dataset https://www.objects365.org/ +# Objects365 dataset https://www.objects365.org/ by Megvii # Example usage: python train.py --data Objects365.yaml # parent # ├── yolov5 diff --git a/data/SKU-110K.yaml b/data/SKU-110K.yaml index 653485e2079a..9481b7a04aee 100644 --- a/data/SKU-110K.yaml +++ b/data/SKU-110K.yaml @@ -1,5 +1,5 @@ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license -# SKU-110K retail items dataset https://github.com/eg4000/SKU110K_CVPR19 +# SKU-110K retail items dataset https://github.com/eg4000/SKU110K_CVPR19 by Trax Retail # Example usage: python train.py --data SKU-110K.yaml # parent # ├── yolov5 diff --git a/data/VOC.yaml b/data/VOC.yaml index 8dbaacf9c290..975d56466de1 100644 --- a/data/VOC.yaml +++ b/data/VOC.yaml @@ -1,5 +1,5 @@ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license -# PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC +# PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC by University of Oxford # Example usage: python train.py --data VOC.yaml # parent # ├── yolov5 diff --git a/data/VisDrone.yaml b/data/VisDrone.yaml index 7753da98269e..83a5c7d55e06 100644 --- a/data/VisDrone.yaml +++ b/data/VisDrone.yaml @@ -1,5 +1,5 @@ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license -# VisDrone2019-DET dataset https://github.com/VisDrone/VisDrone-Dataset +# VisDrone2019-DET dataset https://github.com/VisDrone/VisDrone-Dataset by Tianjin University # Example usage: python train.py --data VisDrone.yaml # parent # ├── yolov5 diff --git a/data/coco.yaml b/data/coco.yaml index 348a3d48c412..3ed7e48a2185 
100644 --- a/data/coco.yaml +++ b/data/coco.yaml @@ -1,5 +1,5 @@ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license -# COCO 2017 dataset http://cocodataset.org +# COCO 2017 dataset http://cocodataset.org by Microsoft # Example usage: python train.py --data coco.yaml # parent # ├── yolov5 diff --git a/data/coco128.yaml b/data/coco128.yaml index 84a91b18359d..d07c704407a1 100644 --- a/data/coco128.yaml +++ b/data/coco128.yaml @@ -1,5 +1,5 @@ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license -# COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) +# COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics # Example usage: python train.py --data coco128.yaml # parent # ├── yolov5 diff --git a/data/xView.yaml b/data/xView.yaml index 5fcb7479d0af..fd82828dcb8c 100644 --- a/data/xView.yaml +++ b/data/xView.yaml @@ -1,5 +1,5 @@ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license -# xView 2018 dataset https://challenge.xviewdataset.org +# DIUx xView 2018 Challenge https://challenge.xviewdataset.org by U.S. National Geospatial-Intelligence Agency (NGA) # -------- DOWNLOAD DATA MANUALLY and jar xf val_images.zip to 'datasets/xView' before running train command! -------- # Example usage: python train.py --data xView.yaml # parent From 0db9d5b6a217e3603622884b906dcd4c8008685c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 20 Dec 2021 17:30:46 +0100 Subject: [PATCH 550/757] Kaggle `LOGGER` fix (#6041) --- utils/general.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/utils/general.py b/utils/general.py index 7ff397fb4caa..e63ac3e20b62 100755 --- a/utils/general.py +++ b/utils/general.py @@ -46,6 +46,8 @@ def set_logging(name=None, verbose=True): # Sets level and returns logger + for h in logging.root.handlers[:]: + logging.root.removeHandler(h) # remove all handlers associated with the root logger object rank = int(os.getenv('RANK', -1)) # rank in world for Multi-GPU trainings logging.basicConfig(format="%(message)s", level=logging.INFO if (verbose and rank in (-1, 0)) else logging.WARNING) return logging.getLogger(name) From b8a4babd603d09f518581c589fc8607993a5e192 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 20 Dec 2021 17:42:52 +0100 Subject: [PATCH 551/757] Simplify `set_logging()` indexing (#6042) --- utils/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index e63ac3e20b62..ef27eb570ffa 100755 --- a/utils/general.py +++ b/utils/general.py @@ -46,7 +46,7 @@ def set_logging(name=None, verbose=True): # Sets level and returns logger - for h in logging.root.handlers[:]: + for h in logging.root.handlers: logging.root.removeHandler(h) # remove all handlers associated with the root logger object rank = int(os.getenv('RANK', -1)) # rank in world for Multi-GPU trainings logging.basicConfig(format="%(message)s", level=logging.INFO if (verbose and rank in (-1, 0)) else logging.WARNING) From dc54ed5763720ced4f6784552c47534af5413d45 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 20 Dec 2021 18:24:07 +0100 Subject: [PATCH 552/757] `--freeze` fix (#6044) Fix for https://github.com/ultralytics/yolov5/issues/6038 --- train.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/train.py b/train.py index ae19c1851d62..17e816c06ede 100644 --- a/train.py +++ b/train.py @@ -60,7 +60,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary device, callbacks ): - save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, 
resume, noval, nosave, workers, freeze, = \ + save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze = \ Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \ opt.resume, opt.noval, opt.nosave, opt.workers, opt.freeze @@ -124,7 +124,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary model = Model(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create # Freeze - freeze = [f'model.{x}.' for x in (freeze if isinstance(freeze, list) else range(freeze))] # layers to freeze + freeze = [f'model.{x}.' for x in (freeze if len(freeze) > 1 else range(freeze[0]))] # layers to freeze for k, v in model.named_parameters(): v.requires_grad = True # train all layers if any(x in k for x in freeze): @@ -469,7 +469,7 @@ def parse_opt(known=False): parser.add_argument('--linear-lr', action='store_true', help='linear LR') parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon') parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)') - parser.add_argument('--freeze', nargs='+', type=int, default=0, help='Freeze layers: backbone=10, first3=0 1 2') + parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2') parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)') parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify') From 95c7bc25d3eabc61b12bcfd95c866d9014d97714 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 22 Dec 2021 20:29:48 +0100 Subject: [PATCH 553/757] OpenVINO Export (#6057) * OpenVINO export * Remove timeout * Add 3 files * str * Constrain opset to 12 * Default ONNX opset to 12 * Make dir * Make dir * Cleanup * Cleanup * check_requirements(('openvino-dev',)) --- export.py | 36 ++++++++++++++++++++++++++++++------ requirements.txt | 1 + 2 files changed, 31 insertions(+), 6 deletions(-) diff --git a/export.py b/export.py index 7feb525711e8..600e0c318f33 100644 --- a/export.py +++ b/export.py @@ -8,6 +8,7 @@ TorchScript | yolov5s.torchscript | `torchscript` ONNX | yolov5s.onnx | `onnx` CoreML | yolov5s.mlmodel | `coreml` +OpenVINO | yolov5s_openvino_model/ | `openvino` TensorFlow SavedModel | yolov5s_saved_model/ | `saved_model` TensorFlow GraphDef | yolov5s.pb | `pb` TensorFlow Lite | yolov5s.tflite | `tflite` @@ -15,13 +16,14 @@ TensorRT | yolov5s.engine | `engine` Usage: - $ python path/to/export.py --weights yolov5s.pt --include torchscript onnx coreml saved_model pb tflite tfjs + $ python path/to/export.py --weights yolov5s.pt --include torchscript onnx coreml openvino saved_model tflite tfjs Inference: $ python path/to/detect.py --weights yolov5s.pt yolov5s.torchscript yolov5s.onnx yolov5s.mlmodel (under development) + yolov5s_openvino_model (under development) yolov5s_saved_model yolov5s.pb yolov5s.tflite @@ -144,6 +146,23 @@ def export_coreml(model, im, file, prefix=colorstr('CoreML:')): return ct_model +def export_openvino(model, im, file, prefix=colorstr('OpenVINO:')): + # YOLOv5 OpenVINO export + try: + check_requirements(('openvino-dev',)) # requires openvino-dev: https://pypi.org/project/openvino-dev/ + import openvino.inference_engine as ie + + LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...') + f = str(file).replace('.pt', '_openvino_model' + os.sep) + + cmd = f"mo --input_model 
{file.with_suffix('.onnx')} --output_dir {f}" + subprocess.check_output(cmd, shell=True) + + LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + except Exception as e: + LOGGER.info(f'\n{prefix} export failure: {e}') + + def export_saved_model(model, im, file, dynamic, tf_nms=False, agnostic_nms=False, topk_per_class=100, topk_all=100, iou_thres=0.45, conf_thres=0.25, prefix=colorstr('TensorFlow saved_model:')): @@ -317,7 +336,7 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' imgsz=(640, 640), # image (height, width) batch_size=1, # batch size device='cpu', # cuda device, i.e. 0 or 0,1,2,3 or cpu - include=('torchscript', 'onnx', 'coreml'), # include formats + include=('torchscript', 'onnx'), # include formats half=False, # FP16 half-precision export inplace=False, # set YOLOv5 Detect() inplace=True train=False, # model.train() mode @@ -325,7 +344,7 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' int8=False, # CoreML/TF INT8 quantization dynamic=False, # ONNX/TF: dynamic axes simplify=False, # ONNX: simplify model - opset=14, # ONNX: opset version + opset=12, # ONNX: opset version verbose=False, # TensorRT: verbose log workspace=4, # TensorRT: workspace size (GB) nms=False, # TF: add NMS to model @@ -338,9 +357,12 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' t = time.time() include = [x.lower() for x in include] tf_exports = list(x in include for x in ('saved_model', 'pb', 'tflite', 'tfjs')) # TensorFlow exports - imgsz *= 2 if len(imgsz) == 1 else 1 # expand file = Path(url2file(weights) if str(weights).startswith(('http:/', 'https:/')) else weights) + # Checks + imgsz *= 2 if len(imgsz) == 1 else 1 # expand + opset = 12 if ('openvino' in include) else opset # OpenVINO requires opset <= 12 + # Load PyTorch model device = select_device(device) assert not (device.type == 'cpu' and half), '--half only compatible with GPU export, i.e. 
use --device 0' @@ -372,12 +394,14 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' # Exports if 'torchscript' in include: export_torchscript(model, im, file, optimize) - if 'onnx' in include: + if ('onnx' in include) or ('openvino' in include): # OpenVINO requires ONNX export_onnx(model, im, file, opset, train, dynamic, simplify) if 'engine' in include: export_engine(model, im, file, train, half, simplify, workspace, verbose) if 'coreml' in include: export_coreml(model, im, file) + if 'openvino' in include: + export_openvino(model, im, file) # TensorFlow Exports if any(tf_exports): @@ -413,7 +437,7 @@ def parse_opt(): parser.add_argument('--int8', action='store_true', help='CoreML/TF INT8 quantization') parser.add_argument('--dynamic', action='store_true', help='ONNX/TF: dynamic axes') parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model') - parser.add_argument('--opset', type=int, default=14, help='ONNX: opset version') + parser.add_argument('--opset', type=int, default=12, help='ONNX: opset version') parser.add_argument('--verbose', action='store_true', help='TensorRT: verbose log') parser.add_argument('--workspace', type=int, default=4, help='TensorRT: workspace size (GB)') parser.add_argument('--nms', action='store_true', help='TF: add NMS to model') diff --git a/requirements.txt b/requirements.txt index 22b51fc490e3..96fc9d1a1f32 100755 --- a/requirements.txt +++ b/requirements.txt @@ -27,6 +27,7 @@ seaborn>=0.11.0 # scikit-learn==0.19.2 # CoreML quantization # tensorflow>=2.4.1 # TFLite export # tensorflowjs>=3.9.0 # TF.js export +# openvino-dev # OpenVINO export # Extras -------------------------------------- # albumentations>=1.0.3 From afa5cfb0f872bbd467a3e37bc041a1c908c18bba Mon Sep 17 00:00:00 2001 From: JieLi <32835610+jedi007@users.noreply.github.com> Date: Thu, 23 Dec 2021 18:53:00 +0800 Subject: [PATCH 554/757] Reduce G/D/CIoU logic operations (#6074) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Consider that the default value is CIOU,adjust the order of judgment could reduce the number of judgments. And “elif CIoU:” didn't need 'if'. 
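A quick usage sketch of the refactored function, using the signature visible in the diff below (assumes the repository root is on PYTHONPATH):

```python
import torch
from utils.metrics import bbox_iou

box1 = torch.tensor([0.0, 0.0, 10.0, 10.0])    # xyxy target box, shape (4,)
box2 = torch.tensor([[2.0, 2.0, 12.0, 12.0]])  # xyxy candidate boxes, shape (n, 4)
ciou = bbox_iou(box1, box2, x1y1x2y2=True, CIoU=True)  # CIoU branch is now evaluated first
```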
Co-authored-by: 李杰 <360751194@qq.comqq.com> --- utils/metrics.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/utils/metrics.py b/utils/metrics.py index 3f1dc559c75a..e03e1bd7460b 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -222,20 +222,20 @@ def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps= union = w1 * h1 + w2 * h2 - inter + eps iou = inter / union - if GIoU or DIoU or CIoU: + if CIoU or DIoU or GIoU: cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1 c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center distance squared - if DIoU: - return iou - rho2 / c2 # DIoU - elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 + if CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2) with torch.no_grad(): alpha = v / (v - iou + (1 + eps)) return iou - (rho2 / c2 + v * alpha) # CIoU + else: + return iou - rho2 / c2 # DIoU else: # GIoU https://arxiv.org/pdf/1902.09630.pdf c_area = cw * ch + eps # convex area return iou - (c_area - union) / c_area # GIoU From c72270c076e1f087d3eb0b1ef3fb7ab55fe794ba Mon Sep 17 00:00:00 2001 From: Deep Patel <35742688+deepsworld@users.noreply.github.com> Date: Thu, 23 Dec 2021 07:49:00 -0500 Subject: [PATCH 555/757] Init tensor directly on device (#6068) Slightly more efficient than .to(device) --- models/yolo.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/models/yolo.py b/models/yolo.py index db3d711a81fa..f659a04545b9 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -72,9 +72,9 @@ def forward(self, x): def _make_grid(self, nx=20, ny=20, i=0): d = self.anchors[i].device if check_version(torch.__version__, '1.10.0'): # torch>=1.10.0 meshgrid workaround for torch>=0.7 compatibility - yv, xv = torch.meshgrid([torch.arange(ny).to(d), torch.arange(nx).to(d)], indexing='ij') + yv, xv = torch.meshgrid([torch.arange(ny, device=d), torch.arange(nx, device=d)], indexing='ij') else: - yv, xv = torch.meshgrid([torch.arange(ny).to(d), torch.arange(nx).to(d)]) + yv, xv = torch.meshgrid([torch.arange(ny, device=d), torch.arange(nx, device=d)]) grid = torch.stack((xv, yv), 2).expand((1, self.na, ny, nx, 2)).float() anchor_grid = (self.anchors[i].clone() * self.stride[i]) \ .view((1, self.na, 1, 1, 2)).expand((1, self.na, ny, nx, 2)).float() From db6ec66a602a0b64a7db1711acd064eda5daf2b3 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Thu, 23 Dec 2021 05:23:50 -0800 Subject: [PATCH 556/757] W&B: track batch size after autobatch (#6039) * track batch size after autobatch * remove redundant import * Update __init__.py * Update __init__.py Co-authored-by: Glenn Jocher --- train.py | 1 + utils/callbacks.py | 2 +- utils/loggers/__init__.py | 6 ++++++ 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/train.py b/train.py index 17e816c06ede..e2cd5ec85c09 100644 --- a/train.py +++ b/train.py @@ -138,6 +138,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Batch size if RANK == -1 and batch_size == -1: # single-GPU only, estimate best batch size batch_size = check_train_batch_size(model, imgsz) + 
loggers.on_params_update({"batch_size": batch_size}) # Optimizer nbs = 64 # nominal batch size diff --git a/utils/callbacks.py b/utils/callbacks.py index c9d936ef082d..13d82ebc2e41 100644 --- a/utils/callbacks.py +++ b/utils/callbacks.py @@ -32,7 +32,7 @@ def __init__(self): 'on_fit_epoch_end': [], # fit = train + val 'on_model_save': [], 'on_train_end': [], - + 'on_params_update': [], 'teardown': [], } diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 2a68d9785071..7a1df2a45ea7 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -157,3 +157,9 @@ def on_train_end(self, last, best, plots, epoch, results): else: self.wandb.finish_run() self.wandb = WandbLogger(self.opt) + + def on_params_update(self, params): + # Update hyperparams or configs of the experiment + # params: A dict containing {param: value} pairs + if self.wandb: + self.wandb.wandb_run.config.update(params, allow_val_change=True) From 9155eb86419cecd43e542ca69923d26fc2fd9902 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Fri, 31 Dec 2021 01:13:19 +0530 Subject: [PATCH 557/757] W&B: Log best results after training ends (#6120) * log best.pt metrics at train end * update * Update __init__.py Co-authored-by: Glenn Jocher --- utils/loggers/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 7a1df2a45ea7..8af5c402d5ee 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -147,6 +147,7 @@ def on_train_end(self, last, best, plots, epoch, results): self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats='HWC') if self.wandb: + self.wandb.log({k: v for k, v in zip(self.keys[3:10], results)}) # log best.pt val results self.wandb.log({"Results": [wandb.Image(str(f), caption=f.name) for f in files]}) # Calling wandb.log. TODO: Refactor this into WandbLogger.log_model if not self.opt.evolve: From 7b6938d5b54f562e6883eb294b4904066bef8188 Mon Sep 17 00:00:00 2001 From: Awsaf Date: Fri, 31 Dec 2021 03:47:53 +0600 Subject: [PATCH 558/757] Log best results (#6085) * log best result in summary * comment added * add space for `flake8` * log `best/epoch` * fix `dimension` for epoch ValueError: all the input arrays must have same number of dimensions * log `best/` in `utils.logger.__init__` * fix pre-commit 1. missing whitespace around operator 2. 
over-indented --- utils/loggers/__init__.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 8af5c402d5ee..7679ee70f176 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -47,6 +47,7 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, 'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', # metrics 'val/box_loss', 'val/obj_loss', 'val/cls_loss', # val loss 'x/lr0', 'x/lr1', 'x/lr2'] # params + self.best_keys = ['best/epoch', 'best/precision', 'best/recall', 'best/mAP_0.5', 'best/mAP_0.5:0.95',] for k in LOGGERS: setattr(self, k, None) # init empty logger dictionary self.csv = True # always log to csv @@ -125,6 +126,10 @@ def on_fit_epoch_end(self, vals, epoch, best_fitness, fi): self.tb.add_scalar(k, v, epoch) if self.wandb: + if best_fitness == fi: + best_results = [epoch] + vals[3:7] + for i, name in enumerate(self.best_keys): + self.wandb.wandb_run.summary[name] = best_results[i] # log best results in the summary self.wandb.log(x) self.wandb.end_epoch(best_result=best_fitness == fi) From affa284352fa6d094d32fe2be69dbffe36bd20f8 Mon Sep 17 00:00:00 2001 From: Chen Gen <4850090@qq.com> Date: Fri, 31 Dec 2021 05:59:29 +0800 Subject: [PATCH 559/757] Refactor/reduce G/C/D/IoU `if: else` statements (#6087) * Refactor the code to reduece else * Update metrics.py * Cleanup Co-authored-by: Cmos Co-authored-by: Glenn Jocher --- utils/metrics.py | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/utils/metrics.py b/utils/metrics.py index e03e1bd7460b..83defa7fd186 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -234,14 +234,10 @@ def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps= with torch.no_grad(): alpha = v / (v - iou + (1 + eps)) return iou - (rho2 / c2 + v * alpha) # CIoU - else: - return iou - rho2 / c2 # DIoU - else: # GIoU https://arxiv.org/pdf/1902.09630.pdf - c_area = cw * ch + eps # convex area - return iou - (c_area - union) / c_area # GIoU - else: - return iou # IoU - + return iou - rho2 / c2 # DIoU + c_area = cw * ch + eps # convex area + return iou - (c_area - union) / c_area # GIoU https://arxiv.org/pdf/1902.09630.pdf + return iou # IoU def box_iou(box1, box2): # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py From d95978a562bec74eed1d42e370235937ab4e1d7a Mon Sep 17 00:00:00 2001 From: Jiacong Fang Date: Sat, 1 Jan 2022 01:47:52 +0800 Subject: [PATCH 560/757] Add EdgeTPU support (#3630) * Add models/tf.py for TensorFlow and TFLite export * Set auto=False for int8 calibration * Update requirements.txt for TensorFlow and TFLite export * Read anchors directly from PyTorch weights * Add --tf-nms to append NMS in TensorFlow SavedModel and GraphDef export * Remove check_anchor_order, check_file, set_logging from import * Reformat code and optimize imports * Autodownload model and check cfg * update --source path, img-size to 320, single output * Adjust representative_dataset * Put representative dataset in tfl_int8 block * detect.py TF inference * weights to string * weights to string * cleanup tf.py * Add --dynamic-batch-size * Add xywh normalization to reduce calibration error * Update requirements.txt TensorFlow 2.3.1 -> 2.4.0 to avoid int8 quantization error * Fix imports Move C3 from models.experimental to models.common * Add models/tf.py for TensorFlow and TFLite export * Set auto=False for int8 calibration * Update requirements.txt for TensorFlow 
and TFLite export * Read anchors directly from PyTorch weights * Add --tf-nms to append NMS in TensorFlow SavedModel and GraphDef export * Remove check_anchor_order, check_file, set_logging from import * Reformat code and optimize imports * Autodownload model and check cfg * update --source path, img-size to 320, single output * Adjust representative_dataset * detect.py TF inference * Put representative dataset in tfl_int8 block * weights to string * weights to string * cleanup tf.py * Add --dynamic-batch-size * Add xywh normalization to reduce calibration error * Update requirements.txt TensorFlow 2.3.1 -> 2.4.0 to avoid int8 quantization error * Fix imports Move C3 from models.experimental to models.common * implement C3() and SiLU() * Add TensorFlow and TFLite Detection * Add --tfl-detect for TFLite Detection * Add int8 quantized TFLite inference in detect.py * Add --edgetpu for Edge TPU detection * Fix --img-size to add rectangle TensorFlow and TFLite input * Add --no-tf-nms to detect objects using models combined with TensorFlow NMS * Fix --img-size list type input * Update README.md * Add Android project for TFLite inference * Upgrade TensorFlow v2.3.1 -> v2.4.0 * Disable normalization of xywh * Rewrite names init in detect.py * Change input resolution 640 -> 320 on Android * Disable NNAPI * Update README.me --img 640 -> 320 * Update README.me for Edge TPU * Update README.md * Fix reshape dim to support dynamic batching * Fix reshape dim to support dynamic batching * Add epsilon argument in tf_BN, which is different between TF and PT * Set stride to None if not using PyTorch, and do not warmup without PyTorch * Add list support in check_img_size() * Add list input support in detect.py * sys.path.append('./') to run from yolov5/ * Add int8 quantization support for TensorFlow 2.5 * Add get_coco128.sh * Remove --no-tfl-detect in models/tf.py (Use tf-android-tfl-detect branch for EdgeTPU) * Update requirements.txt * Replace torch.load() with attempt_load() * Update requirements.txt * Add --tf-raw-resize to set half_pixel_centers=False * Remove android directory * Update README.md * Update README.md * Add multiple OS support for EdgeTPU detection * Fix export and detect * Export 3 YOLO heads with Edge TPU models * Remove xywh denormalization with Edge TPU models in detect.py * Fix saved_model and pb detect error * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix pre-commit.ci failure * Add edgetpu in export.py docstring * Fix Edge TPU model detection exported by TF 2.7 * Add class names for TF/TFLite in DetectMultibackend * Fix assignment with nl in TFLite Detection * Add check when getting Edge TPU compiler version * Add UTF-8 encoding in opening --data file for Windows * Remove redundant TensorFlow import * Add Edge TPU in export.py's docstring * Add the detect layer in Edge TPU model conversion * Default `dnn=False` * Cleanup data.yaml loading * Update detect.py * Update val.py * Comments and generalize data.yaml names Co-authored-by: Glenn Jocher Co-authored-by: unknown Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- detect.py | 4 +++- export.py | 29 +++++++++++++++++++++++++---- models/common.py | 10 ++++++++-- val.py | 2 +- 4 files changed, 37 insertions(+), 8 deletions(-) diff --git a/detect.py b/detect.py index 14cdf96ca9db..e6e74ea7dfeb 100644 --- a/detect.py +++ b/detect.py @@ -38,6 +38,7 @@ @torch.no_grad() def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s) source=ROOT / 
'data/images', # file/dir/URL/glob, 0 for webcam + data=ROOT / 'data/coco128.yaml', # dataset.yaml path imgsz=(640, 640), # inference size (height, width) conf_thres=0.25, # confidence threshold iou_thres=0.45, # NMS IOU threshold @@ -76,7 +77,7 @@ def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s) # Load model device = select_device(device) - model = DetectMultiBackend(weights, device=device, dnn=dnn) + model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data) stride, names, pt, jit, onnx, engine = model.stride, model.names, model.pt, model.jit, model.onnx, model.engine imgsz = check_img_size(imgsz, s=stride) # check image size @@ -204,6 +205,7 @@ def parse_opt(): parser = argparse.ArgumentParser() parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model path(s)') parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob, 0 for webcam') + parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path') parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w') parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold') parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold') diff --git a/export.py b/export.py index 600e0c318f33..a0758010e816 100644 --- a/export.py +++ b/export.py @@ -248,6 +248,24 @@ def export_tflite(keras_model, im, file, int8, data, ncalib, prefix=colorstr('Te LOGGER.info(f'\n{prefix} export failure: {e}') +def export_edgetpu(keras_model, im, file, prefix=colorstr('Edge TPU:')): + # YOLOv5 Edge TPU export https://coral.ai/docs/edgetpu/models-intro/ + try: + cmd = 'edgetpu_compiler --version' + out = subprocess.run(cmd, shell=True, capture_output=True, check=True) + ver = out.stdout.decode().split()[-1] + LOGGER.info(f'\n{prefix} starting export with Edge TPU compiler {ver}...') + f = str(file).replace('.pt', '-int8_edgetpu.tflite') + f_tfl = str(file).replace('.pt', '-int8.tflite') # TFLite model + + cmd = f"edgetpu_compiler -s {f_tfl}" + subprocess.run(cmd, shell=True, check=True) + + LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + except Exception as e: + LOGGER.info(f'\n{prefix} export failure: {e}') + + def export_tfjs(keras_model, im, file, prefix=colorstr('TensorFlow.js:')): # YOLOv5 TensorFlow.js export try: @@ -285,6 +303,7 @@ def export_tfjs(keras_model, im, file, prefix=colorstr('TensorFlow.js:')): def export_engine(model, im, file, train, half, simplify, workspace=4, verbose=False, prefix=colorstr('TensorRT:')): + # YOLOv5 TensorRT export https://developer.nvidia.com/tensorrt try: check_requirements(('tensorrt',)) import tensorrt as trt @@ -356,7 +375,7 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' ): t = time.time() include = [x.lower() for x in include] - tf_exports = list(x in include for x in ('saved_model', 'pb', 'tflite', 'tfjs')) # TensorFlow exports + tf_exports = list(x in include for x in ('saved_model', 'pb', 'tflite', 'edgetpu', 'tfjs')) # TensorFlow exports file = Path(url2file(weights) if str(weights).startswith(('http:/', 'https:/')) else weights) # Checks @@ -405,15 +424,17 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' # TensorFlow Exports if any(tf_exports): - pb, tflite, tfjs = tf_exports[1:] + pb, tflite, edgetpu, tfjs = tf_exports[1:] assert not (tflite and tfjs), 'TFLite and TF.js models must be exported 
separately, please pass only one type.' model = export_saved_model(model, im, file, dynamic, tf_nms=nms or agnostic_nms or tfjs, agnostic_nms=agnostic_nms or tfjs, topk_per_class=topk_per_class, topk_all=topk_all, conf_thres=conf_thres, iou_thres=iou_thres) # keras model if pb or tfjs: # pb prerequisite to tfjs export_pb(model, im, file) - if tflite: - export_tflite(model, im, file, int8=int8, data=data, ncalib=100) + if tflite or edgetpu: + export_tflite(model, im, file, int8=int8 or edgetpu, data=data, ncalib=100) + if edgetpu: + export_edgetpu(model, im, file) if tfjs: export_tfjs(model, im, file) diff --git a/models/common.py b/models/common.py index 4fd608f4b3e2..b53de7001454 100644 --- a/models/common.py +++ b/models/common.py @@ -17,6 +17,7 @@ import requests import torch import torch.nn as nn +import yaml from PIL import Image from torch.cuda import amp @@ -276,7 +277,7 @@ def forward(self, x): class DetectMultiBackend(nn.Module): # YOLOv5 MultiBackend class for python inference on various backends - def __init__(self, weights='yolov5s.pt', device=None, dnn=False): + def __init__(self, weights='yolov5s.pt', device=None, dnn=False, data=None): # Usage: # PyTorch: weights = *.pt # TorchScript: *.torchscript @@ -284,6 +285,7 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=False): # TensorFlow: *_saved_model # TensorFlow: *.pb # TensorFlow Lite: *.tflite + # TensorFlow Edge TPU: *_edgetpu.tflite # ONNX Runtime: *.onnx # OpenCV DNN: *.onnx with dnn=True # TensorRT: *.engine @@ -297,6 +299,9 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=False): pt, jit, onnx, engine, tflite, pb, saved_model, coreml = (suffix == x for x in suffixes) # backend booleans stride, names = 64, [f'class{i}' for i in range(1000)] # assign defaults w = attempt_download(w) # download if not local + if data: # data.yaml path (optional) + with open(data, errors='ignore') as f: + names = yaml.safe_load(f)['names'] # class names if jit: # TorchScript LOGGER.info(f'Loading {w} for TorchScript inference...') @@ -343,7 +348,7 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=False): binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items()) context = model.create_execution_context() batch_size = bindings['images'].shape[0] - else: # TensorFlow model (TFLite, pb, saved_model) + else: # TensorFlow (TFLite, pb, saved_model) if pb: # https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt LOGGER.info(f'Loading {w} for TensorFlow *.pb inference...') import tensorflow as tf @@ -425,6 +430,7 @@ def forward(self, im, augment=False, visualize=False, val=False): y[..., 1] *= h # y y[..., 2] *= w # w y[..., 3] *= h # h + y = torch.tensor(y) if isinstance(y, np.ndarray) else y return (y, []) if val else y diff --git a/val.py b/val.py index 4eec499d3029..c1fcf61b468c 100644 --- a/val.py +++ b/val.py @@ -124,7 +124,7 @@ def run(data, (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model - model = DetectMultiBackend(weights, device=device, dnn=dnn) + model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data) stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine imgsz = check_img_size(imgsz, s=stride) # check image size half &= (pt or jit or engine) and device.type != 'cpu' # half precision only supported by PyTorch on CUDA From e1dc8943647941378620b9b230aec2862a913fe3 Mon Sep 17 00:00:00 2001 From: bilzard <36561962+bilzard@users.noreply.github.com> Date: Mon, 3 Jan 2022 06:10:19 +0900 Subject: 
[PATCH 561/757] Enable AdamW optimizer (#6152) --- train.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/train.py b/train.py index e2cd5ec85c09..304c001b6547 100644 --- a/train.py +++ b/train.py @@ -22,7 +22,7 @@ import yaml from torch.cuda import amp from torch.nn.parallel import DistributedDataParallel as DDP -from torch.optim import SGD, Adam, lr_scheduler +from torch.optim import SGD, Adam, AdamW, lr_scheduler from tqdm import tqdm FILE = Path(__file__).resolve() @@ -155,8 +155,10 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter): # weight (with decay) g1.append(v.weight) - if opt.adam: + if opt.optimizer == 'Adam': optimizer = Adam(g0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum + elif opt.optimizer == 'AdamW': + optimizer = AdamW(g0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum else: optimizer = SGD(g0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True) @@ -460,7 +462,7 @@ def parse_opt(known=False): parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%') parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class') - parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer') + parser.add_argument('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW'], default='SGD', help='optimizer') parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode') parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') parser.add_argument('--project', default=ROOT / 'runs/train', help='save to project/name') From ec4b6dd2a31604fd9963b96ee472f78651bc1caa Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 2 Jan 2022 16:09:45 -0800 Subject: [PATCH 562/757] Update export format docstrings (#6151) * Update export documentation * Cleanup * Update export.py * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update README.md * Update README.md * Update README.md * Update train.py * Update train.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- README.md | 58 +++++++++++++++++++++++++++++-------------------------- detect.py | 24 +++++++++++++++++------ export.py | 26 +++++++++++++------------ train.py | 11 +++++++++-- val.py | 14 +++++++++++++- 5 files changed, 85 insertions(+), 48 deletions(-) diff --git a/README.md b/README.md index fa0645d4fd2c..59abd084572c 100644 --- a/README.md +++ b/README.md @@ -62,15 +62,14 @@ See the [YOLOv5 Docs](https://docs.ultralytics.com) for full documentation on tr
Install -[**Python>=3.6.0**](https://www.python.org/) is required with all -[requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) installed including -[**PyTorch>=1.7**](https://pytorch.org/get-started/locally/): - +Clone repo and install [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) in a +[**Python>=3.6.0**](https://www.python.org/) environment, including +[**PyTorch>=1.7**](https://pytorch.org/get-started/locally/). ```bash -$ git clone https://github.com/ultralytics/yolov5 -$ cd yolov5 -$ pip install -r requirements.txt +git clone https://github.com/ultralytics/yolov5 # clone +cd yolov5 +pip install -r requirements.txt # install ```
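As an illustrative aside (not part of the README diff), a quick post-install sanity check, assuming the commands above completed inside the repository root:

```python
import torch

# Confirm PyTorch imported correctly and report whether a CUDA device is visible
print(f'torch {torch.__version__}, CUDA available: {torch.cuda.is_available()}')
```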
@@ -78,8 +77,9 @@ $ pip install -r requirements.txt
Inference -Inference with YOLOv5 and [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36). Models automatically download -from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). +Inference with YOLOv5 and [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36) +. [Models](https://github.com/ultralytics/yolov5/tree/master/models) download automatically from the latest +YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). ```python import torch @@ -104,17 +104,17 @@ results.print() # or .show(), .save(), .crop(), .pandas(), etc.
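The `.pandas()` accessor mentioned in the hunk above converts Hub results to per-image DataFrames. A minimal sketch of consuming detections that way (the image URL and confidence cutoff are illustrative only):

```python
import torch

model = torch.hub.load('ultralytics/yolov5', 'yolov5s')  # auto-downloads from the latest release
results = model('https://ultralytics.com/images/zidane.jpg')

df = results.pandas().xyxy[0]  # DataFrame columns: xmin, ymin, xmax, ymax, confidence, class, name
print(df[df['confidence'] > 0.5])  # keep only higher-confidence detections
```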
Inference with detect.py -`detect.py` runs inference on a variety of sources, downloading models automatically from -the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases) and saving results to `runs/detect`. +`detect.py` runs inference on a variety of sources, downloading [models](https://github.com/ultralytics/yolov5/tree/master/models) automatically from +the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) and saving results to `runs/detect`. ```bash -$ python detect.py --source 0 # webcam - img.jpg # image - vid.mp4 # video - path/ # directory - path/*.jpg # glob - 'https://youtu.be/Zgi9g1ksQHc' # YouTube - 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream +python detect.py --source 0 # webcam + img.jpg # image + vid.mp4 # video + path/ # directory + path/*.jpg # glob + 'https://youtu.be/Zgi9g1ksQHc' # YouTube + 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream ```
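detect.py can also be driven from Python rather than the shell; its `run()` function takes keyword arguments mirroring the CLI flags. A hedged sketch, assuming it is executed from the repo root so `detect` is importable:

```python
# programmatic equivalent of: python detect.py --weights yolov5s.pt --source data/images
from detect import run

run(weights='yolov5s.pt', source='data/images')  # annotated results saved under runs/detect/exp*
```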
@@ -122,16 +122,20 @@ $ python detect.py --source 0 # webcam
Training -Run commands below to reproduce results -on [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh) dataset (dataset auto-downloads on -first use). Training times for YOLOv5s/m/l/x are 2/4/6/8 days on a single V100 (multi-GPU times faster). Use the -largest `--batch-size` your GPU allows (batch sizes shown for 16 GB devices). +The commands below reproduce YOLOv5 [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh) +results. [Models](https://github.com/ultralytics/yolov5/tree/master/models) +and [datasets](https://github.com/ultralytics/yolov5/tree/master/data) download automatically from the latest +YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). Training times for YOLOv5n/s/m/l/x are +1/2/4/6/8 days on a V100 GPU ([Multi-GPU](https://github.com/ultralytics/yolov5/issues/475) times faster). Use the +largest `--batch-size` possible, or pass `--batch-size -1` for +YOLOv5 [AutoBatch](https://github.com/ultralytics/yolov5/pull/5092). Batch sizes shown for V100-16GB. ```bash -$ python train.py --data coco.yaml --cfg yolov5s.yaml --weights '' --batch-size 64 - yolov5m 40 - yolov5l 24 - yolov5x 16 +python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 128 + yolov5s 64 + yolov5m 40 + yolov5l 24 + yolov5x 16 ``` @@ -225,6 +229,7 @@ We are super excited about our first-ever Ultralytics YOLOv5 🚀 EXPORT Competi ### Pretrained Checkpoints [assets]: https://github.com/ultralytics/yolov5/releases + [TTA]: https://github.com/ultralytics/yolov5/issues/303 |Model |size
(pixels) |mAPval<br>0.5:0.95 |mAPval<br>0.5 |Speed<br>CPU b1<br>(ms) |Speed<br>V100 b1<br>(ms) |Speed<br>V100 b32<br>(ms) |params<br>(M) |FLOPs<br>@640 (B)
@@ -257,7 +262,6 @@ We love your input! We want to make contributing to YOLOv5 as easy and transpare
-
## Contact
For YOLOv5 bugs and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues). For business inquiries or diff --git a/detect.py b/detect.py index e6e74ea7dfeb..1393f79746f6 100644 --- a/detect.py +++ b/detect.py @@ -2,14 +2,26 @@ """ Run inference on images, videos, directories, streams, etc. -Usage: - $ python path/to/detect.py --weights yolov5s.pt --source 0 # webcam - img.jpg # image - vid.mp4 # video - path/ # directory - path/*.jpg # glob +Usage - sources: + $ python path/to/detect.py --weights yolov5s.pt --source 0 # webcam + img.jpg # image + vid.mp4 # video + path/ # directory + path/*.jpg # glob 'https://youtu.be/Zgi9g1ksQHc' # YouTube 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream + +Usage - formats: + $ python path/to/detect.py --weights yolov5s.pt # PyTorch + yolov5s.torchscript # TorchScript + yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn + yolov5s.mlmodel # CoreML (under development) + yolov5s_openvino_model # OpenVINO (under development) + yolov5s_saved_model # TensorFlow SavedModel + yolov5s.pb # TensorFlow protobuf + yolov5s.tflite # TensorFlow Lite + yolov5s_edgetpu.tflite # TensorFlow Edge TPU + yolov5s.engine # TensorRT """ import argparse diff --git a/export.py b/export.py index a0758010e816..67e32305ded1 100644 --- a/export.py +++ b/export.py @@ -2,18 +2,19 @@ """ Export a YOLOv5 PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit -Format | Example | `--include ...` argument ---- | --- | --- -PyTorch | yolov5s.pt | - -TorchScript | yolov5s.torchscript | `torchscript` -ONNX | yolov5s.onnx | `onnx` -CoreML | yolov5s.mlmodel | `coreml` -OpenVINO | yolov5s_openvino_model/ | `openvino` -TensorFlow SavedModel | yolov5s_saved_model/ | `saved_model` -TensorFlow GraphDef | yolov5s.pb | `pb` -TensorFlow Lite | yolov5s.tflite | `tflite` -TensorFlow.js | yolov5s_web_model/ | `tfjs` -TensorRT | yolov5s.engine | `engine` +Format | Example | `--include ...` argument +--- | --- | --- +PyTorch | yolov5s.pt | - +TorchScript | yolov5s.torchscript | `torchscript` +ONNX | yolov5s.onnx | `onnx` +CoreML | yolov5s.mlmodel | `coreml` +OpenVINO | yolov5s_openvino_model/ | `openvino` +TensorFlow SavedModel | yolov5s_saved_model/ | `saved_model` +TensorFlow GraphDef | yolov5s.pb | `pb` +TensorFlow Lite | yolov5s.tflite | `tflite` +TensorFlow Edge TPU | yolov5s_edgetpu.tflite | `edgetpu` +TensorFlow.js | yolov5s_web_model/ | `tfjs` +TensorRT | yolov5s.engine | `engine` Usage: $ python path/to/export.py --weights yolov5s.pt --include torchscript onnx coreml openvino saved_model tflite tfjs @@ -27,6 +28,7 @@ yolov5s_saved_model yolov5s.pb yolov5s.tflite + yolov5s_edgetpu.tflite yolov5s.engine TensorFlow.js: diff --git a/train.py b/train.py index 304c001b6547..bd2fb5898cb9 100644 --- a/train.py +++ b/train.py @@ -1,10 +1,17 @@ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ -Train a YOLOv5 model on a custom dataset +Train a YOLOv5 model on a custom dataset. + +Models and datasets download automatically from the latest YOLOv5 release. 
+Models: https://github.com/ultralytics/yolov5/tree/master/models +Datasets: https://github.com/ultralytics/yolov5/tree/master/data +Tutorial: https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data Usage: - $ python path/to/train.py --data coco128.yaml --weights yolov5s.pt --img 640 + $ python path/to/train.py --data coco128.yaml --weights yolov5s.pt --img 640 # from pretrained (RECOMMENDED) + $ python path/to/train.py --data coco128.yaml --weights '' --cfg yolov5s.yaml --img 640 # from scratch """ + import argparse import math import os diff --git a/val.py b/val.py index c1fcf61b468c..f7c9ef5e60d2 100644 --- a/val.py +++ b/val.py @@ -3,7 +3,19 @@ Validate a trained YOLOv5 model accuracy on a custom dataset Usage: - $ python path/to/val.py --data coco128.yaml --weights yolov5s.pt --img 640 + $ python path/to/val.py --weights yolov5s.pt --data coco128.yaml --img 640 + +Usage - formats: + $ python path/to/val.py --weights yolov5s.pt # PyTorch + yolov5s.torchscript # TorchScript + yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn + yolov5s.mlmodel # CoreML (under development) + yolov5s_openvino_model # OpenVINO (under development) + yolov5s_saved_model # TensorFlow SavedModel + yolov5s.pb # TensorFlow protobuf + yolov5s.tflite # TensorFlow Lite + yolov5s_edgetpu.tflite # TensorFlow Edge TPU + yolov5s.engine # TensorRT """ import argparse From 968e30065aa1ccbebc42d1a19fd48f2aebc5cf52 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 2 Jan 2022 19:47:03 -0800 Subject: [PATCH 563/757] Update greetings.yml (#6165) --- .github/workflows/greetings.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index 0daf9514d3c5..6ced1132264a 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -37,9 +37,9 @@ jobs: [**Python>=3.6.0**](https://www.python.org/) with all [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) installed including [**PyTorch>=1.7**](https://pytorch.org/get-started/locally/). 
To get started: ```bash - $ git clone https://github.com/ultralytics/yolov5 - $ cd yolov5 - $ pip install -r requirements.txt + git clone https://github.com/ultralytics/yolov5 # clone + cd yolov5 + pip install -r requirements.txt # install ``` ## Environments From 5344e54da6aa5f40fc20115c672f29c96d7827cd Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 3 Jan 2022 10:42:50 -0800 Subject: [PATCH 564/757] [pre-commit.ci] pre-commit suggestions (#6177) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/pre-commit/pre-commit-hooks: v4.0.1 → v4.1.0](https://github.com/pre-commit/pre-commit-hooks/compare/v4.0.1...v4.1.0) - [github.com/asottile/pyupgrade: v2.23.1 → v2.31.0](https://github.com/asottile/pyupgrade/compare/v2.23.1...v2.31.0) - [github.com/PyCQA/isort: 5.9.3 → 5.10.1](https://github.com/PyCQA/isort/compare/5.9.3...5.10.1) - [github.com/PyCQA/flake8: 3.9.2 → 4.0.1](https://github.com/PyCQA/flake8/compare/3.9.2...4.0.1) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 48e752f448f1..526a5609fdd7 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -13,7 +13,7 @@ ci: repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.0.1 + rev: v4.1.0 hooks: - id: end-of-file-fixer - id: trailing-whitespace @@ -24,14 +24,14 @@ repos: - id: check-docstring-first - repo: https://github.com/asottile/pyupgrade - rev: v2.23.1 + rev: v2.31.0 hooks: - id: pyupgrade args: [--py36-plus] name: Upgrade code - repo: https://github.com/PyCQA/isort - rev: 5.9.3 + rev: 5.10.1 hooks: - id: isort name: Sort imports @@ -60,7 +60,7 @@ repos: # - id: yesqa - repo: https://github.com/PyCQA/flake8 - rev: 3.9.2 + rev: 4.0.1 hooks: - id: flake8 name: PEP8 From b4a29b5a8d63a8c2d4a8929942b44e8969c5dddd Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 3 Jan 2022 10:54:52 -0800 Subject: [PATCH 565/757] Update NMS `max_wh=7680` for 8k images (#6178) --- utils/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index ef27eb570ffa..470e6d81d250 100755 --- a/utils/general.py +++ b/utils/general.py @@ -660,7 +660,7 @@ def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=Non assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0' # Settings - min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height + min_wh, max_wh = 2, 7680 # (pixels) minimum and maximum box width and height max_nms = 30000 # maximum number of boxes into torchvision.ops.nms() time_limit = 10.0 # seconds to quit after redundant = True # require redundant detections From 63a4d862aae72935a010e1efd20bd9be5984f105 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 3 Jan 2022 15:41:26 -0800 Subject: [PATCH 566/757] Add OpenVINO inference (#6179) --- detect.py | 2 +- export.py | 20 ++++++++++---------- models/common.py | 31 +++++++++++++++++++++++-------- val.py | 2 +- 4 files changed, 35 insertions(+), 20 deletions(-) diff --git a/detect.py b/detect.py index 1393f79746f6..6aa5b825da48 100644 --- a/detect.py +++ b/detect.py @@ -16,7 +16,7 @@ yolov5s.torchscript # TorchScript yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn yolov5s.mlmodel # CoreML (under 
development) - yolov5s_openvino_model # OpenVINO (under development) + yolov5s.xml # OpenVINO yolov5s_saved_model # TensorFlow SavedModel yolov5s.pb # TensorFlow protobuf yolov5s.tflite # TensorFlow Lite diff --git a/export.py b/export.py index 67e32305ded1..fa40864ac378 100644 --- a/export.py +++ b/export.py @@ -20,16 +20,16 @@ $ python path/to/export.py --weights yolov5s.pt --include torchscript onnx coreml openvino saved_model tflite tfjs Inference: - $ python path/to/detect.py --weights yolov5s.pt - yolov5s.torchscript - yolov5s.onnx - yolov5s.mlmodel (under development) - yolov5s_openvino_model (under development) - yolov5s_saved_model - yolov5s.pb - yolov5s.tflite - yolov5s_edgetpu.tflite - yolov5s.engine + $ python path/to/detect.py --weights yolov5s.pt # PyTorch + yolov5s.torchscript # TorchScript + yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn + yolov5s.mlmodel # CoreML (under development) + yolov5s.xml # OpenVINO + yolov5s_saved_model # TensorFlow SavedModel + yolov5s.pb # TensorFlow protobuf + yolov5s.tflite # TensorFlow Lite + yolov5s_edgetpu.tflite # TensorFlow Edge TPU + yolov5s.engine # TensorRT TensorFlow.js: $ cd .. && git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example diff --git a/models/common.py b/models/common.py index b53de7001454..519ce611d7ef 100644 --- a/models/common.py +++ b/models/common.py @@ -282,6 +282,7 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=False, data=None): # PyTorch: weights = *.pt # TorchScript: *.torchscript # CoreML: *.mlmodel + # OpenVINO: *.xml # TensorFlow: *_saved_model # TensorFlow: *.pb # TensorFlow Lite: *.tflite @@ -294,31 +295,38 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=False, data=None): super().__init__() w = str(weights[0] if isinstance(weights, list) else weights) suffix = Path(w).suffix.lower() - suffixes = ['.pt', '.torchscript', '.onnx', '.engine', '.tflite', '.pb', '', '.mlmodel'] + suffixes = ['.pt', '.torchscript', '.onnx', '.engine', '.tflite', '.pb', '', '.mlmodel', '.xml'] check_suffix(w, suffixes) # check weights have acceptable suffix - pt, jit, onnx, engine, tflite, pb, saved_model, coreml = (suffix == x for x in suffixes) # backend booleans + pt, jit, onnx, engine, tflite, pb, saved_model, coreml, xml = (suffix == x for x in suffixes) # backends stride, names = 64, [f'class{i}' for i in range(1000)] # assign defaults w = attempt_download(w) # download if not local if data: # data.yaml path (optional) with open(data, errors='ignore') as f: names = yaml.safe_load(f)['names'] # class names - if jit: # TorchScript + if pt: # PyTorch + model = attempt_load(weights if isinstance(weights, list) else w, map_location=device) + stride = int(model.stride.max()) # model stride + names = model.module.names if hasattr(model, 'module') else model.names # get class names + self.model = model # explicitly assign for to(), cpu(), cuda(), half() + elif jit: # TorchScript LOGGER.info(f'Loading {w} for TorchScript inference...') extra_files = {'config.txt': ''} # model metadata model = torch.jit.load(w, _extra_files=extra_files) if extra_files['config.txt']: d = json.loads(extra_files['config.txt']) # extra_files dict stride, names = int(d['stride']), d['names'] - elif pt: # PyTorch - model = attempt_load(weights if isinstance(weights, list) else w, map_location=device) - stride = int(model.stride.max()) # model stride - names = model.module.names if hasattr(model, 'module') else model.names # get class names - self.model = model # explicitly assign for to(), 
cpu(), cuda(), half() elif coreml: # CoreML LOGGER.info(f'Loading {w} for CoreML inference...') import coremltools as ct model = ct.models.MLModel(w) + elif xml: # OpenVINO + LOGGER.info(f'Loading {w} for OpenVINO inference...') + check_requirements(('openvino-dev',)) # requires openvino-dev: https://pypi.org/project/openvino-dev/ + import openvino.inference_engine as ie + core = ie.IECore() + network = core.read_network(model=w, weights=Path(w).with_suffix('.bin')) # *.xml, *.bin paths + executable_network = core.load_network(network, device_name='CPU', num_requests=1) elif dnn: # ONNX OpenCV DNN LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...') check_requirements(('opencv-python>=4.5.4',)) @@ -403,6 +411,13 @@ def forward(self, im, augment=False, visualize=False, val=False): y = self.net.forward() else: # ONNX Runtime y = self.session.run([self.session.get_outputs()[0].name], {self.session.get_inputs()[0].name: im})[0] + elif self.xml: # OpenVINO + im = im.cpu().numpy() # FP32 + desc = self.ie.TensorDesc(precision='FP32', dims=im.shape, layout='NCHW') # Tensor Description + request = self.executable_network.requests[0] # inference request + request.set_blob(blob_name='images', blob=self.ie.Blob(desc, im)) # name=next(iter(request.input_blobs)) + request.infer() + y = request.output_blobs['output'].buffer # name=next(iter(request.output_blobs)) elif self.engine: # TensorRT assert im.shape == self.bindings['images'].shape, (im.shape, self.bindings['images'].shape) self.binding_addrs['images'] = int(im.data_ptr()) diff --git a/val.py b/val.py index f7c9ef5e60d2..704a7a46eb38 100644 --- a/val.py +++ b/val.py @@ -10,7 +10,7 @@ yolov5s.torchscript # TorchScript yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn yolov5s.mlmodel # CoreML (under development) - yolov5s_openvino_model # OpenVINO (under development) + yolov5s.xml # OpenVINO yolov5s_saved_model # TensorFlow SavedModel yolov5s.pb # TensorFlow protobuf yolov5s.tflite # TensorFlow Lite From 7cad6597bb617b02d36b062039fb237f49efdaae Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 3 Jan 2022 18:43:21 -0800 Subject: [PATCH 567/757] Ignore `*_openvino_model/` dir (#6180) --- .dockerignore | 1 + .gitignore | 1 + 2 files changed, 2 insertions(+) diff --git a/.dockerignore b/.dockerignore index 4be8d4108e78..af51ccc3d8df 100644 --- a/.dockerignore +++ b/.dockerignore @@ -24,6 +24,7 @@ data/samples/* **/*.pb *_saved_model/ *_web_model/ +*_openvino_model/ # Below Copied From .gitignore ----------------------------------------------------------------------------------------- # Below Copied From .gitignore ----------------------------------------------------------------------------------------- diff --git a/.gitignore b/.gitignore index 327dc8566681..69a00843ea42 100755 --- a/.gitignore +++ b/.gitignore @@ -59,6 +59,7 @@ VOC/ *.h5 *_saved_model/ *_web_model/ +*_openvino_model/ darknet53.conv.74 yolov3-tiny.conv.15 From 5bd6a97b18285120c389f7c59d605322702d1f5e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 3 Jan 2022 20:08:15 -0800 Subject: [PATCH 568/757] Global export format sort (#6182) * Global export sort * Cleanup --- detect.py | 6 +- export.py | 148 +++++++++++++++++++++++------------------------ models/common.py | 80 ++++++++++++------------- val.py | 6 +- 4 files changed, 120 insertions(+), 120 deletions(-) diff --git a/detect.py b/detect.py index 6aa5b825da48..a4a1fb69b42e 100644 --- a/detect.py +++ b/detect.py @@ -15,13 +15,13 @@ $ python path/to/detect.py --weights yolov5s.pt # PyTorch yolov5s.torchscript # 
TorchScript yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn - yolov5s.mlmodel # CoreML (under development) yolov5s.xml # OpenVINO + yolov5s.engine # TensorRT + yolov5s.mlmodel # CoreML (under development) yolov5s_saved_model # TensorFlow SavedModel - yolov5s.pb # TensorFlow protobuf + yolov5s.pb # TensorFlow GraphDef yolov5s.tflite # TensorFlow Lite yolov5s_edgetpu.tflite # TensorFlow Edge TPU - yolov5s.engine # TensorRT """ import argparse diff --git a/export.py b/export.py index fa40864ac378..3b677d2ca144 100644 --- a/export.py +++ b/export.py @@ -2,19 +2,19 @@ """ Export a YOLOv5 PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit -Format | Example | `--include ...` argument +Format | `export.py --include` | Model --- | --- | --- -PyTorch | yolov5s.pt | - -TorchScript | yolov5s.torchscript | `torchscript` -ONNX | yolov5s.onnx | `onnx` -CoreML | yolov5s.mlmodel | `coreml` -OpenVINO | yolov5s_openvino_model/ | `openvino` -TensorFlow SavedModel | yolov5s_saved_model/ | `saved_model` -TensorFlow GraphDef | yolov5s.pb | `pb` -TensorFlow Lite | yolov5s.tflite | `tflite` -TensorFlow Edge TPU | yolov5s_edgetpu.tflite | `edgetpu` -TensorFlow.js | yolov5s_web_model/ | `tfjs` -TensorRT | yolov5s.engine | `engine` +PyTorch | - | yolov5s.pt +TorchScript | `torchscript` | yolov5s.torchscript +ONNX | `onnx` | yolov5s.onnx +OpenVINO | `openvino` | yolov5s_openvino_model/ +TensorRT | `engine` | yolov5s.engine +CoreML | `coreml` | yolov5s.mlmodel +TensorFlow SavedModel | `saved_model` | yolov5s_saved_model/ +TensorFlow GraphDef | `pb` | yolov5s.pb +TensorFlow Lite | `tflite` | yolov5s.tflite +TensorFlow Edge TPU | `edgetpu` | yolov5s_edgetpu.tflite +TensorFlow.js | `tfjs` | yolov5s_web_model/ Usage: $ python path/to/export.py --weights yolov5s.pt --include torchscript onnx coreml openvino saved_model tflite tfjs @@ -23,13 +23,13 @@ $ python path/to/detect.py --weights yolov5s.pt # PyTorch yolov5s.torchscript # TorchScript yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn - yolov5s.mlmodel # CoreML (under development) yolov5s.xml # OpenVINO + yolov5s.engine # TensorRT + yolov5s.mlmodel # CoreML (under development) yolov5s_saved_model # TensorFlow SavedModel - yolov5s.pb # TensorFlow protobuf + yolov5s.pb # TensorFlow GraphDef yolov5s.tflite # TensorFlow Lite yolov5s_edgetpu.tflite # TensorFlow Edge TPU - yolov5s.engine # TensorRT TensorFlow.js: $ cd .. 
&& git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example @@ -126,6 +126,23 @@ def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorst LOGGER.info(f'{prefix} export failure: {e}') +def export_openvino(model, im, file, prefix=colorstr('OpenVINO:')): + # YOLOv5 OpenVINO export + try: + check_requirements(('openvino-dev',)) # requires openvino-dev: https://pypi.org/project/openvino-dev/ + import openvino.inference_engine as ie + + LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...') + f = str(file).replace('.pt', '_openvino_model' + os.sep) + + cmd = f"mo --input_model {file.with_suffix('.onnx')} --output_dir {f}" + subprocess.check_output(cmd, shell=True) + + LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + except Exception as e: + LOGGER.info(f'\n{prefix} export failure: {e}') + + def export_coreml(model, im, file, prefix=colorstr('CoreML:')): # YOLOv5 CoreML export ct_model = None @@ -148,27 +165,57 @@ def export_coreml(model, im, file, prefix=colorstr('CoreML:')): return ct_model -def export_openvino(model, im, file, prefix=colorstr('OpenVINO:')): - # YOLOv5 OpenVINO export +def export_engine(model, im, file, train, half, simplify, workspace=4, verbose=False, prefix=colorstr('TensorRT:')): + # YOLOv5 TensorRT export https://developer.nvidia.com/tensorrt try: - check_requirements(('openvino-dev',)) # requires openvino-dev: https://pypi.org/project/openvino-dev/ - import openvino.inference_engine as ie + check_requirements(('tensorrt',)) + import tensorrt as trt - LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...') - f = str(file).replace('.pt', '_openvino_model' + os.sep) + opset = (12, 13)[trt.__version__[0] == '8'] # test on TensorRT 7.x and 8.x + export_onnx(model, im, file, opset, train, False, simplify) + onnx = file.with_suffix('.onnx') + assert onnx.exists(), f'failed to export ONNX file: {onnx}' - cmd = f"mo --input_model {file.with_suffix('.onnx')} --output_dir {f}" - subprocess.check_output(cmd, shell=True) + LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...') + f = file.with_suffix('.engine') # TensorRT engine file + logger = trt.Logger(trt.Logger.INFO) + if verbose: + logger.min_severity = trt.Logger.Severity.VERBOSE + + builder = trt.Builder(logger) + config = builder.create_builder_config() + config.max_workspace_size = workspace * 1 << 30 + + flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)) + network = builder.create_network(flag) + parser = trt.OnnxParser(network, logger) + if not parser.parse_from_file(str(onnx)): + raise RuntimeError(f'failed to load ONNX file: {onnx}') + + inputs = [network.get_input(i) for i in range(network.num_inputs)] + outputs = [network.get_output(i) for i in range(network.num_outputs)] + LOGGER.info(f'{prefix} Network Description:') + for inp in inputs: + LOGGER.info(f'{prefix}\tinput "{inp.name}" with shape {inp.shape} and dtype {inp.dtype}') + for out in outputs: + LOGGER.info(f'{prefix}\toutput "{out.name}" with shape {out.shape} and dtype {out.dtype}') + half &= builder.platform_has_fast_fp16 + LOGGER.info(f'{prefix} building FP{16 if half else 32} engine in {f}') + if half: + config.set_flag(trt.BuilderFlag.FP16) + with builder.build_engine(network, config) as engine, open(f, 'wb') as t: + t.write(engine.serialize()) LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + except Exception as e: LOGGER.info(f'\n{prefix} export failure: 
{e}') def export_saved_model(model, im, file, dynamic, tf_nms=False, agnostic_nms=False, topk_per_class=100, topk_all=100, iou_thres=0.45, - conf_thres=0.25, prefix=colorstr('TensorFlow saved_model:')): - # YOLOv5 TensorFlow saved_model export + conf_thres=0.25, prefix=colorstr('TensorFlow SavedModel:')): + # YOLOv5 TensorFlow SavedModel export keras_model = None try: import tensorflow as tf @@ -304,53 +351,6 @@ def export_tfjs(keras_model, im, file, prefix=colorstr('TensorFlow.js:')): LOGGER.info(f'\n{prefix} export failure: {e}') -def export_engine(model, im, file, train, half, simplify, workspace=4, verbose=False, prefix=colorstr('TensorRT:')): - # YOLOv5 TensorRT export https://developer.nvidia.com/tensorrt - try: - check_requirements(('tensorrt',)) - import tensorrt as trt - - opset = (12, 13)[trt.__version__[0] == '8'] # test on TensorRT 7.x and 8.x - export_onnx(model, im, file, opset, train, False, simplify) - onnx = file.with_suffix('.onnx') - assert onnx.exists(), f'failed to export ONNX file: {onnx}' - - LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...') - f = file.with_suffix('.engine') # TensorRT engine file - logger = trt.Logger(trt.Logger.INFO) - if verbose: - logger.min_severity = trt.Logger.Severity.VERBOSE - - builder = trt.Builder(logger) - config = builder.create_builder_config() - config.max_workspace_size = workspace * 1 << 30 - - flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)) - network = builder.create_network(flag) - parser = trt.OnnxParser(network, logger) - if not parser.parse_from_file(str(onnx)): - raise RuntimeError(f'failed to load ONNX file: {onnx}') - - inputs = [network.get_input(i) for i in range(network.num_inputs)] - outputs = [network.get_output(i) for i in range(network.num_outputs)] - LOGGER.info(f'{prefix} Network Description:') - for inp in inputs: - LOGGER.info(f'{prefix}\tinput "{inp.name}" with shape {inp.shape} and dtype {inp.dtype}') - for out in outputs: - LOGGER.info(f'{prefix}\toutput "{out.name}" with shape {out.shape} and dtype {out.dtype}') - - half &= builder.platform_has_fast_fp16 - LOGGER.info(f'{prefix} building FP{16 if half else 32} engine in {f}') - if half: - config.set_flag(trt.BuilderFlag.FP16) - with builder.build_engine(network, config) as engine, open(f, 'wb') as t: - t.write(engine.serialize()) - LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - - except Exception as e: - LOGGER.info(f'\n{prefix} export failure: {e}') - - @torch.no_grad() def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' weights=ROOT / 'yolov5s.pt', # weights path @@ -417,12 +417,12 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' export_torchscript(model, im, file, optimize) if ('onnx' in include) or ('openvino' in include): # OpenVINO requires ONNX export_onnx(model, im, file, opset, train, dynamic, simplify) + if 'openvino' in include: + export_openvino(model, im, file) if 'engine' in include: export_engine(model, im, file, train, half, simplify, workspace, verbose) if 'coreml' in include: export_coreml(model, im, file) - if 'openvino' in include: - export_openvino(model, im, file) # TensorFlow Exports if any(tf_exports): diff --git a/models/common.py b/models/common.py index 519ce611d7ef..284dd2bb3af0 100644 --- a/models/common.py +++ b/models/common.py @@ -316,17 +316,6 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=False, data=None): if extra_files['config.txt']: d = json.loads(extra_files['config.txt']) # extra_files dict stride, 
names = int(d['stride']), d['names'] - elif coreml: # CoreML - LOGGER.info(f'Loading {w} for CoreML inference...') - import coremltools as ct - model = ct.models.MLModel(w) - elif xml: # OpenVINO - LOGGER.info(f'Loading {w} for OpenVINO inference...') - check_requirements(('openvino-dev',)) # requires openvino-dev: https://pypi.org/project/openvino-dev/ - import openvino.inference_engine as ie - core = ie.IECore() - network = core.read_network(model=w, weights=Path(w).with_suffix('.bin')) # *.xml, *.bin paths - executable_network = core.load_network(network, device_name='CPU', num_requests=1) elif dnn: # ONNX OpenCV DNN LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...') check_requirements(('opencv-python>=4.5.4',)) @@ -338,6 +327,13 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=False, data=None): import onnxruntime providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider'] session = onnxruntime.InferenceSession(w, providers=providers) + elif xml: # OpenVINO + LOGGER.info(f'Loading {w} for OpenVINO inference...') + check_requirements(('openvino-dev',)) # requires openvino-dev: https://pypi.org/project/openvino-dev/ + import openvino.inference_engine as ie + core = ie.IECore() + network = core.read_network(model=w, weights=Path(w).with_suffix('.bin')) # *.xml, *.bin paths + executable_network = core.load_network(network, device_name='CPU', num_requests=1) elif engine: # TensorRT LOGGER.info(f'Loading {w} for TensorRT inference...') import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download @@ -356,9 +352,17 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=False, data=None): binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items()) context = model.create_execution_context() batch_size = bindings['images'].shape[0] - else: # TensorFlow (TFLite, pb, saved_model) - if pb: # https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt - LOGGER.info(f'Loading {w} for TensorFlow *.pb inference...') + elif coreml: # CoreML + LOGGER.info(f'Loading {w} for CoreML inference...') + import coremltools as ct + model = ct.models.MLModel(w) + else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU) + if saved_model: # SavedModel + LOGGER.info(f'Loading {w} for TensorFlow SavedModel inference...') + import tensorflow as tf + model = tf.keras.models.load_model(w) + elif pb: # GraphDef https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt + LOGGER.info(f'Loading {w} for TensorFlow GraphDef inference...') import tensorflow as tf def wrap_frozen_graph(gd, inputs, outputs): @@ -369,19 +373,15 @@ def wrap_frozen_graph(gd, inputs, outputs): graph_def = tf.Graph().as_graph_def() graph_def.ParseFromString(open(w, 'rb').read()) frozen_func = wrap_frozen_graph(gd=graph_def, inputs="x:0", outputs="Identity:0") - elif saved_model: - LOGGER.info(f'Loading {w} for TensorFlow saved_model inference...') - import tensorflow as tf - model = tf.keras.models.load_model(w) elif tflite: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python - if 'edgetpu' in w.lower(): + if 'edgetpu' in w.lower(): # Edge TPU LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...') import tflite_runtime.interpreter as tfli delegate = {'Linux': 'libedgetpu.so.1', # install https://coral.ai/software/#edgetpu-runtime 'Darwin': 'libedgetpu.1.dylib', 'Windows': 'edgetpu.dll'}[platform.system()] interpreter = tfli.Interpreter(model_path=w, 
experimental_delegates=[tfli.load_delegate(delegate)]) - else: + else: # Lite LOGGER.info(f'Loading {w} for TensorFlow Lite inference...') import tensorflow as tf interpreter = tf.lite.Interpreter(model_path=w) # load TFLite model @@ -396,21 +396,13 @@ def forward(self, im, augment=False, visualize=False, val=False): if self.pt or self.jit: # PyTorch y = self.model(im) if self.jit else self.model(im, augment=augment, visualize=visualize) return y if val else y[0] - elif self.coreml: # CoreML - im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3) - im = Image.fromarray((im[0] * 255).astype('uint8')) - # im = im.resize((192, 320), Image.ANTIALIAS) - y = self.model.predict({'image': im}) # coordinates are xywh normalized - box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]]) # xyxy pixels - conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float) - y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1) - elif self.onnx: # ONNX + elif self.dnn: # ONNX OpenCV DNN im = im.cpu().numpy() # torch to numpy - if self.dnn: # ONNX OpenCV DNN - self.net.setInput(im) - y = self.net.forward() - else: # ONNX Runtime - y = self.session.run([self.session.get_outputs()[0].name], {self.session.get_inputs()[0].name: im})[0] + self.net.setInput(im) + y = self.net.forward() + elif self.onnx: # ONNX Runtime + im = im.cpu().numpy() # torch to numpy + y = self.session.run([self.session.get_outputs()[0].name], {self.session.get_inputs()[0].name: im})[0] elif self.xml: # OpenVINO im = im.cpu().numpy() # FP32 desc = self.ie.TensorDesc(precision='FP32', dims=im.shape, layout='NCHW') # Tensor Description @@ -423,13 +415,21 @@ def forward(self, im, augment=False, visualize=False, val=False): self.binding_addrs['images'] = int(im.data_ptr()) self.context.execute_v2(list(self.binding_addrs.values())) y = self.bindings['output'].data - else: # TensorFlow model (TFLite, pb, saved_model) + elif self.coreml: # CoreML im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3) - if self.pb: - y = self.frozen_func(x=self.tf.constant(im)).numpy() - elif self.saved_model: + im = Image.fromarray((im[0] * 255).astype('uint8')) + # im = im.resize((192, 320), Image.ANTIALIAS) + y = self.model.predict({'image': im}) # coordinates are xywh normalized + box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]]) # xyxy pixels + conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float) + y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1) + else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU) + im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3) + if self.saved_model: # SavedModel y = self.model(im, training=False).numpy() - elif self.tflite: + elif self.pb: # GraphDef + y = self.frozen_func(x=self.tf.constant(im)).numpy() + elif self.tflite: # Lite input, output = self.input_details[0], self.output_details[0] int8 = input['dtype'] == np.uint8 # is TFLite quantized uint8 model if int8: @@ -451,7 +451,7 @@ def forward(self, im, augment=False, visualize=False, val=False): def warmup(self, imgsz=(1, 3, 640, 640), half=False): # Warmup model by running inference once - if self.pt or self.engine or self.onnx: # warmup types + if self.pt or self.jit or self.onnx or self.engine: # warmup types if isinstance(self.device, torch.device) and self.device.type != 'cpu': # only warmup GPU models im = torch.zeros(*imgsz).to(self.device).type(torch.half if half else torch.float) # input 
image self.forward(im) # warmup diff --git a/val.py b/val.py index 704a7a46eb38..4d707f62bffa 100644 --- a/val.py +++ b/val.py @@ -9,13 +9,13 @@ $ python path/to/val.py --weights yolov5s.pt # PyTorch yolov5s.torchscript # TorchScript yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn - yolov5s.mlmodel # CoreML (under development) yolov5s.xml # OpenVINO + yolov5s.engine # TensorRT + yolov5s.mlmodel # CoreML (under development) yolov5s_saved_model # TensorFlow SavedModel - yolov5s.pb # TensorFlow protobuf + yolov5s.pb # TensorFlow GraphDef yolov5s.tflite # TensorFlow Lite yolov5s_edgetpu.tflite # TensorFlow Edge TPU - yolov5s.engine # TensorRT """ import argparse From fb839298a16fca4143dc70619a084f9ed085ac07 Mon Sep 17 00:00:00 2001 From: Yin Rong Date: Tue, 4 Jan 2022 12:25:48 +0800 Subject: [PATCH 569/757] Fix TorchScript on mobile export (#6183) * fix export of TorchScript on mobile * Cleanup Co-authored-by: yinrong Co-authored-by: Glenn Jocher --- export.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/export.py b/export.py index 3b677d2ca144..d169aa0de5f5 100644 --- a/export.py +++ b/export.py @@ -75,7 +75,10 @@ def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:' ts = torch.jit.trace(model, im, strict=False) d = {"shape": im.shape, "stride": int(max(model.stride)), "names": model.names} extra_files = {'config.txt': json.dumps(d)} # torch._C.ExtraFilesMap() - (optimize_for_mobile(ts) if optimize else ts).save(str(f), _extra_files=extra_files) + if optimize: # https://pytorch.org/tutorials/recipes/mobile_interpreter.html + optimize_for_mobile(ts)._save_for_lite_interpreter(str(f), _extra_files=extra_files) + else: + ts.save(str(f), _extra_files=extra_files) LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') except Exception as e: From a2f4a1799ba6dabea4cd74a3b1e292c102918670 Mon Sep 17 00:00:00 2001 From: imyhxy Date: Wed, 5 Jan 2022 03:09:25 +0800 Subject: [PATCH 570/757] TensorRT 7 `anchor_grid` compatibility fix (#6185) * fix: TensorRT 7 incompatiable * Add comment * Add if: else and comment Co-authored-by: Glenn Jocher --- export.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/export.py b/export.py index d169aa0de5f5..6cf1db2c45b8 100644 --- a/export.py +++ b/export.py @@ -175,7 +175,13 @@ def export_engine(model, im, file, train, half, simplify, workspace=4, verbose=F import tensorrt as trt opset = (12, 13)[trt.__version__[0] == '8'] # test on TensorRT 7.x and 8.x - export_onnx(model, im, file, opset, train, False, simplify) + if opset == 12: # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012 + grid = model.model[-1].anchor_grid + model.model[-1].anchor_grid = [a[..., :1, :1, :] for a in grid] + export_onnx(model, im, file, opset, train, False, simplify) + model.model[-1].anchor_grid = grid + else: # TensorRT >= 8 + export_onnx(model, im, file, opset, train, False, simplify) onnx = file.with_suffix('.onnx') assert onnx.exists(), f'failed to export ONNX file: {onnx}' @@ -418,12 +424,12 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' # Exports if 'torchscript' in include: export_torchscript(model, im, file, optimize) + if 'engine' in include: # TensorRT required before ONNX + export_engine(model, im, file, train, half, simplify, workspace, verbose) if ('onnx' in include) or ('openvino' in include): # OpenVINO requires ONNX export_onnx(model, im, file, opset, train, dynamic, simplify) if 'openvino' in include: export_openvino(model, 
im, file) - if 'engine' in include: - export_engine(model, im, file, train, half, simplify, workspace, verbose) From 7b31a531b45f9c8b9ec543cdfbf5c9d0c9aa920d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 4 Jan 2022 13:39:13 -0800 Subject: [PATCH 571/757] Add `tensorrt>=7.0.0` checks (#6193) * Add `tensorrt>=7.0.0` checks * Update export.py * Update common.py * Update export.py --- export.py | 12 ++++++------ models/common.py | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/export.py b/export.py index 6cf1db2c45b8..a0cb5fdc5678 100644 --- a/export.py +++ b/export.py @@ -61,8 +61,8 @@ from models.yolo import Detect from utils.activations import SiLU from utils.datasets import LoadImages -from utils.general import (LOGGER, check_dataset, check_img_size, check_requirements, colorstr, file_size, print_args, - url2file) +from utils.general import (LOGGER, check_dataset, check_img_size, check_requirements, check_version, colorstr, + file_size, print_args, url2file) from utils.torch_utils import select_device @@ -174,14 +174,14 @@ def export_engine(model, im, file, train, half, simplify, workspace=4, verbose=F check_requirements(('tensorrt',)) import tensorrt as trt - opset = (12, 13)[trt.__version__[0] == '8'] # test on TensorRT 7.x and 8.x - if opset == 12: # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012 + if trt.__version__[0] == '7': # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012 grid = model.model[-1].anchor_grid model.model[-1].anchor_grid = [a[..., :1, :1, :] for a in grid] - export_onnx(model, im, file, opset, train, False, simplify) + export_onnx(model, im, file, 12, train, False, simplify) # opset 12 model.model[-1].anchor_grid = grid else: # TensorRT >= 8 - export_onnx(model, im, file, opset, train, False, simplify) + check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=8.0.0 + export_onnx(model, im, file, 13, train, False, simplify) # opset 13 onnx = file.with_suffix('.onnx') assert onnx.exists(), f'failed to export ONNX file: {onnx}' diff --git a/models/common.py b/models/common.py index 284dd2bb3af0..836314568f67 100644 --- a/models/common.py +++ b/models/common.py @@ -337,7 +337,7 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=False, data=None): elif engine: # TensorRT LOGGER.info(f'Loading {w} for TensorRT inference...') import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download - check_version(trt.__version__, '8.0.0', verbose=True) # version requirement + check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=7.0.0 Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr')) logger = trt.Logger(trt.Logger.INFO) with open(w, 'rb') as f, trt.Runtime(logger) as runtime: From b5b56a3c887e5cc93770f54233f3ba4b2cc214ec Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 4 Jan 2022 17:49:09 -0800 Subject: [PATCH 572/757] Add CoreML inference (#6195) * Add Apple CoreML inference * Cleanup --- detect.py | 2 +- export.py | 3 +-- models/common.py | 9 ++++++--- val.py | 2 +- 4 files changed, 9 insertions(+), 7 deletions(-) diff --git a/detect.py b/detect.py index a4a1fb69b42e..2d1963ad6f86 100644 --- a/detect.py +++ b/detect.py @@ -17,7 +17,7 @@ yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn yolov5s.xml # OpenVINO yolov5s.engine # TensorRT - yolov5s.mlmodel # CoreML (under development) + yolov5s.mlmodel # CoreML (MacOS-only) yolov5s_saved_model # TensorFlow SavedModel
yolov5s.pb # TensorFlow GraphDef yolov5s.tflite # TensorFlow Lite diff --git a/export.py b/export.py index a0cb5fdc5678..3447fc6ed1ab 100644 --- a/export.py +++ b/export.py @@ -25,7 +25,7 @@ yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn yolov5s.xml # OpenVINO yolov5s.engine # TensorRT - yolov5s.mlmodel # CoreML (under development) + yolov5s.mlmodel # CoreML (MacOS-only) yolov5s_saved_model # TensorFlow SavedModel yolov5s.pb # TensorFlow GraphDef yolov5s.tflite # TensorFlow Lite @@ -156,7 +156,6 @@ def export_coreml(model, im, file, prefix=colorstr('CoreML:')): LOGGER.info(f'\n{prefix} starting export with coremltools {ct.__version__}...') f = file.with_suffix('.mlmodel') - model.train() # CoreML exports should be placed in model.train() mode ts = torch.jit.trace(model, im, strict=False) # TorchScript model ct_model = ct.convert(ts, inputs=[ct.ImageType('image', shape=im.shape, scale=1 / 255, bias=[0, 0, 0])]) ct_model.save(f) diff --git a/models/common.py b/models/common.py index 836314568f67..d8d5423a16e0 100644 --- a/models/common.py +++ b/models/common.py @@ -420,9 +420,12 @@ def forward(self, im, augment=False, visualize=False, val=False): im = Image.fromarray((im[0] * 255).astype('uint8')) # im = im.resize((192, 320), Image.ANTIALIAS) y = self.model.predict({'image': im}) # coordinates are xywh normalized - box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]]) # xyxy pixels - conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float) - y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1) + if 'confidence' in y: + box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]]) # xyxy pixels + conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float) + y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1) + else: + y = y[list(y)[-1]] # last output else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU) im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3) if self.saved_model: # SavedModel diff --git a/val.py b/val.py index 4d707f62bffa..4709f67511bb 100644 --- a/val.py +++ b/val.py @@ -11,7 +11,7 @@ yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn yolov5s.xml # OpenVINO yolov5s.engine # TensorRT - yolov5s.mlmodel # CoreML (under development) + yolov5s.mlmodel # CoreML (MacOS-only) yolov5s_saved_model # TensorFlow SavedModel yolov5s.pb # TensorFlow GraphDef yolov5s.tflite # TensorFlow Lite From 9e9219fe17070ae38b50d29c11d460ed0ec8b1db Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 4 Jan 2022 19:32:42 -0800 Subject: [PATCH 573/757] Fix `nan`-robust stream FPS (#6198) Fix for Webcam stop working suddenly (Issue #6197) --- utils/datasets.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/utils/datasets.py b/utils/datasets.py index 79b871c9294b..6584342a621d 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -6,6 +6,7 @@ import glob import hashlib import json +import math import os import random import shutil @@ -308,8 +309,9 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True): assert cap.isOpened(), f'{st}Failed to open {s}' w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) - self.fps[i] = max(cap.get(cv2.CAP_PROP_FPS) % 100, 0) or 30.0 # 30 FPS fallback + fps = cap.get(cv2.CAP_PROP_FPS) # warning: may return 0 or nan self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf') # infinite stream fallback + self.fps[i] = max((fps if math.isfinite(fps) else 0) % 100, 0) 
or 30 # 30 FPS fallback _, self.imgs[i] = cap.read() # guarantee first frame self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True) From 5402753a53b0fe0b5fe70af5fcf4498ad138c99b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 4 Jan 2022 19:36:12 -0800 Subject: [PATCH 574/757] Edge TPU compiler comment (#6196) * Edge TPU compiler comment * 7 to 8 fix --- export.py | 6 +++--- models/common.py | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/export.py b/export.py index 3447fc6ed1ab..c56a0a99a635 100644 --- a/export.py +++ b/export.py @@ -17,7 +17,7 @@ TensorFlow.js | `tfjs` | yolov5s_web_model/ Usage: - $ python path/to/export.py --weights yolov5s.pt --include torchscript onnx coreml openvino saved_model tflite tfjs + $ python path/to/export.py --weights yolov5s.pt --include torchscript onnx openvino engine coreml tflite ... Inference: $ python path/to/detect.py --weights yolov5s.pt # PyTorch @@ -179,7 +179,7 @@ def export_engine(model, im, file, train, half, simplify, workspace=4, verbose=F export_onnx(model, im, file, 12, train, False, simplify) # opset 12 model.model[-1].anchor_grid = grid else: # TensorRT >= 8 - check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=8.0.0 + check_version(trt.__version__, '8.0.0', hard=True) # require tensorrt>=8.0.0 export_onnx(model, im, file, 13, train, False, simplify) # opset 13 onnx = file.with_suffix('.onnx') assert onnx.exists(), f'failed to export ONNX file: {onnx}' @@ -308,7 +308,7 @@ def export_tflite(keras_model, im, file, int8, data, ncalib, prefix=colorstr('Te def export_edgetpu(keras_model, im, file, prefix=colorstr('Edge TPU:')): # YOLOv5 Edge TPU export https://coral.ai/docs/edgetpu/models-intro/ try: - cmd = 'edgetpu_compiler --version' + cmd = 'edgetpu_compiler --version' # install https://coral.ai/docs/edgetpu/compiler/ out = subprocess.run(cmd, shell=True, capture_output=True, check=True) ver = out.stdout.decode().split()[-1] LOGGER.info(f'\n{prefix} starting export with Edge TPU compiler {ver}...') diff --git a/models/common.py b/models/common.py index d8d5423a16e0..b055cb68a439 100644 --- a/models/common.py +++ b/models/common.py @@ -376,8 +376,8 @@ def wrap_frozen_graph(gd, inputs, outputs): elif tflite: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python if 'edgetpu' in w.lower(): # Edge TPU LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...') - import tflite_runtime.interpreter as tfli - delegate = {'Linux': 'libedgetpu.so.1', # install https://coral.ai/software/#edgetpu-runtime + import tflite_runtime.interpreter as tfli # install https://coral.ai/software/#edgetpu-runtime + delegate = {'Linux': 'libedgetpu.so.1', 'Darwin': 'libedgetpu.1.dylib', 'Windows': 'edgetpu.dll'}[platform.system()] interpreter = tfli.Interpreter(model_path=w, experimental_delegates=[tfli.load_delegate(delegate)]) From 8125ec5d4230441611c49f1064bbfae15a487fac Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 5 Jan 2022 13:01:21 -0800 Subject: [PATCH 575/757] TFLite `--int8` 'flatbuffers==1.12' fix (#6216) * TFLite `--int8` 'flatbuffers==1.12' fix Temporary workaround for TFLite INT8 export. 
* Update export.py * Update export.py --- export.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/export.py b/export.py index c56a0a99a635..0e8e4242f487 100644 --- a/export.py +++ b/export.py @@ -277,8 +277,6 @@ def export_tflite(keras_model, im, file, int8, data, ncalib, prefix=colorstr('Te try: import tensorflow as tf - from models.tf import representative_dataset_gen - LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') batch_size, ch, *imgsz = list(im.shape) # BCHW f = str(file).replace('.pt', '-fp16.tflite') @@ -288,6 +286,8 @@ def export_tflite(keras_model, im, file, int8, data, ncalib, prefix=colorstr('Te converter.target_spec.supported_types = [tf.float16] converter.optimizations = [tf.lite.Optimize.DEFAULT] if int8: + from models.tf import representative_dataset_gen + check_requirements(('flatbuffers==1.12',)) # https://github.com/ultralytics/yolov5/issues/5707 dataset = LoadImages(check_dataset(data)['train'], img_size=imgsz, auto=False) # representative data converter.representative_dataset = lambda: representative_dataset_gen(dataset, ncalib) converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] From 00d7b978690d16729f411393bbd56f9dbd6a840c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 5 Jan 2022 13:34:36 -0800 Subject: [PATCH 576/757] TFLite `--int8` 'flatbuffers==1.12' fix 2 (#6217) * TFLite `--int8` 'flatbuffers==1.12' fix 2 Reorganizes #6216 fix to update before `tensorflow` import so no restart required. * Update export.py --- export.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/export.py b/export.py index 0e8e4242f487..2466d2538ee8 100644 --- a/export.py +++ b/export.py @@ -287,7 +287,6 @@ def export_tflite(keras_model, im, file, int8, data, ncalib, prefix=colorstr('Te converter.optimizations = [tf.lite.Optimize.DEFAULT] if int8: from models.tf import representative_dataset_gen - check_requirements(('flatbuffers==1.12',)) # https://github.com/ultralytics/yolov5/issues/5707 dataset = LoadImages(check_dataset(data)['train'], img_size=imgsz, auto=False) # representative data converter.representative_dataset = lambda: representative_dataset_gen(dataset, ncalib) converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] @@ -435,6 +434,8 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' # TensorFlow Exports if any(tf_exports): pb, tflite, edgetpu, tfjs = tf_exports[1:] + if (tflite or edgetpu) and int8: # TFLite --int8 bug https://github.com/ultralytics/yolov5/issues/5707 + check_requirements(('flatbuffers==1.12',)) # required before `import tensorflow` assert not (tflite and tfjs), 'TFLite and TF.js models must be exported separately, please pass only one type.' 
model = export_saved_model(model, im, file, dynamic, tf_nms=nms or agnostic_nms or tfjs, agnostic_nms=agnostic_nms or tfjs, topk_per_class=topk_per_class, topk_all=topk_all, From b4ac3df6ffd6efb02200b45083b5527af41740ec Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 5 Jan 2022 14:55:04 -0800 Subject: [PATCH 577/757] Add `edgetpu_compiler` checks (#6218) * Add `edgetpu_compiler` checks * Update export.py * Update export.py * Update export.py * Update export.py * Update export.py * Update export.py --- export.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/export.py b/export.py index 2466d2538ee8..21aa67ff0a48 100644 --- a/export.py +++ b/export.py @@ -41,6 +41,7 @@ import argparse import json import os +import platform import subprocess import sys import time @@ -307,11 +308,15 @@ def export_tflite(keras_model, im, file, int8, data, ncalib, prefix=colorstr('Te def export_edgetpu(keras_model, im, file, prefix=colorstr('Edge TPU:')): # YOLOv5 Edge TPU export https://coral.ai/docs/edgetpu/models-intro/ try: - cmd = 'edgetpu_compiler --version' # install https://coral.ai/docs/edgetpu/compiler/ + cmd = 'edgetpu_compiler --version' + help = 'See https://coral.ai/docs/edgetpu/compiler/' + assert platform.system() == 'Linux', f'export only supported on Linux. {help}' + assert subprocess.run(cmd, shell=True).returncode == 0, f'export requires edgetpu-compiler. {help}' out = subprocess.run(cmd, shell=True, capture_output=True, check=True) ver = out.stdout.decode().split()[-1] + LOGGER.info(f'\n{prefix} starting export with Edge TPU compiler {ver}...') - f = str(file).replace('.pt', '-int8_edgetpu.tflite') + f = str(file).replace('.pt', '-int8_edgetpu.tflite') # Edge TPU model f_tfl = str(file).replace('.pt', '-int8.tflite') # TFLite model cmd = f"edgetpu_compiler -s {f_tfl}" @@ -434,7 +439,7 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' # TensorFlow Exports if any(tf_exports): pb, tflite, edgetpu, tfjs = tf_exports[1:] - if (tflite or edgetpu) and int8: # TFLite --int8 bug https://github.com/ultralytics/yolov5/issues/5707 + if int8 or edgetpu: # TFLite --int8 bug https://github.com/ultralytics/yolov5/issues/5707 check_requirements(('flatbuffers==1.12',)) # required before `import tensorflow` assert not (tflite and tfjs), 'TFLite and TF.js models must be exported separately, please pass only one type.' model = export_saved_model(model, im, file, dynamic, tf_nms=nms or agnostic_nms or tfjs, From f80c463010101e463670440537662f671cbaaa04 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 5 Jan 2022 20:57:20 -0800 Subject: [PATCH 578/757] Attempt `edgetpu-compiler` autoinstall (#6223) * Attempt `edgetpu-compiler` autoinstall Attempt to install edgetpu-compiler dependency if missing on Linux. * Update export.py * Update export.py --- export.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/export.py b/export.py index 21aa67ff0a48..6d15b21d031d 100644 --- a/export.py +++ b/export.py @@ -309,11 +309,16 @@ def export_edgetpu(keras_model, im, file, prefix=colorstr('Edge TPU:')): # YOLOv5 Edge TPU export https://coral.ai/docs/edgetpu/models-intro/ try: cmd = 'edgetpu_compiler --version' - help = 'See https://coral.ai/docs/edgetpu/compiler/' - assert platform.system() == 'Linux', f'export only supported on Linux. {help}' - assert subprocess.run(cmd, shell=True).returncode == 0, f'export requires edgetpu-compiler. 
{help}' - out = subprocess.run(cmd, shell=True, capture_output=True, check=True) - ver = out.stdout.decode().split()[-1] + help_url = 'https://coral.ai/docs/edgetpu/compiler/' + assert platform.system() == 'Linux', f'export only supported on Linux. See {help_url}' + if subprocess.run(cmd, shell=True).returncode != 0: + LOGGER.info(f'\n{prefix} export requires Edge TPU compiler. Attempting install from {help_url}') + for c in ['curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -', + 'echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list', + 'sudo apt-get update', + 'sudo apt-get install edgetpu-compiler']: + subprocess.run(c, shell=True, check=True) + ver = subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1] LOGGER.info(f'\n{prefix} starting export with Edge TPU compiler {ver}...') f = str(file).replace('.pt', '-int8_edgetpu.tflite') # Edge TPU model From ad565e31d2f0bf515d94a95690a3e13a83ce3b30 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 6 Jan 2022 09:55:31 -0800 Subject: [PATCH 579/757] Update README speed reproduction command (#6228) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 59abd084572c..5f45eb407fc5 100644 --- a/README.md +++ b/README.md @@ -251,7 +251,7 @@ We are super excited about our first-ever Ultralytics YOLOv5 🚀 EXPORT Competi * All checkpoints are trained to 300 epochs with default settings and hyperparameters. * **mAPval** values are for single-model single-scale on [COCO val2017](http://cocodataset.org) dataset.
Reproduce by `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65`
-* **Speed** averaged over COCO val images using a [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) instance. NMS times (~1 ms/img) not included.
Reproduce by `python val.py --data coco.yaml --img 640 --conf 0.25 --iou 0.45`
+* **Speed** averaged over COCO val images using a [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) instance. NMS times (~1 ms/img) not included.
Reproduce by `python val.py --data coco.yaml --img 640 --task speed --batch 1`
 * **TTA** [Test Time Augmentation](https://github.com/ultralytics/yolov5/issues/303) includes reflection and scale augmentations.
Reproduce by `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`
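Note: the updated speed command relies on `--task speed`, which times the raw forward pass, so the ~1 ms/img NMS cost is listed separately above. As a rough sketch of what that benchmark measures — not the actual val.py code path, and assuming the `ultralytics/yolov5` hub entry is reachable to download a checkpoint:

    import time
    import torch

    model = torch.hub.load('ultralytics/yolov5', 'yolov5s')  # any released checkpoint
    im = torch.zeros(1, 3, 640, 640)  # matches --batch 1 --img 640
    if torch.cuda.is_available():
        model, im = model.cuda(), im.cuda()
    with torch.no_grad():
        for _ in range(10):  # warmup iterations
            model(im)
        if torch.cuda.is_available():
            torch.cuda.synchronize()  # flush queued GPU work before starting the clock
        t = time.time()
        for _ in range(100):
            model(im)
        if torch.cuda.is_available():
            torch.cuda.synchronize()
        print(f'{(time.time() - t) / 100 * 1E3:.1f} ms/img, forward pass only (no NMS)')

Passing a raw tensor bypasses AutoShape pre/post-processing, so the loop times inference alone.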
From 33a67b4918aa3f2e572f115781e615a91fd543a4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 6 Jan 2022 11:08:09 -0800 Subject: [PATCH 580/757] Update P2-P7 `models/hub` variants (#6230) * Update p2-p7 `models/hub` variants * Update common.py * AutoAnchor camelcase corrections --- models/common.py | 2 +- models/hub/yolov5-p2.yaml | 4 ++-- models/hub/yolov5-p34.yaml | 41 ++++++++++++++++++++++++++++++++++++++ models/hub/yolov5-p6.yaml | 4 ++-- models/hub/yolov5-p7.yaml | 4 ++-- train.py | 2 +- tutorial.ipynb | 2 +- 7 files changed, 50 insertions(+), 9 deletions(-) create mode 100644 models/hub/yolov5-p34.yaml diff --git a/models/common.py b/models/common.py index b055cb68a439..e375507a5a7e 100644 --- a/models/common.py +++ b/models/common.py @@ -306,7 +306,7 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=False, data=None): if pt: # PyTorch model = attempt_load(weights if isinstance(weights, list) else w, map_location=device) - stride = int(model.stride.max()) # model stride + stride = max(int(model.stride.max()), 32) # model stride names = model.module.names if hasattr(model, 'module') else model.names # get class names self.model = model # explicitly assign for to(), cpu(), cuda(), half() elif jit: # TorchScript diff --git a/models/hub/yolov5-p2.yaml b/models/hub/yolov5-p2.yaml index ffe26ebad182..554117dda59a 100644 --- a/models/hub/yolov5-p2.yaml +++ b/models/hub/yolov5-p2.yaml @@ -4,7 +4,7 @@ nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple -anchors: 3 # auto-anchor evolves 3 anchors per P output layer +anchors: 3 # AutoAnchor evolves 3 anchors per P output layer # YOLOv5 v6.0 backbone backbone: @@ -21,7 +21,7 @@ backbone: [-1, 1, SPPF, [1024, 5]], # 9 ] -# YOLOv5 v6.0 head +# YOLOv5 v6.0 head with (P2, P3, P4, P5) outputs head: [[-1, 1, Conv, [512, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], diff --git a/models/hub/yolov5-p34.yaml b/models/hub/yolov5-p34.yaml new file mode 100644 index 000000000000..dbf0f850083e --- /dev/null +++ b/models/hub/yolov5-p34.yaml @@ -0,0 +1,41 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 0.33 # model depth multiple +width_multiple: 0.50 # layer channel multiple +anchors: 3 # AutoAnchor evolves 3 anchors per P output layer + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [ [ -1, 1, Conv, [ 64, 6, 2, 2 ] ], # 0-P1/2 + [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 + [ -1, 3, C3, [ 128 ] ], + [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 + [ -1, 6, C3, [ 256 ] ], + [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 + [ -1, 9, C3, [ 512 ] ], + [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 7-P5/32 + [ -1, 3, C3, [ 1024 ] ], + [ -1, 1, SPPF, [ 1024, 5 ] ], # 9 + ] + +# YOLOv5 v6.0 head with (P3, P4) outputs +head: + [ [ -1, 1, Conv, [ 512, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 + [ -1, 3, C3, [ 512, False ] ], # 13 + + [ -1, 1, Conv, [ 256, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 + [ -1, 3, C3, [ 256, False ] ], # 17 (P3/8-small) + + [ -1, 1, Conv, [ 256, 3, 2 ] ], + [ [ -1, 14 ], 1, Concat, [ 1 ] ], # cat head P4 + [ -1, 3, C3, [ 512, False ] ], # 20 (P4/16-medium) + + [ [ 17, 20 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4) + ] diff --git a/models/hub/yolov5-p6.yaml b/models/hub/yolov5-p6.yaml index 28f3e439cccd..a17202f22044 100644 --- a/models/hub/yolov5-p6.yaml 
+++ b/models/hub/yolov5-p6.yaml @@ -4,7 +4,7 @@ nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple -anchors: 3 # auto-anchor 3 anchors per P output layer +anchors: 3 # AutoAnchor evolves 3 anchors per P output layer # YOLOv5 v6.0 backbone backbone: @@ -23,7 +23,7 @@ backbone: [-1, 1, SPPF, [1024, 5]], # 11 ] -# YOLOv5 v6.0 head +# YOLOv5 v6.0 head with (P3, P4, P5, P6) outputs head: [[-1, 1, Conv, [768, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], diff --git a/models/hub/yolov5-p7.yaml b/models/hub/yolov5-p7.yaml index bd2f5845f884..edd7d13a34a6 100644 --- a/models/hub/yolov5-p7.yaml +++ b/models/hub/yolov5-p7.yaml @@ -4,7 +4,7 @@ nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple -anchors: 3 # auto-anchor 3 anchors per P output layer +anchors: 3 # AutoAnchor evolves 3 anchors per P output layer # YOLOv5 v6.0 backbone backbone: @@ -25,7 +25,7 @@ backbone: [-1, 1, SPPF, [1280, 5]], # 13 ] -# YOLOv5 head +# YOLOv5 v6.0 head with (P3, P4, P5, P6, P7) outputs head: [[-1, 1, Conv, [1024, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], diff --git a/train.py b/train.py index bd2fb5898cb9..410f16fed3bf 100644 --- a/train.py +++ b/train.py @@ -461,7 +461,7 @@ def parse_opt(known=False): parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training') parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') parser.add_argument('--noval', action='store_true', help='only validate final epoch') - parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check') + parser.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor') parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations') parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"') diff --git a/tutorial.ipynb b/tutorial.ipynb index 45b27b7ab2cc..fb808cf6e10b 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -777,7 +777,7 @@ "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:01<00:00, 121.58it/s]\n", "Plotting labels... \n", "\n", - "\u001b[34m\u001b[1mautoanchor: \u001b[0mAnalyzing anchors... anchors/target = 4.27, Best Possible Recall (BPR) = 0.9935\n", + "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mAnalyzing anchors... 
anchors/target = 4.27, Best Possible Recall (BPR) = 0.9935\n", "Image sizes 640 train, 640 val\n", "Using 2 dataloader workers\n", "Logging results to \u001b[1mruns/train/exp\u001b[0m\n", From 6865d19a92d8c160c7fc3c92256627dadce1cd1e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 7 Jan 2022 09:31:17 -0800 Subject: [PATCH 581/757] TensorRT 7 export fix (#6235) --- export.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/export.py b/export.py index 6d15b21d031d..6adcf72e9e66 100644 --- a/export.py +++ b/export.py @@ -174,7 +174,7 @@ def export_engine(model, im, file, train, half, simplify, workspace=4, verbose=F check_requirements(('tensorrt',)) import tensorrt as trt - if trt.__version__[0] == 7: # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012 + if trt.__version__[0] == '7': # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012 grid = model.model[-1].anchor_grid model.model[-1].anchor_grid = [a[..., :1, :1, :] for a in grid] export_onnx(model, im, file, 12, train, False, simplify) # opset 12 From 9b13a594e953e4e2688ff6ac0190a2247733e4ca Mon Sep 17 00:00:00 2001 From: Jinwoong Yoo Date: Mon, 10 Jan 2022 15:40:47 +0900 Subject: [PATCH 582/757] Fix `cmd` string on `tfjs` export (#6243) * Fix cmd string on tfjs export * Cleanup Co-authored-by: Glenn Jocher --- export.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/export.py b/export.py index 6adcf72e9e66..0236872c2d94 100644 --- a/export.py +++ b/export.py @@ -345,8 +345,8 @@ def export_tfjs(keras_model, im, file, prefix=colorstr('TensorFlow.js:')): f_pb = file.with_suffix('.pb') # *.pb path f_json = f + '/model.json' # *.json path - cmd = f"tensorflowjs_converter --input_format=tf_frozen_model " \ - f"--output_node_names='Identity,Identity_1,Identity_2,Identity_3' {f_pb} {f}" + cmd = f'tensorflowjs_converter --input_format=tf_frozen_model ' \ + f'--output_node_names="Identity,Identity_1,Identity_2,Identity_3" {f_pb} {f}' subprocess.run(cmd, shell=True) json = open(f_json).read() From b3eaf5008bb8a34de481a6ef7ac8ba520d97b70e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 10 Jan 2022 16:49:10 -1000 Subject: [PATCH 583/757] TensorRT pip install --- tutorial.ipynb | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index fb808cf6e10b..97f9074f9fd0 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -1090,19 +1090,13 @@ }, "source": [ "# TensorRT \n", - "# https://developer.nvidia.com/nvidia-tensorrt-download\n", - "!lsb_release -a # check system\n", - "%ls /usr/local | grep cuda # check CUDA\n", - "!wget https://ultralytics.com/assets/TensorRT-8.2.0.6.Linux.x86_64-gnu.cuda-11.4.cudnn8.2.tar.gz # download\n", - "![ -d /content/TensorRT-8.2.0.6/ ] || tar -C /content/ -zxf ./TensorRT-8.2.0.6.Linux.x86_64-gnu.cuda-11.4.cudnn8.2.tar.gz # unzip\n", - "%pip list | grep tensorrt || pip install /content/TensorRT-8.2.0.6/python/tensorrt-8.2.0.6-cp37-none-linux_x86_64.whl # install\n", - "%env LD_LIBRARY_PATH=/usr/local/cuda-11.1/lib64:/content/cuda-11.1/lib64:/content/TensorRT-8.2.0.6/lib:/usr/local/nvidia/lib:/usr/local/nvidia/lib64 # add to path\n", - "\n", - "!python export.py --weights yolov5s.pt --include engine --imgsz 640 640 --device 0\n", - "!python detect.py --weights yolov5s.engine --imgsz 640 640 --device 0" + "# https://docs.nvidia.com/deeplearning/tensorrt/install-guide/index.html#installing-pip\n", + "!pip install -U nvidia-tensorrt --index-url https://pypi.ngc.nvidia.com # 
install\n", + "!python export.py --weights yolov5s.pt --include engine --imgsz 640 640 --device 0 # export\n", + "!python detect.py --weights yolov5s.engine --imgsz 640 640 --device 0 # inference" ], "execution_count": null, "outputs": [] } ] -} +} \ No newline at end of file From f3085accd3f768a5ffeb6be268d2eac1720f764d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 11 Jan 2022 10:13:17 -1000 Subject: [PATCH 584/757] Enable ONNX `--half` FP16 inference (#6268) * Enable ONNX ``--half` FP16 inference * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- detect.py | 2 +- tutorial.ipynb | 2 +- val.py | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/detect.py b/detect.py index 2d1963ad6f86..41c364c05d00 100644 --- a/detect.py +++ b/detect.py @@ -94,7 +94,7 @@ def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s) imgsz = check_img_size(imgsz, s=stride) # check image size # Half - half &= (pt or jit or engine) and device.type != 'cpu' # half precision only supported by PyTorch on CUDA + half &= (pt or jit or onnx or engine) and device.type != 'cpu' # FP16 supported on limited backends with CUDA if pt or jit: model.model.half() if half else model.model.float() diff --git a/tutorial.ipynb b/tutorial.ipynb index 97f9074f9fd0..251c18d97815 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -1099,4 +1099,4 @@ "outputs": [] } ] -} \ No newline at end of file +} diff --git a/val.py b/val.py index 4709f67511bb..843943b5ff7e 100644 --- a/val.py +++ b/val.py @@ -137,9 +137,9 @@ def run(data, # Load model model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data) - stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine + stride, pt, jit, onnx, engine = model.stride, model.pt, model.jit, model.onnx, model.engine imgsz = check_img_size(imgsz, s=stride) # check image size - half &= (pt or jit or engine) and device.type != 'cpu' # half precision only supported by PyTorch on CUDA + half &= (pt or jit or onnx or engine) and device.type != 'cpu' # FP16 supported on limited backends with CUDA if pt or jit: model.model.half() if half else model.model.float() elif engine: From 80473a65511859698aa36778c30997ba80943945 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 12 Jan 2022 15:48:40 -1000 Subject: [PATCH 585/757] Update `export.py` with Detect, Validate usages (#6280) --- export.py | 53 ++++++++++++++++++++++++++++++----------------------- 1 file changed, 30 insertions(+), 23 deletions(-) diff --git a/export.py b/export.py index 0236872c2d94..bca2564a7333 100644 --- a/export.py +++ b/export.py @@ -82,6 +82,7 @@ def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:' ts.save(str(f), _extra_files=extra_files) LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + return f except Exception as e: LOGGER.info(f'{prefix} export failure: {e}') @@ -125,7 +126,7 @@ def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorst except Exception as e: LOGGER.info(f'{prefix} simplifier failure: {e}') LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - LOGGER.info(f"{prefix} run --dynamic ONNX model inference with: 'python detect.py --weights {f}'") + return f except Exception as e: LOGGER.info(f'{prefix} export failure: {e}') @@ -143,13 +144,13 @@ def export_openvino(model, im, file, prefix=colorstr('OpenVINO:')): 
subprocess.check_output(cmd, shell=True) LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + return f except Exception as e: LOGGER.info(f'\n{prefix} export failure: {e}') def export_coreml(model, im, file, prefix=colorstr('CoreML:')): # YOLOv5 CoreML export - ct_model = None try: check_requirements(('coremltools',)) import coremltools as ct @@ -162,10 +163,10 @@ def export_coreml(model, im, file, prefix=colorstr('CoreML:')): ct_model.save(f) LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + return ct_model, f except Exception as e: LOGGER.info(f'\n{prefix} export failure: {e}') - - return ct_model + return None, None def export_engine(model, im, file, train, half, simplify, workspace=4, verbose=False, prefix=colorstr('TensorRT:')): @@ -216,7 +217,7 @@ def export_engine(model, im, file, train, half, simplify, workspace=4, verbose=F with builder.build_engine(network, config) as engine, open(f, 'wb') as t: t.write(engine.serialize()) LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - + return f except Exception as e: LOGGER.info(f'\n{prefix} export failure: {e}') @@ -225,7 +226,6 @@ def export_saved_model(model, im, file, dynamic, tf_nms=False, agnostic_nms=False, topk_per_class=100, topk_all=100, iou_thres=0.45, conf_thres=0.25, prefix=colorstr('TensorFlow SavedModel:')): # YOLOv5 TensorFlow SavedModel export - keras_model = None try: import tensorflow as tf from tensorflow import keras @@ -247,10 +247,10 @@ def export_saved_model(model, im, file, dynamic, keras_model.save(f, save_format='tf') LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + return keras_model, f except Exception as e: LOGGER.info(f'\n{prefix} export failure: {e}') - - return keras_model + return None, None def export_pb(keras_model, im, file, prefix=colorstr('TensorFlow GraphDef:')): @@ -269,6 +269,7 @@ def export_pb(keras_model, im, file, prefix=colorstr('TensorFlow GraphDef:')): tf.io.write_graph(graph_or_graph_def=frozen_func.graph, logdir=str(f.parent), name=f.name, as_text=False) LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + return f except Exception as e: LOGGER.info(f'\n{prefix} export failure: {e}') @@ -300,7 +301,7 @@ def export_tflite(keras_model, im, file, int8, data, ncalib, prefix=colorstr('Te tflite_model = converter.convert() open(f, "wb").write(tflite_model) LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - + return f except Exception as e: LOGGER.info(f'\n{prefix} export failure: {e}') @@ -328,6 +329,7 @@ def export_edgetpu(keras_model, im, file, prefix=colorstr('Edge TPU:')): subprocess.run(cmd, shell=True, check=True) LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + return f except Exception as e: LOGGER.info(f'\n{prefix} export failure: {e}') @@ -364,6 +366,7 @@ def export_tfjs(keras_model, im, file, prefix=colorstr('TensorFlow.js:')): j.write(subst) LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + return f except Exception as e: LOGGER.info(f'\n{prefix} export failure: {e}') @@ -431,15 +434,15 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' # Exports if 'torchscript' in include: - export_torchscript(model, im, file, optimize) + f = export_torchscript(model, im, file, optimize) if 'engine' in include: # TensorRT required before ONNX - export_engine(model, im, file, train, half, simplify, workspace, verbose) + f = export_engine(model, im, file, 
train, half, simplify, workspace, verbose) if ('onnx' in include) or ('openvino' in include): # OpenVINO requires ONNX - export_onnx(model, im, file, opset, train, dynamic, simplify) + f = export_onnx(model, im, file, opset, train, dynamic, simplify) if 'openvino' in include: - export_openvino(model, im, file) + f = export_openvino(model, im, file) if 'coreml' in include: - export_coreml(model, im, file) + _, f = export_coreml(model, im, file) # TensorFlow Exports if any(tf_exports): @@ -447,22 +450,26 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' if int8 or edgetpu: # TFLite --int8 bug https://github.com/ultralytics/yolov5/issues/5707 check_requirements(('flatbuffers==1.12',)) # required before `import tensorflow` assert not (tflite and tfjs), 'TFLite and TF.js models must be exported separately, please pass only one type.' - model = export_saved_model(model, im, file, dynamic, tf_nms=nms or agnostic_nms or tfjs, - agnostic_nms=agnostic_nms or tfjs, topk_per_class=topk_per_class, topk_all=topk_all, - conf_thres=conf_thres, iou_thres=iou_thres) # keras model + model, f = export_saved_model(model, im, file, dynamic, tf_nms=nms or agnostic_nms or tfjs, + agnostic_nms=agnostic_nms or tfjs, topk_per_class=topk_per_class, + topk_all=topk_all, + conf_thres=conf_thres, iou_thres=iou_thres) # keras model if pb or tfjs: # pb prerequisite to tfjs - export_pb(model, im, file) + f = export_pb(model, im, file) if tflite or edgetpu: - export_tflite(model, im, file, int8=int8 or edgetpu, data=data, ncalib=100) + f = export_tflite(model, im, file, int8=int8 or edgetpu, data=data, ncalib=100) if edgetpu: - export_edgetpu(model, im, file) + f = export_edgetpu(model, im, file) if tfjs: - export_tfjs(model, im, file) + f = export_tfjs(model, im, file) # Finish LOGGER.info(f'\nExport complete ({time.time() - t:.2f}s)' f"\nResults saved to {colorstr('bold', file.parent.resolve())}" - f'\nVisualize with https://netron.app') + f"\nVisualize with https://netron.app" + f"\nDetect with `python detect.py --weights {f}`" + f" or `model = torch.hub.load('ultralytics/yolov5', 'custom', '{f}')" + f"\nValidate with `python val.py --weights {f}`") def parse_opt(): @@ -490,7 +497,7 @@ def parse_opt(): parser.add_argument('--conf-thres', type=float, default=0.25, help='TF.js NMS: confidence threshold') parser.add_argument('--include', nargs='+', default=['torchscript', 'onnx'], - help='available formats are (torchscript, onnx, engine, coreml, saved_model, pb, tflite, tfjs)') + help='torchscript, onnx, openvino, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs') opt = parser.parse_args() print_args(FILE.stem, opt) return opt From af001349e46048f151b091b2ff593cdcd65a863b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 13 Jan 2022 12:39:42 -1000 Subject: [PATCH 586/757] Add `is_kaggle()` function (#6285) * Add `is_kaggle()` function Return True if environment is Kaggle Notebook. * Remove root loggers only if is_kaggle() == True * Update general.py --- utils/general.py | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/utils/general.py b/utils/general.py index 470e6d81d250..bce2a1763e2a 100755 --- a/utils/general.py +++ b/utils/general.py @@ -44,10 +44,21 @@ os.environ['NUMEXPR_MAX_THREADS'] = str(NUM_THREADS) # NumExpr max threads +def is_kaggle(): + # Is environment a Kaggle Notebook? 
+ try: + assert os.environ.get('PWD') == '/kaggle/working' + assert os.environ.get('KAGGLE_URL_BASE') == 'https://www.kaggle.com' + return True + except AssertionError: + return False + + def set_logging(name=None, verbose=True): # Sets level and returns logger - for h in logging.root.handlers: - logging.root.removeHandler(h) # remove all handlers associated with the root logger object + if is_kaggle(): + for h in logging.root.handlers: + logging.root.removeHandler(h) # remove all handlers associated with the root logger object rank = int(os.getenv('RANK', -1)) # rank in world for Multi-GPU trainings logging.basicConfig(format="%(message)s", level=logging.INFO if (verbose and rank in (-1, 0)) else logging.WARNING) return logging.getLogger(name) From e7bf38277f57086bf37486201909e9c04acfaa48 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 13 Jan 2022 21:23:03 -1000 Subject: [PATCH 587/757] Fix `device` count check (#6290) * Fix device count check() * Update torch_utils.py * Update torch_utils.py * Update hubconf.py --- hubconf.py | 2 +- utils/torch_utils.py | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/hubconf.py b/hubconf.py index 6bf4b0b0265f..55d15abe2ac5 100644 --- a/hubconf.py +++ b/hubconf.py @@ -61,7 +61,7 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo except Exception as e: help_url = 'https://github.com/ultralytics/yolov5/issues/36' - s = 'Cache may be out of date, try `force_reload=True`. See %s for help.' % help_url + s = f'{e}. Cache may be out of date, try `force_reload=True` or see {help_url} for help.' raise Exception(s) from e diff --git a/utils/torch_utils.py b/utils/torch_utils.py index cddb173948fb..060768e8251b 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -61,8 +61,9 @@ def select_device(device='', batch_size=0, newline=True): if cpu: os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False elif device: # non-cpu device requested - os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable - assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested' # check availability + assert torch.cuda.is_available(), 'CUDA unavailable' # check CUDA is available + assert torch.cuda.device_count() > int(device), f'invalid CUDA device {device} requested' # check index + os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable (must be after asserts) cuda = not cpu and torch.cuda.is_available() if cuda: From a1a9c6884c5cfda4c972f4087ad4d4b9c3da6518 Mon Sep 17 00:00:00 2001 From: Henry Date: Fri, 14 Jan 2022 21:11:06 +0100 Subject: [PATCH 588/757] Fixing bug multi-gpu training (#6299) * Fixing bug multi-gpu training This solves this issue: https://github.com/ultralytics/yolov5/issues/6297#issue-1103853348 * Update torch_utils.py for pep8 --- utils/torch_utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 060768e8251b..451bcdd29b7c 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -62,7 +62,8 @@ def select_device(device='', batch_size=0, newline=True): os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False elif device: # non-cpu device requested assert torch.cuda.is_available(), 'CUDA unavailable' # check CUDA is available - assert torch.cuda.device_count() > int(device), f'invalid CUDA device {device} requested' # check index + device_list = [int(val) for val in device.replace(',', '')] + assert 
all([torch.cuda.device_count() > element for element in device_list]), f'invalid CUDA device {device} requested' # check index os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable (must be after asserts) cuda = not cpu and torch.cuda.is_available() From 436ffc417ac2312de18287ddc4f87bdc2f7f5734 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 14 Jan 2022 15:48:15 -1000 Subject: [PATCH 589/757] `select_device()` cleanup (#6302) * `select_device()` cleanup * Update torch_utils.py * Update torch_utils.py * Update torch_utils.py * Update torch_utils.py * Update torch_utils.py --- utils/torch_utils.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 451bcdd29b7c..7e464190f9ba 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -61,9 +61,9 @@ def select_device(device='', batch_size=0, newline=True): if cpu: os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False elif device: # non-cpu device requested - assert torch.cuda.is_available(), 'CUDA unavailable' # check CUDA is available - device_list = [int(val) for val in device.replace(',', '')] - assert all([torch.cuda.device_count() > element for element in device_list]), f'invalid CUDA device {device} requested' # check index + nd = torch.cuda.device_count() # number of CUDA devices + assert torch.cuda.is_available(), 'CUDA is not available, use `--device cpu` or do not pass a --device' + assert nd > int(max(device.split(','))), f'Invalid `--device {device}` request, valid devices are 0 - {nd - 1}' os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable (must be after asserts) cuda = not cpu and torch.cuda.is_available() From db1f83be6312a3d68c817fd25251194e5e7e5b5d Mon Sep 17 00:00:00 2001 From: Otfot Date: Mon, 17 Jan 2022 14:58:34 +0800 Subject: [PATCH 590/757] Fix `train.py` parameter groups desc error (#6318) * Fix `train.py` parameter groups desc error * Cleanup Co-authored-by: Glenn Jocher --- train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/train.py b/train.py index 410f16fed3bf..ebe6c2e8f5f9 100644 --- a/train.py +++ b/train.py @@ -172,7 +172,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary optimizer.add_param_group({'params': g1, 'weight_decay': hyp['weight_decay']}) # add g1 with weight_decay optimizer.add_param_group({'params': g2}) # add g2 (biases) LOGGER.info(f"{colorstr('optimizer:')} {type(optimizer).__name__} with parameter groups " - f"{len(g0)} weight, {len(g1)} weight (no decay), {len(g2)} bias") + f"{len(g0)} weight (no decay), {len(g1)} weight, {len(g2)} bias") del g0, g1, g2 # Scheduler From 3119b2f27c198c6b9c02fb57d3b00b61a7bd2356 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 18 Jan 2022 10:04:49 -1000 Subject: [PATCH 591/757] Remove `dataset_stats()` autodownload capability (#6303) * Remove `dataset_stats()` autodownload capability @kalenmike security update per Slack convo * Update datasets.py --- utils/datasets.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index 6584342a621d..a8f453aa1904 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -936,11 +936,10 @@ def verify_image_label(args): return [None, None, None, None, nm, nf, ne, nc, msg] -def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False, profile=False, hub=False): +def dataset_stats(path='coco128.yaml', verbose=False, profile=False, hub=False): """ Return dataset statistics 
dictionary with images and instances counts per split per class To run in parent directory: export PYTHONPATH="$PWD/yolov5" - Usage1: from utils.datasets import *; dataset_stats('coco128.yaml', autodownload=True) - Usage2: from utils.datasets import *; dataset_stats('../datasets/coco128_with_yaml.zip') + Usage: from utils.datasets import *; dataset_stats('../datasets/coco128_with_yaml.zip') Arguments path: Path to data.yaml or data.zip (with data.yaml inside data.zip) autodownload: Attempt to download dataset if not found locally @@ -984,7 +983,7 @@ def hub_ops(f, max_dim=1920): data = yaml.safe_load(f) # data dict if zipped: data['path'] = data_dir # TODO: should this be dir.resolve()? - check_dataset(data, autodownload) # download dataset if missing + check_dataset(data, autodownload=False) hub_dir = Path(data['path'] + ('-hub' if hub else '')) stats = {'nc': data['nc'], 'names': data['names']} # statistics dictionary for split in 'train', 'val', 'test': From fd55271c04e9be68ea5299f7fe2aafcf4dc1984d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 18 Jan 2022 10:49:26 -1000 Subject: [PATCH 592/757] Console corrupted -> corrupt (#6338) * Console corrupted -> corrupt Minor style changes. * Update export.py --- export.py | 3 +-- utils/datasets.py | 6 +++--- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/export.py b/export.py index bca2564a7333..11fead4a9e1d 100644 --- a/export.py +++ b/export.py @@ -452,8 +452,7 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' assert not (tflite and tfjs), 'TFLite and TF.js models must be exported separately, please pass only one type.' model, f = export_saved_model(model, im, file, dynamic, tf_nms=nms or agnostic_nms or tfjs, agnostic_nms=agnostic_nms or tfjs, topk_per_class=topk_per_class, - topk_all=topk_all, - conf_thres=conf_thres, iou_thres=iou_thres) # keras model + topk_all=topk_all, conf_thres=conf_thres, iou_thres=iou_thres) # keras model if pb or tfjs: # pb prerequisite to tfjs f = export_pb(model, im, file) if tflite or edgetpu: diff --git a/utils/datasets.py b/utils/datasets.py index a8f453aa1904..8159c3dcf264 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -424,9 +424,9 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r cache, exists = self.cache_labels(cache_path, prefix), False # cache # Display cache - nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupted, total + nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupt, total if exists: - d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted" + d = f"Scanning '{cache_path}' images and labels... 
{nf} found, {nm} missing, {ne} empty, {nc} corrupt" tqdm(None, desc=prefix + d, total=n, initial=n) # display cache results if cache['msgs']: LOGGER.info('\n'.join(cache['msgs'])) # display warnings @@ -523,7 +523,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): x[im_file] = [l, shape, segments] if msg: msgs.append(msg) - pbar.desc = f"{desc}{nf} found, {nm} missing, {ne} empty, {nc} corrupted" + pbar.desc = f"{desc}{nf} found, {nm} missing, {ne} empty, {nc} corrupt" pbar.close() if msgs: From e2e95b2d8e8b6be216f4a7c11955d622aff7d043 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 18 Jan 2022 13:52:25 -1000 Subject: [PATCH 593/757] TensorRT `assert im.device.type != 'cpu'` on export (#6340) * TensorRT `assert im.device.type != 'cpu'` on export * Update export.py --- export.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/export.py b/export.py index 11fead4a9e1d..2e90b0a1b24c 100644 --- a/export.py +++ b/export.py @@ -184,9 +184,10 @@ def export_engine(model, im, file, train, half, simplify, workspace=4, verbose=F check_version(trt.__version__, '8.0.0', hard=True) # require tensorrt>=8.0.0 export_onnx(model, im, file, 13, train, False, simplify) # opset 13 onnx = file.with_suffix('.onnx') - assert onnx.exists(), f'failed to export ONNX file: {onnx}' LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...') + assert im.device.type != 'cpu', 'export running on CPU but must be on GPU, i.e. `python export.py --device 0`' + assert onnx.exists(), f'failed to export ONNX file: {onnx}' f = file.with_suffix('.engine') # TensorRT engine file logger = trt.Logger(trt.Logger.INFO) if verbose: From 0cf932bf6346909189c53b375ef97551bb0c2326 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 18 Jan 2022 15:18:23 -1000 Subject: [PATCH 594/757] `export.py` return exported files/dirs (#6343) * `export.py` return exported files/dirs * Path to str --- export.py | 33 ++++++++++++++++++--------------- 1 file changed, 18 insertions(+), 15 deletions(-) diff --git a/export.py b/export.py index 2e90b0a1b24c..a7a79b46b8bb 100644 --- a/export.py +++ b/export.py @@ -434,16 +434,17 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' LOGGER.info(f"\n{colorstr('PyTorch:')} starting from {file} ({file_size(file):.1f} MB)") # Exports + f = [''] * 10 # exported filenames if 'torchscript' in include: - f = export_torchscript(model, im, file, optimize) + f[0] = export_torchscript(model, im, file, optimize) if 'engine' in include: # TensorRT required before ONNX - f = export_engine(model, im, file, train, half, simplify, workspace, verbose) + f[1] = export_engine(model, im, file, train, half, simplify, workspace, verbose) if ('onnx' in include) or ('openvino' in include): # OpenVINO requires ONNX - f = export_onnx(model, im, file, opset, train, dynamic, simplify) + f[2] = export_onnx(model, im, file, opset, train, dynamic, simplify) if 'openvino' in include: - f = export_openvino(model, im, file) + f[3] = export_openvino(model, im, file) if 'coreml' in include: - _, f = export_coreml(model, im, file) + _, f[4] = export_coreml(model, im, file) # TensorFlow Exports if any(tf_exports): @@ -451,25 +452,27 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' if int8 or edgetpu: # TFLite --int8 bug https://github.com/ultralytics/yolov5/issues/5707 check_requirements(('flatbuffers==1.12',)) # required before `import tensorflow` assert not (tflite and tfjs), 'TFLite and TF.js models must be exported separately, please pass only one type.' 
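# the Keras model returned below is reused by the pb/tflite/tfjs steps, so the SavedModel graph is built only once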
- model, f = export_saved_model(model, im, file, dynamic, tf_nms=nms or agnostic_nms or tfjs, - agnostic_nms=agnostic_nms or tfjs, topk_per_class=topk_per_class, - topk_all=topk_all, conf_thres=conf_thres, iou_thres=iou_thres) # keras model + model, f[5] = export_saved_model(model, im, file, dynamic, tf_nms=nms or agnostic_nms or tfjs, + agnostic_nms=agnostic_nms or tfjs, topk_per_class=topk_per_class, + topk_all=topk_all, conf_thres=conf_thres, iou_thres=iou_thres) # keras model if pb or tfjs: # pb prerequisite to tfjs - f = export_pb(model, im, file) + f[6] = export_pb(model, im, file) if tflite or edgetpu: - f = export_tflite(model, im, file, int8=int8 or edgetpu, data=data, ncalib=100) + f[7] = export_tflite(model, im, file, int8=int8 or edgetpu, data=data, ncalib=100) if edgetpu: - f = export_edgetpu(model, im, file) + f[8] = export_edgetpu(model, im, file) if tfjs: - f = export_tfjs(model, im, file) + f[9] = export_tfjs(model, im, file) # Finish + f = [str(x) for x in f if x] # filter out '' and None LOGGER.info(f'\nExport complete ({time.time() - t:.2f}s)' f"\nResults saved to {colorstr('bold', file.parent.resolve())}" f"\nVisualize with https://netron.app" - f"\nDetect with `python detect.py --weights {f}`" - f" or `model = torch.hub.load('ultralytics/yolov5', 'custom', '{f}')" - f"\nValidate with `python val.py --weights {f}`") + f"\nDetect with `python detect.py --weights {f[-1]}`" + f" or `model = torch.hub.load('ultralytics/yolov5', 'custom', '{f[-1]}')" + f"\nValidate with `python val.py --weights {f[-1]}`") + return f # return list of exported files/dirs def parse_opt(): From e5219099cd6e76e4f75a4c8b376531af2791d358 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 19 Jan 2022 10:18:29 -1000 Subject: [PATCH 595/757] Created using Colaboratory --- tutorial.ipynb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 251c18d97815..b160e75adb58 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -1077,7 +1077,7 @@ }, "source": [ "# VOC\n", - "for b, m in zip([64, 48, 32, 16], ['yolov5s', 'yolov5m', 'yolov5l', 'yolov5x']): # zip(batch_size, model)\n", + "for b, m in zip([64, 64, 32, 16], ['yolov5s', 'yolov5m', 'yolov5l', 'yolov5x']): # zip(batch_size, model)\n", " !python train.py --batch {b} --weights {m}.pt --data VOC.yaml --epochs 50 --cache --img 512 --nosave --hyp hyp.finetune.yaml --project VOC --name {m}" ], "execution_count": null, @@ -1099,4 +1099,4 @@ "outputs": [] } ] -} +} \ No newline at end of file From 750c42e43eb38bf23659fcee50576156acd86c77 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 19 Jan 2022 12:24:40 -1000 Subject: [PATCH 596/757] `export.py` automatic `forward_export` (#6352) * `export.py` automatic `forward_export` * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- export.py | 3 ++- tutorial.ipynb | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/export.py b/export.py index a7a79b46b8bb..589b381e035a 100644 --- a/export.py +++ b/export.py @@ -427,7 +427,8 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' elif isinstance(m, Detect): m.inplace = inplace m.onnx_dynamic = dynamic - # m.forward = m.forward_export # assign forward (optional) + if hasattr(m, 'forward_export'): + m.forward = m.forward_export # assign custom forward (optional) for _ in range(2): y = model(im) # dry runs diff --git a/tutorial.ipynb 
b/tutorial.ipynb index b160e75adb58..6ff20dc36c40 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -1099,4 +1099,4 @@ "outputs": [] } ] -} \ No newline at end of file +} From 9708cf56eaead29bce789a07cfe73ecc7d7d4838 Mon Sep 17 00:00:00 2001 From: johnk2hawaii <64561921+johnk2hawaii@users.noreply.github.com> Date: Wed, 19 Jan 2022 14:32:19 -1000 Subject: [PATCH 597/757] New environment variable `VERBOSE` (#6353) New environment variable `VERBOSE` --- utils/general.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index bce2a1763e2a..41f47785bd16 100755 --- a/utils/general.py +++ b/utils/general.py @@ -36,6 +36,7 @@ FILE = Path(__file__).resolve() ROOT = FILE.parents[1] # YOLOv5 root directory NUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads +VERBOSE = str(os.getenv('VERBOSE', True)).lower() == 'true' # global verbose mode torch.set_printoptions(linewidth=320, precision=5, profile='long') np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5 @@ -54,7 +55,7 @@ def is_kaggle(): return False -def set_logging(name=None, verbose=True): +def set_logging(name=None, verbose=VERBOSE): # Sets level and returns logger if is_kaggle(): for h in logging.root.handlers: From 4e841b9b16aa60a39cf7c11be58f55fa2fdc34f2 Mon Sep 17 00:00:00 2001 From: imyhxy Date: Fri, 21 Jan 2022 04:50:17 +0800 Subject: [PATCH 598/757] Reuse `de_parallel()` rather than `is_parallel()` (#6354) --- utils/loss.py | 4 ++-- utils/torch_utils.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/utils/loss.py b/utils/loss.py index 194c8e503e0e..5aa9f017d2af 100644 --- a/utils/loss.py +++ b/utils/loss.py @@ -7,7 +7,7 @@ import torch.nn as nn from utils.metrics import bbox_iou -from utils.torch_utils import is_parallel +from utils.torch_utils import de_parallel def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441 @@ -107,7 +107,7 @@ def __init__(self, model, autobalance=False): if g > 0: BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g) - det = model.module.model[-1] if is_parallel(model) else model.model[-1] # Detect() module + det = de_parallel(model).model[-1] # Detect() module self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, 0.02]) # P3-P7 self.ssi = list(det.stride).index(16) if autobalance else 0 # stride 16 index self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 7e464190f9ba..2a45f434c6a5 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -295,7 +295,7 @@ class ModelEMA: def __init__(self, model, decay=0.9999, updates=0): # Create EMA - self.ema = deepcopy(model.module if is_parallel(model) else model).eval() # FP32 EMA + self.ema = deepcopy(de_parallel(model)).eval() # FP32 EMA # if next(model.parameters()).device.type != 'cpu': # self.ema.half() # FP16 EMA self.updates = updates # number of EMA updates @@ -309,7 +309,7 @@ def update(self, model): self.updates += 1 d = self.decay(self.updates) - msd = model.module.state_dict() if is_parallel(model) else model.state_dict() # model state_dict + msd = de_parallel(model).state_dict() # model state_dict for k, v in self.ema.state_dict().items(): if v.dtype.is_floating_point: v *= d From e1893c894a6c2a1af25038b87b6146eee6a6ee9c Mon Sep 17 00:00:00 2001 From: sitecao 
<95668894+sitecao@users.noreply.github.com> Date: Thu, 20 Jan 2022 18:06:26 -0500 Subject: [PATCH 599/757] `DEVICE_COUNT` instead of `WORLD_SIZE` to calculate `nw` (#6324) --- utils/datasets.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index 8159c3dcf264..96f05afe508e 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -35,7 +35,7 @@ HELP_URL = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' IMG_FORMATS = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo'] # acceptable image suffixes VID_FORMATS = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes -WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) # DPP +DEVICE_COUNT = max(torch.cuda.device_count(), 1) # Get orientation exif tag for orientation in ExifTags.TAGS.keys(): @@ -110,7 +110,7 @@ def create_dataloader(path, imgsz, batch_size, stride, single_cls=False, hyp=Non prefix=prefix) batch_size = min(batch_size, len(dataset)) - nw = min([os.cpu_count() // WORLD_SIZE, batch_size if batch_size > 1 else 0, workers]) # number of workers + nw = min([os.cpu_count() // DEVICE_COUNT, batch_size if batch_size > 1 else 0, workers]) # number of workers sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates return loader(dataset, From 9bcc32a5bf5a823707e47a1167fc87d6050e60f4 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Fri, 21 Jan 2022 04:52:47 +0530 Subject: [PATCH 600/757] Flush callbacks when on `--evolve` (#6374) * log best.pt metrics at train end * update * Update __init__.py * flush callbacks when using evolve Co-authored-by: Glenn Jocher --- train.py | 2 +- utils/loggers/__init__.py | 5 +---- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/train.py b/train.py index ebe6c2e8f5f9..b20b7dbb2dda 100644 --- a/train.py +++ b/train.py @@ -612,7 +612,7 @@ def main(opt, callbacks=Callbacks()): # Train mutation results = train(hyp.copy(), opt, device, callbacks) - + callbacks = Callbacks() # Write mutation results print_mutation(results, hyp.copy(), save_dir, opt.bucket) diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 7679ee70f176..86ccf38443a9 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -159,10 +159,7 @@ def on_train_end(self, last, best, plots, epoch, results): wandb.log_artifact(str(best if best.exists() else last), type='model', name='run_' + self.wandb.wandb_run.id + '_model', aliases=['latest', 'best', 'stripped']) - self.wandb.finish_run() - else: - self.wandb.finish_run() - self.wandb = WandbLogger(self.opt) + self.wandb.finish_run() def on_params_update(self, params): # Update hyperparams or configs of the experiment From 1b41a1d0599337b760810ba9690b6e633e129e65 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 20 Jan 2022 17:59:41 -1000 Subject: [PATCH 601/757] FROM nvcr.io/nvidia/pytorch:21.12-py3 (#6377) --- Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 9a55005a95c5..35e346bf6850 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,7 +1,7 @@ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license # Start FROM Nvidia PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch -FROM nvcr.io/nvidia/pytorch:21.10-py3 +FROM nvcr.io/nvidia/pytorch:21.12-py3 # Install linux packages RUN apt update && apt install -y zip htop screen 
libgl1-mesa-glx @@ -12,7 +12,7 @@ RUN python -m pip install --upgrade pip RUN pip uninstall -y nvidia-tensorboard nvidia-tensorboard-plugin-dlprof RUN pip install --no-cache -r requirements.txt coremltools onnx gsutil notebook wandb>=0.12.2 RUN pip install --no-cache -U torch torchvision numpy Pillow -# RUN pip install --no-cache torch==1.10.0+cu113 torchvision==0.11.1+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html +# RUN pip install --no-cache torch==1.10.1+cu113 torchvision==0.11.2+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html # Create working directory RUN mkdir -p /usr/src/app From bd815d48df18a23e2bb08d88e430183bfb48eb78 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 20 Jan 2022 19:15:04 -1000 Subject: [PATCH 602/757] FROM nvcr.io/nvidia/pytorch:21.10-py3 (#6379) 21.12 generates dockerhub errors so rolling back to 21.10 with latest pytorch install. Not sure if this torch install will work on non-GPU dockerhub autobuild so this is an experiment. --- Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 35e346bf6850..d631a057c359 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,7 +1,7 @@ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license # Start FROM Nvidia PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch -FROM nvcr.io/nvidia/pytorch:21.12-py3 +FROM nvcr.io/nvidia/pytorch:21.10-py3 # Install linux packages RUN apt update && apt install -y zip htop screen libgl1-mesa-glx @@ -12,7 +12,7 @@ RUN python -m pip install --upgrade pip RUN pip uninstall -y nvidia-tensorboard nvidia-tensorboard-plugin-dlprof RUN pip install --no-cache -r requirements.txt coremltools onnx gsutil notebook wandb>=0.12.2 RUN pip install --no-cache -U torch torchvision numpy Pillow -# RUN pip install --no-cache torch==1.10.1+cu113 torchvision==0.11.2+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html +RUN pip install --no-cache torch==1.10.1+cu113 torchvision==0.11.2+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html # Create working directory RUN mkdir -p /usr/src/app From c43439aa31afdca9d1adbd1cc35b57bfb95b442d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 21 Jan 2022 21:06:02 -1000 Subject: [PATCH 603/757] Add `albumentations` to Dockerfile (#6392) --- Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index d631a057c359..95e2cd4af66d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -10,9 +10,9 @@ RUN apt update && apt install -y zip htop screen libgl1-mesa-glx COPY requirements.txt . 
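# copying requirements.txt ahead of the source tree lets Docker cache the pip install layers below,
# rebuilding them only when requirements.txt itself changes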
RUN python -m pip install --upgrade pip RUN pip uninstall -y nvidia-tensorboard nvidia-tensorboard-plugin-dlprof -RUN pip install --no-cache -r requirements.txt coremltools onnx gsutil notebook wandb>=0.12.2 -RUN pip install --no-cache -U torch torchvision numpy Pillow +RUN pip install --no-cache -r requirements.txt albumentations coremltools onnx gsutil notebook numpy Pillow wandb>=0.12.2 RUN pip install --no-cache torch==1.10.1+cu113 torchvision==0.11.2+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html +# RUN pip install --no-cache -U torch torchvision # Create working directory RUN mkdir -p /usr/src/app From 8efe97719c9b3b77c9db9b5c8592e051b7f0c9a7 Mon Sep 17 00:00:00 2001 From: Matthias Date: Sun, 23 Jan 2022 03:37:21 +0100 Subject: [PATCH 604/757] Add `stop_training=False` flag to callbacks (#6365) * New flag 'stop_training' in util.callbacks.Callbacks class to prematurely stop training from callback handler * Removed most of the new checks, leaving only the one after calling 'on_train_batch_end' * Cleanup Co-authored-by: Glenn Jocher --- train.py | 2 ++ utils/callbacks.py | 1 + 2 files changed, 3 insertions(+) diff --git a/train.py b/train.py index b20b7dbb2dda..510377e1178e 100644 --- a/train.py +++ b/train.py @@ -352,6 +352,8 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary pbar.set_description(('%10s' * 2 + '%10.4g' * 5) % ( f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1])) callbacks.run('on_train_batch_end', ni, model, imgs, targets, paths, plots, opt.sync_bn) + if callbacks.stop_training: + return # end batch ------------------------------------------------------------------------------------------------ # Scheduler diff --git a/utils/callbacks.py b/utils/callbacks.py index 13d82ebc2e41..c51c268f20d6 100644 --- a/utils/callbacks.py +++ b/utils/callbacks.py @@ -35,6 +35,7 @@ def __init__(self): 'on_params_update': [], 'teardown': [], } + self.stop_training = False # set True to interrupt training def register_action(self, hook, name='', callback=None): """ From 482af479c07cd465890f63a08483f1ae6540987c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 24 Jan 2022 13:11:11 -0800 Subject: [PATCH 605/757] Add `detect.py` GIF video inference (#6410) * Add detect.py GIF video inference * Cleanup --- detect.py | 2 +- utils/datasets.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/detect.py b/detect.py index 41c364c05d00..9b553faa34e4 100644 --- a/detect.py +++ b/detect.py @@ -199,7 +199,7 @@ def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s) h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) else: # stream fps, w, h = 30, im0.shape[1], im0.shape[0] - save_path += '.mp4' + save_path = str(Path(save_path).with_suffix('.mp4')) # force *.mp4 suffix on results videos vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) vid_writer[i].write(im0) diff --git a/utils/datasets.py b/utils/datasets.py index 96f05afe508e..fa73cba64d40 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -33,8 +33,8 @@ # Parameters HELP_URL = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' -IMG_FORMATS = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo'] # acceptable image suffixes -VID_FORMATS = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes +IMG_FORMATS = ['bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp'] # include image suffixes +VID_FORMATS = ['avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'wmv'] # 
include video suffixes DEVICE_COUNT = max(torch.cuda.device_count(), 1) # Get orientation exif tag From cfecd903a3399ef4529a244303d8807edd6abae4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 24 Jan 2022 15:28:52 -0800 Subject: [PATCH 606/757] Update `greetings.yaml` email address (#6412) * Update `greetings.yaml` email address * Update greetings.yml --- .github/workflows/greetings.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index 6ced1132264a..db2aaf8d9a39 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -31,7 +31,7 @@ jobs: If this is a custom training ❓ Question, please provide as much information as possible, including dataset images, training logs, screenshots, and a public link to online [W&B logging](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data#visualize) if available. - For business inquiries or professional support requests please visit https://ultralytics.com or email Glenn Jocher at glenn.jocher@ultralytics.com. + For business inquiries or professional support requests please visit https://ultralytics.com or email support@ultralytics.com. ## Requirements From ed9bac83922def8c7355c557df4d78208d490799 Mon Sep 17 00:00:00 2001 From: Jonathan Samelson Date: Tue, 25 Jan 2022 16:21:06 +0100 Subject: [PATCH 607/757] Rename logger from 'utils.logger' to 'yolov5' (#6421) * Gave a more explicit name to the logger * Cleanup Co-authored-by: Glenn Jocher --- utils/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index 41f47785bd16..3d6da2fdb173 100755 --- a/utils/general.py +++ b/utils/general.py @@ -65,7 +65,7 @@ def set_logging(name=None, verbose=VERBOSE): return logging.getLogger(name) -LOGGER = set_logging(__name__) # define globally (used in train.py, val.py, detect.py, etc.) +LOGGER = set_logging('yolov5') # define globally (used in train.py, val.py, detect.py, etc.) 
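# any module can now fetch this same logger by name to tune YOLOv5 verbosity without touching the root logger,
# e.g. logging.getLogger('yolov5').setLevel(logging.WARNING)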
class Profile(contextlib.ContextDecorator): From 16563ac5b54da3925cd71c83d58f53c102ec61ff Mon Sep 17 00:00:00 2001 From: Motoki Kimura Date: Wed, 26 Jan 2022 00:24:24 +0900 Subject: [PATCH 608/757] Prefer `tflite_runtime` for TFLite inference if installed (#6406) * import tflite_runtime if tensorflow not installed * rename tflite to tfli * Attempt tflite_runtime for all TFLite workflows Also rename tfli to tfl Co-authored-by: Glenn Jocher --- models/common.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/models/common.py b/models/common.py index e375507a5a7e..346fa37ae2d0 100644 --- a/models/common.py +++ b/models/common.py @@ -374,17 +374,19 @@ def wrap_frozen_graph(gd, inputs, outputs): graph_def.ParseFromString(open(w, 'rb').read()) frozen_func = wrap_frozen_graph(gd=graph_def, inputs="x:0", outputs="Identity:0") elif tflite: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python - if 'edgetpu' in w.lower(): # Edge TPU + try: + import tflite_runtime.interpreter as tfl # prefer tflite_runtime if installed + except ImportError: + import tensorflow.lite as tfl + if 'edgetpu' in w.lower(): # Edge TPU https://coral.ai/software/#edgetpu-runtime LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...') - import tflite_runtime.interpreter as tfli # install https://coral.ai/software/#edgetpu-runtime delegate = {'Linux': 'libedgetpu.so.1', 'Darwin': 'libedgetpu.1.dylib', 'Windows': 'edgetpu.dll'}[platform.system()] - interpreter = tfli.Interpreter(model_path=w, experimental_delegates=[tfli.load_delegate(delegate)]) + interpreter = tfl.Interpreter(model_path=w, experimental_delegates=[tfl.load_delegate(delegate)]) else: # Lite LOGGER.info(f'Loading {w} for TensorFlow Lite inference...') - import tensorflow as tf - interpreter = tf.lite.Interpreter(model_path=w) # load TFLite model + interpreter = tfl.Interpreter(model_path=w) # load TFLite model interpreter.allocate_tensors() # allocate input_details = interpreter.get_input_details() # inputs output_details = interpreter.get_output_details() # outputs From ff8646cdea57a1e81a381de37b881e55ab273777 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 25 Jan 2022 14:33:22 -0800 Subject: [PATCH 609/757] Update workflows (#6427) * Workflow updates * quotes fix * best to weights fix --- .github/workflows/ci-testing.yml | 29 ++++++++++++++--------------- .github/workflows/greetings.yml | 8 ++++---- .github/workflows/stale.yml | 2 +- README.md | 2 +- 4 files changed, 20 insertions(+), 21 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 9085b2b7e6dd..5cf1613ab0cd 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -23,7 +23,7 @@ jobs: model: [ 'yolov5n' ] # models to test # Timeout: https://stackoverflow.com/a/59076067/4521646 - timeout-minutes: 50 + timeout-minutes: 60 steps: - uses: actions/checkout@v2 - name: Set up Python ${{ matrix.python-version }} @@ -60,35 +60,34 @@ jobs: # - name: W&B login # run: wandb login 345011b3fb26dc8337fd9b20e53857c1d403f2aa - - name: Download data - run: | - # curl -L -o tmp.zip https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip - # unzip -q tmp.zip -d ../ - # rm tmp.zip + # - name: Download data + # run: | + # curl -L -o tmp.zip https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip + # unzip -q tmp.zip -d ../datasets - name: Tests workflow run: | # export PYTHONPATH="$PWD" # to run '$ python *.py' files in subdirectories - 
di=cpu # device + d=cpu # device + weights=runs/train/exp/weights/best.pt # Train - python train.py --img 64 --batch 32 --weights ${{ matrix.model }}.pt --cfg ${{ matrix.model }}.yaml --epochs 1 --device $di + python train.py --img 64 --batch 32 --weights ${{ matrix.model }}.pt --cfg ${{ matrix.model }}.yaml --epochs 1 --device $d # Val - python val.py --img 64 --batch 32 --weights ${{ matrix.model }}.pt --device $di - python val.py --img 64 --batch 32 --weights runs/train/exp/weights/last.pt --device $di + python val.py --img 64 --batch 32 --weights ${{ matrix.model }}.pt --device $d + python val.py --img 64 --batch 32 --weights $weights --device $d # Detect - python detect.py --weights ${{ matrix.model }}.pt --device $di - python detect.py --weights runs/train/exp/weights/last.pt --device $di + python detect.py --weights ${{ matrix.model }}.pt --device $d + python detect.py --weights $weights --device $d python hubconf.py # hub # Export python models/yolo.py --cfg ${{ matrix.model }}.yaml # build PyTorch model python models/tf.py --weights ${{ matrix.model }}.pt # build TensorFlow model - python export.py --img 64 --batch 1 --weights ${{ matrix.model }}.pt --include torchscript onnx # export + python export.py --weights ${{ matrix.model }}.pt --img 64 --include torchscript onnx # export # Python python - <=3.6.0**](https://www.python.org/) with all [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) installed including [**PyTorch>=1.7**](https://pytorch.org/get-started/locally/). To get started: + [**Python>=3.7.0**](https://www.python.org/) with all [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) installed including [**PyTorch>=1.7**](https://pytorch.org/get-started/locally/). To get started: ```bash git clone https://github.com/ultralytics/yolov5 # clone cd yolov5 diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index b046dc949d1c..be2b0d97d5e7 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -3,7 +3,7 @@ name: Close stale issues on: schedule: - - cron: "0 0 * * *" + - cron: '0 0 * * *' # Runs at 00:00 UTC every day jobs: stale: diff --git a/README.md b/README.md index 5f45eb407fc5..a73ba2797b1b 100644 --- a/README.md +++ b/README.md @@ -63,7 +63,7 @@ See the [YOLOv5 Docs](https://docs.ultralytics.com) for full documentation on tr Install Clone repo and install [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) in a -[**Python>=3.6.0**](https://www.python.org/) environment, including +[**Python>=3.7.0**](https://www.python.org/) environment, including [**PyTorch>=1.7**](https://pytorch.org/get-started/locally/). ```bash From d5966c93f1855baec531c3585da247cded72247f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 25 Jan 2022 14:57:27 -0800 Subject: [PATCH 610/757] Namespace `VERBOSE` env variable to `YOLOv5_VERBOSE` (#6428) * Verbose updates * Verbose updates --- hubconf.py | 12 +++++------ utils/general.py | 54 ++++++++++++++++++++++++------------------------ utils/plots.py | 6 +++--- 3 files changed, 36 insertions(+), 36 deletions(-) diff --git a/hubconf.py b/hubconf.py index 55d15abe2ac5..39fa614b2e34 100644 --- a/hubconf.py +++ b/hubconf.py @@ -12,10 +12,10 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None): - """Creates a specified YOLOv5 model + """Creates or loads a YOLOv5 model Arguments: - name (str): name of model, i.e. 
'yolov5s' + name (str): model name 'yolov5s' or path 'path/to/best.pt' pretrained (bool): load pretrained weights into the model channels (int): number of input channels classes (int): number of model classes @@ -24,19 +24,19 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo device (str, torch.device, None): device to use for model parameters Returns: - YOLOv5 pytorch model + YOLOv5 model """ from pathlib import Path from models.common import AutoShape, DetectMultiBackend from models.yolo import Model from utils.downloads import attempt_download - from utils.general import check_requirements, intersect_dicts, set_logging + from utils.general import LOGGER, check_requirements, intersect_dicts, logging from utils.torch_utils import select_device + if not verbose: + LOGGER.setLevel(logging.WARNING) check_requirements(exclude=('tensorboard', 'thop', 'opencv-python')) - set_logging(verbose=verbose) - name = Path(name) path = name.with_suffix('.pt') if name.suffix == '' else name # checkpoint path try: diff --git a/utils/general.py b/utils/general.py index 3d6da2fdb173..e9f5ec2ac128 100755 --- a/utils/general.py +++ b/utils/general.py @@ -36,7 +36,7 @@ FILE = Path(__file__).resolve() ROOT = FILE.parents[1] # YOLOv5 root directory NUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads -VERBOSE = str(os.getenv('VERBOSE', True)).lower() == 'true' # global verbose mode +VERBOSE = str(os.getenv('YOLOv5_VERBOSE', True)).lower() == 'true' # global verbose mode torch.set_printoptions(linewidth=320, precision=5, profile='long') np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5 @@ -241,20 +241,20 @@ def check_online(): def check_git_status(): # Recommend 'git pull' if code is out of date msg = ', for updates see https://github.com/ultralytics/yolov5' - print(colorstr('github: '), end='') - assert Path('.git').exists(), 'skipping check (not a git repository)' + msg - assert not is_docker(), 'skipping check (Docker image)' + msg - assert check_online(), 'skipping check (offline)' + msg + s = colorstr('github: ') # string + assert Path('.git').exists(), s + 'skipping check (not a git repository)' + msg + assert not is_docker(), s + 'skipping check (Docker image)' + msg + assert check_online(), s + 'skipping check (offline)' + msg cmd = 'git fetch && git config --get remote.origin.url' url = check_output(cmd, shell=True, timeout=5).decode().strip().rstrip('.git') # git fetch branch = check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out n = int(check_output(f'git rev-list {branch}..origin/master --count', shell=True)) # commits behind if n > 0: - s = f"⚠️ YOLOv5 is out of date by {n} commit{'s' * (n > 1)}. Use `git pull` or `git clone {url}` to update." + s += f"⚠️ YOLOv5 is out of date by {n} commit{'s' * (n > 1)}. Use `git pull` or `git clone {url}` to update." 
else: - s = f'up to date with {url} ✅' - print(emojis(s)) # emoji-safe + s += f'up to date with {url} ✅' + LOGGER.info(emojis(s)) # emoji-safe def check_python(minimum='3.6.2'): @@ -294,21 +294,21 @@ def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), insta except Exception as e: # DistributionNotFound or VersionConflict if requirements not met s = f"{prefix} {r} not found and is required by YOLOv5" if install: - print(f"{s}, attempting auto-update...") + LOGGER.info(f"{s}, attempting auto-update...") try: assert check_online(), f"'pip install {r}' skipped (offline)" - print(check_output(f"pip install '{r}'", shell=True).decode()) + LOGGER.info(check_output(f"pip install '{r}'", shell=True).decode()) n += 1 except Exception as e: - print(f'{prefix} {e}') + LOGGER.warning(f'{prefix} {e}') else: - print(f'{s}. Please install and rerun your command.') + LOGGER.info(f'{s}. Please install and rerun your command.') if n: # if packages updated source = file.resolve() if 'file' in locals() else requirements s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \ f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n" - print(emojis(s)) + LOGGER.info(emojis(s)) def check_img_size(imgsz, s=32, floor=0): @@ -318,7 +318,7 @@ def check_img_size(imgsz, s=32, floor=0): else: # list i.e. img_size=[640, 480] new_size = [max(make_divisible(x, int(s)), floor) for x in imgsz] if new_size != imgsz: - print(f'WARNING: --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}') + LOGGER.warning(f'WARNING: --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}') return new_size @@ -333,7 +333,7 @@ def check_imshow(): cv2.waitKey(1) return True except Exception as e: - print(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image displays\n{e}') + LOGGER.warning(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image displays\n{e}') return False @@ -363,9 +363,9 @@ def check_file(file, suffix=''): url = str(Path(file)).replace(':/', '://') # Pathlib turns :// -> :/ file = Path(urllib.parse.unquote(file).split('?')[0]).name # '%2F' to '/', split https://url.com/file.txt?auth if Path(file).is_file(): - print(f'Found {url} locally at {file}') # file already exists + LOGGER.info(f'Found {url} locally at {file}') # file already exists else: - print(f'Downloading {url} to {file}...') + LOGGER.info(f'Downloading {url} to {file}...') torch.hub.download_url_to_file(url, file) assert Path(file).exists() and Path(file).stat().st_size > 0, f'File download failed: {url}' # check return file @@ -407,23 +407,23 @@ def check_dataset(data, autodownload=True): if val: val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path if not all(x.exists() for x in val): - print('\nWARNING: Dataset not found, nonexistent paths: %s' % [str(x) for x in val if not x.exists()]) + LOGGER.info('\nDataset not found, missing paths: %s' % [str(x) for x in val if not x.exists()]) if s and autodownload: # download script root = path.parent if 'path' in data else '..' # unzip directory i.e. 
'../' if s.startswith('http') and s.endswith('.zip'): # URL f = Path(s).name # filename - print(f'Downloading {s} to {f}...') + LOGGER.info(f'Downloading {s} to {f}...') torch.hub.download_url_to_file(s, f) Path(root).mkdir(parents=True, exist_ok=True) # create root ZipFile(f).extractall(path=root) # unzip Path(f).unlink() # remove zip r = None # success elif s.startswith('bash '): # bash script - print(f'Running {s} ...') + LOGGER.info(f'Running {s} ...') r = os.system(s) else: # python script r = exec(s, {'yaml': data}) # return None - print(f"Dataset autodownload {f'success, saved to {root}' if r in (0, None) else 'failure'}\n") + LOGGER.info(f"Dataset autodownload {f'success, saved to {root}' if r in (0, None) else 'failure'}\n") else: raise Exception('Dataset not found.') @@ -445,13 +445,13 @@ def download_one(url, dir): if Path(url).is_file(): # exists in current path Path(url).rename(f) # move to dir elif not f.exists(): - print(f'Downloading {url} to {f}...') + LOGGER.info(f'Downloading {url} to {f}...') if curl: os.system(f"curl -L '{url}' -o '{f}' --retry 9 -C -") # curl download, retry and resume on fail else: torch.hub.download_url_to_file(url, f, progress=True) # torch download if unzip and f.suffix in ('.zip', '.gz'): - print(f'Unzipping {f}...') + LOGGER.info(f'Unzipping {f}...') if f.suffix == '.zip': ZipFile(f).extractall(path=dir) # unzip elif f.suffix == '.gz': @@ -744,7 +744,7 @@ def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=Non output[xi] = x[i] if (time.time() - t) > time_limit: - print(f'WARNING: NMS time limit {time_limit}s exceeded') + LOGGER.warning(f'WARNING: NMS time limit {time_limit}s exceeded') break # time limit exceeded return output @@ -763,7 +763,7 @@ def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_op p.requires_grad = False torch.save(x, s or f) mb = os.path.getsize(s or f) / 1E6 # filesize - print(f"Optimizer stripped from {f},{(' saved as %s,' % s) if s else ''} {mb:.1f}MB") + LOGGER.info(f"Optimizer stripped from {f},{(' saved as %s,' % s) if s else ''} {mb:.1f}MB") def print_mutation(results, hyp, save_dir, bucket): @@ -786,8 +786,8 @@ def print_mutation(results, hyp, save_dir, bucket): f.write(s + ('%20.5g,' * n % vals).rstrip(',') + '\n') # Print to screen - print(colorstr('evolve: ') + ', '.join(f'{x.strip():>20s}' for x in keys)) - print(colorstr('evolve: ') + ', '.join(f'{x:20.5g}' for x in vals), end='\n\n\n') + LOGGER.info(colorstr('evolve: ') + ', '.join(f'{x.strip():>20s}' for x in keys)) + LOGGER.info(colorstr('evolve: ') + ', '.join(f'{x:20.5g}' for x in vals) + '\n\n') # Save yaml with open(evolve_yaml, 'w') as f: diff --git a/utils/plots.py b/utils/plots.py index 69037ee9af70..74868403edc0 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -57,7 +57,7 @@ def check_font(font='Arial.ttf', size=10): return ImageFont.truetype(str(font) if font.exists() else font.name, size) except Exception as e: # download if missing url = "https://ultralytics.com/assets/" + font.name - print(f'Downloading {url} to {font}...') + LOGGER.info(f'Downloading {url} to {font}...') torch.hub.download_url_to_file(url, str(font), progress=False) try: return ImageFont.truetype(str(font), size) @@ -143,7 +143,7 @@ def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detec ax[i].imshow(blocks[i].squeeze()) # cmap='gray' ax[i].axis('off') - print(f'Saving {f}... ({n}/{channels})') + LOGGER.info(f'Saving {f}... 
({n}/{channels})') plt.savefig(f, dpi=300, bbox_inches='tight') plt.close() np.save(str(f.with_suffix('.npy')), x[0].cpu().numpy()) # npy save @@ -417,7 +417,7 @@ def plot_results(file='path/to/results.csv', dir=''): # if j in [8, 9, 10]: # share train and val loss y axes # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5]) except Exception as e: - print(f'Warning: Plotting error for {f}: {e}') + LOGGER.info(f'Warning: Plotting error for {f}: {e}') ax[1].legend() fig.savefig(save_dir / 'results.png', dpi=200) plt.close() From 3b7ac28ed1c760ee3ed6a9780027a4a3e775f937 Mon Sep 17 00:00:00 2001 From: toschi23 Date: Wed, 26 Jan 2022 14:26:02 +0100 Subject: [PATCH 611/757] Add `*.asf` video support (#6436) --- utils/datasets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/datasets.py b/utils/datasets.py index fa73cba64d40..4e0c38e76370 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -34,7 +34,7 @@ # Parameters HELP_URL = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' IMG_FORMATS = ['bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp'] # include image suffixes -VID_FORMATS = ['avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'wmv'] # include video suffixes +VID_FORMATS = ['asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'wmv'] # include video suffixes DEVICE_COUNT = max(torch.cuda.device_count(), 1) # Get orientation exif tag From fe7de6a82da3444d755453f86f40b508f3b99419 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 26 Jan 2022 19:10:59 +0100 Subject: [PATCH 612/757] Revert "Remove `dataset_stats()` autodownload capability (#6303)" (#6442) This reverts commit 3119b2f27c198c6b9c02fb57d3b00b61a7bd2356. --- utils/datasets.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index 4e0c38e76370..85923e918aa5 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -936,10 +936,11 @@ def verify_image_label(args): return [None, None, None, None, nm, nf, ne, nc, msg] -def dataset_stats(path='coco128.yaml', verbose=False, profile=False, hub=False): +def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False, profile=False, hub=False): """ Return dataset statistics dictionary with images and instances counts per split per class To run in parent directory: export PYTHONPATH="$PWD/yolov5" - Usage: from utils.datasets import *; dataset_stats('../datasets/coco128_with_yaml.zip') + Usage1: from utils.datasets import *; dataset_stats('coco128.yaml', autodownload=True) + Usage2: from utils.datasets import *; dataset_stats('../datasets/coco128_with_yaml.zip') Arguments path: Path to data.yaml or data.zip (with data.yaml inside data.zip) autodownload: Attempt to download dataset if not found locally @@ -983,7 +984,7 @@ def hub_ops(f, max_dim=1920): data = yaml.safe_load(f) # data dict if zipped: data['path'] = data_dir # TODO: should this be dir.resolve()? 
- check_dataset(data, autodownload=False) + check_dataset(data, autodownload) # download dataset if missing hub_dir = Path(data['path'] + ('-hub' if hub else '')) stats = {'nc': data['nc'], 'names': data['names']} # statistics dictionary for split in 'train', 'val', 'test': From 856d4e5733451c7fe9b12f183b384e986699b1f8 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 26 Jan 2022 23:47:00 +0100 Subject: [PATCH 613/757] Fix `select_device()` for Multi-GPU (#6434) * Fix `select_device()` for Multi-GPU Possible fix for https://github.com/ultralytics/yolov5/issues/6431 * Update torch_utils.py * Update torch_utils.py * Update torch_utils.py * Update torch_utils.py * Update * Update * Update * Update * Update * Update * Update * Update * Update --- utils/datasets.py | 4 ++-- utils/torch_utils.py | 15 ++++++++++++--- 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index 85923e918aa5..4eb444087860 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -29,13 +29,13 @@ from utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective from utils.general import (LOGGER, NUM_THREADS, check_dataset, check_requirements, check_yaml, clean_str, segments2boxes, xyn2xy, xywh2xyxy, xywhn2xyxy, xyxy2xywhn) -from utils.torch_utils import torch_distributed_zero_first +from utils.torch_utils import device_count, torch_distributed_zero_first # Parameters HELP_URL = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' IMG_FORMATS = ['bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp'] # include image suffixes VID_FORMATS = ['asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'wmv'] # include video suffixes -DEVICE_COUNT = max(torch.cuda.device_count(), 1) +DEVICE_COUNT = max(device_count(), 1) # number of CUDA devices # Get orientation exif tag for orientation in ExifTags.TAGS.keys(): diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 2a45f434c6a5..d958a8951074 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -53,6 +53,15 @@ def git_describe(path=Path(__file__).parent): # path must be a directory return '' # not a git repository +def device_count(): + # Returns number of CUDA devices available. Safe version of torch.cuda.device_count(). 
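# Why a subprocess here rather than torch.cuda (annotation, grounded in the
# "must be before assert is_available()" comment added in PATCH 614 below):
# CUDA_VISIBLE_DEVICES only takes effect if it is exported before the first
# CUDA call, so select_device() needs a GPU count that never initializes
# torch.cuda. Counting `nvidia-smi -L` output lines in a subprocess does that,
# assuming a Linux host with nvidia-smi on PATH.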
+ try: + cmd = 'nvidia-smi -L | wc -l' + return int(subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1]) + except Exception as e: + return 0 + + def select_device(device='', batch_size=0, newline=True): # device = 'cpu' or '0' or '0,1,2,3' s = f'YOLOv5 🚀 {git_describe() or date_modified()} torch {torch.__version__} ' # string @@ -61,10 +70,10 @@ def select_device(device='', batch_size=0, newline=True): if cpu: os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False elif device: # non-cpu device requested - nd = torch.cuda.device_count() # number of CUDA devices - assert torch.cuda.is_available(), 'CUDA is not available, use `--device cpu` or do not pass a --device' + nd = device_count() # number of CUDA devices assert nd > int(max(device.split(','))), f'Invalid `--device {device}` request, valid devices are 0 - {nd - 1}' - os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable (must be after asserts) + os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable - must be before assert is_available() + assert torch.cuda.is_available(), 'CUDA is not available, use `--device cpu` or do not pass a --device' cuda = not cpu and torch.cuda.is_available() if cuda: From d8b5beb0b0a5cb3ec3ea20e9fff415057dcf25f6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 28 Jan 2022 08:18:01 +0100 Subject: [PATCH 614/757] Fix2 `select_device()` for Multi-GPU (#6461) * Fix2 select_device() for Multi-GPU * Cleanup * Cleanup * Simplify error message * Improve assert * Update torch_utils.py --- utils/datasets.py | 6 +++--- utils/torch_utils.py | 8 ++++---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index 4eb444087860..07f6321e0285 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -29,13 +29,12 @@ from utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective from utils.general import (LOGGER, NUM_THREADS, check_dataset, check_requirements, check_yaml, clean_str, segments2boxes, xyn2xy, xywh2xyxy, xywhn2xyxy, xyxy2xywhn) -from utils.torch_utils import device_count, torch_distributed_zero_first +from utils.torch_utils import torch_distributed_zero_first # Parameters HELP_URL = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' IMG_FORMATS = ['bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp'] # include image suffixes VID_FORMATS = ['asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'wmv'] # include video suffixes -DEVICE_COUNT = max(device_count(), 1) # number of CUDA devices # Get orientation exif tag for orientation in ExifTags.TAGS.keys(): @@ -110,7 +109,8 @@ def create_dataloader(path, imgsz, batch_size, stride, single_cls=False, hyp=Non prefix=prefix) batch_size = min(batch_size, len(dataset)) - nw = min([os.cpu_count() // DEVICE_COUNT, batch_size if batch_size > 1 else 0, workers]) # number of workers + nd = torch.cuda.device_count() # number of CUDA devices + nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) # number of workers sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates return loader(dataset, diff --git a/utils/torch_utils.py b/utils/torch_utils.py index d958a8951074..2b51821a3b62 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -54,7 +54,8 @@ def 
git_describe(path=Path(__file__).parent): # path must be a directory def device_count(): - # Returns number of CUDA devices available. Safe version of torch.cuda.device_count(). + # Returns number of CUDA devices available. Safe version of torch.cuda.device_count(). Only works on Linux. + assert platform.system() == 'Linux', 'device_count() function only works on Linux' try: cmd = 'nvidia-smi -L | wc -l' return int(subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1]) @@ -70,10 +71,9 @@ def select_device(device='', batch_size=0, newline=True): if cpu: os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False elif device: # non-cpu device requested - nd = device_count() # number of CUDA devices - assert nd > int(max(device.split(','))), f'Invalid `--device {device}` request, valid devices are 0 - {nd - 1}' os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable - must be before assert is_available() - assert torch.cuda.is_available(), 'CUDA is not available, use `--device cpu` or do not pass a --device' + assert torch.cuda.is_available() and torch.cuda.device_count() >= len(device.replace(',', '')), \ + f"Invalid CUDA '--device {device}' requested, use '--device cpu' or pass valid CUDA device(s)" cuda = not cpu and torch.cuda.is_available() if cuda: From 7539cd75c3a6c06d00848617f6265f39a765ccea Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 28 Jan 2022 20:23:17 +0100 Subject: [PATCH 615/757] Add Product Hunt social media icon (#6464) * Social media icons update * fix URL * Update README.md --- README.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/README.md b/README.md index a73ba2797b1b..f9947b98557d 100644 --- a/README.md +++ b/README.md @@ -27,6 +27,10 @@ + + + + @@ -282,6 +286,10 @@ professional support requests please visit [https://ultralytics.com/contact](htt + + + + From 6445a8137e87f67cf3275c70e3585f634260417b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 1 Feb 2022 15:54:51 +0100 Subject: [PATCH 616/757] Resolve dataset paths (#6489) --- utils/general.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/utils/general.py b/utils/general.py index e9f5ec2ac128..86e3b3c1c54b 100755 --- a/utils/general.py +++ b/utils/general.py @@ -394,12 +394,15 @@ def check_dataset(data, autodownload=True): with open(data, errors='ignore') as f: data = yaml.safe_load(f) # dictionary - # Parse yaml - path = extract_dir or Path(data.get('path') or '') # optional 'path' default to '.' + # Resolve paths + path = Path(extract_dir or data.get('path') or '') # optional 'path' default to '.' + if not path.is_absolute(): + path = (ROOT / path).resolve() for k in 'train', 'val', 'test': if data.get(k): # prepend path data[k] = str(path / data[k]) if isinstance(data[k], str) else [str(path / x) for x in data[k]] + # Parse yaml assert 'nc' in data, "Dataset 'nc' key missing." 
if 'names' not in data: data['names'] = [f'class{i}' for i in range(data['nc'])] # assign class names if missing From b884ea36c469d8501aa4016bf76cccfc3168ccd9 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 1 Feb 2022 21:17:56 +0100 Subject: [PATCH 617/757] Simplify TF normalized to pixels (#6494) --- models/common.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/models/common.py b/models/common.py index 346fa37ae2d0..557163310e74 100644 --- a/models/common.py +++ b/models/common.py @@ -446,10 +446,7 @@ def forward(self, im, augment=False, visualize=False, val=False): if int8: scale, zero_point = output['quantization'] y = (y.astype(np.float32) - zero_point) * scale # re-scale - y[..., 0] *= w # x - y[..., 1] *= h # y - y[..., 2] *= w # w - y[..., 3] *= h # h + y[..., :4] *= [w, h, w, h] # xywh normalized to pixels y = torch.tensor(y) if isinstance(y, np.ndarray) else y return (y, []) if val else y From 5e4ff195b21816d96b1fe0a94a9670a7e2ad34e2 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 1 Feb 2022 22:06:29 +0100 Subject: [PATCH 618/757] Improved `export.py` usage examples (#6495) * Improved `export.py` usage examples * Cleanup --- export.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/export.py b/export.py index 589b381e035a..bb17703821e8 100644 --- a/export.py +++ b/export.py @@ -469,10 +469,10 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' f = [str(x) for x in f if x] # filter out '' and None LOGGER.info(f'\nExport complete ({time.time() - t:.2f}s)' f"\nResults saved to {colorstr('bold', file.parent.resolve())}" - f"\nVisualize with https://netron.app" - f"\nDetect with `python detect.py --weights {f[-1]}`" - f" or `model = torch.hub.load('ultralytics/yolov5', 'custom', '{f[-1]}')" - f"\nValidate with `python val.py --weights {f[-1]}`") + f"\nDetect: python detect.py --weights {f[-1]}" + f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{f[-1]}')" + f"\nValidate: python val.py --weights {f[-1]}" + f"\nVisualize: https://netron.app") return f # return list of exported files/dirs From 77977e07912768738ef7ca46f44f19b6959206d9 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 1 Feb 2022 22:34:15 +0100 Subject: [PATCH 619/757] CoreML inference fix `list()` -> `sorted()` (#6496) --- models/common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/common.py b/models/common.py index 557163310e74..29d02e741e17 100644 --- a/models/common.py +++ b/models/common.py @@ -427,7 +427,7 @@ def forward(self, im, augment=False, visualize=False, val=False): conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float) y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1) else: - y = y[list(y)[-1]] # last output + y = y[sorted(y)[-1]] # last output else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU) im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3) if self.saved_model: # SavedModel From 842d049e1bbe5db87ad36f4ba86e1a9c2b6e413a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 1 Feb 2022 22:59:26 +0100 Subject: [PATCH 620/757] Suppress `torch.jit.TracerWarning` on export (#6498) * Suppress torch.jit.TracerWarning TracerWarnings can be safely ignored. 
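In isolation, the suppression pattern the diff below applies is a standard
warnings-module filter scoped around the tracing call. A minimal sketch (the
traced-export line is illustrative only, not the full export.py flow):

    import warnings

    import torch

    with warnings.catch_warnings():
        warnings.filterwarnings(action='ignore', category=torch.jit.TracerWarning)
        # ts = torch.jit.trace(model, im, strict=False)  # tracing emits TracerWarnings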
* Cleanup --- export.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/export.py b/export.py index bb17703821e8..8666f3de63e0 100644 --- a/export.py +++ b/export.py @@ -45,6 +45,7 @@ import subprocess import sys import time +import warnings from pathlib import Path import torch @@ -508,8 +509,10 @@ def parse_opt(): def main(opt): - for opt.weights in (opt.weights if isinstance(opt.weights, list) else [opt.weights]): - run(**vars(opt)) + with warnings.catch_warnings(): + warnings.filterwarnings(action='ignore', category=torch.jit.TracerWarning) # suppress TracerWarning + for opt.weights in (opt.weights if isinstance(opt.weights, list) else [opt.weights]): + run(**vars(opt)) if __name__ == "__main__": From 4c409332667477560200958b513b958bb8fdef71 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 1 Feb 2022 23:52:50 +0100 Subject: [PATCH 621/757] Suppress export.run() TracerWarnings (#6499) Suppresses warnings when calling export.run() directly, not just CLI python export.py. Also adds Requirements examples for CPU and GPU backends --- export.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/export.py b/export.py index 8666f3de63e0..09c50baa415a 100644 --- a/export.py +++ b/export.py @@ -16,6 +16,10 @@ TensorFlow Edge TPU | `edgetpu` | yolov5s_edgetpu.tflite TensorFlow.js | `tfjs` | yolov5s_web_model/ +Requirements: + $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu # CPU + $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow # GPU + Usage: $ python path/to/export.py --weights yolov5s.pt --include torchscript onnx openvino engine coreml tflite ... @@ -437,6 +441,7 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' # Exports f = [''] * 10 # exported filenames + warnings.filterwarnings(action='ignore', category=torch.jit.TracerWarning) # suppress TracerWarning if 'torchscript' in include: f[0] = export_torchscript(model, im, file, optimize) if 'engine' in include: # TensorRT required before ONNX @@ -509,10 +514,8 @@ def parse_opt(): def main(opt): - with warnings.catch_warnings(): - warnings.filterwarnings(action='ignore', category=torch.jit.TracerWarning) # suppress TracerWarning - for opt.weights in (opt.weights if isinstance(opt.weights, list) else [opt.weights]): - run(**vars(opt)) + for opt.weights in (opt.weights if isinstance(opt.weights, list) else [opt.weights]): + run(**vars(opt)) if __name__ == "__main__": From b73c62ebc5180d1fa3b412e55ab831d8285e1673 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Thu, 3 Feb 2022 15:59:52 +0530 Subject: [PATCH 622/757] W&B: Remember batchsize on resuming (#6512) * log best.pt metrics at train end * update * Update __init__.py * flush callbacks when using evolve * remember batch size on resuming * Update train.py Co-authored-by: Glenn Jocher --- train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/train.py b/train.py index 510377e1178e..2a973fb7164b 100644 --- a/train.py +++ b/train.py @@ -96,7 +96,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary if loggers.wandb: data_dict = loggers.wandb.data_dict if resume: - weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp + weights, epochs, hyp, batch_size = opt.weights, opt.epochs, opt.hyp, opt.batch_size # Register actions for k in methods(loggers): From 19e0208fc9e33010717e066f9bd65c27db7c2b5c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 3 Feb 2022 12:15:13 +0100 Subject: 
[PATCH 623/757] Update hyp.scratch-high.yaml (#6525) Update `lrf: 0.1`, tested on YOLOv5x6 to 55.0 mAP@0.5:0.95, slightly higher than current. --- data/hyps/hyp.scratch-high.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/hyps/hyp.scratch-high.yaml b/data/hyps/hyp.scratch-high.yaml index 5a586cc63fae..123cc8407413 100644 --- a/data/hyps/hyp.scratch-high.yaml +++ b/data/hyps/hyp.scratch-high.yaml @@ -4,7 +4,7 @@ # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3) -lrf: 0.2 # final OneCycleLR learning rate (lr0 * lrf) +lrf: 0.1 # final OneCycleLR learning rate (lr0 * lrf) momentum: 0.937 # SGD momentum/Adam beta1 weight_decay: 0.0005 # optimizer weight decay 5e-4 warmup_epochs: 3.0 # warmup epochs (fractions ok) From cb40c9afda52a149b49d5e8d06100c60f6cd1614 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 3 Feb 2022 18:11:28 +0100 Subject: [PATCH 624/757] TODO issues exempt from stale action (#6530) --- .github/workflows/stale.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index be2b0d97d5e7..7a83950c17b7 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -34,5 +34,5 @@ jobs: stale-pr-message: 'This pull request has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions YOLOv5 🚀 and Vision AI ⭐.' days-before-stale: 30 days-before-close: 5 - exempt-issue-labels: 'documentation,tutorial' + exempt-issue-labels: 'documentation,tutorial,TODO' operations-per-run: 100 # The maximum number of operations per run, used to control rate limiting. 
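A note on the `lrf` change in PATCH 623 above: `lrf` is a multiplier, so the
scheduler anneals the learning rate from `lr0` down to `lr0 * lrf` over training
(0.01 -> 0.001 with the new value). A minimal sketch of the cosine one-cycle
lambda this feeds, mirroring the `one_cycle()` helper in utils/general.py (the
exact signature is assumed here, not quoted from this patch series):

    import math

    def one_cycle(y1=1.0, y2=0.1, steps=100):
        # sinusoidal ramp from y1 at x=0 to y2 at x=steps
        return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1

    lf = one_cycle(1, 0.1, 300)  # lrf=0.1 over 300 epochs: lf(0)=1.0, lf(300)=0.1
    # scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)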
From c3e599cfda112455d69da0fea64faadfaeaedcf2 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 3 Feb 2022 19:09:24 +0100 Subject: [PATCH 625/757] Update val_batch*.jpg for Chinese fonts (#6526) * Update plots for Chinese fonts * make is_chinese() non-str safe * Add global FONT * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update general.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/general.py | 71 ++++++++++++++++++++++++++++-------------------- utils/plots.py | 23 +++++++--------- 2 files changed, 52 insertions(+), 42 deletions(-) diff --git a/utils/general.py b/utils/general.py index 86e3b3c1c54b..fce5e38c6c9e 100755 --- a/utils/general.py +++ b/utils/general.py @@ -37,6 +37,7 @@ ROOT = FILE.parents[1] # YOLOv5 root directory NUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads VERBOSE = str(os.getenv('YOLOv5_VERBOSE', True)).lower() == 'true' # global verbose mode +FONT = 'Arial.ttf' # https://ultralytics.com/assets/Arial.ttf torch.set_printoptions(linewidth=320, precision=5, profile='long') np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5 @@ -55,6 +56,21 @@ def is_kaggle(): return False +def is_writeable(dir, test=False): + # Return True if directory has write permissions, test opening a file with write permissions if test=True + if test: # method 1 + file = Path(dir) / 'tmp.txt' + try: + with open(file, 'w'): # open file with write permissions + pass + file.unlink() # remove file + return True + except OSError: + return False + else: # method 2 + return os.access(dir, os.R_OK) # possible issues on Windows + + def set_logging(name=None, verbose=VERBOSE): # Sets level and returns logger if is_kaggle(): @@ -68,6 +84,22 @@ def set_logging(name=None, verbose=VERBOSE): LOGGER = set_logging('yolov5') # define globally (used in train.py, val.py, detect.py, etc.) +def user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'): + # Return path of user configuration directory. Prefer environment variable if exists. Make dir if required. + env = os.getenv(env_var) + if env: + path = Path(env) # use environment variable + else: + cfg = {'Windows': 'AppData/Roaming', 'Linux': '.config', 'Darwin': 'Library/Application Support'} # 3 OS dirs + path = Path.home() / cfg.get(platform.system(), '') # OS-specific config dir + path = (path if is_writeable(path) else Path('/tmp')) / dir # GCP and AWS lambda fix, only /tmp is writeable + path.mkdir(exist_ok=True) # make if required + return path + + +CONFIG_DIR = user_config_dir() # Ultralytics settings dir + + class Profile(contextlib.ContextDecorator): # Usage: @Profile() decorator or 'with Profile():' context manager def __enter__(self): @@ -152,34 +184,6 @@ def get_latest_run(search_dir='.'): return max(last_list, key=os.path.getctime) if last_list else '' -def user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'): - # Return path of user configuration directory. Prefer environment variable if exists. Make dir if required. 
- env = os.getenv(env_var) - if env: - path = Path(env) # use environment variable - else: - cfg = {'Windows': 'AppData/Roaming', 'Linux': '.config', 'Darwin': 'Library/Application Support'} # 3 OS dirs - path = Path.home() / cfg.get(platform.system(), '') # OS-specific config dir - path = (path if is_writeable(path) else Path('/tmp')) / dir # GCP and AWS lambda fix, only /tmp is writeable - path.mkdir(exist_ok=True) # make if required - return path - - -def is_writeable(dir, test=False): - # Return True if directory has write permissions, test opening a file with write permissions if test=True - if test: # method 1 - file = Path(dir) / 'tmp.txt' - try: - with open(file, 'w'): # open file with write permissions - pass - file.unlink() # remove file - return True - except OSError: - return False - else: # method 2 - return os.access(dir, os.R_OK) # possible issues on Windows - - def is_docker(): # Is environment a Docker container? return Path('/workspace').exists() # or Path('/.dockerenv').exists() @@ -207,7 +211,7 @@ def is_ascii(s=''): def is_chinese(s='人工智能'): # Is string composed of any Chinese characters? - return re.search('[\u4e00-\u9fff]', s) + return True if re.search('[\u4e00-\u9fff]', str(s)) else False def emojis(str=''): @@ -378,6 +382,15 @@ def check_file(file, suffix=''): return files[0] # return file +def check_font(font=FONT): + # Download font to CONFIG_DIR if necessary + font = Path(font) + if not font.exists() and not (CONFIG_DIR / font.name).exists(): + url = "https://ultralytics.com/assets/" + font.name + LOGGER.info(f'Downloading {url} to {CONFIG_DIR / font.name}...') + torch.hub.download_url_to_file(url, str(font), progress=False) + + def check_dataset(data, autodownload=True): # Download and/or unzip dataset if not found locally # Usage: https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128_with_yaml.zip diff --git a/utils/plots.py b/utils/plots.py index 74868403edc0..be70ac8a030f 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -17,12 +17,11 @@ import torch from PIL import Image, ImageDraw, ImageFont -from utils.general import (LOGGER, Timeout, check_requirements, clip_coords, increment_path, is_ascii, is_chinese, - try_except, user_config_dir, xywh2xyxy, xyxy2xywh) +from utils.general import (CONFIG_DIR, FONT, LOGGER, Timeout, check_font, check_requirements, clip_coords, + increment_path, is_ascii, is_chinese, try_except, xywh2xyxy, xyxy2xywh) from utils.metrics import fitness # Settings -CONFIG_DIR = user_config_dir() # Ultralytics settings dir RANK = int(os.getenv('RANK', -1)) matplotlib.rc('font', **{'size': 11}) matplotlib.use('Agg') # for writing to files only @@ -49,16 +48,14 @@ def hex2rgb(h): # rgb order (PIL) colors = Colors() # create instance for 'from utils.plots import colors' -def check_font(font='Arial.ttf', size=10): +def check_pil_font(font=FONT, size=10): # Return a PIL TrueType Font, downloading to CONFIG_DIR if necessary font = Path(font) font = font if font.exists() else (CONFIG_DIR / font.name) try: return ImageFont.truetype(str(font) if font.exists() else font.name, size) except Exception as e: # download if missing - url = "https://ultralytics.com/assets/" + font.name - LOGGER.info(f'Downloading {url} to {font}...') - torch.hub.download_url_to_file(url, str(font), progress=False) + check_font(font) try: return ImageFont.truetype(str(font), size) except TypeError: @@ -67,7 +64,7 @@ def check_font(font='Arial.ttf', size=10): class Annotator: if RANK in (-1, 0): - check_font() # download TTF if necessary + check_pil_font() # 
download TTF if necessary # YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'): @@ -76,8 +73,8 @@ def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=Fa if self.pil: # use PIL self.im = im if isinstance(im, Image.Image) else Image.fromarray(im) self.draw = ImageDraw.Draw(self.im) - self.font = check_font(font='Arial.Unicode.ttf' if is_chinese(example) else font, - size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12)) + self.font = check_pil_font(font='Arial.Unicode.ttf' if is_chinese(example) else font, + size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12)) else: # use cv2 self.im = im self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2) # line width @@ -89,10 +86,10 @@ def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 2 if label: w, h = self.font.getsize(label) # text width, height outside = box[1] - h >= 0 # label fits outside box - self.draw.rectangle([box[0], + self.draw.rectangle((box[0], box[1] - h if outside else box[1], box[0] + w + 1, - box[1] + 1 if outside else box[1] + h + 1], fill=color) + box[1] + 1 if outside else box[1] + h + 1), fill=color) # self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls') # for PIL>8.0 self.draw.text((box[0], box[1] - h if outside else box[1]), label, fill=txt_color, font=self.font) else: # cv2 @@ -210,7 +207,7 @@ def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max # Annotate fs = int((h + w) * ns * 0.01) # font size - annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True) + annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True, example=names) for i in range(i + 1): x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders From a82292ec5376cd7ff07fc6e85b731c09cdaeff4f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 3 Feb 2022 19:55:19 +0100 Subject: [PATCH 626/757] Social icons after text (#6473) * Social icons after text * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update README.md Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- README.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index f9947b98557d..7bfea7c24e8f 100644 --- a/README.md +++ b/README.md @@ -13,7 +13,13 @@ Open In Kaggle Join Forum +
+

+YOLOv5 🚀 is a family of object detection architectures and models pretrained on the COCO dataset, and represents Ultralytics + open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development. +

+ -
-

-YOLOv5 🚀 is a family of object detection architectures and models pretrained on the COCO dataset, and represents Ultralytics - open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development. -

- From 63ddb6f0d06f6309aa42bababd08c859197a27af Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 26 Feb 2022 19:15:12 +0100 Subject: [PATCH 686/757] Update autoanchor.py (#6794) * Update autoanchor.py * Update autoanchor.py --- utils/autoanchor.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/utils/autoanchor.py b/utils/autoanchor.py index 27d6fb68bb38..51d4de306efd 100644 --- a/utils/autoanchor.py +++ b/utils/autoanchor.py @@ -57,9 +57,10 @@ def metric(k): # compute metric anchors = torch.tensor(anchors, device=m.anchors.device).type_as(m.anchors) m.anchors[:] = anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1) # loss check_anchor_order(m) - LOGGER.info(f'{PREFIX}New anchors saved to model. Update model *.yaml to use these anchors in the future.') + s = f'{PREFIX}Done ✅ (optional: update model *.yaml to use these anchors in the future)' else: - LOGGER.info(f'{PREFIX}Original anchors better than new anchors. Proceeding with original anchors.') + s = f'{PREFIX}Done ⚠️ (original anchors better than new anchors, proceeding with original anchors)' + LOGGER.info(emojis(s)) def kmean_anchors(dataset='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True): @@ -120,7 +121,7 @@ def print_results(k, verbose=True): # Filter i = (wh0 < 3.0).any(1).sum() if i: - LOGGER.info(f'{PREFIX}WARNING: Extremely small objects found. {i} of {len(wh0)} labels are < 3 pixels in size.') + LOGGER.info(f'{PREFIX}WARNING: Extremely small objects found: {i} of {len(wh0)} labels are < 3 pixels in size') wh = wh0[(wh0 >= 2.0).any(1)] # filter > 2 pixels # wh = wh * (npr.rand(wh.shape[0], 1) * 0.9 + 0.1) # multiply by random scale 0-1 From bcc92e2169a233c3e974db40ddc9b496d9c29ec8 Mon Sep 17 00:00:00 2001 From: Louis Combaldieu Date: Fri, 4 Mar 2022 09:39:23 +0100 Subject: [PATCH 687/757] Update sweep.yaml (#6825) * Update sweep.yaml Changed focal loss gamma search range between 1 and 4 * Update sweep.yaml lowered the min value to match default --- utils/loggers/wandb/sweep.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/loggers/wandb/sweep.yaml b/utils/loggers/wandb/sweep.yaml index c7790d75f6b2..688b1ea0285f 100644 --- a/utils/loggers/wandb/sweep.yaml +++ b/utils/loggers/wandb/sweep.yaml @@ -88,7 +88,7 @@ parameters: fl_gamma: distribution: uniform min: 0.0 - max: 0.1 + max: 4.0 hsv_h: distribution: uniform min: 0.0 From 601dbb83f01b58355211f2565cfa4eecb48b1220 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 4 Mar 2022 10:32:18 +0100 Subject: [PATCH 688/757] AutoAnchor improved initialization robustness (#6854) * Update AutoAnchor * Update AutoAnchor * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/autoanchor.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/utils/autoanchor.py b/utils/autoanchor.py index 51d4de306efd..a631c21a3b26 100644 --- a/utils/autoanchor.py +++ b/utils/autoanchor.py @@ -125,15 +125,17 @@ def print_results(k, verbose=True): wh = wh0[(wh0 >= 2.0).any(1)] # filter > 2 pixels # wh = wh * (npr.rand(wh.shape[0], 1) * 0.9 + 0.1) # multiply by random scale 0-1 - # Kmeans calculation - LOGGER.info(f'{PREFIX}Running kmeans for {n} anchors on {len(wh)} points...') - s = wh.std(0) # sigmas for whitening - k = kmeans(wh / s, n, iter=30)[0] * s # points - if len(k) != n: # kmeans may return fewer points 
than requested if wh is insufficient or too similar - LOGGER.warning(f'{PREFIX}WARNING: scipy.cluster.vq.kmeans returned only {len(k)} of {n} requested points') + # Kmeans init + try: + LOGGER.info(f'{PREFIX}Running kmeans for {n} anchors on {len(wh)} points...') + assert n <= len(wh) # apply overdetermined constraint + s = wh.std(0) # sigmas for whitening + k = kmeans(wh / s, n, iter=30)[0] * s # points + assert n == len(k) # kmeans may return fewer points than requested if wh is insufficient or too similar + except Exception: + LOGGER.warning(f'{PREFIX}WARNING: switching strategies from kmeans to random init') k = np.sort(npr.rand(n * 2)).reshape(n, 2) * img_size # random init - wh = torch.tensor(wh, dtype=torch.float32) # filtered - wh0 = torch.tensor(wh0, dtype=torch.float32) # unfiltered + wh, wh0 = (torch.tensor(x, dtype=torch.float32) for x in (wh, wh0)) k = print_results(k, verbose=False) # Plot From 8a66ebad44e8ecf90c7d27757c832579398d4baf Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 4 Mar 2022 14:10:13 +0100 Subject: [PATCH 689/757] Add `*.ts` to `VID_FORMATS` (#6859) --- utils/datasets.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index e132e04f6d9d..c325b9910ed3 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -33,8 +33,8 @@ # Parameters HELP_URL = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' -IMG_FORMATS = ['bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp'] # include image suffixes -VID_FORMATS = ['asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'wmv'] # include video suffixes +IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp' # include image suffixes +VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes # Get orientation exif tag for orientation in ExifTags.TAGS.keys(): From 47288407450f83ccbdbd2e950bf339e30e67a181 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 6 Mar 2022 16:16:17 +0100 Subject: [PATCH 690/757] Update `--cache disk` deprecate `*_npy/` dirs (#6876) * Updates * Updates * Updates * Updates * Updates * Updates * Updates * Updates * Updates * Updates * Cleanup * Cleanup --- utils/datasets.py | 76 +++++++++++++++--------------- utils/loggers/wandb/wandb_utils.py | 2 +- val.py | 2 +- 3 files changed, 40 insertions(+), 40 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index c325b9910ed3..6a2dc58dd6cd 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -407,19 +407,19 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r # f += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib) else: raise Exception(f'{prefix}{p} does not exist') - self.img_files = sorted(x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS) + self.im_files = sorted(x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS) # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS]) # pathlib - assert self.img_files, f'{prefix}No images found' + assert self.im_files, f'{prefix}No images found' except Exception as e: raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {HELP_URL}') # Check cache - self.label_files = img2label_paths(self.img_files) # labels + self.label_files = img2label_paths(self.im_files) # labels cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') try: cache, exists 
= np.load(cache_path, allow_pickle=True).item(), True # load dict assert cache['version'] == self.cache_version # same version - assert cache['hash'] == get_hash(self.label_files + self.img_files) # same hash + assert cache['hash'] == get_hash(self.label_files + self.im_files) # same hash except Exception: cache, exists = self.cache_labels(cache_path, prefix), False # cache @@ -437,7 +437,7 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r labels, shapes, self.segments = zip(*cache.values()) self.labels = list(labels) self.shapes = np.array(shapes, dtype=np.float64) - self.img_files = list(cache.keys()) # update + self.im_files = list(cache.keys()) # update self.label_files = img2label_paths(cache.keys()) # update n = len(shapes) # number of images bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index @@ -466,7 +466,7 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r s = self.shapes # wh ar = s[:, 1] / s[:, 0] # aspect ratio irect = ar.argsort() - self.img_files = [self.img_files[i] for i in irect] + self.im_files = [self.im_files[i] for i in irect] self.label_files = [self.label_files[i] for i in irect] self.labels = [self.labels[i] for i in irect] self.shapes = s[irect] # wh @@ -485,24 +485,20 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride # Cache images into RAM/disk for faster training (WARNING: large datasets may exceed system resources) - self.imgs, self.img_npy = [None] * n, [None] * n + self.ims = [None] * n + self.npy_files = [Path(f).with_suffix('.npy') for f in self.im_files] if cache_images: - if cache_images == 'disk': - self.im_cache_dir = Path(Path(self.img_files[0]).parent.as_posix() + '_npy') - self.img_npy = [self.im_cache_dir / Path(f).with_suffix('.npy').name for f in self.img_files] - self.im_cache_dir.mkdir(parents=True, exist_ok=True) gb = 0 # Gigabytes of cached images - self.img_hw0, self.img_hw = [None] * n, [None] * n - results = ThreadPool(NUM_THREADS).imap(self.load_image, range(n)) + self.im_hw0, self.im_hw = [None] * n, [None] * n + fcn = self.cache_images_to_disk if cache_images == 'disk' else self.load_image + results = ThreadPool(NUM_THREADS).imap(fcn, range(n)) pbar = tqdm(enumerate(results), total=n) for i, x in pbar: if cache_images == 'disk': - if not self.img_npy[i].exists(): - np.save(self.img_npy[i].as_posix(), x[0]) - gb += self.img_npy[i].stat().st_size + gb += self.npy_files[i].stat().st_size else: # 'ram' - self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # im, hw_orig, hw_resized = load_image(self, i) - gb += self.imgs[i].nbytes + self.ims[i], self.im_hw0[i], self.im_hw[i] = x # im, hw_orig, hw_resized = load_image(self, i) + gb += self.ims[i].nbytes pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB {cache_images})' pbar.close() @@ -512,8 +508,8 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupt, messages desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels..." 
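# Standalone sketch of get_hash(), reconstructed from the cache['hash'] check
# earlier in this diff (an approximation of the utils.general helper, not lines
# from this patch). The label cache is keyed on the combined size and names of
# every image/label path, so adding, removing or replacing any file invalidates it:
import hashlib
import os

def get_hash(paths):
    # single hash for a list of file/dir paths: total size, then joined names
    size = sum(os.path.getsize(p) for p in paths if os.path.exists(p))
    h = hashlib.md5(str(size).encode())  # hash sizes
    h.update(''.join(paths).encode())  # hash paths
    return h.hexdigest()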
with Pool(NUM_THREADS) as pool: - pbar = tqdm(pool.imap(verify_image_label, zip(self.img_files, self.label_files, repeat(prefix))), - desc=desc, total=len(self.img_files)) + pbar = tqdm(pool.imap(verify_image_label, zip(self.im_files, self.label_files, repeat(prefix))), + desc=desc, total=len(self.im_files)) for im_file, lb, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar: nm += nm_f nf += nf_f @@ -530,8 +526,8 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): LOGGER.info('\n'.join(msgs)) if nf == 0: LOGGER.warning(f'{prefix}WARNING: No labels found in {path}. See {HELP_URL}') - x['hash'] = get_hash(self.label_files + self.img_files) - x['results'] = nf, nm, ne, nc, len(self.img_files) + x['hash'] = get_hash(self.label_files + self.im_files) + x['results'] = nf, nm, ne, nc, len(self.im_files) x['msgs'] = msgs # warnings x['version'] = self.cache_version # cache version try: @@ -543,7 +539,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): return x def __len__(self): - return len(self.img_files) + return len(self.im_files) # def __iter__(self): # self.count = -1 @@ -622,17 +618,15 @@ def __getitem__(self, index): img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB img = np.ascontiguousarray(img) - return torch.from_numpy(img), labels_out, self.img_files[index], shapes + return torch.from_numpy(img), labels_out, self.im_files[index], shapes def load_image(self, i): - # loads 1 image from dataset index 'i', returns (im, original hw, resized hw) - im = self.imgs[i] + # Loads 1 image from dataset index 'i', returns (im, original hw, resized hw) + im, f, fn = self.ims[i], self.im_files[i], self.npy_files[i], if im is None: # not cached in RAM - npy = self.img_npy[i] - if npy and npy.exists(): # load npy - im = np.load(npy) + if fn.exists(): # load npy + im = np.load(fn) else: # read image - f = self.img_files[i] im = cv2.imread(f) # BGR assert im is not None, f'Image Not Found {f}' h0, w0 = im.shape[:2] # orig hw @@ -643,7 +637,13 @@ def load_image(self, i): interpolation=cv2.INTER_LINEAR if (self.augment or r > 1) else cv2.INTER_AREA) return im, (h0, w0), im.shape[:2] # im, hw_original, hw_resized else: - return self.imgs[i], self.img_hw0[i], self.img_hw[i] # im, hw_original, hw_resized + return self.ims[i], self.im_hw0[i], self.im_hw[i] # im, hw_original, hw_resized + + def cache_images_to_disk(self, i): + # Saves an image as an *.npy file for faster loading + f = self.npy_files[i] + if not f.exists(): + np.save(f.as_posix(), cv2.imread(self.im_files[i])) def load_mosaic(self, index): # YOLOv5 4-mosaic loader. 
Loads 1 image + 3 random images into a 4-image mosaic @@ -777,16 +777,16 @@ def load_mosaic9(self, index): @staticmethod def collate_fn(batch): - img, label, path, shapes = zip(*batch) # transposed + im, label, path, shapes = zip(*batch) # transposed for i, lb in enumerate(label): lb[:, 0] = i # add target image index for build_targets() - return torch.stack(img, 0), torch.cat(label, 0), path, shapes + return torch.stack(im, 0), torch.cat(label, 0), path, shapes @staticmethod def collate_fn4(batch): img, label, path, shapes = zip(*batch) # transposed n = len(shapes) // 4 - img4, label4, path4, shapes4 = [], [], path[:n], shapes[:n] + im4, label4, path4, shapes4 = [], [], path[:n], shapes[:n] ho = torch.tensor([[0.0, 0, 0, 1, 0, 0]]) wo = torch.tensor([[0.0, 0, 1, 0, 0, 0]]) @@ -800,13 +800,13 @@ def collate_fn4(batch): else: im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2) lb = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s - img4.append(im) + im4.append(im) label4.append(lb) for i, lb in enumerate(label4): lb[:, 0] = i # add target image index for build_targets() - return torch.stack(img4, 0), torch.cat(label4, 0), path4, shapes4 + return torch.stack(im4, 0), torch.cat(label4, 0), path4, shapes4 # Ancillary functions -------------------------------------------------------------------------------------------------- @@ -999,12 +999,12 @@ def hub_ops(f, max_dim=1920): 'image_stats': {'total': dataset.n, 'unlabelled': int(np.all(x == 0, 1).sum()), 'per_class': (x > 0).sum(0).tolist()}, 'labels': [{str(Path(k).name): round_labels(v.tolist())} for k, v in - zip(dataset.img_files, dataset.labels)]} + zip(dataset.im_files, dataset.labels)]} if hub: im_dir = hub_dir / 'images' im_dir.mkdir(parents=True, exist_ok=True) - for _ in tqdm(ThreadPool(NUM_THREADS).imap(hub_ops, dataset.img_files), total=dataset.n, desc='HUB Ops'): + for _ in tqdm(ThreadPool(NUM_THREADS).imap(hub_ops, dataset.im_files), total=dataset.n, desc='HUB Ops'): pass # Profile diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index 3835436543d2..786e58a19972 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -403,7 +403,7 @@ def create_dataset_table(self, dataset: LoadImagesAndLabels, class_to_id: Dict[i # TODO: Explore multiprocessing to slpit this loop parallely| This is essential for speeding up the the logging artifact = wandb.Artifact(name=name, type="dataset") img_files = tqdm([dataset.path]) if isinstance(dataset.path, str) and Path(dataset.path).is_dir() else None - img_files = tqdm(dataset.img_files) if not img_files else img_files + img_files = tqdm(dataset.im_files) if not img_files else img_files for img_file in img_files: if Path(img_file).is_dir(): artifact.add_dir(img_file, name='data/images') diff --git a/val.py b/val.py index 78abbda8231a..8bde37bd5dc7 100644 --- a/val.py +++ b/val.py @@ -297,7 +297,7 @@ def run(data, pred = anno.loadRes(pred_json) # init predictions api eval = COCOeval(anno, pred, 'bbox') if is_coco: - eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.img_files] # image IDs to evaluate + eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.im_files] # image IDs to evaluate eval.evaluate() eval.accumulate() eval.summarize() From 7e98b4801a2f3e607aa2636a4346e2482f961596 Mon Sep 17 00:00:00 2001 From: vnekat <92971065+vnekat@users.noreply.github.com> Date: Mon, 7 Mar 2022 00:50:01 +0530 Subject: [PATCH 
691/757] Update yolov5s.yaml (#6865) * Update yolov5s.yaml * Update yolov5s.yaml Co-authored-by: Glenn Jocher From 596de6d5a00223dc4be86377dfba6df4341b76b1 Mon Sep 17 00:00:00 2001 From: DavidB Date: Mon, 7 Mar 2022 03:21:16 +0700 Subject: [PATCH 692/757] Default FP16 TensorRT export (#6798) * Assert engine precision #6777 * Default to FP32 inputs for TensorRT engines * Default to FP16 TensorRT exports #6777 * Remove wrong line #6777 * Automatically adjust detect.py input precision #6777 * Automatically adjust val.py input precision #6777 * Add missing colon * Cleanup * Cleanup * Remove default trt_fp16_input definition * Experiment * Reorder detect.py if statement to after half checks * Update common.py * Update export.py * Cleanup Co-authored-by: Glenn Jocher --- detect.py | 4 ++++ export.py | 5 ++--- models/common.py | 3 +++ val.py | 4 ++++ 4 files changed, 13 insertions(+), 3 deletions(-) diff --git a/detect.py b/detect.py index 76f67bea1b90..ba43ed9e1eed 100644 --- a/detect.py +++ b/detect.py @@ -97,6 +97,10 @@ def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s) half &= (pt or jit or onnx or engine) and device.type != 'cpu' # FP16 supported on limited backends with CUDA if pt or jit: model.model.half() if half else model.model.float() + elif engine and model.trt_fp16_input != half: + LOGGER.info('model ' + ( + 'requires' if model.trt_fp16_input else 'incompatible with') + ' --half. Adjusting automatically.') + half = model.trt_fp16_input # Dataloader if webcam: diff --git a/export.py b/export.py index 286df623d252..7a5205d55ee6 100644 --- a/export.py +++ b/export.py @@ -233,9 +233,8 @@ def export_engine(model, im, file, train, half, simplify, workspace=4, verbose=F for out in outputs: LOGGER.info(f'{prefix}\toutput "{out.name}" with shape {out.shape} and dtype {out.dtype}') - half &= builder.platform_has_fast_fp16 - LOGGER.info(f'{prefix} building FP{16 if half else 32} engine in {f}') - if half: + LOGGER.info(f'{prefix} building FP{16 if builder.platform_has_fast_fp16 else 32} engine in {f}') + if builder.platform_has_fast_fp16: config.set_flag(trt.BuilderFlag.FP16) with builder.build_engine(network, config) as engine, open(f, 'wb') as t: t.write(engine.serialize()) diff --git a/models/common.py b/models/common.py index 0dae0244e932..70ee7105abfc 100644 --- a/models/common.py +++ b/models/common.py @@ -338,6 +338,7 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=False, data=None): import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=7.0.0 Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr')) + trt_fp16_input = False logger = trt.Logger(trt.Logger.INFO) with open(w, 'rb') as f, trt.Runtime(logger) as runtime: model = runtime.deserialize_cuda_engine(f.read()) @@ -348,6 +349,8 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=False, data=None): shape = tuple(model.get_binding_shape(index)) data = torch.from_numpy(np.empty(shape, dtype=np.dtype(dtype))).to(device) bindings[name] = Binding(name, dtype, shape, data, int(data.data_ptr())) + if model.binding_is_input(index) and dtype == np.float16: + trt_fp16_input = True binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items()) context = model.create_execution_context() batch_size = bindings['images'].shape[0] diff --git a/val.py b/val.py index 8bde37bd5dc7..dfbfa3935210 100644 --- a/val.py +++ b/val.py @@ -144,6 +144,10 @@ def run(data, model.model.half() if half else 
model.model.float() elif engine: batch_size = model.batch_size + if model.trt_fp16_input != half: + LOGGER.info('model ' + ( + 'requires' if model.trt_fp16_input else 'incompatible with') + ' --half. Adjusting automatically.') + half = model.trt_fp16_input else: half = False batch_size = 1 # export.py models default to batch-size 1 From c8a589920e877016c8a9be00fd0077005dc68f51 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 7 Mar 2022 13:48:59 +0100 Subject: [PATCH 693/757] Bump actions/setup-python from 2 to 3 (#6880) Bumps [actions/setup-python](https://github.com/actions/setup-python) from 2 to 3. - [Release notes](https://github.com/actions/setup-python/releases) - [Commits](https://github.com/actions/setup-python/compare/v2...v3) --- updated-dependencies: - dependency-name: actions/setup-python dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci-testing.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 5cf1613ab0cd..10fab276f8f2 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -27,7 +27,7 @@ jobs: steps: - uses: actions/checkout@v2 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v2 + uses: actions/setup-python@v3 with: python-version: ${{ matrix.python-version }} From a5a1760ea6d1c172b91fa5b0606434c8379b45f0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 7 Mar 2022 13:49:27 +0100 Subject: [PATCH 694/757] Bump actions/checkout from 2 to 3 (#6881) Bumps [actions/checkout](https://github.com/actions/checkout) from 2 to 3. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/v2...v3) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci-testing.yml | 2 +- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/rebase.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 10fab276f8f2..f2096ce17a17 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -25,7 +25,7 @@ jobs: # Timeout: https://stackoverflow.com/a/59076067/4521646 timeout-minutes: 60 steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v3 with: diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 67f51f0e8bce..8bc88e957a36 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -22,7 +22,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v2 + uses: actions/checkout@v3 # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL diff --git a/.github/workflows/rebase.yml b/.github/workflows/rebase.yml index a4db1efb2971..75c57546166b 100644 --- a/.github/workflows/rebase.yml +++ b/.github/workflows/rebase.yml @@ -11,7 +11,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout the latest code - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: token: ${{ secrets.ACTIONS_TOKEN }} fetch-depth: 0 # otherwise, you will fail to push refs to dest repo From acc58c1dcfba054ef936ee1458a8ff74a088ee74 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 7 Mar 2022 13:52:53 +0100 Subject: [PATCH 695/757] Fix TRT `max_workspace_size` deprecation notice (#6856) * Fix TRT `max_workspace_size` deprecation notice * Update export.py * Update export.py --- export.py | 1 + 1 file changed, 1 insertion(+) diff --git a/export.py b/export.py index 7a5205d55ee6..1e3d3e2f2e71 100644 --- a/export.py +++ b/export.py @@ -218,6 +218,7 @@ def export_engine(model, im, file, train, half, simplify, workspace=4, verbose=F builder = trt.Builder(logger) config = builder.create_builder_config() config.max_workspace_size = workspace * 1 << 30 + # config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace << 30) # fix TRT 8.4 deprecation notice flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)) network = builder.create_network(flag) From e6e36aac109794999f1dafab244b9ec4887a33d1 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 7 Mar 2022 19:26:37 +0100 Subject: [PATCH 696/757] Update bytes to GB with bitshift (#6886) --- utils/__init__.py | 7 +++---- utils/autobatch.py | 7 ++++--- utils/general.py | 5 +++-- utils/torch_utils.py | 2 +- 4 files changed, 11 insertions(+), 10 deletions(-) diff --git a/utils/__init__.py b/utils/__init__.py index 4658ed6473cd..a63c473a4340 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -21,14 +21,13 @@ def notebook_init(verbose=True): if is_colab(): shutil.rmtree('/content/sample_data', ignore_errors=True) # remove colab /sample_data directory + # System info if verbose: - # System info - # gb = 1 / 1000 ** 3 # bytes to GB - gib = 1 / 1024 ** 3 # bytes to GiB + gb = 1 << 30 # bytes to GiB (1024 ** 3) ram = psutil.virtual_memory().total total, used, free = shutil.disk_usage("/") display.clear_output() - s = f'({os.cpu_count()} CPUs, {ram * gib:.1f} GB RAM, {(total - free) * gib:.1f}/{total * gib:.1f} GB disk)' + s = f'({os.cpu_count()} CPUs, {ram / gb:.1f} GB RAM, {(total - free) / gb:.1f}/{total / gb:.1f} GB disk)' else: s = '' diff --git a/utils/autobatch.py b/utils/autobatch.py index cb94f041e95d..e53b4787b87d 100644 --- a/utils/autobatch.py +++ b/utils/autobatch.py @@ -34,11 +34,12 @@ def autobatch(model, imgsz=640, fraction=0.9, batch_size=16): LOGGER.info(f'{prefix}CUDA not detected, using default CPU batch-size {batch_size}') return batch_size + gb = 1 << 30 # bytes to GiB (1024 ** 3) d = str(device).upper() # 'CUDA:0' properties = torch.cuda.get_device_properties(device) # device properties - t = properties.total_memory / 1024 ** 3 # (GiB) - r = torch.cuda.memory_reserved(device) / 1024 ** 3 # (GiB) - a = torch.cuda.memory_allocated(device) / 1024 ** 3 # (GiB) + t = properties.total_memory / gb # (GiB) + r = torch.cuda.memory_reserved(device) / gb # (GiB) + a = torch.cuda.memory_allocated(device) / gb # (GiB) f = t - (r + a) # free inside reserved LOGGER.info(f'{prefix}{d} ({properties.name}) {t:.2f}G total, {r:.2f}G reserved, {a:.2f}G allocated, {f:.2f}G free') diff --git a/utils/general.py b/utils/general.py index 
d1594a8b5cea..36c180fe4cf2 100755 --- a/utils/general.py +++ b/utils/general.py @@ -223,11 +223,12 @@ def emojis(str=''): def file_size(path): # Return file/dir size (MB) + mb = 1 << 20 # bytes to MiB (1024 ** 2) path = Path(path) if path.is_file(): - return path.stat().st_size / 1E6 + return path.stat().st_size / mb elif path.is_dir(): - return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file()) / 1E6 + return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file()) / mb else: return 0.0 diff --git a/utils/torch_utils.py b/utils/torch_utils.py index c11d2a4269ef..2e6fba06626a 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -86,7 +86,7 @@ def select_device(device='', batch_size=0, newline=True): space = ' ' * (len(s) + 1) for i, d in enumerate(devices): p = torch.cuda.get_device_properties(i) - s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2:.0f}MiB)\n" # bytes to MB + s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / (1 << 20):.0f}MiB)\n" # bytes to MB else: s += 'CPU\n' From 6dd82c025298d219a1eb1fe8e486fb99d5324d34 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 9 Mar 2022 18:22:53 +0100 Subject: [PATCH 697/757] Move `git_describe()` to general.py (#6918) * Move `git_describe()` to general.py * Move `git_describe()` to general.py --- utils/general.py | 21 +++++++++++++++++++++ utils/torch_utils.py | 21 ++------------------- 2 files changed, 23 insertions(+), 19 deletions(-) diff --git a/utils/general.py b/utils/general.py index 36c180fe4cf2..a7891cbccbab 100755 --- a/utils/general.py +++ b/utils/general.py @@ -15,6 +15,7 @@ import signal import time import urllib +from datetime import datetime from itertools import repeat from multiprocessing.pool import ThreadPool from pathlib import Path @@ -221,6 +222,18 @@ def emojis(str=''): return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str +def file_age(path=__file__): + # Return days since last file update + dt = (datetime.now() - datetime.fromtimestamp(Path(path).stat().st_mtime)) # delta + return dt.days # + dt.seconds / 86400 # fractional days + + +def file_update_date(path=__file__): + # Return human-readable file modification date, i.e. '2021-3-26' + t = datetime.fromtimestamp(Path(path).stat().st_mtime) + return f'{t.year}-{t.month}-{t.day}' + + def file_size(path): # Return file/dir size (MB) mb = 1 << 20 # bytes to MiB (1024 ** 2) @@ -243,6 +256,14 @@ def check_online(): return False +def git_describe(path=ROOT): # path must be a directory + # Return human-readable git description, i.e. 
v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe + try: + return check_output(f'git -C {path} describe --tags --long --always', shell=True).decode()[:-1] + except Exception: + return '' + + @try_except @WorkingDirectory(ROOT) def check_git_status(): diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 2e6fba06626a..efcacc9ca735 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -3,7 +3,6 @@ PyTorch utils """ -import datetime import math import os import platform @@ -12,14 +11,13 @@ import warnings from contextlib import contextmanager from copy import deepcopy -from pathlib import Path import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F -from utils.general import LOGGER +from utils.general import LOGGER, file_update_date, git_describe try: import thop # for FLOPs computation @@ -40,21 +38,6 @@ def torch_distributed_zero_first(local_rank: int): dist.barrier(device_ids=[0]) -def date_modified(path=__file__): - # Return human-readable file modification date, i.e. '2021-3-26' - t = datetime.datetime.fromtimestamp(Path(path).stat().st_mtime) - return f'{t.year}-{t.month}-{t.day}' - - -def git_describe(path=Path(__file__).parent): # path must be a directory - # Return human-readable git description, i.e. v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe - s = f'git -C {path} describe --tags --long --always' - try: - return subprocess.check_output(s, shell=True, stderr=subprocess.STDOUT).decode()[:-1] - except subprocess.CalledProcessError: - return '' # not a git repository - - def device_count(): # Returns number of CUDA devices available. Safe version of torch.cuda.device_count(). Only works on Linux. assert platform.system() == 'Linux', 'device_count() function only works on Linux' @@ -67,7 +50,7 @@ def device_count(): def select_device(device='', batch_size=0, newline=True): # device = 'cpu' or '0' or '0,1,2,3' - s = f'YOLOv5 🚀 {git_describe() or date_modified()} torch {torch.__version__} ' # string + s = f'YOLOv5 🚀 {git_describe() or file_update_date()} torch {torch.__version__} ' # string device = str(device).strip().lower().replace('cuda:', '') # to string, 'cuda:0' to '0' cpu = device == 'cpu' if cpu: From d3d9cbce221b2ced46dde374f24fde72c8e71c37 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 10 Mar 2022 12:41:06 +0100 Subject: [PATCH 698/757] PyTorch 1.11.0 compatibility updates (#6932) Resolves `AttributeError: 'Upsample' object has no attribute 'recompute_scale_factor'` first raised in https://github.com/ultralytics/yolov5/issues/5499 --- models/experimental.py | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/models/experimental.py b/models/experimental.py index 463e5514a06e..01bdfe72db4f 100644 --- a/models/experimental.py +++ b/models/experimental.py @@ -94,21 +94,22 @@ def attempt_load(weights, map_location=None, inplace=True, fuse=True): model = Ensemble() for w in weights if isinstance(weights, list) else [weights]: ckpt = torch.load(attempt_download(w), map_location=map_location) # load - if fuse: - model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval()) # FP32 model - else: - model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().eval()) # without layer fuse + ckpt = (ckpt['ema'] or ckpt['model']).float() # FP32 model + model.append(ckpt.fuse().eval() if fuse else ckpt.eval()) # fused or un-fused model in eval mode # Compatibility updates for m in model.modules(): - if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, 
nn.ReLU6, nn.SiLU, Detect, Model]: - m.inplace = inplace # pytorch 1.7.0 compatibility - if type(m) is Detect: + t = type(m) + if t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model): + m.inplace = inplace # torch 1.7.0 compatibility + if t is Detect: if not isinstance(m.anchor_grid, list): # new Detect Layer compatibility delattr(m, 'anchor_grid') setattr(m, 'anchor_grid', [torch.zeros(1)] * m.nl) - elif type(m) is Conv: - m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility + elif t is nn.Upsample: + m.recompute_scale_factor = None # torch 1.11.0 compatibility + elif t is Conv: + m._non_persistent_buffers_set = set() # torch 1.6.0 compatibility if len(model) == 1: return model[-1] # return model From 055e72af5b887832d5e7267ac9226c825d498cd2 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 10 Mar 2022 12:58:41 +0100 Subject: [PATCH 699/757] Optimize PyTorch 1.11.0 compatibility update (#6933) --- models/experimental.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/models/experimental.py b/models/experimental.py index 01bdfe72db4f..782ecbeface9 100644 --- a/models/experimental.py +++ b/models/experimental.py @@ -106,10 +106,10 @@ def attempt_load(weights, map_location=None, inplace=True, fuse=True): if not isinstance(m.anchor_grid, list): # new Detect Layer compatibility delattr(m, 'anchor_grid') setattr(m, 'anchor_grid', [torch.zeros(1)] * m.nl) - elif t is nn.Upsample: - m.recompute_scale_factor = None # torch 1.11.0 compatibility elif t is Conv: m._non_persistent_buffers_set = set() # torch 1.6.0 compatibility + elif t is nn.Upsample and not hasattr(m, 'recompute_scale_factor'): + m.recompute_scale_factor = None # torch 1.11.0 compatibility if len(model) == 1: return model[-1] # return model From caf7ad0500f8fc58567a7aa01ca91d5ee77691d6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 10 Mar 2022 18:41:47 +0100 Subject: [PATCH 700/757] Allow 3-point segments (#6938) May resolve https://github.com/ultralytics/yolov5/issues/6931 --- utils/datasets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/datasets.py b/utils/datasets.py index 6a2dc58dd6cd..00d0d94e0847 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -906,7 +906,7 @@ def verify_image_label(args): nf = 1 # label found with open(lb_file) as f: lb = [x.split() for x in f.read().strip().splitlines() if len(x)] - if any([len(x) > 8 for x in lb]): # is segment + if any(len(x) > 6 for x in lb): # is segment classes = np.array([x[0] for x in lb], dtype=np.float32) segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in lb] # (cls, xy1...) 
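# Note: a segment label row is "cls x1 y1 x2 y2 x3 y3 ...", so a 3-point polygon
# has 7 fields; the old `len(x) > 8` test rejected it, while `len(x) > 6` accepts
# any polygon with at least 3 points.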
lb = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh) From 84efa62b2d0a619309a7437aa82cebdfc4de1bed Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 11 Mar 2022 16:18:40 +0100 Subject: [PATCH 701/757] Fix PyTorch Hub export inference shapes (#6949) May resolve https://github.com/ultralytics/yolov5/issues/6947 --- models/common.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/models/common.py b/models/common.py index 70ee7105abfc..ac3af20d533e 100644 --- a/models/common.py +++ b/models/common.py @@ -544,10 +544,9 @@ def forward(self, imgs, size=640, augment=False, profile=False): g = (size / max(s)) # gain shape1.append([y * g for y in s]) imgs[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update - shape1 = [make_divisible(x, self.stride) for x in np.stack(shape1, 0).max(0)] # inference shape - x = [letterbox(im, new_shape=shape1 if self.pt else size, auto=False)[0] for im in imgs] # pad - x = np.stack(x, 0) if n > 1 else x[0][None] # stack - x = np.ascontiguousarray(x.transpose((0, 3, 1, 2))) # BHWC to BCHW + shape1 = [make_divisible(x, self.stride) if self.pt else size for x in np.array(shape1).max(0)] # inf shape + x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs] # pad + x = np.ascontiguousarray(np.array(x).transpose((0, 3, 1, 2))) # stack and BHWC to BCHW x = torch.from_numpy(x).to(p.device).type_as(p) / 255 # uint8 to fp16/32 t.append(time_sync()) From b94b59e199047aa8bf2cdd4401ae9f5f42b929e6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 11 Mar 2022 16:31:52 +0100 Subject: [PATCH 702/757] DetectMultiBackend() `--half` handling (#6945) * DetectMultiBackend() `--half` handling * CI fixes * rename .half to .fp16 to avoid conflict * warmup fix * val update * engine update * engine update --- detect.py | 17 ++++------------- models/common.py | 13 ++++++++----- val.py | 25 +++++++++---------------- 3 files changed, 21 insertions(+), 34 deletions(-) diff --git a/detect.py b/detect.py index ba43ed9e1eed..ccb9fbf5103f 100644 --- a/detect.py +++ b/detect.py @@ -89,19 +89,10 @@ def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s) # Load model device = select_device(device) - model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data) - stride, names, pt, jit, onnx, engine = model.stride, model.names, model.pt, model.jit, model.onnx, model.engine + model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) + stride, names, pt = model.stride, model.names, model.pt imgsz = check_img_size(imgsz, s=stride) # check image size - # Half - half &= (pt or jit or onnx or engine) and device.type != 'cpu' # FP16 supported on limited backends with CUDA - if pt or jit: - model.model.half() if half else model.model.float() - elif engine and model.trt_fp16_input != half: - LOGGER.info('model ' + ( - 'requires' if model.trt_fp16_input else 'incompatible with') + ' --half. 
Adjusting automatically.') - half = model.trt_fp16_input - # Dataloader if webcam: view_img = check_imshow() @@ -114,12 +105,12 @@ def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s) vid_path, vid_writer = [None] * bs, [None] * bs # Run inference - model.warmup(imgsz=(1 if pt else bs, 3, *imgsz), half=half) # warmup + model.warmup(imgsz=(1 if pt else bs, 3, *imgsz)) # warmup dt, seen = [0.0, 0.0, 0.0], 0 for path, im, im0s, vid_cap, s in dataset: t1 = time_sync() im = torch.from_numpy(im).to(device) - im = im.half() if half else im.float() # uint8 to fp16/32 + im = im.half() if model.fp16 else im.float() # uint8 to fp16/32 im /= 255 # 0 - 255 to 0.0 - 1.0 if len(im.shape) == 3: im = im[None] # expand for batch dim diff --git a/models/common.py b/models/common.py index ac3af20d533e..251463525392 100644 --- a/models/common.py +++ b/models/common.py @@ -277,7 +277,7 @@ def forward(self, x): class DetectMultiBackend(nn.Module): # YOLOv5 MultiBackend class for python inference on various backends - def __init__(self, weights='yolov5s.pt', device=None, dnn=False, data=None): + def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, data=None, fp16=False): # Usage: # PyTorch: weights = *.pt # TorchScript: *.torchscript @@ -297,6 +297,7 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=False, data=None): pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs = self.model_type(w) # get backend stride, names = 64, [f'class{i}' for i in range(1000)] # assign defaults w = attempt_download(w) # download if not local + fp16 &= (pt or jit or onnx or engine) and device.type != 'cpu' # FP16 if data: # data.yaml path (optional) with open(data, errors='ignore') as f: names = yaml.safe_load(f)['names'] # class names @@ -305,11 +306,13 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=False, data=None): model = attempt_load(weights if isinstance(weights, list) else w, map_location=device) stride = max(int(model.stride.max()), 32) # model stride names = model.module.names if hasattr(model, 'module') else model.names # get class names + model.half() if fp16 else model.float() self.model = model # explicitly assign for to(), cpu(), cuda(), half() elif jit: # TorchScript LOGGER.info(f'Loading {w} for TorchScript inference...') extra_files = {'config.txt': ''} # model metadata model = torch.jit.load(w, _extra_files=extra_files) + model.half() if fp16 else model.float() if extra_files['config.txt']: d = json.loads(extra_files['config.txt']) # extra_files dict stride, names = int(d['stride']), d['names'] @@ -338,11 +341,11 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=False, data=None): import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=7.0.0 Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr')) - trt_fp16_input = False logger = trt.Logger(trt.Logger.INFO) with open(w, 'rb') as f, trt.Runtime(logger) as runtime: model = runtime.deserialize_cuda_engine(f.read()) bindings = OrderedDict() + fp16 = False # default updated below for index in range(model.num_bindings): name = model.get_binding_name(index) dtype = trt.nptype(model.get_binding_dtype(index)) @@ -350,7 +353,7 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=False, data=None): data = torch.from_numpy(np.empty(shape, dtype=np.dtype(dtype))).to(device) bindings[name] = Binding(name, dtype, shape, data, int(data.data_ptr())) if 
model.binding_is_input(index) and dtype == np.float16: - trt_fp16_input = True + fp16 = True binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items()) context = model.create_execution_context() batch_size = bindings['images'].shape[0] @@ -458,11 +461,11 @@ def forward(self, im, augment=False, visualize=False, val=False): y = torch.tensor(y) if isinstance(y, np.ndarray) else y return (y, []) if val else y - def warmup(self, imgsz=(1, 3, 640, 640), half=False): + def warmup(self, imgsz=(1, 3, 640, 640)): # Warmup model by running inference once if self.pt or self.jit or self.onnx or self.engine: # warmup types if isinstance(self.device, torch.device) and self.device.type != 'cpu': # only warmup GPU models - im = torch.zeros(*imgsz).to(self.device).type(torch.half if half else torch.float) # input image + im = torch.zeros(*imgsz).to(self.device).type(torch.half if self.fp16 else torch.float) # input image self.forward(im) # warmup @staticmethod diff --git a/val.py b/val.py index dfbfa3935210..64c4d4ff9dae 100644 --- a/val.py +++ b/val.py @@ -125,7 +125,6 @@ def run(data, training = model is not None if training: # called by train.py device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model - half &= device.type != 'cpu' # half precision only supported on CUDA model.half() if half else model.float() else: # called directly @@ -136,23 +135,17 @@ def run(data, (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model - model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data) - stride, pt, jit, onnx, engine = model.stride, model.pt, model.jit, model.onnx, model.engine + model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) + stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine imgsz = check_img_size(imgsz, s=stride) # check image size - half &= (pt or jit or onnx or engine) and device.type != 'cpu' # FP16 supported on limited backends with CUDA - if pt or jit: - model.model.half() if half else model.model.float() - elif engine: + half = model.fp16 # FP16 supported on limited backends with CUDA + if engine: batch_size = model.batch_size - if model.trt_fp16_input != half: - LOGGER.info('model ' + ( - 'requires' if model.trt_fp16_input else 'incompatible with') + ' --half. 
Adjusting automatically.') - half = model.trt_fp16_input else: - half = False - batch_size = 1 # export.py models default to batch-size 1 - device = torch.device('cpu') - LOGGER.info(f'Forcing --batch-size 1 square inference shape(1,3,{imgsz},{imgsz}) for non-PyTorch backends') + device = model.device + if not pt or jit: + batch_size = 1 # export.py models default to batch-size 1 + LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models') # Data data = check_dataset(data) # check @@ -166,7 +159,7 @@ def run(data, # Dataloader if not training: - model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz), half=half) # warmup + model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz)) # warmup pad = 0.0 if task in ('speed', 'benchmark') else 0.5 rect = False if task == 'benchmark' else pt # square inference for benchmarks task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images From c6b4f84fd1ce03496d64db4d4b1e5895ca5c879b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 12 Mar 2022 00:45:07 +0100 Subject: [PATCH 703/757] Update Dockerfile `torch==1.11.0+cu113` (#6954) --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 489dd04ce5c9..896751d50d2d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -11,7 +11,7 @@ COPY requirements.txt . RUN python -m pip install --upgrade pip RUN pip uninstall -y torch torchvision torchtext RUN pip install --no-cache -r requirements.txt albumentations wandb gsutil notebook \ - torch==1.10.2+cu113 torchvision==0.11.3+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html + torch==1.11.0+cu113 torchvision==0.12.0+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html # RUN pip install --no-cache -U torch torchvision # Create working directory From c84dd27d62d979bf4a97472808a7ef8747d64491 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 12 Mar 2022 12:57:08 +0100 Subject: [PATCH 704/757] New val.py `cuda` variable (#6957) * New val.py `cuda` variable Fix for ONNX GPU val. * Update val.py --- val.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/val.py b/val.py index 64c4d4ff9dae..8f2119531949 100644 --- a/val.py +++ b/val.py @@ -143,7 +143,7 @@ def run(data, batch_size = model.batch_size else: device = model.device - if not pt or jit: + if not (pt or jit): batch_size = 1 # export.py models default to batch-size 1 LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models') @@ -152,6 +152,7 @@ def run(data, # Configure model.eval() + cuda = device.type != 'cpu' is_coco = isinstance(data.get('val'), str) and data['val'].endswith('coco/val2017.txt') # COCO dataset nc = 1 if single_cls else int(data['nc']) # number of classes iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95 @@ -177,7 +178,7 @@ def run(data, pbar = tqdm(dataloader, desc=s, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar for batch_i, (im, targets, paths, shapes) in enumerate(pbar): t1 = time_sync() - if pt or jit or engine: + if cuda: im = im.to(device, non_blocking=True) targets = targets.to(device) im = im.half() if half else im.float() # uint8 to fp16/32 From 52c1399fdc6c3db550123e47a2cdcb6dc951e211 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 12 Mar 2022 13:16:29 +0100 Subject: [PATCH 705/757] DetectMultiBackend() return `device` update (#6958) Fixes ONNX validation that returns outputs on CPU. 
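In practice this means numpy outputs (e.g. from ONNX Runtime) are rebuilt as torch tensors on the model's device before postprocessing; a minimal standalone sketch of the pattern (helper name hypothetical):

```python
import numpy as np
import torch

def to_model_device(y, device):
    # numpy backends such as ONNX Runtime return CPU arrays; rebuild them as
    # torch tensors on the model's device so downstream NMS runs there.
    return torch.tensor(y, device=device) if isinstance(y, np.ndarray) else y
```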
--- models/common.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/models/common.py b/models/common.py index 251463525392..48cf55795dd4 100644 --- a/models/common.py +++ b/models/common.py @@ -458,7 +458,8 @@ def forward(self, im, augment=False, visualize=False, val=False): y = (y.astype(np.float32) - zero_point) * scale # re-scale y[..., :4] *= [w, h, w, h] # xywh normalized to pixels - y = torch.tensor(y) if isinstance(y, np.ndarray) else y + if isinstance(y, np.ndarray): + y = torch.tensor(y, device=self.device) return (y, []) if val else y def warmup(self, imgsz=(1, 3, 640, 640)): From 701e1177ac5cfec2f10552e55766d184ca760e12 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 12 Mar 2022 14:00:48 +0100 Subject: [PATCH 706/757] Tensor initialization on device improvements (#6959) * Update common.py speed improvements Eliminate .to() ops where possible for reduced data transfer overhead. Primarily affects warmup and PyTorch Hub inference. * Updates * Updates * Update detect.py * Update val.py --- models/common.py | 2 +- val.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/models/common.py b/models/common.py index 48cf55795dd4..83aecb7569d6 100644 --- a/models/common.py +++ b/models/common.py @@ -466,7 +466,7 @@ def warmup(self, imgsz=(1, 3, 640, 640)): # Warmup model by running inference once if self.pt or self.jit or self.onnx or self.engine: # warmup types if isinstance(self.device, torch.device) and self.device.type != 'cpu': # only warmup GPU models - im = torch.zeros(*imgsz).to(self.device).type(torch.half if self.fp16 else torch.float) # input image + im = torch.zeros(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device) # input self.forward(im) # warmup @staticmethod diff --git a/val.py b/val.py index 8f2119531949..2dd2aec679f9 100644 --- a/val.py +++ b/val.py @@ -87,7 +87,7 @@ def process_batch(detections, labels, iouv): matches = matches[np.unique(matches[:, 1], return_index=True)[1]] # matches = matches[matches[:, 2].argsort()[::-1]] matches = matches[np.unique(matches[:, 0], return_index=True)[1]] - matches = torch.Tensor(matches).to(iouv.device) + matches = torch.from_numpy(matches).to(iouv.device) correct[matches[:, 1].long()] = matches[:, 2:3] >= iouv return correct @@ -155,7 +155,7 @@ def run(data, cuda = device.type != 'cpu' is_coco = isinstance(data.get('val'), str) and data['val'].endswith('coco/val2017.txt') # COCO dataset nc = 1 if single_cls else int(data['nc']) # number of classes - iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95 + iouv = torch.linspace(0.5, 0.95, 10, device=device) # iou vector for mAP@0.5:0.95 niou = iouv.numel() # Dataloader @@ -196,7 +196,7 @@ def run(data, loss += compute_loss([x.float() for x in train_out], targets)[1] # box, obj, cls # NMS - targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels + targets[:, 2:] *= torch.tensor((width, height, width, height), device=device) # to pixels lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling t3 = time_sync() out = non_max_suppression(out, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls) From c13d4ce7ef30acc78e3dbdd9aa4f17e01ed34521 Mon Sep 17 00:00:00 2001 From: paradigm Date: Sat, 12 Mar 2022 16:15:09 +0100 Subject: [PATCH 707/757] EdgeTPU optimizations (#6808) * removed transpose op for better edgetpu support * fix for training case * enabled experimental new quantizer flag * precalculate 
add and mul ops at compile time Co-authored-by: Glenn Jocher --- export.py | 2 +- models/tf.py | 10 ++++++---- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/export.py b/export.py index 1e3d3e2f2e71..7dd06433fe36 100644 --- a/export.py +++ b/export.py @@ -331,7 +331,7 @@ def export_tflite(keras_model, im, file, int8, data, ncalib, prefix=colorstr('Te converter.target_spec.supported_types = [] converter.inference_input_type = tf.uint8 # or tf.int8 converter.inference_output_type = tf.uint8 # or tf.int8 - converter.experimental_new_quantizer = False + converter.experimental_new_quantizer = True f = str(file).replace('.pt', '-int8.tflite') tflite_model = converter.convert() diff --git a/models/tf.py b/models/tf.py index 74681e403afd..728907f8fb47 100644 --- a/models/tf.py +++ b/models/tf.py @@ -222,19 +222,21 @@ def call(self, inputs): x.append(self.m[i](inputs[i])) # x(bs,20,20,255) to x(bs,3,20,20,85) ny, nx = self.imgsz[0] // self.stride[i], self.imgsz[1] // self.stride[i] - x[i] = tf.transpose(tf.reshape(x[i], [-1, ny * nx, self.na, self.no]), [0, 2, 1, 3]) + x[i] = tf.reshape(x[i], [-1, ny * nx, self.na, self.no]) if not self.training: # inference y = tf.sigmoid(x[i]) - xy = (y[..., 0:2] * 2 - 0.5 + self.grid[i]) * self.stride[i] # xy - wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] + grid = tf.transpose(self.grid[i], [0, 2, 1, 3]) - 0.5 + anchor_grid = tf.transpose(self.anchor_grid[i], [0, 2, 1, 3]) * 4 + xy = (y[..., 0:2] * 2 + grid) * self.stride[i] # xy + wh = y[..., 2:4] ** 2 * anchor_grid # Normalize xywh to 0-1 to reduce calibration error xy /= tf.constant([[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32) wh /= tf.constant([[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32) y = tf.concat([xy, wh, y[..., 4:]], -1) z.append(tf.reshape(y, [-1, self.na * ny * nx, self.no])) - return x if self.training else (tf.concat(z, 1), x) + return tf.transpose(x, [0, 2, 1, 3]) if self.training else (tf.concat(z, 1), x) @staticmethod def _make_grid(nx=20, ny=20): From 2d45de617e0e80fb96424425587b6ce123aa0012 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 14 Mar 2022 10:54:51 +0100 Subject: [PATCH 708/757] Model `ema` key backward compatibility fix (#6972) Fix for older model loading issue in https://github.com/ultralytics/yolov5/commit/d3d9cbce221b2ced46dde374f24fde72c8e71c37#commitcomment-68622388 --- models/experimental.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/experimental.py b/models/experimental.py index 782ecbeface9..1230f4656c8f 100644 --- a/models/experimental.py +++ b/models/experimental.py @@ -94,7 +94,7 @@ def attempt_load(weights, map_location=None, inplace=True, fuse=True): model = Ensemble() for w in weights if isinstance(weights, list) else [weights]: ckpt = torch.load(attempt_download(w), map_location=map_location) # load - ckpt = (ckpt['ema'] or ckpt['model']).float() # FP32 model + ckpt = (ckpt.get('ema') or ckpt['model']).float() # FP32 model model.append(ckpt.fuse().eval() if fuse else ckpt.eval()) # fused or un-fused model in eval mode # Compatibility updates From 99de551f979f6aca1f817504831c821cff64b5fd Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 14 Mar 2022 12:41:06 +0100 Subject: [PATCH 709/757] pt model to cpu on TF export --- export.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/export.py b/export.py index 7dd06433fe36..c50de15cf0b8 100644 --- a/export.py +++ b/export.py @@ -494,7 +494,7 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' if int8 or edgetpu: # 
TFLite --int8 bug https://github.com/ultralytics/yolov5/issues/5707 check_requirements(('flatbuffers==1.12',)) # required before `import tensorflow` assert not (tflite and tfjs), 'TFLite and TF.js models must be exported separately, please pass only one type.' - model, f[5] = export_saved_model(model, im, file, dynamic, tf_nms=nms or agnostic_nms or tfjs, + model, f[5] = export_saved_model(model.cpu(), im, file, dynamic, tf_nms=nms or agnostic_nms or tfjs, agnostic_nms=agnostic_nms or tfjs, topk_per_class=topk_per_class, topk_all=topk_all, conf_thres=conf_thres, iou_thres=iou_thres) # keras model if pb or tfjs: # pb prerequisite to tfjs From 932dc78496ca532a41780335468589ad7f0147f7 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 14 Mar 2022 15:07:13 +0100 Subject: [PATCH 710/757] YOLOv5 Export Benchmarks for GPU (#6963) * Add benchmarks.py GPU support * Updates * Updates * Updates * Updates * Add --half * Add TRT requirements * Cleanup * Add TF to warmup types * Update export.py * Update export.py * Update benchmarks.py --- export.py | 24 ++++++++++++------------ models/common.py | 7 ++++--- utils/benchmarks.py | 18 +++++++++++++++--- 3 files changed, 31 insertions(+), 18 deletions(-) diff --git a/export.py b/export.py index c50de15cf0b8..d4f980fdb993 100644 --- a/export.py +++ b/export.py @@ -75,18 +75,18 @@ def export_formats(): # YOLOv5 export formats - x = [['PyTorch', '-', '.pt'], - ['TorchScript', 'torchscript', '.torchscript'], - ['ONNX', 'onnx', '.onnx'], - ['OpenVINO', 'openvino', '_openvino_model'], - ['TensorRT', 'engine', '.engine'], - ['CoreML', 'coreml', '.mlmodel'], - ['TensorFlow SavedModel', 'saved_model', '_saved_model'], - ['TensorFlow GraphDef', 'pb', '.pb'], - ['TensorFlow Lite', 'tflite', '.tflite'], - ['TensorFlow Edge TPU', 'edgetpu', '_edgetpu.tflite'], - ['TensorFlow.js', 'tfjs', '_web_model']] - return pd.DataFrame(x, columns=['Format', 'Argument', 'Suffix']) + x = [['PyTorch', '-', '.pt', True], + ['TorchScript', 'torchscript', '.torchscript', True], + ['ONNX', 'onnx', '.onnx', True], + ['OpenVINO', 'openvino', '_openvino_model', False], + ['TensorRT', 'engine', '.engine', True], + ['CoreML', 'coreml', '.mlmodel', False], + ['TensorFlow SavedModel', 'saved_model', '_saved_model', True], + ['TensorFlow GraphDef', 'pb', '.pb', True], + ['TensorFlow Lite', 'tflite', '.tflite', False], + ['TensorFlow Edge TPU', 'edgetpu', '_edgetpu.tflite', False], + ['TensorFlow.js', 'tfjs', '_web_model', False]] + return pd.DataFrame(x, columns=['Format', 'Argument', 'Suffix', 'GPU']) def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:')): diff --git a/models/common.py b/models/common.py index 83aecb7569d6..4ad040fcd7f1 100644 --- a/models/common.py +++ b/models/common.py @@ -464,10 +464,11 @@ def forward(self, im, augment=False, visualize=False, val=False): def warmup(self, imgsz=(1, 3, 640, 640)): # Warmup model by running inference once - if self.pt or self.jit or self.onnx or self.engine: # warmup types - if isinstance(self.device, torch.device) and self.device.type != 'cpu': # only warmup GPU models + if any((self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb)): # warmup types + if self.device.type != 'cpu': # only warmup GPU models im = torch.zeros(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device) # input - self.forward(im) # warmup + for _ in range(2 if self.jit else 1): # + self.forward(im) # warmup @staticmethod def model_type(p='path/to/model.pt'): diff --git a/utils/benchmarks.py 
b/utils/benchmarks.py index 962df812a9d3..bdbbdc43b639 100644 --- a/utils/benchmarks.py +++ b/utils/benchmarks.py @@ -19,6 +19,7 @@ Requirements: $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu # CPU $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow # GPU + $ pip install -U nvidia-tensorrt --index-url https://pypi.ngc.nvidia.com # TensorRT Usage: $ python utils/benchmarks.py --weights yolov5s.pt --img 640 @@ -41,20 +42,29 @@ import val from utils import notebook_init from utils.general import LOGGER, print_args +from utils.torch_utils import select_device def run(weights=ROOT / 'yolov5s.pt', # weights path imgsz=640, # inference size (pixels) batch_size=1, # batch size data=ROOT / 'data/coco128.yaml', # dataset.yaml path + device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu + half=False, # use FP16 half-precision inference ): y, t = [], time.time() formats = export.export_formats() - for i, (name, f, suffix) in formats.iterrows(): # index, (name, file, suffix) + device = select_device(device) + for i, (name, f, suffix, gpu) in formats.iterrows(): # index, (name, file, suffix, gpu-capable) try: - w = weights if f == '-' else export.run(weights=weights, imgsz=[imgsz], include=[f], device='cpu')[-1] + if device.type != 'cpu': + assert gpu, f'{name} inference not supported on GPU' + if f == '-': + w = weights # PyTorch format + else: + w = export.run(weights=weights, imgsz=[imgsz], include=[f], device=device, half=half)[-1] # all others assert suffix in str(w), 'export failed' - result = val.run(data, w, batch_size, imgsz=imgsz, plots=False, device='cpu', task='benchmark') + result = val.run(data, w, batch_size, imgsz, plots=False, device=device, task='benchmark', half=half) metrics = result[0] # metrics (mp, mr, map50, map, *losses(box, obj, cls)) speeds = result[2] # times (preprocess, inference, postprocess) y.append([name, metrics[3], speeds[1]]) # mAP, t_inference @@ -78,6 +88,8 @@ def parse_opt(): parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)') parser.add_argument('--batch-size', type=int, default=1, help='batch size') parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') + parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') + parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') opt = parser.parse_args() print_args(FILE.stem, opt) return opt From c09fb2aa95b6ca86c460aa106e2308805649feb9 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 15 Mar 2022 16:32:56 +0100 Subject: [PATCH 711/757] Update TQDM bar format (#6988) --- utils/autoanchor.py | 2 +- utils/datasets.py | 7 ++++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/utils/autoanchor.py b/utils/autoanchor.py index a631c21a3b26..6cd2267a375a 100644 --- a/utils/autoanchor.py +++ b/utils/autoanchor.py @@ -152,7 +152,7 @@ def print_results(k, verbose=True): # Evolve f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1 # fitness, generations, mutation prob, sigma - pbar = tqdm(range(gen), desc=f'{PREFIX}Evolving anchors with Genetic Algorithm:') # progress bar + pbar = tqdm(range(gen), bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar for _ in pbar: v = np.ones(sh) while (v == 1).all(): # mutate until a change occurs (prevent duplicates) diff --git a/utils/datasets.py b/utils/datasets.py index 00d0d94e0847..5ce6d607fb7a 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -35,6 +35,7 @@ HELP_URL = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp' # include image suffixes VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes +BAR_FORMAT = '{l_bar}{bar:10}{r_bar}{bar:-10b}' # tqdm bar format # Get orientation exif tag for orientation in ExifTags.TAGS.keys(): @@ -427,7 +428,7 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupt, total if exists: d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupt" - tqdm(None, desc=prefix + d, total=n, initial=n) # display cache results + tqdm(None, desc=prefix + d, total=n, initial=n, bar_format=BAR_FORMAT) # display cache results if cache['msgs']: LOGGER.info('\n'.join(cache['msgs'])) # display warnings assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {HELP_URL}' @@ -492,7 +493,7 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r self.im_hw0, self.im_hw = [None] * n, [None] * n fcn = self.cache_images_to_disk if cache_images == 'disk' else self.load_image results = ThreadPool(NUM_THREADS).imap(fcn, range(n)) - pbar = tqdm(enumerate(results), total=n) + pbar = tqdm(enumerate(results), total=n, bar_format=BAR_FORMAT) for i, x in pbar: if cache_images == 'disk': gb += self.npy_files[i].stat().st_size @@ -509,7 +510,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels..." 
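# Note: the shared BAR_FORMAT ('{l_bar}{bar:10}{r_bar}{bar:-10b}') fixes the tqdm
# bar itself at 10 columns, keeping progress lines compact in consoles and notebooks.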
with Pool(NUM_THREADS) as pool: pbar = tqdm(pool.imap(verify_image_label, zip(self.im_files, self.label_files, repeat(prefix))), - desc=desc, total=len(self.im_files)) + desc=desc, total=len(self.im_files), bar_format=BAR_FORMAT) for im_file, lb, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar: nm += nm_f nf += nf_f From 3f634d43c8ecea14aa9037e2fd28ded0433d491d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 16 Mar 2022 15:33:54 +0100 Subject: [PATCH 712/757] Conditional `Timeout()` by OS (disable on Windows) (#7013) * Conditional `Timeout()` by OS (disable on Windows) * Update general.py --- utils/general.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/utils/general.py b/utils/general.py index a7891cbccbab..e8b3b05c5fe1 100755 --- a/utils/general.py +++ b/utils/general.py @@ -123,13 +123,15 @@ def _timeout_handler(self, signum, frame): raise TimeoutError(self.timeout_message) def __enter__(self): - signal.signal(signal.SIGALRM, self._timeout_handler) # Set handler for SIGALRM - signal.alarm(self.seconds) # start countdown for SIGALRM to be raised + if platform.system() != 'Windows': # not supported on Windows + signal.signal(signal.SIGALRM, self._timeout_handler) # Set handler for SIGALRM + signal.alarm(self.seconds) # start countdown for SIGALRM to be raised def __exit__(self, exc_type, exc_val, exc_tb): - signal.alarm(0) # Cancel SIGALRM if it's scheduled - if self.suppress and exc_type is TimeoutError: # Suppress TimeoutError - return True + if platform.system() != 'Windows': + signal.alarm(0) # Cancel SIGALRM if it's scheduled + if self.suppress and exc_type is TimeoutError: # Suppress TimeoutError + return True class WorkingDirectory(contextlib.ContextDecorator): From 7c6a33564a84a0e78ec19da66ea6016d51c32e0a Mon Sep 17 00:00:00 2001 From: Max Strobel Date: Thu, 17 Mar 2022 16:37:09 +0100 Subject: [PATCH 713/757] fix: add default PIL font as fallback (#7010) * fix: add default font as fallback Add default font as fallback if the downloading of the Arial.ttf font fails for some reason, e.g. no access to public internet. 
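The resulting pattern, condensed into a standalone sketch (assumes Pillow is installed; helper name hypothetical):

```python
from urllib.error import URLError

from PIL import ImageFont

def safe_font(font='Arial.ttf', size=10):
    # Prefer the TrueType font; if it is missing and cannot be fetched
    # (offline machine), fall back to PIL's built-in bitmap font.
    try:
        return ImageFont.truetype(font, size)
    except (OSError, URLError):  # URLError would come from a download helper
        return ImageFont.load_default()
```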
* Update plots.py Co-authored-by: Maximilian Strobel Co-authored-by: Glenn Jocher --- utils/plots.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/utils/plots.py b/utils/plots.py index 6c3f5bcaef37..90f3f241cc5a 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -7,6 +7,7 @@ import os from copy import copy from pathlib import Path +from urllib.error import URLError import cv2 import matplotlib @@ -55,11 +56,13 @@ def check_pil_font(font=FONT, size=10): try: return ImageFont.truetype(str(font) if font.exists() else font.name, size) except Exception: # download if missing - check_font(font) try: + check_font(font) return ImageFont.truetype(str(font), size) except TypeError: check_requirements('Pillow>=8.4.0') # known issue https://github.com/ultralytics/yolov5/issues/5374 + except URLError: # not online + return ImageFont.load_default() class Annotator: From 4effd064b169fc049b4a4bca401b120bf2e93c14 Mon Sep 17 00:00:00 2001 From: Mrinal Jain Date: Fri, 18 Mar 2022 07:29:24 -0400 Subject: [PATCH 714/757] Consistent saved_model output format (#7032) --- export.py | 2 +- models/common.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/export.py b/export.py index d4f980fdb993..2d4a68e62f89 100644 --- a/export.py +++ b/export.py @@ -275,7 +275,7 @@ def export_saved_model(model, im, file, dynamic, m = m.get_concrete_function(spec) frozen_func = convert_variables_to_constants_v2(m) tfm = tf.Module() - tfm.__call__ = tf.function(lambda x: frozen_func(x), [spec]) + tfm.__call__ = tf.function(lambda x: frozen_func(x)[0], [spec]) tfm.__call__(im) tf.saved_model.save( tfm, diff --git a/models/common.py b/models/common.py index 4ad040fcd7f1..5561d92ecb73 100644 --- a/models/common.py +++ b/models/common.py @@ -441,7 +441,7 @@ def forward(self, im, augment=False, visualize=False, val=False): else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU) im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3) if self.saved_model: # SavedModel - y = (self.model(im, training=False) if self.keras else self.model(im)[0]).numpy() + y = (self.model(im, training=False) if self.keras else self.model(im)).numpy() elif self.pb: # GraphDef y = self.frozen_func(x=self.tf.constant(im)).numpy() else: # Lite or Edge TPU From b0ba101ac0aa898a4e4b867d377e140af8d4258a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 20 Mar 2022 01:04:48 +0100 Subject: [PATCH 715/757] `ComputeLoss()` indexing/speed improvements (#7048) * device as class attribute * Update loss.py * Update loss.py * improve zeros * tensor split --- utils/loss.py | 37 +++++++++++++++++++------------------ 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/utils/loss.py b/utils/loss.py index 5aa9f017d2af..0f0137817955 100644 --- a/utils/loss.py +++ b/utils/loss.py @@ -89,9 +89,10 @@ def forward(self, pred, true): class ComputeLoss: + sort_obj_iou = False + # Compute losses def __init__(self, model, autobalance=False): - self.sort_obj_iou = False device = next(model.parameters()).device # get model device h = model.hyp # hyperparameters @@ -111,26 +112,28 @@ def __init__(self, model, autobalance=False): self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, 0.02]) # P3-P7 self.ssi = list(det.stride).index(16) if autobalance else 0 # stride 16 index self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance + self.device = device for k in 'na', 'nc', 'nl', 'anchors': setattr(self, k, getattr(det, k)) - def 
__call__(self, p, targets): # predictions, targets, model - device = targets.device - lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device) + def __call__(self, p, targets): # predictions, targets + lcls = torch.zeros(1, device=self.device) # class loss + lbox = torch.zeros(1, device=self.device) # box loss + lobj = torch.zeros(1, device=self.device) # object loss tcls, tbox, indices, anchors = self.build_targets(p, targets) # targets # Losses for i, pi in enumerate(p): # layer index, layer predictions b, a, gj, gi = indices[i] # image, anchor, gridy, gridx - tobj = torch.zeros_like(pi[..., 0], device=device) # target obj + tobj = torch.zeros(pi.shape[:4], device=self.device) # target obj n = b.shape[0] # number of targets if n: - ps = pi[b, a, gj, gi] # prediction subset corresponding to targets + pxy, pwh, _, pcls = pi[b, a, gj, gi].tensor_split((2, 4, 5), dim=1) # target-subset of predictions # Regression - pxy = ps[:, :2].sigmoid() * 2 - 0.5 - pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i] + pxy = pxy.sigmoid() * 2 - 0.5 + pwh = (pwh.sigmoid() * 2) ** 2 * anchors[i] pbox = torch.cat((pxy, pwh), 1) # predicted box iou = bbox_iou(pbox.T, tbox[i], x1y1x2y2=False, CIoU=True) # iou(prediction, target) lbox += (1.0 - iou).mean() # iou loss @@ -144,9 +147,9 @@ def __call__(self, p, targets): # predictions, targets, model # Classification if self.nc > 1: # cls loss (only if multiple classes) - t = torch.full_like(ps[:, 5:], self.cn, device=device) # targets + t = torch.full_like(pcls, self.cn, device=self.device) # targets t[range(n), tcls[i]] = self.cp - lcls += self.BCEcls(ps[:, 5:], t) # BCE + lcls += self.BCEcls(pcls, t) # BCE # Append targets to text file # with open('targets.txt', 'a') as file: @@ -170,15 +173,15 @@ def build_targets(self, p, targets): # Build targets for compute_loss(), input targets(image,class,x,y,w,h) na, nt = self.na, targets.shape[0] # number of anchors, targets tcls, tbox, indices, anch = [], [], [], [] - gain = torch.ones(7, device=targets.device) # normalized to gridspace gain - ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt) + gain = torch.ones(7, device=self.device) # normalized to gridspace gain + ai = torch.arange(na, device=self.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt) targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2) # append anchor indices g = 0.5 # bias off = torch.tensor([[0, 0], [1, 0], [0, 1], [-1, 0], [0, -1], # j,k,l,m # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm - ], device=targets.device).float() * g # offsets + ], device=self.device).float() * g # offsets for i in range(self.nl): anchors = self.anchors[i] @@ -206,14 +209,12 @@ def build_targets(self, p, targets): offsets = 0 # Define - b, c = t[:, :2].long().T # image, class - gxy = t[:, 2:4] # grid xy - gwh = t[:, 4:6] # grid wh + bc, gxy, gwh, a = t.unsafe_chunk(4, dim=1) # (image, class), grid xy, grid wh, anchors + a, (b, c) = a.long().view(-1), bc.long().T # anchors, image, class gij = (gxy - offsets).long() - gi, gj = gij.T # grid xy indices + gi, gj = gij.T # grid indices # Append - a = t[:, 6].long() # anchor indices indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1))) # image, anchor, grid indices tbox.append(torch.cat((gxy - gij, gwh), 1)) # box anch.append(anchors[a]) # anchors From 9ebec7885fb461993cf7123b36abf61ffd5dfd95 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 20 
Mar 2022 13:51:22 +0100 Subject: [PATCH 716/757] Update Dockerfile to `git clone` instead of `COPY` (#7053) Resolves git command errors that currently happen in image, i.e.: ```bash root@382ae64aeca2:/usr/src/app# git pull Warning: Permanently added the ECDSA host key for IP address '140.82.113.3' to the list of known hosts. git@github.com: Permission denied (publickey). fatal: Could not read from remote repository. Please make sure you have the correct access rights and the repository exists. ``` --- Dockerfile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 896751d50d2d..304e8b2801a9 100644 --- a/Dockerfile +++ b/Dockerfile @@ -19,7 +19,8 @@ RUN mkdir -p /usr/src/app WORKDIR /usr/src/app # Copy contents -COPY . /usr/src/app +RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app +# COPY . /usr/src/app # Downloads to user config dir ADD https://ultralytics.com/assets/Arial.ttf /root/.config/Ultralytics/ From 6843ea5d7f9c5d4b8132d00ba17fb296dc81d867 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 20 Mar 2022 13:55:32 +0100 Subject: [PATCH 717/757] Create SECURITY.md (#7054) * Create SECURITY.md Resolves https://github.com/ultralytics/yolov5/issues/7052 * Move into ./github * Update SECURITY.md --- .github/SECURITY.md | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 .github/SECURITY.md diff --git a/.github/SECURITY.md b/.github/SECURITY.md new file mode 100644 index 000000000000..aa3e8409da6b --- /dev/null +++ b/.github/SECURITY.md @@ -0,0 +1,7 @@ +# Security Policy + +We aim to make YOLOv5 🚀 as secure as possible! If you find potential vulnerabilities or have any concerns please let us know so we can investigate and take corrective action if needed. + +### Reporting a Vulnerability + +To report vulnerabilities please email us at hello@ultralytics.com or visit https://ultralytics.com/contact. Thank you! From 9f4d11379bb931586c1f51c1d85c6fac9fc37eda Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 20 Mar 2022 14:18:05 +0100 Subject: [PATCH 718/757] Fix incomplete URL substring sanitation (#7056) Resolves code scanning alert in https://github.com/ultralytics/yolov5/issues/7055 --- utils/datasets.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/datasets.py b/utils/datasets.py index 5ce6d607fb7a..8627344af7b4 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -15,6 +15,7 @@ from multiprocessing.pool import Pool, ThreadPool from pathlib import Path from threading import Thread +from urllib.parse import urlparse from zipfile import ZipFile import cv2 @@ -301,7 +302,7 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True): for i, s in enumerate(sources): # index, source # Start thread to read frames from video stream st = f'{i + 1}/{n}: {s}... 
' - if 'youtube.com/' in s or 'youtu.be/' in s: # if source is YouTube video + if urlparse(s).hostname in ('youtube.com', 'youtu.be'): # if source is YouTube video check_requirements(('pafy', 'youtube_dl==2020.12.2')) import pafy s = pafy.new(s).getbest(preftype="mp4").url # YouTube URL From 529fbc1814f899eab2df8146944c23d0e168610e Mon Sep 17 00:00:00 2001 From: Philip Gutjahr Date: Sun, 20 Mar 2022 15:46:29 +0100 Subject: [PATCH 719/757] Use PIL to eliminate chroma subsampling in crops (#7008) * use pillow to save higher-quality jpg (w/o color subsampling) * Cleanup and doc issue Co-authored-by: Glenn Jocher --- utils/plots.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/utils/plots.py b/utils/plots.py index 90f3f241cc5a..a30c0faf962a 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -458,7 +458,7 @@ def profile_idetection(start=0, stop=0, labels=(), save_dir=''): plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200) -def save_one_box(xyxy, im, file='image.jpg', gain=1.02, pad=10, square=False, BGR=False, save=True): +def save_one_box(xyxy, im, file=Path('im.jpg'), gain=1.02, pad=10, square=False, BGR=False, save=True): # Save image crop as {file} with crop size multiple {gain} and {pad} pixels. Save and/or return crop xyxy = torch.tensor(xyxy).view(-1, 4) b = xyxy2xywh(xyxy) # boxes @@ -470,5 +470,7 @@ def save_one_box(xyxy, im, file='image.jpg', gain=1.02, pad=10, square=False, BG crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2]), ::(1 if BGR else -1)] if save: file.parent.mkdir(parents=True, exist_ok=True) # make directory - cv2.imwrite(str(increment_path(file).with_suffix('.jpg')), crop) + f = str(increment_path(file).with_suffix('.jpg')) + # cv2.imwrite(f, crop) # https://github.com/ultralytics/yolov5/issues/7007 chroma subsampling issue + Image.fromarray(cv2.cvtColor(crop, cv2.COLOR_BGR2RGB)).save(f, quality=95, subsampling=0) return crop From f327eee614384583a93e6f5374280e78b80a250d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 20 Mar 2022 16:27:51 +0100 Subject: [PATCH 720/757] Fix `check_anchor_order()` in pixel-space not grid-space (#7060) * Update `check_anchor_order()` Use mean area per output layer for added stability. 
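To illustrate the mean-area ordering check with the stock YOLOv5 anchors (values shown only for demonstration):

```python
import torch

anchors = torch.tensor([[[10., 13], [16, 30], [33, 23]],       # P3/8
                        [[30, 61], [62, 45], [59, 119]],       # P4/16
                        [[116, 90], [156, 198], [373, 326]]])  # P5/32
stride = torch.tensor([8., 16., 32.])

a = anchors.prod(-1).mean(-1).view(-1)  # mean anchor area per output layer
da, ds = a[-1] - a[0], stride[-1] - stride[0]  # area delta, stride delta
if da.sign() != ds.sign():  # anchor areas must grow in the same order as strides
    anchors = anchors.flip(0)  # reverse layer order to match
```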
* Check in pixel-space not grid-space fix --- models/yolo.py | 2 +- utils/autoanchor.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/models/yolo.py b/models/yolo.py index f659a04545b9..2f4bbe0f71d1 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -110,8 +110,8 @@ def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, i s = 256 # 2x min stride m.inplace = self.inplace m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward + check_anchor_order(m) # must be in pixel-space (not grid-space) m.anchors /= m.stride.view(-1, 1, 1) - check_anchor_order(m) self.stride = m.stride self._initialize_biases() # only run once diff --git a/utils/autoanchor.py b/utils/autoanchor.py index 6cd2267a375a..7eb46af91195 100644 --- a/utils/autoanchor.py +++ b/utils/autoanchor.py @@ -17,7 +17,7 @@ def check_anchor_order(m): # Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary - a = m.anchors.prod(-1).view(-1) # anchor area + a = m.anchors.prod(-1).mean(-1).view(-1) # mean anchor area per output layer da = a[-1] - a[0] # delta a ds = m.stride[-1] - m.stride[0] # delta s if da.sign() != ds.sign(): # same order From d5e363f29d7619f2a186678eb6d61672f49b11f1 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 20 Mar 2022 18:02:05 +0100 Subject: [PATCH 721/757] Update detect.py non-inplace with `y.tensor_split()` (#7062) --- models/yolo.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/models/yolo.py b/models/yolo.py index 2f4bbe0f71d1..09215101e8a0 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -62,9 +62,10 @@ def forward(self, x): y[..., 0:2] = (y[..., 0:2] * 2 - 0.5 + self.grid[i]) * self.stride[i] # xy y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh else: # for YOLOv5 on AWS Inferentia https://github.com/ultralytics/yolov5/pull/2953 - xy = (y[..., 0:2] * 2 - 0.5 + self.grid[i]) * self.stride[i] # xy - wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh - y = torch.cat((xy, wh, y[..., 4:]), -1) + xy, wh, conf = y.tensor_split((2, 4), 4) + xy = (xy * 2 - 0.5 + self.grid[i]) * self.stride[i] # xy + wh = (wh * 2) ** 2 * self.anchor_grid[i] # wh + y = torch.cat((xy, wh, conf), 4) z.append(y.view(bs, -1, self.no)) return x if self.training else (torch.cat(z, 1), x) From 0529b77232d72c41557fb03753caa356f583e5fc Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 20 Mar 2022 18:03:37 +0100 Subject: [PATCH 722/757] Update common.py lists for tuples (#7063) Improved profiling. 
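The profiling claim is easiest to see with a rough micro-benchmark sketch (illustrative only; absolute numbers depend on hardware and PyTorch build) comparing tuple vs. list literals as `torch.cat()` input, mirroring the changes below:

```python
# Rough micro-benchmark sketch: tuple vs. list literal passed to torch.cat().
# The per-call saving (skipping list construction) is tiny, but these
# concatenations sit in hot paths that run on every forward pass.
import timeit

import torch

x = torch.randn(8, 64, 32, 32)

t_list = timeit.timeit(lambda: torch.cat([x, x, x], 1), number=1000)
t_tuple = timeit.timeit(lambda: torch.cat((x, x, x), 1), number=1000)
print(f'list: {t_list:.3f}s, tuple: {t_tuple:.3f}s')
```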
--- models/common.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/models/common.py b/models/common.py index 5561d92ecb73..066f8774d3c3 100644 --- a/models/common.py +++ b/models/common.py @@ -31,7 +31,7 @@ def autopad(k, p=None): # kernel, padding # Pad to 'same' if p is None: - p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad + p = k // 2 if isinstance(k, int) else (x // 2 for x in k) # auto-pad return p @@ -133,7 +133,7 @@ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, nu self.cv2 = Conv(c1, c_, 1, 1) self.cv3 = Conv(2 * c_, c2, 1) # act=FReLU(c2) self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n))) - # self.m = nn.Sequential(*[CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)]) + # self.m = nn.Sequential(*(CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n))) def forward(self, x): return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), dim=1)) @@ -194,7 +194,7 @@ def forward(self, x): warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning y1 = self.m(x) y2 = self.m(y1) - return self.cv2(torch.cat([x, y1, y2, self.m(y2)], 1)) + return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1)) class Focus(nn.Module): @@ -205,7 +205,7 @@ def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, k # self.contract = Contract(gain=2) def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2) - return self.conv(torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1)) + return self.conv(torch.cat((x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]), 1)) # return self.conv(self.contract(x)) @@ -219,7 +219,7 @@ def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, s def forward(self, x): y = self.cv1(x) - return torch.cat([y, self.cv2(y)], 1) + return torch.cat((y, self.cv2(y)), 1) class GhostBottleneck(nn.Module): From e278fd63ec6c09d264c2bc983ad91717c577e97c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 20 Mar 2022 18:15:22 +0100 Subject: [PATCH 723/757] Update W&B message to `LOGGER.info()` (#7064) --- utils/loggers/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 86ccf38443a9..ce0bea00e1af 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -56,7 +56,7 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, if not wandb: prefix = colorstr('Weights & Biases: ') s = f"{prefix}run 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs (RECOMMENDED)" - print(emojis(s)) + self.logger.info(emojis(s)) # TensorBoard s = self.save_dir From 9e75cbf4c18457297cd7b28653ebeb5b1262e8c9 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 20 Mar 2022 18:17:04 +0100 Subject: [PATCH 724/757] Update __init__.py (#7065) --- utils/loggers/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index ce0bea00e1af..866bdc4be2f5 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -47,7 +47,7 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, 'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', # metrics 'val/box_loss', 'val/obj_loss', 'val/cls_loss', # val loss 'x/lr0', 'x/lr1', 'x/lr2'] # params - self.best_keys = ['best/epoch', 'best/precision', 
'best/recall', 'best/mAP_0.5', 'best/mAP_0.5:0.95',]
+        self.best_keys = ['best/epoch', 'best/precision', 'best/recall', 'best/mAP_0.5', 'best/mAP_0.5:0.95']
         for k in LOGGERS:
             setattr(self, k, None)  # init empty logger dictionary
         self.csv = True  # always log to csv

From 178c1095768a81edefc4c4ae87984ab1962e0906 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Sun, 20 Mar 2022 18:37:27 +0100
Subject: [PATCH 725/757] Add non-zero `da` `check_anchor_order()` condition
 (#7066)

---
 utils/autoanchor.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/utils/autoanchor.py b/utils/autoanchor.py
index 7eb46af91195..882712d45a38 100644
--- a/utils/autoanchor.py
+++ b/utils/autoanchor.py
@@ -20,7 +20,7 @@ def check_anchor_order(m):
     a = m.anchors.prod(-1).mean(-1).view(-1)  # mean anchor area per output layer
     da = a[-1] - a[0]  # delta a
     ds = m.stride[-1] - m.stride[0]  # delta s
-    if da.sign() != ds.sign():  # same order
+    if da and (da.sign() != ds.sign()):  # same order
         LOGGER.info(f'{PREFIX}Reversing anchor order')
         m.anchors[:] = m.anchors.flip(0)

From 9cd89b75cca8bb165a3b19c9b8356f7b3bb22b31 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Sun, 20 Mar 2022 18:55:13 +0100
Subject: [PATCH 726/757] Fix2 `check_anchor_order()` in pixel-space not
 grid-space (#7067)

Follows https://github.com/ultralytics/yolov5/pull/7060, which provided only a partial solution to this issue. #7060 resolved occurrences in yolo.py; this applies the same fix in autoanchor.py.

---
 utils/autoanchor.py | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/utils/autoanchor.py b/utils/autoanchor.py
index 882712d45a38..77518abe9889 100644
--- a/utils/autoanchor.py
+++ b/utils/autoanchor.py
@@ -40,7 +40,8 @@ def metric(k):  # compute metric
         bpr = (best > 1 / thr).float().mean()  # best possible recall
         return bpr, aat

-    anchors = m.anchors.clone() * m.stride.to(m.anchors.device).view(-1, 1, 1)  # current anchors
+    stride = m.stride.to(m.anchors.device).view(-1, 1, 1)  # model strides
+    anchors = m.anchors.clone() * stride  # current anchors
     bpr, aat = metric(anchors.cpu().view(-1, 2))
     s = f'\n{PREFIX}{aat:.2f} anchors/target, {bpr:.3f} Best Possible Recall (BPR). '
     if bpr > 0.98:  # threshold to recompute
@@ -55,8 +56,9 @@ def metric(k):  # compute metric
         new_bpr = metric(anchors)[0]
         if new_bpr > bpr:  # replace anchors
             anchors = torch.tensor(anchors, device=m.anchors.device).type_as(m.anchors)
-            m.anchors[:] = anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1)  # loss
-            check_anchor_order(m)
+            m.anchors[:] = anchors.clone().view_as(m.anchors)
+            check_anchor_order(m)  # must be in pixel-space (not grid-space)
+            m.anchors /= stride
             s = f'{PREFIX}Done ✅ (optional: update model *.yaml to use these anchors in the future)'
         else:
             s = f'{PREFIX}Done ⚠️ (original anchors better than new anchors, proceeding with original anchors)'

From 9b771a3e7112f864cb9c877733eca9240e8fb4a5 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Mon, 21 Mar 2022 09:33:39 +0100
Subject: [PATCH 727/757] Revert "Update detect.py non-inplace with
 `y.tensor_split()` (#7062)" (#7074)

This reverts commit d5e363f29d7619f2a186678eb6d61672f49b11f1.
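For reference, the reverted decomposition and the restored slicing formulation are numerically identical; `Tensor.tensor_split()` simply requires torch>=1.8 (see the compatibility note in #7102 further below). A quick sketch (mine, not part of the revert) checking the equivalence on a dummy Detect() output, assuming a COCO-shaped head:

```python
# Equivalence check sketch: tensor_split() vs. basic slicing on a dummy
# Detect() output of shape (bs, na, ny, nx, no). tensor_split needs torch>=1.8.
import torch

y = torch.randn(1, 3, 20, 20, 85)  # no = 80 classes + 5 (xywh + objectness)

xy, wh, conf = y.tensor_split((2, 4), 4)  # split last dim at indices 2 and 4
assert torch.equal(xy, y[..., 0:2])
assert torch.equal(wh, y[..., 2:4])
assert torch.equal(conf, y[..., 4:])
```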
--- models/yolo.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/models/yolo.py b/models/yolo.py index 09215101e8a0..2f4bbe0f71d1 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -62,10 +62,9 @@ def forward(self, x): y[..., 0:2] = (y[..., 0:2] * 2 - 0.5 + self.grid[i]) * self.stride[i] # xy y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh else: # for YOLOv5 on AWS Inferentia https://github.com/ultralytics/yolov5/pull/2953 - xy, wh, conf = y.tensor_split((2, 4), 4) - xy = (xy * 2 - 0.5 + self.grid[i]) * self.stride[i] # xy - wh = (wh * 2) ** 2 * self.anchor_grid[i] # wh - y = torch.cat((xy, wh, conf), 4) + xy = (y[..., 0:2] * 2 - 0.5 + self.grid[i]) * self.stride[i] # xy + wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh + y = torch.cat((xy, wh, y[..., 4:]), -1) z.append(y.view(bs, -1, self.no)) return x if self.training else (torch.cat(z, 1), x) From 6f128031d073754ee8ed6b6a85ecb6c0619cd0a7 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 21 Mar 2022 18:35:36 +0100 Subject: [PATCH 728/757] Update loss.py with `if self.gr < 1:` (#7087) * Update loss.py with `if self.gr < 1:` * Update loss.py --- utils/loss.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/utils/loss.py b/utils/loss.py index 0f0137817955..b49cc7f66e66 100644 --- a/utils/loss.py +++ b/utils/loss.py @@ -139,11 +139,13 @@ def __call__(self, p, targets): # predictions, targets lbox += (1.0 - iou).mean() # iou loss # Objectness - score_iou = iou.detach().clamp(0).type(tobj.dtype) + iou = iou.detach().clamp(0).type(tobj.dtype) if self.sort_obj_iou: - sort_id = torch.argsort(score_iou) - b, a, gj, gi, score_iou = b[sort_id], a[sort_id], gj[sort_id], gi[sort_id], score_iou[sort_id] - tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * score_iou # iou ratio + j = iou.argsort() + b, a, gj, gi, iou = b[j], a[j], gj[j], gi[j], iou[j] + if self.gr < 1: + iou = (1.0 - self.gr) + self.gr * iou + tobj[b, a, gj, gi] = iou # iou ratio # Classification if self.nc > 1: # cls loss (only if multiple classes) From a2d617ece94dcd8c9bc205ea70f1223c84fdbe3a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 21 Mar 2022 19:18:34 +0100 Subject: [PATCH 729/757] Update loss for FP16 `tobj` (#7088) --- utils/loss.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/loss.py b/utils/loss.py index b49cc7f66e66..a06330e034bc 100644 --- a/utils/loss.py +++ b/utils/loss.py @@ -125,7 +125,7 @@ def __call__(self, p, targets): # predictions, targets # Losses for i, pi in enumerate(p): # layer index, layer predictions b, a, gj, gi = indices[i] # image, anchor, gridy, gridx - tobj = torch.zeros(pi.shape[:4], device=self.device) # target obj + tobj = torch.zeros(pi.shape[:4], dtype=pi.dtype, device=self.device) # target obj n = b.shape[0] # number of targets if n: From a600baed8efc6407ec4fb7a71fa1dbe3be23d441 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 22 Mar 2022 15:41:19 +0100 Subject: [PATCH 730/757] Update model summary to display model name (#7101) --- utils/torch_utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index efcacc9ca735..793c9c184a44 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -229,7 +229,8 @@ def model_info(model, verbose=False, img_size=640): except (ImportError, Exception): fs = '' - LOGGER.info(f"Model Summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}") + name = 
model.yaml_file.rstrip('.yaml').replace('yolov5', 'YOLOv5') if hasattr(model, 'yaml_file') else 'Model' + LOGGER.info(f"{name} summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}") def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416) From 05aae1733352289e4c4dca031159df7f0354d049 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 22 Mar 2022 17:36:05 +0100 Subject: [PATCH 731/757] `torch.split()` 1.7.0 compatibility fix (#7102) * Update loss.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update loss.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/loss.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/utils/loss.py b/utils/loss.py index a06330e034bc..bf9b592d4ad2 100644 --- a/utils/loss.py +++ b/utils/loss.py @@ -108,13 +108,15 @@ def __init__(self, model, autobalance=False): if g > 0: BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g) - det = de_parallel(model).model[-1] # Detect() module - self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, 0.02]) # P3-P7 - self.ssi = list(det.stride).index(16) if autobalance else 0 # stride 16 index + m = de_parallel(model).model[-1] # Detect() module + self.balance = {3: [4.0, 1.0, 0.4]}.get(m.nl, [4.0, 1.0, 0.25, 0.06, 0.02]) # P3-P7 + self.ssi = list(m.stride).index(16) if autobalance else 0 # stride 16 index self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance + self.na = m.na # number of anchors + self.nc = m.nc # number of classes + self.nl = m.nl # number of layers + self.anchors = m.anchors self.device = device - for k in 'na', 'nc', 'nl', 'anchors': - setattr(self, k, getattr(det, k)) def __call__(self, p, targets): # predictions, targets lcls = torch.zeros(1, device=self.device) # class loss @@ -129,7 +131,8 @@ def __call__(self, p, targets): # predictions, targets n = b.shape[0] # number of targets if n: - pxy, pwh, _, pcls = pi[b, a, gj, gi].tensor_split((2, 4, 5), dim=1) # target-subset of predictions + # pxy, pwh, _, pcls = pi[b, a, gj, gi].tensor_split((2, 4, 5), dim=1) # faster, requires torch 1.8.0 + pxy, pwh, _, pcls = pi[b, a, gj, gi].split((2, 2, 1, self.nc), 1) # target-subset of predictions # Regression pxy = pxy.sigmoid() * 2 - 0.5 From ee0b3b2a953bd50ba29b39119a09ef9521596416 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 22 Mar 2022 18:02:35 +0100 Subject: [PATCH 732/757] Update benchmarks significant digits (#7103) --- utils/benchmarks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/benchmarks.py b/utils/benchmarks.py index bdbbdc43b639..446248c03f68 100644 --- a/utils/benchmarks.py +++ b/utils/benchmarks.py @@ -67,7 +67,7 @@ def run(weights=ROOT / 'yolov5s.pt', # weights path result = val.run(data, w, batch_size, imgsz, plots=False, device=device, task='benchmark', half=half) metrics = result[0] # metrics (mp, mr, map50, map, *losses(box, obj, cls)) speeds = result[2] # times (preprocess, inference, postprocess) - y.append([name, metrics[3], speeds[1]]) # mAP, t_inference + y.append([name, round(metrics[3], 4), round(speeds[1], 2)]) # mAP, t_inference except Exception as e: LOGGER.warning(f'WARNING: Benchmark failure for {name}: {e}') y.append([name, None, None]) # mAP, t_inference From 6134ec5d9484ac9ac743121b1c74709e93c68a88 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 22 Mar 2022 20:05:07 +0100 
Subject: [PATCH 733/757] Model summary `pathlib` fix (#7104)

Stems were not computed correctly for YOLOv5l with the current `.rstrip()` implementation: `str.rstrip('.yaml')` strips any trailing characters from the set {'.', 'y', 'a', 'm', 'l'} rather than the literal suffix, so `'yolov5l.yaml'.rstrip('.yaml')` returns `'yolov5'`, silently dropping the trailing `'l'`. `Path(...).stem` removes only the extension. After fix:

```
YOLOv5l summary: 468 layers, 46563709 parameters, 46563709 gradients, 109.3 GFLOPs
```

---
 utils/torch_utils.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/utils/torch_utils.py b/utils/torch_utils.py
index 793c9c184a44..72f8a0fd1659 100644
--- a/utils/torch_utils.py
+++ b/utils/torch_utils.py
@@ -11,6 +11,7 @@
 import warnings
 from contextlib import contextmanager
 from copy import deepcopy
+from pathlib import Path

 import torch
 import torch.distributed as dist
@@ -229,7 +230,7 @@ def model_info(model, verbose=False, img_size=640):
         except (ImportError, Exception):
             fs = ''

-    name = model.yaml_file.rstrip('.yaml').replace('yolov5', 'YOLOv5') if hasattr(model, 'yaml_file') else 'Model'
+    name = Path(model.yaml_file).stem.replace('yolov5', 'YOLOv5') if hasattr(model, 'yaml_file') else 'Model'
     LOGGER.info(f"{name} summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}")

From ecc2c7ba73e71211b192cba69e255afad92de67a Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Tue, 22 Mar 2022 20:44:07 +0100
Subject: [PATCH 734/757] Remove named arguments where possible (#7105)

* Remove named arguments where possible

Speed improvements.

* Update yolo.py

* Update yolo.py

* Update yolo.py

---
 models/common.py | 14 +++++++-------
 models/yolo.py   | 10 +++++-----
 2 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/models/common.py b/models/common.py
index 066f8774d3c3..0286c74fe8cd 100644
--- a/models/common.py
+++ b/models/common.py
@@ -121,7 +121,7 @@ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, nu
     def forward(self, x):
         y1 = self.cv3(self.m(self.cv1(x)))
         y2 = self.cv2(x)
-        return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1))))
+        return self.cv4(self.act(self.bn(torch.cat((y1, y2), 1))))


 class C3(nn.Module):
@@ -136,7 +136,7 @@ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, nu
         # self.m = nn.Sequential(*(CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)))

     def forward(self, x):
-        return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), dim=1))
+        return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), 1))


 class C3TR(C3):
@@ -527,7 +527,7 @@ def forward(self, imgs, size=640, augment=False, profile=False):
         p = next(self.model.parameters()) if self.pt else torch.zeros(1)  # for device and type
         autocast = self.amp and (p.device.type != 'cpu')  # Automatic Mixed Precision (AMP) inference
         if isinstance(imgs, torch.Tensor):  # torch
-            with amp.autocast(enabled=autocast):
+            with amp.autocast(autocast):
                 return self.model(imgs.to(p.device).type_as(p), augment, profile)  # inference

         # Pre-process
@@ -550,19 +550,19 @@ def forward(self, imgs, size=640, augment=False, profile=False):
                 shape1.append([y * g for y in s])
                 imgs[i] = im if im.data.contiguous else np.ascontiguousarray(im)  # update
             shape1 = [make_divisible(x, self.stride) if self.pt else size for x in np.array(shape1).max(0)]  # inf shape
-            x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs]  # pad
+            x = [letterbox(im, shape1, auto=False)[0] for im in imgs]  # pad
             x = np.ascontiguousarray(np.array(x).transpose((0, 3, 1, 2)))  # stack and BHWC to BCHW
             x = torch.from_numpy(x).to(p.device).type_as(p) / 255  # uint8 to fp16/32
             t.append(time_sync())

-        with amp.autocast(enabled=autocast):
+        with amp.autocast(autocast):
             # Inference
             y = self.model(x, augment, profile) 
# forward t.append(time_sync()) # Post-process - y = non_max_suppression(y if self.dmb else y[0], self.conf, iou_thres=self.iou, classes=self.classes, - agnostic=self.agnostic, multi_label=self.multi_label, max_det=self.max_det) # NMS + y = non_max_suppression(y if self.dmb else y[0], self.conf, self.iou, self.classes, self.agnostic, + self.multi_label, max_det=self.max_det) # NMS for i in range(n): scale_coords(shape1, y[i][:, :4], shape0[i]) diff --git a/models/yolo.py b/models/yolo.py index 2f4bbe0f71d1..9f4701c49f9d 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -71,13 +71,13 @@ def forward(self, x): def _make_grid(self, nx=20, ny=20, i=0): d = self.anchors[i].device + shape = 1, self.na, ny, nx, 2 # grid shape if check_version(torch.__version__, '1.10.0'): # torch>=1.10.0 meshgrid workaround for torch>=0.7 compatibility - yv, xv = torch.meshgrid([torch.arange(ny, device=d), torch.arange(nx, device=d)], indexing='ij') + yv, xv = torch.meshgrid(torch.arange(ny, device=d), torch.arange(nx, device=d), indexing='ij') else: - yv, xv = torch.meshgrid([torch.arange(ny, device=d), torch.arange(nx, device=d)]) - grid = torch.stack((xv, yv), 2).expand((1, self.na, ny, nx, 2)).float() - anchor_grid = (self.anchors[i].clone() * self.stride[i]) \ - .view((1, self.na, 1, 1, 2)).expand((1, self.na, ny, nx, 2)).float() + yv, xv = torch.meshgrid(torch.arange(ny, device=d), torch.arange(nx, device=d)) + grid = torch.stack((xv, yv), 2).expand(shape).float() + anchor_grid = (self.anchors[i] * self.stride[i]).view((1, self.na, 1, 1, 2)).expand(shape).float() return grid, anchor_grid From c3ae4e4af6f75aff537b876adc11da3de441dd60 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 23 Mar 2022 01:19:37 +0100 Subject: [PATCH 735/757] Multi-threaded VisDrone and VOC downloads (#7108) * Multi-threaded VOC download * Update VOC.yaml * Update * Update general.py * Update general.py --- data/GlobalWheat2020.yaml | 1 + data/Objects365.yaml | 1 + data/SKU-110K.yaml | 1 + data/VOC.yaml | 2 +- data/VisDrone.yaml | 2 +- data/coco.yaml | 1 + utils/general.py | 11 +++++++---- 7 files changed, 13 insertions(+), 6 deletions(-) diff --git a/data/GlobalWheat2020.yaml b/data/GlobalWheat2020.yaml index 869dace0be2b..c1ba289f2833 100644 --- a/data/GlobalWheat2020.yaml +++ b/data/GlobalWheat2020.yaml @@ -34,6 +34,7 @@ names: ['wheat_head'] # class names download: | from utils.general import download, Path + # Download dir = Path(yaml['path']) # dataset root dir urls = ['https://zenodo.org/record/4298502/files/global-wheat-codalab-official.zip', diff --git a/data/Objects365.yaml b/data/Objects365.yaml index 4c7cf3fdb2c8..bd6e5d6e1144 100644 --- a/data/Objects365.yaml +++ b/data/Objects365.yaml @@ -65,6 +65,7 @@ download: | from utils.general import Path, download, np, xyxy2xywhn + # Make Directories dir = Path(yaml['path']) # dataset root dir for p in 'images', 'labels': diff --git a/data/SKU-110K.yaml b/data/SKU-110K.yaml index 9481b7a04aee..46459eab6bb7 100644 --- a/data/SKU-110K.yaml +++ b/data/SKU-110K.yaml @@ -24,6 +24,7 @@ download: | from tqdm import tqdm from utils.general import np, pd, Path, download, xyxy2xywh + # Download dir = Path(yaml['path']) # dataset root dir parent = Path(dir.parent) # download dir diff --git a/data/VOC.yaml b/data/VOC.yaml index 975d56466de1..be04fb1e2ecb 100644 --- a/data/VOC.yaml +++ b/data/VOC.yaml @@ -62,7 +62,7 @@ download: | urls = [url + 'VOCtrainval_06-Nov-2007.zip', # 446MB, 5012 images url + 'VOCtest_06-Nov-2007.zip', # 438MB, 4953 images url + 'VOCtrainval_11-May-2012.zip'] # 
1.95GB, 17126 images - download(urls, dir=dir / 'images', delete=False) + download(urls, dir=dir / 'images', delete=False, threads=3) # Convert path = dir / f'images/VOCdevkit' diff --git a/data/VisDrone.yaml b/data/VisDrone.yaml index 83a5c7d55e06..2a3b2f03e674 100644 --- a/data/VisDrone.yaml +++ b/data/VisDrone.yaml @@ -54,7 +54,7 @@ download: | 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-val.zip', 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-dev.zip', 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-challenge.zip'] - download(urls, dir=dir) + download(urls, dir=dir, threads=4) # Convert for d in 'VisDrone2019-DET-train', 'VisDrone2019-DET-val', 'VisDrone2019-DET-test-dev': diff --git a/data/coco.yaml b/data/coco.yaml index 3ed7e48a2185..7494fc2f9cd1 100644 --- a/data/coco.yaml +++ b/data/coco.yaml @@ -30,6 +30,7 @@ names: ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 't download: | from utils.general import download, Path + # Download labels segments = False # segment or box labels dir = Path(yaml['path']) # dataset root dir diff --git a/utils/general.py b/utils/general.py index e8b3b05c5fe1..b0c5e9d69ab7 100755 --- a/utils/general.py +++ b/utils/general.py @@ -449,8 +449,9 @@ def check_dataset(data, autodownload=True): if val: val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path if not all(x.exists() for x in val): - LOGGER.info('\nDataset not found, missing paths: %s' % [str(x) for x in val if not x.exists()]) + LOGGER.info(emojis('\nDataset not found ⚠️, missing paths %s' % [str(x) for x in val if not x.exists()])) if s and autodownload: # download script + t = time.time() root = path.parent if 'path' in data else '..' # unzip directory i.e. '../' if s.startswith('http') and s.endswith('.zip'): # URL f = Path(s).name # filename @@ -465,9 +466,11 @@ def check_dataset(data, autodownload=True): r = os.system(s) else: # python script r = exec(s, {'yaml': data}) # return None - LOGGER.info(f"Dataset autodownload {f'success, saved to {root}' if r in (0, None) else 'failure'}\n") + dt = f'({round(time.time() - t, 1)}s)' + s = f"success ✅ {dt}, saved to {colorstr('bold', root)}" if r in (0, None) else f"failure {dt} ❌" + LOGGER.info(emojis(f"Dataset download {s}")) else: - raise Exception('Dataset not found.') + raise Exception(emojis('Dataset not found ❌')) return data # dictionary @@ -491,7 +494,7 @@ def download_one(url, dir): if curl: os.system(f"curl -L '{url}' -o '{f}' --retry 9 -C -") # curl download, retry and resume on fail else: - torch.hub.download_url_to_file(url, f, progress=True) # torch download + torch.hub.download_url_to_file(url, f, progress=threads == 1) # torch download if unzip and f.suffix in ('.zip', '.gz'): LOGGER.info(f'Unzipping {f}...') if f.suffix == '.zip': From bc3ed957ce0f0990a3cb408e462197b83b0d075f Mon Sep 17 00:00:00 2001 From: yeshanliu <41566254+yeshanliu@users.noreply.github.com> Date: Wed, 23 Mar 2022 22:35:15 +0800 Subject: [PATCH 736/757] `np.fromfile()` Chinese image paths fix (#6979) * :tada: :new: now can read Chinese image path. use "cv2.imdecode(np.fromfile(f, np.uint8), cv2.IMREAD_COLOR)" instead of "cv2.imread(f)" for Chinese image path. 
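A minimal standalone sketch of the workaround (the helper name is mine, not from the diff): `cv2.imread()` goes through a narrow byte-string path API and can fail on non-ASCII filenames on some platforms, whereas `np.fromfile()` accepts a Python unicode path and `cv2.imdecode()` then decodes the raw bytes:

```python
# Sketch of the workaround; imread_unicode is an illustrative name, not from the PR.
import cv2
import numpy as np


def imread_unicode(path):
    # np.fromfile handles the unicode path; cv2.imdecode decodes the JPEG/PNG bytes
    return cv2.imdecode(np.fromfile(path, np.uint8), cv2.IMREAD_COLOR)


im = imread_unicode('图像.jpg')  # hypothetical non-ASCII filename; returns a BGR ndarray (or None)
```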
* Update datasets.py * Update __init__.py Co-authored-by: Glenn Jocher --- utils/datasets.py | 3 +++ utils/loggers/__init__.py | 3 +++ 2 files changed, 6 insertions(+) diff --git a/utils/datasets.py b/utils/datasets.py index 8627344af7b4..f212e54633be 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -32,6 +32,9 @@ segments2boxes, xyn2xy, xywh2xyxy, xywhn2xyxy, xyxy2xywhn) from utils.torch_utils import torch_distributed_zero_first +# Remap +cv2.imread = lambda x: cv2.imdecode(np.fromfile(x, np.uint8), cv2.IMREAD_COLOR) # for Chinese filenames + # Parameters HELP_URL = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp' # include image suffixes diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 866bdc4be2f5..ff6722ecd48a 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -148,6 +148,9 @@ def on_train_end(self, last, best, plots, epoch, results): if self.tb: import cv2 + import numpy as np + + cv2.imread = lambda x: cv2.imdecode(np.fromfile(x, np.uint8), cv2.IMREAD_COLOR) # remap for Chinese files for f in files: self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats='HWC') From a0a4adf6de4de3d9d5ac00c23796c844a8e57200 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 24 Mar 2022 11:31:22 +0100 Subject: [PATCH 737/757] Add PyTorch Hub `results.save(labels=False)` option (#7129) Resolves https://github.com/ultralytics/yolov5/issues/388#issuecomment-1077121821 --- models/common.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/models/common.py b/models/common.py index 0286c74fe8cd..115e3c3145ff 100644 --- a/models/common.py +++ b/models/common.py @@ -131,7 +131,7 @@ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, nu c_ = int(c2 * e) # hidden channels self.cv1 = Conv(c1, c_, 1, 1) self.cv2 = Conv(c1, c_, 1, 1) - self.cv3 = Conv(2 * c_, c2, 1) # act=FReLU(c2) + self.cv3 = Conv(2 * c_, c2, 1) # optional act=FReLU(c2) self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n))) # self.m = nn.Sequential(*(CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n))) @@ -589,7 +589,7 @@ def __init__(self, imgs, pred, files, times=(0, 0, 0, 0), names=None, shape=None self.t = tuple((times[i + 1] - times[i]) * 1000 / self.n for i in range(3)) # timestamps (ms) self.s = shape # inference BCHW shape - def display(self, pprint=False, show=False, save=False, crop=False, render=False, save_dir=Path('')): + def display(self, pprint=False, show=False, save=False, crop=False, render=False, labels=True, save_dir=Path('')): crops = [] for i, (im, pred) in enumerate(zip(self.imgs, self.pred)): s = f'image {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} ' # string @@ -606,7 +606,7 @@ def display(self, pprint=False, show=False, save=False, crop=False, render=False crops.append({'box': box, 'conf': conf, 'cls': cls, 'label': label, 'im': save_one_box(box, im, file=file, save=save)}) else: # all others - annotator.box_label(box, label, color=colors(cls)) + annotator.box_label(box, label if labels else '', color=colors(cls)) im = annotator.im else: s += '(no detections)' @@ -633,19 +633,19 @@ def print(self): LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' % self.t) - def show(self): - self.display(show=True) # show results + def show(self, labels=True): + self.display(show=True, labels=labels) # 
show results - def save(self, save_dir='runs/detect/exp'): + def save(self, labels=True, save_dir='runs/detect/exp'): save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) # increment save_dir - self.display(save=True, save_dir=save_dir) # save results + self.display(save=True, labels=labels, save_dir=save_dir) # save results def crop(self, save=True, save_dir='runs/detect/exp'): save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) if save else None return self.display(crop=True, save=save, save_dir=save_dir) # crop results - def render(self): - self.display(render=True) # render results + def render(self, labels=True): + self.display(render=True, labels=labels) # render results return self.imgs def pandas(self): From 8eaecd23aa4490ce02c5ad1a0872c073cabc3205 Mon Sep 17 00:00:00 2001 From: Konstantin Date: Fri, 25 Mar 2022 13:45:08 -0400 Subject: [PATCH 738/757] SparseML integration --- detect.py | 3 +- export.py | 186 ++++++++++++++++++++++++++--- models/common.py | 48 ++++---- models/yolo.py | 16 ++- train.py | 129 ++++++++++++-------- utils/activations.py | 17 +++ utils/downloads.py | 3 + utils/general.py | 8 +- utils/loggers/wandb/wandb_utils.py | 4 + utils/torch_utils.py | 30 ++++- val.py | 3 +- 11 files changed, 346 insertions(+), 101 deletions(-) diff --git a/detect.py b/detect.py index ccb9fbf5103f..559b3414f506 100644 --- a/detect.py +++ b/detect.py @@ -45,6 +45,7 @@ increment_path, non_max_suppression, print_args, scale_coords, strip_optimizer, xyxy2xywh) from utils.plots import Annotator, colors, save_one_box from utils.torch_utils import select_device, time_sync +from export import load_checkpoint @torch.no_grad() @@ -89,7 +90,7 @@ def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s) # Load model device = select_device(device) - model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) + model, extras = load_checkpoint(type_='val', weights=weights, device=device) # load FP32 model stride, names, pt = model.stride, model.names, model.pt imgsz = check_img_size(imgsz, s=stride) # check image size diff --git a/export.py b/export.py index 2d4a68e62f89..078b3c6940f0 100644 --- a/export.py +++ b/export.py @@ -43,6 +43,7 @@ """ import argparse +from copy import deepcopy import json import os import platform @@ -57,20 +58,26 @@ import torch.nn as nn from torch.utils.mobile_optimizer import optimize_for_mobile +from sparseml.pytorch.utils import ModuleExporter +from sparseml.pytorch.sparsification.quantization import skip_onnx_input_quantize + FILE = Path(__file__).resolve() ROOT = FILE.parents[0] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative -from models.common import Conv +from models.common import Conv, DetectMultiBackend from models.experimental import attempt_load -from models.yolo import Detect +from models.yolo import Detect, Model from utils.activations import SiLU from utils.datasets import LoadImages from utils.general import (LOGGER, check_dataset, check_img_size, check_requirements, check_version, colorstr, - file_size, print_args, url2file) -from utils.torch_utils import select_device + file_size, print_args, url2file, intersect_dicts) +from utils.torch_utils import select_device, torch_distributed_zero_first, is_parallel +from utils.downloads import attempt_download +from utils.sparse import SparseMLWrapper, check_download_sparsezoo_weights + def export_formats(): @@ 
-118,14 +125,33 @@ def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorst LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...') f = file.with_suffix('.onnx') - torch.onnx.export(model, im, f, verbose=False, opset_version=opset, - training=torch.onnx.TrainingMode.TRAINING if train else torch.onnx.TrainingMode.EVAL, - do_constant_folding=not train, - input_names=['images'], - output_names=['output'], - dynamic_axes={'images': {0: 'batch', 2: 'height', 3: 'width'}, # shape(1,3,640,640) - 'output': {0: 'batch', 1: 'anchors'} # shape(1,25200,85) - } if dynamic else None) + # export through SparseML so quantized and pruned graphs can be corrected + save_dir = f.parent.absolute() + save_name = str(f).split(os.path.sep)[-1] + + # get the number of outputs so we know how to name and change dynamic axes + # nested outputs can be returned if model is exported with dynamic + def _count_outputs(outputs): + count = 0 + if isinstance(outputs, list) or isinstance(outputs, tuple): + for out in outputs: + count += _count_outputs(out) + else: + count += 1 + return count + + outputs = model(im) + num_outputs = _count_outputs(outputs) + input_names = ['input'] + output_names = [f'out_{i}' for i in range(num_outputs)] + dynamic_axes = {k: {0: 'batch'} for k in (input_names + output_names)} if dynamic else None + exporter = ModuleExporter(model, save_dir) + exporter.export_onnx(im, name=save_name, convert_qat=True, + input_names=input_names, output_names=output_names, dynamic_axes=dynamic_axes) + try: + skip_onnx_input_quantize(f, f) + except: + pass # Checks model_onnx = onnx.load(f) # load onnx model @@ -407,6 +433,128 @@ def export_tfjs(keras_model, im, file, prefix=colorstr('TensorFlow.js:')): except Exception as e: LOGGER.info(f'\n{prefix} export failure: {e}') +def create_checkpoint(epoch, model, optimizer, ema, sparseml_wrapper, **kwargs): + pickle = not sparseml_wrapper.qat_active(epoch) # qat does not support pickled exports + ckpt_model = deepcopy(model.module if is_parallel(model) else model).float() + yaml = ckpt_model.yaml + if not pickle: + ckpt_model = ckpt_model.state_dict() + + version = 6 if isinstance([module for module in model.model.modules()][1], Conv) else 5 + + return {'epoch': epoch, + 'model': ckpt_model, + 'optimizer': optimizer.state_dict(), + 'yaml': yaml, + 'hyp': model.hyp, + 'version': version, + **ema.state_dict(pickle), + **sparseml_wrapper.state_dict(), + **kwargs} + +def load_checkpoint( + type_, + weights, + device, + cfg=None, + hyp=None, + nc=None, + data=None, + dnn=False, + half = False, + recipe=None, + resume=None, + rank=-1 + ): + with torch_distributed_zero_first(rank): + # download if not found locally or from sparsezoo if stub + weights = attempt_download(weights) or check_download_sparsezoo_weights(weights) + ckpt = torch.load(weights[0] if isinstance(weights, list) or isinstance(weights, tuple) + else weights, map_location="cpu") # load checkpoint + start_epoch = ckpt['epoch'] + 1 if 'epoch' in ckpt else 0 + pickled = isinstance(ckpt['model'], nn.Module) + train_type = type_ == 'train' + ensemble_type = type_ == 'ensemble' + val_type = type_ =='val' + + if pickled and ensemble_type: + cfg = None + if ensemble_type: + model = attempt_load(weights, map_location=device) # load ensemble using pickled + state_dict = model.state_dict() + elif val_type: + model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) + state_dict = model.model.state_dict() + else: + # load model from config and weights + 
cfg = cfg or (ckpt['yaml'] if 'yaml' in ckpt else None) or \ + (ckpt['model'].yaml if pickled else None) + model = Model(cfg, ch=3, nc=ckpt['nc'] if ('nc' in ckpt and not nc) else nc, + anchors=hyp.get('anchors') if hyp else None).to(device) + model_key = 'ema' if (not train_type and 'ema' in ckpt and ckpt['ema']) else 'model' + state_dict = ckpt[model_key].float().state_dict() if pickled else ckpt[model_key] + if val_type: + model = DetectMultiBackend(model=model, device=device, dnn=dnn, data=data, fp16=half) + + # turn gradients for params back on in case they were removed + for p in model.parameters(): + p.requires_grad = True + + # load sparseml recipe for applying pruning and quantization + recipe = recipe or (ckpt['recipe'] if 'recipe' in ckpt else None) + sparseml_wrapper = SparseMLWrapper(model.model if val_type else model, recipe) + exclude_anchors = train_type and (cfg or hyp.get('anchors')) and not resume + loaded = False + + if not train_type: + # update param names for yolov5x5 models (model.x -> model.model.x) + ''' + if ('version' not in ckpt or ckpt['version'] < 6) and sparseml_wrapper.manager is not None: + for modifier in sparseml_wrapper.manager.pruning_modifiers: + updated_params = [] + for param in modifier.params: + updated_params.append( + "model." + param if (param.startswith('model.') and + not param.startswith('model.model.')) else param + ) + modifier.params = updated_params + ''' + # apply the recipe to create the final state of the model when not training + sparseml_wrapper.apply() + else: + # intialize the recipe for training and restore the weights before if no quantized weights + quantized_state_dict = any([name.endswith('.zero_point') for name in state_dict.keys()]) + if not quantized_state_dict: + state_dict = load_state_dict(model, state_dict, train=True, exclude_anchors=exclude_anchors) + loaded = True + sparseml_wrapper.initialize(start_epoch) + + if not loaded: + state_dict = load_state_dict(model, state_dict, train=train_type, exclude_anchors=exclude_anchors) + + model.float() + report = 'Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights) + + return model, { + 'ckpt': ckpt, + 'state_dict': state_dict, + 'start_epoch': start_epoch, + 'sparseml_wrapper': sparseml_wrapper, + 'report': report, + } + + +def load_state_dict(model, state_dict, train, exclude_anchors): + # fix older state_dict names not porting to the new model setup + state_dict = {key if not key.startswith("module.") else key[7:]: val for key, val in state_dict.items()} + + if train: + # load any missing weights from the model + state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=['anchor'] if exclude_anchors else []) + + model.load_state_dict(state_dict, strict=not train) # load + + return state_dict @torch.no_grad() def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' @@ -414,7 +562,7 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' imgsz=(640, 640), # image (height, width) batch_size=1, # batch size device='cpu', # cuda device, i.e. 
0 or 0,1,2,3 or cpu - include=('torchscript', 'onnx'), # include formats + include=('onnx'), # include formats half=False, # FP16 half-precision export inplace=False, # set YOLOv5 Detect() inplace=True train=False, # model.train() mode @@ -430,7 +578,8 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' topk_per_class=100, # TF.js NMS: topk per class to keep topk_all=100, # TF.js NMS: topk for all classes to keep iou_thres=0.45, # TF.js NMS: IoU threshold - conf_thres=0.25 # TF.js NMS: confidence threshold + conf_thres=0.25, # TF.js NMS: confidence threshold + remove_grid=False, ): t = time.time() include = [x.lower() for x in include] # to lowercase @@ -443,8 +592,9 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' # Load PyTorch model device = select_device(device) assert not (device.type == 'cpu' and half), '--half only compatible with GPU export, i.e. use --device 0' - model = attempt_load(weights, map_location=device, inplace=True, fuse=True) # load FP32 model - nc, names = model.nc, model.names # number of classes, class names + model, extras = load_checkpoint(type_='ensemble', weights=weights, device=device) # load FP32 model + sparseml_wrapper = extras['sparseml_wrapper'] + nc, names = extras["ckpt"]["nc"], model.names # number of classes, class names # Checks imgsz *= 2 if len(imgsz) == 1 else 1 # expand @@ -469,6 +619,7 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' m.onnx_dynamic = dynamic if hasattr(m, 'forward_export'): m.forward = m.forward_export # assign custom forward (optional) + model.model[-1].export = not remove_grid # set Detect() layer grid export for _ in range(2): y = model(im) # dry runs @@ -541,6 +692,7 @@ def parse_opt(): parser.add_argument('--topk-all', type=int, default=100, help='TF.js NMS: topk for all classes to keep') parser.add_argument('--iou-thres', type=float, default=0.45, help='TF.js NMS: IoU threshold') parser.add_argument('--conf-thres', type=float, default=0.25, help='TF.js NMS: confidence threshold') + parser.add_argument("--remove-grid", action="store_true", help="remove export of Detect() layer grid") parser.add_argument('--include', nargs='+', default=['torchscript', 'onnx'], help='torchscript, onnx, openvino, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs') @@ -556,4 +708,4 @@ def main(opt): if __name__ == "__main__": opt = parse_opt() - main(opt) + main(opt) \ No newline at end of file diff --git a/models/common.py b/models/common.py index 115e3c3145ff..e0b783f55033 100644 --- a/models/common.py +++ b/models/common.py @@ -31,7 +31,7 @@ def autopad(k, p=None): # kernel, padding # Pad to 'same' if p is None: - p = k // 2 if isinstance(k, int) else (x // 2 for x in k) # auto-pad + p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad return p @@ -121,7 +121,7 @@ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, nu def forward(self, x): y1 = self.cv3(self.m(self.cv1(x))) y2 = self.cv2(x) - return self.cv4(self.act(self.bn(torch.cat((y1, y2), 1)))) + return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1)))) class C3(nn.Module): @@ -131,12 +131,12 @@ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, nu c_ = int(c2 * e) # hidden channels self.cv1 = Conv(c1, c_, 1, 1) self.cv2 = Conv(c1, c_, 1, 1) - self.cv3 = Conv(2 * c_, c2, 1) # optional act=FReLU(c2) + self.cv3 = Conv(2 * c_, c2, 1) # act=FReLU(c2) self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n))) - # self.m = 
nn.Sequential(*(CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n))) + # self.m = nn.Sequential(*[CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)]) def forward(self, x): - return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), 1)) + return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), dim=1)) class C3TR(C3): @@ -194,7 +194,7 @@ def forward(self, x): warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning y1 = self.m(x) y2 = self.m(y1) - return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1)) + return self.cv2(torch.cat([x, y1, y2, self.m(y2)], 1)) class Focus(nn.Module): @@ -205,7 +205,7 @@ def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, k # self.contract = Contract(gain=2) def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2) - return self.conv(torch.cat((x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]), 1)) + return self.conv(torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1)) # return self.conv(self.contract(x)) @@ -219,7 +219,7 @@ def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, s def forward(self, x): y = self.cv1(x) - return torch.cat((y, self.cv2(y)), 1) + return torch.cat([y, self.cv2(y)], 1) class GhostBottleneck(nn.Module): @@ -277,7 +277,7 @@ def forward(self, x): class DetectMultiBackend(nn.Module): # YOLOv5 MultiBackend class for python inference on various backends - def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, data=None, fp16=False): + def __init__(self, weights='yolov5s.pt', model=None, device=torch.device('cpu'), dnn=False, data=None, fp16=False): # Usage: # PyTorch: weights = *.pt # TorchScript: *.torchscript @@ -303,11 +303,11 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, names = yaml.safe_load(f)['names'] # class names if pt: # PyTorch - model = attempt_load(weights if isinstance(weights, list) else w, map_location=device) + model = model or (attempt_load(weights if isinstance(weights, list) else w, map_location=device)) stride = max(int(model.stride.max()), 32) # model stride names = model.module.names if hasattr(model, 'module') else model.names # get class names model.half() if fp16 else model.float() - self.model = model # explicitly assign for to(), cpu(), cuda(), half() + self.model = model.model # explicitly assign for to(), cpu(), cuda(), half() elif jit: # TorchScript LOGGER.info(f'Loading {w} for TorchScript inference...') extra_files = {'config.txt': ''} # model metadata @@ -527,7 +527,7 @@ def forward(self, imgs, size=640, augment=False, profile=False): p = next(self.model.parameters()) if self.pt else torch.zeros(1) # for device and type autocast = self.amp and (p.device.type != 'cpu') # Automatic Mixed Precision (AMP) inference if isinstance(imgs, torch.Tensor): # torch - with amp.autocast(autocast): + with amp.autocast(enabled=autocast): return self.model(imgs.to(p.device).type_as(p), augment, profile) # inference # Pre-process @@ -550,19 +550,19 @@ def forward(self, imgs, size=640, augment=False, profile=False): shape1.append([y * g for y in s]) imgs[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update shape1 = [make_divisible(x, self.stride) if self.pt else size for x in np.array(shape1).max(0)] # inf shape - x = [letterbox(im, shape1, auto=False)[0] for im in imgs] # pad + x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs] # pad x = 
np.ascontiguousarray(np.array(x).transpose((0, 3, 1, 2))) # stack and BHWC to BCHW x = torch.from_numpy(x).to(p.device).type_as(p) / 255 # uint8 to fp16/32 t.append(time_sync()) - with amp.autocast(autocast): + with amp.autocast(enabled=autocast): # Inference y = self.model(x, augment, profile) # forward t.append(time_sync()) # Post-process - y = non_max_suppression(y if self.dmb else y[0], self.conf, self.iou, self.classes, self.agnostic, - self.multi_label, max_det=self.max_det) # NMS + y = non_max_suppression(y if self.dmb else y[0], self.conf, iou_thres=self.iou, classes=self.classes, + agnostic=self.agnostic, multi_label=self.multi_label, max_det=self.max_det) # NMS for i in range(n): scale_coords(shape1, y[i][:, :4], shape0[i]) @@ -589,7 +589,7 @@ def __init__(self, imgs, pred, files, times=(0, 0, 0, 0), names=None, shape=None self.t = tuple((times[i + 1] - times[i]) * 1000 / self.n for i in range(3)) # timestamps (ms) self.s = shape # inference BCHW shape - def display(self, pprint=False, show=False, save=False, crop=False, render=False, labels=True, save_dir=Path('')): + def display(self, pprint=False, show=False, save=False, crop=False, render=False, save_dir=Path('')): crops = [] for i, (im, pred) in enumerate(zip(self.imgs, self.pred)): s = f'image {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} ' # string @@ -606,7 +606,7 @@ def display(self, pprint=False, show=False, save=False, crop=False, render=False crops.append({'box': box, 'conf': conf, 'cls': cls, 'label': label, 'im': save_one_box(box, im, file=file, save=save)}) else: # all others - annotator.box_label(box, label if labels else '', color=colors(cls)) + annotator.box_label(box, label, color=colors(cls)) im = annotator.im else: s += '(no detections)' @@ -633,19 +633,19 @@ def print(self): LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' % self.t) - def show(self, labels=True): - self.display(show=True, labels=labels) # show results + def show(self): + self.display(show=True) # show results - def save(self, labels=True, save_dir='runs/detect/exp'): + def save(self, save_dir='runs/detect/exp'): save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) # increment save_dir - self.display(save=True, labels=labels, save_dir=save_dir) # save results + self.display(save=True, save_dir=save_dir) # save results def crop(self, save=True, save_dir='runs/detect/exp'): save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) if save else None return self.display(crop=True, save=save, save_dir=save_dir) # crop results - def render(self, labels=True): - self.display(render=True, labels=labels) # render results + def render(self): + self.display(render=True) # render results return self.imgs def pandas(self): diff --git a/models/yolo.py b/models/yolo.py index 9f4701c49f9d..f08d41ce1585 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -19,6 +19,7 @@ from models.common import * from models.experimental import * +from utils.activations import replace_activations from utils.autoanchor import check_anchor_order from utils.general import LOGGER, check_version, check_yaml, make_divisible, print_args from utils.plots import feature_visualization @@ -33,6 +34,7 @@ class Detect(nn.Module): stride = None # strides computed during build onnx_dynamic = False # ONNX export parameter + export = True # onnx export def __init__(self, nc=80, anchors=(), ch=(), inplace=True): # detection layer super().__init__() @@ -53,7 +55,7 @@ 
def forward(self, x): bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() - if not self.training: # inference + if not self.training and self.export: # inference if self.onnx_dynamic or self.grid[i].shape[2:4] != x[i].shape[2:4]: self.grid[i], self.anchor_grid[i] = self._make_grid(nx, ny, i) @@ -67,7 +69,7 @@ def forward(self, x): y = torch.cat((xy, wh, y[..., 4:]), -1) z.append(y.view(bs, -1, self.no)) - return x if self.training else (torch.cat(z, 1), x) + return x if self.training or not self.export else (torch.cat(z, 1), x) def _make_grid(self, nx=20, ny=20, i=0): d = self.anchors[i].device @@ -291,7 +293,15 @@ def parse_model(d, ch): # model_dict, input_channels(3) if i == 0: ch = [] ch.append(c2) - return nn.Sequential(*layers), sorted(save) + + model = nn.Sequential(*layers) + + # override all activations in model if provided in config + if 'act' in d: + LOGGER.info(f'overriding activations in model to {d["act"]}') + replace_activations(model, d["act"]) + + return model, sorted(save) if __name__ == '__main__': diff --git a/train.py b/train.py index 60be962d447f..a263d1e9c996 100644 --- a/train.py +++ b/train.py @@ -40,6 +40,7 @@ import val # for end-of-epoch mAP from models.experimental import attempt_load +from export import load_checkpoint, create_checkpoint from models.yolo import Model from utils.autoanchor import check_anchors from utils.autobatch import check_train_batch_size @@ -56,6 +57,7 @@ from utils.metrics import fitness from utils.plots import plot_evolve, plot_labels from utils.torch_utils import EarlyStopping, ModelEMA, de_parallel, select_device, torch_distributed_zero_first +from utils.sparse import SparseMLWrapper LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html RANK = int(os.getenv('RANK', -1)) @@ -85,9 +87,9 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Save run settings if not evolve: with open(save_dir / 'hyp.yaml', 'w') as f: - yaml.safe_dump(hyp, f, sort_keys=False) + yaml.dump(hyp, f, sort_keys=False) with open(save_dir / 'opt.yaml', 'w') as f: - yaml.safe_dump(vars(opt), f, sort_keys=False) + yaml.dump(vars(opt), f, sort_keys=False) # Loggers data_dict = None @@ -105,6 +107,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Config plots = not evolve # create plots cuda = device.type != 'cpu' + half_precision = cuda init_seeds(1 + RANK) with torch_distributed_zero_first(LOCAL_RANK): data_dict = data_dict or check_dataset(data) # check if None @@ -115,20 +118,27 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary is_coco = isinstance(val_path, str) and val_path.endswith('coco/val2017.txt') # COCO dataset # Model - check_suffix(weights, '.pt') # check weights - pretrained = weights.endswith('.pt') + check_suffix(weights, ['.pt', '.pth']) # check weights + pretrained = weights.endswith('.pt') or weights.endswith('.pth') or weights.startswith('zoo:') if pretrained: - with torch_distributed_zero_first(LOCAL_RANK): - weights = attempt_download(weights) # download if not found locally - ckpt = torch.load(weights, map_location='cpu') # load checkpoint to CPU to avoid CUDA memory leak - model = Model(cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create - exclude = ['anchor'] if (cfg or hyp.get('anchors')) and not resume else [] # exclude keys - csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 - csd = intersect_dicts(csd, model.state_dict(), 
exclude=exclude) # intersect - model.load_state_dict(csd, strict=False) # load - LOGGER.info(f'Transferred {len(csd)}/{len(model.state_dict())} items from {weights}') # report + model, extras = load_checkpoint( + type_ = 'train', + weights=weights, + device=device, + cfg=opt.cfg, + hyp=hyp, + nc=nc, + recipe=opt.recipe, + resume=opt.resume, + rank=LOCAL_RANK + ) + ckpt, state_dict, sparseml_wrapper = extras['ckpt'], extras['state_dict'], extras['sparseml_wrapper'] + LOGGER.info(extras['report']) else: model = Model(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create + sparseml_wrapper = SparseMLWrapper(model, opt.recipe) + sparseml_wrapper.initialize(start_epoch=0.0) + ckpt = None # Freeze freeze = [f'model.{x}.' for x in (freeze if len(freeze) > 1 else range(freeze[0]))] # layers to freeze @@ -183,11 +193,22 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) # plot_lr_scheduler(optimizer, scheduler, epochs) # EMA - ema = ModelEMA(model) if RANK in [-1, 0] else None + ema = ModelEMA(model, enabled=not opt.disable_ema) if RANK in [-1, 0] else None # Resume start_epoch, best_fitness = 0, 0.0 if pretrained: + # Epochs + start_epoch = ckpt['epoch'] + 1 + if opt.resume: + assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % (weights, epochs) + if epochs < start_epoch: + LOGGER.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' % + (weights, ckpt['epoch'], epochs)) + epochs += ckpt['epoch'] # finetune additional epochs + if sparseml_wrapper.qat_active(start_epoch): + ema.enabled = False + # Optimizer if ckpt['optimizer'] is not None: optimizer.load_state_dict(ckpt['optimizer']) @@ -198,15 +219,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary ema.ema.load_state_dict(ckpt['ema'].float().state_dict()) ema.updates = ckpt['updates'] - # Epochs - start_epoch = ckpt['epoch'] + 1 - if resume: - assert start_epoch > 0, f'{weights} training to {epochs} epochs is finished, nothing to resume.' - if epochs < start_epoch: - LOGGER.info(f"{weights} has been trained for {ckpt['epoch']} epochs. 
Fine-tuning for {epochs} more epochs.") - epochs += ckpt['epoch'] # finetune additional epochs - - del ckpt, csd + del ckpt # DP mode if cuda and RANK == -1 and torch.cuda.device_count() > 1: @@ -247,7 +260,6 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Anchors if not opt.noautoanchor: check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz) - model.half().float() # pre-reduce anchor precision callbacks.run('on_pretrain_routine_end') @@ -273,15 +285,29 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary last_opt_step = -1 maps = np.zeros(nc) # mAP per class results = (0, 0, 0, 0, 0, 0, 0) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls) - scheduler.last_epoch = start_epoch - 1 # do not move - scaler = amp.GradScaler(enabled=cuda) + if scheduler: + scheduler.last_epoch = start_epoch - 1 # do not mov + scaler = amp.GradScaler(enabled=half_precision) stopper = EarlyStopping(patience=opt.patience) compute_loss = ComputeLoss(model) # init loss class LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n' f'Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n' f"Logging results to {colorstr('bold', save_dir)}\n" f'Starting training for {epochs} epochs...') + + # SparseML Integration + if RANK in [-1, 0]: + sparseml_wrapper.initialize_loggers(loggers.logger, loggers.tb, loggers.wandb) + scaler = sparseml_wrapper.modify(scaler, optimizer, model, train_loader) + scheduler = sparseml_wrapper.check_lr_override(scheduler, RANK) + epochs = sparseml_wrapper.check_epoch_override(epochs, RANK) + for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------ + if sparseml_wrapper.qat_active(epoch): + LOGGER.info('Disabling half precision and EMA, QAT scheduled to run') + half_precision = False + scaler._enabled = False + ema.enabled = False model.train() # Update image weights (optional, single-GPU only) @@ -313,7 +339,8 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round()) for j, x in enumerate(optimizer.param_groups): # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0 - x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch)]) + if scheduler: + x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch)]) if 'momentum' in x: x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']]) @@ -326,7 +353,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary imgs = nn.functional.interpolate(imgs, size=ns, mode='bilinear', align_corners=False) # Forward - with amp.autocast(enabled=cuda): + with amp.autocast(enabled=half_precision): pred = model(imgs) # forward loss, loss_items = compute_loss(pred, targets.to(device)) # loss scaled by batch_size if RANK != -1: @@ -345,6 +372,11 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary if ema: ema.update(model) last_opt_step = ni + elif hasattr(scaler, "emulated_step"): + # Call for SparseML integration since the number of steps per epoch can vary + # This keeps the number of steps per epoch equivalent to the number of batches per epoch + # Does not step the scaler or the optimizer + scaler.emulated_step() # Log if RANK in [-1, 0]: @@ -359,7 +391,8 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Scheduler lr = [x['lr'] for x in optimizer.param_groups] # for loggers - scheduler.step() + if scheduler: + scheduler.step() if RANK in [-1, 0]: # mAP @@ -376,25 +409,23 @@ def 
train(hyp, # path/to/hyp.yaml or hyp dictionary save_dir=save_dir, plots=False, callbacks=callbacks, - compute_loss=compute_loss) + compute_loss=compute_loss, + half=half_precision) # Update best mAP fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95] - if fi > best_fitness: + if fi > best_fitness or sparseml_wrapper.reset_best(epoch): best_fitness = fi log_vals = list(mloss) + list(results) + lr callbacks.run('on_fit_epoch_end', log_vals, epoch, best_fitness, fi) # Save model - if (not nosave) or (final_epoch and not evolve): # if save - ckpt = {'epoch': epoch, - 'best_fitness': best_fitness, - 'model': deepcopy(de_parallel(model)).half(), - 'ema': deepcopy(ema.ema).half(), - 'updates': ema.updates, - 'optimizer': optimizer.state_dict(), - 'wandb_id': loggers.wandb.wandb_run.id if loggers.wandb else None, - 'date': datetime.now().isoformat()} + if (not opt.nosave) or (final_epoch and not opt.evolve): # if save + ckpt_extras = {'nc': nc, + 'best_fitness': best_fitness, + 'wandb_id': loggers.wandb.wandb_run.id if loggers.wandb else None, + 'date': datetime.now().isoformat()} + ckpt = create_checkpoint(epoch, model, optimizer, ema, sparseml_wrapper, **ckpt_extras) # Save last, best and delete torch.save(ckpt, last) @@ -422,7 +453,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # end epoch ---------------------------------------------------------------------------------------------------- # end training ----------------------------------------------------------------------------------------------------- if RANK in [-1, 0]: - LOGGER.info(f'\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.') + LOGGER.info(f'\n{epochs - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.') for f in last, best: if f.exists(): strip_optimizer(f) # strip optimizers @@ -431,7 +462,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary results, _, _ = val.run(data_dict, batch_size=batch_size // WORLD_SIZE * 2, imgsz=imgsz, - model=attempt_load(f, device).half(), + model=load_checkpoint(type_='ensemble', weights=best, device=device)[0], iou_thres=0.65 if is_coco else 0.60, # best pycocotools results at 0.65 single_cls=single_cls, dataloader=val_loader, @@ -440,7 +471,8 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary verbose=True, plots=True, callbacks=callbacks, - compute_loss=compute_loss) # val best model with plots + compute_loss=compute_loss, # val best model with plots + half=half_precision) if is_coco: callbacks.run('on_fit_epoch_end', list(mloss) + list(results) + lr, epoch, best_fitness, fi) @@ -491,6 +523,9 @@ def parse_opt(known=False): parser.add_argument('--upload_dataset', nargs='?', const=True, default=False, help='W&B: Upload data, "val" option') parser.add_argument('--bbox_interval', type=int, default=-1, help='W&B: Set bounding-box image logging interval') parser.add_argument('--artifact_alias', type=str, default='latest', help='W&B: Version of dataset artifact to use') + parser.add_argument('--recipe', type=str, default=None, help='Path to a sparsification recipe, ' + 'see https://github.com/neuralmagic/sparseml for more information') + parser.add_argument('--disable-ema', action='store_true', help='Disable EMA model updates (enabled by default)') opt = parser.parse_known_args()[0] if known else parser.parse_args() return opt @@ -508,7 +543,7 @@ def main(opt, callbacks=Callbacks()): ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run() # specified or most 
recent path assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist' with open(Path(ckpt).parent.parent / 'opt.yaml', errors='ignore') as f: - opt = argparse.Namespace(**yaml.safe_load(f)) # replace + opt = argparse.Namespace(**yaml.load(f, Loader=yaml.SafeLoader)) # replace opt.cfg, opt.weights, opt.resume = '', ckpt, True # reinstate LOGGER.info(f'Resuming training from {ckpt}') else: @@ -518,8 +553,8 @@ def main(opt, callbacks=Callbacks()): if opt.evolve: if opt.project == str(ROOT / 'runs/train'): # if default project name, rename to runs/evolve opt.project = str(ROOT / 'runs/evolve') - opt.exist_ok, opt.resume = opt.resume, False # pass resume to exist_ok and disable resume - opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) + opt.exist_ok, opt.resume = opt.resume, False # pass resume to exist_ok and disable resume + opt.save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok) # increment run # DDP mode device = select_device(opt.device, batch_size=opt.batch_size) @@ -575,7 +610,7 @@ def main(opt, callbacks=Callbacks()): 'copy_paste': (1, 0.0, 1.0)} # segment copy-paste (probability) with open(opt.hyp, errors='ignore') as f: - hyp = yaml.safe_load(f) # load hyps dict + hyp = yaml.load(f, Loader=yaml.SafeLoader) # load hyps dict if 'anchors' not in hyp: # anchors commented in hyp.yaml hyp['anchors'] = 3 opt.noval, opt.nosave, save_dir = True, True, Path(opt.save_dir) # only val/save final epoch diff --git a/utils/activations.py b/utils/activations.py index a4ff789cf336..b119d915e54c 100644 --- a/utils/activations.py +++ b/utils/activations.py @@ -7,6 +7,23 @@ import torch.nn as nn import torch.nn.functional as F +def is_activation(mod, act_types=None): + if not act_types: + act_types = (nn.ELU, nn.Hardshrink, nn.Hardsigmoid, nn.Hardtanh, nn.Hardswish, nn.LeakyReLU, + nn.LogSigmoid, nn.PReLU, nn.ReLU, nn.ReLU6, nn.RReLU, nn.SELU, nn.CELU, nn.GELU, + nn.Sigmoid, nn.SiLU, nn.Softplus, nn.Softshrink, nn.Softsign, nn.Tanh, nn.Tanhshrink, + SiLU, Hardswish, Mish, MemoryEfficientMish, FReLU) + + return isinstance(mod, act_types) + + +def replace_activations(mod, act, act_types=None): + for name, child in mod.named_children(): + if is_activation(child, act_types): + child_act = act if not isinstance(act, str) else eval(act)() + setattr(mod, name, child_act) + else: + replace_activations(child, act, act_types) # SiLU https://arxiv.org/pdf/1606.08415.pdf ---------------------------------------------------------------------------- class SiLU(nn.Module): # export-friendly version of nn.SiLU() diff --git a/utils/downloads.py b/utils/downloads.py index d7b87cb2cadd..714ffb2a0452 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -42,6 +42,9 @@ def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''): def attempt_download(file, repo='ultralytics/yolov5'): # from utils.downloads import *; attempt_download() # Attempt file download if does not exist + if not isinstance(file, (Path, str)) or str(file).startswith("zoo:"): + return + file = Path(str(file).strip().replace("'", '')) if not file.exists(): diff --git a/utils/general.py b/utils/general.py index b0c5e9d69ab7..aeea4f3792c6 100755 --- a/utils/general.py +++ b/utils/general.py @@ -803,9 +803,11 @@ def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_op for k in 'optimizer', 'best_fitness', 'wandb_id', 'ema', 'updates': # keys x[k] = None x['epoch'] = -1 - x['model'].half() # to FP16 - for p in x['model'].parameters(): - 
p.requires_grad = False + pickled = isinstance(x['model'], torch.nn.Module) + if pickled: + x['model'].half() # to FP16 + for p in x['model'].parameters(): + p.requires_grad = False torch.save(x, s or f) mb = os.path.getsize(s or f) / 1E6 # filesize LOGGER.info(f"Optimizer stripped from {f},{(' saved as %s,' % s) if s else ''} {mb:.1f}MB") diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index 786e58a19972..a2c7102bce14 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -169,6 +169,10 @@ def __init__(self, opt, run_id=None, job_type='Training'): if opt.upload_dataset: if not opt.resume: self.wandb_artifact_data_dict = self.check_and_upload_dataset(opt) + self.wandb_run.config.update({ + 'opt': vars(opt), + 'data_dict': self.wandb_artifact_data_dict + }, allow_val_change=True) if opt.resume: # resume from artifact diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 72f8a0fd1659..02698e656481 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -285,27 +285,47 @@ class ModelEMA: For EMA details see https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage """ - def __init__(self, model, decay=0.9999, tau=2000, updates=0): + def __init__(self, model, decay=0.9999, tau=2000, updates=0, enabled=True): # Create EMA - self.ema = deepcopy(de_parallel(model)).eval() # FP32 EMA + self._model = model + self._ema = deepcopy(de_parallel(model)).eval() # FP32 EMA # if next(model.parameters()).device.type != 'cpu': # self.ema.half() # FP16 EMA self.updates = updates # number of EMA updates self.decay = lambda x: decay * (1 - math.exp(-x / tau)) # decay exponential ramp (to help early epochs) - for p in self.ema.parameters(): + self.enabled = enabled + for p in self._ema.parameters(): p.requires_grad_(False) + @property + def ema(self): + if not self.enabled: + return deepcopy(self._model.module if is_parallel(self._model) else self._model).eval() + return self._ema + + def state_dict(self, pickle=True): + ema = deepcopy(self.ema).float() + return { + 'ema': ema if pickle else ema.state_dict(), + 'updates': self.updates, + } + def update(self, model): + self._model = model + if not self.enabled: + return # Update EMA parameters with torch.no_grad(): + msd = model.module.state_dict() if is_parallel(model) else model.state_dict() # model state_dict self.updates += 1 d = self.decay(self.updates) - msd = de_parallel(model).state_dict() # model state_dict for k, v in self.ema.state_dict().items(): if v.dtype.is_floating_point: + mv = msd[k].detach() v *= d - v += (1 - d) * msd[k].detach() + v += (1. 
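The rewritten update, completed just below with `v *= mv != 0`, makes the EMA pruning-aware: any parameter the live model holds at exactly zero is forced back to zero in the shadow copy, so sparsity imposed by a pruning recipe survives averaging. A toy rendering of one update step (illustrative values only):

```python
import torch

d = 0.99                                    # decay for this update
ema_w = torch.tensor([0.50, 0.40, 0.30])    # EMA shadow weights (v)
model_w = torch.tensor([0.60, 0.00, 0.20])  # live weights (mv); index 1 was pruned

ema_w *= d
ema_w += (1. - d) * model_w
ema_w *= model_w != 0  # pruned entries stay exactly zero in the EMA copy
print(ema_w)  # tensor([0.5010, 0.0000, 0.2990])
```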
- d) * mv + v *= mv != 0 # preserve pruned parameters in model (equal to 0) def update_attr(self, model, include=(), exclude=('process_group', 'reducer')): # Update EMA attributes diff --git a/val.py b/val.py index 2dd2aec679f9..a7503a50f247 100644 --- a/val.py +++ b/val.py @@ -35,6 +35,7 @@ sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative +from export import load_checkpoint from models.common import DetectMultiBackend from utils.callbacks import Callbacks from utils.datasets import create_dataloader @@ -135,7 +136,7 @@ def run(data, (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model - model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) + model, extras = load_checkpoint(type_='val', weights=weights, device=device) # load FP32 model stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine imgsz = check_img_size(imgsz, s=stride) # check image size half = model.fp16 # FP16 supported on limited backends with CUDA From 1f7355281cc32b15f01849c97c89e92f24b04feb Mon Sep 17 00:00:00 2001 From: Konstantin Date: Fri, 25 Mar 2022 13:57:29 -0400 Subject: [PATCH 739/757] Add SparseML dependency --- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index 96fc9d1a1f32..818b9d30bdfd 100755 --- a/requirements.txt +++ b/requirements.txt @@ -35,3 +35,4 @@ seaborn>=0.11.0 # pycocotools>=2.0 # COCO mAP # roboflow thop # FLOPs computation +sparseml[torch,torchvision]>=0.11 # Pruning and Quantization From 4db2a15cf62ef76756cbb3611a93cfef5f450d3a Mon Sep 17 00:00:00 2001 From: Konstantin Date: Sat, 26 Mar 2022 10:54:09 -0400 Subject: [PATCH 740/757] Update: add missing files --- models_v5.0/yolov5l.yaml | 48 +++++++++++++ models_v5.0/yolov5m.yaml | 48 +++++++++++++ models_v5.0/yolov5s.yaml | 48 +++++++++++++ models_v5.0/yolov5x.yaml | 48 +++++++++++++ utils/sparse.py | 141 +++++++++++++++++++++++++++++++++++++++ 5 files changed, 333 insertions(+) create mode 100644 models_v5.0/yolov5l.yaml create mode 100644 models_v5.0/yolov5m.yaml create mode 100644 models_v5.0/yolov5s.yaml create mode 100644 models_v5.0/yolov5x.yaml create mode 100644 utils/sparse.py diff --git a/models_v5.0/yolov5l.yaml b/models_v5.0/yolov5l.yaml new file mode 100644 index 000000000000..71ebf86e5791 --- /dev/null +++ b/models_v5.0/yolov5l.yaml @@ -0,0 +1,48 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 1, SPP, [1024, [5, 9, 13]]], + [-1, 3, C3, [1024, False]], # 9 + ] + +# YOLOv5 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + 
[-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/models_v5.0/yolov5m.yaml b/models_v5.0/yolov5m.yaml new file mode 100644 index 000000000000..3c749c916246 --- /dev/null +++ b/models_v5.0/yolov5m.yaml @@ -0,0 +1,48 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 0.67 # model depth multiple +width_multiple: 0.75 # layer channel multiple + +# anchors +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 1, SPP, [1024, [5, 9, 13]]], + [-1, 3, C3, [1024, False]], # 9 + ] + +# YOLOv5 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/models_v5.0/yolov5s.yaml b/models_v5.0/yolov5s.yaml new file mode 100644 index 000000000000..aca669d60d8b --- /dev/null +++ b/models_v5.0/yolov5s.yaml @@ -0,0 +1,48 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 0.33 # model depth multiple +width_multiple: 0.50 # layer channel multiple + +# anchors +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 1, SPP, [1024, [5, 9, 13]]], + [-1, 3, C3, [1024, False]], # 9 + ] + +# YOLOv5 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/models_v5.0/yolov5x.yaml b/models_v5.0/yolov5x.yaml new file mode 100644 index 000000000000..d3babdf7baf0 --- /dev/null +++ b/models_v5.0/yolov5x.yaml @@ -0,0 +1,48 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.33 # model depth multiple 
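The four models_v5.0 configs differ only in these two multiples: `depth_multiple` scales how often each block repeats and `width_multiple` scales channel counts. A minimal sketch of how the multiples are applied (assuming the standard YOLOv5 `parse_model()` scaling rules; not code from this patch):

```python
import math

def make_divisible(x, divisor=8):
    # round channels up to the nearest multiple of `divisor`
    return math.ceil(x / divisor) * divisor

def scale(n, c, depth_multiple, width_multiple):
    n = max(round(n * depth_multiple), 1) if n > 1 else n  # repeat count
    c = make_divisible(c * width_multiple, 8)              # output channels
    return n, c

# a backbone entry declared as [-1, 9, C3, [512]]:
print(scale(9, 512, 0.33, 0.50))  # yolov5s -> (3, 256)
print(scale(9, 512, 1.33, 1.25))  # yolov5x -> (12, 640)
```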
+width_multiple: 1.25 # layer channel multiple + +# anchors +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 1, SPP, [1024, [5, 9, 13]]], + [-1, 3, C3, [1024, False]], # 9 + ] + +# YOLOv5 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/utils/sparse.py b/utils/sparse.py new file mode 100644 index 000000000000..73a3e29e5259 --- /dev/null +++ b/utils/sparse.py @@ -0,0 +1,141 @@ +import math + +from sparsezoo import Zoo +from sparseml.pytorch.optim import ScheduledModifierManager +from sparseml.pytorch.utils import SparsificationGroupLogger + +from utils.torch_utils import is_parallel + + +def _get_model_framework_file(model, path): + transfer_request = 'recipe_type=transfer' in path + checkpoint_available = any([file.checkpoint for file in model.framework_files]) + final_available = any([not file.checkpoint for file in model.framework_files]) + + if transfer_request and checkpoint_available: + # checkpoints are saved for transfer learning use cases, + # return checkpoint if available and requested + return [file for file in model.framework_files if file.checkpoint][0] + elif final_available: + # default to returning final state, if available + return [file for file in model.framework_files if not file.checkpoint][0] + + raise ValueError(f"Could not find a valid framework file for {path}") + + +def check_download_sparsezoo_weights(path): + if isinstance(path, str): + if path.startswith("zoo:"): + # load model from the SparseZoo and override the path with the new download + model = Zoo.load_model_from_stub(path) + file = _get_model_framework_file(model, path) + path = file.downloaded_path() + + return path + + if isinstance(path, list): + return [check_download_sparsezoo_weights(p) for p in path] + + return path + + +class SparseMLWrapper(object): + def __init__(self, model, recipe): + self.enabled = bool(recipe) + self.model = model.module if is_parallel(model) else model + self.recipe = recipe + self.manager = ScheduledModifierManager.from_yaml(recipe) if self.enabled else None + self.logger = None + + def state_dict(self): + return { + 'recipe': str(self.manager) if self.enabled else None, + } + + def apply(self): + if not self.enabled: + return + + self.manager.apply(self.model) + + def initialize(self, start_epoch): + if not self.enabled: + return + + self.manager.initialize(self.model, start_epoch) + + def initialize_loggers(self, logger, tb_writer, wandb_logger): + self.logger = logger + + if not self.enabled: + return + + def _logging_lambda(tag, value, values, step, wall_time, 
level): + if not wandb_logger or not wandb_logger.wandb: + return + + if value is not None: + wandb_logger.log({tag: value}) + + if values: + wandb_logger.log(values) + + self.manager.initialize_loggers([ + SparsificationGroupLogger( + lambda_func=_logging_lambda, + tensorboard=tb_writer, + ) + ]) + + if wandb_logger and wandb_logger.wandb: + artifact = wandb_logger.wandb.Artifact('recipe', type='recipe') + with artifact.new_file('recipe.yaml') as file: + file.write(str(self.manager)) + wandb_logger.wandb.log_artifact(artifact) + + def modify(self, scaler, optimizer, model, dataloader): + if not self.enabled: + return scaler + + return self.manager.modify(model, optimizer, steps_per_epoch=len(dataloader), wrap_optim=scaler) + + def check_lr_override(self, scheduler, rank): + # Override lr scheduler if recipe makes any LR updates + if self.enabled and self.manager.learning_rate_modifiers: + if rank in [0,-1]: + self.logger.info('Disabling LR scheduler, managing LR using SparseML recipe') + scheduler = None + + return scheduler + + def check_epoch_override(self, epochs, rank): + # Override num epochs if recipe explicitly modifies epoch range + if self.enabled and self.manager.epoch_modifiers and self.manager.max_epochs: + if rank in [0,-1]: + self.logger.info(f'Overriding number of epochs from SparseML manager to {epochs}') + epochs = self.manager.max_epochs or epochs # override num_epochs + + return epochs + + def qat_active(self, epoch): + if not self.enabled or not self.manager.quantization_modifiers: + return False + + qat_start = min([mod.start_epoch for mod in self.manager.quantization_modifiers]) + + return qat_start < epoch + 1 + + def reset_best(self, epoch): + if not self.enabled: + return False + + # if pruning is active or quantization just started, need to reset best checkpoint + # this is in case the pruned and/or quantized model does not fully recover + pruning_start = math.floor(max([mod.start_epoch for mod in self.manager.pruning_modifiers])) \ + if self.manager.pruning_modifiers else -1 + pruning_end = math.ceil(max([mod.end_epoch for mod in self.manager.pruning_modifiers])) \ + if self.manager.pruning_modifiers else -1 + qat_start = math.floor(max([mod.start_epoch for mod in self.manager.quantization_modifiers])) \ + if self.manager.quantization_modifiers else -1 + + return (pruning_start <= epoch <= pruning_end) or epoch == qat_start \ No newline at end of file From 70fb4cd287950f49b343a6849eed43fe7668e1e4 Mon Sep 17 00:00:00 2001 From: Konstantin Gulin <66528950+KSGulin@users.noreply.github.com> Date: Wed, 30 Mar 2022 15:45:17 +0100 Subject: [PATCH 741/757] Update requirements.txt --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 818b9d30bdfd..cf72a9ea4165 100755 --- a/requirements.txt +++ b/requirements.txt @@ -35,4 +35,4 @@ seaborn>=0.11.0 # pycocotools>=2.0 # COCO mAP # roboflow thop # FLOPs computation -sparseml[torch,torchvision]>=0.11 # Pruning and Quantization +sparseml[torch,torchvision]>=0.12 # Pruning and Quantization From bf225ec63be543b30e70e5fb344b3101d368a4fa Mon Sep 17 00:00:00 2001 From: Konstantin Date: Wed, 30 Mar 2022 13:36:17 -0400 Subject: [PATCH 742/757] Update: sparseml-nightly support --- requirements.txt | 2 +- utils/general.py | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 818b9d30bdfd..cf72a9ea4165 100755 --- a/requirements.txt +++ b/requirements.txt @@ -35,4 +35,4 @@ seaborn>=0.11.0 # pycocotools>=2.0 # 
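`reset_best()` above clears the best-checkpoint record throughout the pruning window and again when quantization starts, because mAP routinely dips and then recovers inside those phases; without the reset, a dense pre-pruning checkpoint would keep winning. A toy check of the window arithmetic (hypothetical modifier epochs):

```python
import math

# hypothetical recipe: pruning runs over epochs 2.5-10.0, quantization starts at 12.0
pruning_start = math.floor(2.5)  # 2
pruning_end = math.ceil(10.0)    # 10
qat_start = math.floor(12.0)     # 12

def reset_best(epoch):
    return (pruning_start <= epoch <= pruning_end) or epoch == qat_start

print([e for e in range(15) if reset_best(e)])  # [2, 3, 4, 5, 6, 7, 8, 9, 10, 12]
```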
COCO mAP # roboflow thop # FLOPs computation -sparseml[torch,torchvision]>=0.11 # Pruning and Quantization +sparseml[torch,torchvision]>=0.12 # Pruning and Quantization diff --git a/utils/general.py b/utils/general.py index aeea4f3792c6..dcdbf95ddca1 100755 --- a/utils/general.py +++ b/utils/general.py @@ -319,6 +319,10 @@ def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), insta n = 0 # number of packages updates for r in requirements: + if r.startswith("sparseml"): + version = r.split("sparseml")[1] + if pkg.working_set.find(pkg.Requirement("sparseml-nightly" + version)): + continue try: pkg.require(r) except Exception: # DistributionNotFound or VersionConflict if requirements not met From 6d3667abd8e8c2c2edacf74129a9f87b169c38d6 Mon Sep 17 00:00:00 2001 From: Konstantin Date: Wed, 30 Mar 2022 14:41:41 -0400 Subject: [PATCH 743/757] Update: remove model versioning --- export.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/export.py b/export.py index 078b3c6940f0..ab609664a2b2 100644 --- a/export.py +++ b/export.py @@ -440,14 +440,11 @@ def create_checkpoint(epoch, model, optimizer, ema, sparseml_wrapper, **kwargs): if not pickle: ckpt_model = ckpt_model.state_dict() - version = 6 if isinstance([module for module in model.model.modules()][1], Conv) else 5 - return {'epoch': epoch, 'model': ckpt_model, 'optimizer': optimizer.state_dict(), 'yaml': yaml, 'hyp': model.hyp, - 'version': version, **ema.state_dict(pickle), **sparseml_wrapper.state_dict(), **kwargs} From 28579c88fc7649844923f12caba0c990047c959a Mon Sep 17 00:00:00 2001 From: Konstantin Date: Tue, 5 Apr 2022 12:46:33 -0400 Subject: [PATCH 744/757] Partial update for multi-stage recipes --- export.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/export.py b/export.py index ab609664a2b2..0c5bf9c0c1cf 100644 --- a/export.py +++ b/export.py @@ -498,7 +498,7 @@ def load_checkpoint( p.requires_grad = True # load sparseml recipe for applying pruning and quantization - recipe = recipe or (ckpt['recipe'] if 'recipe' in ckpt else None) + recipe = (ckpt['recipe'] if ('recipe' in ckpt) else None) if resume else recipe sparseml_wrapper = SparseMLWrapper(model.model if val_type else model, recipe) exclude_anchors = train_type and (cfg or hyp.get('anchors')) and not resume loaded = False From e5999d577cd4172bea7ac78cca00020d467c74f5 Mon Sep 17 00:00:00 2001 From: Konstantin Date: Wed, 6 Apr 2022 13:12:16 -0400 Subject: [PATCH 745/757] Update: multi-stage recipe support --- export.py | 17 +++-------------- utils/sparse.py | 11 +++++++---- 2 files changed, 10 insertions(+), 18 deletions(-) diff --git a/export.py b/export.py index 0c5bf9c0c1cf..5af8553f04b7 100644 --- a/export.py +++ b/export.py @@ -498,24 +498,13 @@ def load_checkpoint( p.requires_grad = True # load sparseml recipe for applying pruning and quantization - recipe = (ckpt['recipe'] if ('recipe' in ckpt) else None) if resume else recipe - sparseml_wrapper = SparseMLWrapper(model.model if val_type else model, recipe) + recipe_new = (ckpt['recipe'] if ('recipe' in ckpt) else None) if resume else recipe + recipe_base = None if resume else ckpt['recipe'] + sparseml_wrapper = SparseMLWrapper(model.model if val_type else model, recipe_new, recipe_base) exclude_anchors = train_type and (cfg or hyp.get('anchors')) and not resume loaded = False if not train_type: - # update param names for yolov5x5 models (model.x -> model.model.x) - ''' - if ('version' not in ckpt or ckpt['version'] < 6) and sparseml_wrapper.manager is not None: - for modifier 
in sparseml_wrapper.manager.pruning_modifiers: - updated_params = [] - for param in modifier.params: - updated_params.append( - "model." + param if (param.startswith('model.') and - not param.startswith('model.model.')) else param - ) - modifier.params = updated_params - ''' # apply the recipe to create the final state of the model when not training sparseml_wrapper.apply() else: diff --git a/utils/sparse.py b/utils/sparse.py index 73a3e29e5259..95652045f21f 100644 --- a/utils/sparse.py +++ b/utils/sparse.py @@ -40,11 +40,14 @@ def check_download_sparsezoo_weights(path): class SparseMLWrapper(object): - def __init__(self, model, recipe): - self.enabled = bool(recipe) + def __init__(self, model, recipe_new, recipe_base = None): + self.enabled = bool(recipe_new) self.model = model.module if is_parallel(model) else model - self.recipe = recipe - self.manager = ScheduledModifierManager.from_yaml(recipe) if self.enabled else None + if self.enabled: + self.manager = (ScheduledModifierManager.compose_staged(recipe_base, recipe_new) + if recipe_base else ScheduledModifierManager.from_yaml(recipe_new)) + else: + self.manager = None self.logger = None def state_dict(self): From 3218a78be333cde71364d639fae9c2bdbe35e1d1 Mon Sep 17 00:00:00 2001 From: Konstantin Date: Wed, 6 Apr 2022 16:04:15 -0400 Subject: [PATCH 746/757] Update: remove sparseml dep --- requirements.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index cf72a9ea4165..96fc9d1a1f32 100755 --- a/requirements.txt +++ b/requirements.txt @@ -35,4 +35,3 @@ seaborn>=0.11.0 # pycocotools>=2.0 # COCO mAP # roboflow thop # FLOPs computation -sparseml[torch,torchvision]>=0.12 # Pruning and Quantization From bbbcf6b09df95eec33015bec7e2bef8f89a9ea5b Mon Sep 17 00:00:00 2001 From: Konstantin Date: Wed, 6 Apr 2022 18:08:09 -0400 Subject: [PATCH 747/757] Fix: multi-stage recipe handling --- export.py | 17 +++++++++++++---- train.py | 5 +++-- utils/sparse.py | 13 ++++++------- 3 files changed, 22 insertions(+), 13 deletions(-) diff --git a/export.py b/export.py index 5af8553f04b7..394779b77b22 100644 --- a/export.py +++ b/export.py @@ -433,8 +433,10 @@ def export_tfjs(keras_model, im, file, prefix=colorstr('TensorFlow.js:')): except Exception as e: LOGGER.info(f'\n{prefix} export failure: {e}') -def create_checkpoint(epoch, model, optimizer, ema, sparseml_wrapper, **kwargs): +def create_checkpoint(epoch, model, optimizer, ema, sparseml_wrapper, additional_recipe, **kwargs): pickle = not sparseml_wrapper.qat_active(epoch) # qat does not support pickled exports + if additional_recipe is not None: + sparseml_wrapper.add_stage(additional_recipe) ckpt_model = deepcopy(model.module if is_parallel(model) else model).float() yaml = ckpt_model.yaml if not pickle: @@ -498,9 +500,13 @@ def load_checkpoint( p.requires_grad = True # load sparseml recipe for applying pruning and quantization - recipe = (ckpt['recipe'] if ('recipe' in ckpt) else None) if resume else recipe + additional_recipe = None + if resume: + recipe = ckpt['recipe'] if ('recipe' in ckpt) else None + elif ckpt['recipe'] or recipe: + recipe, additional_recipe = (ckpt['recipe'], recipe) if (ckpt['recipe'] and recipe) else ((ckpt['recipe'] or recipe), None) + sparseml_wrapper = SparseMLWrapper(model.model if val_type else model, recipe) exclude_anchors = train_type and (cfg or hyp.get('anchors')) and not 
resume loaded = False @@ -513,6 +519,8 @@ load_checkpoint( if not quantized_state_dict: state_dict = load_state_dict(model, state_dict, train=True, exclude_anchors=exclude_anchors) loaded = True + if not resume: + start_epoch = sparseml_wrapper.manager.max_epochs + 1 sparseml_wrapper.initialize(start_epoch) if not loaded: @@ -527,6 +535,7 @@ load_checkpoint( 'start_epoch': start_epoch, 'sparseml_wrapper': sparseml_wrapper, 'report': report, + 'additional_recipe': additional_recipe } diff --git a/train.py b/train.py index a263d1e9c996..665d856717e5 100644 --- a/train.py +++ b/train.py @@ -132,7 +132,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary resume=opt.resume, rank=LOCAL_RANK ) - ckpt, state_dict, sparseml_wrapper = extras['ckpt'], extras['state_dict'], extras['sparseml_wrapper'] + ckpt, state_dict, sparseml_wrapper, start_epoch = extras['ckpt'], extras['state_dict'], extras['sparseml_wrapper'], extras['start_epoch'] LOGGER.info(extras['report']) else: model = Model(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create @@ -424,7 +424,8 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary ckpt_extras = {'nc': nc, 'best_fitness': best_fitness, 'wandb_id': loggers.wandb.wandb_run.id if loggers.wandb else None, - 'date': datetime.now().isoformat()} + 'date': datetime.now().isoformat(), + 'additional_recipe': extras["additional_recipe"]} ckpt = create_checkpoint(epoch, model, optimizer, ema, sparseml_wrapper, **ckpt_extras) # Save last, best and delete diff --git a/utils/sparse.py b/utils/sparse.py index 95652045f21f..9530283e7370 100644 --- a/utils/sparse.py +++ b/utils/sparse.py @@ -40,14 +40,10 @@ def check_download_sparsezoo_weights(path): class SparseMLWrapper(object): - def __init__(self, model, recipe_new, recipe_base = None): - self.enabled = bool(recipe_new) + def __init__(self, model, recipe): + self.enabled = bool(recipe) self.model = model.module if is_parallel(model) else model - if self.enabled: - self.manager = (ScheduledModifierManager.compose_staged(recipe_base, recipe_new) - if recipe_base else ScheduledModifierManager.from_yaml(recipe_new)) - else: - self.manager = None + self.manager = ScheduledModifierManager.from_yaml(recipe) if self.enabled else None self.logger = None def state_dict(self): @@ -102,6 +98,9 @@ def modify(self, scaler, optimizer, model, dataloader): return self.manager.modify(model, optimizer, steps_per_epoch=len(dataloader), wrap_optim=scaler) + def add_stage(self, additional_recipe): + self.manager = ScheduledModifierManager.compose_staged(self.manager, additional_recipe) + def check_lr_override(self, scheduler, rank): # Override lr scheduler if recipe makes any LR updates if self.enabled and self.manager.learning_rate_modifiers: From 140ee49cc3138b0521fa771e2a5ab6eb58c9980f Mon Sep 17 00:00:00 2001 From: Konstantin Date: Thu, 7 Apr 2022 16:34:39 -0400 Subject: [PATCH 748/757] Fix: multi-stage support --- export.py | 21 +++++++-------------- requirements.txt | 1 + train.py | 15 ++++++--------- utils/loggers/__init__.py | 5 ++++- utils/sparse.py | 30 +++++++++++++++++++----------- 5 files changed, 37 insertions(+), 35 deletions(-) diff --git a/export.py b/export.py index 394779b77b22..ce377c758c04 100644 --- a/export.py +++ b/export.py @@ -433,10 +433,8 @@ def export_tfjs(keras_model, im, file, prefix=colorstr('TensorFlow.js:')): except Exception as e: LOGGER.info(f'\n{prefix} export failure: {e}') -def 
create_checkpoint(epoch, model, optimizer, ema, sparseml_wrapper, additional_recipe, **kwargs): +def create_checkpoint(epoch, model, optimizer, ema, sparseml_wrapper, **kwargs): pickle = not sparseml_wrapper.qat_active(epoch) # qat does not support pickled exports - if additional_recipe is not None: - sparseml_wrapper.add_stage(additional_recipe) ckpt_model = deepcopy(model.module if is_parallel(model) else model).float() yaml = ckpt_model.yaml if not pickle: @@ -500,27 +498,23 @@ def load_checkpoint( p.requires_grad = True # load sparseml recipe for applying pruning and quantization - additional_recipe = None + checkpoint_recipe = None if resume: - recipe = ckpt['recipe'] if ('recipe' in ckpt) else None + train_recipe = ckpt['recipe'] if ('recipe' in ckpt) else None elif ckpt['recipe'] or recipe: - recipe, additional_recipe = (ckpt['recipe'], recipe) if (ckpt['recipe'] and recipe) else ((ckpt['recipe'] or recipe), None) + train_recipe, checkpoint_recipe = recipe, ckpt['recipe'] - sparseml_wrapper = SparseMLWrapper(model.model if val_type else model, recipe) + sparseml_wrapper = SparseMLWrapper(model.model if val_type else model, checkpoint_recipe, train_recipe) exclude_anchors = train_type and (cfg or hyp.get('anchors')) and not resume loaded = False - if not train_type: - # apply the recipe to create the final state of the model when not training - sparseml_wrapper.apply() - else: + sparseml_wrapper.apply(ckpt['epoch'] if 'epoch' in ckpt else 0) + if train_type: # initialize the recipe for training and restore the weights before if no quantized weights quantized_state_dict = any([name.endswith('.zero_point') for name in state_dict.keys()]) if not quantized_state_dict: state_dict = load_state_dict(model, state_dict, train=True, exclude_anchors=exclude_anchors) loaded = True - if not resume: - start_epoch = sparseml_wrapper.manager.max_epochs + 1 sparseml_wrapper.initialize(start_epoch) if not loaded: @@ -535,7 +529,6 @@ load_checkpoint( 'start_epoch': start_epoch, 'sparseml_wrapper': sparseml_wrapper, 'report': report, - 'additional_recipe': additional_recipe } diff --git a/requirements.txt b/requirements.txt index 96fc9d1a1f32..ab1c44f64132 100755 --- a/requirements.txt +++ b/requirements.txt @@ -35,3 +35,4 @@ seaborn>=0.11.0 # pycocotools>=2.0 # COCO mAP # roboflow thop # FLOPs computation +sparseml[torch, torchvision] >= 0.12 \ No newline at end of file diff --git a/train.py b/train.py index 665d856717e5..e8ed8984d710 100644 --- a/train.py +++ b/train.py @@ -136,8 +136,8 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary LOGGER.info(extras['report']) else: model = Model(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create - sparseml_wrapper = SparseMLWrapper(model, opt.recipe) - sparseml_wrapper.initialize(start_epoch=0.0) + sparseml_wrapper = SparseMLWrapper(model, None, opt.recipe) + sparseml_wrapper.initialize(start_epoch=0) ckpt = None # Freeze @@ -196,16 +196,14 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary ema = ModelEMA(model, enabled=not opt.disable_ema) if RANK in [-1, 0] else None # Resume - start_epoch, best_fitness = 0, 0.0 + start_epoch, best_fitness = sparseml_wrapper.start_epoch, 0.0 if pretrained: - # Epochs - start_epoch = ckpt['epoch'] + 1 if opt.resume: assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % (weights, epochs) if epochs < start_epoch: LOGGER.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' 
% - (weights, ckpt['epoch'], epochs)) - epochs += ckpt['epoch'] # finetune additional epochs + (weights, start_epoch-1, epochs)) + epochs += start_epoch # finetune additional epochs if sparseml_wrapper.qat_active(start_epoch): ema.enabled = False @@ -424,8 +422,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary ckpt_extras = {'nc': nc, 'best_fitness': best_fitness, 'wandb_id': loggers.wandb.wandb_run.id if loggers.wandb else None, - 'date': datetime.now().isoformat(), - 'additional_recipe': extras["additional_recipe"]} + 'date': datetime.now().isoformat()} ckpt = create_checkpoint(epoch, model, optimizer, ema, sparseml_wrapper, **ckpt_extras) # Save last, best and delete diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index ff6722ecd48a..3b2230c02a14 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -87,7 +87,10 @@ def on_train_batch_end(self, ni, model, imgs, targets, paths, plots, sync_bn): if not sync_bn: # tb.add_graph() --sync known issue https://github.com/ultralytics/yolov5/issues/3754 with warnings.catch_warnings(): warnings.simplefilter('ignore') # suppress jit trace warning - self.tb.add_graph(torch.jit.trace(de_parallel(model), imgs[0:1], strict=False), []) + try: + self.tb.add_graph(torch.jit.trace(de_parallel(model), imgs[0:1], strict=False), []) + except Exception: + warnings.warn("Couldn't create quantized graph for Tensorboard") if ni < 3: f = self.save_dir / f'train_batch{ni}.jpg' # filename Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start() diff --git a/utils/sparse.py b/utils/sparse.py index 9530283e7370..d624554a9ca6 100644 --- a/utils/sparse.py +++ b/utils/sparse.py @@ -40,28 +40,39 @@ def check_download_sparsezoo_weights(path): class SparseMLWrapper(object): - def __init__(self, model, recipe): - self.enabled = bool(recipe) + def __init__(self, model, checkpoint_recipe, train_recipe): + self.enabled = bool(checkpoint_recipe or train_recipe) self.model = model.module if is_parallel(model) else model - self.manager = ScheduledModifierManager.from_yaml(recipe) if self.enabled else None + self.checkpoint_manager = ScheduledModifierManager.from_yaml(checkpoint_recipe) if checkpoint_recipe else None + self.manager = ScheduledModifierManager.from_yaml(train_recipe) if train_recipe else None self.logger = None + self.start_epoch = None def state_dict(self): + if self.checkpoint_manager: + manager = ScheduledModifierManager.compose_staged(self.checkpoint_manager, self.manager) + else: + manager = self.manager return { - 'recipe': str(self.manager) if self.enabled else None, + 'recipe': str(manager) if self.enabled else None, } - def apply(self): + def apply(self, epoch): if not self.enabled: return - self.manager.apply(self.model) + if epoch < 0: + epoch = math.inf + + if self.checkpoint_manager: + self.checkpoint_manager.apply_structure(self.model, epoch) def initialize(self, start_epoch): if not self.enabled: return - + self.manager.initialize(self.model, start_epoch) + self.start_epoch = start_epoch def initialize_loggers(self, logger, tb_writer, wandb_logger): self.logger = logger @@ -98,9 +109,6 @@ def modify(self, scaler, optimizer, model, dataloader): return self.manager.modify(model, optimizer, steps_per_epoch=len(dataloader), wrap_optim=scaler) - def add_stage(self, additional_recipe): - self.manager = ScheduledModifierManager.compose_staged(self.manager, additional_recipe) - def check_lr_override(self, scheduler, rank): # Override lr scheduler if recipe makes any LR updates if self.enabled 
and self.manager.learning_rate_modifiers: @@ -115,7 +123,7 @@ def check_epoch_override(self, epochs, rank): if self.enabled and self.manager.epoch_modifiers and self.manager.max_epochs: if rank in [0,-1]: self.logger.info(f'Overriding number of epochs from SparseML manager to {epochs}') - epochs = self.manager.max_epochs or epochs # override num_epochs + epochs = self.manager.max_epochs + self.start_epoch or epochs # override num_epochs return epochs From 912040caa773d70f247fc0c2ff9e385145f94258 Mon Sep 17 00:00:00 2001 From: Konstantin Date: Fri, 8 Apr 2022 06:26:44 -0400 Subject: [PATCH 749/757] Fix: non-recipe runs --- export.py | 3 +-- train.py | 5 +++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/export.py b/export.py index ce377c758c04..bdf579768908 100644 --- a/export.py +++ b/export.py @@ -498,7 +498,7 @@ def load_checkpoint( p.requires_grad = True # load sparseml recipe for applying pruning and quantization - checkpoint_recipe = None + checkpoint_recipe = train_recipe = None if resume: train_recipe = ckpt['recipe'] if ('recipe' in ckpt) else None elif ckpt['recipe'] or recipe: @@ -526,7 +526,6 @@ def load_checkpoint( return model, { 'ckpt': ckpt, 'state_dict': state_dict, - 'start_epoch': start_epoch, 'sparseml_wrapper': sparseml_wrapper, 'report': report, } diff --git a/train.py b/train.py index e8ed8984d710..738155ad1f77 100644 --- a/train.py +++ b/train.py @@ -132,7 +132,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary resume=opt.resume, rank=LOCAL_RANK ) - ckpt, state_dict, sparseml_wrapper, start_epoch = extras['ckpt'], extras['state_dict'], extras['sparseml_wrapper'], extras['start_epoch'] + ckpt, state_dict, sparseml_wrapper = extras['ckpt'], extras['state_dict'], extras['sparseml_wrapper'] LOGGER.info(extras['report']) else: model = Model(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create @@ -196,7 +196,8 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary ema = ModelEMA(model, enabled=not opt.disable_ema) if RANK in [-1, 0] else None # Resume - start_epoch, best_fitness = sparseml_wrapper.start_epoch, 0.0 + start_epoch = sparseml_wrapper.start_epoch or 0 + best_fitness = 0.0 if pretrained: if opt.resume: assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' 
% (weights, epochs) From a8dfa0f73054ba3250345182c571972947e84241 Mon Sep 17 00:00:00 2001 From: Konstantin Date: Fri, 8 Apr 2022 07:17:02 -0400 Subject: [PATCH 750/757] Add: legacy hyperparam files --- data/hyps/hyp.finetune.yaml | 38 +++++++++++++++++++++++++++++++++++++ data/hyps/hyp.scratch.yaml | 33 ++++++++++++++++++++++++++++++++ 2 files changed, 71 insertions(+) create mode 100644 data/hyps/hyp.finetune.yaml create mode 100644 data/hyps/hyp.scratch.yaml diff --git a/data/hyps/hyp.finetune.yaml b/data/hyps/hyp.finetune.yaml new file mode 100644 index 000000000000..1b84cff95c2c --- /dev/null +++ b/data/hyps/hyp.finetune.yaml @@ -0,0 +1,38 @@ +# Hyperparameters for VOC finetuning +# python train.py --batch 64 --weights yolov5m.pt --data voc.yaml --img 512 --epochs 50 +# See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials + + +# Hyperparameter Evolution Results +# Generations: 306 +# P R mAP.5 mAP.5:.95 box obj cls +# Metrics: 0.6 0.936 0.896 0.684 0.0115 0.00805 0.00146 + +lr0: 0.0032 +lrf: 0.12 +momentum: 0.843 +weight_decay: 0.00036 +warmup_epochs: 2.0 +warmup_momentum: 0.5 +warmup_bias_lr: 0.05 +box: 0.0296 +cls: 0.243 +cls_pw: 0.631 +obj: 0.301 +obj_pw: 0.911 +iou_t: 0.2 +anchor_t: 2.91 +# anchors: 3.63 +fl_gamma: 0.0 +hsv_h: 0.0138 +hsv_s: 0.664 +hsv_v: 0.464 +degrees: 0.373 +translate: 0.245 +scale: 0.898 +shear: 0.602 +perspective: 0.0 +flipud: 0.00856 +fliplr: 0.5 +mosaic: 1.0 +mixup: 0.243 diff --git a/data/hyps/hyp.scratch.yaml b/data/hyps/hyp.scratch.yaml new file mode 100644 index 000000000000..44f26b6658ae --- /dev/null +++ b/data/hyps/hyp.scratch.yaml @@ -0,0 +1,33 @@ +# Hyperparameters for COCO training from scratch +# python train.py --batch 40 --cfg yolov5m.yaml --weights '' --data coco.yaml --img 640 --epochs 300 +# See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials + + +lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3) +lrf: 0.2 # final OneCycleLR learning rate (lr0 * lrf) +momentum: 0.937 # SGD momentum/Adam beta1 +weight_decay: 0.0005 # optimizer weight decay 5e-4 +warmup_epochs: 3.0 # warmup epochs (fractions ok) +warmup_momentum: 0.8 # warmup initial momentum +warmup_bias_lr: 0.1 # warmup initial bias lr +box: 0.05 # box loss gain +cls: 0.5 # cls loss gain +cls_pw: 1.0 # cls BCELoss positive_weight +obj: 1.0 # obj loss gain (scale with pixels) +obj_pw: 1.0 # obj BCELoss positive_weight +iou_t: 0.20 # IoU training threshold +anchor_t: 4.0 # anchor-multiple threshold +# anchors: 3 # anchors per output layer (0 to ignore) +fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5) +hsv_h: 0.015 # image HSV-Hue augmentation (fraction) +hsv_s: 0.7 # image HSV-Saturation augmentation (fraction) +hsv_v: 0.4 # image HSV-Value augmentation (fraction) +degrees: 0.0 # image rotation (+/- deg) +translate: 0.1 # image translation (+/- fraction) +scale: 0.5 # image scale (+/- gain) +shear: 0.0 # image shear (+/- deg) +perspective: 0.0 # image perspective (+/- fraction), range 0-0.001 +flipud: 0.0 # image flip up-down (probability) +fliplr: 0.5 # image flip left-right (probability) +mosaic: 1.0 # image mosaic (probability) +mixup: 0.0 # image mixup (probability) From 7bdf2e66fda64556cd9b91bb8bea42fc21346635 Mon Sep 17 00:00:00 2001 From: Konstantin Date: Fri, 8 Apr 2022 07:43:05 -0400 Subject: [PATCH 751/757] Fix: add copy-paste to hyps --- data/hyps/hyp.finetune.yaml | 1 + data/hyps/hyp.scratch.yaml | 1 + 2 files changed, 2 insertions(+) diff --git a/data/hyps/hyp.finetune.yaml 
b/data/hyps/hyp.finetune.yaml index 1b84cff95c2c..3aa1923f78a6 100644 --- a/data/hyps/hyp.finetune.yaml +++ b/data/hyps/hyp.finetune.yaml @@ -36,3 +36,4 @@ flipud: 0.00856 fliplr: 0.5 mosaic: 1.0 mixup: 0.243 +copy_paste: 0.0 diff --git a/data/hyps/hyp.scratch.yaml b/data/hyps/hyp.scratch.yaml index 44f26b6658ae..e10b9893dd50 100644 --- a/data/hyps/hyp.scratch.yaml +++ b/data/hyps/hyp.scratch.yaml @@ -31,3 +31,4 @@ flipud: 0.0 # image flip up-down (probability) fliplr: 0.5 # image flip left-right (probability) mosaic: 1.0 # image mosaic (probability) mixup: 0.0 # image mixup (probability) +copy_paste: 0.0 From 20f6f91ed61e8bc37c30e86229a3677b8fa5f2fb Mon Sep 17 00:00:00 2001 From: Konstantin Date: Fri, 8 Apr 2022 09:39:54 -0400 Subject: [PATCH 752/757] Fix: nit --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index ab1c44f64132..36f39017d6af 100755 --- a/requirements.txt +++ b/requirements.txt @@ -35,4 +35,4 @@ seaborn>=0.11.0 # pycocotools>=2.0 # COCO mAP # roboflow thop # FLOPs computation -sparseml[torch, torchvision] >= 0.12 \ No newline at end of file +sparseml[torch,torchvision] >= 0.12 \ No newline at end of file From 5eadf3a40dbbf413caff3fa492dc7a31198e7d0e Mon Sep 17 00:00:00 2001 From: Benjamin Date: Fri, 8 Apr 2022 14:47:49 -0400 Subject: [PATCH 753/757] apply structure fixes --- export.py | 2 +- utils/sparse.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/export.py b/export.py index bdf579768908..f489aaa28f07 100644 --- a/export.py +++ b/export.py @@ -508,7 +508,7 @@ def load_checkpoint( exclude_anchors = train_type and (cfg or hyp.get('anchors')) and not resume loaded = False - sparseml_wrapper.apply(ckpt['epoch'] if 'epoch' in ckpt else 0) + sparseml_wrapper.apply_checkpoint_structure(float("inf")) if train_type: # initialize the recipe for training and restore the weights before if no quantized weights quantized_state_dict = any([name.endswith('.zero_point') for name in state_dict.keys()]) diff --git a/utils/sparse.py b/utils/sparse.py index d624554a9ca6..59b4640756f2 100644 --- a/utils/sparse.py +++ b/utils/sparse.py @@ -57,7 +57,7 @@ def state_dict(self): 'recipe': str(manager) if self.enabled else None, } - def apply(self, epoch): + def apply_checkpoint_structure(self, epoch): if not self.enabled: return From 011e7df5c7e4b02bfa4bb31adeba5917aaf02028 Mon Sep 17 00:00:00 2001 From: Benjamin Date: Fri, 8 Apr 2022 15:02:12 -0400 Subject: [PATCH 754/757] Squashed rebase to v6.1 upstream --- .dockerignore | 12 +- .github/ISSUE_TEMPLATE/bug-report.md | 55 - .github/ISSUE_TEMPLATE/bug-report.yml | 85 ++ .github/ISSUE_TEMPLATE/config.yml | 8 + .github/ISSUE_TEMPLATE/feature-request.md | 27 - .github/ISSUE_TEMPLATE/feature-request.yml | 50 + .github/ISSUE_TEMPLATE/question.md | 13 - .github/ISSUE_TEMPLATE/question.yml | 33 + .github/PULL_REQUEST_TEMPLATE.md | 9 + .github/SECURITY.md | 7 + .github/dependabot.yml | 31 +- .github/workflows/ci-testing.yml | 65 +- .github/workflows/codeql-analysis.yml | 66 +- .github/workflows/greetings.yml | 37 +- .github/workflows/rebase.yml | 12 +- .github/workflows/stale.yml | 30 +- .gitignore | 26 +- .pre-commit-config.yaml | 66 + CONTRIBUTING.md | 94 ++ Dockerfile | 35 +- LICENSE | 2 +- README.md | 372 ++++-- data/Argoverse.yaml | 67 + data/GlobalWheat2020.yaml | 71 +- data/Objects365.yaml | 113 ++ data/SKU-110K.yaml | 41 +- data/VOC.yaml | 80 ++ data/VisDrone.yaml | 36 +- data/argoverse_hd.yaml | 21 - data/coco.yaml | 80 +- data/coco128.yaml | 48 +++++-----
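The `apply_checkpoint_structure(float("inf"))` call introduced in the patch above pins down an ordering rule: rebuild the recipe's graph edits first, then restore weights. A sketch of that flow (hypothetical helper; assumes SparseML's `ScheduledModifierManager.from_yaml` and `apply_structure(module, epoch)` APIs):

```python
import torch
from sparseml.pytorch.optim import ScheduledModifierManager

def restore_sparse_checkpoint(model, ckpt_path):
    # hypothetical helper mirroring load_checkpoint(): structure first, weights second
    ckpt = torch.load(ckpt_path, map_location='cpu')
    if ckpt.get('recipe'):
        # re-create pruning masks / QAT observers encoded by the checkpoint recipe
        # so the incoming state_dict keys and shapes match the module
        manager = ScheduledModifierManager.from_yaml(ckpt['recipe'])
        manager.apply_structure(model, epoch=float('inf'))
    sd = ckpt['model'] if isinstance(ckpt['model'], dict) else ckpt['model'].float().state_dict()
    model.load_state_dict(sd, strict=False)
    return model
```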
data/hyp.finetune.yaml | 38 - .../hyp.Objects365.yaml} | 6 + data/hyps/hyp.VOC.yaml | 40 + data/hyps/hyp.scratch-high.yaml | 34 + .../hyp.scratch-low.yaml} | 9 +- data/hyps/hyp.scratch-med.yaml | 34 + data/objects365.yaml | 102 -- data/scripts/download_weights.sh | 20 + data/scripts/get_argoverse_hd.sh | 61 - data/scripts/get_coco.sh | 22 +- data/scripts/get_coco128.sh | 20 +- data/scripts/get_voc.sh | 116 -- data/voc.yaml | 21 - data/xView.yaml | 102 ++ detect.py | 272 ++-- export.py | 559 ++++++++ hubconf.py | 62 +- models/common.py | 521 ++++++-- models/experimental.py | 96 +- models/export.py | 272 ---- models/hub/anchors.yaml | 63 +- models/hub/yolov3-spp.yaml | 6 +- models/hub/yolov3-tiny.yaml | 6 +- models/hub/yolov3.yaml | 8 +- models/hub/yolov5-bifpn.yaml | 48 + models/hub/yolov5-fpn.yaml | 28 +- models/hub/yolov5-p2.yaml | 88 +- models/hub/yolov5-p34.yaml | 41 + models/hub/yolov5-p6.yaml | 92 +- models/hub/yolov5-p7.yaml | 98 +- models/hub/yolov5-panet.yaml | 30 +- models/hub/yolov5l6.yaml | 98 +- models/hub/yolov5m6.yaml | 98 +- models/hub/yolov5n6.yaml | 60 + models/hub/yolov5s-ghost.yaml | 48 + models/hub/yolov5s-transformer.yaml | 18 +- models/hub/yolov5s6.yaml | 98 +- models/hub/yolov5x6.yaml | 98 +- models/tf.py | 466 +++++++ models/yolo.py | 225 ++-- models/yolov5l.yaml | 18 +- models/yolov5m.yaml | 18 +- models/yolov5n.yaml | 48 + models/yolov5s.yaml | 18 +- models/yolov5x.yaml | 18 +- requirements.txt | 34 +- setup.cfg | 45 + test.py | 349 ----- train.py | 779 ++++++----- tutorial.ipynb | 861 +++++------- utility.py | 49 - utils/__init__.py | 36 + utils/activations.py | 28 +- utils/augmentations.py | 277 ++++ utils/autoanchor.py | 119 +- utils/autobatch.py | 58 + utils/aws/resume.py | 9 +- utils/aws/userdata.sh | 4 +- utils/benchmarks.py | 104 ++ utils/callbacks.py | 78 ++ utils/datasets.py | 1187 ++++++++--------- utils/{google_utils.py => downloads.py} | 89 +- utils/flask_rest_api/README.md | 11 +- utils/flask_rest_api/restapi.py | 2 +- utils/general.py | 684 ++++++---- .../additional_requirements.txt | 2 +- utils/google_app_engine/app.yaml | 2 +- utils/loggers/__init__.py | 171 +++ utils/loggers/wandb/README.md | 152 +++ .../loggers/wandb/__init__.py | 0 .../wandb}/log_dataset.py | 13 +- utils/loggers/wandb/sweep.py | 41 + utils/loggers/wandb/sweep.yaml | 143 ++ utils/loggers/wandb/wandb_utils.py | 562 ++++++++ utils/loss.py | 86 +- utils/metrics.py | 159 ++- utils/plots.py | 450 ++++--- utils/sparse.py | 139 -- utils/torch_utils.py | 289 ++-- utils/wandb_logging/__init__.py | 0 utils/wandb_logging/wandb_utils.py | 320 ----- val.py | 381 ++++++ weights/download_weights.sh | 12 - 114 files changed, 8512 insertions(+), 5149 deletions(-) delete mode 100644 .github/ISSUE_TEMPLATE/bug-report.md create mode 100644 .github/ISSUE_TEMPLATE/bug-report.yml create mode 100644 .github/ISSUE_TEMPLATE/config.yml delete mode 100644 .github/ISSUE_TEMPLATE/feature-request.md create mode 100644 .github/ISSUE_TEMPLATE/feature-request.yml delete mode 100644 .github/ISSUE_TEMPLATE/question.md create mode 100644 .github/ISSUE_TEMPLATE/question.yml create mode 100644 .github/PULL_REQUEST_TEMPLATE.md create mode 100644 .github/SECURITY.md create mode 100644 .pre-commit-config.yaml create mode 100644 CONTRIBUTING.md mode change 100755 => 100644 README.md create mode 100644 data/Argoverse.yaml create mode 100644 data/Objects365.yaml create mode 100644 data/VOC.yaml delete mode 100644 data/argoverse_hd.yaml delete mode 100644 data/hyp.finetune.yaml rename data/{hyp.finetune_objects365.yaml => 
hyps/hyp.Objects365.yaml} (58%) create mode 100644 data/hyps/hyp.VOC.yaml create mode 100644 data/hyps/hyp.scratch-high.yaml rename data/{hyp.scratch.yaml => hyps/hyp.scratch-low.yaml} (80%) create mode 100644 data/hyps/hyp.scratch-med.yaml delete mode 100644 data/objects365.yaml create mode 100755 data/scripts/download_weights.sh delete mode 100644 data/scripts/get_argoverse_hd.sh delete mode 100644 data/scripts/get_voc.sh delete mode 100644 data/voc.yaml create mode 100644 data/xView.yaml create mode 100644 export.py delete mode 100644 models/export.py create mode 100644 models/hub/yolov5-bifpn.yaml create mode 100644 models/hub/yolov5-p34.yaml create mode 100644 models/hub/yolov5n6.yaml create mode 100644 models/hub/yolov5s-ghost.yaml create mode 100644 models/tf.py create mode 100644 models/yolov5n.yaml create mode 100644 setup.cfg delete mode 100644 test.py delete mode 100644 utility.py create mode 100644 utils/augmentations.py create mode 100644 utils/autobatch.py create mode 100644 utils/benchmarks.py create mode 100644 utils/callbacks.py rename utils/{google_utils.py => downloads.py} (56%) create mode 100644 utils/loggers/__init__.py create mode 100644 utils/loggers/wandb/README.md rename __init__.py => utils/loggers/wandb/__init__.py (100%) rename utils/{wandb_logging => loggers/wandb}/log_dataset.py (61%) create mode 100644 utils/loggers/wandb/sweep.py create mode 100644 utils/loggers/wandb/sweep.yaml create mode 100644 utils/loggers/wandb/wandb_utils.py delete mode 100644 utils/sparse.py delete mode 100644 utils/wandb_logging/__init__.py delete mode 100644 utils/wandb_logging/wandb_utils.py create mode 100644 val.py delete mode 100755 weights/download_weights.sh diff --git a/.dockerignore b/.dockerignore index 3c6b6ab02e03..af51ccc3d8df 100644 --- a/.dockerignore +++ b/.dockerignore @@ -8,17 +8,23 @@ coco storage.googleapis.com data/samples/* -**/results*.txt +**/results*.csv *.jpg # Neural Network weights ----------------------------------------------------------------------------------------------- -**/*.weights **/*.pt **/*.pth **/*.onnx +**/*.engine **/*.mlmodel **/*.torchscript - +**/*.torchscript.pt +**/*.tflite +**/*.h5 +**/*.pb +*_saved_model/ +*_web_model/ +*_openvino_model/ # Below Copied From .gitignore ----------------------------------------------------------------------------------------- # Below Copied From .gitignore ----------------------------------------------------------------------------------------- diff --git a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md deleted file mode 100644 index 362059b288d5..000000000000 --- a/.github/ISSUE_TEMPLATE/bug-report.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -name: "🐛 Bug report" -about: Create a report to help us improve -title: '' -labels: bug -assignees: '' - ---- - -Before submitting a bug report, please be aware that your issue **must be reproducible** with all of the following, otherwise it is non-actionable, and we can not help you: - - **Current repo**: run `git fetch && git status -uno` to check and `git pull` to update repo - - **Common dataset**: coco.yaml or coco128.yaml - - **Common environment**: Colab, Google Cloud, or Docker image. See https://github.com/ultralytics/yolov5#environments - -If this is a custom dataset/training question you **must include** your `train*.jpg`, `test*.jpg` and `results.png` figures, or we can not help you. You can generate these with `utils.plot_results()`. - - -## 🐛 Bug -A clear and concise description of what the bug is. 
- - -## To Reproduce (REQUIRED) - -Input: -``` -import torch - -a = torch.tensor([5]) -c = a / 0 -``` - -Output: -``` -Traceback (most recent call last): - File "/Users/glennjocher/opt/anaconda3/envs/env1/lib/python3.7/site-packages/IPython/core/interactiveshell.py", line 3331, in run_code - exec(code_obj, self.user_global_ns, self.user_ns) - File "", line 5, in - c = a / 0 -RuntimeError: ZeroDivisionError -``` - - -## Expected behavior -A clear and concise description of what you expected to happen. - - -## Environment -If applicable, add screenshots to help explain your problem. - - - OS: [e.g. Ubuntu] - - GPU [e.g. 2080 Ti] - - -## Additional context -Add any other context about the problem here. diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml new file mode 100644 index 000000000000..fcb64138b088 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug-report.yml @@ -0,0 +1,85 @@ +name: 🐛 Bug Report +# title: " " +description: Problems with YOLOv5 +labels: [bug, triage] +body: + - type: markdown + attributes: + value: | + Thank you for submitting a YOLOv5 🐛 Bug Report! + + - type: checkboxes + attributes: + label: Search before asking + description: > + Please search the [issues](https://github.com/ultralytics/yolov5/issues) to see if a similar bug report already exists. + options: + - label: > + I have searched the YOLOv5 [issues](https://github.com/ultralytics/yolov5/issues) and found no similar bug report. + required: true + + - type: dropdown + attributes: + label: YOLOv5 Component + description: | + Please select the part of YOLOv5 where you found the bug. + multiple: true + options: + - "Training" + - "Validation" + - "Detection" + - "Export" + - "PyTorch Hub" + - "Multi-GPU" + - "Evolution" + - "Integrations" + - "Other" + validations: + required: false + + - type: textarea + attributes: + label: Bug + description: Provide console output with error messages and/or screenshots of the bug. + placeholder: | + 💡 ProTip! Include as much information as possible (screenshots, logs, tracebacks etc.) to receive the most helpful response. + validations: + required: true + + - type: textarea + attributes: + label: Environment + description: Please specify the software and hardware you used to produce the bug. + placeholder: | + - YOLO: YOLOv5 🚀 v6.0-67-g60e42e1 torch 1.9.0+cu111 CUDA:0 (A100-SXM4-40GB, 40536MiB) + - OS: Ubuntu 20.04 + - Python: 3.9.0 + validations: + required: false + + - type: textarea + attributes: + label: Minimal Reproducible Example + description: > + When asking a question, people will be better able to provide help if you provide code that they can easily understand and use to **reproduce** the problem. + This is referred to by community members as creating a [minimal reproducible example](https://stackoverflow.com/help/minimal-reproducible-example). + placeholder: | + ``` + # Code to reproduce your issue here + ``` + validations: + required: false + + - type: textarea + attributes: + label: Additional + description: Anything else you would like to share? + + - type: checkboxes + attributes: + label: Are you willing to submit a PR? + description: > + (Optional) We encourage you to submit a [Pull Request](https://github.com/ultralytics/yolov5/pulls) (PR) to help improve YOLOv5 for everyone, especially if you have a good understanding of how to implement a fix or feature. + See the YOLOv5 [Contributing Guide](https://github.com/ultralytics/yolov5/blob/master/CONTRIBUTING.md) to get started. 
+ options: + - label: Yes I'd like to help by submitting a PR! diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 000000000000..f388d7bacf66 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,8 @@ +blank_issues_enabled: true +contact_links: + - name: Slack + url: https://join.slack.com/t/ultralytics/shared_invite/zt-w29ei8bp-jczz7QYUmDtgo6r6KcMIAg + about: Ask on Ultralytics Slack Forum + - name: Stack Overflow + url: https://stackoverflow.com/search?q=YOLOv5 + about: Ask on Stack Overflow with 'YOLOv5' tag diff --git a/.github/ISSUE_TEMPLATE/feature-request.md b/.github/ISSUE_TEMPLATE/feature-request.md deleted file mode 100644 index 87db3eacbf02..000000000000 --- a/.github/ISSUE_TEMPLATE/feature-request.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -name: "🚀 Feature request" -about: Suggest an idea for this project -title: '' -labels: enhancement -assignees: '' - ---- - -## 🚀 Feature - - -## Motivation - - - -## Pitch - - - -## Alternatives - - - -## Additional context - - diff --git a/.github/ISSUE_TEMPLATE/feature-request.yml b/.github/ISSUE_TEMPLATE/feature-request.yml new file mode 100644 index 000000000000..68ef985186ef --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature-request.yml @@ -0,0 +1,50 @@ +name: 🚀 Feature Request +description: Suggest a YOLOv5 idea +# title: " " +labels: [enhancement] +body: + - type: markdown + attributes: + value: | + Thank you for submitting a YOLOv5 🚀 Feature Request! + + - type: checkboxes + attributes: + label: Search before asking + description: > + Please search the [issues](https://github.com/ultralytics/yolov5/issues) to see if a similar feature request already exists. + options: + - label: > + I have searched the YOLOv5 [issues](https://github.com/ultralytics/yolov5/issues) and found no similar feature requests. + required: true + + - type: textarea + attributes: + label: Description + description: A short description of your feature. + placeholder: | + What new feature would you like to see in YOLOv5? + validations: + required: true + + - type: textarea + attributes: + label: Use case + description: | + Describe the use case of your feature request. It will help us understand and prioritize the feature request. + placeholder: | + How would this feature be used, and who would use it? + + - type: textarea + attributes: + label: Additional + description: Anything else you would like to share? + + - type: checkboxes + attributes: + label: Are you willing to submit a PR? + description: > + (Optional) We encourage you to submit a [Pull Request](https://github.com/ultralytics/yolov5/pulls) (PR) to help improve YOLOv5 for everyone, especially if you have a good understanding of how to implement a fix or feature. + See the YOLOv5 [Contributing Guide](https://github.com/ultralytics/yolov5/blob/master/CONTRIBUTING.md) to get started. + options: + - label: Yes I'd like to help by submitting a PR! 
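As an aside on the bug template above (not part of the patch): its `Environment` field asks for Python, OS, torch and GPU details. A minimal sketch for collecting them, assuming only `torch` is installed:

```python
import platform

import torch

# Gather the basics the Environment field asks for: Python, OS, torch, CUDA device
lines = [
    f"Python: {platform.python_version()}",
    f"OS: {platform.system()} {platform.release()}",
    f"torch: {torch.__version__}",
    f"CUDA available: {torch.cuda.is_available()}",
]
if torch.cuda.is_available():
    lines.append(f"GPU: {torch.cuda.get_device_name(0)}")
print("\n".join(lines))  # paste this output into the Environment field
```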
diff --git a/.github/ISSUE_TEMPLATE/question.md b/.github/ISSUE_TEMPLATE/question.md deleted file mode 100644 index 2c22aea70a7b..000000000000 --- a/.github/ISSUE_TEMPLATE/question.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -name: "❓Question" -about: Ask a general question -title: '' -labels: question -assignees: '' - ---- - -## ❔Question - - -## Additional context diff --git a/.github/ISSUE_TEMPLATE/question.yml b/.github/ISSUE_TEMPLATE/question.yml new file mode 100644 index 000000000000..8e0993c68bab --- /dev/null +++ b/.github/ISSUE_TEMPLATE/question.yml @@ -0,0 +1,33 @@ +name: ❓ Question +description: Ask a YOLOv5 question +# title: " " +labels: [question] +body: + - type: markdown + attributes: + value: | + Thank you for asking a YOLOv5 ❓ Question! + + - type: checkboxes + attributes: + label: Search before asking + description: > + Please search the [issues](https://github.com/ultralytics/yolov5/issues) and [discussions](https://github.com/ultralytics/yolov5/discussions) to see if a similar question already exists. + options: + - label: > + I have searched the YOLOv5 [issues](https://github.com/ultralytics/yolov5/issues) and [discussions](https://github.com/ultralytics/yolov5/discussions) and found no similar questions. + required: true + + - type: textarea + attributes: + label: Question + description: What is your question? + placeholder: | + 💡 ProTip! Include as much information as possible (screenshots, logs, tracebacks etc.) to receive the most helpful response. + validations: + required: true + + - type: textarea + attributes: + label: Additional + description: Anything else you would like to share? diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 000000000000..f25b017ace8b --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,9 @@ + diff --git a/.github/SECURITY.md b/.github/SECURITY.md new file mode 100644 index 000000000000..aa3e8409da6b --- /dev/null +++ b/.github/SECURITY.md @@ -0,0 +1,7 @@ +# Security Policy + +We aim to make YOLOv5 🚀 as secure as possible! If you find potential vulnerabilities or have any concerns please let us know so we can investigate and take corrective action if needed. + +### Reporting a Vulnerability + +To report vulnerabilities please email us at hello@ultralytics.com or visit https://ultralytics.com/contact. Thank you! 
diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 9910689197f5..c1b3d5d514c3 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -1,12 +1,23 @@ version: 2 updates: -- package-ecosystem: pip - directory: "/" - schedule: - interval: weekly - time: "04:00" - open-pull-requests-limit: 10 - reviewers: - - glenn-jocher - labels: - - dependencies + - package-ecosystem: pip + directory: "/" + schedule: + interval: weekly + time: "04:00" + open-pull-requests-limit: 10 + reviewers: + - glenn-jocher + labels: + - dependencies + + - package-ecosystem: github-actions + directory: "/" + schedule: + interval: weekly + time: "04:00" + open-pull-requests-limit: 5 + reviewers: + - glenn-jocher + labels: + - dependencies diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index df508474a955..f2096ce17a17 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -1,6 +1,8 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + name: CI CPU testing -on: # https://help.github.com/en/actions/reference/events-that-trigger-workflows +on: # https://help.github.com/en/actions/reference/events-that-trigger-workflows push: branches: [ master ] pull_request: @@ -16,16 +18,16 @@ jobs: strategy: fail-fast: false matrix: - os: [ubuntu-latest, macos-latest, windows-latest] - python-version: [3.8] - model: ['yolov5s'] # models to test + os: [ ubuntu-latest, macos-latest, windows-latest ] + python-version: [ 3.9 ] + model: [ 'yolov5n' ] # models to test # Timeout: https://stackoverflow.com/a/59076067/4521646 - timeout-minutes: 50 + timeout-minutes: 60 steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v2 + uses: actions/setup-python@v3 with: python-version: ${{ matrix.python-version }} @@ -37,44 +39,55 @@ jobs: python -c "from pip._internal.locations import USER_CACHE_DIR; print('::set-output name=dir::' + USER_CACHE_DIR)" - name: Cache pip - uses: actions/cache@v1 + uses: actions/cache@v2.1.7 with: path: ${{ steps.pip-cache.outputs.dir }} key: ${{ runner.os }}-${{ matrix.python-version }}-pip-${{ hashFiles('requirements.txt') }} restore-keys: | ${{ runner.os }}-${{ matrix.python-version }}-pip- + # Known Keras 2.7.0 issue: https://github.com/ultralytics/yolov5/pull/5486 - name: Install dependencies run: | python -m pip install --upgrade pip pip install -qr requirements.txt -f https://download.pytorch.org/whl/cpu/torch_stable.html - pip install -q onnx + pip install -q onnx tensorflow-cpu keras==2.6.0 # wandb # extras python --version pip --version pip list shell: bash - - name: Download data - run: | - # curl -L -o tmp.zip https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip - # unzip -q tmp.zip -d ../ - # rm tmp.zip + # - name: W&B login + # run: wandb login 345011b3fb26dc8337fd9b20e53857c1d403f2aa + + # - name: Download data + # run: | + # curl -L -o tmp.zip https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip + # unzip -q tmp.zip -d ../datasets - name: Tests workflow run: | # export PYTHONPATH="$PWD" # to run '$ python *.py' files in subdirectories - di=cpu # inference devices # define device - - # train - python train.py --img 128 --batch 16 --weights weights/${{ matrix.model }}.pt --cfg models/${{ matrix.model }}.yaml --epochs 1 --device $di - # detect - python detect.py --weights weights/${{ matrix.model }}.pt --device $di - python detect.py --weights runs/train/exp/weights/last.pt --device $di - # 
test - python test.py --img 128 --batch 16 --weights weights/${{ matrix.model }}.pt --device $di - python test.py --img 128 --batch 16 --weights runs/train/exp/weights/last.pt --device $di + d=cpu # device + weights=runs/train/exp/weights/best.pt + # Train + python train.py --img 64 --batch 32 --weights ${{ matrix.model }}.pt --cfg ${{ matrix.model }}.yaml --epochs 1 --device $d + # Val + python val.py --img 64 --batch 32 --weights ${{ matrix.model }}.pt --device $d + python val.py --img 64 --batch 32 --weights $weights --device $d + # Detect + python detect.py --weights ${{ matrix.model }}.pt --device $d + python detect.py --weights $weights --device $d python hubconf.py # hub - python models/yolo.py --cfg models/${{ matrix.model }}.yaml # inspect - python models/export.py --img 128 --batch 1 --weights weights/${{ matrix.model }}.pt # export + # Export + python models/yolo.py --cfg ${{ matrix.model }}.yaml # build PyTorch model + python models/tf.py --weights ${{ matrix.model }}.pt # build TensorFlow model + python export.py --weights ${{ matrix.model }}.pt --img 64 --include torchscript onnx # export + # Python + python - <=1.7`. To install run: + [**Python>=3.7.0**](https://www.python.org/) with all [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) installed including [**PyTorch>=1.7**](https://pytorch.org/get-started/locally/). To get started: ```bash - $ pip install -r requirements.txt + git clone https://github.com/ultralytics/yolov5 # clone + cd yolov5 + pip install -r requirements.txt # install ``` ## Environments - + YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled): - + - **Google Colab and Kaggle** notebooks with free GPU: Open In Colab Open In Kaggle - **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart) - **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart) - **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) Docker Pulls - - + + ## Status - - ![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg) - - If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([test.py](https://github.com/ultralytics/yolov5/blob/master/test.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/models/export.py)) on MacOS, Windows, and Ubuntu every 24 hours and on every commit. - + + CI CPU testing + + If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. 
CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), validation ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on MacOS, Windows, and Ubuntu every 24 hours and on every commit. diff --git a/.github/workflows/rebase.yml b/.github/workflows/rebase.yml index e86c57744b84..75c57546166b 100644 --- a/.github/workflows/rebase.yml +++ b/.github/workflows/rebase.yml @@ -1,10 +1,9 @@ -name: Automatic Rebase # https://github.com/marketplace/actions/automatic-rebase +name: Automatic Rebase on: issue_comment: types: [created] - jobs: rebase: name: Rebase @@ -12,10 +11,11 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout the latest code - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: - fetch-depth: 0 + token: ${{ secrets.ACTIONS_TOKEN }} + fetch-depth: 0 # otherwise, you will fail to push refs to dest repo - name: Automatic Rebase - uses: cirrus-actions/rebase@1.3.1 + uses: cirrus-actions/rebase@1.5 env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + GITHUB_TOKEN: ${{ secrets.ACTIONS_TOKEN }} diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 0a094e237b34..7a83950c17b7 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -1,18 +1,38 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + name: Close stale issues on: schedule: - - cron: "0 0 * * *" + - cron: '0 0 * * *' # Runs at 00:00 UTC every day jobs: stale: runs-on: ubuntu-latest steps: - - uses: actions/stale@v3 + - uses: actions/stale@v4 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - stale-issue-message: 'This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.' - stale-pr-message: 'This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.' + stale-issue-message: | + 👋 Hello, this issue has been automatically marked as stale because it has not had recent activity. Please note it will be closed if no further activity occurs. + + Access additional [YOLOv5](https://ultralytics.com/yolov5) 🚀 resources: + - **Wiki** – https://github.com/ultralytics/yolov5/wiki + - **Tutorials** – https://github.com/ultralytics/yolov5#tutorials + - **Docs** – https://docs.ultralytics.com + + Access additional [Ultralytics](https://ultralytics.com) ⚡ resources: + - **Ultralytics HUB** – https://ultralytics.com/hub + - **Vision API** – https://ultralytics.com/yolov5 + - **About Us** – https://ultralytics.com/about + - **Join Our Team** – https://ultralytics.com/work + - **Contact Us** – https://ultralytics.com/contact + + Feel free to inform us of any other **issues** you discover or **feature requests** that come to mind in the future. Pull Requests (PRs) are also always welcomed! + + Thank you for your contributions to YOLOv5 🚀 and Vision AI ⭐! + + stale-pr-message: 'This pull request has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions YOLOv5 🚀 and Vision AI ⭐.' 
days-before-stale: 30 days-before-close: 5 - exempt-issue-labels: 'documentation,tutorial' + exempt-issue-labels: 'documentation,tutorial,TODO' operations-per-run: 100 # The maximum number of operations per run, used to control rate limiting. diff --git a/.gitignore b/.gitignore index 91ce33fb931e..69a00843ea42 100755 --- a/.gitignore +++ b/.gitignore @@ -19,26 +19,23 @@ *.avi *.data *.json - *.cfg +!setup.cfg !cfg/yolov3*.cfg storage.googleapis.com runs/* data/* +data/images/* +!data/*.yaml +!data/hyps +!data/scripts +!data/images !data/images/zidane.jpg !data/images/bus.jpg -!data/coco.names -!data/coco_paper.names -!data/coco.data -!data/coco_*.data -!data/coco_*.txt -!data/trainvalno5k.shapes !data/*.sh -pycocotools/* -results*.txt -gcp_test*.sh +results*.csv # Datasets ------------------------------------------------------------------------------------------------------------- coco/ @@ -53,9 +50,16 @@ VOC/ # Neural Network weights ----------------------------------------------------------------------------------------------- *.weights *.pt +*.pb *.onnx +*.engine *.mlmodel *.torchscript +*.tflite +*.h5 +*_saved_model/ +*_web_model/ +*_openvino_model/ darknet53.conv.74 yolov3-tiny.conv.15 @@ -84,7 +88,7 @@ sdist/ var/ wheels/ *.egg-info/ -wandb/ +/wandb/ .installed.cfg *.egg diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 000000000000..526a5609fdd7 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,66 @@ +# Define hooks for code formations +# Will be applied on any updated commit files if a user has installed and linked commit hook + +default_language_version: + python: python3.8 + +# Define bot property if installed via https://github.com/marketplace/pre-commit-ci +ci: + autofix_prs: true + autoupdate_commit_msg: '[pre-commit.ci] pre-commit suggestions' + autoupdate_schedule: quarterly + # submodules: true + +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.1.0 + hooks: + - id: end-of-file-fixer + - id: trailing-whitespace + - id: check-case-conflict + - id: check-yaml + - id: check-toml + - id: pretty-format-json + - id: check-docstring-first + + - repo: https://github.com/asottile/pyupgrade + rev: v2.31.0 + hooks: + - id: pyupgrade + args: [--py36-plus] + name: Upgrade code + + - repo: https://github.com/PyCQA/isort + rev: 5.10.1 + hooks: + - id: isort + name: Sort imports + + # TODO + #- repo: https://github.com/pre-commit/mirrors-yapf + # rev: v0.31.0 + # hooks: + # - id: yapf + # name: formatting + + # TODO + #- repo: https://github.com/executablebooks/mdformat + # rev: 0.7.7 + # hooks: + # - id: mdformat + # additional_dependencies: + # - mdformat-gfm + # - mdformat-black + # - mdformat_frontmatter + + # TODO + #- repo: https://github.com/asottile/yesqa + # rev: v1.2.3 + # hooks: + # - id: yesqa + + - repo: https://github.com/PyCQA/flake8 + rev: 4.0.1 + hooks: + - id: flake8 + name: PEP8 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 000000000000..ebde03a562a0 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,94 @@ +## Contributing to YOLOv5 🚀 + +We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible, whether it's: + +- Reporting a bug +- Discussing the current state of the code +- Submitting a fix +- Proposing a new feature +- Becoming a maintainer + +YOLOv5 works so well due to our combined community effort, and for every small improvement you contribute you will be +helping push the frontiers of what's possible in AI 😃! 
+ +## Submitting a Pull Request (PR) 🛠️ + +Submitting a PR is easy! This example shows how to submit a PR for updating `requirements.txt` in 4 steps: + +### 1. Select File to Update + +Select `requirements.txt` to update by clicking on it in GitHub. +

+[PR_step1 screenshot]

+
+### 2. Click 'Edit this file'
+
+The button is in the top-right corner.
+

+[PR_step2 screenshot]

+ +### 3. Make Changes + +Change `matplotlib` version from `3.2.2` to `3.3`. +

+[PR_step3 screenshot]

+ +### 4. Preview Changes and Submit PR + +Click on the **Preview changes** tab to verify your updates. At the bottom of the screen select 'Create a **new branch** +for this commit', assign your branch a descriptive name such as `fix/matplotlib_version` and click the green **Propose +changes** button. All done, your PR is now submitted to YOLOv5 for review and approval 😃! +

+[PR_step4 screenshot]

+
+### PR recommendations
+
+To allow your work to be integrated as seamlessly as possible, we advise you to:
+
+- ✅ Verify your PR is **up-to-date with upstream/master.** If your PR is behind upstream/master an
+  automatic [GitHub Actions](https://github.com/ultralytics/yolov5/blob/master/.github/workflows/rebase.yml) merge may
+  be attempted by writing /rebase in a new comment, or by running the following code, replacing 'feature' with the name
+  of your local branch:
+
+```bash
+git remote add upstream https://github.com/ultralytics/yolov5.git
+git fetch upstream
+# git checkout feature  # <--- replace 'feature' with local branch name
+git merge upstream/master
+git push -u origin -f
+```
+
+- ✅ Verify all Continuous Integration (CI) **checks are passing**.
+- ✅ Reduce changes to the absolute **minimum** required for your bug fix or feature addition. _"It is not daily increase
+  but daily decrease, hack away the unessential. The closer to the source, the less wastage there is."_ — Bruce Lee
+
+## Submitting a Bug Report 🐛
+
+If you spot a problem with YOLOv5 please submit a Bug Report!
+
+For us to start investigating a possible problem we need to be able to reproduce it ourselves first. We've created a few
+short guidelines below to help users provide what we need in order to get started.
+
+When asking a question, people will be better able to provide help if you provide **code** that they can easily
+understand and use to **reproduce** the problem. This is referred to by community members as creating
+a [minimal reproducible example](https://stackoverflow.com/help/minimal-reproducible-example). Your code that reproduces
+the problem should be:
+
+* ✅ **Minimal** – Use as little code as possible that still produces the same problem
+* ✅ **Complete** – Provide **all** parts someone else needs to reproduce your problem in the question itself
+* ✅ **Reproducible** – Test the code you're about to provide to make sure it reproduces the problem
+
+In addition to the above requirements, for [Ultralytics](https://ultralytics.com/) to provide assistance your code
+should be:
+
+* ✅ **Current** – Verify that your code is up-to-date with current
+  GitHub [master](https://github.com/ultralytics/yolov5/tree/master), and if necessary `git pull` or `git clone` a new
+  copy to ensure your problem has not already been resolved by previous commits.
+* ✅ **Unmodified** – Your problem must be reproducible without any modifications to the codebase in this
+  repository. [Ultralytics](https://ultralytics.com/) does not provide support for custom code ⚠️.
+
+If you believe your problem meets all of the above criteria, please submit a new issue using the
+🐛 **Bug Report** [template](https://github.com/ultralytics/yolov5/issues/new/choose) and provide
+a [minimal reproducible example](https://stackoverflow.com/help/minimal-reproducible-example) to help us better
+understand and diagnose your problem.
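To make the Minimal, Complete and Reproducible criteria above concrete, here is a sketch of the kind of self-contained snippet a report might include; the model name and test image are illustrative, mirroring the PyTorch Hub example in the README:

```python
import torch

# Minimal: one pretrained model, one public test image, no custom code
model = torch.hub.load('ultralytics/yolov5', 'yolov5s')
results = model('https://ultralytics.com/images/zidane.jpg')
results.print()  # include this output (or the full traceback) in the report
```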
+ +## License + +By contributing, you agree that your contributions will be licensed under +the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/) diff --git a/Dockerfile b/Dockerfile index b47e5bbff194..304e8b2801a9 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,7 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + # Start FROM Nvidia PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch -FROM nvcr.io/nvidia/pytorch:21.03-py3 +FROM nvcr.io/nvidia/pytorch:21.10-py3 # Install linux packages RUN apt update && apt install -y zip htop screen libgl1-mesa-glx @@ -7,31 +9,36 @@ RUN apt update && apt install -y zip htop screen libgl1-mesa-glx # Install python dependencies COPY requirements.txt . RUN python -m pip install --upgrade pip -RUN pip uninstall -y nvidia-tensorboard nvidia-tensorboard-plugin-dlprof -RUN pip install --no-cache -r requirements.txt coremltools onnx gsutil notebook +RUN pip uninstall -y torch torchvision torchtext +RUN pip install --no-cache -r requirements.txt albumentations wandb gsutil notebook \ + torch==1.11.0+cu113 torchvision==0.12.0+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html +# RUN pip install --no-cache -U torch torchvision # Create working directory RUN mkdir -p /usr/src/app WORKDIR /usr/src/app # Copy contents -COPY . /usr/src/app +RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app +# COPY . /usr/src/app + +# Downloads to user config dir +ADD https://ultralytics.com/assets/Arial.ttf /root/.config/Ultralytics/ # Set environment variables -ENV HOME=/usr/src/app +# ENV HOME=/usr/src/app -# --------------------------------------------------- Extras Below --------------------------------------------------- +# Usage Examples ------------------------------------------------------------------------------------------------------- # Build and Push # t=ultralytics/yolov5:latest && sudo docker build -t $t . && sudo docker push $t -# for v in {300..303}; do t=ultralytics/coco:v$v && sudo docker build -t $t . && sudo docker push $t; done # Pull and Run # t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all $t # Pull and Run with local directory access -# t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all -v "$(pwd)"/coco:/usr/src/coco $t +# t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all -v "$(pwd)"/datasets:/usr/src/datasets $t # Kill all # sudo docker kill $(sudo docker ps -q) @@ -45,8 +52,14 @@ ENV HOME=/usr/src/app # Bash into stopped container # id=$(sudo docker ps -qa) && sudo docker start $id && sudo docker exec -it $id bash -# Send weights to GCP -# python -c "from utils.general import *; strip_optimizer('runs/train/exp0_*/weights/best.pt', 'tmp.pt')" && gsutil cp tmp.pt gs://*.pt - # Clean up # docker system prune -a --volumes + +# Update Ubuntu drivers +# https://www.maketecheasier.com/install-nvidia-drivers-ubuntu/ + +# DDP test +# python -m torch.distributed.run --nproc_per_node 2 --master_port 1 train.py --epochs 3 + +# GCP VM from Image +# docker.io/ultralytics/yolov5:latest diff --git a/LICENSE b/LICENSE index 9e419e042146..92b370f0e0e1 100644 --- a/LICENSE +++ b/LICENSE @@ -671,4 +671,4 @@ into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. 
But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
\ No newline at end of file
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/README.md b/README.md
old mode 100755
new mode 100644
index b25c6fca983c..3ebc085b6c33
--- a/README.md
+++ b/README.md
@@ -1,78 +1,162 @@
-
-
-
+
+

+ + +

+
+
+ CI CPU testing + YOLOv5 Citation + Docker Pulls +
+ Open In Colab + Open In Kaggle + Join Forum +
+ +
+

+YOLOv5 🚀 is a family of object detection architectures and models pretrained on the COCO dataset, and represents Ultralytics + open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development. +

+ + + + + +
+ +##
Documentation
+ +See the [YOLOv5 Docs](https://docs.ultralytics.com) for full documentation on training, testing and deployment. + +##
Quick Start Examples
+ +
+Install + +Clone repo and install [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) in a +[**Python>=3.7.0**](https://www.python.org/) environment, including +[**PyTorch>=1.7**](https://pytorch.org/get-started/locally/). -CI CPU testing - -This repository represents Ultralytics open-source research into future object detection methods, and incorporates lessons learned and best practices evolved over thousands of hours of training and evolution on anonymized client datasets. **All code and models are under active development, and are subject to modification or deletion without notice.** Use at your own risk. +```bash +git clone https://github.com/ultralytics/yolov5 # clone +cd yolov5 +pip install -r requirements.txt # install +``` -

-
- YOLOv5-P5 640 Figure (click to expand) - -

-
-
- Figure Notes (click to expand) - - * GPU Speed measures end-to-end time per image averaged over 5000 COCO val2017 images using a V100 GPU with batch size 32, and includes image preprocessing, PyTorch FP16 inference, postprocessing and NMS. - * EfficientDet data from [google/automl](https://github.com/google/automl) at batch size 8. - * **Reproduce** by `python test.py --task study --data coco.yaml --iou 0.7 --weights yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt`
-- **April 11, 2021**: [v5.0 release](https://github.com/ultralytics/yolov5/releases/tag/v5.0): YOLOv5-P6 1280 models, [AWS](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart), [Supervise.ly](https://github.com/ultralytics/yolov5/issues/2518) and [YouTube](https://github.com/ultralytics/yolov5/pull/2752) integrations. -- **January 5, 2021**: [v4.0 release](https://github.com/ultralytics/yolov5/releases/tag/v4.0): nn.SiLU() activations, [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme) logging, [PyTorch Hub](https://pytorch.org/hub/ultralytics_yolov5/) integration. -- **August 13, 2020**: [v3.0 release](https://github.com/ultralytics/yolov5/releases/tag/v3.0): nn.Hardswish() activations, data autodownload, native AMP. -- **July 23, 2020**: [v2.0 release](https://github.com/ultralytics/yolov5/releases/tag/v2.0): improved model definition, training and mAP. +
+Inference +Inference with YOLOv5 and [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36) +. [Models](https://github.com/ultralytics/yolov5/tree/master/models) download automatically from the latest +YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). -## Pretrained Checkpoints +```python +import torch + +# Model +model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # or yolov5m, yolov5l, yolov5x, custom + +# Images +img = 'https://ultralytics.com/images/zidane.jpg' # or file, Path, PIL, OpenCV, numpy, list + +# Inference +results = model(img) + +# Results +results.print() # or .show(), .save(), .crop(), .pandas(), etc. +``` + +
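As a usage note (not part of the patch), the `results` object returned above also exposes the raw detections; a small sketch following the PyTorch Hub tutorial linked in this section, with attribute names as documented there:

```python
import torch

model = torch.hub.load('ultralytics/yolov5', 'yolov5s')
model.conf = 0.25  # confidence threshold applied during NMS
results = model('https://ultralytics.com/images/zidane.jpg')

# results.xyxy[0]: one row per detection -> xmin, ymin, xmax, ymax, confidence, class
for *box, conf, cls in results.xyxy[0].tolist():
    print(f"{model.names[int(cls)]} {conf:.2f} {[round(v) for v in box]}")
```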
-
-[assets]: https://github.com/ultralytics/yolov5/releases
-
-Model |size<br><sup>(pixels) |mAP<sup>val<br>0.5:0.95 |mAP<sup>test<br>0.5:0.95 |mAP<sup>val<br>0.5 |Speed<br><sup>V100 (ms) | |params<br><sup>(M) |FLOPS<br><sup>640 (B)
---- |--- |--- |--- |--- |--- |---|--- |---
-[YOLOv5s][assets] |640 |36.7 |36.7 |55.4 |**2.0** | |7.3 |17.0
-[YOLOv5m][assets] |640 |44.5 |44.5 |63.1 |2.7 | |21.4 |51.3
-[YOLOv5l][assets] |640 |48.2 |48.2 |66.9 |3.8 | |47.0 |115.4
-[YOLOv5x][assets] |640 |**50.4** |**50.4** |**68.8** |6.1 | |87.7 |218.8
-| | | | | | || |
-[YOLOv5s6][assets] |1280 |43.3 |43.3 |61.9 |**4.3** | |12.7 |17.4
-[YOLOv5m6][assets] |1280 |50.5 |50.5 |68.7 |8.4 | |35.9 |52.4
-[YOLOv5l6][assets] |1280 |53.4 |53.4 |71.1 |12.3 | |77.2 |117.7
-[YOLOv5x6][assets] |1280 |**54.4** |**54.4** |**72.0** |22.4 | |141.8 |222.9
-| | | | | | || |
-[YOLOv5x6][assets] TTA |1280 |**55.0** |**55.0** |**72.0** |70.8 | |- |-
- Table Notes (click to expand) - - * APtest denotes COCO [test-dev2017](http://cocodataset.org/#upload) server results, all other AP results denote val2017 accuracy. - * AP values are for single-model single-scale unless otherwise noted. **Reproduce mAP** by `python test.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` - * SpeedGPU averaged over 5000 COCO val2017 images using a GCP [n1-standard-16](https://cloud.google.com/compute/docs/machine-types#n1_standard_machine_types) V100 instance, and includes FP16 inference, postprocessing and NMS. **Reproduce speed** by `python test.py --data coco.yaml --img 640 --conf 0.25 --iou 0.45` - * All checkpoints are trained to 300 epochs with default settings and hyperparameters (no autoaugmentation). - * Test Time Augmentation ([TTA](https://github.com/ultralytics/yolov5/issues/303)) includes reflection and scale augmentation. **Reproduce TTA** by `python test.py --data coco.yaml --img 1536 --iou 0.7 --augment` +Inference with detect.py + +`detect.py` runs inference on a variety of sources, downloading [models](https://github.com/ultralytics/yolov5/tree/master/models) automatically from +the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) and saving results to `runs/detect`. + +```bash +python detect.py --source 0 # webcam + img.jpg # image + vid.mp4 # video + path/ # directory + path/*.jpg # glob + 'https://youtu.be/Zgi9g1ksQHc' # YouTube + 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream +``` +
+
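The same entry point is importable from Python; a hedged sketch, assuming the repository root is the working directory so `detect.py` is on the path, and that its `run()` helper keeps keyword names matching the CLI flags above:

```python
import detect  # detect.py from the yolov5 repository root

# Equivalent of: python detect.py --weights yolov5s.pt --source data/images --conf-thres 0.25
detect.run(weights='yolov5s.pt', source='data/images', conf_thres=0.25)
```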
+Training -## Requirements +The commands below reproduce YOLOv5 [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh) +results. [Models](https://github.com/ultralytics/yolov5/tree/master/models) +and [datasets](https://github.com/ultralytics/yolov5/tree/master/data) download automatically from the latest +YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). Training times for YOLOv5n/s/m/l/x are +1/2/4/6/8 days on a V100 GPU ([Multi-GPU](https://github.com/ultralytics/yolov5/issues/475) times faster). Use the +largest `--batch-size` possible, or pass `--batch-size -1` for +YOLOv5 [AutoBatch](https://github.com/ultralytics/yolov5/pull/5092). Batch sizes shown for V100-16GB. -Python 3.8 or later with all [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) dependencies installed, including `torch>=1.7`. To install run: - ```bash -$ pip install -r requirements.txt +python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 128 + yolov5s 64 + yolov5m 40 + yolov5l 24 + yolov5x 16 ``` + + +
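Training can be driven the same way; a hedged sketch using the `run()` helper in `train.py`, with a small dataset and short schedule chosen purely for illustration:

```python
import train  # train.py from the yolov5 repository root

# Equivalent of: python train.py --data coco128.yaml --weights yolov5s.pt --img 640 --epochs 3 --batch-size 16
train.run(data='coco128.yaml', weights='yolov5s.pt', imgsz=640, epochs=3, batch_size=16)
```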
-## Tutorials +
+Tutorials * [Train Custom Data](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data)  🚀 RECOMMENDED -* [Tips for Best Training Results](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results)  ☘️ RECOMMENDED +* [Tips for Best Training Results](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results)  ☘️ + RECOMMENDED * [Weights & Biases Logging](https://github.com/ultralytics/yolov5/issues/1289)  🌟 NEW -* [Supervisely Ecosystem](https://github.com/ultralytics/yolov5/issues/2518)  🌟 NEW +* [Roboflow for Datasets, Labeling, and Active Learning](https://github.com/ultralytics/yolov5/issues/4975)  🌟 NEW * [Multi-GPU Training](https://github.com/ultralytics/yolov5/issues/475) * [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36)  ⭐ NEW -* [TorchScript, ONNX, CoreML Export](https://github.com/ultralytics/yolov5/issues/251) 🚀 +* [TFLite, ONNX, CoreML, TensorRT Export](https://github.com/ultralytics/yolov5/issues/251) 🚀 * [Test-Time Augmentation (TTA)](https://github.com/ultralytics/yolov5/issues/303) * [Model Ensembling](https://github.com/ultralytics/yolov5/issues/318) * [Model Pruning/Sparsity](https://github.com/ultralytics/yolov5/issues/304) @@ -80,91 +164,141 @@ $ pip install -r requirements.txt * [Transfer Learning with Frozen Layers](https://github.com/ultralytics/yolov5/issues/1314)  ⭐ NEW * [TensorRT Deployment](https://github.com/wang-xinyu/tensorrtx) +
-## Environments - -YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled): - -- **Google Colab and Kaggle** notebooks with free GPU: Open In Colab Open In Kaggle -- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart) -- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart) -- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) Docker Pulls - - -## Inference - -`detect.py` runs inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases) and saving results to `runs/detect`. -```bash -$ python detect.py --source 0 # webcam - file.jpg # image - file.mp4 # video - path/ # directory - path/*.jpg # glob - 'https://youtu.be/NUsoVlDFqZg' # YouTube video - 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream -``` - -To run inference on example images in `data/images`: -```bash -$ python detect.py --source data/images --weights yolov5s.pt --conf 0.25 +##
Environments
-Namespace(agnostic_nms=False, augment=False, classes=None, conf_thres=0.25, device='', exist_ok=False, img_size=640, iou_thres=0.45, name='exp', project='runs/detect', save_conf=False, save_txt=False, source='data/images/', update=False, view_img=False, weights=['yolov5s.pt']) -YOLOv5 v4.0-96-g83dc1b4 torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB) +Get started in seconds with our verified environments. Click each icon below for details. -Fusing layers... -Model Summary: 224 layers, 7266973 parameters, 0 gradients, 17.0 GFLOPS -image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, Done. (0.010s) -image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 1 tie, Done. (0.011s) -Results saved to runs/detect/exp2 -Done. (0.103s) -``` - + -### PyTorch Hub +##
Integrations
-Inference with YOLOv5 and [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36): -```python -import torch + -# Model -model = torch.hub.load('ultralytics/yolov5', 'yolov5s') +|Weights and Biases|Roboflow ⭐ NEW| +|:-:|:-:| +|Automatically track and visualize all your YOLOv5 training runs in the cloud with [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme)|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) | -# Image -img = 'https://ultralytics.com/images/zidane.jpg' -# Inference -results = model(img) -results.print() # or .show(), .save() -``` + -Run commands below to reproduce results on [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh) dataset (dataset auto-downloads on first use). Training times for YOLOv5s/m/l/x are 2/4/6/8 days on a single V100 (multi-GPU times faster). Use the largest `--batch-size` your GPU allows (batch sizes shown for 16 GB devices). -```bash -$ python train.py --data coco.yaml --cfg yolov5s.yaml --weights '' --batch-size 64 - yolov5m 40 - yolov5l 24 - yolov5x 16 -``` - +##
Why YOLOv5
+

+
+ YOLOv5-P5 640 Figure (click to expand) -## Citation +

+
+
+  <summary>Figure Notes (click to expand)</summary>
-[![DOI](https://zenodo.org/badge/264818686.svg)](https://zenodo.org/badge/latestdoi/264818686)
+* **COCO AP val** denotes mAP@0.5:0.95 metric measured on the 5000-image [COCO val2017](http://cocodataset.org) dataset over various inference sizes from 256 to 1536.
+* **GPU Speed** measures average inference time per image on [COCO val2017](http://cocodataset.org) dataset using an [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) V100 instance at batch-size 32.
+* **EfficientDet** data from [google/automl](https://github.com/google/automl) at batch size 8.
+* **Reproduce** by `python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt`
+</details>
+### Pretrained Checkpoints -## About Us +[assets]: https://github.com/ultralytics/yolov5/releases -Ultralytics is a U.S.-based particle physics and AI startup with over 6 years of expertise supporting government, academic and business clients. We offer a wide range of vision AI services, spanning from simple expert advice up to delivery of fully customized, end-to-end production solutions, including: -- **Cloud-based AI** systems operating on **hundreds of HD video streams in realtime.** -- **Edge AI** integrated into custom iOS and Android apps for realtime **30 FPS video inference.** -- **Custom data training**, hyperparameter evolution, and model exportation to any destination. +[TTA]: https://github.com/ultralytics/yolov5/issues/303 + +|Model |size
<br><sup>(pixels) |mAP<sup>val<br>0.5:0.95 |mAP<sup>val<br>0.5 |Speed<br><sup>CPU b1<br>(ms) |Speed<br><sup>V100 b1<br>(ms) |Speed<br><sup>V100 b32<br>(ms) |params<br><sup>(M) |FLOPs<br><sup>@640 (B)
+|--- |--- |--- |--- |--- |--- |--- |--- |---
+|[YOLOv5n][assets] |640 |28.0 |45.7 |**45** |**6.3**|**0.6**|**1.9**|**4.5**
+|[YOLOv5s][assets] |640 |37.4 |56.8 |98 |6.4 |0.9 |7.2 |16.5
+|[YOLOv5m][assets] |640 |45.4 |64.1 |224 |8.2 |1.7 |21.2 |49.0
+|[YOLOv5l][assets] |640 |49.0 |67.3 |430 |10.1 |2.7 |46.5 |109.1
+|[YOLOv5x][assets] |640 |50.7 |68.9 |766 |12.1 |4.8 |86.7 |205.7
+| | | | | | | | |
+|[YOLOv5n6][assets] |1280 |36.0 |54.4 |153 |8.1 |2.1 |3.2 |4.6
+|[YOLOv5s6][assets] |1280 |44.8 |63.7 |385 |8.2 |3.6 |12.6 |16.8
+|[YOLOv5m6][assets] |1280 |51.3 |69.3 |887 |11.1 |6.8 |35.7 |50.0
+|[YOLOv5l6][assets] |1280 |53.7 |71.3 |1784 |15.8 |10.5 |76.8 |111.4
+|[YOLOv5x6][assets]<br>+ [TTA][TTA]|1280<br>1536 |55.0<br>**55.8** |72.7<br>**72.7** |3136<br>- |26.2<br>- |19.4<br>- |140.7<br>- |209.8<br>-
-For business inquiries and professional support requests please visit us at https://www.ultralytics.com.
+<details>
+  <summary>Table Notes (click to expand)</summary>
+
+* All checkpoints are trained to 300 epochs with default settings. Nano and Small models use [hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml) hyps, all others use [hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml).
+* **mAP<sup>val</sup>** values are for single-model single-scale on [COCO val2017](http://cocodataset.org) dataset.<br>Reproduce by `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65`
+* **Speed** averaged over COCO val images using an [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) instance. NMS times (~1 ms/img) not included.<br>Reproduce by `python val.py --data coco.yaml --img 640 --task speed --batch 1`
+* **TTA** [Test Time Augmentation](https://github.com/ultralytics/yolov5/issues/303) includes reflection and scale augmentations.<br>Reproduce by `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`
+</details>
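The reproduce commands in the notes above can also be scripted; a hedged sketch via the `run()` helper in `val.py`, assuming COCO is already downloaded next to the repo and that the return values follow that function's signature:

```python
import val  # val.py from the yolov5 repository root

# Equivalent of: python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65
metrics, maps, timings = val.run(data='coco.yaml', weights='yolov5s.pt', imgsz=640,
                                 conf_thres=0.001, iou_thres=0.65)
print(metrics)  # (mp, mr, mAP@0.5, mAP@0.5:0.95, box/obj/cls losses)
```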
-**Issues should be raised directly in the repository.** For business inquiries or professional support requests please visit https://www.ultralytics.com or email Glenn Jocher at glenn.jocher@ultralytics.com. +##
Contribute
+ +We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible. Please see our [Contributing Guide](CONTRIBUTING.md) to get started, and fill out the [YOLOv5 Survey](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experiences. Thank you to all our contributors! + + + +##
Contact
+ +For YOLOv5 bugs and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues). For business inquiries or +professional support requests please visit [https://ultralytics.com/contact](https://ultralytics.com/contact). + +
+ + diff --git a/data/Argoverse.yaml b/data/Argoverse.yaml new file mode 100644 index 000000000000..312791b33a2d --- /dev/null +++ b/data/Argoverse.yaml @@ -0,0 +1,67 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ by Argo AI +# Example usage: python train.py --data Argoverse.yaml +# parent +# ├── yolov5 +# └── datasets +# └── Argoverse ← downloads here + + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] +path: ../datasets/Argoverse # dataset root dir +train: Argoverse-1.1/images/train/ # train images (relative to 'path') 39384 images +val: Argoverse-1.1/images/val/ # val images (relative to 'path') 15062 images +test: Argoverse-1.1/images/test/ # test images (optional) https://eval.ai/web/challenges/challenge-page/800/overview + +# Classes +nc: 8 # number of classes +names: ['person', 'bicycle', 'car', 'motorcycle', 'bus', 'truck', 'traffic_light', 'stop_sign'] # class names + + +# Download script/URL (optional) --------------------------------------------------------------------------------------- +download: | + import json + + from tqdm import tqdm + from utils.general import download, Path + + + def argoverse2yolo(set): + labels = {} + a = json.load(open(set, "rb")) + for annot in tqdm(a['annotations'], desc=f"Converting {set} to YOLOv5 format..."): + img_id = annot['image_id'] + img_name = a['images'][img_id]['name'] + img_label_name = img_name[:-3] + "txt" + + cls = annot['category_id'] # instance class id + x_center, y_center, width, height = annot['bbox'] + x_center = (x_center + width / 2) / 1920.0 # offset and scale + y_center = (y_center + height / 2) / 1200.0 # offset and scale + width /= 1920.0 # scale + height /= 1200.0 # scale + + img_dir = set.parents[2] / 'Argoverse-1.1' / 'labels' / a['seq_dirs'][a['images'][annot['image_id']]['sid']] + if not img_dir.exists(): + img_dir.mkdir(parents=True, exist_ok=True) + + k = str(img_dir / img_label_name) + if k not in labels: + labels[k] = [] + labels[k].append(f"{cls} {x_center} {y_center} {width} {height}\n") + + for k in labels: + with open(k, "w") as f: + f.writelines(labels[k]) + + + # Download + dir = Path('../datasets/Argoverse') # dataset root dir + urls = ['https://argoverse-hd.s3.us-east-2.amazonaws.com/Argoverse-HD-Full.zip'] + download(urls, dir=dir, delete=False) + + # Convert + annotations_dir = 'Argoverse-HD/annotations/' + (dir / 'Argoverse-1.1' / 'tracking').rename(dir / 'Argoverse-1.1' / 'images') # rename 'tracking' to 'images' + for d in "train.json", "val.json": + argoverse2yolo(dir / annotations_dir / d) # convert VisDrone annotations to YOLO labels diff --git a/data/GlobalWheat2020.yaml b/data/GlobalWheat2020.yaml index f45182b43e25..c1ba289f2833 100644 --- a/data/GlobalWheat2020.yaml +++ b/data/GlobalWheat2020.yaml @@ -1,43 +1,42 @@ -# Global Wheat 2020 dataset http://www.global-wheat.com/ -# Train command: python train.py --data GlobalWheat2020.yaml -# Default dataset location is next to YOLOv5: -# /parent_folder -# /datasets/GlobalWheat2020 -# /yolov5 - - -# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/] -train: # 3422 images - - ../datasets/GlobalWheat2020/images/arvalis_1 - - ../datasets/GlobalWheat2020/images/arvalis_2 - - ../datasets/GlobalWheat2020/images/arvalis_3 - - ../datasets/GlobalWheat2020/images/ethz_1 - - ../datasets/GlobalWheat2020/images/rres_1 - - 
../datasets/GlobalWheat2020/images/inrae_1 - - ../datasets/GlobalWheat2020/images/usask_1 - -val: # 748 images (WARNING: train set contains ethz_1) - - ../datasets/GlobalWheat2020/images/ethz_1 - -test: # 1276 images - - ../datasets/GlobalWheat2020/images/utokyo_1 - - ../datasets/GlobalWheat2020/images/utokyo_2 - - ../datasets/GlobalWheat2020/images/nau_1 - - ../datasets/GlobalWheat2020/images/uq_1 - -# number of classes -nc: 1 - -# class names -names: [ 'wheat_head' ] - - -# download command/URL (optional) -------------------------------------------------------------------------------------- +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# Global Wheat 2020 dataset http://www.global-wheat.com/ by University of Saskatchewan +# Example usage: python train.py --data GlobalWheat2020.yaml +# parent +# ├── yolov5 +# └── datasets +# └── GlobalWheat2020 ← downloads here + + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] +path: ../datasets/GlobalWheat2020 # dataset root dir +train: # train images (relative to 'path') 3422 images + - images/arvalis_1 + - images/arvalis_2 + - images/arvalis_3 + - images/ethz_1 + - images/rres_1 + - images/inrae_1 + - images/usask_1 +val: # val images (relative to 'path') 748 images (WARNING: train set contains ethz_1) + - images/ethz_1 +test: # test images (optional) 1276 images + - images/utokyo_1 + - images/utokyo_2 + - images/nau_1 + - images/uq_1 + +# Classes +nc: 1 # number of classes +names: ['wheat_head'] # class names + + +# Download script/URL (optional) --------------------------------------------------------------------------------------- download: | from utils.general import download, Path + # Download - dir = Path('../datasets/GlobalWheat2020') # dataset directory + dir = Path(yaml['path']) # dataset root dir urls = ['https://zenodo.org/record/4298502/files/global-wheat-codalab-official.zip', 'https://github.com/ultralytics/yolov5/releases/download/v1.0/GlobalWheat2020_labels.zip'] download(urls, dir=dir) diff --git a/data/Objects365.yaml b/data/Objects365.yaml new file mode 100644 index 000000000000..bd6e5d6e1144 --- /dev/null +++ b/data/Objects365.yaml @@ -0,0 +1,113 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# Objects365 dataset https://www.objects365.org/ by Megvii +# Example usage: python train.py --data Objects365.yaml +# parent +# ├── yolov5 +# └── datasets +# └── Objects365 ← downloads here + + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
+path: ../datasets/Objects365 # dataset root dir +train: images/train # train images (relative to 'path') 1742289 images +val: images/val # val images (relative to 'path') 80000 images +test: # test images (optional) + +# Classes +nc: 365 # number of classes +names: ['Person', 'Sneakers', 'Chair', 'Other Shoes', 'Hat', 'Car', 'Lamp', 'Glasses', 'Bottle', 'Desk', 'Cup', + 'Street Lights', 'Cabinet/shelf', 'Handbag/Satchel', 'Bracelet', 'Plate', 'Picture/Frame', 'Helmet', 'Book', + 'Gloves', 'Storage box', 'Boat', 'Leather Shoes', 'Flower', 'Bench', 'Potted Plant', 'Bowl/Basin', 'Flag', + 'Pillow', 'Boots', 'Vase', 'Microphone', 'Necklace', 'Ring', 'SUV', 'Wine Glass', 'Belt', 'Monitor/TV', + 'Backpack', 'Umbrella', 'Traffic Light', 'Speaker', 'Watch', 'Tie', 'Trash bin Can', 'Slippers', 'Bicycle', + 'Stool', 'Barrel/bucket', 'Van', 'Couch', 'Sandals', 'Basket', 'Drum', 'Pen/Pencil', 'Bus', 'Wild Bird', + 'High Heels', 'Motorcycle', 'Guitar', 'Carpet', 'Cell Phone', 'Bread', 'Camera', 'Canned', 'Truck', + 'Traffic cone', 'Cymbal', 'Lifesaver', 'Towel', 'Stuffed Toy', 'Candle', 'Sailboat', 'Laptop', 'Awning', + 'Bed', 'Faucet', 'Tent', 'Horse', 'Mirror', 'Power outlet', 'Sink', 'Apple', 'Air Conditioner', 'Knife', + 'Hockey Stick', 'Paddle', 'Pickup Truck', 'Fork', 'Traffic Sign', 'Balloon', 'Tripod', 'Dog', 'Spoon', 'Clock', + 'Pot', 'Cow', 'Cake', 'Dinning Table', 'Sheep', 'Hanger', 'Blackboard/Whiteboard', 'Napkin', 'Other Fish', + 'Orange/Tangerine', 'Toiletry', 'Keyboard', 'Tomato', 'Lantern', 'Machinery Vehicle', 'Fan', + 'Green Vegetables', 'Banana', 'Baseball Glove', 'Airplane', 'Mouse', 'Train', 'Pumpkin', 'Soccer', 'Skiboard', + 'Luggage', 'Nightstand', 'Tea pot', 'Telephone', 'Trolley', 'Head Phone', 'Sports Car', 'Stop Sign', + 'Dessert', 'Scooter', 'Stroller', 'Crane', 'Remote', 'Refrigerator', 'Oven', 'Lemon', 'Duck', 'Baseball Bat', + 'Surveillance Camera', 'Cat', 'Jug', 'Broccoli', 'Piano', 'Pizza', 'Elephant', 'Skateboard', 'Surfboard', + 'Gun', 'Skating and Skiing shoes', 'Gas stove', 'Donut', 'Bow Tie', 'Carrot', 'Toilet', 'Kite', 'Strawberry', + 'Other Balls', 'Shovel', 'Pepper', 'Computer Box', 'Toilet Paper', 'Cleaning Products', 'Chopsticks', + 'Microwave', 'Pigeon', 'Baseball', 'Cutting/chopping Board', 'Coffee Table', 'Side Table', 'Scissors', + 'Marker', 'Pie', 'Ladder', 'Snowboard', 'Cookies', 'Radiator', 'Fire Hydrant', 'Basketball', 'Zebra', 'Grape', + 'Giraffe', 'Potato', 'Sausage', 'Tricycle', 'Violin', 'Egg', 'Fire Extinguisher', 'Candy', 'Fire Truck', + 'Billiards', 'Converter', 'Bathtub', 'Wheelchair', 'Golf Club', 'Briefcase', 'Cucumber', 'Cigar/Cigarette', + 'Paint Brush', 'Pear', 'Heavy Truck', 'Hamburger', 'Extractor', 'Extension Cord', 'Tong', 'Tennis Racket', + 'Folder', 'American Football', 'earphone', 'Mask', 'Kettle', 'Tennis', 'Ship', 'Swing', 'Coffee Machine', + 'Slide', 'Carriage', 'Onion', 'Green beans', 'Projector', 'Frisbee', 'Washing Machine/Drying Machine', + 'Chicken', 'Printer', 'Watermelon', 'Saxophone', 'Tissue', 'Toothbrush', 'Ice cream', 'Hot-air balloon', + 'Cello', 'French Fries', 'Scale', 'Trophy', 'Cabbage', 'Hot dog', 'Blender', 'Peach', 'Rice', 'Wallet/Purse', + 'Volleyball', 'Deer', 'Goose', 'Tape', 'Tablet', 'Cosmetics', 'Trumpet', 'Pineapple', 'Golf Ball', + 'Ambulance', 'Parking meter', 'Mango', 'Key', 'Hurdle', 'Fishing Rod', 'Medal', 'Flute', 'Brush', 'Penguin', + 'Megaphone', 'Corn', 'Lettuce', 'Garlic', 'Swan', 'Helicopter', 'Green Onion', 'Sandwich', 'Nuts', + 'Speed Limit Sign', 'Induction Cooker', 'Broom', 'Trombone', 
'Plum', 'Rickshaw', 'Goldfish', 'Kiwi fruit', + 'Router/modem', 'Poker Card', 'Toaster', 'Shrimp', 'Sushi', 'Cheese', 'Notepaper', 'Cherry', 'Pliers', 'CD', + 'Pasta', 'Hammer', 'Cue', 'Avocado', 'Hamimelon', 'Flask', 'Mushroom', 'Screwdriver', 'Soap', 'Recorder', + 'Bear', 'Eggplant', 'Board Eraser', 'Coconut', 'Tape Measure/Ruler', 'Pig', 'Showerhead', 'Globe', 'Chips', + 'Steak', 'Crosswalk Sign', 'Stapler', 'Camel', 'Formula 1', 'Pomegranate', 'Dishwasher', 'Crab', + 'Hoverboard', 'Meat ball', 'Rice Cooker', 'Tuba', 'Calculator', 'Papaya', 'Antelope', 'Parrot', 'Seal', + 'Butterfly', 'Dumbbell', 'Donkey', 'Lion', 'Urinal', 'Dolphin', 'Electric Drill', 'Hair Dryer', 'Egg tart', + 'Jellyfish', 'Treadmill', 'Lighter', 'Grapefruit', 'Game board', 'Mop', 'Radish', 'Baozi', 'Target', 'French', + 'Spring Rolls', 'Monkey', 'Rabbit', 'Pencil Case', 'Yak', 'Red Cabbage', 'Binoculars', 'Asparagus', 'Barbell', + 'Scallop', 'Noddles', 'Comb', 'Dumpling', 'Oyster', 'Table Tennis paddle', 'Cosmetics Brush/Eyeliner Pencil', + 'Chainsaw', 'Eraser', 'Lobster', 'Durian', 'Okra', 'Lipstick', 'Cosmetics Mirror', 'Curling', 'Table Tennis'] + + +# Download script/URL (optional) --------------------------------------------------------------------------------------- +download: | + from pycocotools.coco import COCO + from tqdm import tqdm + + from utils.general import Path, download, np, xyxy2xywhn + + + # Make Directories + dir = Path(yaml['path']) # dataset root dir + for p in 'images', 'labels': + (dir / p).mkdir(parents=True, exist_ok=True) + for q in 'train', 'val': + (dir / p / q).mkdir(parents=True, exist_ok=True) + + # Train, Val Splits + for split, patches in [('train', 50 + 1), ('val', 43 + 1)]: + print(f"Processing {split} in {patches} patches ...") + images, labels = dir / 'images' / split, dir / 'labels' / split + + # Download + url = f"https://dorc.ks3-cn-beijing.ksyun.com/data-set/2020Objects365%E6%95%B0%E6%8D%AE%E9%9B%86/{split}/" + if split == 'train': + download([f'{url}zhiyuan_objv2_{split}.tar.gz'], dir=dir, delete=False) # annotations json + download([f'{url}patch{i}.tar.gz' for i in range(patches)], dir=images, curl=True, delete=False, threads=8) + elif split == 'val': + download([f'{url}zhiyuan_objv2_{split}.json'], dir=dir, delete=False) # annotations json + download([f'{url}images/v1/patch{i}.tar.gz' for i in range(15 + 1)], dir=images, curl=True, delete=False, threads=8) + download([f'{url}images/v2/patch{i}.tar.gz' for i in range(16, patches)], dir=images, curl=True, delete=False, threads=8) + + # Move + for f in tqdm(images.rglob('*.jpg'), desc=f'Moving {split} images'): + f.rename(images / f.name) # move to /images/{split} + + # Labels + coco = COCO(dir / f'zhiyuan_objv2_{split}.json') + names = [x["name"] for x in coco.loadCats(coco.getCatIds())] + for cid, cat in enumerate(names): + catIds = coco.getCatIds(catNms=[cat]) + imgIds = coco.getImgIds(catIds=catIds) + for im in tqdm(coco.loadImgs(imgIds), desc=f'Class {cid + 1}/{len(names)} {cat}'): + width, height = im["width"], im["height"] + path = Path(im["file_name"]) # image filename + try: + with open(labels / path.with_suffix('.txt').name, 'a') as file: + annIds = coco.getAnnIds(imgIds=im["id"], catIds=catIds, iscrowd=None) + for a in coco.loadAnns(annIds): + x, y, w, h = a['bbox'] # bounding box in xywh (xy top-left corner) + xyxy = np.array([x, y, x + w, y + h])[None] # pixels(1,4) + x, y, w, h = xyxy2xywhn(xyxy, w=width, h=height, clip=True)[0] # normalized and clipped + file.write(f"{cid} {x:.5f} {y:.5f} {w:.5f} {h:.5f}\n") 
diff --git a/data/SKU-110K.yaml b/data/SKU-110K.yaml
index a8c1f25b385a..46459eab6bb7 100644
--- a/data/SKU-110K.yaml
+++ b/data/SKU-110K.yaml
@@ -1,39 +1,40 @@
-# SKU-110K retail items dataset https://github.com/eg4000/SKU110K_CVPR19
-# Train command: python train.py --data SKU-110K.yaml
-# Default dataset location is next to YOLOv5:
-#   /parent_folder
-#     /datasets/SKU-110K
-#     /yolov5
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# SKU-110K retail items dataset https://github.com/eg4000/SKU110K_CVPR19 by Trax Retail
+# Example usage: python train.py --data SKU-110K.yaml
+# parent
+# ├── yolov5
+# └── datasets
+#     └── SKU-110K  ← downloads here
 
-# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/]
-train: ../datasets/SKU-110K/train.txt  # 8219 images
-val: ../datasets/SKU-110K/val.txt  # 588 images
-test: ../datasets/SKU-110K/test.txt  # 2936 images
+# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
+path: ../datasets/SKU-110K  # dataset root dir
+train: train.txt  # train images (relative to 'path')  8219 images
+val: val.txt  # val images (relative to 'path')  588 images
+test: test.txt  # test images (optional)  2936 images
 
-# number of classes
-nc: 1
+# Classes
+nc: 1  # number of classes
+names: ['object']  # class names
 
-# class names
-names: [ 'object' ]
-
-# download command/URL (optional) --------------------------------------------------------------------------------------
+# Download script/URL (optional) ---------------------------------------------------------------------------------------
 download: |
   import shutil
   from tqdm import tqdm
   from utils.general import np, pd, Path, download, xyxy2xywh
 
+
   # Download
-  datasets = Path('../datasets')  # download directory
+  dir = Path(yaml['path'])  # dataset root dir
+  parent = Path(dir.parent)  # download dir
   urls = ['http://trax-geometry.s3.amazonaws.com/cvpr_challenge/SKU110K_fixed.tar.gz']
-  download(urls, dir=datasets, delete=False)
+  download(urls, dir=parent, delete=False)
 
   # Rename directories
-  dir = (datasets / 'SKU-110K')
   if dir.exists():
       shutil.rmtree(dir)
-  (datasets / 'SKU110K_fixed').rename(dir)  # rename dir
+  (parent / 'SKU110K_fixed').rename(dir)  # rename dir
   (dir / 'labels').mkdir(parents=True, exist_ok=True)  # create labels dir
 
   # Convert labels
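The pattern repeated across these dataset YAMLs is the new path: key: train/val/test entries are now relative to a single dataset root instead of each carrying its own ../datasets prefix. Roughly how a consumer resolves the string-valued entries (a sketch; the actual logic lives in check_dataset() in utils/general.py, and list-valued entries such as VOC's resolve item by item):

    from pathlib import Path

    import yaml

    data = yaml.safe_load(Path('data/SKU-110K.yaml').read_text())  # assumes the repo root as cwd
    root = Path(data['path'])  # dataset root dir
    for split in 'train', 'val', 'test':
        if data.get(split):
            print(split, '->', root / data[split])  # e.g. train -> ../datasets/SKU-110K/train.txt

diff --git a/data/VOC.yaml b/data/VOC.yaml
new file mode 100644
index 000000000000..be04fb1e2ecb
--- /dev/null
+++ b/data/VOC.yaml
@@ -0,0 +1,80 @@
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC by University of Oxford
+# Example usage: python train.py --data VOC.yaml
+# parent
+# ├── yolov5
+# └── datasets
+#     └── VOC  ← downloads here
+
+
+# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]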
+path: ../datasets/VOC +train: # train images (relative to 'path') 16551 images + - images/train2012 + - images/train2007 + - images/val2012 + - images/val2007 +val: # val images (relative to 'path') 4952 images + - images/test2007 +test: # test images (optional) + - images/test2007 + +# Classes +nc: 20 # number of classes +names: ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', + 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'] # class names + + +# Download script/URL (optional) --------------------------------------------------------------------------------------- +download: | + import xml.etree.ElementTree as ET + + from tqdm import tqdm + from utils.general import download, Path + + + def convert_label(path, lb_path, year, image_id): + def convert_box(size, box): + dw, dh = 1. / size[0], 1. / size[1] + x, y, w, h = (box[0] + box[1]) / 2.0 - 1, (box[2] + box[3]) / 2.0 - 1, box[1] - box[0], box[3] - box[2] + return x * dw, y * dh, w * dw, h * dh + + in_file = open(path / f'VOC{year}/Annotations/{image_id}.xml') + out_file = open(lb_path, 'w') + tree = ET.parse(in_file) + root = tree.getroot() + size = root.find('size') + w = int(size.find('width').text) + h = int(size.find('height').text) + + for obj in root.iter('object'): + cls = obj.find('name').text + if cls in yaml['names'] and not int(obj.find('difficult').text) == 1: + xmlbox = obj.find('bndbox') + bb = convert_box((w, h), [float(xmlbox.find(x).text) for x in ('xmin', 'xmax', 'ymin', 'ymax')]) + cls_id = yaml['names'].index(cls) # class id + out_file.write(" ".join([str(a) for a in (cls_id, *bb)]) + '\n') + + + # Download + dir = Path(yaml['path']) # dataset root dir + url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/' + urls = [url + 'VOCtrainval_06-Nov-2007.zip', # 446MB, 5012 images + url + 'VOCtest_06-Nov-2007.zip', # 438MB, 4953 images + url + 'VOCtrainval_11-May-2012.zip'] # 1.95GB, 17126 images + download(urls, dir=dir / 'images', delete=False, threads=3) + + # Convert + path = dir / f'images/VOCdevkit' + for year, image_set in ('2012', 'train'), ('2012', 'val'), ('2007', 'train'), ('2007', 'val'), ('2007', 'test'): + imgs_path = dir / 'images' / f'{image_set}{year}' + lbs_path = dir / 'labels' / f'{image_set}{year}' + imgs_path.mkdir(exist_ok=True, parents=True) + lbs_path.mkdir(exist_ok=True, parents=True) + + image_ids = open(path / f'VOC{year}/ImageSets/Main/{image_set}.txt').read().strip().split() + for id in tqdm(image_ids, desc=f'{image_set}{year}'): + f = path / f'VOC{year}/JPEGImages/{id}.jpg' # old img path + lb_path = (lbs_path / f.name).with_suffix('.txt') # new label path + f.rename(imgs_path / f.name) # move image + convert_label(path, lb_path, year, id) # convert labels to YOLO format diff --git a/data/VisDrone.yaml b/data/VisDrone.yaml index c4603b200132..2a3b2f03e674 100644 --- a/data/VisDrone.yaml +++ b/data/VisDrone.yaml @@ -1,24 +1,24 @@ -# VisDrone2019-DET dataset https://github.com/VisDrone/VisDrone-Dataset -# Train command: python train.py --data VisDrone.yaml -# Default dataset location is next to YOLOv5: -# /parent_folder -# /VisDrone -# /yolov5 +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# VisDrone2019-DET dataset https://github.com/VisDrone/VisDrone-Dataset by Tianjin University +# Example usage: python train.py --data VisDrone.yaml +# parent +# ├── yolov5 +# └── datasets +# └── VisDrone ← downloads here -# train and val data as 1) directory: path/images/, 2) file: path/images.txt, 
or 3) list: [path1/images/, path2/images/] -train: ../VisDrone/VisDrone2019-DET-train/images # 6471 images -val: ../VisDrone/VisDrone2019-DET-val/images # 548 images -test: ../VisDrone/VisDrone2019-DET-test-dev/images # 1610 images +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] +path: ../datasets/VisDrone # dataset root dir +train: VisDrone2019-DET-train/images # train images (relative to 'path') 6471 images +val: VisDrone2019-DET-val/images # val images (relative to 'path') 548 images +test: VisDrone2019-DET-test-dev/images # test images (optional) 1610 images -# number of classes -nc: 10 +# Classes +nc: 10 # number of classes +names: ['pedestrian', 'people', 'bicycle', 'car', 'van', 'truck', 'tricycle', 'awning-tricycle', 'bus', 'motor'] -# class names -names: [ 'pedestrian', 'people', 'bicycle', 'car', 'van', 'truck', 'tricycle', 'awning-tricycle', 'bus', 'motor' ] - -# download command/URL (optional) -------------------------------------------------------------------------------------- +# Download script/URL (optional) --------------------------------------------------------------------------------------- download: | from utils.general import download, os, Path @@ -49,12 +49,12 @@ download: | # Download - dir = Path('../VisDrone') # dataset directory + dir = Path(yaml['path']) # dataset root dir urls = ['https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-train.zip', 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-val.zip', 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-dev.zip', 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-challenge.zip'] - download(urls, dir=dir) + download(urls, dir=dir, threads=4) # Convert for d in 'VisDrone2019-DET-train', 'VisDrone2019-DET-val', 'VisDrone2019-DET-test-dev': diff --git a/data/argoverse_hd.yaml b/data/argoverse_hd.yaml deleted file mode 100644 index 0ba314d82ce1..000000000000 --- a/data/argoverse_hd.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ -# Train command: python train.py --data argoverse_hd.yaml -# Default dataset location is next to YOLOv5: -# /parent_folder -# /argoverse -# /yolov5 - - -# download command/URL (optional) -download: bash data/scripts/get_argoverse_hd.sh - -# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/] -train: ../argoverse/Argoverse-1.1/images/train/ # 39384 images -val: ../argoverse/Argoverse-1.1/images/val/ # 15062 iamges -test: ../argoverse/Argoverse-1.1/images/test/ # Submit to: https://eval.ai/web/challenges/challenge-page/800/overview - -# number of classes -nc: 8 - -# class names -names: [ 'person', 'bicycle', 'car', 'motorcycle', 'bus', 'truck', 'traffic_light', 'stop_sign' ] diff --git a/data/coco.yaml b/data/coco.yaml index f818a49ff0fa..7494fc2f9cd1 100644 --- a/data/coco.yaml +++ b/data/coco.yaml @@ -1,35 +1,45 @@ -# COCO 2017 dataset http://cocodataset.org -# Train command: python train.py --data coco.yaml -# Default dataset location is next to YOLOv5: -# /parent_folder -# /coco -# /yolov5 - - -# download command/URL (optional) -download: bash data/scripts/get_coco.sh - -# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/] -train: ../coco/train2017.txt # 118287 images -val: 
../coco/val2017.txt # 5000 images -test: ../coco/test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794 - -# number of classes -nc: 80 - -# class names -names: [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', - 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', - 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', - 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', - 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', - 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', - 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', - 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', - 'hair drier', 'toothbrush' ] - -# Print classes -# with open('data/coco.yaml') as f: -# d = yaml.safe_load(f) # dict -# for i, x in enumerate(d['names']): -# print(i, x) +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# COCO 2017 dataset http://cocodataset.org by Microsoft +# Example usage: python train.py --data coco.yaml +# parent +# ├── yolov5 +# └── datasets +# └── coco ← downloads here + + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] +path: ../datasets/coco # dataset root dir +train: train2017.txt # train images (relative to 'path') 118287 images +val: val2017.txt # val images (relative to 'path') 5000 images +test: test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794 + +# Classes +nc: 80 # number of classes +names: ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', + 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', + 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', + 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', + 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', + 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', + 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', + 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', + 'hair drier', 'toothbrush'] # class names + + +# Download script/URL (optional) +download: | + from utils.general import download, Path + + + # Download labels + segments = False # segment or box labels + dir = Path(yaml['path']) # dataset root dir + url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/' + urls = [url + ('coco2017labels-segments.zip' if segments else 'coco2017labels.zip')] # labels + download(urls, dir=dir.parent) + + # Download data + urls = ['http://images.cocodataset.org/zips/train2017.zip', # 19G, 118k images + 'http://images.cocodataset.org/zips/val2017.zip', # 1G, 5k images + 'http://images.cocodataset.org/zips/test2017.zip'] # 7G, 41k images (optional) + download(urls, dir=dir / 'images', threads=3) diff --git a/data/coco128.yaml 
b/data/coco128.yaml index 83fbc29d3404..d07c704407a1 100644 --- a/data/coco128.yaml +++ b/data/coco128.yaml @@ -1,28 +1,30 @@ -# COCO 2017 dataset http://cocodataset.org - first 128 training images -# Train command: python train.py --data coco128.yaml -# Default dataset location is next to YOLOv5: -# /parent_folder -# /coco128 -# /yolov5 +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics +# Example usage: python train.py --data coco128.yaml +# parent +# ├── yolov5 +# └── datasets +# └── coco128 ← downloads here -# download command/URL (optional) -download: https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] +path: ../datasets/coco128 # dataset root dir +train: images/train2017 # train images (relative to 'path') 128 images +val: images/train2017 # val images (relative to 'path') 128 images +test: # test images (optional) -# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/] -train: ../coco128/images/train2017/ # 128 images -val: ../coco128/images/train2017/ # 128 images +# Classes +nc: 80 # number of classes +names: ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', + 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', + 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', + 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', + 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', + 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', + 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', + 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', + 'hair drier', 'toothbrush'] # class names -# number of classes -nc: 80 -# class names -names: [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', - 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', - 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', - 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', - 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', - 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', - 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', - 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', - 'hair drier', 'toothbrush' ] +# Download script/URL (optional) +download: https://ultralytics.com/assets/coco128.zip diff --git a/data/hyp.finetune.yaml b/data/hyp.finetune.yaml deleted file mode 100644 index 1b84cff95c2c..000000000000 --- a/data/hyp.finetune.yaml +++ /dev/null @@ -1,38 +0,0 @@ -# Hyperparameters for VOC finetuning -# python train.py --batch 64 --weights yolov5m.pt --data voc.yaml --img 
512 --epochs 50 -# See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials - - -# Hyperparameter Evolution Results -# Generations: 306 -# P R mAP.5 mAP.5:.95 box obj cls -# Metrics: 0.6 0.936 0.896 0.684 0.0115 0.00805 0.00146 - -lr0: 0.0032 -lrf: 0.12 -momentum: 0.843 -weight_decay: 0.00036 -warmup_epochs: 2.0 -warmup_momentum: 0.5 -warmup_bias_lr: 0.05 -box: 0.0296 -cls: 0.243 -cls_pw: 0.631 -obj: 0.301 -obj_pw: 0.911 -iou_t: 0.2 -anchor_t: 2.91 -# anchors: 3.63 -fl_gamma: 0.0 -hsv_h: 0.0138 -hsv_s: 0.664 -hsv_v: 0.464 -degrees: 0.373 -translate: 0.245 -scale: 0.898 -shear: 0.602 -perspective: 0.0 -flipud: 0.00856 -fliplr: 0.5 -mosaic: 1.0 -mixup: 0.243 diff --git a/data/hyp.finetune_objects365.yaml b/data/hyps/hyp.Objects365.yaml similarity index 58% rename from data/hyp.finetune_objects365.yaml rename to data/hyps/hyp.Objects365.yaml index 2b104ef2d9bf..74971740f7c7 100644 --- a/data/hyp.finetune_objects365.yaml +++ b/data/hyps/hyp.Objects365.yaml @@ -1,3 +1,8 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# Hyperparameters for Objects365 training +# python train.py --weights yolov5m.pt --data Objects365.yaml --evolve +# See Hyperparameter Evolution tutorial for details https://github.com/ultralytics/yolov5#tutorials + lr0: 0.00258 lrf: 0.17 momentum: 0.779 @@ -26,3 +31,4 @@ flipud: 0.0 fliplr: 0.5 mosaic: 1.0 mixup: 0.0 +copy_paste: 0.0 diff --git a/data/hyps/hyp.VOC.yaml b/data/hyps/hyp.VOC.yaml new file mode 100644 index 000000000000..0aa4e7d9f8f5 --- /dev/null +++ b/data/hyps/hyp.VOC.yaml @@ -0,0 +1,40 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# Hyperparameters for VOC training +# python train.py --batch 128 --weights yolov5m6.pt --data VOC.yaml --epochs 50 --img 512 --hyp hyp.scratch-med.yaml --evolve +# See Hyperparameter Evolution tutorial for details https://github.com/ultralytics/yolov5#tutorials + +# YOLOv5 Hyperparameter Evolution Results +# Best generation: 467 +# Last generation: 996 +# metrics/precision, metrics/recall, metrics/mAP_0.5, metrics/mAP_0.5:0.95, val/box_loss, val/obj_loss, val/cls_loss +# 0.87729, 0.85125, 0.91286, 0.72664, 0.0076739, 0.0042529, 0.0013865 + +lr0: 0.00334 +lrf: 0.15135 +momentum: 0.74832 +weight_decay: 0.00025 +warmup_epochs: 3.3835 +warmup_momentum: 0.59462 +warmup_bias_lr: 0.18657 +box: 0.02 +cls: 0.21638 +cls_pw: 0.5 +obj: 0.51728 +obj_pw: 0.67198 +iou_t: 0.2 +anchor_t: 3.3744 +fl_gamma: 0.0 +hsv_h: 0.01041 +hsv_s: 0.54703 +hsv_v: 0.27739 +degrees: 0.0 +translate: 0.04591 +scale: 0.75544 +shear: 0.0 +perspective: 0.0 +flipud: 0.0 +fliplr: 0.5 +mosaic: 0.85834 +mixup: 0.04266 +copy_paste: 0.0 +anchors: 3.412 diff --git a/data/hyps/hyp.scratch-high.yaml b/data/hyps/hyp.scratch-high.yaml new file mode 100644 index 000000000000..123cc8407413 --- /dev/null +++ b/data/hyps/hyp.scratch-high.yaml @@ -0,0 +1,34 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# Hyperparameters for high-augmentation COCO training from scratch +# python train.py --batch 32 --cfg yolov5m6.yaml --weights '' --data coco.yaml --img 1280 --epochs 300 +# See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials + +lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3) +lrf: 0.1 # final OneCycleLR learning rate (lr0 * lrf) +momentum: 0.937 # SGD momentum/Adam beta1 +weight_decay: 0.0005 # optimizer weight decay 5e-4 +warmup_epochs: 3.0 # warmup epochs (fractions ok) +warmup_momentum: 0.8 # warmup initial momentum +warmup_bias_lr: 0.1 # warmup initial bias lr +box: 0.05 # box loss gain +cls: 0.3 # cls 
loss gain +cls_pw: 1.0 # cls BCELoss positive_weight +obj: 0.7 # obj loss gain (scale with pixels) +obj_pw: 1.0 # obj BCELoss positive_weight +iou_t: 0.20 # IoU training threshold +anchor_t: 4.0 # anchor-multiple threshold +# anchors: 3 # anchors per output layer (0 to ignore) +fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5) +hsv_h: 0.015 # image HSV-Hue augmentation (fraction) +hsv_s: 0.7 # image HSV-Saturation augmentation (fraction) +hsv_v: 0.4 # image HSV-Value augmentation (fraction) +degrees: 0.0 # image rotation (+/- deg) +translate: 0.1 # image translation (+/- fraction) +scale: 0.9 # image scale (+/- gain) +shear: 0.0 # image shear (+/- deg) +perspective: 0.0 # image perspective (+/- fraction), range 0-0.001 +flipud: 0.0 # image flip up-down (probability) +fliplr: 0.5 # image flip left-right (probability) +mosaic: 1.0 # image mosaic (probability) +mixup: 0.1 # image mixup (probability) +copy_paste: 0.1 # segment copy-paste (probability) diff --git a/data/hyp.scratch.yaml b/data/hyps/hyp.scratch-low.yaml similarity index 80% rename from data/hyp.scratch.yaml rename to data/hyps/hyp.scratch-low.yaml index 44f26b6658ae..b9ef1d55a3b6 100644 --- a/data/hyp.scratch.yaml +++ b/data/hyps/hyp.scratch-low.yaml @@ -1,10 +1,10 @@ -# Hyperparameters for COCO training from scratch -# python train.py --batch 40 --cfg yolov5m.yaml --weights '' --data coco.yaml --img 640 --epochs 300 +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# Hyperparameters for low-augmentation COCO training from scratch +# python train.py --batch 64 --cfg yolov5n6.yaml --weights '' --data coco.yaml --img 640 --epochs 300 --linear # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials - lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3) -lrf: 0.2 # final OneCycleLR learning rate (lr0 * lrf) +lrf: 0.01 # final OneCycleLR learning rate (lr0 * lrf) momentum: 0.937 # SGD momentum/Adam beta1 weight_decay: 0.0005 # optimizer weight decay 5e-4 warmup_epochs: 3.0 # warmup epochs (fractions ok) @@ -31,3 +31,4 @@ flipud: 0.0 # image flip up-down (probability) fliplr: 0.5 # image flip left-right (probability) mosaic: 1.0 # image mosaic (probability) mixup: 0.0 # image mixup (probability) +copy_paste: 0.0 # segment copy-paste (probability) diff --git a/data/hyps/hyp.scratch-med.yaml b/data/hyps/hyp.scratch-med.yaml new file mode 100644 index 000000000000..d6867d7557ba --- /dev/null +++ b/data/hyps/hyp.scratch-med.yaml @@ -0,0 +1,34 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# Hyperparameters for medium-augmentation COCO training from scratch +# python train.py --batch 32 --cfg yolov5m6.yaml --weights '' --data coco.yaml --img 1280 --epochs 300 +# See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials + +lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3) +lrf: 0.1 # final OneCycleLR learning rate (lr0 * lrf) +momentum: 0.937 # SGD momentum/Adam beta1 +weight_decay: 0.0005 # optimizer weight decay 5e-4 +warmup_epochs: 3.0 # warmup epochs (fractions ok) +warmup_momentum: 0.8 # warmup initial momentum +warmup_bias_lr: 0.1 # warmup initial bias lr +box: 0.05 # box loss gain +cls: 0.3 # cls loss gain +cls_pw: 1.0 # cls BCELoss positive_weight +obj: 0.7 # obj loss gain (scale with pixels) +obj_pw: 1.0 # obj BCELoss positive_weight +iou_t: 0.20 # IoU training threshold +anchor_t: 4.0 # anchor-multiple threshold +# anchors: 3 # anchors per output layer (0 to ignore) +fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5) +hsv_h: 
0.015 # image HSV-Hue augmentation (fraction) +hsv_s: 0.7 # image HSV-Saturation augmentation (fraction) +hsv_v: 0.4 # image HSV-Value augmentation (fraction) +degrees: 0.0 # image rotation (+/- deg) +translate: 0.1 # image translation (+/- fraction) +scale: 0.9 # image scale (+/- gain) +shear: 0.0 # image shear (+/- deg) +perspective: 0.0 # image perspective (+/- fraction), range 0-0.001 +flipud: 0.0 # image flip up-down (probability) +fliplr: 0.5 # image flip left-right (probability) +mosaic: 1.0 # image mosaic (probability) +mixup: 0.1 # image mixup (probability) +copy_paste: 0.0 # segment copy-paste (probability) diff --git a/data/objects365.yaml b/data/objects365.yaml deleted file mode 100644 index eb99995903cf..000000000000 --- a/data/objects365.yaml +++ /dev/null @@ -1,102 +0,0 @@ -# Objects365 dataset https://www.objects365.org/ -# Train command: python train.py --data objects365.yaml -# Default dataset location is next to YOLOv5: -# /parent_folder -# /datasets/objects365 -# /yolov5 - -# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/] -train: ../datasets/objects365/images/train # 1742289 images -val: ../datasets/objects365/images/val # 5570 images - -# number of classes -nc: 365 - -# class names -names: [ 'Person', 'Sneakers', 'Chair', 'Other Shoes', 'Hat', 'Car', 'Lamp', 'Glasses', 'Bottle', 'Desk', 'Cup', - 'Street Lights', 'Cabinet/shelf', 'Handbag/Satchel', 'Bracelet', 'Plate', 'Picture/Frame', 'Helmet', 'Book', - 'Gloves', 'Storage box', 'Boat', 'Leather Shoes', 'Flower', 'Bench', 'Potted Plant', 'Bowl/Basin', 'Flag', - 'Pillow', 'Boots', 'Vase', 'Microphone', 'Necklace', 'Ring', 'SUV', 'Wine Glass', 'Belt', 'Monitor/TV', - 'Backpack', 'Umbrella', 'Traffic Light', 'Speaker', 'Watch', 'Tie', 'Trash bin Can', 'Slippers', 'Bicycle', - 'Stool', 'Barrel/bucket', 'Van', 'Couch', 'Sandals', 'Basket', 'Drum', 'Pen/Pencil', 'Bus', 'Wild Bird', - 'High Heels', 'Motorcycle', 'Guitar', 'Carpet', 'Cell Phone', 'Bread', 'Camera', 'Canned', 'Truck', - 'Traffic cone', 'Cymbal', 'Lifesaver', 'Towel', 'Stuffed Toy', 'Candle', 'Sailboat', 'Laptop', 'Awning', - 'Bed', 'Faucet', 'Tent', 'Horse', 'Mirror', 'Power outlet', 'Sink', 'Apple', 'Air Conditioner', 'Knife', - 'Hockey Stick', 'Paddle', 'Pickup Truck', 'Fork', 'Traffic Sign', 'Balloon', 'Tripod', 'Dog', 'Spoon', 'Clock', - 'Pot', 'Cow', 'Cake', 'Dinning Table', 'Sheep', 'Hanger', 'Blackboard/Whiteboard', 'Napkin', 'Other Fish', - 'Orange/Tangerine', 'Toiletry', 'Keyboard', 'Tomato', 'Lantern', 'Machinery Vehicle', 'Fan', - 'Green Vegetables', 'Banana', 'Baseball Glove', 'Airplane', 'Mouse', 'Train', 'Pumpkin', 'Soccer', 'Skiboard', - 'Luggage', 'Nightstand', 'Tea pot', 'Telephone', 'Trolley', 'Head Phone', 'Sports Car', 'Stop Sign', - 'Dessert', 'Scooter', 'Stroller', 'Crane', 'Remote', 'Refrigerator', 'Oven', 'Lemon', 'Duck', 'Baseball Bat', - 'Surveillance Camera', 'Cat', 'Jug', 'Broccoli', 'Piano', 'Pizza', 'Elephant', 'Skateboard', 'Surfboard', - 'Gun', 'Skating and Skiing shoes', 'Gas stove', 'Donut', 'Bow Tie', 'Carrot', 'Toilet', 'Kite', 'Strawberry', - 'Other Balls', 'Shovel', 'Pepper', 'Computer Box', 'Toilet Paper', 'Cleaning Products', 'Chopsticks', - 'Microwave', 'Pigeon', 'Baseball', 'Cutting/chopping Board', 'Coffee Table', 'Side Table', 'Scissors', - 'Marker', 'Pie', 'Ladder', 'Snowboard', 'Cookies', 'Radiator', 'Fire Hydrant', 'Basketball', 'Zebra', 'Grape', - 'Giraffe', 'Potato', 'Sausage', 'Tricycle', 'Violin', 'Egg', 'Fire Extinguisher', 'Candy', 'Fire 
Truck', - 'Billiards', 'Converter', 'Bathtub', 'Wheelchair', 'Golf Club', 'Briefcase', 'Cucumber', 'Cigar/Cigarette', - 'Paint Brush', 'Pear', 'Heavy Truck', 'Hamburger', 'Extractor', 'Extension Cord', 'Tong', 'Tennis Racket', - 'Folder', 'American Football', 'earphone', 'Mask', 'Kettle', 'Tennis', 'Ship', 'Swing', 'Coffee Machine', - 'Slide', 'Carriage', 'Onion', 'Green beans', 'Projector', 'Frisbee', 'Washing Machine/Drying Machine', - 'Chicken', 'Printer', 'Watermelon', 'Saxophone', 'Tissue', 'Toothbrush', 'Ice cream', 'Hot-air balloon', - 'Cello', 'French Fries', 'Scale', 'Trophy', 'Cabbage', 'Hot dog', 'Blender', 'Peach', 'Rice', 'Wallet/Purse', - 'Volleyball', 'Deer', 'Goose', 'Tape', 'Tablet', 'Cosmetics', 'Trumpet', 'Pineapple', 'Golf Ball', - 'Ambulance', 'Parking meter', 'Mango', 'Key', 'Hurdle', 'Fishing Rod', 'Medal', 'Flute', 'Brush', 'Penguin', - 'Megaphone', 'Corn', 'Lettuce', 'Garlic', 'Swan', 'Helicopter', 'Green Onion', 'Sandwich', 'Nuts', - 'Speed Limit Sign', 'Induction Cooker', 'Broom', 'Trombone', 'Plum', 'Rickshaw', 'Goldfish', 'Kiwi fruit', - 'Router/modem', 'Poker Card', 'Toaster', 'Shrimp', 'Sushi', 'Cheese', 'Notepaper', 'Cherry', 'Pliers', 'CD', - 'Pasta', 'Hammer', 'Cue', 'Avocado', 'Hamimelon', 'Flask', 'Mushroom', 'Screwdriver', 'Soap', 'Recorder', - 'Bear', 'Eggplant', 'Board Eraser', 'Coconut', 'Tape Measure/Ruler', 'Pig', 'Showerhead', 'Globe', 'Chips', - 'Steak', 'Crosswalk Sign', 'Stapler', 'Camel', 'Formula 1', 'Pomegranate', 'Dishwasher', 'Crab', - 'Hoverboard', 'Meat ball', 'Rice Cooker', 'Tuba', 'Calculator', 'Papaya', 'Antelope', 'Parrot', 'Seal', - 'Butterfly', 'Dumbbell', 'Donkey', 'Lion', 'Urinal', 'Dolphin', 'Electric Drill', 'Hair Dryer', 'Egg tart', - 'Jellyfish', 'Treadmill', 'Lighter', 'Grapefruit', 'Game board', 'Mop', 'Radish', 'Baozi', 'Target', 'French', - 'Spring Rolls', 'Monkey', 'Rabbit', 'Pencil Case', 'Yak', 'Red Cabbage', 'Binoculars', 'Asparagus', 'Barbell', - 'Scallop', 'Noddles', 'Comb', 'Dumpling', 'Oyster', 'Table Tennis paddle', 'Cosmetics Brush/Eyeliner Pencil', - 'Chainsaw', 'Eraser', 'Lobster', 'Durian', 'Okra', 'Lipstick', 'Cosmetics Mirror', 'Curling', 'Table Tennis' ] - - -# download command/URL (optional) -------------------------------------------------------------------------------------- -download: | - from pycocotools.coco import COCO - from tqdm import tqdm - - from utils.general import download, Path - - # Make Directories - dir = Path('../datasets/objects365') # dataset directory - for p in 'images', 'labels': - (dir / p).mkdir(parents=True, exist_ok=True) - for q in 'train', 'val': - (dir / p / q).mkdir(parents=True, exist_ok=True) - - # Download - url = "https://dorc.ks3-cn-beijing.ksyun.com/data-set/2020Objects365%E6%95%B0%E6%8D%AE%E9%9B%86/train/" - download([url + 'zhiyuan_objv2_train.tar.gz'], dir=dir, delete=False) # annotations json - download([url + f for f in [f'patch{i}.tar.gz' for i in range(51)]], dir=dir / 'images' / 'train', - curl=True, delete=False, threads=8) - - # Move - train = dir / 'images' / 'train' - for f in tqdm(train.rglob('*.jpg'), desc=f'Moving images'): - f.rename(train / f.name) # move to /images/train - - # Labels - coco = COCO(dir / 'zhiyuan_objv2_train.json') - names = [x["name"] for x in coco.loadCats(coco.getCatIds())] - for cid, cat in enumerate(names): - catIds = coco.getCatIds(catNms=[cat]) - imgIds = coco.getImgIds(catIds=catIds) - for im in tqdm(coco.loadImgs(imgIds), desc=f'Class {cid + 1}/{len(names)} {cat}'): - width, height = im["width"], im["height"] - path = 
Path(im["file_name"]) # image filename - try: - with open(dir / 'labels' / 'train' / path.with_suffix('.txt').name, 'a') as file: - annIds = coco.getAnnIds(imgIds=im["id"], catIds=catIds, iscrowd=None) - for a in coco.loadAnns(annIds): - x, y, w, h = a['bbox'] # bounding box in xywh (xy top-left corner) - x, y = x + w / 2, y + h / 2 # xy to center - file.write(f"{cid} {x / width:.5f} {y / height:.5f} {w / width:.5f} {h / height:.5f}\n") - - except Exception as e: - print(e) diff --git a/data/scripts/download_weights.sh b/data/scripts/download_weights.sh new file mode 100755 index 000000000000..e9fa65394178 --- /dev/null +++ b/data/scripts/download_weights.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# Download latest models from https://github.com/ultralytics/yolov5/releases +# Example usage: bash path/to/download_weights.sh +# parent +# └── yolov5 +# ├── yolov5s.pt ← downloads here +# ├── yolov5m.pt +# └── ... + +python - <train.txt -cat 2007_train.txt 2007_val.txt 2007_test.txt 2012_train.txt 2012_val.txt >train.all.txt - -mkdir ../VOC ../VOC/images ../VOC/images/train ../VOC/images/val -mkdir ../VOC/labels ../VOC/labels/train ../VOC/labels/val - -python3 - "$@" <= cls >= 0, f'incorrect class index {cls}' + + # Write YOLO label + if id not in shapes: + shapes[id] = Image.open(file).size + box = xyxy2xywhn(box[None].astype(np.float), w=shapes[id][0], h=shapes[id][1], clip=True) + with open((labels / id).with_suffix('.txt'), 'a') as f: + f.write(f"{cls} {' '.join(f'{x:.6f}' for x in box[0])}\n") # write label.txt + except Exception as e: + print(f'WARNING: skipping one label for {file}: {e}') + + + # Download manually from https://challenge.xviewdataset.org + dir = Path(yaml['path']) # dataset root dir + # urls = ['https://d307kc0mrhucc3.cloudfront.net/train_labels.zip', # train labels + # 'https://d307kc0mrhucc3.cloudfront.net/train_images.zip', # 15G, 847 train images + # 'https://d307kc0mrhucc3.cloudfront.net/val_images.zip'] # 5G, 282 val images (no labels) + # download(urls, dir=dir, delete=False) + + # Convert labels + convert_labels(dir / 'xView_train.geojson') + + # Move images + images = Path(dir / 'images') + images.mkdir(parents=True, exist_ok=True) + Path(dir / 'train_images').rename(dir / 'images' / 'train') + Path(dir / 'val_images').rename(dir / 'images' / 'val') + + # Split + autosplit(dir / 'images' / 'train') diff --git a/detect.py b/detect.py index d932cca3e08e..ccb9fbf5103f 100644 --- a/detect.py +++ b/detect.py @@ -1,97 +1,154 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Run inference on images, videos, directories, streams, etc. 
+ +Usage - sources: + $ python path/to/detect.py --weights yolov5s.pt --source 0 # webcam + img.jpg # image + vid.mp4 # video + path/ # directory + path/*.jpg # glob + 'https://youtu.be/Zgi9g1ksQHc' # YouTube + 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream + +Usage - formats: + $ python path/to/detect.py --weights yolov5s.pt # PyTorch + yolov5s.torchscript # TorchScript + yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn + yolov5s.xml # OpenVINO + yolov5s.engine # TensorRT + yolov5s.mlmodel # CoreML (MacOS-only) + yolov5s_saved_model # TensorFlow SavedModel + yolov5s.pb # TensorFlow GraphDef + yolov5s.tflite # TensorFlow Lite + yolov5s_edgetpu.tflite # TensorFlow Edge TPU +""" + import argparse -import time +import os +import sys from pathlib import Path import cv2 import torch import torch.backends.cudnn as cudnn -from models.export import load_checkpoint -from utils.datasets import LoadStreams, LoadImages -from utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression, apply_classifier, \ - scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path, save_one_box -from utils.plots import colors, plot_one_box -from utils.torch_utils import select_device, load_classifier, time_synchronized - - -def detect(opt): - source, weights, view_img, save_txt, imgsz = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size - save_img = not opt.nosave and not source.endswith('.txt') # save inference images - webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith( - ('rtsp://', 'rtmp://', 'http://', 'https://')) +FILE = Path(__file__).resolve() +ROOT = FILE.parents[0] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative + +from models.common import DetectMultiBackend +from utils.datasets import IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams +from utils.general import (LOGGER, check_file, check_img_size, check_imshow, check_requirements, colorstr, + increment_path, non_max_suppression, print_args, scale_coords, strip_optimizer, xyxy2xywh) +from utils.plots import Annotator, colors, save_one_box +from utils.torch_utils import select_device, time_sync + + +@torch.no_grad() +def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s) + source=ROOT / 'data/images', # file/dir/URL/glob, 0 for webcam + data=ROOT / 'data/coco128.yaml', # dataset.yaml path + imgsz=(640, 640), # inference size (height, width) + conf_thres=0.25, # confidence threshold + iou_thres=0.45, # NMS IOU threshold + max_det=1000, # maximum detections per image + device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu + view_img=False, # show results + save_txt=False, # save results to *.txt + save_conf=False, # save confidences in --save-txt labels + save_crop=False, # save cropped prediction boxes + nosave=False, # do not save images/videos + classes=None, # filter by class: --class 0, or --class 0 2 3 + agnostic_nms=False, # class-agnostic NMS + augment=False, # augmented inference + visualize=False, # visualize features + update=False, # update all models + project=ROOT / 'runs/detect', # save results to project/name + name='exp', # save results to project/name + exist_ok=False, # existing project/name ok, do not increment + line_thickness=3, # bounding box thickness (pixels) + hide_labels=False, # hide labels + hide_conf=False, # hide confidences + half=False, # use FP16 half-precision inference + dnn=False, # use OpenCV DNN for ONNX inference + ): + source = str(source) + save_img = not nosave and not source.endswith('.txt') # save inference images + is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) + is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) + webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file) + if is_url and is_file: + source = check_file(source) # download # Directories - save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok) # increment run + save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir - # Initialize - set_logging() - device = select_device(opt.device) - half = device.type != 'cpu' # half precision only supported on CUDA - # Load model - model, extras = load_checkpoint('ensemble', weights, device) # load FP32 model - stride = int(model.stride.max()) # model stride - imgsz = check_img_size(imgsz, s=stride) # check img_size - names = model.module.names if hasattr(model, 'module') else model.names # get class names - if half: - model.half() # to FP16 - - # Second-stage classifier - classify = False - if classify: - modelc = load_classifier(name='resnet101', n=2) # initialize - modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model']).to(device).eval() - - # Set Dataloader - vid_path, vid_writer = None, None + device = select_device(device) + model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) + stride, names, pt = model.stride, model.names, model.pt + imgsz = check_img_size(imgsz, s=stride) # check image size + + # Dataloader if webcam: view_img = check_imshow() cudnn.benchmark = True # set True to speed up constant image size inference - dataset = LoadStreams(source, img_size=imgsz, stride=stride) + dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt) + bs = len(dataset) # batch_size else: - dataset = LoadImages(source, img_size=imgsz, stride=stride) + dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt) + bs = 1 # batch_size + vid_path, vid_writer = [None] * bs, [None] * bs # Run inference - if device.type != 'cpu': - model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once - t0 = time.time() - for path, img, im0s, vid_cap in dataset: - img = torch.from_numpy(img).to(device) - img = img.half() if half else img.float() # uint8 to fp16/32 - img /= 255.0 # 0 - 255 to 0.0 - 1.0 - if img.ndimension() == 3: - img = img.unsqueeze(0) + model.warmup(imgsz=(1 if pt else bs, 3, *imgsz)) # warmup + dt, seen = [0.0, 
0.0, 0.0], 0 + for path, im, im0s, vid_cap, s in dataset: + t1 = time_sync() + im = torch.from_numpy(im).to(device) + im = im.half() if model.fp16 else im.float() # uint8 to fp16/32 + im /= 255 # 0 - 255 to 0.0 - 1.0 + if len(im.shape) == 3: + im = im[None] # expand for batch dim + t2 = time_sync() + dt[0] += t2 - t1 # Inference - t1 = time_synchronized() - pred = model(img, augment=opt.augment)[0] + visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False + pred = model(im, augment=augment, visualize=visualize) + t3 = time_sync() + dt[1] += t3 - t2 - # Apply NMS - pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, opt.classes, opt.agnostic_nms, - max_det=opt.max_det) - t2 = time_synchronized() + # NMS + pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det) + dt[2] += time_sync() - t3 - # Apply Classifier - if classify: - pred = apply_classifier(pred, modelc, img, im0s) + # Second-stage classifier (optional) + # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s) - # Process detections - for i, det in enumerate(pred): # detections per image + # Process predictions + for i, det in enumerate(pred): # per image + seen += 1 if webcam: # batch_size >= 1 - p, s, im0, frame = path[i], f'{i}: ', im0s[i].copy(), dataset.count + p, im0, frame = path[i], im0s[i].copy(), dataset.count + s += f'{i}: ' else: - p, s, im0, frame = path, '', im0s.copy(), getattr(dataset, 'frame', 0) + p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0) p = Path(p) # to Path - save_path = str(save_dir / p.name) # img.jpg - txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # img.txt - s += '%gx%g ' % img.shape[2:] # print string + save_path = str(save_dir / p.name) # im.jpg + txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt + s += '%gx%g ' % im.shape[2:] # print string gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh - imc = im0.copy() if opt.save_crop else im0 # for opt.save_crop + imc = im0.copy() if save_crop else im0 # for save_crop + annotator = Annotator(im0, line_width=line_thickness, example=str(names)) if len(det): # Rescale boxes from img_size to im0 size - det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round() + det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round() # Print results for c in det[:, -1].unique(): @@ -102,21 +159,19 @@ def detect(opt): for *xyxy, conf, cls in reversed(det): if save_txt: # Write to file xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh - line = (cls, *xywh, conf) if opt.save_conf else (cls, *xywh) # label format + line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format with open(txt_path + '.txt', 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') - if save_img or opt.save_crop or view_img: # Add bbox to image + if save_img or save_crop or view_img: # Add bbox to image c = int(cls) # integer class - label = None if opt.hide_labels else (names[c] if opt.hide_conf else f'{names[c]} {conf:.2f}') - plot_one_box(xyxy, im0, label=label, color=colors(c, True), line_thickness=opt.line_thickness) - if opt.save_crop: + label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') + annotator.box_label(xyxy, label, color=colors(c, True)) + if save_crop: save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', 
BGR=True) - # Print time (inference + NMS) - print(f'{s}Done. ({t2 - t1:.3f}s)') - # Stream results + im0 = annotator.result() if view_img: cv2.imshow(str(p), im0) cv2.waitKey(1) # 1 millisecond @@ -126,59 +181,72 @@ def detect(opt): if dataset.mode == 'image': cv2.imwrite(save_path, im0) else: # 'video' or 'stream' - if vid_path != save_path: # new video - vid_path = save_path - if isinstance(vid_writer, cv2.VideoWriter): - vid_writer.release() # release previous video writer + if vid_path[i] != save_path: # new video + vid_path[i] = save_path + if isinstance(vid_writer[i], cv2.VideoWriter): + vid_writer[i].release() # release previous video writer if vid_cap: # video fps = vid_cap.get(cv2.CAP_PROP_FPS) w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) else: # stream fps, w, h = 30, im0.shape[1], im0.shape[0] - save_path += '.mp4' - vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) - vid_writer.write(im0) + save_path = str(Path(save_path).with_suffix('.mp4')) # force *.mp4 suffix on results videos + vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) + vid_writer[i].write(im0) + # Print time (inference-only) + LOGGER.info(f'{s}Done. ({t3 - t2:.3f}s)') + + # Print results + t = tuple(x / seen * 1E3 for x in dt) # speeds per image + LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) if save_txt or save_img: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' - print(f"Results saved to {save_dir}{s}") - - print(f'Done. ({time.time() - t0:.3f}s)') + LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") + if update: + strip_optimizer(weights) # update model (to fix SourceChangeWarning) -if __name__ == '__main__': +def parse_opt(): parser = argparse.ArgumentParser() - parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)') - parser.add_argument('--source', type=str, default='data/images', help='source') # file/folder, 0 for webcam - parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)') - parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold') - parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS') - parser.add_argument('--max-det', type=int, default=1000, help='maximum number of detections per image') + parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model path(s)') + parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob, 0 for webcam') + parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path') + parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w') + parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold') + parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold') + parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image') parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu')
-    parser.add_argument('--view-img', action='store_true', help='display results')
+    parser.add_argument('--view-img', action='store_true', help='show results')
     parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
     parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
     parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes')
     parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
-    parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')
+    parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3')
     parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
     parser.add_argument('--augment', action='store_true', help='augmented inference')
+    parser.add_argument('--visualize', action='store_true', help='visualize features')
     parser.add_argument('--update', action='store_true', help='update all models')
-    parser.add_argument('--project', default='runs/detect', help='save results to project/name')
+    parser.add_argument('--project', default=ROOT / 'runs/detect', help='save results to project/name')
     parser.add_argument('--name', default='exp', help='save results to project/name')
     parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
     parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)')
     parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels')
     parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences')
+    parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
+    parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
     opt = parser.parse_args()
-    print(opt)
-    check_requirements(exclude=('tensorboard', 'pycocotools', 'thop'))
-
-    with torch.no_grad():
-        if opt.update:  # update all models (to fix SourceChangeWarning)
-            for opt.weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']:
-                detect(opt=opt)
-                strip_optimizer(opt.weights)
-        else:
-            detect(opt=opt)
+    opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1  # expand
+    print_args(FILE.stem, opt)
+    return opt
+
+
+def main(opt):
+    check_requirements(exclude=('tensorboard', 'thop'))
+    run(**vars(opt))
+
+
+if __name__ == "__main__":
+    opt = parse_opt()
+    main(opt)
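With inference factored into run() and the parse_opt()/main() pair, detect.py now doubles as an importable module. A minimal sketch of programmatic use (assumes the YOLOv5 root is on sys.path and that yolov5s.pt is present or auto-downloadable):

    import detect

    # equivalent to: python detect.py --weights yolov5s.pt --source data/images
    detect.run(weights='yolov5s.pt', source='data/images', imgsz=(640, 640), conf_thres=0.25)

diff --git a/export.py b/export.py
new file mode 100644
index 000000000000..2d4a68e62f89
--- /dev/null
+++ b/export.py
@@ -0,0 +1,559 @@
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+"""
+Export a YOLOv5 PyTorch model to other formats.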
TensorFlow exports authored by https://github.com/zldrobit + +Format | `export.py --include` | Model +--- | --- | --- +PyTorch | - | yolov5s.pt +TorchScript | `torchscript` | yolov5s.torchscript +ONNX | `onnx` | yolov5s.onnx +OpenVINO | `openvino` | yolov5s_openvino_model/ +TensorRT | `engine` | yolov5s.engine +CoreML | `coreml` | yolov5s.mlmodel +TensorFlow SavedModel | `saved_model` | yolov5s_saved_model/ +TensorFlow GraphDef | `pb` | yolov5s.pb +TensorFlow Lite | `tflite` | yolov5s.tflite +TensorFlow Edge TPU | `edgetpu` | yolov5s_edgetpu.tflite +TensorFlow.js | `tfjs` | yolov5s_web_model/ + +Requirements: + $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu # CPU + $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow # GPU + +Usage: + $ python path/to/export.py --weights yolov5s.pt --include torchscript onnx openvino engine coreml tflite ... + +Inference: + $ python path/to/detect.py --weights yolov5s.pt # PyTorch + yolov5s.torchscript # TorchScript + yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn + yolov5s.xml # OpenVINO + yolov5s.engine # TensorRT + yolov5s.mlmodel # CoreML (MacOS-only) + yolov5s_saved_model # TensorFlow SavedModel + yolov5s.pb # TensorFlow GraphDef + yolov5s.tflite # TensorFlow Lite + yolov5s_edgetpu.tflite # TensorFlow Edge TPU + +TensorFlow.js: + $ cd .. && git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example + $ npm install + $ ln -s ../../yolov5/yolov5s_web_model public/yolov5s_web_model + $ npm start +""" + +import argparse +import json +import os +import platform +import subprocess +import sys +import time +import warnings +from pathlib import Path + +import pandas as pd +import torch +import torch.nn as nn +from torch.utils.mobile_optimizer import optimize_for_mobile + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[0] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative + +from models.common import Conv +from models.experimental import attempt_load +from models.yolo import Detect +from utils.activations import SiLU +from utils.datasets import LoadImages +from utils.general import (LOGGER, check_dataset, check_img_size, check_requirements, check_version, colorstr, + file_size, print_args, url2file) +from utils.torch_utils import select_device + + +def export_formats(): + # YOLOv5 export formats + x = [['PyTorch', '-', '.pt', True], + ['TorchScript', 'torchscript', '.torchscript', True], + ['ONNX', 'onnx', '.onnx', True], + ['OpenVINO', 'openvino', '_openvino_model', False], + ['TensorRT', 'engine', '.engine', True], + ['CoreML', 'coreml', '.mlmodel', False], + ['TensorFlow SavedModel', 'saved_model', '_saved_model', True], + ['TensorFlow GraphDef', 'pb', '.pb', True], + ['TensorFlow Lite', 'tflite', '.tflite', False], + ['TensorFlow Edge TPU', 'edgetpu', '_edgetpu.tflite', False], + ['TensorFlow.js', 'tfjs', '_web_model', False]] + return pd.DataFrame(x, columns=['Format', 'Argument', 'Suffix', 'GPU']) + + +def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:')): + # YOLOv5 TorchScript model export + try: + LOGGER.info(f'\n{prefix} starting export with torch {torch.__version__}...') + f = file.with_suffix('.torchscript') + + ts = torch.jit.trace(model, im, strict=False) + d = {"shape": im.shape, "stride": int(max(model.stride)), "names": model.names} + extra_files = 
{'config.txt': json.dumps(d)} # torch._C.ExtraFilesMap() + if optimize: # https://pytorch.org/tutorials/recipes/mobile_interpreter.html + optimize_for_mobile(ts)._save_for_lite_interpreter(str(f), _extra_files=extra_files) + else: + ts.save(str(f), _extra_files=extra_files) + + LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + return f + except Exception as e: + LOGGER.info(f'{prefix} export failure: {e}') + + +def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorstr('ONNX:')): + # YOLOv5 ONNX export + try: + check_requirements(('onnx',)) + import onnx + + LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...') + f = file.with_suffix('.onnx') + + torch.onnx.export(model, im, f, verbose=False, opset_version=opset, + training=torch.onnx.TrainingMode.TRAINING if train else torch.onnx.TrainingMode.EVAL, + do_constant_folding=not train, + input_names=['images'], + output_names=['output'], + dynamic_axes={'images': {0: 'batch', 2: 'height', 3: 'width'}, # shape(1,3,640,640) + 'output': {0: 'batch', 1: 'anchors'} # shape(1,25200,85) + } if dynamic else None) + + # Checks + model_onnx = onnx.load(f) # load onnx model + onnx.checker.check_model(model_onnx) # check onnx model + # LOGGER.info(onnx.helper.printable_graph(model_onnx.graph)) # print + + # Simplify + if simplify: + try: + check_requirements(('onnx-simplifier',)) + import onnxsim + + LOGGER.info(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...') + model_onnx, check = onnxsim.simplify( + model_onnx, + dynamic_input_shape=dynamic, + input_shapes={'images': list(im.shape)} if dynamic else None) + assert check, 'assert check failed' + onnx.save(model_onnx, f) + except Exception as e: + LOGGER.info(f'{prefix} simplifier failure: {e}') + LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + return f + except Exception as e: + LOGGER.info(f'{prefix} export failure: {e}') + + +def export_openvino(model, im, file, prefix=colorstr('OpenVINO:')): + # YOLOv5 OpenVINO export + try: + check_requirements(('openvino-dev',)) # requires openvino-dev: https://pypi.org/project/openvino-dev/ + import openvino.inference_engine as ie + + LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...') + f = str(file).replace('.pt', '_openvino_model' + os.sep) + + cmd = f"mo --input_model {file.with_suffix('.onnx')} --output_dir {f}" + subprocess.check_output(cmd, shell=True) + + LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + return f + except Exception as e: + LOGGER.info(f'\n{prefix} export failure: {e}') + + +def export_coreml(model, im, file, prefix=colorstr('CoreML:')): + # YOLOv5 CoreML export + try: + check_requirements(('coremltools',)) + import coremltools as ct + + LOGGER.info(f'\n{prefix} starting export with coremltools {ct.__version__}...') + f = file.with_suffix('.mlmodel') + + ts = torch.jit.trace(model, im, strict=False) # TorchScript model + ct_model = ct.convert(ts, inputs=[ct.ImageType('image', shape=im.shape, scale=1 / 255, bias=[0, 0, 0])]) + ct_model.save(f) + + LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + return ct_model, f + except Exception as e: + LOGGER.info(f'\n{prefix} export failure: {e}') + return None, None + + +def export_engine(model, im, file, train, half, simplify, workspace=4, verbose=False, prefix=colorstr('TensorRT:')): + # YOLOv5 TensorRT export https://developer.nvidia.com/tensorrt + try: + check_requirements(('tensorrt',)) + 
import tensorrt as trt + + if trt.__version__[0] == '7': # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012 + grid = model.model[-1].anchor_grid + model.model[-1].anchor_grid = [a[..., :1, :1, :] for a in grid] + export_onnx(model, im, file, 12, train, False, simplify) # opset 12 + model.model[-1].anchor_grid = grid + else: # TensorRT >= 8 + check_version(trt.__version__, '8.0.0', hard=True) # require tensorrt>=8.0.0 + export_onnx(model, im, file, 13, train, False, simplify) # opset 13 + onnx = file.with_suffix('.onnx') + + LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...') + assert im.device.type != 'cpu', 'export running on CPU but must be on GPU, i.e. `python export.py --device 0`' + assert onnx.exists(), f'failed to export ONNX file: {onnx}' + f = file.with_suffix('.engine') # TensorRT engine file + logger = trt.Logger(trt.Logger.INFO) + if verbose: + logger.min_severity = trt.Logger.Severity.VERBOSE + + builder = trt.Builder(logger) + config = builder.create_builder_config() + config.max_workspace_size = workspace * 1 << 30 + # config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace << 30) # fix TRT 8.4 deprecation notice + + flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)) + network = builder.create_network(flag) + parser = trt.OnnxParser(network, logger) + if not parser.parse_from_file(str(onnx)): + raise RuntimeError(f'failed to load ONNX file: {onnx}') + + inputs = [network.get_input(i) for i in range(network.num_inputs)] + outputs = [network.get_output(i) for i in range(network.num_outputs)] + LOGGER.info(f'{prefix} Network Description:') + for inp in inputs: + LOGGER.info(f'{prefix}\tinput "{inp.name}" with shape {inp.shape} and dtype {inp.dtype}') + for out in outputs: + LOGGER.info(f'{prefix}\toutput "{out.name}" with shape {out.shape} and dtype {out.dtype}') + + LOGGER.info(f'{prefix} building FP{16 if builder.platform_has_fast_fp16 else 32} engine in {f}') + if builder.platform_has_fast_fp16: + config.set_flag(trt.BuilderFlag.FP16) + with builder.build_engine(network, config) as engine, open(f, 'wb') as t: + t.write(engine.serialize()) + LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + return f + except Exception as e: + LOGGER.info(f'\n{prefix} export failure: {e}') + + +def export_saved_model(model, im, file, dynamic, + tf_nms=False, agnostic_nms=False, topk_per_class=100, topk_all=100, iou_thres=0.45, + conf_thres=0.25, keras=False, prefix=colorstr('TensorFlow SavedModel:')): + # YOLOv5 TensorFlow SavedModel export + try: + import tensorflow as tf + from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 + + from models.tf import TFDetect, TFModel + + LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') + f = str(file).replace('.pt', '_saved_model') + batch_size, ch, *imgsz = list(im.shape) # BCHW + + tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz) + im = tf.zeros((batch_size, *imgsz, ch)) # BHWC order for TensorFlow + _ = tf_model.predict(im, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres) + inputs = tf.keras.Input(shape=(*imgsz, ch), batch_size=None if dynamic else batch_size) + outputs = tf_model.predict(inputs, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres) + keras_model = tf.keras.Model(inputs=inputs, outputs=outputs) + keras_model.trainable = False + keras_model.summary() + if keras: + keras_model.save(f, 
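
The serialized .engine written above can be deserialized with the same TensorRT API for inference, mirroring what DetectMultiBackend does later in this patch. A rough sketch, assuming TensorRT >= 8 and a yolov5s.engine built on the same GPU:

import tensorrt as trt

logger = trt.Logger(trt.Logger.INFO)
with open('yolov5s.engine', 'rb') as f, trt.Runtime(logger) as runtime:
    engine = runtime.deserialize_cuda_engine(f.read())  # inverse of engine.serialize() above
context = engine.create_execution_context()
print([engine.get_binding_name(i) for i in range(engine.num_bindings)])  # ['images', 'output']
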
save_format='tf') + else: + m = tf.function(lambda x: keras_model(x)) # full model + spec = tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype) + m = m.get_concrete_function(spec) + frozen_func = convert_variables_to_constants_v2(m) + tfm = tf.Module() + tfm.__call__ = tf.function(lambda x: frozen_func(x)[0], [spec]) + tfm.__call__(im) + tf.saved_model.save( + tfm, + f, + options=tf.saved_model.SaveOptions(experimental_custom_gradients=False) if + check_version(tf.__version__, '2.6') else tf.saved_model.SaveOptions()) + LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + return keras_model, f + except Exception as e: + LOGGER.info(f'\n{prefix} export failure: {e}') + return None, None + + +def export_pb(keras_model, im, file, prefix=colorstr('TensorFlow GraphDef:')): + # YOLOv5 TensorFlow GraphDef *.pb export https://github.com/leimao/Frozen_Graph_TensorFlow + try: + import tensorflow as tf + from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 + + LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') + f = file.with_suffix('.pb') + + m = tf.function(lambda x: keras_model(x)) # full model + m = m.get_concrete_function(tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype)) + frozen_func = convert_variables_to_constants_v2(m) + frozen_func.graph.as_graph_def() + tf.io.write_graph(graph_or_graph_def=frozen_func.graph, logdir=str(f.parent), name=f.name, as_text=False) + + LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + return f + except Exception as e: + LOGGER.info(f'\n{prefix} export failure: {e}') + + +def export_tflite(keras_model, im, file, int8, data, ncalib, prefix=colorstr('TensorFlow Lite:')): + # YOLOv5 TensorFlow Lite export + try: + import tensorflow as tf + + LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') + batch_size, ch, *imgsz = list(im.shape) # BCHW + f = str(file).replace('.pt', '-fp16.tflite') + + converter = tf.lite.TFLiteConverter.from_keras_model(keras_model) + converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS] + converter.target_spec.supported_types = [tf.float16] + converter.optimizations = [tf.lite.Optimize.DEFAULT] + if int8: + from models.tf import representative_dataset_gen + dataset = LoadImages(check_dataset(data)['train'], img_size=imgsz, auto=False) # representative data + converter.representative_dataset = lambda: representative_dataset_gen(dataset, ncalib) + converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] + converter.target_spec.supported_types = [] + converter.inference_input_type = tf.uint8 # or tf.int8 + converter.inference_output_type = tf.uint8 # or tf.int8 + converter.experimental_new_quantizer = True + f = str(file).replace('.pt', '-int8.tflite') + + tflite_model = converter.convert() + open(f, "wb").write(tflite_model) + LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + return f + except Exception as e: + LOGGER.info(f'\n{prefix} export failure: {e}') + + +def export_edgetpu(keras_model, im, file, prefix=colorstr('Edge TPU:')): + # YOLOv5 Edge TPU export https://coral.ai/docs/edgetpu/models-intro/ + try: + cmd = 'edgetpu_compiler --version' + help_url = 'https://coral.ai/docs/edgetpu/compiler/' + assert platform.system() == 'Linux', f'export only supported on Linux. 
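
The -fp16.tflite file written by export_tflite can be exercised directly with the TFLite Interpreter, much like the TFLite branch of DetectMultiBackend further down in this patch. A minimal sketch, assuming a yolov5s-fp16.tflite next to the weights:

import numpy as np
import tensorflow as tf

interpreter = tf.lite.Interpreter(model_path='yolov5s-fp16.tflite')
interpreter.allocate_tensors()
inp, out = interpreter.get_input_details()[0], interpreter.get_output_details()[0]
im = np.zeros(inp['shape'], dtype=inp['dtype'])  # BHWC for the TF exports, e.g. (1, 640, 640, 3)
interpreter.set_tensor(inp['index'], im)
interpreter.invoke()
print(interpreter.get_tensor(out['index']).shape)
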
See {help_url}' + if subprocess.run(cmd + ' >/dev/null', shell=True).returncode != 0: + LOGGER.info(f'\n{prefix} export requires Edge TPU compiler. Attempting install from {help_url}') + sudo = subprocess.run('sudo --version >/dev/null', shell=True).returncode == 0 # sudo installed on system + for c in ['curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -', + 'echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list', + 'sudo apt-get update', + 'sudo apt-get install edgetpu-compiler']: + subprocess.run(c if sudo else c.replace('sudo ', ''), shell=True, check=True) + ver = subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1] + + LOGGER.info(f'\n{prefix} starting export with Edge TPU compiler {ver}...') + f = str(file).replace('.pt', '-int8_edgetpu.tflite') # Edge TPU model + f_tfl = str(file).replace('.pt', '-int8.tflite') # TFLite model + + cmd = f"edgetpu_compiler -s {f_tfl}" + subprocess.run(cmd, shell=True, check=True) + + LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + return f + except Exception as e: + LOGGER.info(f'\n{prefix} export failure: {e}') + + +def export_tfjs(keras_model, im, file, prefix=colorstr('TensorFlow.js:')): + # YOLOv5 TensorFlow.js export + try: + check_requirements(('tensorflowjs',)) + import re + + import tensorflowjs as tfjs + + LOGGER.info(f'\n{prefix} starting export with tensorflowjs {tfjs.__version__}...') + f = str(file).replace('.pt', '_web_model') # js dir + f_pb = file.with_suffix('.pb') # *.pb path + f_json = f + '/model.json' # *.json path + + cmd = f'tensorflowjs_converter --input_format=tf_frozen_model ' \ + f'--output_node_names="Identity,Identity_1,Identity_2,Identity_3" {f_pb} {f}' + subprocess.run(cmd, shell=True) + + json = open(f_json).read() + with open(f_json, 'w') as j: # sort JSON Identity_* in ascending order + subst = re.sub( + r'{"outputs": {"Identity.?.?": {"name": "Identity.?.?"}, ' + r'"Identity.?.?": {"name": "Identity.?.?"}, ' + r'"Identity.?.?": {"name": "Identity.?.?"}, ' + r'"Identity.?.?": {"name": "Identity.?.?"}}}', + r'{"outputs": {"Identity": {"name": "Identity"}, ' + r'"Identity_1": {"name": "Identity_1"}, ' + r'"Identity_2": {"name": "Identity_2"}, ' + r'"Identity_3": {"name": "Identity_3"}}}', + json) + j.write(subst) + + LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + return f + except Exception as e: + LOGGER.info(f'\n{prefix} export failure: {e}') + + +@torch.no_grad() +def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' + weights=ROOT / 'yolov5s.pt', # weights path + imgsz=(640, 640), # image (height, width) + batch_size=1, # batch size + device='cpu', # cuda device, i.e. 
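
Besides the argparse CLI at the bottom of the file, run() can also be called directly from Python. A small sketch, assuming the repository root is on sys.path and yolov5s.pt is available locally:

from export import run  # assumes the YOLOv5 repo root is on sys.path

files = run(weights='yolov5s.pt', imgsz=(640, 640), include=('torchscript', 'onnx'))
print(files)  # run() returns the list of exported files/dirs, per its return statement below
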
0 or 0,1,2,3 or cpu + include=('torchscript', 'onnx'), # include formats + half=False, # FP16 half-precision export + inplace=False, # set YOLOv5 Detect() inplace=True + train=False, # model.train() mode + optimize=False, # TorchScript: optimize for mobile + int8=False, # CoreML/TF INT8 quantization + dynamic=False, # ONNX/TF: dynamic axes + simplify=False, # ONNX: simplify model + opset=12, # ONNX: opset version + verbose=False, # TensorRT: verbose log + workspace=4, # TensorRT: workspace size (GB) + nms=False, # TF: add NMS to model + agnostic_nms=False, # TF: add agnostic NMS to model + topk_per_class=100, # TF.js NMS: topk per class to keep + topk_all=100, # TF.js NMS: topk for all classes to keep + iou_thres=0.45, # TF.js NMS: IoU threshold + conf_thres=0.25 # TF.js NMS: confidence threshold + ): + t = time.time() + include = [x.lower() for x in include] # to lowercase + formats = tuple(export_formats()['Argument'][1:]) # --include arguments + flags = [x in include for x in formats] + assert sum(flags) == len(include), f'ERROR: Invalid --include {include}, valid --include arguments are {formats}' + jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs = flags # export booleans + file = Path(url2file(weights) if str(weights).startswith(('http:/', 'https:/')) else weights) # PyTorch weights + + # Load PyTorch model + device = select_device(device) + assert not (device.type == 'cpu' and half), '--half only compatible with GPU export, i.e. use --device 0' + model = attempt_load(weights, map_location=device, inplace=True, fuse=True) # load FP32 model + nc, names = model.nc, model.names # number of classes, class names + + # Checks + imgsz *= 2 if len(imgsz) == 1 else 1 # expand + opset = 12 if ('openvino' in include) else opset # OpenVINO requires opset <= 12 + assert nc == len(names), f'Model class count {nc} != len(names) {len(names)}' + + # Input + gs = int(max(model.stride)) # grid size (max stride) + imgsz = [check_img_size(x, gs) for x in imgsz] # verify img_size are gs-multiples + im = torch.zeros(batch_size, 3, *imgsz).to(device) # image size(1,3,320,192) BCHW iDetection + + # Update model + if half: + im, model = im.half(), model.half() # to FP16 + model.train() if train else model.eval() # training mode = no Detect() layer grid construction + for k, m in model.named_modules(): + if isinstance(m, Conv): # assign export-friendly activations + if isinstance(m.act, nn.SiLU): + m.act = SiLU() + elif isinstance(m, Detect): + m.inplace = inplace + m.onnx_dynamic = dynamic + if hasattr(m, 'forward_export'): + m.forward = m.forward_export # assign custom forward (optional) + + for _ in range(2): + y = model(im) # dry runs + shape = tuple(y[0].shape) # model output shape + LOGGER.info(f"\n{colorstr('PyTorch:')} starting from {file} with output shape {shape} ({file_size(file):.1f} MB)") + + # Exports + f = [''] * 10 # exported filenames + warnings.filterwarnings(action='ignore', category=torch.jit.TracerWarning) # suppress TracerWarning + if jit: + f[0] = export_torchscript(model, im, file, optimize) + if engine: # TensorRT required before ONNX + f[1] = export_engine(model, im, file, train, half, simplify, workspace, verbose) + if onnx or xml: # OpenVINO requires ONNX + f[2] = export_onnx(model, im, file, opset, train, dynamic, simplify) + if xml: # OpenVINO + f[3] = export_openvino(model, im, file) + if coreml: + _, f[4] = export_coreml(model, im, file) + + # TensorFlow Exports + if any((saved_model, pb, tflite, edgetpu, tfjs)): + if int8 or edgetpu: # TFLite --int8 bug 
https://github.com/ultralytics/yolov5/issues/5707 + check_requirements(('flatbuffers==1.12',)) # required before `import tensorflow` + assert not (tflite and tfjs), 'TFLite and TF.js models must be exported separately, please pass only one type.' + model, f[5] = export_saved_model(model.cpu(), im, file, dynamic, tf_nms=nms or agnostic_nms or tfjs, + agnostic_nms=agnostic_nms or tfjs, topk_per_class=topk_per_class, + topk_all=topk_all, conf_thres=conf_thres, iou_thres=iou_thres) # keras model + if pb or tfjs: # pb prerequisite to tfjs + f[6] = export_pb(model, im, file) + if tflite or edgetpu: + f[7] = export_tflite(model, im, file, int8=int8 or edgetpu, data=data, ncalib=100) + if edgetpu: + f[8] = export_edgetpu(model, im, file) + if tfjs: + f[9] = export_tfjs(model, im, file) + + # Finish + f = [str(x) for x in f if x] # filter out '' and None + if any(f): + LOGGER.info(f'\nExport complete ({time.time() - t:.2f}s)' + f"\nResults saved to {colorstr('bold', file.parent.resolve())}" + f"\nDetect: python detect.py --weights {f[-1]}" + f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{f[-1]}')" + f"\nValidate: python val.py --weights {f[-1]}" + f"\nVisualize: https://netron.app") + return f # return list of exported files/dirs + + +def parse_opt(): + parser = argparse.ArgumentParser() + parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') + parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model.pt path(s)') + parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640, 640], help='image (h, w)') + parser.add_argument('--batch-size', type=int, default=1, help='batch size') + parser.add_argument('--device', default='cpu', help='cuda device, i.e. 
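
The export summary printed above already points at the round trip; spelled out, loading an exported file back through PyTorch Hub looks like the following sketch (the weights filename is assumed):

import torch

model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s.onnx')  # exported file from above
results = model('https://ultralytics.com/images/zidane.jpg')
results.print()
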
0 or 0,1,2,3 or cpu') + parser.add_argument('--half', action='store_true', help='FP16 half-precision export') + parser.add_argument('--inplace', action='store_true', help='set YOLOv5 Detect() inplace=True') + parser.add_argument('--train', action='store_true', help='model.train() mode') + parser.add_argument('--optimize', action='store_true', help='TorchScript: optimize for mobile') + parser.add_argument('--int8', action='store_true', help='CoreML/TF INT8 quantization') + parser.add_argument('--dynamic', action='store_true', help='ONNX/TF: dynamic axes') + parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model') + parser.add_argument('--opset', type=int, default=12, help='ONNX: opset version') + parser.add_argument('--verbose', action='store_true', help='TensorRT: verbose log') + parser.add_argument('--workspace', type=int, default=4, help='TensorRT: workspace size (GB)') + parser.add_argument('--nms', action='store_true', help='TF: add NMS to model') + parser.add_argument('--agnostic-nms', action='store_true', help='TF: add agnostic NMS to model') + parser.add_argument('--topk-per-class', type=int, default=100, help='TF.js NMS: topk per class to keep') + parser.add_argument('--topk-all', type=int, default=100, help='TF.js NMS: topk for all classes to keep') + parser.add_argument('--iou-thres', type=float, default=0.45, help='TF.js NMS: IoU threshold') + parser.add_argument('--conf-thres', type=float, default=0.25, help='TF.js NMS: confidence threshold') + parser.add_argument('--include', nargs='+', + default=['torchscript', 'onnx'], + help='torchscript, onnx, openvino, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs') + opt = parser.parse_args() + print_args(FILE.stem, opt) + return opt + + +def main(opt): + for opt.weights in (opt.weights if isinstance(opt.weights, list) else [opt.weights]): + run(**vars(opt)) + + +if __name__ == "__main__": + opt = parse_opt() + main(opt) diff --git a/hubconf.py b/hubconf.py index f74e70c85a65..39fa614b2e34 100644 --- a/hubconf.py +++ b/hubconf.py @@ -1,18 +1,21 @@ -"""YOLOv5 PyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5/ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +PyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5/ Usage: import torch model = torch.hub.load('ultralytics/yolov5', 'yolov5s') + model = torch.hub.load('ultralytics/yolov5:master', 'custom', 'path/to/yolov5s.onnx') # file from branch """ import torch def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None): - """Creates a specified YOLOv5 model + """Creates or loads a YOLOv5 model Arguments: - name (str): name of model, i.e. 
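
torch.hub.load forwards extra keyword arguments to these hubconf entrypoints, so the _create() parameters documented above are all reachable from the Hub call. A sketch, with the class count and device values purely illustrative:

import torch

model = torch.hub.load('ultralytics/yolov5', 'yolov5s', device='cpu', verbose=False)
model10 = torch.hub.load('ultralytics/yolov5', 'yolov5s', classes=10)  # rebuilt 10-class head,
                                                                       # COCO weights transferred where shapes match
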
'yolov5s' + name (str): model name 'yolov5s' or path 'path/to/best.pt' pretrained (bool): load pretrained weights into the model channels (int): number of input channels classes (int): number of model classes @@ -21,42 +24,44 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo device (str, torch.device, None): device to use for model parameters Returns: - YOLOv5 pytorch model + YOLOv5 model """ from pathlib import Path - from models.yolo import Model, attempt_load - from utils.general import check_requirements, set_logging - from utils.google_utils import attempt_download + from models.common import AutoShape, DetectMultiBackend + from models.yolo import Model + from utils.downloads import attempt_download + from utils.general import LOGGER, check_requirements, intersect_dicts, logging from utils.torch_utils import select_device - check_requirements(Path(__file__).parent / 'requirements.txt', exclude=('tensorboard', 'pycocotools', 'thop')) - set_logging(verbose=verbose) - - fname = Path(name).with_suffix('.pt') # checkpoint filename + if not verbose: + LOGGER.setLevel(logging.WARNING) + check_requirements(exclude=('tensorboard', 'thop', 'opencv-python')) + name = Path(name) + path = name.with_suffix('.pt') if name.suffix == '' else name # checkpoint path try: + device = select_device(('0' if torch.cuda.is_available() else 'cpu') if device is None else device) + if pretrained and channels == 3 and classes == 80: - model = attempt_load(fname, map_location=torch.device('cpu')) # download/load FP32 model + model = DetectMultiBackend(path, device=device) # download/load FP32 model + # model = models.experimental.attempt_load(path, map_location=device) # download/load FP32 model else: - cfg = list((Path(__file__).parent / 'models').rglob(f'{name}.yaml'))[0] # model.yaml path + cfg = list((Path(__file__).parent / 'models').rglob(f'{path.stem}.yaml'))[0] # model.yaml path model = Model(cfg, channels, classes) # create model if pretrained: - attempt_download(fname) # download if not found locally - ckpt = torch.load(fname, map_location=torch.device('cpu')) # load - msd = model.state_dict() # model state_dict + ckpt = torch.load(attempt_download(path), map_location=device) # load csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 - csd = {k: v for k, v in csd.items() if msd[k].shape == v.shape} # filter + csd = intersect_dicts(csd, model.state_dict(), exclude=['anchors']) # intersect model.load_state_dict(csd, strict=False) # load if len(ckpt['model'].names) == classes: model.names = ckpt['model'].names # set class names attribute if autoshape: - model = model.autoshape() # for file/URI/PIL/cv2/np inputs and NMS - device = select_device('0' if torch.cuda.is_available() else 'cpu') if device is None else torch.device(device) + model = AutoShape(model) # for file/URI/PIL/cv2/np inputs and NMS return model.to(device) except Exception as e: help_url = 'https://github.com/ultralytics/yolov5/issues/36' - s = 'Cache may be out of date, try `force_reload=True`. See %s for help.' % help_url + s = f'{e}. Cache may be out of date, try `force_reload=True` or see {help_url} for help.' 
raise Exception(s) from e @@ -65,6 +70,11 @@ def custom(path='path/to/model.pt', autoshape=True, verbose=True, device=None): return _create(path, autoshape=autoshape, verbose=verbose, device=device) +def yolov5n(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None): + # YOLOv5-nano model https://github.com/ultralytics/yolov5 + return _create('yolov5n', pretrained, channels, classes, autoshape, verbose, device) + + def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None): # YOLOv5-small model https://github.com/ultralytics/yolov5 return _create('yolov5s', pretrained, channels, classes, autoshape, verbose, device) @@ -85,6 +95,11 @@ def yolov5x(pretrained=True, channels=3, classes=80, autoshape=True, verbose=Tru return _create('yolov5x', pretrained, channels, classes, autoshape, verbose, device) +def yolov5n6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None): + # YOLOv5-nano-P6 model https://github.com/ultralytics/yolov5 + return _create('yolov5n6', pretrained, channels, classes, autoshape, verbose, device) + + def yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None): # YOLOv5-small-P6 model https://github.com/ultralytics/yolov5 return _create('yolov5s6', pretrained, channels, classes, autoshape, verbose, device) @@ -110,16 +125,19 @@ def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=Tr # model = custom(path='path/to/model.pt') # custom # Verify inference + from pathlib import Path + import cv2 import numpy as np from PIL import Image imgs = ['data/images/zidane.jpg', # filename - 'https://github.com/ultralytics/yolov5/releases/download/v1.0/zidane.jpg', # URI + Path('data/images/zidane.jpg'), # Path + 'https://ultralytics.com/images/zidane.jpg', # URI cv2.imread('data/images/bus.jpg')[:, :, ::-1], # OpenCV Image.open('data/images/bus.jpg'), # PIL np.zeros((320, 640, 3))] # numpy - results = model(imgs) # batched inference + results = model(imgs, size=320) # batched inference results.print() results.save() diff --git a/models/common.py b/models/common.py index 4211db406c3d..115e3c3145ff 100644 --- a/models/common.py +++ b/models/common.py @@ -1,39 +1,44 @@ -# YOLOv5 common modules +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Common modules +""" +import json import math +import platform +import warnings +from collections import OrderedDict, namedtuple from copy import copy from pathlib import Path +import cv2 import numpy as np import pandas as pd import requests import torch import torch.nn as nn +import yaml from PIL import Image from torch.cuda import amp -from utils.datasets import letterbox -from utils.general import non_max_suppression, make_divisible, scale_coords, increment_path, xyxy2xywh, save_one_box -from utils.plots import colors, plot_one_box -from utils.torch_utils import time_synchronized +from utils.datasets import exif_transpose, letterbox +from utils.general import (LOGGER, check_requirements, check_suffix, check_version, colorstr, increment_path, + make_divisible, non_max_suppression, scale_coords, xywh2xyxy, xyxy2xywh) +from utils.plots import Annotator, colors, save_one_box +from utils.torch_utils import copy_attr, time_sync def autopad(k, p=None): # kernel, padding # Pad to 'same' if p is None: - p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad + p = k // 2 if isinstance(k, int) else (x // 2 for x in k) # auto-pad return p -def DWConv(c1, c2, k=1, s=1, act=True): - # Depthwise 
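
The verify-inference block above only prints and saves; the returned Detections object (reworked later in models/common.py in this same patch) also exposes raw tensors and a pandas view. A usage sketch continuing from results above (the pandas column names are the conventional ones, not visible in this hunk):

print(results.xyxy[0])           # (n, 6) tensor of xyxy, conf, cls for the first image
df = results.pandas().xyxy[0]    # DataFrame; columns assumed: xmin, ymin, xmax, ymax, confidence, class, name
print(df[df['name'] == 'person'])
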
convolution - return Conv(c1, c2, k, s, g=math.gcd(c1, c2), act=act) - - class Conv(nn.Module): # Standard convolution def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups - super(Conv, self).__init__() + super().__init__() self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False) self.bn = nn.BatchNorm2d(c2) self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity()) @@ -41,10 +46,16 @@ def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, k def forward(self, x): return self.act(self.bn(self.conv(x))) - def fuseforward(self, x): + def forward_fuse(self, x): return self.act(self.conv(x)) +class DWConv(Conv): + # Depth-wise convolution class + def __init__(self, c1, c2, k=1, s=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups + super().__init__(c1, c2, k, s, g=math.gcd(c1, c2), act=act) + + class TransformerLayer(nn.Module): # Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance) def __init__(self, c, num_heads): @@ -70,31 +81,21 @@ def __init__(self, c1, c2, num_heads, num_layers): if c1 != c2: self.conv = Conv(c1, c2) self.linear = nn.Linear(c2, c2) # learnable position embedding - self.tr = nn.Sequential(*[TransformerLayer(c2, num_heads) for _ in range(num_layers)]) + self.tr = nn.Sequential(*(TransformerLayer(c2, num_heads) for _ in range(num_layers))) self.c2 = c2 def forward(self, x): if self.conv is not None: x = self.conv(x) b, _, w, h = x.shape - p = x.flatten(2) - p = p.unsqueeze(0) - p = p.transpose(0, 3) - p = p.squeeze(3) - e = self.linear(p) - x = p + e - - x = self.tr(x) - x = x.unsqueeze(3) - x = x.transpose(0, 3) - x = x.reshape(b, self.c2, w, h) - return x + p = x.flatten(2).permute(2, 0, 1) + return self.tr(p + self.linear(p)).permute(1, 2, 0).reshape(b, self.c2, w, h) class Bottleneck(nn.Module): # Standard bottleneck def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion - super(Bottleneck, self).__init__() + super().__init__() c_ = int(c2 * e) # hidden channels self.cv1 = Conv(c1, c_, 1, 1) self.cv2 = Conv(c_, c2, 3, 1, g=g) @@ -107,35 +108,35 @@ def forward(self, x): class BottleneckCSP(nn.Module): # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super(BottleneckCSP, self).__init__() + super().__init__() c_ = int(c2 * e) # hidden channels self.cv1 = Conv(c1, c_, 1, 1) self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False) self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False) self.cv4 = Conv(2 * c_, c2, 1, 1) self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3) - self.act = nn.LeakyReLU(0.1, inplace=True) - self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) + self.act = nn.SiLU() + self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n))) def forward(self, x): y1 = self.cv3(self.m(self.cv1(x))) y2 = self.cv2(x) - return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1)))) + return self.cv4(self.act(self.bn(torch.cat((y1, y2), 1)))) class C3(nn.Module): # CSP Bottleneck with 3 convolutions def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super(C3, self).__init__() + super().__init__() c_ = int(c2 * e) # hidden channels self.cv1 = Conv(c1, c_, 1, 1) self.cv2 = Conv(c1, c_, 1, 1) - 
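
A quick way to see what autopad() and the Conv wrapper above buy you is a shape check: with the default p=None, spatial dimensions are preserved at stride 1. A minimal sketch, assuming the repo root is importable:

import torch
from models.common import Conv, autopad

print(autopad(3), autopad(5))  # 1 2 - 'same' padding for odd kernels
m = Conv(3, 16, k=3, s=1)
print(m(torch.zeros(1, 3, 64, 64)).shape)  # torch.Size([1, 16, 64, 64]), spatial size preserved
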
self.cv3 = Conv(2 * c_, c2, 1) # act=FReLU(c2) - self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) - # self.m = nn.Sequential(*[CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)]) + self.cv3 = Conv(2 * c_, c2, 1) # optional act=FReLU(c2) + self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n))) + # self.m = nn.Sequential(*(CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n))) def forward(self, x): - return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), dim=1)) + return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), 1)) class C3TR(C3): @@ -146,10 +147,26 @@ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): self.m = TransformerBlock(c_, c_, 4, n) +class C3SPP(C3): + # C3 module with SPP() + def __init__(self, c1, c2, k=(5, 9, 13), n=1, shortcut=True, g=1, e=0.5): + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2 * e) + self.m = SPP(c_, c_, k) + + +class C3Ghost(C3): + # C3 module with GhostBottleneck() + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2 * e) # hidden channels + self.m = nn.Sequential(*(GhostBottleneck(c_, c_) for _ in range(n))) + + class SPP(nn.Module): - # Spatial pyramid pooling layer used in YOLOv3-SPP + # Spatial Pyramid Pooling (SPP) layer https://arxiv.org/abs/1406.4729 def __init__(self, c1, c2, k=(5, 9, 13)): - super(SPP, self).__init__() + super().__init__() c_ = c1 // 2 # hidden channels self.cv1 = Conv(c1, c_, 1, 1) self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1) @@ -157,21 +174,69 @@ def __init__(self, c1, c2, k=(5, 9, 13)): def forward(self, x): x = self.cv1(x) - return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1)) + with warnings.catch_warnings(): + warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning + return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1)) + + +class SPPF(nn.Module): + # Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher + def __init__(self, c1, c2, k=5): # equivalent to SPP(k=(5, 9, 13)) + super().__init__() + c_ = c1 // 2 # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c_ * 4, c2, 1, 1) + self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2) + + def forward(self, x): + x = self.cv1(x) + with warnings.catch_warnings(): + warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning + y1 = self.m(x) + y2 = self.m(y1) + return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1)) class Focus(nn.Module): # Focus wh information into c-space def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups - super(Focus, self).__init__() + super().__init__() self.conv = Conv(c1 * 4, c2, k, s, p, g, act) # self.contract = Contract(gain=2) def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2) - return self.conv(torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1)) + return self.conv(torch.cat((x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]), 1)) # return self.conv(self.contract(x)) +class GhostConv(nn.Module): + # Ghost Convolution https://github.com/huawei-noah/ghostnet + def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups + super().__init__() + c_ = c2 // 2 # hidden channels + self.cv1 = Conv(c1, c_, k, s, None, g, act) + self.cv2 = Conv(c_, c_, 5, 1, None, c_, act) + + def forward(self, x): + y = self.cv1(x) + return 
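
The "equivalent to SPP(k=(5, 9, 13))" comment on SPPF above can be checked numerically: stacking the single 5x5 max-pool reproduces the 5/9/13 pyramid, and the two modules share parameter shapes. A sketch of that check:

import torch
from models.common import SPP, SPPF

spp, sppf = SPP(64, 64).eval(), SPPF(64, 64, k=5).eval()
sppf.load_state_dict(spp.state_dict())  # cv1/cv2 shapes line up between the two modules
x = torch.rand(1, 64, 32, 32)
with torch.no_grad():
    print(torch.allclose(spp(x), sppf(x), atol=1e-6))  # True: m5(m5(x)) == m9(x), m5(m5(m5(x))) == m13(x)
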
torch.cat((y, self.cv2(y)), 1) + + +class GhostBottleneck(nn.Module): + # Ghost Bottleneck https://github.com/huawei-noah/ghostnet + def __init__(self, c1, c2, k=3, s=1): # ch_in, ch_out, kernel, stride + super().__init__() + c_ = c2 // 2 + self.conv = nn.Sequential(GhostConv(c1, c_, 1, 1), # pw + DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw + GhostConv(c_, c2, 1, 1, act=False)) # pw-linear + self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False), + Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity() + + def forward(self, x): + return self.conv(x) + self.shortcut(x) + + class Contract(nn.Module): # Contract width-height into channels, i.e. x(1,64,80,80) to x(1,256,40,40) def __init__(self, gain=2): @@ -179,11 +244,11 @@ def __init__(self, gain=2): self.gain = gain def forward(self, x): - N, C, H, W = x.size() # assert (H / s == 0) and (W / s == 0), 'Indivisible gain' + b, c, h, w = x.size() # assert (h / s == 0) and (W / s == 0), 'Indivisible gain' s = self.gain - x = x.view(N, C, H // s, s, W // s, s) # x(1,64,40,2,40,2) + x = x.view(b, c, h // s, s, w // s, s) # x(1,64,40,2,40,2) x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # x(1,2,2,64,40,40) - return x.view(N, C * s * s, H // s, W // s) # x(1,256,40,40) + return x.view(b, c * s * s, h // s, w // s) # x(1,256,40,40) class Expand(nn.Module): @@ -193,67 +258,276 @@ def __init__(self, gain=2): self.gain = gain def forward(self, x): - N, C, H, W = x.size() # assert C / s ** 2 == 0, 'Indivisible gain' + b, c, h, w = x.size() # assert C / s ** 2 == 0, 'Indivisible gain' s = self.gain - x = x.view(N, s, s, C // s ** 2, H, W) # x(1,2,2,16,80,80) + x = x.view(b, s, s, c // s ** 2, h, w) # x(1,2,2,16,80,80) x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # x(1,16,80,2,80,2) - return x.view(N, C // s ** 2, H * s, W * s) # x(1,16,160,160) + return x.view(b, c // s ** 2, h * s, w * s) # x(1,16,160,160) class Concat(nn.Module): # Concatenate a list of tensors along dimension def __init__(self, dimension=1): - super(Concat, self).__init__() + super().__init__() self.d = dimension def forward(self, x): return torch.cat(x, self.d) -class NMS(nn.Module): - # Non-Maximum Suppression (NMS) module - conf = 0.25 # confidence threshold - iou = 0.45 # IoU threshold - classes = None # (optional list) filter by class - max_det = 1000 # maximum number of detections per image - - def __init__(self): - super(NMS, self).__init__() +class DetectMultiBackend(nn.Module): + # YOLOv5 MultiBackend class for python inference on various backends + def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, data=None, fp16=False): + # Usage: + # PyTorch: weights = *.pt + # TorchScript: *.torchscript + # ONNX Runtime: *.onnx + # ONNX OpenCV DNN: *.onnx with --dnn + # OpenVINO: *.xml + # CoreML: *.mlmodel + # TensorRT: *.engine + # TensorFlow SavedModel: *_saved_model + # TensorFlow GraphDef: *.pb + # TensorFlow Lite: *.tflite + # TensorFlow Edge TPU: *_edgetpu.tflite + from models.experimental import attempt_download, attempt_load # scoped to avoid circular import - def forward(self, x): - return non_max_suppression(x[0], self.conf, iou_thres=self.iou, classes=self.classes, max_det=self.max_det) + super().__init__() + w = str(weights[0] if isinstance(weights, list) else weights) + pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs = self.model_type(w) # get backend + stride, names = 64, [f'class{i}' for i in range(1000)] # assign defaults + w = attempt_download(w) # download if not local + fp16 &= 
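
Contract and Expand above are exact inverses for the same gain, which a round trip makes concrete. A small sketch:

import torch
from models.common import Contract, Expand

x = torch.rand(1, 64, 80, 80)
y = Contract(gain=2)(x)       # (1, 256, 40, 40): 2x2 spatial blocks folded into channels
z = Expand(gain=2)(y)         # (1, 64, 80, 80): blocks unfolded back
print(torch.equal(x, z))      # True
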
(pt or jit or onnx or engine) and device.type != 'cpu' # FP16 + if data: # data.yaml path (optional) + with open(data, errors='ignore') as f: + names = yaml.safe_load(f)['names'] # class names + + if pt: # PyTorch + model = attempt_load(weights if isinstance(weights, list) else w, map_location=device) + stride = max(int(model.stride.max()), 32) # model stride + names = model.module.names if hasattr(model, 'module') else model.names # get class names + model.half() if fp16 else model.float() + self.model = model # explicitly assign for to(), cpu(), cuda(), half() + elif jit: # TorchScript + LOGGER.info(f'Loading {w} for TorchScript inference...') + extra_files = {'config.txt': ''} # model metadata + model = torch.jit.load(w, _extra_files=extra_files) + model.half() if fp16 else model.float() + if extra_files['config.txt']: + d = json.loads(extra_files['config.txt']) # extra_files dict + stride, names = int(d['stride']), d['names'] + elif dnn: # ONNX OpenCV DNN + LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...') + check_requirements(('opencv-python>=4.5.4',)) + net = cv2.dnn.readNetFromONNX(w) + elif onnx: # ONNX Runtime + LOGGER.info(f'Loading {w} for ONNX Runtime inference...') + cuda = torch.cuda.is_available() + check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime')) + import onnxruntime + providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider'] + session = onnxruntime.InferenceSession(w, providers=providers) + elif xml: # OpenVINO + LOGGER.info(f'Loading {w} for OpenVINO inference...') + check_requirements(('openvino-dev',)) # requires openvino-dev: https://pypi.org/project/openvino-dev/ + import openvino.inference_engine as ie + core = ie.IECore() + if not Path(w).is_file(): # if not *.xml + w = next(Path(w).glob('*.xml')) # get *.xml file from *_openvino_model dir + network = core.read_network(model=w, weights=Path(w).with_suffix('.bin')) # *.xml, *.bin paths + executable_network = core.load_network(network, device_name='CPU', num_requests=1) + elif engine: # TensorRT + LOGGER.info(f'Loading {w} for TensorRT inference...') + import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download + check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=7.0.0 + Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr')) + logger = trt.Logger(trt.Logger.INFO) + with open(w, 'rb') as f, trt.Runtime(logger) as runtime: + model = runtime.deserialize_cuda_engine(f.read()) + bindings = OrderedDict() + fp16 = False # default updated below + for index in range(model.num_bindings): + name = model.get_binding_name(index) + dtype = trt.nptype(model.get_binding_dtype(index)) + shape = tuple(model.get_binding_shape(index)) + data = torch.from_numpy(np.empty(shape, dtype=np.dtype(dtype))).to(device) + bindings[name] = Binding(name, dtype, shape, data, int(data.data_ptr())) + if model.binding_is_input(index) and dtype == np.float16: + fp16 = True + binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items()) + context = model.create_execution_context() + batch_size = bindings['images'].shape[0] + elif coreml: # CoreML + LOGGER.info(f'Loading {w} for CoreML inference...') + import coremltools as ct + model = ct.models.MLModel(w) + else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU) + if saved_model: # SavedModel + LOGGER.info(f'Loading {w} for TensorFlow SavedModel inference...') + import tensorflow as tf + keras = False # assume TF1 saved_model + model = 
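
In use, DetectMultiBackend picks the loader branch above from the weights suffix alone, so swapping backends is a one-line change. A sketch, assuming an exported yolov5s.onnx exists:

import torch
from models.common import DetectMultiBackend

model = DetectMultiBackend('yolov5s.onnx', device=torch.device('cpu'))  # backend chosen by suffix
model.warmup(imgsz=(1, 3, 640, 640))  # no-op here: only non-CPU devices are warmed up
y = model(torch.zeros(1, 3, 640, 640))
print(model.stride, y.shape)
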
tf.keras.models.load_model(w) if keras else tf.saved_model.load(w) + elif pb: # GraphDef https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt + LOGGER.info(f'Loading {w} for TensorFlow GraphDef inference...') + import tensorflow as tf + + def wrap_frozen_graph(gd, inputs, outputs): + x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=""), []) # wrapped + ge = x.graph.as_graph_element + return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs)) + + gd = tf.Graph().as_graph_def() # graph_def + gd.ParseFromString(open(w, 'rb').read()) + frozen_func = wrap_frozen_graph(gd, inputs="x:0", outputs="Identity:0") + elif tflite or edgetpu: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python + try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu + from tflite_runtime.interpreter import Interpreter, load_delegate + except ImportError: + import tensorflow as tf + Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate, + if edgetpu: # Edge TPU https://coral.ai/software/#edgetpu-runtime + LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...') + delegate = {'Linux': 'libedgetpu.so.1', + 'Darwin': 'libedgetpu.1.dylib', + 'Windows': 'edgetpu.dll'}[platform.system()] + interpreter = Interpreter(model_path=w, experimental_delegates=[load_delegate(delegate)]) + else: # Lite + LOGGER.info(f'Loading {w} for TensorFlow Lite inference...') + interpreter = Interpreter(model_path=w) # load TFLite model + interpreter.allocate_tensors() # allocate + input_details = interpreter.get_input_details() # inputs + output_details = interpreter.get_output_details() # outputs + elif tfjs: + raise Exception('ERROR: YOLOv5 TF.js inference is not supported') + self.__dict__.update(locals()) # assign all variables to self + + def forward(self, im, augment=False, visualize=False, val=False): + # YOLOv5 MultiBackend inference + b, ch, h, w = im.shape # batch, channel, height, width + if self.pt or self.jit: # PyTorch + y = self.model(im) if self.jit else self.model(im, augment=augment, visualize=visualize) + return y if val else y[0] + elif self.dnn: # ONNX OpenCV DNN + im = im.cpu().numpy() # torch to numpy + self.net.setInput(im) + y = self.net.forward() + elif self.onnx: # ONNX Runtime + im = im.cpu().numpy() # torch to numpy + y = self.session.run([self.session.get_outputs()[0].name], {self.session.get_inputs()[0].name: im})[0] + elif self.xml: # OpenVINO + im = im.cpu().numpy() # FP32 + desc = self.ie.TensorDesc(precision='FP32', dims=im.shape, layout='NCHW') # Tensor Description + request = self.executable_network.requests[0] # inference request + request.set_blob(blob_name='images', blob=self.ie.Blob(desc, im)) # name=next(iter(request.input_blobs)) + request.infer() + y = request.output_blobs['output'].buffer # name=next(iter(request.output_blobs)) + elif self.engine: # TensorRT + assert im.shape == self.bindings['images'].shape, (im.shape, self.bindings['images'].shape) + self.binding_addrs['images'] = int(im.data_ptr()) + self.context.execute_v2(list(self.binding_addrs.values())) + y = self.bindings['output'].data + elif self.coreml: # CoreML + im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3) + im = Image.fromarray((im[0] * 255).astype('uint8')) + # im = im.resize((192, 320), Image.ANTIALIAS) + y = self.model.predict({'image': im}) # coordinates are xywh normalized + if 'confidence' in y: + box = 
xywh2xyxy(y['coordinates'] * [[w, h, w, h]]) # xyxy pixels + conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float) + y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1) + else: + k = 'var_' + str(sorted(int(k.replace('var_', '')) for k in y)[-1]) # output key + y = y[k] # output + else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU) + im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3) + if self.saved_model: # SavedModel + y = (self.model(im, training=False) if self.keras else self.model(im)).numpy() + elif self.pb: # GraphDef + y = self.frozen_func(x=self.tf.constant(im)).numpy() + else: # Lite or Edge TPU + input, output = self.input_details[0], self.output_details[0] + int8 = input['dtype'] == np.uint8 # is TFLite quantized uint8 model + if int8: + scale, zero_point = input['quantization'] + im = (im / scale + zero_point).astype(np.uint8) # de-scale + self.interpreter.set_tensor(input['index'], im) + self.interpreter.invoke() + y = self.interpreter.get_tensor(output['index']) + if int8: + scale, zero_point = output['quantization'] + y = (y.astype(np.float32) - zero_point) * scale # re-scale + y[..., :4] *= [w, h, w, h] # xywh normalized to pixels + + if isinstance(y, np.ndarray): + y = torch.tensor(y, device=self.device) + return (y, []) if val else y + + def warmup(self, imgsz=(1, 3, 640, 640)): + # Warmup model by running inference once + if any((self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb)): # warmup types + if self.device.type != 'cpu': # only warmup GPU models + im = torch.zeros(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device) # input + for _ in range(2 if self.jit else 1): # + self.forward(im) # warmup + + @staticmethod + def model_type(p='path/to/model.pt'): + # Return model type from model path, i.e. path='path/to/model.onnx' -> type=onnx + from export import export_formats + suffixes = list(export_formats().Suffix) + ['.xml'] # export suffixes + check_suffix(p, suffixes) # checks + p = Path(p).name # eliminate trailing separators + pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, xml2 = (s in p for s in suffixes) + xml |= xml2 # *_openvino_model or *.xml + tflite &= not edgetpu # *.tflite + return pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs class AutoShape(nn.Module): - # input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS + # YOLOv5 input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS conf = 0.25 # NMS confidence threshold iou = 0.45 # NMS IoU threshold - classes = None # (optional list) filter by class + agnostic = False # NMS class-agnostic + multi_label = False # NMS multiple labels per box + classes = None # (optional list) filter by class, i.e. = [0, 15, 16] for COCO persons, cats and dogs max_det = 1000 # maximum number of detections per image + amp = False # Automatic Mixed Precision (AMP) inference def __init__(self, model): - super(AutoShape, self).__init__() + super().__init__() + LOGGER.info('Adding AutoShape... ') + copy_attr(self, model, include=('yaml', 'nc', 'hyp', 'names', 'stride', 'abc'), exclude=()) # copy attributes + self.dmb = isinstance(model, DetectMultiBackend) # DetectMultiBackend() instance + self.pt = not self.dmb or model.pt # PyTorch model self.model = model.eval() - def autoshape(self): - print('AutoShape already enabled, skipping... 
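
model_type() above is also usable on its own to inspect which backend a given path would select; note it imports export_formats() from export.py, so the repo root must be importable. A sketch:

from models.common import DetectMultiBackend

flags = DetectMultiBackend.model_type('path/to/yolov5s.onnx')
pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs = flags
print(onnx)  # True - the .onnx suffix selects the ONNX Runtime (or OpenCV DNN) branch
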
') # model already converted to model.autoshape() + def _apply(self, fn): + # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers + self = super()._apply(fn) + if self.pt: + m = self.model.model.model[-1] if self.dmb else self.model.model[-1] # Detect() + m.stride = fn(m.stride) + m.grid = list(map(fn, m.grid)) + if isinstance(m.anchor_grid, list): + m.anchor_grid = list(map(fn, m.anchor_grid)) return self @torch.no_grad() def forward(self, imgs, size=640, augment=False, profile=False): # Inference from various sources. For height=640, width=1280, RGB images example inputs are: - # filename: imgs = 'data/images/zidane.jpg' - # URI: = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/zidane.jpg' + # file: imgs = 'data/images/zidane.jpg' # str or PosixPath + # URI: = 'https://ultralytics.com/images/zidane.jpg' # OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(640,1280,3) - # PIL: = Image.open('image.jpg') # HWC x(640,1280,3) + # PIL: = Image.open('image.jpg') or ImageGrab.grab() # HWC x(640,1280,3) # numpy: = np.zeros((640,1280,3)) # HWC # torch: = torch.zeros(16,3,320,640) # BCHW (scaled to size=640, 0-1 values) # multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] # list of images - t = [time_synchronized()] - p = next(self.model.parameters()) # for device and type + t = [time_sync()] + p = next(self.model.parameters()) if self.pt else torch.zeros(1) # for device and type + autocast = self.amp and (p.device.type != 'cpu') # Automatic Mixed Precision (AMP) inference if isinstance(imgs, torch.Tensor): # torch - with amp.autocast(enabled=p.device.type != 'cpu'): + with amp.autocast(autocast): return self.model(imgs.to(p.device).type_as(p), augment, profile) # inference # Pre-process @@ -261,50 +535,52 @@ def forward(self, imgs, size=640, augment=False, profile=False): shape0, shape1, files = [], [], [] # image and inference shapes, filenames for i, im in enumerate(imgs): f = f'image{i}' # filename - if isinstance(im, str): # filename or uri - im, f = np.asarray(Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im)), im + if isinstance(im, (str, Path)): # filename or uri + im, f = Image.open(requests.get(im, stream=True).raw if str(im).startswith('http') else im), im + im = np.asarray(exif_transpose(im)) elif isinstance(im, Image.Image): # PIL Image - im, f = np.asarray(im), getattr(im, 'filename', f) or f + im, f = np.asarray(exif_transpose(im)), getattr(im, 'filename', f) or f files.append(Path(f).with_suffix('.jpg').name) if im.shape[0] < 5: # image in CHW im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1) - im = im[:, :, :3] if im.ndim == 3 else np.tile(im[:, :, None], 3) # enforce 3ch input + im = im[..., :3] if im.ndim == 3 else np.tile(im[..., None], 3) # enforce 3ch input s = im.shape[:2] # HWC shape0.append(s) # image shape g = (size / max(s)) # gain shape1.append([y * g for y in s]) imgs[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update - shape1 = [make_divisible(x, int(self.stride.max())) for x in np.stack(shape1, 0).max(0)] # inference shape - x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs] # pad - x = np.stack(x, 0) if n > 1 else x[0][None] # stack - x = np.ascontiguousarray(x.transpose((0, 3, 1, 2))) # BHWC to BCHW - x = torch.from_numpy(x).to(p.device).type_as(p) / 255. 
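
The class attributes at the top of AutoShape are plain instance settings, so NMS behaviour can be tuned on a loaded model before inference. A sketch (the threshold values are illustrative):

import torch

model = torch.hub.load('ultralytics/yolov5', 'yolov5s')  # AutoShape-wrapped model
model.conf = 0.40      # NMS confidence threshold
model.iou = 0.50       # NMS IoU threshold
model.classes = [0]    # keep only COCO class 0 (person)
results = model('data/images/bus.jpg')
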
# uint8 to fp16/32 - t.append(time_synchronized()) - - with amp.autocast(enabled=p.device.type != 'cpu'): + shape1 = [make_divisible(x, self.stride) if self.pt else size for x in np.array(shape1).max(0)] # inf shape + x = [letterbox(im, shape1, auto=False)[0] for im in imgs] # pad + x = np.ascontiguousarray(np.array(x).transpose((0, 3, 1, 2))) # stack and BHWC to BCHW + x = torch.from_numpy(x).to(p.device).type_as(p) / 255 # uint8 to fp16/32 + t.append(time_sync()) + + with amp.autocast(autocast): # Inference - y = self.model(x, augment, profile)[0] # forward - t.append(time_synchronized()) + y = self.model(x, augment, profile) # forward + t.append(time_sync()) # Post-process - y = non_max_suppression(y, self.conf, iou_thres=self.iou, classes=self.classes, max_det=self.max_det) # NMS + y = non_max_suppression(y if self.dmb else y[0], self.conf, self.iou, self.classes, self.agnostic, + self.multi_label, max_det=self.max_det) # NMS for i in range(n): scale_coords(shape1, y[i][:, :4], shape0[i]) - t.append(time_synchronized()) + t.append(time_sync()) return Detections(imgs, y, files, t, self.names, x.shape) class Detections: - # detections class for YOLOv5 inference results - def __init__(self, imgs, pred, files, times=None, names=None, shape=None): - super(Detections, self).__init__() + # YOLOv5 detections class for inference results + def __init__(self, imgs, pred, files, times=(0, 0, 0, 0), names=None, shape=None): + super().__init__() d = pred[0].device # device - gn = [torch.tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.], device=d) for im in imgs] # normalizations + gn = [torch.tensor([*(im.shape[i] for i in [1, 0, 1, 0]), 1, 1], device=d) for im in imgs] # normalizations self.imgs = imgs # list of images as numpy arrays self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls) self.names = names # class names self.files = files # image filenames + self.times = times # profiling times self.xyxy = pred # xyxy pixels self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized @@ -313,51 +589,63 @@ def __init__(self, imgs, pred, files, times=None, names=None, shape=None): self.t = tuple((times[i + 1] - times[i]) * 1000 / self.n for i in range(3)) # timestamps (ms) self.s = shape # inference BCHW shape - def display(self, pprint=False, show=False, save=False, crop=False, render=False, save_dir=Path('')): + def display(self, pprint=False, show=False, save=False, crop=False, render=False, labels=True, save_dir=Path('')): + crops = [] for i, (im, pred) in enumerate(zip(self.imgs, self.pred)): - str = f'image {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} ' - if pred is not None: + s = f'image {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} ' # string + if pred.shape[0]: for c in pred[:, -1].unique(): n = (pred[:, -1] == c).sum() # detections per class - str += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string + s += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string if show or save or render or crop: - for *box, conf, cls in pred: # xyxy, confidence, class + annotator = Annotator(im, example=str(self.names)) + for *box, conf, cls in reversed(pred): # xyxy, confidence, class label = f'{self.names[int(cls)]} {conf:.2f}' if crop: - save_one_box(box, im, file=save_dir / 'crops' / self.names[int(cls)] / self.files[i]) + file = save_dir / 'crops' / self.names[int(cls)] / self.files[i] if save else None + crops.append({'box': box, 'conf': conf, 'cls': cls, 'label': label, + 'im': 
save_one_box(box, im, file=file, save=save)}) else: # all others - plot_one_box(box, im, label=label, color=colors(cls)) + annotator.box_label(box, label if labels else '', color=colors(cls)) + im = annotator.im + else: + s += '(no detections)' im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np if pprint: - print(str.rstrip(', ')) + LOGGER.info(s.rstrip(', ')) if show: im.show(self.files[i]) # show if save: f = self.files[i] im.save(save_dir / f) # save - print(f"{'Saved' * (i == 0)} {f}", end=',' if i < self.n - 1 else f' to {save_dir}\n') + if i == self.n - 1: + LOGGER.info(f"Saved {self.n} image{'s' * (self.n > 1)} to {colorstr('bold', save_dir)}") if render: self.imgs[i] = np.asarray(im) + if crop: + if save: + LOGGER.info(f'Saved results to {save_dir}\n') + return crops def print(self): self.display(pprint=True) # print results - print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' % self.t) + LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' % + self.t) - def show(self): - self.display(show=True) # show results + def show(self, labels=True): + self.display(show=True, labels=labels) # show results - def save(self, save_dir='runs/hub/exp'): - save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/hub/exp', mkdir=True) # increment save_dir - self.display(save=True, save_dir=save_dir) # save results + def save(self, labels=True, save_dir='runs/detect/exp'): + save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) # increment save_dir + self.display(save=True, labels=labels, save_dir=save_dir) # save results - def crop(self, save_dir='runs/hub/exp'): - save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/hub/exp', mkdir=True) # increment save_dir - self.display(crop=True, save_dir=save_dir) # crop results - print(f'Saved results to {save_dir}\n') + def crop(self, save=True, save_dir='runs/detect/exp'): + save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) if save else None + return self.display(crop=True, save=save, save_dir=save_dir) # crop results - def render(self): - self.display(render=True) # render results + def render(self, labels=True): + self.display(render=True, labels=labels) # render results return self.imgs def pandas(self): @@ -372,10 +660,11 @@ def pandas(self): def tolist(self): # return a list of Detections objects, i.e. 'for result in results.tolist():' - x = [Detections([self.imgs[i]], [self.pred[i]], self.names, self.s) for i in range(self.n)] - for d in x: - for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']: - setattr(d, k, getattr(d, k)[0]) # pop out of list + r = range(self.n) # iterable + x = [Detections([self.imgs[i]], [self.pred[i]], [self.files[i]], self.times, self.names, self.s) for i in r] + # for d in x: + # for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']: + # setattr(d, k, getattr(d, k)[0]) # pop out of list return x def __len__(self): @@ -385,7 +674,7 @@ def __len__(self): class Classify(nn.Module): # Classification head, i.e. 
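
With the changes above, crop() now returns its crops instead of only writing files, so downstream code can consume them in memory. A usage sketch:

crops = results.crop(save=False)       # nothing written to disk; list of crop dicts returned
for c in crops:
    print(c['label'], c['im'].shape)   # e.g. 'person 0.87' and an HWC numpy array
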
x(b,c1,20,20) to x(b,c2) def __init__(self, c1, c2, k=1, s=1, p=None, g=1): # ch_in, ch_out, kernel, stride, padding, groups - super(Classify, self).__init__() + super().__init__() self.aap = nn.AdaptiveAvgPool2d(1) # to x(b,c1,1,1) self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g) # to x(b,c2,1,1) self.flat = nn.Flatten() diff --git a/models/experimental.py b/models/experimental.py index afa787907104..1230f4656c8f 100644 --- a/models/experimental.py +++ b/models/experimental.py @@ -1,18 +1,22 @@ -# YOLOv5 experimental modules +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Experimental modules +""" +import math import numpy as np import torch import torch.nn as nn -from models.common import Conv, DWConv -from utils.google_utils import attempt_download +from models.common import Conv +from utils.downloads import attempt_download class CrossConv(nn.Module): # Cross Convolution Downsample def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False): # ch_in, ch_out, kernel, stride, groups, expansion, shortcut - super(CrossConv, self).__init__() + super().__init__() c_ = int(c2 * e) # hidden channels self.cv1 = Conv(c1, c_, (1, k), (1, s)) self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g) @@ -25,11 +29,11 @@ def forward(self, x): class Sum(nn.Module): # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070 def __init__(self, n, weight=False): # n: number of inputs - super(Sum, self).__init__() + super().__init__() self.weight = weight # apply weights boolean self.iter = range(n - 1) # iter object if weight: - self.w = nn.Parameter(-torch.arange(1., n) / 2, requires_grad=True) # layer weights + self.w = nn.Parameter(-torch.arange(1.0, n) / 2, requires_grad=True) # layer weights def forward(self, x): y = x[0] # no weight @@ -43,89 +47,69 @@ def forward(self, x): return y -class GhostConv(nn.Module): - # Ghost Convolution https://github.com/huawei-noah/ghostnet - def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups - super(GhostConv, self).__init__() - c_ = c2 // 2 # hidden channels - self.cv1 = Conv(c1, c_, k, s, None, g, act) - self.cv2 = Conv(c_, c_, 5, 1, None, c_, act) - - def forward(self, x): - y = self.cv1(x) - return torch.cat([y, self.cv2(y)], 1) - - -class GhostBottleneck(nn.Module): - # Ghost Bottleneck https://github.com/huawei-noah/ghostnet - def __init__(self, c1, c2, k=3, s=1): # ch_in, ch_out, kernel, stride - super(GhostBottleneck, self).__init__() - c_ = c2 // 2 - self.conv = nn.Sequential(GhostConv(c1, c_, 1, 1), # pw - DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw - GhostConv(c_, c2, 1, 1, act=False)) # pw-linear - self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False), - Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity() - - def forward(self, x): - return self.conv(x) + self.shortcut(x) - - class MixConv2d(nn.Module): - # Mixed Depthwise Conv https://arxiv.org/abs/1907.09595 - def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True): - super(MixConv2d, self).__init__() - groups = len(k) + # Mixed Depth-wise Conv https://arxiv.org/abs/1907.09595 + def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True): # ch_in, ch_out, kernel, stride, ch_strategy + super().__init__() + n = len(k) # number of convolutions if equal_ch: # equal c_ per group - i = torch.linspace(0, groups - 1E-6, c2).floor() # c2 indices - c_ = [(i == g).sum() for g in range(groups)] # intermediate channels + i = torch.linspace(0, n - 1E-6, c2).floor() # c2 indices + c_ = [(i == g).sum() for g in range(n)] # 
intermediate channels else: # equal weight.numel() per group - b = [c2] + [0] * groups - a = np.eye(groups + 1, groups, k=-1) + b = [c2] + [0] * n + a = np.eye(n + 1, n, k=-1) a -= np.roll(a, 1, axis=1) a *= np.array(k) ** 2 a[0] = 1 c_ = np.linalg.lstsq(a, b, rcond=None)[0].round() # solve for equal weight indices, ax = b - self.m = nn.ModuleList([nn.Conv2d(c1, int(c_[g]), k[g], s, k[g] // 2, bias=False) for g in range(groups)]) + self.m = nn.ModuleList( + [nn.Conv2d(c1, int(c_), k, s, k // 2, groups=math.gcd(c1, int(c_)), bias=False) for k, c_ in zip(k, c_)]) self.bn = nn.BatchNorm2d(c2) - self.act = nn.LeakyReLU(0.1, inplace=True) + self.act = nn.SiLU() def forward(self, x): - return x + self.act(self.bn(torch.cat([m(x) for m in self.m], 1))) + return self.act(self.bn(torch.cat([m(x) for m in self.m], 1))) class Ensemble(nn.ModuleList): # Ensemble of models def __init__(self): - super(Ensemble, self).__init__() + super().__init__() - def forward(self, x, augment=False): + def forward(self, x, augment=False, profile=False, visualize=False): y = [] for module in self: - y.append(module(x, augment)[0]) + y.append(module(x, augment, profile, visualize)[0]) # y = torch.stack(y).max(0)[0] # max ensemble # y = torch.stack(y).mean(0) # mean ensemble y = torch.cat(y, 1) # nms ensemble return y, None # inference, train output -def attempt_load(weights, map_location=None, inplace=True): +def attempt_load(weights, map_location=None, inplace=True, fuse=True): from models.yolo import Detect, Model # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a model = Ensemble() for w in weights if isinstance(weights, list) else [weights]: - attempt_download(w) - ckpt = torch.load(w, map_location=map_location) # load - model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval()) # FP32 model + ckpt = torch.load(attempt_download(w), map_location=map_location) # load + ckpt = (ckpt.get('ema') or ckpt['model']).float() # FP32 model + model.append(ckpt.fuse().eval() if fuse else ckpt.eval()) # fused or un-fused model in eval mode # Compatibility updates for m in model.modules(): - if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model]: - m.inplace = inplace # pytorch 1.7.0 compatibility - elif type(m) is Conv: - m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility + t = type(m) + if t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model): + m.inplace = inplace # torch 1.7.0 compatibility + if t is Detect: + if not isinstance(m.anchor_grid, list): # new Detect Layer compatibility + delattr(m, 'anchor_grid') + setattr(m, 'anchor_grid', [torch.zeros(1)] * m.nl) + elif t is Conv: + m._non_persistent_buffers_set = set() # torch 1.6.0 compatibility + elif t is nn.Upsample and not hasattr(m, 'recompute_scale_factor'): + m.recompute_scale_factor = None # torch 1.11.0 compatibility if len(model) == 1: return model[-1] # return model diff --git a/models/export.py b/models/export.py deleted file mode 100644 index 6043c84c6246..000000000000 --- a/models/export.py +++ /dev/null @@ -1,272 +0,0 @@ -"""Exports a YOLOv5 *.pt model to TorchScript, ONNX, CoreML formats - -Usage: - $ python path/to/models/export.py --weights yolov5s.pt --img 640 --batch 1 -""" - -import argparse -from copy import deepcopy -from pathlib import Path -import sys -import time -import os - -sys.path.append('./') # to run '$ python *.py' files in subdirectories - -import torch -import torch.nn as nn -from torch.utils.mobile_optimizer 
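attempt_load() above now folds download, EMA-preferred checkpoint selection and optional layer fusion into one call, and patches loaded modules for older torch versions. Typical use (a sketch; missing weight files are fetched by attempt_download):

    import torch
    from models.experimental import attempt_load

    model = attempt_load('yolov5s.pt', map_location='cpu')                      # single fused model
    ensemble = attempt_load(['yolov5s.pt', 'yolov5m.pt'], map_location='cpu')   # NMS ensemble
    im = torch.zeros(1, 3, 640, 640)
    pred = ensemble(im)[0]   # members' outputs concatenated on the prediction axis, ready for NMS
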
import optimize_for_mobile - -from sparseml.pytorch.utils import ModuleExporter -from sparseml.pytorch.sparsification.quantization import skip_onnx_input_quantize - -import models -from models.experimental import attempt_load -from models.yolo import Model -from utils.activations import Hardswish, SiLU -from utils.general import colorstr, check_img_size, check_requirements, file_size, set_logging -from utils.google_utils import attempt_download -from utils.sparse import SparseMLWrapper, check_download_sparsezoo_weights -from utils.torch_utils import select_device, intersect_dicts, is_parallel, torch_distributed_zero_first - - -def create_checkpoint(epoch, model, optimizer, ema, sparseml_wrapper, **kwargs): - pickle = not sparseml_wrapper.qat_active(epoch) # qat does not support pickled exports - ckpt_model = deepcopy(model.module if is_parallel(model) else model).float() - yaml = ckpt_model.yaml - if not pickle: - ckpt_model = ckpt_model.state_dict() - - return {'epoch': epoch, - 'model': ckpt_model, - 'optimizer': optimizer.state_dict(), - 'yaml': yaml, - 'hyp': model.hyp, - **ema.state_dict(pickle), - **sparseml_wrapper.state_dict(), - **kwargs} - - -def load_checkpoint(type_, weights, device, cfg=None, hyp=None, nc=None, recipe=None, resume=None, rank=-1): - with torch_distributed_zero_first(rank): - attempt_download(weights) # download if not found locally - weights = check_download_sparsezoo_weights(weights) # download from sparsezoo if zoo stub - ckpt = torch.load(weights[0] if isinstance(weights, list) or isinstance(weights, tuple) - else weights, map_location=device) # load checkpoint - start_epoch = ckpt['epoch'] + 1 if 'epoch' in ckpt else 0 - pickled = isinstance(ckpt['model'], nn.Module) - train_type = type_ == 'train' - ensemble_type = type_ == 'ensemble' - - if pickled and ensemble_type: - # load ensemble using pickled - cfg = None - model = attempt_load(weights, map_location=device) # load FP32 model - state_dict = model.state_dict() - else: - # load model from config and weights - cfg = cfg or (ckpt['yaml'] if 'yaml' in ckpt else None) or \ - (ckpt['model'].yaml if pickled else None) - model = Model(cfg, ch=3, nc=ckpt['nc'] if ('nc' in ckpt and not nc) else nc, - anchors=hyp.get('anchors') if hyp else None).to(device) - model_key = 'ema' if (not train_type and 'ema' in ckpt and ckpt['ema']) else 'model' - state_dict = ckpt[model_key].float().state_dict() if pickled else ckpt[model_key] - - # turn gradients for params back on in case they were removed - for p in model.parameters(): - p.requires_grad = True - - # load sparseml recipe for applying pruning and quantization - recipe = recipe or (ckpt['recipe'] if 'recipe' in ckpt else None) - sparseml_wrapper = SparseMLWrapper(model, recipe) - exclude_anchors = train_type and (cfg or hyp.get('anchors')) and not resume - loaded = False - - if not train_type: - # apply the recipe to create the final state of the model when not training - sparseml_wrapper.apply() - else: - # intialize the recipe for training and restore the weights before if no quantized weights - quantized_state_dict = any([name.endswith('.zero_point') for name in state_dict.keys()]) - if not quantized_state_dict: - state_dict = load_state_dict(model, state_dict, train=True, exclude_anchors=exclude_anchors) - loaded = True - sparseml_wrapper.initialize(start_epoch) - - if not loaded: - state_dict = load_state_dict(model, state_dict, train=train_type, exclude_anchors=exclude_anchors) - - model.float() - report = 'Transferred %g/%g items from %s' % (len(state_dict), 
len(model.state_dict()), weights) - - return model, { - 'ckpt': ckpt, - 'state_dict': state_dict, - 'start_epoch': start_epoch, - 'sparseml_wrapper': sparseml_wrapper, - 'report': report, - } - - -def load_state_dict(model, state_dict, train, exclude_anchors): - # fix older state_dict names not porting to the new model setup - state_dict = {key if not key.startswith("module.") else key[7:]: val for key, val in state_dict.items()} - - if train: - # load any missing weights from the model - state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=['anchor'] if exclude_anchors else []) - - model.load_state_dict(state_dict, strict=not train) # load - - return state_dict - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--weights', type=str, default='./yolov3.pt', help='weights path') # from yolov3/models/ - parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='image size') # height, width - parser.add_argument('--batch-size', type=int, default=1, help='batch size') - parser.add_argument('--device', default='cpu', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') - parser.add_argument('--include', nargs='+', default=['torchscript', 'onnx', 'coreml'], help='include formats') - parser.add_argument('--half', action='store_true', help='FP16 half-precision export') - parser.add_argument('--inplace', action='store_true', help='set YOLOv5 Detect() inplace=True') - parser.add_argument('--train', action='store_true', help='model.train() mode') - parser.add_argument('--optimize', action='store_true', help='optimize TorchScript for mobile') # TorchScript-only - parser.add_argument('--dynamic', action='store_true', help='dynamic ONNX axes') # ONNX-only - parser.add_argument('--simplify', action='store_true', help='simplify ONNX model') # ONNX-only - parser.add_argument('--opset-version', type=int, default=12, help='ONNX opset version') # ONNX-only - parser.add_argument("--remove-grid", action="store_true", help="remove export of Detect() layer grid") - opt = parser.parse_args() - opt.img_size *= 2 if len(opt.img_size) == 1 else 1 # expand - opt.include = [x.lower() for x in opt.include] - print(opt) - set_logging() - t = time.time() - - # Load PyTorch model - device = select_device(opt.device) - model, extras = load_checkpoint('ensemble', opt.weights, device) # load FP32 model - sparseml_wrapper = extras['sparseml_wrapper'] - labels = model.names - - # Checks - gs = int(max(model.stride)) # grid size (max stride) - opt.img_size = [check_img_size(x, gs) for x in opt.img_size] # verify img_size are gs-multiples - assert not (opt.device.lower() == 'cpu' and opt.half), '--half only compatible with GPU export, i.e. 
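The key normalization at the top of load_state_dict() exists because nn.DataParallel (and DDP) wrap the network in a .module attribute, so every saved key gains a 'module.' prefix. The round trip is easy to reproduce:

    import torch.nn as nn

    net = nn.Linear(4, 2)
    wrapped = nn.DataParallel(net)
    print(next(iter(wrapped.state_dict())))   # 'module.weight'
    sd = {k.replace('module.', '', 1): v for k, v in wrapped.state_dict().items()}
    net.load_state_dict(sd)                   # loads cleanly into the bare model
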
use --device 0' - - # Input - img = torch.zeros(opt.batch_size, 3, *opt.img_size).to(device) # image size(1,3,320,192) iDetection - - # Update model - if opt.half: - img, model = img.half(), model.half() # to FP16 - if opt.train: - model.train() # training mode (no grid construction in Detect layer) - else: - model.eval() - for k, m in model.named_modules(): - m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility - if isinstance(m, models.common.Conv): # assign export-friendly activations - if isinstance(m.act, nn.Hardswish): - m.act = Hardswish() - elif isinstance(m.act, nn.SiLU): - m.act = SiLU() - elif isinstance(m, models.yolo.Detect): - m.inplace = opt.inplace - m.onnx_dynamic = opt.dynamic - # m.forward = m.forward_export # assign forward (optional) - model.model[-1].export = not opt.remove_grid # set Detect() layer grid export - - for _ in range(2): - y = model(img) # dry runs - print(f"\n{colorstr('PyTorch:')} starting from {opt.weights} ({file_size(opt.weights):.1f} MB)") - - # TorchScript export ----------------------------------------------------------------------------------------------- - if 'torchscript' in opt.include or 'coreml' in opt.include: - prefix = colorstr('TorchScript:') - try: - print(f'\n{prefix} starting export with torch {torch.__version__}...') - f = opt.weights.replace('.pt', '.torchscript.pt') # filename - ts = torch.jit.trace(model, img, strict=False) - (optimize_for_mobile(ts) if opt.optimize else ts).save(f) - print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - except Exception as e: - print(f'{prefix} export failure: {e}') - - # ONNX export ------------------------------------------------------------------------------------------------------ - if 'onnx' in opt.include: - prefix = colorstr('ONNX:') - try: - import onnx - - print(f'{prefix} starting export with onnx {onnx.__version__}...') - f = opt.weights.replace('.pt', '.onnx') # filename - # export through SparseML so quantized and pruned graphs can be corrected - save_dir = Path(f).parent.absolute() - save_name = f.split(os.path.sep)[-1] - - # get the number of outputs so we know how to name and change dynamic axes - # nested outputs can be returned if model is exported with dynamic - def _count_outputs(outputs): - count = 0 - if isinstance(outputs, list) or isinstance(outputs, tuple): - for out in outputs: - count += _count_outputs(out) - else: - count += 1 - return count - - outputs = model(img) - num_outputs = _count_outputs(outputs) - input_names = ['input'] - output_names = [f'out_{i}' for i in range(num_outputs)] - dynamic_axes = {k: {0: 'batch'} for k in (input_names + output_names)} if opt.dynamic else None - exporter = ModuleExporter(model, save_dir) - exporter.export_onnx(img, name=save_name, convert_qat=True, - input_names=input_names, output_names=output_names, dynamic_axes=dynamic_axes) - try: - skip_onnx_input_quantize(f, f) - except: - pass - - # Checks - model_onnx = onnx.load(f) # load onnx model - onnx.checker.check_model(model_onnx) # check onnx model - # print(onnx.helper.printable_graph(model_onnx.graph)) # print - - # Simplify - if opt.simplify: - try: - check_requirements(['onnx-simplifier']) - import onnxsim - - print(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...') - model_onnx, check = onnxsim.simplify( - model_onnx, - dynamic_input_shape=opt.dynamic, - input_shapes={'images': list(img.shape)} if opt.dynamic else None) - assert check, 'assert check failed' - onnx.save(model_onnx, f) - except Exception as e: - 
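Stripped of the SparseML plumbing, the TorchScript branch above is a plain torch.jit.trace followed by an optional mobile pass. A self-contained miniature (stand-in model; the real script traces the loaded YOLO model with its dry-run input):

    import torch
    import torch.nn as nn
    from torch.utils.mobile_optimizer import optimize_for_mobile

    model = nn.Sequential(nn.Conv2d(3, 8, 3, 2, 1), nn.SiLU()).eval()
    im = torch.zeros(1, 3, 64, 64)                 # dry-run input fixes the traced shapes
    ts = torch.jit.trace(model, im, strict=False)
    optimize_for_mobile(ts).save('model.torchscript.pt')
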
print(f'{prefix} simplifier failure: {e}') - print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - except Exception as e: - print(f'{prefix} export failure: {e}') - - # CoreML export ---------------------------------------------------------------------------------------------------- - if 'coreml' in opt.include: - prefix = colorstr('CoreML:') - try: - import coremltools as ct - - print(f'{prefix} starting export with coremltools {ct.__version__}...') - assert opt.train, 'CoreML exports should be placed in model.train() mode with `python export.py --train`' - model = ct.convert(ts, inputs=[ct.ImageType('image', shape=img.shape, scale=1 / 255.0, bias=[0, 0, 0])]) - f = opt.weights.replace('.pt', '.mlmodel') # filename - model.save(f) - print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - except Exception as e: - print(f'{prefix} export failure: {e}') - - # Finish - print('\nExport complete (%.2fs). Visualize with https://github.com/lutzroeder/netron.' % (time.time() - t)) diff --git a/models/hub/anchors.yaml b/models/hub/anchors.yaml index a07a4dc72387..e4d7beb06e07 100644 --- a/models/hub/anchors.yaml +++ b/models/hub/anchors.yaml @@ -1,58 +1,59 @@ -# Default YOLOv5 anchors for COCO data +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# Default anchors for COCO data # P5 ------------------------------------------------------------------------------------------------------------------- # P5-640: anchors_p5_640: - - [ 10,13, 16,30, 33,23 ] # P3/8 - - [ 30,61, 62,45, 59,119 ] # P4/16 - - [ 116,90, 156,198, 373,326 ] # P5/32 + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 # P6 ------------------------------------------------------------------------------------------------------------------- # P6-640: thr=0.25: 0.9964 BPR, 5.54 anchors past thr, n=12, img_size=640, metric_all=0.281/0.716-mean/best, past_thr=0.469-mean: 9,11, 21,19, 17,41, 43,32, 39,70, 86,64, 65,131, 134,130, 120,265, 282,180, 247,354, 512,387 anchors_p6_640: - - [ 9,11, 21,19, 17,41 ] # P3/8 - - [ 43,32, 39,70, 86,64 ] # P4/16 - - [ 65,131, 134,130, 120,265 ] # P5/32 - - [ 282,180, 247,354, 512,387 ] # P6/64 + - [9,11, 21,19, 17,41] # P3/8 + - [43,32, 39,70, 86,64] # P4/16 + - [65,131, 134,130, 120,265] # P5/32 + - [282,180, 247,354, 512,387] # P6/64 # P6-1280: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1280, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 19,27, 44,40, 38,94, 96,68, 86,152, 180,137, 140,301, 303,264, 238,542, 436,615, 739,380, 925,792 anchors_p6_1280: - - [ 19,27, 44,40, 38,94 ] # P3/8 - - [ 96,68, 86,152, 180,137 ] # P4/16 - - [ 140,301, 303,264, 238,542 ] # P5/32 - - [ 436,615, 739,380, 925,792 ] # P6/64 + - [19,27, 44,40, 38,94] # P3/8 + - [96,68, 86,152, 180,137] # P4/16 + - [140,301, 303,264, 238,542] # P5/32 + - [436,615, 739,380, 925,792] # P6/64 # P6-1920: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1920, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 28,41, 67,59, 57,141, 144,103, 129,227, 270,205, 209,452, 455,396, 358,812, 653,922, 1109,570, 1387,1187 anchors_p6_1920: - - [ 28,41, 67,59, 57,141 ] # P3/8 - - [ 144,103, 129,227, 270,205 ] # P4/16 - - [ 209,452, 455,396, 358,812 ] # P5/32 - - [ 653,922, 1109,570, 1387,1187 ] # P6/64 + - [28,41, 67,59, 57,141] # P3/8 + - [144,103, 129,227, 270,205] # P4/16 + - [209,452, 455,396, 358,812] # P5/32 + - [653,922, 1109,570, 1387,1187] # P6/64 # P7 
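Each anchor list above is a flat row of (width, height) pairs in input pixels, one row per output stride; Detect() later rescales them to grid units. The bookkeeping in two lines:

    import torch

    anchors_p5_640 = [[10, 13, 16, 30, 33, 23],        # P3/8
                      [30, 61, 62, 45, 59, 119],       # P4/16
                      [116, 90, 156, 198, 373, 326]]   # P5/32
    a = torch.tensor(anchors_p5_640, dtype=torch.float32).view(3, 3, 2)  # (layer, anchor, wh)
    stride = torch.tensor([8., 16., 32.]).view(3, 1, 1)
    print(a / stride)   # anchors in grid cells, as stored on the Detect() layer
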
------------------------------------------------------------------------------------------------------------------- # P7-640: thr=0.25: 0.9962 BPR, 6.76 anchors past thr, n=15, img_size=640, metric_all=0.275/0.733-mean/best, past_thr=0.466-mean: 11,11, 13,30, 29,20, 30,46, 61,38, 39,92, 78,80, 146,66, 79,163, 149,150, 321,143, 157,303, 257,402, 359,290, 524,372 anchors_p7_640: - - [ 11,11, 13,30, 29,20 ] # P3/8 - - [ 30,46, 61,38, 39,92 ] # P4/16 - - [ 78,80, 146,66, 79,163 ] # P5/32 - - [ 149,150, 321,143, 157,303 ] # P6/64 - - [ 257,402, 359,290, 524,372 ] # P7/128 + - [11,11, 13,30, 29,20] # P3/8 + - [30,46, 61,38, 39,92] # P4/16 + - [78,80, 146,66, 79,163] # P5/32 + - [149,150, 321,143, 157,303] # P6/64 + - [257,402, 359,290, 524,372] # P7/128 # P7-1280: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1280, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 19,22, 54,36, 32,77, 70,83, 138,71, 75,173, 165,159, 148,334, 375,151, 334,317, 251,626, 499,474, 750,326, 534,814, 1079,818 anchors_p7_1280: - - [ 19,22, 54,36, 32,77 ] # P3/8 - - [ 70,83, 138,71, 75,173 ] # P4/16 - - [ 165,159, 148,334, 375,151 ] # P5/32 - - [ 334,317, 251,626, 499,474 ] # P6/64 - - [ 750,326, 534,814, 1079,818 ] # P7/128 + - [19,22, 54,36, 32,77] # P3/8 + - [70,83, 138,71, 75,173] # P4/16 + - [165,159, 148,334, 375,151] # P5/32 + - [334,317, 251,626, 499,474] # P6/64 + - [750,326, 534,814, 1079,818] # P7/128 # P7-1920: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1920, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 29,34, 81,55, 47,115, 105,124, 207,107, 113,259, 247,238, 222,500, 563,227, 501,476, 376,939, 749,711, 1126,489, 801,1222, 1618,1227 anchors_p7_1920: - - [ 29,34, 81,55, 47,115 ] # P3/8 - - [ 105,124, 207,107, 113,259 ] # P4/16 - - [ 247,238, 222,500, 563,227 ] # P5/32 - - [ 501,476, 376,939, 749,711 ] # P6/64 - - [ 1126,489, 801,1222, 1618,1227 ] # P7/128 + - [29,34, 81,55, 47,115] # P3/8 + - [105,124, 207,107, 113,259] # P4/16 + - [247,238, 222,500, 563,227] # P5/32 + - [501,476, 376,939, 749,711] # P6/64 + - [1126,489, 801,1222, 1618,1227] # P7/128 diff --git a/models/hub/yolov3-spp.yaml b/models/hub/yolov3-spp.yaml index 38dcc449f0d0..c66982158ce8 100644 --- a/models/hub/yolov3-spp.yaml +++ b/models/hub/yolov3-spp.yaml @@ -1,9 +1,9 @@ -# parameters +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple - -# anchors anchors: - [10,13, 16,30, 33,23] # P3/8 - [30,61, 62,45, 59,119] # P4/16 diff --git a/models/hub/yolov3-tiny.yaml b/models/hub/yolov3-tiny.yaml index ff7638cad3be..b28b44315248 100644 --- a/models/hub/yolov3-tiny.yaml +++ b/models/hub/yolov3-tiny.yaml @@ -1,9 +1,9 @@ -# parameters +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple - -# anchors anchors: - [10,14, 23,27, 37,58] # P4/16 - [81,82, 135,169, 344,319] # P5/32 diff --git a/models/hub/yolov3.yaml b/models/hub/yolov3.yaml index f2e761355469..d1ef91290a8d 100644 --- a/models/hub/yolov3.yaml +++ b/models/hub/yolov3.yaml @@ -1,9 +1,9 @@ -# parameters +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple - -# anchors anchors: - [10,13, 16,30, 33,23] # P3/8 - [30,61, 62,45, 59,119] # P4/16 @@ -28,7 +28,7 @@ backbone: # YOLOv3 head head: 
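The 'BPR' figures quoted in the comments above are best possible recall under AutoAnchor's ratio test: a label is recoverable if some anchor matches both its width and height within a factor of 4 (the comments quote thr=0.25, i.e. 1/4). A sketch of that metric, paraphrasing utils/autoanchor.py:

    import torch

    def bpr(wh, anchors, thr=4.0):
        # wh: (n, 2) label sizes, anchors: (m, 2), both in pixels
        r = wh[:, None] / anchors[None]         # (n, m, 2) width and height ratios
        x = torch.min(r, 1 / r).min(2)[0]       # worst of the two ratios
        best = x.max(1)[0]                      # best anchor per label
        return (best > 1 / thr).float().mean()  # fraction of labels covered

    anchors = torch.tensor([[10., 13], [16, 30], [33, 23]])
    labels = torch.tensor([[12., 14], [40, 25], [300, 200]])
    print(bpr(labels, anchors))                 # the 300x200 label falls past thr
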
[[-1, 1, Bottleneck, [1024, False]], - [-1, 1, Conv, [512, [1, 1]]], + [-1, 1, Conv, [512, 1, 1]], [-1, 1, Conv, [1024, 3, 1]], [-1, 1, Conv, [512, 1, 1]], [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large) diff --git a/models/hub/yolov5-bifpn.yaml b/models/hub/yolov5-bifpn.yaml new file mode 100644 index 000000000000..504815f5cfa0 --- /dev/null +++ b/models/hub/yolov5-bifpn.yaml @@ -0,0 +1,48 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 + ] + +# YOLOv5 v6.0 BiFPN head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14, 6], 1, Concat, [1]], # cat P4 <--- BiFPN change + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/models/hub/yolov5-fpn.yaml b/models/hub/yolov5-fpn.yaml index e772bffecbbc..a23e9c6fbf9f 100644 --- a/models/hub/yolov5-fpn.yaml +++ b/models/hub/yolov5-fpn.yaml @@ -1,42 +1,42 @@ -# parameters +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple - -# anchors anchors: - [10,13, 16,30, 33,23] # P3/8 - [30,61, 62,45, 59,119] # P4/16 - [116,90, 156,198, 373,326] # P5/32 -# YOLOv5 backbone +# YOLOv5 v6.0 backbone backbone: # [from, number, module, args] - [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 - [-1, 3, Bottleneck, [128]], + [-1, 3, C3, [128]], [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 - [-1, 9, BottleneckCSP, [256]], + [-1, 6, C3, [256]], [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 - [-1, 9, BottleneckCSP, [512]], + [-1, 9, C3, [512]], [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 - [-1, 1, SPP, [1024, [5, 9, 13]]], - [-1, 6, BottleneckCSP, [1024]], # 9 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 ] -# YOLOv5 FPN head +# YOLOv5 v6.0 FPN head head: - [[-1, 3, BottleneckCSP, [1024, False]], # 10 (P5/32-large) + [[-1, 3, C3, [1024, False]], # 10 (P5/32-large) [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 6], 1, Concat, [1]], # cat backbone P4 [-1, 1, Conv, [512, 1, 1]], - [-1, 3, BottleneckCSP, [512, False]], # 14 (P4/16-medium) + [-1, 3, C3, [512, False]], # 14 (P4/16-medium) [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 4], 1, Concat, [1]], # cat backbone P3 [-1, 1, Conv, [256, 1, 1]], - [-1, 3, BottleneckCSP, [256, False]], # 18 (P3/8-small) + [-1, 3, C3, [256, False]], # 18 (P3/8-small) [[18, 14, 10], 1, Detect, [nc, anchors]], # 
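The recurring SPP → SPPF swap in these configs relies on max-pooling being idempotent: two chained 5×5 pools cover a 9×9 window and three cover 13×13, so SPPF(1024, 5) reproduces SPP(1024, [5, 9, 13]) with fewer passes. The identity is easy to verify:

    import torch
    import torch.nn as nn

    m5 = nn.MaxPool2d(5, stride=1, padding=2)
    x = torch.randn(1, 8, 32, 32)
    y1 = m5(x)
    y2 = m5(y1)
    y3 = m5(y2)
    assert torch.equal(y2, nn.MaxPool2d(9, 1, 4)(x))    # 5x5 twice  == 9x9 once
    assert torch.equal(y3, nn.MaxPool2d(13, 1, 6)(x))   # 5x5 thrice == 13x13 once
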
Detect(P3, P4, P5) ] diff --git a/models/hub/yolov5-p2.yaml b/models/hub/yolov5-p2.yaml index 0633a90fd065..554117dda59a 100644 --- a/models/hub/yolov5-p2.yaml +++ b/models/hub/yolov5-p2.yaml @@ -1,54 +1,54 @@ -# parameters +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple +anchors: 3 # AutoAnchor evolves 3 anchors per P output layer -# anchors -anchors: 3 - -# YOLOv5 backbone +# YOLOv5 v6.0 backbone backbone: # [from, number, module, args] - [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 - [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 - [ -1, 3, C3, [ 128 ] ], - [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 - [ -1, 9, C3, [ 256 ] ], - [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 - [ -1, 9, C3, [ 512 ] ], - [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 7-P5/32 - [ -1, 1, SPP, [ 1024, [ 5, 9, 13 ] ] ], - [ -1, 3, C3, [ 1024, False ] ], # 9 + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 ] -# YOLOv5 head +# YOLOv5 v6.0 head with (P2, P3, P4, P5) outputs head: - [ [ -1, 1, Conv, [ 512, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 - [ -1, 3, C3, [ 512, False ] ], # 13 - - [ -1, 1, Conv, [ 256, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 - [ -1, 3, C3, [ 256, False ] ], # 17 (P3/8-small) - - [ -1, 1, Conv, [ 128, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 2 ], 1, Concat, [ 1 ] ], # cat backbone P2 - [ -1, 1, C3, [ 128, False ] ], # 21 (P2/4-xsmall) - - [ -1, 1, Conv, [ 128, 3, 2 ] ], - [ [ -1, 18 ], 1, Concat, [ 1 ] ], # cat head P3 - [ -1, 3, C3, [ 256, False ] ], # 24 (P3/8-small) - - [ -1, 1, Conv, [ 256, 3, 2 ] ], - [ [ -1, 14 ], 1, Concat, [ 1 ] ], # cat head P4 - [ -1, 3, C3, [ 512, False ] ], # 27 (P4/16-medium) - - [ -1, 1, Conv, [ 512, 3, 2 ] ], - [ [ -1, 10 ], 1, Concat, [ 1 ] ], # cat head P5 - [ -1, 3, C3, [ 1024, False ] ], # 30 (P5/32-large) - - [ [ 24, 27, 30 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5) + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [128, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 2], 1, Concat, [1]], # cat backbone P2 + [-1, 1, C3, [128, False]], # 21 (P2/4-xsmall) + + [-1, 1, Conv, [128, 3, 2]], + [[-1, 18], 1, Concat, [1]], # cat head P3 + [-1, 3, C3, [256, False]], # 24 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 27 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 30 (P5/32-large) + + [[21, 24, 27, 30], 1, Detect, [nc, anchors]], # Detect(P2, P3, P4, P5) ] diff --git a/models/hub/yolov5-p34.yaml b/models/hub/yolov5-p34.yaml new file mode 100644 index 000000000000..dbf0f850083e --- /dev/null +++ b/models/hub/yolov5-p34.yaml @@ -0,0 +1,41 @@ +# YOLOv5 🚀 by Ultralytics, 
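Likewise, the v6.0 backbones replace Focus(64, 3) with Conv(64, 6, 2, 2): the Focus space-to-depth slicing followed by a 3×3 conv reads the same 6×6 input neighborhood per output cell as a single stride-2 6×6 conv, which is simpler and export-friendly. Shapes line up as follows:

    import torch
    import torch.nn as nn

    x = torch.randn(1, 3, 64, 64)
    focus_in = torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2],
                          x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1)   # (1, 12, 32, 32)
    conv6 = nn.Conv2d(3, 64, kernel_size=6, stride=2, padding=2, bias=False)
    print(focus_in.shape, conv6(x).shape)   # both downsample 2x to 32x32
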
GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 0.33 # model depth multiple +width_multiple: 0.50 # layer channel multiple +anchors: 3 # AutoAnchor evolves 3 anchors per P output layer + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [ [ -1, 1, Conv, [ 64, 6, 2, 2 ] ], # 0-P1/2 + [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 + [ -1, 3, C3, [ 128 ] ], + [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 + [ -1, 6, C3, [ 256 ] ], + [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 + [ -1, 9, C3, [ 512 ] ], + [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 7-P5/32 + [ -1, 3, C3, [ 1024 ] ], + [ -1, 1, SPPF, [ 1024, 5 ] ], # 9 + ] + +# YOLOv5 v6.0 head with (P3, P4) outputs +head: + [ [ -1, 1, Conv, [ 512, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 + [ -1, 3, C3, [ 512, False ] ], # 13 + + [ -1, 1, Conv, [ 256, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 + [ -1, 3, C3, [ 256, False ] ], # 17 (P3/8-small) + + [ -1, 1, Conv, [ 256, 3, 2 ] ], + [ [ -1, 14 ], 1, Concat, [ 1 ] ], # cat head P4 + [ -1, 3, C3, [ 512, False ] ], # 20 (P4/16-medium) + + [ [ 17, 20 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4) + ] diff --git a/models/hub/yolov5-p6.yaml b/models/hub/yolov5-p6.yaml index 3728a118f090..a17202f22044 100644 --- a/models/hub/yolov5-p6.yaml +++ b/models/hub/yolov5-p6.yaml @@ -1,56 +1,56 @@ -# parameters +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple +anchors: 3 # AutoAnchor evolves 3 anchors per P output layer -# anchors -anchors: 3 - -# YOLOv5 backbone +# YOLOv5 v6.0 backbone backbone: # [from, number, module, args] - [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 - [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 - [ -1, 3, C3, [ 128 ] ], - [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 - [ -1, 9, C3, [ 256 ] ], - [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 - [ -1, 9, C3, [ 512 ] ], - [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32 - [ -1, 3, C3, [ 768 ] ], - [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64 - [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ], - [ -1, 3, C3, [ 1024, False ] ], # 11 + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 + [-1, 3, C3, [768]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 11 ] -# YOLOv5 head +# YOLOv5 v6.0 head with (P3, P4, P5, P6) outputs head: - [ [ -1, 1, Conv, [ 768, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5 - [ -1, 3, C3, [ 768, False ] ], # 15 - - [ -1, 1, Conv, [ 512, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 - [ -1, 3, C3, [ 512, False ] ], # 19 - - [ -1, 1, Conv, [ 256, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 - [ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small) - - [ -1, 1, Conv, [ 256, 3, 2 ] ], - [ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4 - [ -1, 3, C3, [ 512, False ] ], # 26 (P4/16-medium) - - [ -1, 1, Conv, [ 512, 3, 2 ] ], - [ [ -1, 16 ], 1, Concat, [ 1 ] ], # cat head P5 - [ -1, 3, C3, [ 768, False ] ], # 29 (P5/32-large) - - 
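yolov5-p34.yaml above bakes small-model multiples (0.33/0.50) into the config; parse_model() turns each yaml row into actual repeats and channels with them. In miniature:

    import math

    def scaled(n, c, gd=0.33, gw=0.50):
        n = max(round(n * gd), 1) if n > 1 else n   # depth: scaled repeat count
        c = math.ceil(c * gw / 8) * 8               # width: channels kept 8-divisible
        return n, c

    print(scaled(9, 512))   # a '[-1, 9, C3, [512]]' row becomes 3 repeats of 256 channels
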
[ -1, 1, Conv, [ 768, 3, 2 ] ], - [ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6 - [ -1, 3, C3, [ 1024, False ] ], # 32 (P5/64-xlarge) - - [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6) + [[-1, 1, Conv, [768, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P5 + [-1, 3, C3, [768, False]], # 15 + + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 19 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 23 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 20], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 26 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 16], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [768, False]], # 29 (P5/32-large) + + [-1, 1, Conv, [768, 3, 2]], + [[-1, 12], 1, Concat, [1]], # cat head P6 + [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) + + [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) ] diff --git a/models/hub/yolov5-p7.yaml b/models/hub/yolov5-p7.yaml index ca8f8492ce0e..edd7d13a34a6 100644 --- a/models/hub/yolov5-p7.yaml +++ b/models/hub/yolov5-p7.yaml @@ -1,67 +1,67 @@ -# parameters +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple +anchors: 3 # AutoAnchor evolves 3 anchors per P output layer -# anchors -anchors: 3 - -# YOLOv5 backbone +# YOLOv5 v6.0 backbone backbone: # [from, number, module, args] - [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 - [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 - [ -1, 3, C3, [ 128 ] ], - [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 - [ -1, 9, C3, [ 256 ] ], - [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 - [ -1, 9, C3, [ 512 ] ], - [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32 - [ -1, 3, C3, [ 768 ] ], - [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64 - [ -1, 3, C3, [ 1024 ] ], - [ -1, 1, Conv, [ 1280, 3, 2 ] ], # 11-P7/128 - [ -1, 1, SPP, [ 1280, [ 3, 5 ] ] ], - [ -1, 3, C3, [ 1280, False ] ], # 13 + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 + [-1, 3, C3, [768]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 + [-1, 3, C3, [1024]], + [-1, 1, Conv, [1280, 3, 2]], # 11-P7/128 + [-1, 3, C3, [1280]], + [-1, 1, SPPF, [1280, 5]], # 13 ] -# YOLOv5 head +# YOLOv5 v6.0 head with (P3, P4, P5, P6, P7) outputs head: - [ [ -1, 1, Conv, [ 1024, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 10 ], 1, Concat, [ 1 ] ], # cat backbone P6 - [ -1, 3, C3, [ 1024, False ] ], # 17 + [[-1, 1, Conv, [1024, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 10], 1, Concat, [1]], # cat backbone P6 + [-1, 3, C3, [1024, False]], # 17 - [ -1, 1, Conv, [ 768, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5 - [ -1, 3, C3, [ 768, False ] ], # 21 + [-1, 1, Conv, [768, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P5 + [-1, 3, C3, [768, False]], # 21 - [ -1, 1, Conv, [ 512, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat 
backbone P4 - [ -1, 3, C3, [ 512, False ] ], # 25 + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 25 - [ -1, 1, Conv, [ 256, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 - [ -1, 3, C3, [ 256, False ] ], # 29 (P3/8-small) + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 29 (P3/8-small) - [ -1, 1, Conv, [ 256, 3, 2 ] ], - [ [ -1, 26 ], 1, Concat, [ 1 ] ], # cat head P4 - [ -1, 3, C3, [ 512, False ] ], # 32 (P4/16-medium) + [-1, 1, Conv, [256, 3, 2]], + [[-1, 26], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 32 (P4/16-medium) - [ -1, 1, Conv, [ 512, 3, 2 ] ], - [ [ -1, 22 ], 1, Concat, [ 1 ] ], # cat head P5 - [ -1, 3, C3, [ 768, False ] ], # 35 (P5/32-large) + [-1, 1, Conv, [512, 3, 2]], + [[-1, 22], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [768, False]], # 35 (P5/32-large) - [ -1, 1, Conv, [ 768, 3, 2 ] ], - [ [ -1, 18 ], 1, Concat, [ 1 ] ], # cat head P6 - [ -1, 3, C3, [ 1024, False ] ], # 38 (P6/64-xlarge) + [-1, 1, Conv, [768, 3, 2]], + [[-1, 18], 1, Concat, [1]], # cat head P6 + [-1, 3, C3, [1024, False]], # 38 (P6/64-xlarge) - [ -1, 1, Conv, [ 1024, 3, 2 ] ], - [ [ -1, 14 ], 1, Concat, [ 1 ] ], # cat head P7 - [ -1, 3, C3, [ 1280, False ] ], # 41 (P7/128-xxlarge) + [-1, 1, Conv, [1024, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P7 + [-1, 3, C3, [1280, False]], # 41 (P7/128-xxlarge) - [ [ 29, 32, 35, 38, 41 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6, P7) + [[29, 32, 35, 38, 41], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6, P7) ] diff --git a/models/hub/yolov5-panet.yaml b/models/hub/yolov5-panet.yaml index 340f95a4dbc9..ccfbf900691c 100644 --- a/models/hub/yolov5-panet.yaml +++ b/models/hub/yolov5-panet.yaml @@ -1,48 +1,48 @@ -# parameters +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple - -# anchors anchors: - [10,13, 16,30, 33,23] # P3/8 - [30,61, 62,45, 59,119] # P4/16 - [116,90, 156,198, 373,326] # P5/32 -# YOLOv5 backbone +# YOLOv5 v6.0 backbone backbone: # [from, number, module, args] - [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 - [-1, 3, BottleneckCSP, [128]], + [-1, 3, C3, [128]], [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 - [-1, 9, BottleneckCSP, [256]], + [-1, 6, C3, [256]], [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 - [-1, 9, BottleneckCSP, [512]], + [-1, 9, C3, [512]], [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 - [-1, 1, SPP, [1024, [5, 9, 13]]], - [-1, 3, BottleneckCSP, [1024, False]], # 9 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 ] -# YOLOv5 PANet head +# YOLOv5 v6.0 PANet head head: [[-1, 1, Conv, [512, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 6], 1, Concat, [1]], # cat backbone P4 - [-1, 3, BottleneckCSP, [512, False]], # 13 + [-1, 3, C3, [512, False]], # 13 [-1, 1, Conv, [256, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 4], 1, Concat, [1]], # cat backbone P3 - [-1, 3, BottleneckCSP, [256, False]], # 17 (P3/8-small) + [-1, 3, C3, [256, False]], # 17 (P3/8-small) [-1, 1, Conv, [256, 3, 2]], [[-1, 14], 1, Concat, [1]], # cat head P4 - [-1, 3, BottleneckCSP, [512, False]], # 20 (P4/16-medium) + [-1, 3, C3, [512, False]], # 20 
(P4/16-medium) [-1, 1, Conv, [512, 3, 2]], [[-1, 10], 1, Concat, [1]], # cat head P5 - [-1, 3, BottleneckCSP, [1024, False]], # 23 (P5/32-large) + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) ] diff --git a/models/hub/yolov5l6.yaml b/models/hub/yolov5l6.yaml index 11298b01f479..632c2cb699e3 100644 --- a/models/hub/yolov5l6.yaml +++ b/models/hub/yolov5l6.yaml @@ -1,60 +1,60 @@ -# parameters +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple - -# anchors anchors: - - [ 19,27, 44,40, 38,94 ] # P3/8 - - [ 96,68, 86,152, 180,137 ] # P4/16 - - [ 140,301, 303,264, 238,542 ] # P5/32 - - [ 436,615, 739,380, 925,792 ] # P6/64 + - [19,27, 44,40, 38,94] # P3/8 + - [96,68, 86,152, 180,137] # P4/16 + - [140,301, 303,264, 238,542] # P5/32 + - [436,615, 739,380, 925,792] # P6/64 -# YOLOv5 backbone +# YOLOv5 v6.0 backbone backbone: # [from, number, module, args] - [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 - [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 - [ -1, 3, C3, [ 128 ] ], - [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 - [ -1, 9, C3, [ 256 ] ], - [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 - [ -1, 9, C3, [ 512 ] ], - [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32 - [ -1, 3, C3, [ 768 ] ], - [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64 - [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ], - [ -1, 3, C3, [ 1024, False ] ], # 11 + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 + [-1, 3, C3, [768]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 11 ] -# YOLOv5 head +# YOLOv5 v6.0 head head: - [ [ -1, 1, Conv, [ 768, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5 - [ -1, 3, C3, [ 768, False ] ], # 15 - - [ -1, 1, Conv, [ 512, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 - [ -1, 3, C3, [ 512, False ] ], # 19 - - [ -1, 1, Conv, [ 256, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 - [ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small) - - [ -1, 1, Conv, [ 256, 3, 2 ] ], - [ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4 - [ -1, 3, C3, [ 512, False ] ], # 26 (P4/16-medium) - - [ -1, 1, Conv, [ 512, 3, 2 ] ], - [ [ -1, 16 ], 1, Concat, [ 1 ] ], # cat head P5 - [ -1, 3, C3, [ 768, False ] ], # 29 (P5/32-large) - - [ -1, 1, Conv, [ 768, 3, 2 ] ], - [ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6 - [ -1, 3, C3, [ 1024, False ] ], # 32 (P6/64-xlarge) - - [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6) + [[-1, 1, Conv, [768, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P5 + [-1, 3, C3, [768, False]], # 15 + + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 19 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 23 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 20], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, 
False]], # 26 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 16], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [768, False]], # 29 (P5/32-large) + + [-1, 1, Conv, [768, 3, 2]], + [[-1, 12], 1, Concat, [1]], # cat head P6 + [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) + + [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) ] diff --git a/models/hub/yolov5m6.yaml b/models/hub/yolov5m6.yaml index 48afc865593a..ecc53fd68ba6 100644 --- a/models/hub/yolov5m6.yaml +++ b/models/hub/yolov5m6.yaml @@ -1,60 +1,60 @@ -# parameters +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters nc: 80 # number of classes depth_multiple: 0.67 # model depth multiple width_multiple: 0.75 # layer channel multiple - -# anchors anchors: - - [ 19,27, 44,40, 38,94 ] # P3/8 - - [ 96,68, 86,152, 180,137 ] # P4/16 - - [ 140,301, 303,264, 238,542 ] # P5/32 - - [ 436,615, 739,380, 925,792 ] # P6/64 + - [19,27, 44,40, 38,94] # P3/8 + - [96,68, 86,152, 180,137] # P4/16 + - [140,301, 303,264, 238,542] # P5/32 + - [436,615, 739,380, 925,792] # P6/64 -# YOLOv5 backbone +# YOLOv5 v6.0 backbone backbone: # [from, number, module, args] - [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 - [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 - [ -1, 3, C3, [ 128 ] ], - [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 - [ -1, 9, C3, [ 256 ] ], - [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 - [ -1, 9, C3, [ 512 ] ], - [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32 - [ -1, 3, C3, [ 768 ] ], - [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64 - [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ], - [ -1, 3, C3, [ 1024, False ] ], # 11 + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 + [-1, 3, C3, [768]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 11 ] -# YOLOv5 head +# YOLOv5 v6.0 head head: - [ [ -1, 1, Conv, [ 768, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5 - [ -1, 3, C3, [ 768, False ] ], # 15 - - [ -1, 1, Conv, [ 512, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 - [ -1, 3, C3, [ 512, False ] ], # 19 - - [ -1, 1, Conv, [ 256, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 - [ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small) - - [ -1, 1, Conv, [ 256, 3, 2 ] ], - [ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4 - [ -1, 3, C3, [ 512, False ] ], # 26 (P4/16-medium) - - [ -1, 1, Conv, [ 512, 3, 2 ] ], - [ [ -1, 16 ], 1, Concat, [ 1 ] ], # cat head P5 - [ -1, 3, C3, [ 768, False ] ], # 29 (P5/32-large) - - [ -1, 1, Conv, [ 768, 3, 2 ] ], - [ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6 - [ -1, 3, C3, [ 1024, False ] ], # 32 (P6/64-xlarge) - - [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6) + [[-1, 1, Conv, [768, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P5 + [-1, 3, C3, [768, False]], # 15 + + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 19 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 23 (P3/8-small) + + 
[-1, 1, Conv, [256, 3, 2]], + [[-1, 20], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 26 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 16], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [768, False]], # 29 (P5/32-large) + + [-1, 1, Conv, [768, 3, 2]], + [[-1, 12], 1, Concat, [1]], # cat head P6 + [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) + + [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) ] diff --git a/models/hub/yolov5n6.yaml b/models/hub/yolov5n6.yaml new file mode 100644 index 000000000000..0c0c71d32551 --- /dev/null +++ b/models/hub/yolov5n6.yaml @@ -0,0 +1,60 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 0.33 # model depth multiple +width_multiple: 0.25 # layer channel multiple +anchors: + - [19,27, 44,40, 38,94] # P3/8 + - [96,68, 86,152, 180,137] # P4/16 + - [140,301, 303,264, 238,542] # P5/32 + - [436,615, 739,380, 925,792] # P6/64 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 + [-1, 3, C3, [768]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 11 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, Conv, [768, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P5 + [-1, 3, C3, [768, False]], # 15 + + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 19 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 23 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 20], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 26 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 16], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [768, False]], # 29 (P5/32-large) + + [-1, 1, Conv, [768, 3, 2]], + [[-1, 12], 1, Concat, [1]], # cat head P6 + [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) + + [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) + ] diff --git a/models/hub/yolov5s-ghost.yaml b/models/hub/yolov5s-ghost.yaml new file mode 100644 index 000000000000..ff9519c3f1aa --- /dev/null +++ b/models/hub/yolov5s-ghost.yaml @@ -0,0 +1,48 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 0.33 # model depth multiple +width_multiple: 0.50 # layer channel multiple +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, GhostConv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3Ghost, [128]], + [-1, 1, GhostConv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3Ghost, [256]], + [-1, 1, GhostConv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3Ghost, [512]], + [-1, 1, GhostConv, [1024, 3, 2]], # 7-P5/32 + [-1, 3, C3Ghost, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, GhostConv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3Ghost, [512, False]], # 13 + + [-1, 1, GhostConv, [256, 
1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3Ghost, [256, False]], # 17 (P3/8-small) + + [-1, 1, GhostConv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3Ghost, [512, False]], # 20 (P4/16-medium) + + [-1, 1, GhostConv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3Ghost, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/models/hub/yolov5s-transformer.yaml b/models/hub/yolov5s-transformer.yaml index f2d666722b30..100d7c447527 100644 --- a/models/hub/yolov5s-transformer.yaml +++ b/models/hub/yolov5s-transformer.yaml @@ -1,30 +1,30 @@ -# parameters +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters nc: 80 # number of classes depth_multiple: 0.33 # model depth multiple width_multiple: 0.50 # layer channel multiple - -# anchors anchors: - [10,13, 16,30, 33,23] # P3/8 - [30,61, 62,45, 59,119] # P4/16 - [116,90, 156,198, 373,326] # P5/32 -# YOLOv5 backbone +# YOLOv5 v6.0 backbone backbone: # [from, number, module, args] - [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 [-1, 3, C3, [128]], [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 - [-1, 9, C3, [256]], + [-1, 6, C3, [256]], [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 [-1, 9, C3, [512]], [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 - [-1, 1, SPP, [1024, [5, 9, 13]]], - [-1, 3, C3TR, [1024, False]], # 9 <-------- C3TR() Transformer module + [-1, 3, C3TR, [1024]], # 9 <--- C3TR() Transformer module + [-1, 1, SPPF, [1024, 5]], # 9 ] -# YOLOv5 head +# YOLOv5 v6.0 head head: [[-1, 1, Conv, [512, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], diff --git a/models/hub/yolov5s6.yaml b/models/hub/yolov5s6.yaml index 1df577a2cc97..a28fb559482b 100644 --- a/models/hub/yolov5s6.yaml +++ b/models/hub/yolov5s6.yaml @@ -1,60 +1,60 @@ -# parameters +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters nc: 80 # number of classes depth_multiple: 0.33 # model depth multiple width_multiple: 0.50 # layer channel multiple - -# anchors anchors: - - [ 19,27, 44,40, 38,94 ] # P3/8 - - [ 96,68, 86,152, 180,137 ] # P4/16 - - [ 140,301, 303,264, 238,542 ] # P5/32 - - [ 436,615, 739,380, 925,792 ] # P6/64 + - [19,27, 44,40, 38,94] # P3/8 + - [96,68, 86,152, 180,137] # P4/16 + - [140,301, 303,264, 238,542] # P5/32 + - [436,615, 739,380, 925,792] # P6/64 -# YOLOv5 backbone +# YOLOv5 v6.0 backbone backbone: # [from, number, module, args] - [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 - [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 - [ -1, 3, C3, [ 128 ] ], - [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 - [ -1, 9, C3, [ 256 ] ], - [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 - [ -1, 9, C3, [ 512 ] ], - [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32 - [ -1, 3, C3, [ 768 ] ], - [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64 - [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ], - [ -1, 3, C3, [ 1024, False ] ], # 11 + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 + [-1, 3, C3, [768]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 11 ] -# YOLOv5 head +# YOLOv5 v6.0 head head: - [ [ -1, 1, Conv, [ 768, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5 - 
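yolov5s-ghost.yaml a few hunks above builds its backbone from GhostConv/C3Ghost blocks; the GhostConv deleted from experimental.py earlier in this patch was presumably relocated alongside the other building blocks (the move itself is not shown in these hunks). The ghost trick: produce half the channels with a dense conv, then derive the other half with a cheap depthwise conv on top of them. A bare-nn sketch of the structure, without the BN/activation wrappers:

    import torch
    import torch.nn as nn

    class GhostConvSketch(nn.Module):
        # half dense channels + half cheap depthwise channels (huawei-noah/ghostnet)
        def __init__(self, c1, c2, k=1, s=1):
            super().__init__()
            c_ = c2 // 2
            self.cv1 = nn.Conv2d(c1, c_, k, s, k // 2, bias=False)        # dense half
            self.cv2 = nn.Conv2d(c_, c_, 5, 1, 2, groups=c_, bias=False)  # ghost half
        def forward(self, x):
            y = self.cv1(x)
            return torch.cat([y, self.cv2(y)], 1)

    print(GhostConvSketch(16, 32)(torch.randn(1, 16, 8, 8)).shape)   # (1, 32, 8, 8)
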
[ -1, 3, C3, [ 768, False ] ], # 15 - - [ -1, 1, Conv, [ 512, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 - [ -1, 3, C3, [ 512, False ] ], # 19 - - [ -1, 1, Conv, [ 256, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 - [ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small) - - [ -1, 1, Conv, [ 256, 3, 2 ] ], - [ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4 - [ -1, 3, C3, [ 512, False ] ], # 26 (P4/16-medium) - - [ -1, 1, Conv, [ 512, 3, 2 ] ], - [ [ -1, 16 ], 1, Concat, [ 1 ] ], # cat head P5 - [ -1, 3, C3, [ 768, False ] ], # 29 (P5/32-large) - - [ -1, 1, Conv, [ 768, 3, 2 ] ], - [ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6 - [ -1, 3, C3, [ 1024, False ] ], # 32 (P6/64-xlarge) - - [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6) + [[-1, 1, Conv, [768, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P5 + [-1, 3, C3, [768, False]], # 15 + + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 19 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 23 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 20], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 26 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 16], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [768, False]], # 29 (P5/32-large) + + [-1, 1, Conv, [768, 3, 2]], + [[-1, 12], 1, Concat, [1]], # cat head P6 + [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) + + [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) ] diff --git a/models/hub/yolov5x6.yaml b/models/hub/yolov5x6.yaml index 5ebc02124fe7..ba795c4aad31 100644 --- a/models/hub/yolov5x6.yaml +++ b/models/hub/yolov5x6.yaml @@ -1,60 +1,60 @@ -# parameters +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters nc: 80 # number of classes depth_multiple: 1.33 # model depth multiple width_multiple: 1.25 # layer channel multiple - -# anchors anchors: - - [ 19,27, 44,40, 38,94 ] # P3/8 - - [ 96,68, 86,152, 180,137 ] # P4/16 - - [ 140,301, 303,264, 238,542 ] # P5/32 - - [ 436,615, 739,380, 925,792 ] # P6/64 + - [19,27, 44,40, 38,94] # P3/8 + - [96,68, 86,152, 180,137] # P4/16 + - [140,301, 303,264, 238,542] # P5/32 + - [436,615, 739,380, 925,792] # P6/64 -# YOLOv5 backbone +# YOLOv5 v6.0 backbone backbone: # [from, number, module, args] - [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 - [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 - [ -1, 3, C3, [ 128 ] ], - [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 - [ -1, 9, C3, [ 256 ] ], - [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 - [ -1, 9, C3, [ 512 ] ], - [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32 - [ -1, 3, C3, [ 768 ] ], - [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64 - [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ], - [ -1, 3, C3, [ 1024, False ] ], # 11 + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 + [-1, 3, C3, [768]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 11 ] -# YOLOv5 head +# YOLOv5 v6.0 head head: - [ [ -1, 1, Conv, [ 768, 1, 1 ] ], - [ -1, 1, 
nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5 - [ -1, 3, C3, [ 768, False ] ], # 15 - - [ -1, 1, Conv, [ 512, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 - [ -1, 3, C3, [ 512, False ] ], # 19 - - [ -1, 1, Conv, [ 256, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 - [ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small) - - [ -1, 1, Conv, [ 256, 3, 2 ] ], - [ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4 - [ -1, 3, C3, [ 512, False ] ], # 26 (P4/16-medium) - - [ -1, 1, Conv, [ 512, 3, 2 ] ], - [ [ -1, 16 ], 1, Concat, [ 1 ] ], # cat head P5 - [ -1, 3, C3, [ 768, False ] ], # 29 (P5/32-large) - - [ -1, 1, Conv, [ 768, 3, 2 ] ], - [ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6 - [ -1, 3, C3, [ 1024, False ] ], # 32 (P6/64-xlarge) - - [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6) + [[-1, 1, Conv, [768, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P5 + [-1, 3, C3, [768, False]], # 15 + + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 19 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 23 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 20], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 26 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 16], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [768, False]], # 29 (P5/32-large) + + [-1, 1, Conv, [768, 3, 2]], + [[-1, 12], 1, Concat, [1]], # cat head P6 + [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) + + [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) ] diff --git a/models/tf.py b/models/tf.py new file mode 100644 index 000000000000..728907f8fb47 --- /dev/null +++ b/models/tf.py @@ -0,0 +1,466 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +TensorFlow, Keras and TFLite versions of YOLOv5 +Authored by https://github.com/zldrobit in PR https://github.com/ultralytics/yolov5/pull/1127 + +Usage: + $ python models/tf.py --weights yolov5s.pt + +Export: + $ python path/to/export.py --weights yolov5s.pt --include saved_model pb tflite tfjs +""" + +import argparse +import sys +from copy import deepcopy +from pathlib import Path + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[1] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +# ROOT = ROOT.relative_to(Path.cwd()) # relative + +import numpy as np +import tensorflow as tf +import torch +import torch.nn as nn +from tensorflow import keras + +from models.common import C3, SPP, SPPF, Bottleneck, BottleneckCSP, Concat, Conv, DWConv, Focus, autopad +from models.experimental import CrossConv, MixConv2d, attempt_load +from models.yolo import Detect +from utils.activations import SiLU +from utils.general import LOGGER, make_divisible, print_args + + +class TFBN(keras.layers.Layer): + # TensorFlow BatchNormalization wrapper + def __init__(self, w=None): + super().__init__() + self.bn = keras.layers.BatchNormalization( + beta_initializer=keras.initializers.Constant(w.bias.numpy()), + gamma_initializer=keras.initializers.Constant(w.weight.numpy()), + moving_mean_initializer=keras.initializers.Constant(w.running_mean.numpy()), + 
moving_variance_initializer=keras.initializers.Constant(w.running_var.numpy()),
+            epsilon=w.eps)
+
+    def call(self, inputs):
+        return self.bn(inputs)
+
+
+class TFPad(keras.layers.Layer):
+    def __init__(self, pad):
+        super().__init__()
+        self.pad = tf.constant([[0, 0], [pad, pad], [pad, pad], [0, 0]])
+
+    def call(self, inputs):
+        return tf.pad(inputs, self.pad, mode='constant', constant_values=0)
+
+
+class TFConv(keras.layers.Layer):
+    # Standard convolution
+    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None):
+        # ch_in, ch_out, kernel, stride, padding, groups, activation, weights
+        super().__init__()
+        assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument"
+        assert isinstance(k, int), "Convolution with multiple kernels is not allowed."
+        # TensorFlow convolution padding is inconsistent with PyTorch (e.g. k=3 s=2 'SAME' padding)
+        # see https://stackoverflow.com/questions/52975843/comparing-conv2d-with-padding-between-tensorflow-and-pytorch
+
+        conv = keras.layers.Conv2D(
+            c2, k, s, 'SAME' if s == 1 else 'VALID', use_bias=False if hasattr(w, 'bn') else True,
+            kernel_initializer=keras.initializers.Constant(w.conv.weight.permute(2, 3, 1, 0).numpy()),
+            bias_initializer='zeros' if hasattr(w, 'bn') else keras.initializers.Constant(w.conv.bias.numpy()))
+        self.conv = conv if s == 1 else keras.Sequential([TFPad(autopad(k, p)), conv])
+        self.bn = TFBN(w.bn) if hasattr(w, 'bn') else tf.identity
+
+        # YOLOv5 activations
+        if isinstance(w.act, nn.LeakyReLU):
+            self.act = (lambda x: keras.activations.relu(x, alpha=0.1)) if act else tf.identity
+        elif isinstance(w.act, nn.Hardswish):
+            self.act = (lambda x: x * tf.nn.relu6(x + 3) * 0.166666667) if act else tf.identity
+        elif isinstance(w.act, (nn.SiLU, SiLU)):
+            self.act = (lambda x: keras.activations.swish(x)) if act else tf.identity
+        else:
+            raise Exception(f'no matching TensorFlow activation found for {w.act}')
+
+    def call(self, inputs):
+        return self.act(self.bn(self.conv(inputs)))
+
+
+class TFFocus(keras.layers.Layer):
+    # Focus wh information into c-space
+    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None):
+        # ch_in, ch_out, kernel, stride, padding, groups
+        super().__init__()
+        self.conv = TFConv(c1 * 4, c2, k, s, p, g, act, w.conv)
+
+    def call(self, inputs):  # x(b,w,h,c) -> y(b,w/2,h/2,4c)
+        # inputs = inputs / 255  # normalize 0-255 to 0-1
+        return self.conv(tf.concat([inputs[:, ::2, ::2, :],
+                                    inputs[:, 1::2, ::2, :],
+                                    inputs[:, ::2, 1::2, :],
+                                    inputs[:, 1::2, 1::2, :]], 3))
+
+
+class TFBottleneck(keras.layers.Layer):
+    # Standard bottleneck
+    def __init__(self, c1, c2, shortcut=True, g=1, e=0.5, w=None):  # ch_in, ch_out, shortcut, groups, expansion
+        super().__init__()
+        c_ = int(c2 * e)  # hidden channels
+        self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
+        self.cv2 = TFConv(c_, c2, 3, 1, g=g, w=w.cv2)
+        self.add = shortcut and c1 == c2
+
+    def call(self, inputs):
+        return inputs + self.cv2(self.cv1(inputs)) if self.add else self.cv2(self.cv1(inputs))
+
+
+class TFConv2d(keras.layers.Layer):
+    # Substitution for PyTorch nn.Conv2D
+    def __init__(self, c1, c2, k, s=1, g=1, bias=True, w=None):
+        super().__init__()
+        assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument"
+        self.conv = keras.layers.Conv2D(
+            c2, k, s, 'VALID', use_bias=bias,
+            kernel_initializer=keras.initializers.Constant(w.weight.permute(2, 3, 1, 0).numpy()),
+            bias_initializer=keras.initializers.Constant(w.bias.numpy()) if bias else None, )
+
+    def call(self, inputs):
+        return self.conv(inputs)
+
+
+class 
TFBottleneckCSP(keras.layers.Layer): + # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None): + # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) + self.cv2 = TFConv2d(c1, c_, 1, 1, bias=False, w=w.cv2) + self.cv3 = TFConv2d(c_, c_, 1, 1, bias=False, w=w.cv3) + self.cv4 = TFConv(2 * c_, c2, 1, 1, w=w.cv4) + self.bn = TFBN(w.bn) + self.act = lambda x: keras.activations.relu(x, alpha=0.1) + self.m = keras.Sequential([TFBottleneck(c_, c_, shortcut, g, e=1.0, w=w.m[j]) for j in range(n)]) + + def call(self, inputs): + y1 = self.cv3(self.m(self.cv1(inputs))) + y2 = self.cv2(inputs) + return self.cv4(self.act(self.bn(tf.concat((y1, y2), axis=3)))) + + +class TFC3(keras.layers.Layer): + # CSP Bottleneck with 3 convolutions + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None): + # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) + self.cv2 = TFConv(c1, c_, 1, 1, w=w.cv2) + self.cv3 = TFConv(2 * c_, c2, 1, 1, w=w.cv3) + self.m = keras.Sequential([TFBottleneck(c_, c_, shortcut, g, e=1.0, w=w.m[j]) for j in range(n)]) + + def call(self, inputs): + return self.cv3(tf.concat((self.m(self.cv1(inputs)), self.cv2(inputs)), axis=3)) + + +class TFSPP(keras.layers.Layer): + # Spatial pyramid pooling layer used in YOLOv3-SPP + def __init__(self, c1, c2, k=(5, 9, 13), w=None): + super().__init__() + c_ = c1 // 2 # hidden channels + self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) + self.cv2 = TFConv(c_ * (len(k) + 1), c2, 1, 1, w=w.cv2) + self.m = [keras.layers.MaxPool2D(pool_size=x, strides=1, padding='SAME') for x in k] + + def call(self, inputs): + x = self.cv1(inputs) + return self.cv2(tf.concat([x] + [m(x) for m in self.m], 3)) + + +class TFSPPF(keras.layers.Layer): + # Spatial pyramid pooling-Fast layer + def __init__(self, c1, c2, k=5, w=None): + super().__init__() + c_ = c1 // 2 # hidden channels + self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) + self.cv2 = TFConv(c_ * 4, c2, 1, 1, w=w.cv2) + self.m = keras.layers.MaxPool2D(pool_size=k, strides=1, padding='SAME') + + def call(self, inputs): + x = self.cv1(inputs) + y1 = self.m(x) + y2 = self.m(y1) + return self.cv2(tf.concat([x, y1, y2, self.m(y2)], 3)) + + +class TFDetect(keras.layers.Layer): + def __init__(self, nc=80, anchors=(), ch=(), imgsz=(640, 640), w=None): # detection layer + super().__init__() + self.stride = tf.convert_to_tensor(w.stride.numpy(), dtype=tf.float32) + self.nc = nc # number of classes + self.no = nc + 5 # number of outputs per anchor + self.nl = len(anchors) # number of detection layers + self.na = len(anchors[0]) // 2 # number of anchors + self.grid = [tf.zeros(1)] * self.nl # init grid + self.anchors = tf.convert_to_tensor(w.anchors.numpy(), dtype=tf.float32) + self.anchor_grid = tf.reshape(self.anchors * tf.reshape(self.stride, [self.nl, 1, 1]), + [self.nl, 1, -1, 1, 2]) + self.m = [TFConv2d(x, self.no * self.na, 1, w=w.m[i]) for i, x in enumerate(ch)] + self.training = False # set to False after building model + self.imgsz = imgsz + for i in range(self.nl): + ny, nx = self.imgsz[0] // self.stride[i], self.imgsz[1] // self.stride[i] + self.grid[i] = self._make_grid(nx, ny) + + def call(self, inputs): + z = [] # inference output + x = [] + for i in range(self.nl): + x.append(self.m[i](inputs[i])) + # x(bs,20,20,255) to 
x(bs,3,20,20,85) + ny, nx = self.imgsz[0] // self.stride[i], self.imgsz[1] // self.stride[i] + x[i] = tf.reshape(x[i], [-1, ny * nx, self.na, self.no]) + + if not self.training: # inference + y = tf.sigmoid(x[i]) + grid = tf.transpose(self.grid[i], [0, 2, 1, 3]) - 0.5 + anchor_grid = tf.transpose(self.anchor_grid[i], [0, 2, 1, 3]) * 4 + xy = (y[..., 0:2] * 2 + grid) * self.stride[i] # xy + wh = y[..., 2:4] ** 2 * anchor_grid + # Normalize xywh to 0-1 to reduce calibration error + xy /= tf.constant([[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32) + wh /= tf.constant([[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32) + y = tf.concat([xy, wh, y[..., 4:]], -1) + z.append(tf.reshape(y, [-1, self.na * ny * nx, self.no])) + + return tf.transpose(x, [0, 2, 1, 3]) if self.training else (tf.concat(z, 1), x) + + @staticmethod + def _make_grid(nx=20, ny=20): + # yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)]) + # return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float() + xv, yv = tf.meshgrid(tf.range(nx), tf.range(ny)) + return tf.cast(tf.reshape(tf.stack([xv, yv], 2), [1, 1, ny * nx, 2]), dtype=tf.float32) + + +class TFUpsample(keras.layers.Layer): + def __init__(self, size, scale_factor, mode, w=None): # warning: all arguments needed including 'w' + super().__init__() + assert scale_factor == 2, "scale_factor must be 2" + self.upsample = lambda x: tf.image.resize(x, (x.shape[1] * 2, x.shape[2] * 2), method=mode) + # self.upsample = keras.layers.UpSampling2D(size=scale_factor, interpolation=mode) + # with default arguments: align_corners=False, half_pixel_centers=False + # self.upsample = lambda x: tf.raw_ops.ResizeNearestNeighbor(images=x, + # size=(x.shape[1] * 2, x.shape[2] * 2)) + + def call(self, inputs): + return self.upsample(inputs) + + +class TFConcat(keras.layers.Layer): + def __init__(self, dimension=1, w=None): + super().__init__() + assert dimension == 1, "convert only NCHW to NHWC concat" + self.d = 3 + + def call(self, inputs): + return tf.concat(inputs, self.d) + + +def parse_model(d, ch, model, imgsz): # model_dict, input_channels(3) + LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}") + anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'] + na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors + no = na * (nc + 5) # number of outputs = anchors * (classes + 5) + + layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out + for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args + m_str = m + m = eval(m) if isinstance(m, str) else m # eval strings + for j, a in enumerate(args): + try: + args[j] = eval(a) if isinstance(a, str) else a # eval strings + except NameError: + pass + + n = max(round(n * gd), 1) if n > 1 else n # depth gain + if m in [nn.Conv2d, Conv, Bottleneck, SPP, SPPF, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP, C3]: + c1, c2 = ch[f], args[0] + c2 = make_divisible(c2 * gw, 8) if c2 != no else c2 + + args = [c1, c2, *args[1:]] + if m in [BottleneckCSP, C3]: + args.insert(2, n) + n = 1 + elif m is nn.BatchNorm2d: + args = [ch[f]] + elif m is Concat: + c2 = sum(ch[-1 if x == -1 else x + 1] for x in f) + elif m is Detect: + args.append([ch[x + 1] for x in f]) + if isinstance(args[1], int): # number of anchors + args[1] = [list(range(args[1] * 2))] * len(f) + args.append(imgsz) + else: + c2 = ch[f] + + tf_m = eval('TF' + m_str.replace('nn.', '')) + m_ = keras.Sequential([tf_m(*args, 
w=model.model[i][j]) for j in range(n)]) if n > 1 \ + else tf_m(*args, w=model.model[i]) # module + + torch_m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args) # module + t = str(m)[8:-2].replace('__main__.', '') # module type + np = sum(x.numel() for x in torch_m_.parameters()) # number params + m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params + LOGGER.info(f'{i:>3}{str(f):>18}{str(n):>3}{np:>10} {t:<40}{str(args):<30}') # print + save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist + layers.append(m_) + ch.append(c2) + return keras.Sequential(layers), sorted(save) + + +class TFModel: + def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, model=None, imgsz=(640, 640)): # model, channels, classes + super().__init__() + if isinstance(cfg, dict): + self.yaml = cfg # model dict + else: # is *.yaml + import yaml # for torch hub + self.yaml_file = Path(cfg).name + with open(cfg) as f: + self.yaml = yaml.load(f, Loader=yaml.FullLoader) # model dict + + # Define model + if nc and nc != self.yaml['nc']: + LOGGER.info(f"Overriding {cfg} nc={self.yaml['nc']} with nc={nc}") + self.yaml['nc'] = nc # override yaml value + self.model, self.savelist = parse_model(deepcopy(self.yaml), ch=[ch], model=model, imgsz=imgsz) + + def predict(self, inputs, tf_nms=False, agnostic_nms=False, topk_per_class=100, topk_all=100, iou_thres=0.45, + conf_thres=0.25): + y = [] # outputs + x = inputs + for i, m in enumerate(self.model.layers): + if m.f != -1: # if not from previous layer + x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers + + x = m(x) # run + y.append(x if m.i in self.savelist else None) # save output + + # Add TensorFlow NMS + if tf_nms: + boxes = self._xywh2xyxy(x[0][..., :4]) + probs = x[0][:, :, 4:5] + classes = x[0][:, :, 5:] + scores = probs * classes + if agnostic_nms: + nms = AgnosticNMS()((boxes, classes, scores), topk_all, iou_thres, conf_thres) + return nms, x[1] + else: + boxes = tf.expand_dims(boxes, 2) + nms = tf.image.combined_non_max_suppression( + boxes, scores, topk_per_class, topk_all, iou_thres, conf_thres, clip_boxes=False) + return nms, x[1] + + return x[0] # output only first tensor [1,6300,85] = [xywh, conf, class0, class1, ...] + # x = x[0][0] # [x(1,6300,85), ...] 
to x(6300,85) + # xywh = x[..., :4] # x(6300,4) boxes + # conf = x[..., 4:5] # x(6300,1) confidences + # cls = tf.reshape(tf.cast(tf.argmax(x[..., 5:], axis=1), tf.float32), (-1, 1)) # x(6300,1) classes + # return tf.concat([conf, cls, xywh], 1) + + @staticmethod + def _xywh2xyxy(xywh): + # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right + x, y, w, h = tf.split(xywh, num_or_size_splits=4, axis=-1) + return tf.concat([x - w / 2, y - h / 2, x + w / 2, y + h / 2], axis=-1) + + +class AgnosticNMS(keras.layers.Layer): + # TF Agnostic NMS + def call(self, input, topk_all, iou_thres, conf_thres): + # wrap map_fn to avoid TypeSpec related error https://stackoverflow.com/a/65809989/3036450 + return tf.map_fn(lambda x: self._nms(x, topk_all, iou_thres, conf_thres), input, + fn_output_signature=(tf.float32, tf.float32, tf.float32, tf.int32), + name='agnostic_nms') + + @staticmethod + def _nms(x, topk_all=100, iou_thres=0.45, conf_thres=0.25): # agnostic NMS + boxes, classes, scores = x + class_inds = tf.cast(tf.argmax(classes, axis=-1), tf.float32) + scores_inp = tf.reduce_max(scores, -1) + selected_inds = tf.image.non_max_suppression( + boxes, scores_inp, max_output_size=topk_all, iou_threshold=iou_thres, score_threshold=conf_thres) + selected_boxes = tf.gather(boxes, selected_inds) + padded_boxes = tf.pad(selected_boxes, + paddings=[[0, topk_all - tf.shape(selected_boxes)[0]], [0, 0]], + mode="CONSTANT", constant_values=0.0) + selected_scores = tf.gather(scores_inp, selected_inds) + padded_scores = tf.pad(selected_scores, + paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]], + mode="CONSTANT", constant_values=-1.0) + selected_classes = tf.gather(class_inds, selected_inds) + padded_classes = tf.pad(selected_classes, + paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]], + mode="CONSTANT", constant_values=-1.0) + valid_detections = tf.shape(selected_inds)[0] + return padded_boxes, padded_scores, padded_classes, valid_detections + + +def representative_dataset_gen(dataset, ncalib=100): + # Representative dataset generator for use with converter.representative_dataset, returns a generator of np arrays + for n, (path, img, im0s, vid_cap, string) in enumerate(dataset): + input = np.transpose(img, [1, 2, 0]) + input = np.expand_dims(input, axis=0).astype(np.float32) + input /= 255 + yield [input] + if n >= ncalib: + break + + +def run(weights=ROOT / 'yolov5s.pt', # weights path + imgsz=(640, 640), # inference size h,w + batch_size=1, # batch size + dynamic=False, # dynamic batch size + ): + # PyTorch model + im = torch.zeros((batch_size, 3, *imgsz)) # BCHW image + model = attempt_load(weights, map_location=torch.device('cpu'), inplace=True, fuse=False) + _ = model(im) # inference + model.info() + + # TensorFlow model + im = tf.zeros((batch_size, *imgsz, 3)) # BHWC image + tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz) + _ = tf_model.predict(im) # inference + + # Keras model + im = keras.Input(shape=(*imgsz, 3), batch_size=None if dynamic else batch_size) + keras_model = keras.Model(inputs=im, outputs=tf_model.predict(im)) + keras_model.summary() + + LOGGER.info('PyTorch, TensorFlow and Keras models successfully verified.\nUse export.py for TF model export.') + + +def parse_opt(): + parser = argparse.ArgumentParser() + parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='weights path') + parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w') + 
parser.add_argument('--batch-size', type=int, default=1, help='batch size') + parser.add_argument('--dynamic', action='store_true', help='dynamic batch size') + opt = parser.parse_args() + opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand + print_args(FILE.stem, opt) + return opt + + +def main(opt): + run(**vars(opt)) + + +if __name__ == "__main__": + opt = parse_opt() + main(opt) diff --git a/models/yolo.py b/models/yolo.py index f16e69b17df5..9f4701c49f9d 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -1,24 +1,31 @@ -# YOLOv5 YOLO-specific modules +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +YOLO-specific modules + +Usage: + $ python path/to/models/yolo.py --cfg yolov5s.yaml +""" import argparse -import logging import sys from copy import deepcopy from pathlib import Path -sys.path.append(Path(__file__).parent.parent.absolute().__str__()) # to run '$ python *.py' files in subdirectories -logger = logging.getLogger(__name__) +FILE = Path(__file__).resolve() +ROOT = FILE.parents[1] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +# ROOT = ROOT.relative_to(Path.cwd()) # relative from models.common import * from models.experimental import * -from utils.activations import replace_activations from utils.autoanchor import check_anchor_order -from utils.general import make_divisible, check_file, set_logging -from utils.torch_utils import time_synchronized, fuse_conv_and_bn, model_info, scale_img, initialize_weights, \ - select_device, copy_attr +from utils.general import LOGGER, check_version, check_yaml, make_divisible, print_args +from utils.plots import feature_visualization +from utils.torch_utils import fuse_conv_and_bn, initialize_weights, model_info, scale_img, select_device, time_sync try: - import thop # for FLOPS computation + import thop # for FLOPs computation except ImportError: thop = None @@ -26,74 +33,76 @@ class Detect(nn.Module): stride = None # strides computed during build onnx_dynamic = False # ONNX export parameter - export = True # onnx export def __init__(self, nc=80, anchors=(), ch=(), inplace=True): # detection layer - super(Detect, self).__init__() + super().__init__() self.nc = nc # number of classes self.no = nc + 5 # number of outputs per anchor self.nl = len(anchors) # number of detection layers self.na = len(anchors[0]) // 2 # number of anchors self.grid = [torch.zeros(1)] * self.nl # init grid - a = torch.tensor(anchors).float().view(self.nl, -1, 2) - self.register_buffer('anchors', a) # shape(nl,na,2) - self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2) + self.anchor_grid = [torch.zeros(1)] * self.nl # init anchor grid + self.register_buffer('anchors', torch.tensor(anchors).float().view(self.nl, -1, 2)) # shape(nl,na,2) self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv self.inplace = inplace # use in-place ops (e.g. 
slice assignment) def forward(self, x): - # x = x.copy() # for profiling z = [] # inference output for i in range(self.nl): x[i] = self.m[i](x[i]) # conv bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() - if not self.training and self.export: # inference - if self.grid[i].shape[2:4] != x[i].shape[2:4] or self.onnx_dynamic: - self.grid[i] = self._make_grid(nx, ny).to(x[i].device) + if not self.training: # inference + if self.onnx_dynamic or self.grid[i].shape[2:4] != x[i].shape[2:4]: + self.grid[i], self.anchor_grid[i] = self._make_grid(nx, ny, i) y = x[i].sigmoid() if self.inplace: - y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy + y[..., 0:2] = (y[..., 0:2] * 2 - 0.5 + self.grid[i]) * self.stride[i] # xy y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh else: # for YOLOv5 on AWS Inferentia https://github.com/ultralytics/yolov5/pull/2953 - xy = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy - wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i].view(1, self.na, 1, 1, 2) # wh + xy = (y[..., 0:2] * 2 - 0.5 + self.grid[i]) * self.stride[i] # xy + wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh y = torch.cat((xy, wh, y[..., 4:]), -1) z.append(y.view(bs, -1, self.no)) - return x if self.training or not self.export else (torch.cat(z, 1), x) + return x if self.training else (torch.cat(z, 1), x) - @staticmethod - def _make_grid(nx=20, ny=20): - yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)]) - return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float() + def _make_grid(self, nx=20, ny=20, i=0): + d = self.anchors[i].device + shape = 1, self.na, ny, nx, 2 # grid shape + if check_version(torch.__version__, '1.10.0'): # torch>=1.10.0 meshgrid workaround for torch>=0.7 compatibility + yv, xv = torch.meshgrid(torch.arange(ny, device=d), torch.arange(nx, device=d), indexing='ij') + else: + yv, xv = torch.meshgrid(torch.arange(ny, device=d), torch.arange(nx, device=d)) + grid = torch.stack((xv, yv), 2).expand(shape).float() + anchor_grid = (self.anchors[i] * self.stride[i]).view((1, self.na, 1, 1, 2)).expand(shape).float() + return grid, anchor_grid class Model(nn.Module): def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, input channels, number of classes - super(Model, self).__init__() + super().__init__() if isinstance(cfg, dict): self.yaml = cfg # model dict else: # is *.yaml import yaml # for torch hub self.yaml_file = Path(cfg).name - with open(cfg) as f: + with open(cfg, encoding='ascii', errors='ignore') as f: self.yaml = yaml.safe_load(f) # model dict # Define model ch = self.yaml['ch'] = self.yaml.get('ch', ch) # input channels if nc and nc != self.yaml['nc']: - logger.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}") + LOGGER.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}") self.yaml['nc'] = nc # override yaml value if anchors: - logger.info(f'Overriding model.yaml anchors with anchors={anchors}') + LOGGER.info(f'Overriding model.yaml anchors with anchors={anchors}') self.yaml['anchors'] = round(anchors) # override yaml value self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]) # model, savelist self.names = [str(i) for i in range(self.yaml['nc'])] # default names self.inplace = self.yaml.get('inplace', True) - # logger.info([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))]) # Build strides, anchors m = self.model[-1] # Detect() @@ 
-101,57 +110,46 @@ def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, i s = 256 # 2x min stride m.inplace = self.inplace m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward + check_anchor_order(m) # must be in pixel-space (not grid-space) m.anchors /= m.stride.view(-1, 1, 1) - check_anchor_order(m) self.stride = m.stride self._initialize_biases() # only run once - # logger.info('Strides: %s' % m.stride.tolist()) # Init weights, biases initialize_weights(self) self.info() - logger.info('') + LOGGER.info('') - def forward(self, x, augment=False, profile=False): + def forward(self, x, augment=False, profile=False, visualize=False): if augment: - return self.forward_augment(x) # augmented inference, None - else: - return self.forward_once(x, profile) # single-scale inference, train + return self._forward_augment(x) # augmented inference, None + return self._forward_once(x, profile, visualize) # single-scale inference, train - def forward_augment(self, x): + def _forward_augment(self, x): img_size = x.shape[-2:] # height, width s = [1, 0.83, 0.67] # scales f = [None, 3, None] # flips (2-ud, 3-lr) y = [] # outputs for si, fi in zip(s, f): xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max())) - yi = self.forward_once(xi)[0] # forward + yi = self._forward_once(xi)[0] # forward # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1]) # save yi = self._descale_pred(yi, fi, si, img_size) y.append(yi) + y = self._clip_augmented(y) # clip augmented tails return torch.cat(y, 1), None # augmented inference, train - def forward_once(self, x, profile=False): + def _forward_once(self, x, profile=False, visualize=False): y, dt = [], [] # outputs for m in self.model: if m.f != -1: # if not from previous layer x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers - if profile: - o = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPS - t = time_synchronized() - for _ in range(10): - _ = m(x) - dt.append((time_synchronized() - t) * 100) - if m == self.model[0]: - logger.info(f"{'time (ms)':>10s} {'GFLOPS':>10s} {'params':>10s} {'module'}") - logger.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f} {m.type}') - + self._profile_one_layer(m, x, dt) x = m(x) # run y.append(x if m.i in self.save else None) # save output - - if profile: - logger.info('%.1fms total' % sum(dt)) + if visualize: + feature_visualization(x, m.type, m.i, save_dir=visualize) return x def _descale_pred(self, p, flips, scale, img_size): @@ -171,6 +169,30 @@ def _descale_pred(self, p, flips, scale, img_size): p = torch.cat((x, y, wh, p[..., 4:]), -1) return p + def _clip_augmented(self, y): + # Clip YOLOv5 augmented inference tails + nl = self.model[-1].nl # number of detection layers (P3-P5) + g = sum(4 ** x for x in range(nl)) # grid points + e = 1 # exclude layer count + i = (y[0].shape[1] // g) * sum(4 ** x for x in range(e)) # indices + y[0] = y[0][:, :-i] # large + i = (y[-1].shape[1] // g) * sum(4 ** (nl - 1 - x) for x in range(e)) # indices + y[-1] = y[-1][:, i:] # small + return y + + def _profile_one_layer(self, m, x, dt): + c = isinstance(m, Detect) # is final layer, copy input as inplace fix + o = thop.profile(m, inputs=(x.copy() if c else x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPs + t = time_sync() + for _ in range(10): + m(x.copy() if c else x) + dt.append((time_sync() - t) * 100) + if m == self.model[0]: + 
LOGGER.info(f"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s} {'module'}") + LOGGER.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f} {m.type}') + if c: + LOGGER.info(f"{sum(dt):10.2f} {'-':>10s} {'-':>10s} Total") + def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency # https://arxiv.org/abs/1708.02002 section 3.3 # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1. @@ -178,57 +200,48 @@ def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is for mi, s in zip(m.m, m.stride): # from b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85) b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) - b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls + b.data[:, 5:] += math.log(0.6 / (m.nc - 0.999999)) if cf is None else torch.log(cf / cf.sum()) # cls mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) def _print_biases(self): m = self.model[-1] # Detect() module for mi in m.m: # from b = mi.bias.detach().view(m.na, -1).T # conv.bias(255) to (3,85) - logger.info( + LOGGER.info( ('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean())) # def _print_weights(self): # for m in self.model.modules(): # if type(m) is Bottleneck: - # logger.info('%10.3g' % (m.w.detach().sigmoid() * 2)) # shortcut weights + # LOGGER.info('%10.3g' % (m.w.detach().sigmoid() * 2)) # shortcut weights def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers - logger.info('Fusing layers... ') + LOGGER.info('Fusing layers... ') for m in self.model.modules(): - if type(m) is Conv and hasattr(m, 'bn'): + if isinstance(m, (Conv, DWConv)) and hasattr(m, 'bn'): m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv delattr(m, 'bn') # remove batchnorm - m.forward = m.fuseforward # update forward + m.forward = m.forward_fuse # update forward self.info() return self - def nms(self, mode=True): # add or remove NMS module - present = type(self.model[-1]) is NMS # last layer is NMS - if mode and not present: - logger.info('Adding NMS... ') - m = NMS() # module - m.f = -1 # from - m.i = self.model[-1].i + 1 # index - self.model.add_module(name='%s' % m.i, module=m) # add - self.eval() - elif not mode and present: - logger.info('Removing NMS... ') - self.model = self.model[:-1] # remove - return self - - def autoshape(self): # add AutoShape module - logger.info('Adding AutoShape... 
') - m = AutoShape(self) # wrap model - copy_attr(m, self, include=('yaml', 'nc', 'hyp', 'names', 'stride'), exclude=()) # copy attributes - return m - def info(self, verbose=False, img_size=640): # print model information model_info(self, verbose, img_size) + def _apply(self, fn): + # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers + self = super()._apply(fn) + m = self.model[-1] # Detect() + if isinstance(m, Detect): + m.stride = fn(m.stride) + m.grid = list(map(fn, m.grid)) + if isinstance(m.anchor_grid, list): + m.anchor_grid = list(map(fn, m.anchor_grid)) + return self + def parse_model(d, ch): # model_dict, input_channels(3) - logger.info('\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments')) + LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}") anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'] na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors no = na * (nc + 5) # number of outputs = anchors * (classes + 5) @@ -239,24 +252,24 @@ def parse_model(d, ch): # model_dict, input_channels(3) for j, a in enumerate(args): try: args[j] = eval(a) if isinstance(a, str) else a # eval strings - except: + except NameError: pass - n = max(round(n * gd), 1) if n > 1 else n # depth gain - if m in [Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP, - C3, C3TR]: + n = n_ = max(round(n * gd), 1) if n > 1 else n # depth gain + if m in [Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, SPPF, DWConv, MixConv2d, Focus, CrossConv, + BottleneckCSP, C3, C3TR, C3SPP, C3Ghost]: c1, c2 = ch[f], args[0] if c2 != no: # if not output c2 = make_divisible(c2 * gw, 8) args = [c1, c2, *args[1:]] - if m in [BottleneckCSP, C3, C3TR]: + if m in [BottleneckCSP, C3, C3TR, C3Ghost]: args.insert(2, n) # number of repeats n = 1 elif m is nn.BatchNorm2d: args = [ch[f]] elif m is Concat: - c2 = sum([ch[x] for x in f]) + c2 = sum(ch[x] for x in f) elif m is Detect: args.append([ch[x] for x in f]) if isinstance(args[1], int): # number of anchors @@ -268,34 +281,28 @@ def parse_model(d, ch): # model_dict, input_channels(3) else: c2 = ch[f] - m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args) # module + m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args) # module t = str(m)[8:-2].replace('__main__.', '') # module type - np = sum([x.numel() for x in m_.parameters()]) # number params + np = sum(x.numel() for x in m_.parameters()) # number params m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params - logger.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n, np, t, args)) # print + LOGGER.info(f'{i:>3}{str(f):>18}{n_:>3}{np:10.0f} {t:<40}{str(args):<30}') # print save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist layers.append(m_) if i == 0: ch = [] ch.append(c2) - - model = nn.Sequential(*layers) - - # override all activations in model if provided in config - if 'act' in d: - logger.info(f'overriding activations in model to {d["act"]}') - replace_activations(model, d["act"]) - - return model, sorted(save) + return nn.Sequential(*layers), sorted(save) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml') parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') + parser.add_argument('--profile', action='store_true', help='profile model speed') + parser.add_argument('--test', action='store_true', help='test all yolo*.yaml') opt = parser.parse_args() - opt.cfg = check_file(opt.cfg) # check file - set_logging() + opt.cfg = check_yaml(opt.cfg) # check YAML + print_args(FILE.stem, opt) device = select_device(opt.device) # Create model @@ -303,12 +310,20 @@ def parse_model(d, ch): # model_dict, input_channels(3) model.train() # Profile - # img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 320, 320).to(device) - # y = model(img, profile=True) + if opt.profile: + img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 640, 640).to(device) + y = model(img, profile=True) + + # Test all models + if opt.test: + for cfg in Path(ROOT / 'models').rglob('yolo*.yaml'): + try: + _ = Model(cfg) + except Exception as e: + print(f'Error in {cfg}: {e}') # Tensorboard (not working https://github.com/ultralytics/yolov5/issues/2898) # from torch.utils.tensorboard import SummaryWriter # tb_writer = SummaryWriter('.') - # logger.info("Run 'tensorboard --logdir=models' to view tensorboard at http://localhost:6006/") + # LOGGER.info("Run 'tensorboard --logdir=models' to view tensorboard at http://localhost:6006/") # tb_writer.add_graph(torch.jit.trace(model, img, strict=False), []) # add model graph - # tb_writer.add_image('test', img[0], dataformats='CWH') # add model to tensorboard diff --git a/models/yolov5l.yaml b/models/yolov5l.yaml index 71ebf86e5791..ce8a5de46a27 100644 --- a/models/yolov5l.yaml +++ b/models/yolov5l.yaml @@ -1,30 +1,30 @@ -# parameters +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple - -# anchors anchors: - [10,13, 16,30, 33,23] # P3/8 - [30,61, 62,45, 59,119] # P4/16 - [116,90, 156,198, 373,326] # P5/32 -# YOLOv5 backbone +# YOLOv5 v6.0 backbone backbone: # [from, number, module, args] - [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 [-1, 3, C3, [128]], [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 - [-1, 9, C3, [256]], + [-1, 6, C3, [256]], [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 [-1, 9, C3, [512]], [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 - [-1, 1, SPP, [1024, [5, 9, 13]]], - [-1, 3, C3, [1024, False]], # 9 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 ] -# YOLOv5 head +# YOLOv5 v6.0 head head: [[-1, 1, Conv, [512, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], diff --git a/models/yolov5m.yaml b/models/yolov5m.yaml index 3c749c916246..ad13ab370ff6 100644 --- a/models/yolov5m.yaml +++ b/models/yolov5m.yaml @@ -1,30 +1,30 @@ -# parameters +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters nc: 80 # number of classes depth_multiple: 0.67 # model depth multiple width_multiple: 0.75 # layer channel multiple - -# anchors anchors: - [10,13, 16,30, 33,23] # P3/8 - [30,61, 62,45, 59,119] # P4/16 - [116,90, 156,198, 373,326] # P5/32 -# YOLOv5 backbone +# YOLOv5 v6.0 backbone backbone: # [from, number, module, args] - [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 [-1, 3, C3, [128]], [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 - [-1, 9, C3, [256]], + [-1, 6, C3, [256]], [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 [-1, 9, C3, [512]], [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 - [-1, 1, SPP, [1024, [5, 9, 13]]], - [-1, 3, C3, [1024, False]], # 9 + [-1, 3, C3, [1024]], + [-1, 1, 
SPPF, [1024, 5]], # 9 ] -# YOLOv5 head +# YOLOv5 v6.0 head head: [[-1, 1, Conv, [512, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], diff --git a/models/yolov5n.yaml b/models/yolov5n.yaml new file mode 100644 index 000000000000..8a28a40d6e20 --- /dev/null +++ b/models/yolov5n.yaml @@ -0,0 +1,48 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 0.33 # model depth multiple +width_multiple: 0.25 # layer channel multiple +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/models/yolov5s.yaml b/models/yolov5s.yaml index aca669d60d8b..f35beabb1e1c 100644 --- a/models/yolov5s.yaml +++ b/models/yolov5s.yaml @@ -1,30 +1,30 @@ -# parameters +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters nc: 80 # number of classes depth_multiple: 0.33 # model depth multiple width_multiple: 0.50 # layer channel multiple - -# anchors anchors: - [10,13, 16,30, 33,23] # P3/8 - [30,61, 62,45, 59,119] # P4/16 - [116,90, 156,198, 373,326] # P5/32 -# YOLOv5 backbone +# YOLOv5 v6.0 backbone backbone: # [from, number, module, args] - [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 [-1, 3, C3, [128]], [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 - [-1, 9, C3, [256]], + [-1, 6, C3, [256]], [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 [-1, 9, C3, [512]], [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 - [-1, 1, SPP, [1024, [5, 9, 13]]], - [-1, 3, C3, [1024, False]], # 9 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 ] -# YOLOv5 head +# YOLOv5 v6.0 head head: [[-1, 1, Conv, [512, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], diff --git a/models/yolov5x.yaml b/models/yolov5x.yaml index d3babdf7baf0..f617a027d8a2 100644 --- a/models/yolov5x.yaml +++ b/models/yolov5x.yaml @@ -1,30 +1,30 @@ -# parameters +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters nc: 80 # number of classes depth_multiple: 1.33 # model depth multiple width_multiple: 1.25 # layer channel multiple - -# anchors anchors: - [10,13, 16,30, 33,23] # P3/8 - [30,61, 62,45, 59,119] # P4/16 - [116,90, 156,198, 373,326] # P5/32 -# YOLOv5 backbone +# YOLOv5 v6.0 backbone backbone: # [from, number, module, args] - [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 [-1, 3, C3, [128]], [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 - [-1, 9, C3, [256]], + 
[-1, 6, C3, [256]],
    [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
    [-1, 9, C3, [512]],
    [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
-   [-1, 1, SPP, [1024, [5, 9, 13]]],
-   [-1, 3, C3, [1024, False]],  # 9
+   [-1, 3, C3, [1024]],
+   [-1, 1, SPPF, [1024, 5]],  # 9
   ]
 
-# YOLOv5 head
+# YOLOv5 v6.0 head
 head:
   [[-1, 1, Conv, [512, 1, 1]],
    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
diff --git a/requirements.txt b/requirements.txt
index 2a1d702cad90..96fc9d1a1f32 100755
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,31 +1,37 @@
 # pip install -r requirements.txt
-# base ----------------------------------------
+# Base ----------------------------------------
 matplotlib>=3.2.2
 numpy>=1.18.5
 opencv-python>=4.1.2
-Pillow
+Pillow>=7.1.2
 PyYAML>=5.3.1
+requests>=2.23.0
 scipy>=1.4.1
-torch~=1.7.0 # sparseml requires 1.7 right now for quantization
+torch>=1.7.0
 torchvision>=0.8.1
 tqdm>=4.41.0
 
-# logging -------------------------------------
+# Logging -------------------------------------
 tensorboard>=2.4.1
 # wandb
 
-# plotting ------------------------------------
+# Plotting ------------------------------------
+pandas>=1.1.4
 seaborn>=0.11.0
-pandas
 
-# export --------------------------------------
-# coremltools>=4.1
-# onnx>=1.9.0
-# scikit-learn==0.19.2  # for coreml quantization
+# Export --------------------------------------
+# coremltools>=4.1  # CoreML export
+# onnx>=1.9.0  # ONNX export
+# onnx-simplifier>=0.3.6  # ONNX simplifier
+# scikit-learn==0.19.2  # CoreML quantization
+# tensorflow>=2.4.1  # TFLite export
+# tensorflowjs>=3.9.0  # TF.js export
+# openvino-dev  # OpenVINO export
 
-# extras --------------------------------------
+# Extras --------------------------------------
+# albumentations>=1.0.3
 # Cython  # for pycocotools https://github.com/cocodataset/cocoapi/issues/172
-pycocotools>=2.0  # COCO mAP
-sparseml[torch,torchvision]>=0.5  # Pruning and Quantization
-thop  # FLOPS computation
+# pycocotools>=2.0  # COCO mAP
+# roboflow
+thop  # FLOPs computation
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 000000000000..20ea49a8b4d6
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,44 @@
+# Project-wide configuration file, can be used for package metadata and other tool configurations
+# Example usage: global configuration for PEP8 (via flake8) settings or default pytest arguments
+
+[metadata]
+license_file = LICENSE
+description-file = README.md
+
+
+[tool:pytest]
+norecursedirs =
+    .git
+    dist
+    build
+addopts =
+    --doctest-modules
+    --durations=25
+    --color=yes
+
+
+[flake8]
+max-line-length = 120
+exclude = .tox,*.egg,build,temp
+select = E,W,F
+doctests = True
+verbose = 2
+# https://pep8.readthedocs.io/en/latest/intro.html#error-codes
+format = pylint
+# see: https://www.flake8rules.com/
+ignore =
+    E731  # Do not assign a lambda expression, use a def
+    F405  # name may be undefined, or defined from star imports: module
+    E402  # module level import not at top of file
+    F401  # module imported but unused
+    W504  # line break after binary operator
+    E127  # continuation line over-indented for visual indent
+    E231  # missing whitespace after ‘,’, ‘;’, or ‘:’
+    E501  # line too long
+    F403  # ‘from module import *’ used; unable to detect undefined names
+
+
+[isort]
+# https://pycqa.github.io/isort/docs/configuration/options.html
+line_length = 120
+multi_line_output = 0
diff --git a/test.py b/test.py
deleted file mode 100644
index 4196dfe1ef40..000000000000
--- a/test.py
+++ /dev/null
@@ -1,349 +0,0 @@
-import argparse
-import json
-import os
-from pathlib import Path 
-from threading import Thread - -import numpy as np -import torch -import yaml -from tqdm import tqdm - -from models.export import load_checkpoint -from utils.datasets import create_dataloader -from utils.general import coco80_to_coco91_class, check_dataset, check_file, check_img_size, check_requirements, \ - box_iou, non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, set_logging, increment_path, colorstr -from utils.metrics import ap_per_class, ConfusionMatrix -from utils.plots import plot_images, output_to_target, plot_study_txt -from utils.torch_utils import select_device, time_synchronized - - -def test(data, - weights=None, - batch_size=32, - imgsz=640, - conf_thres=0.001, - iou_thres=0.6, # for NMS - save_json=False, - single_cls=False, - augment=False, - verbose=False, - model=None, - dataloader=None, - save_dir=Path(''), # for saving images - save_txt=False, # for auto-labelling - save_hybrid=False, # for hybrid auto-labelling - save_conf=False, # save auto-label confidences - plots=True, - wandb_logger=None, - compute_loss=None, - half_precision=True, - is_coco=False, - opt=None): - # Initialize/load model and set device - training = model is not None - if training: # called by train.py - device = next(model.parameters()).device # get model device - - else: # called directly - set_logging() - device = select_device(opt.device, batch_size=batch_size) - - # Directories - save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok) # increment run - (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir - - # Load model - model, extras = load_checkpoint('ensemble', weights, device) # load FP32 model - gs = max(int(model.stride.max()), 32) # grid size (max stride) - imgsz = check_img_size(imgsz, s=gs) # check img_size - - # Multi-GPU disabled, incompatible with .half() https://github.com/ultralytics/yolov5/issues/99 - # if device.type != 'cpu' and torch.cuda.device_count() > 1: - # model = nn.DataParallel(model) - - # Half - half = device.type != 'cpu' and half_precision # half precision only supported on CUDA - if half: - model.half() - - # Configure - model.eval() - if isinstance(data, str): - is_coco = data.endswith('coco.yaml') - with open(data) as f: - data = yaml.safe_load(f) - check_dataset(data) # check - nc = 1 if single_cls else int(data['nc']) # number of classes - iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95 - niou = iouv.numel() - - # Logging - log_imgs = 0 - if wandb_logger and wandb_logger.wandb: - log_imgs = min(wandb_logger.log_imgs, 100) - # Dataloader - if not training: - if device.type != 'cpu': - model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once - task = opt.task if opt.task in ('train', 'val', 'test') else 'val' # path to train/val/test images - dataloader = create_dataloader(data[task], imgsz, batch_size, gs, opt, pad=0.5, rect=True, - prefix=colorstr(f'{task}: '))[0] - - seen = 0 - confusion_matrix = ConfusionMatrix(nc=nc) - names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)} - coco91class = coco80_to_coco91_class() - s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95') - p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0. 
- loss = torch.zeros(3, device=device) - jdict, stats, ap, ap_class, wandb_images = [], [], [], [], [] - for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)): - img = img.to(device, non_blocking=True) - img = img.half() if half else img.float() # uint8 to fp16/32 - img /= 255.0 # 0 - 255 to 0.0 - 1.0 - targets = targets.to(device) - nb, _, height, width = img.shape # batch size, channels, height, width - - with torch.no_grad(): - # Run model - t = time_synchronized() - out, train_out = model(img, augment=augment) # inference and training outputs - t0 += time_synchronized() - t - - # Compute loss - if compute_loss: - loss += compute_loss([x.float() for x in train_out], targets)[1][:3] # box, obj, cls - - # Run NMS - targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels - lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling - t = time_synchronized() - out = non_max_suppression(out, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls) - t1 += time_synchronized() - t - - # Statistics per image - for si, pred in enumerate(out): - labels = targets[targets[:, 0] == si, 1:] - nl = len(labels) - tcls = labels[:, 0].tolist() if nl else [] # target class - path = Path(paths[si]) - seen += 1 - - if len(pred) == 0: - if nl: - stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls)) - continue - - # Predictions - if single_cls: - pred[:, 5] = 0 - predn = pred.clone() - scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0], shapes[si][1]) # native-space pred - - # Append to text file - if save_txt: - gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0]] # normalization gain whwh - for *xyxy, conf, cls in predn.tolist(): - xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh - line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format - with open(save_dir / 'labels' / (path.stem + '.txt'), 'a') as f: - f.write(('%g ' * len(line)).rstrip() % line + '\n') - - # W&B logging - Media Panel Plots - if len(wandb_images) < log_imgs and wandb_logger.current_epoch > 0: # Check for test operation - if wandb_logger.current_epoch % wandb_logger.bbox_interval == 0: - box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, - "class_id": int(cls), - "box_caption": "%s %.3f" % (names[cls], conf), - "scores": {"class_score": conf}, - "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()] - boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space - wandb_images.append(wandb_logger.wandb.Image(img[si], boxes=boxes, caption=path.name)) - wandb_logger.log_training_progress(predn, path, names) if wandb_logger and wandb_logger.wandb_run else None - - # Append to pycocotools JSON dictionary - if save_json: - # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ... 
- image_id = int(path.stem) if path.stem.isnumeric() else path.stem - box = xyxy2xywh(predn[:, :4]) # xywh - box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner - for p, b in zip(pred.tolist(), box.tolist()): - jdict.append({'image_id': image_id, - 'category_id': coco91class[int(p[5])] if is_coco else int(p[5]), - 'bbox': [round(x, 3) for x in b], - 'score': round(p[4], 5)}) - - # Assign all predictions as incorrect - correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device) - if nl: - detected = [] # target indices - tcls_tensor = labels[:, 0] - - # target boxes - tbox = xywh2xyxy(labels[:, 1:5]) - scale_coords(img[si].shape[1:], tbox, shapes[si][0], shapes[si][1]) # native-space labels - if plots: - confusion_matrix.process_batch(predn, torch.cat((labels[:, 0:1], tbox), 1)) - - # Per target class - for cls in torch.unique(tcls_tensor): - ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1) # target indices - pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1) # prediction indices - - # Search for detections - if pi.shape[0]: - # Prediction to target ious - ious, i = box_iou(predn[pi, :4], tbox[ti]).max(1) # best ious, indices - - # Append detections - detected_set = set() - for j in (ious > iouv[0]).nonzero(as_tuple=False): - d = ti[i[j]] # detected target - if d.item() not in detected_set: - detected_set.add(d.item()) - detected.append(d) - correct[pi[j]] = ious[j] > iouv # iou_thres is 1xn - if len(detected) == nl: # all targets already located in image - break - - # Append statistics (correct, conf, pcls, tcls) - stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls)) - - # Plot images - if plots and batch_i < 3: - f = save_dir / f'test_batch{batch_i}_labels.jpg' # labels - Thread(target=plot_images, args=(img, targets, paths, f, names), daemon=True).start() - f = save_dir / f'test_batch{batch_i}_pred.jpg' # predictions - Thread(target=plot_images, args=(img, output_to_target(out), paths, f, names), daemon=True).start() - - # Compute statistics - stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy - if len(stats) and stats[0].any(): - p, r, ap, f1, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names) - ap50, ap = ap[:, 0], ap.mean(1) # AP@0.5, AP@0.5:0.95 - mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean() - nt = np.bincount(stats[3].astype(np.int64), minlength=nc) # number of targets per class - else: - nt = torch.zeros(1) - - # Print results - pf = '%20s' + '%12i' * 2 + '%12.3g' * 4 # print format - print(pf % ('all', seen, nt.sum(), mp, mr, map50, map)) - - # Print results per class - if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats): - for i, c in enumerate(ap_class): - print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i])) - - # Print speeds - t = tuple(x / seen * 1E3 for x in (t0, t1, t0 + t1)) + (imgsz, imgsz, batch_size) # tuple - if not training: - print('Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g' % t) - - # Plots - if plots: - confusion_matrix.plot(save_dir=save_dir, names=list(names.values())) - if wandb_logger and wandb_logger.wandb: - val_batches = [wandb_logger.wandb.Image(str(f), caption=f.name) for f in sorted(save_dir.glob('test*.jpg'))] - wandb_logger.log({"Validation": val_batches}) - if wandb_images: - wandb_logger.log({"Bounding Box Debugger/Images": wandb_images}) - - # Save JSON - if save_json and len(jdict): - w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not 
None else '' # weights - anno_json = '../coco/annotations/instances_val2017.json' # annotations json - pred_json = str(save_dir / f"{w}_predictions.json") # predictions json - print('\nEvaluating pycocotools mAP... saving %s...' % pred_json) - with open(pred_json, 'w') as f: - json.dump(jdict, f) - - try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb - from pycocotools.coco import COCO - from pycocotools.cocoeval import COCOeval - - anno = COCO(anno_json) # init annotations api - pred = anno.loadRes(pred_json) # init predictions api - eval = COCOeval(anno, pred, 'bbox') - if is_coco: - eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.img_files] # image IDs to evaluate - eval.evaluate() - eval.accumulate() - eval.summarize() - map, map50 = eval.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5) - except Exception as e: - print(f'pycocotools unable to run: {e}') - - # Return results - model.float() # for training - if not training: - s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' - print(f"Results saved to {save_dir}{s}") - maps = np.zeros(nc) + map - for i, c in enumerate(ap_class): - maps[c] = ap[i] - return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t - - -if __name__ == '__main__': - parser = argparse.ArgumentParser(prog='test.py') - parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)') - parser.add_argument('--data', type=str, default='data/coco128.yaml', help='*.data path') - parser.add_argument('--batch-size', type=int, default=32, help='size of each image batch') - parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)') - parser.add_argument('--conf-thres', type=float, default=0.001, help='object confidence threshold') - parser.add_argument('--iou-thres', type=float, default=0.6, help='IOU threshold for NMS') - parser.add_argument('--task', default='val', help='train, val, test, speed or study') - parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') - parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset') - parser.add_argument('--augment', action='store_true', help='augmented inference') - parser.add_argument('--verbose', action='store_true', help='report mAP by class') - parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') - parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt') - parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') - parser.add_argument('--save-json', action='store_true', help='save a cocoapi-compatible JSON results file') - parser.add_argument('--project', default='runs/test', help='save to project/name') - parser.add_argument('--name', default='exp', help='save to project/name') - parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') - opt = parser.parse_args() - opt.save_json |= opt.data.endswith('coco.yaml') - opt.data = check_file(opt.data) # check file - print(opt) - check_requirements(exclude=('tensorboard', 'pycocotools', 'thop')) - - if opt.task in ('train', 'val', 'test'): # run normally - test(opt.data, - opt.weights, - opt.batch_size, - opt.img_size, - opt.conf_thres, - opt.iou_thres, - opt.save_json, - opt.single_cls, - opt.augment, - opt.verbose, - save_txt=opt.save_txt | opt.save_hybrid, - save_hybrid=opt.save_hybrid, - save_conf=opt.save_conf, - opt=opt - ) - - elif opt.task == 'speed': # speed benchmarks - for w in opt.weights: - test(opt.data, w, opt.batch_size, opt.img_size, 0.25, 0.45, save_json=False, plots=False, opt=opt) - - elif opt.task == 'study': # run over a range of settings and save/plot - # python test.py --task study --data coco.yaml --iou 0.7 --weights yolov5s.pt yolov5m.pt yolov5l.pt yolov5x.pt - x = list(range(256, 1536 + 128, 128)) # x axis (image sizes) - for w in opt.weights: - f = f'study_{Path(opt.data).stem}_{Path(w).stem}.txt' # filename to save to - y = [] # y axis - for i in x: # img-size - print(f'\nRunning {f} point {i}...') - r, _, t = test(opt.data, w, opt.batch_size, i, opt.conf_thres, opt.iou_thres, opt.save_json, - plots=False, opt=opt) - y.append(r + t) # results and times - np.savetxt(f, y, fmt='%10.4g') # save - os.system('zip -r study.zip study_*.txt') - plot_study_txt(x=x) # plot diff --git a/train.py b/train.py index b8dff33dc176..60be962d447f 100644 --- a/train.py +++ b/train.py @@ -1,160 +1,193 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Train a YOLOv5 model on a custom dataset. + +Models and datasets download automatically from the latest YOLOv5 release. 
+Models: https://github.com/ultralytics/yolov5/tree/master/models +Datasets: https://github.com/ultralytics/yolov5/tree/master/data +Tutorial: https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data + +Usage: + $ python path/to/train.py --data coco128.yaml --weights yolov5s.pt --img 640 # from pretrained (RECOMMENDED) + $ python path/to/train.py --data coco128.yaml --weights '' --cfg yolov5s.yaml --img 640 # from scratch +""" + import argparse -import logging import math import os import random +import sys import time +from copy import deepcopy +from datetime import datetime from pathlib import Path -from threading import Thread import numpy as np +import torch import torch.distributed as dist import torch.nn as nn -import torch.nn.functional as F -import torch.optim as optim -import torch.optim.lr_scheduler as lr_scheduler -import torch.utils.data import yaml from torch.cuda import amp from torch.nn.parallel import DistributedDataParallel as DDP -from torch.utils.tensorboard import SummaryWriter +from torch.optim import SGD, Adam, AdamW, lr_scheduler from tqdm import tqdm -import test # import test.py to get mAP after each epoch -from models.export import load_checkpoint, create_checkpoint +FILE = Path(__file__).resolve() +ROOT = FILE.parents[0] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative + +import val # for end-of-epoch mAP +from models.experimental import attempt_load from models.yolo import Model from utils.autoanchor import check_anchors +from utils.autobatch import check_train_batch_size +from utils.callbacks import Callbacks from utils.datasets import create_dataloader -from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \ - fitness, strip_optimizer, get_latest_run, check_dataset, check_file, check_git_status, check_img_size, \ - check_requirements, print_mutation, set_logging, one_cycle, colorstr +from utils.downloads import attempt_download +from utils.general import (LOGGER, check_dataset, check_file, check_git_status, check_img_size, check_requirements, + check_suffix, check_yaml, colorstr, get_latest_run, increment_path, init_seeds, + intersect_dicts, labels_to_class_weights, labels_to_image_weights, methods, one_cycle, + print_args, print_mutation, strip_optimizer) +from utils.loggers import Loggers +from utils.loggers.wandb.wandb_utils import check_wandb_resume from utils.loss import ComputeLoss -from utils.plots import plot_images, plot_labels, plot_results, plot_evolution -from utils.sparse import SparseMLWrapper -from utils.torch_utils import ModelEMA, select_device, torch_distributed_zero_first -from utils.wandb_logging.wandb_utils import WandbLogger, check_wandb_resume +from utils.metrics import fitness +from utils.plots import plot_evolve, plot_labels +from utils.torch_utils import EarlyStopping, ModelEMA, de_parallel, select_device, torch_distributed_zero_first -logger = logging.getLogger(__name__) +LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html +RANK = int(os.getenv('RANK', -1)) +WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) -def train(hyp, opt, device, tb_writer=None): - logger.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items())) - save_dir, epochs, batch_size, total_batch_size, weights, rank = \ - Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank +def train(hyp, # 
path/to/hyp.yaml or hyp dictionary + opt, + device, + callbacks + ): + save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze = \ + Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \ + opt.resume, opt.noval, opt.nosave, opt.workers, opt.freeze # Directories - wdir = save_dir / 'weights' - wdir.mkdir(parents=True, exist_ok=True) # make dir - last = wdir / 'last.pt' - best = wdir / 'best.pt' - results_file = save_dir / 'results.txt' + w = save_dir / 'weights' # weights dir + (w.parent if evolve else w).mkdir(parents=True, exist_ok=True) # make dir + last, best = w / 'last.pt', w / 'best.pt' - # Save run settings - with open(save_dir / 'hyp.yaml', 'w') as f: - yaml.dump(hyp, f, sort_keys=False) - with open(save_dir / 'opt.yaml', 'w') as f: - yaml.dump(vars(opt), f, sort_keys=False) + # Hyperparameters + if isinstance(hyp, str): + with open(hyp, errors='ignore') as f: + hyp = yaml.safe_load(f) # load hyps dict + LOGGER.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items())) - # Configure - plots = not opt.evolve # create plots + # Save run settings + if not evolve: + with open(save_dir / 'hyp.yaml', 'w') as f: + yaml.safe_dump(hyp, f, sort_keys=False) + with open(save_dir / 'opt.yaml', 'w') as f: + yaml.safe_dump(vars(opt), f, sort_keys=False) + + # Loggers + data_dict = None + if RANK in [-1, 0]: + loggers = Loggers(save_dir, weights, opt, hyp, LOGGER) # loggers instance + if loggers.wandb: + data_dict = loggers.wandb.data_dict + if resume: + weights, epochs, hyp, batch_size = opt.weights, opt.epochs, opt.hyp, opt.batch_size + + # Register actions + for k in methods(loggers): + callbacks.register_action(k, callback=getattr(loggers, k)) + + # Config + plots = not evolve # create plots cuda = device.type != 'cpu' - half_precision = cuda - init_seeds(2 + rank) - with open(opt.data) as f: - data_dict = yaml.load(f, Loader=yaml.SafeLoader) # data dict - is_coco = opt.data.endswith('coco.yaml') - - # Logging- Doing this before checking the dataset. 
Might update data_dict - loggers = {'wandb': None} # loggers dict - wandb_logger = None - if rank in [-1, 0]: - opt.hyp = hyp # add hyperparameters - run_id = torch.load(weights).get('wandb_id') if weights.endswith('.pt') and os.path.isfile(weights) else None - wandb_logger = WandbLogger(opt, Path(opt.save_dir).stem, run_id, data_dict) - loggers['wandb'] = wandb_logger.wandb - data_dict = wandb_logger.data_dict - if wandb_logger.wandb: - weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp # WandbLogger might update weights, epochs if resuming - - nc = 1 if opt.single_cls else int(data_dict['nc']) # number of classes - names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names - assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data) # check + init_seeds(1 + RANK) + with torch_distributed_zero_first(LOCAL_RANK): + data_dict = data_dict or check_dataset(data) # check if None + train_path, val_path = data_dict['train'], data_dict['val'] + nc = 1 if single_cls else int(data_dict['nc']) # number of classes + names = ['item'] if single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names + assert len(names) == nc, f'{len(names)} names found for nc={nc} dataset in {data}' # check + is_coco = isinstance(val_path, str) and val_path.endswith('coco/val2017.txt') # COCO dataset # Model - pretrained = weights.endswith('.pt') or weights.endswith('.pth') or weights.startswith('zoo:') + check_suffix(weights, '.pt') # check weights + pretrained = weights.endswith('.pt') if pretrained: - model, extras = load_checkpoint('train', weights, device, opt.cfg, hyp, nc, opt.recipe, opt.resume, rank) - ckpt, state_dict, sparseml_wrapper = extras['ckpt'], extras['state_dict'], extras['sparseml_wrapper'] - logger.info(extras['report']) # report + with torch_distributed_zero_first(LOCAL_RANK): + weights = attempt_download(weights) # download if not found locally + ckpt = torch.load(weights, map_location='cpu') # load checkpoint to CPU to avoid CUDA memory leak + model = Model(cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create + exclude = ['anchor'] if (cfg or hyp.get('anchors')) and not resume else [] # exclude keys + csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 + csd = intersect_dicts(csd, model.state_dict(), exclude=exclude) # intersect + model.load_state_dict(csd, strict=False) # load + LOGGER.info(f'Transferred {len(csd)}/{len(model.state_dict())} items from {weights}') # report else: - model = Model(opt.cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create - sparseml_wrapper = SparseMLWrapper(model, opt.recipe) - sparseml_wrapper.initialize(start_epoch=0.0) - ckpt = None - with torch_distributed_zero_first(rank): - check_dataset(data_dict) # check - train_path = data_dict['train'] - test_path = data_dict['val'] + model = Model(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create # Freeze - freeze = [] # parameter names to freeze (full or partial) + freeze = [f'model.{x}.' 
for x in (freeze if len(freeze) > 1 else range(freeze[0]))] # layers to freeze for k, v in model.named_parameters(): v.requires_grad = True # train all layers if any(x in k for x in freeze): - print('freezing %s' % k) + LOGGER.info(f'freezing {k}') v.requires_grad = False + # Image size + gs = max(int(model.stride.max()), 32) # grid size (max stride) + imgsz = check_img_size(opt.imgsz, gs, floor=gs * 2) # verify imgsz is gs-multiple + + # Batch size + if RANK == -1 and batch_size == -1: # single-GPU only, estimate best batch size + batch_size = check_train_batch_size(model, imgsz) + loggers.on_params_update({"batch_size": batch_size}) + # Optimizer nbs = 64 # nominal batch size - accumulate = max(round(nbs / total_batch_size), 1) # accumulate loss before optimizing - hyp['weight_decay'] *= total_batch_size * accumulate / nbs # scale weight_decay - logger.info(f"Scaled weight_decay = {hyp['weight_decay']}") - - pg0, pg1, pg2 = [], [], [] # optimizer parameter groups - for k, v in model.named_modules(): - if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter): - pg2.append(v.bias) # biases - if isinstance(v, nn.BatchNorm2d): - pg0.append(v.weight) # no decay - elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter): - pg1.append(v.weight) # apply decay - - if opt.adam: - optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum + accumulate = max(round(nbs / batch_size), 1) # accumulate loss before optimizing + hyp['weight_decay'] *= batch_size * accumulate / nbs # scale weight_decay + LOGGER.info(f"Scaled weight_decay = {hyp['weight_decay']}") + + g0, g1, g2 = [], [], [] # optimizer parameter groups + for v in model.modules(): + if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter): # bias + g2.append(v.bias) + if isinstance(v, nn.BatchNorm2d): # weight (no decay) + g0.append(v.weight) + elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter): # weight (with decay) + g1.append(v.weight) + + if opt.optimizer == 'Adam': + optimizer = Adam(g0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum + elif opt.optimizer == 'AdamW': + optimizer = AdamW(g0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum else: - optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True) + optimizer = SGD(g0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True) - optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']}) # add pg1 with weight_decay - optimizer.add_param_group({'params': pg2}) # add pg2 (biases) - logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0))) - del pg0, pg1, pg2 + optimizer.add_param_group({'params': g1, 'weight_decay': hyp['weight_decay']}) # add g1 with weight_decay + optimizer.add_param_group({'params': g2}) # add g2 (biases) + LOGGER.info(f"{colorstr('optimizer:')} {type(optimizer).__name__} with parameter groups " + f"{len(g0)} weight (no decay), {len(g1)} weight, {len(g2)} bias") + del g0, g1, g2 - # Scheduler https://arxiv.org/pdf/1812.01187.pdf - # https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR - if opt.linear_lr: - lf = lambda x: (1 - x / (epochs - 1)) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear - else: + # Scheduler + if opt.cos_lr: lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf'] - scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) - # plot_lr_scheduler(optimizer, scheduler, epochs) + else: + lf = lambda x: (1 - x / 
epochs) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear + scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) # plot_lr_scheduler(optimizer, scheduler, epochs) # EMA - ema = ModelEMA(model, enabled=not opt.disable_ema) if rank in [-1, 0] else None + ema = ModelEMA(model) if RANK in [-1, 0] else None # Resume start_epoch, best_fitness = 0, 0.0 if pretrained: - # Epochs - start_epoch = ckpt['epoch'] + 1 - if opt.resume: - assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % (weights, epochs) - if epochs < start_epoch: - logger.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' % - (weights, ckpt['epoch'], epochs)) - epochs += ckpt['epoch'] # finetune additional epochs - if sparseml_wrapper.qat_active(start_epoch): - ema.enabled = False - # Optimizer if ckpt['optimizer'] is not None: optimizer.load_state_dict(ckpt['optimizer']) @@ -162,144 +195,125 @@ def train(hyp, opt, device, tb_writer=None): # EMA if ema and ckpt.get('ema'): - ema.load_state_dict(ckpt) - - # Results - if ckpt.get('training_results') is not None: - results_file.write_text(ckpt['training_results']) # write results.txt + ema.ema.load_state_dict(ckpt['ema'].float().state_dict()) + ema.updates = ckpt['updates'] - del ckpt, state_dict + # Epochs + start_epoch = ckpt['epoch'] + 1 + if resume: + assert start_epoch > 0, f'{weights} training to {epochs} epochs is finished, nothing to resume.' + if epochs < start_epoch: + LOGGER.info(f"{weights} has been trained for {ckpt['epoch']} epochs. Fine-tuning for {epochs} more epochs.") + epochs += ckpt['epoch'] # finetune additional epochs - # Image sizes - gs = max(int(model.stride.max()), 32) # grid size (max stride) - nl = model.model[-1].nl # number of detection layers (used for scaling hyp['obj']) - imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size] # verify imgsz are gs-multiples + del ckpt, csd # DP mode - if cuda and rank == -1 and torch.cuda.device_count() > 1: + if cuda and RANK == -1 and torch.cuda.device_count() > 1: + LOGGER.warning('WARNING: DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n' + 'See Multi-GPU Tutorial at https://github.com/ultralytics/yolov5/issues/475 to get started.') model = torch.nn.DataParallel(model) # SyncBatchNorm - if opt.sync_bn and cuda and rank != -1: + if opt.sync_bn and cuda and RANK != -1: model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device) - logger.info('Using SyncBatchNorm()') + LOGGER.info('Using SyncBatchNorm()') # Trainloader - dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt, - hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=rank, - world_size=opt.world_size, workers=opt.workers, - image_weights=opt.image_weights, quad=opt.quad, prefix=colorstr('train: ')) - mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class - nb = len(dataloader) # number of batches - assert mlc < nc, 'Label class %g exceeds nc=%g in %s. 
Possible class labels are 0-%g' % (mlc, nc, opt.data, nc - 1) + train_loader, dataset = create_dataloader(train_path, imgsz, batch_size // WORLD_SIZE, gs, single_cls, + hyp=hyp, augment=True, cache=None if opt.cache == 'val' else opt.cache, + rect=opt.rect, rank=LOCAL_RANK, workers=workers, + image_weights=opt.image_weights, quad=opt.quad, + prefix=colorstr('train: '), shuffle=True) + mlc = int(np.concatenate(dataset.labels, 0)[:, 0].max()) # max label class + nb = len(train_loader) # number of batches + assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. Possible class labels are 0-{nc - 1}' # Process 0 - if rank in [-1, 0]: - testloader = create_dataloader(test_path, imgsz_test, batch_size * 2, gs, opt, # testloader - hyp=hyp, cache=opt.cache_images and not opt.notest, rect=True, rank=-1, - world_size=opt.world_size, workers=opt.workers, - pad=0.5, prefix=colorstr('val: '))[0] + if RANK in [-1, 0]: + val_loader = create_dataloader(val_path, imgsz, batch_size // WORLD_SIZE * 2, gs, single_cls, + hyp=hyp, cache=None if noval else opt.cache, + rect=True, rank=-1, workers=workers * 2, pad=0.5, + prefix=colorstr('val: '))[0] - if not opt.resume: + if not resume: labels = np.concatenate(dataset.labels, 0) - c = torch.tensor(labels[:, 0]) # classes + # c = torch.tensor(labels[:, 0]) # classes # cf = torch.bincount(c.long(), minlength=nc) + 1. # frequency # model._initialize_biases(cf.to(device)) if plots: - plot_labels(labels, names, save_dir, loggers) - if tb_writer: - tb_writer.add_histogram('classes', c, 0) + plot_labels(labels, names, save_dir) # Anchors if not opt.noautoanchor: check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz) + model.half().float() # pre-reduce anchor precision + + callbacks.run('on_pretrain_routine_end') # DDP mode - if cuda and rank != -1: - model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank, - # nn.MultiheadAttention incompatibility with DDP https://github.com/pytorch/pytorch/issues/26698 - find_unused_parameters=any(isinstance(layer, nn.MultiheadAttention) for layer in model.modules())) - - # Model parameters - hyp['box'] *= 3. / nl # scale to layers - hyp['cls'] *= nc / 80. * 3. / nl # scale to classes and layers - hyp['obj'] *= (imgsz / 640) ** 2 * 3. 
/ nl # scale to image size and layers + if cuda and RANK != -1: + model = DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK) + + # Model attributes + nl = de_parallel(model).model[-1].nl # number of detection layers (to scale hyps) + hyp['box'] *= 3 / nl # scale to layers + hyp['cls'] *= nc / 80 * 3 / nl # scale to classes and layers + hyp['obj'] *= (imgsz / 640) ** 2 * 3 / nl # scale to image size and layers hyp['label_smoothing'] = opt.label_smoothing model.nc = nc # attach number of classes to model model.hyp = hyp # attach hyperparameters to model - model.gr = 1.0 # iou loss ratio (obj_loss = 1.0 or iou) model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc # attach class weights model.names = names # Start training t0 = time.time() - nw = max(round(hyp['warmup_epochs'] * nb), 1000) # number of warmup iterations, max(3 epochs, 1k iterations) + nw = max(round(hyp['warmup_epochs'] * nb), 100) # number of warmup iterations, max(3 epochs, 100 iterations) # nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training + last_opt_step = -1 maps = np.zeros(nc) # mAP per class results = (0, 0, 0, 0, 0, 0, 0) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls) - if scheduler: - scheduler.last_epoch = start_epoch - 1 # do not move - scaler = amp.GradScaler(enabled=half_precision) + scheduler.last_epoch = start_epoch - 1 # do not move + scaler = amp.GradScaler(enabled=cuda) + stopper = EarlyStopping(patience=opt.patience) compute_loss = ComputeLoss(model) # init loss class - logger.info(f'Image sizes {imgsz} train, {imgsz_test} test\n' - f'Using {dataloader.num_workers} dataloader workers\n' - f'Logging results to {save_dir}\n' + LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n' + f'Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n' + f"Logging results to {colorstr('bold', save_dir)}\n" f'Starting training for {epochs} epochs...') - - # SparseML Integration - sparseml_wrapper.initialize_loggers(logger, tb_writer, wandb_logger, rank) - scaler = sparseml_wrapper.modify(scaler, optimizer, model, dataloader) - scheduler = sparseml_wrapper.check_lr_override(scheduler) - epochs = sparseml_wrapper.check_epoch_override(epochs) - for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------ - if sparseml_wrapper.qat_active(epoch): - logger.info('Disabling half precision and EMA, QAT scheduled to run') - half_precision = False - scaler._enabled = False - ema.enabled = False - model.train() - # Update image weights (optional) + # Update image weights (optional, single-GPU only) if opt.image_weights: - # Generate indices - if rank in [-1, 0]: - cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc # class weights - iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights - dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx - # Broadcast if DDP - if rank != -1: - indices = (torch.tensor(dataset.indices) if rank == 0 else torch.zeros(dataset.n)).int() - dist.broadcast(indices, 0) - if rank != 0: - dataset.indices = indices.cpu().numpy() - - # Update mosaic border + cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc # class weights + iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights + dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx + + # Update mosaic border (optional) # b = int(random.uniform(0.25 * 
imgsz, 0.75 * imgsz + gs) // gs * gs) # dataset.mosaic_border = [b - imgsz, -b] # height, width borders - mloss = torch.zeros(4, device=device) # mean losses - if rank != -1: - dataloader.sampler.set_epoch(epoch) - pbar = enumerate(dataloader) - logger.info(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'labels', 'img_size')) - if rank in [-1, 0]: - pbar = tqdm(pbar, total=nb) # progress bar + mloss = torch.zeros(3, device=device) # mean losses + if RANK != -1: + train_loader.sampler.set_epoch(epoch) + pbar = enumerate(train_loader) + LOGGER.info(('\n' + '%10s' * 7) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'labels', 'img_size')) + if RANK in [-1, 0]: + pbar = tqdm(pbar, total=nb, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar optimizer.zero_grad() for i, (imgs, targets, paths, _) in pbar: # batch ------------------------------------------------------------- ni = i + nb * epoch # number integrated batches (since train start) - imgs = imgs.to(device, non_blocking=True).float() / 255.0 # uint8 to float32, 0-255 to 0.0-1.0 + imgs = imgs.to(device, non_blocking=True).float() / 255 # uint8 to float32, 0-255 to 0.0-1.0 # Warmup if ni <= nw: xi = [0, nw] # x interp - # model.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou) - accumulate = max(1, np.interp(ni, xi, [1, nbs / total_batch_size]).round()) + # compute_loss.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou) + accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round()) for j, x in enumerate(optimizer.param_groups): # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0 - if scheduler: - x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch)]) + x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch)]) if 'momentum' in x: x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']]) @@ -309,14 +323,14 @@ def train(hyp, opt, device, tb_writer=None): sf = sz / max(imgs.shape[2:]) # scale factor if sf != 1: ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple) - imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False) + imgs = nn.functional.interpolate(imgs, size=ns, mode='bilinear', align_corners=False) # Forward - with amp.autocast(enabled=half_precision): + with amp.autocast(enabled=cuda): pred = model(imgs) # forward loss, loss_items = compute_loss(pred, targets.to(device)) # loss scaled by batch_size - if rank != -1: - loss *= opt.world_size # gradient averaged between devices in DDP mode + if RANK != -1: + loss *= WORLD_SIZE # gradient averaged between devices in DDP mode if opt.quad: loss *= 4. 
@@ -324,244 +338,208 @@ def train(hyp, opt, device, tb_writer=None): scaler.scale(loss).backward() # Optimize - if ni % accumulate == 0: + if ni - last_opt_step >= accumulate: scaler.step(optimizer) # optimizer.step scaler.update() optimizer.zero_grad() if ema: ema.update(model) - elif hasattr(scaler, "emulated_step"): - # Call for SparseML integration since the number of steps per epoch can vary - # This keeps the number of steps per epoch equivalent to the number of batches per epoch - # Does not step the scaler or the optimizer - scaler.emulated_step() - - # Print - if rank in [-1, 0]: - mloss = (mloss * i + loss_items) / (i + 1) # update mean losses - mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0) # (GB) - s = ('%10s' * 2 + '%10.4g' * 6) % ( - '%g/%g' % (epoch, epochs - 1), mem, *mloss, targets.shape[0], imgs.shape[-1]) - pbar.set_description(s) - - # Plot - if plots and ni < 3: - f = save_dir / f'train_batch{ni}.jpg' # filename - Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start() - # if tb_writer: - # tb_writer.add_graph(torch.jit.trace(model, imgs, strict=False), []) # add model graph - # tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch) - elif plots and ni == 10 and wandb_logger.wandb: - wandb_logger.log({"Mosaics": [wandb_logger.wandb.Image(str(x), caption=x.name) for x in - save_dir.glob('train*.jpg') if x.exists()]}) + last_opt_step = ni + # Log + if RANK in [-1, 0]: + mloss = (mloss * i + loss_items) / (i + 1) # update mean losses + mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G' # (GB) + pbar.set_description(('%10s' * 2 + '%10.4g' * 5) % ( + f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1])) + callbacks.run('on_train_batch_end', ni, model, imgs, targets, paths, plots, opt.sync_bn) + if callbacks.stop_training: + return # end batch ------------------------------------------------------------------------------------------------ - # end epoch ---------------------------------------------------------------------------------------------------- # Scheduler - lr = [x['lr'] for x in optimizer.param_groups] # for tensorboard - if scheduler: - scheduler.step() + lr = [x['lr'] for x in optimizer.param_groups] # for loggers + scheduler.step() - # DDP process 0 or single-GPU - if rank in [-1, 0]: + if RANK in [-1, 0]: # mAP - ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride', 'class_weights']) - final_epoch = epoch + 1 == epochs - if not opt.notest or final_epoch: # Calculate mAP - wandb_logger.current_epoch = epoch + 1 - results, maps, times = test.test(data_dict, - batch_size=batch_size * 2, - imgsz=imgsz_test, - model=ema.ema, - single_cls=opt.single_cls, - dataloader=testloader, - save_dir=save_dir, - verbose=nc < 50 and final_epoch, - plots=plots and final_epoch, - wandb_logger=wandb_logger, - compute_loss=compute_loss, - is_coco=is_coco, - half_precision=half_precision) - - # Write - with open(results_file, 'a') as f: - f.write(s + '%10.4g' * 7 % results + '\n') # append metrics, val_loss - - # Log - tags = ['train/box_loss', 'train/obj_loss', 'train/cls_loss', # train loss - 'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', - 'val/box_loss', 'val/obj_loss', 'val/cls_loss', # val loss - 'x/lr0', 'x/lr1', 'x/lr2'] # params - for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags): - if tb_writer: - tb_writer.add_scalar(tag, x, epoch) # tensorboard - if wandb_logger.wandb: 
- wandb_logger.log({tag: x}) # W&B + callbacks.run('on_train_epoch_end', epoch=epoch) + ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'names', 'stride', 'class_weights']) + final_epoch = (epoch + 1 == epochs) or stopper.possible_stop + if not noval or final_epoch: # Calculate mAP + results, maps, _ = val.run(data_dict, + batch_size=batch_size // WORLD_SIZE * 2, + imgsz=imgsz, + model=ema.ema, + single_cls=single_cls, + dataloader=val_loader, + save_dir=save_dir, + plots=False, + callbacks=callbacks, + compute_loss=compute_loss) # Update best mAP fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95] - if fi > best_fitness or sparseml_wrapper.reset_best(epoch): + if fi > best_fitness: best_fitness = fi - wandb_logger.end_epoch(best_result=best_fitness == fi) + log_vals = list(mloss) + list(results) + lr + callbacks.run('on_fit_epoch_end', log_vals, epoch, best_fitness, fi) # Save model - if (not opt.nosave) or (final_epoch and not opt.evolve): # if save - ckpt_extras = {'nc': nc, - 'best_fitness': best_fitness, - 'training_results': results_file.read_text(), - 'wandb_id': wandb_logger.wandb_run.id if wandb_logger.wandb else None} - ckpt = create_checkpoint(epoch, model, optimizer, ema, sparseml_wrapper, **ckpt_extras) + if (not nosave) or (final_epoch and not evolve): # if save + ckpt = {'epoch': epoch, + 'best_fitness': best_fitness, + 'model': deepcopy(de_parallel(model)).half(), + 'ema': deepcopy(ema.ema).half(), + 'updates': ema.updates, + 'optimizer': optimizer.state_dict(), + 'wandb_id': loggers.wandb.wandb_run.id if loggers.wandb else None, + 'date': datetime.now().isoformat()} # Save last, best and delete torch.save(ckpt, last) if best_fitness == fi: torch.save(ckpt, best) - if wandb_logger.wandb: - if ((epoch + 1) % opt.save_period == 0 and not final_epoch) and opt.save_period != -1: - wandb_logger.log_model( - last.parent, opt, epoch, fi, best_model=best_fitness == fi) + if (epoch > 0) and (opt.save_period > 0) and (epoch % opt.save_period == 0): + torch.save(ckpt, w / f'epoch{epoch}.pt') del ckpt + callbacks.run('on_model_save', last, epoch, final_epoch, best_fitness, fi) + + # Stop Single-GPU + if RANK == -1 and stopper(epoch=epoch, fitness=fi): + break + + # Stop DDP TODO: known issues shttps://github.com/ultralytics/yolov5/pull/4576 + # stop = stopper(epoch=epoch, fitness=fi) + # if RANK == 0: + # dist.broadcast_object_list([stop], 0) # broadcast 'stop' to all ranks + + # Stop DPP + # with torch_distributed_zero_first(RANK): + # if stop: + # break # must break all DDP ranks # end epoch ---------------------------------------------------------------------------------------------------- - # end training - if rank in [-1, 0]: - # Plots - if plots: - plot_results(save_dir=save_dir) # save as results.png - if wandb_logger.wandb: - files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]] - wandb_logger.log({"Results": [wandb_logger.wandb.Image(str(save_dir / f), caption=f) for f in files - if (save_dir / f).exists()]}) - # Test best.pt - logger.info('%g epochs completed in %.3f hours.\n' % (epochs - start_epoch + 1, (time.time() - t0) / 3600)) - if opt.data.endswith('coco.yaml') and nc == 80: # if COCO - for m in [last, best] if best.exists() else [last]: # speed, mAP tests - test_model, _ = load_checkpoint('ensemble', m, device) - results, _, _ = test.test(opt.data, - batch_size=batch_size * 2, - imgsz=imgsz_test, - conf_thres=0.001, - iou_thres=0.7, - model=test_model, - 
single_cls=opt.single_cls, - dataloader=testloader, - save_dir=save_dir, - save_json=True, - plots=False, - is_coco=is_coco, - half_precision=half_precision) - - # Strip optimizers - final = best if best.exists() else last # final model + # end training ----------------------------------------------------------------------------------------------------- + if RANK in [-1, 0]: + LOGGER.info(f'\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.') for f in last, best: if f.exists(): strip_optimizer(f) # strip optimizers - if opt.bucket: - os.system(f'gsutil cp {final} gs://{opt.bucket}/weights') # upload - if wandb_logger.wandb and not opt.evolve: # Log the stripped model - wandb_logger.wandb.log_artifact(str(final), type='model', - name='run_' + wandb_logger.wandb_run.id + '_model', - aliases=['latest', 'best', 'stripped']) - wandb_logger.finish_run() - else: - dist.destroy_process_group() + if f is best: + LOGGER.info(f'\nValidating {f}...') + results, _, _ = val.run(data_dict, + batch_size=batch_size // WORLD_SIZE * 2, + imgsz=imgsz, + model=attempt_load(f, device).half(), + iou_thres=0.65 if is_coco else 0.60, # best pycocotools results at 0.65 + single_cls=single_cls, + dataloader=val_loader, + save_dir=save_dir, + save_json=is_coco, + verbose=True, + plots=True, + callbacks=callbacks, + compute_loss=compute_loss) # val best model with plots + if is_coco: + callbacks.run('on_fit_epoch_end', list(mloss) + list(results) + lr, epoch, best_fitness, fi) + + callbacks.run('on_train_end', last, best, plots, epoch, results) + LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}") + torch.cuda.empty_cache() return results -if __name__ == '__main__': +def parse_opt(known=False): parser = argparse.ArgumentParser() - parser.add_argument('--weights', type=str, default='yolov3.pt', help='initial weights path') + parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='initial weights path') parser.add_argument('--cfg', type=str, default='', help='model.yaml path') - parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path') - parser.add_argument('--hyp', type=str, default='data/hyp.scratch.yaml', help='hyperparameters path') + parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') + parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-low.yaml', help='hyperparameters path') parser.add_argument('--epochs', type=int, default=300) - parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs') - parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='[train, test] image sizes') + parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch') + parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)') parser.add_argument('--rect', action='store_true', help='rectangular training') parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training') parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') - parser.add_argument('--notest', action='store_true', help='only test final epoch') - parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check') - parser.add_argument('--evolve', action='store_true', help='evolve hyperparameters') + parser.add_argument('--noval', 
action='store_true', help='only validate final epoch') + parser.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor') + parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations') parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') - parser.add_argument('--cache-images', action='store_true', help='cache images for faster training') + parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"') parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training') parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%') parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class') - parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer') + parser.add_argument('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW'], default='SGD', help='optimizer') parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode') - parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify') - parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers') - parser.add_argument('--project', default='runs/train', help='save to project/name') - parser.add_argument('--entity', default=None, help='W&B entity') + parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') + parser.add_argument('--project', default=ROOT / 'runs/train', help='save to project/name') parser.add_argument('--name', default='exp', help='save to project/name') parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') parser.add_argument('--quad', action='store_true', help='quad dataloader') - parser.add_argument('--linear-lr', action='store_true', help='linear LR') + parser.add_argument('--cos-lr', action='store_true', help='cosine LR scheduler') parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon') - parser.add_argument('--upload_dataset', action='store_true', help='Upload dataset as W&B artifact table') - parser.add_argument('--bbox_interval', type=int, default=-1, help='Set bounding-box image logging interval for W&B') - parser.add_argument('--save_period', type=int, default=-1, help='Log model after every "save_period" epoch') - parser.add_argument('--artifact_alias', type=str, default="latest", help='version of dataset artifact to be used') - parser.add_argument('--recipe', type=str, default=None, help='Path to a sparsification recipe, ' - 'see https://github.com/neuralmagic/sparseml for more information') - parser.add_argument('--disable-ema', action='store_true', help='Disable EMA model updates (enabled by default)') - opt = parser.parse_args() - - # Set DDP variables - opt.world_size = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1 - opt.global_rank = int(os.environ['RANK']) if 'RANK' in os.environ else -1 - set_logging(opt.global_rank) - if opt.global_rank in [-1, 0]: + parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)') + parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: 
backbone=10, first3=0 1 2') + parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)') + parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify') + + # Weights & Biases arguments + parser.add_argument('--entity', default=None, help='W&B: Entity') + parser.add_argument('--upload_dataset', nargs='?', const=True, default=False, help='W&B: Upload data, "val" option') + parser.add_argument('--bbox_interval', type=int, default=-1, help='W&B: Set bounding-box image logging interval') + parser.add_argument('--artifact_alias', type=str, default='latest', help='W&B: Version of dataset artifact to use') + + opt = parser.parse_known_args()[0] if known else parser.parse_args() + return opt + + +def main(opt, callbacks=Callbacks()): + # Checks + if RANK in [-1, 0]: + print_args(FILE.stem, opt) check_git_status() - check_requirements(exclude=('pycocotools', 'thop')) + check_requirements(exclude=['thop']) # Resume - wandb_run = check_wandb_resume(opt) - if opt.resume and not wandb_run: # resume an interrupted run + if opt.resume and not check_wandb_resume(opt) and not opt.evolve: # resume an interrupted run ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run() # specified or most recent path assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist' - apriori = opt.global_rank, opt.local_rank - with open(Path(ckpt).parent.parent / 'opt.yaml') as f: - opt = argparse.Namespace(**yaml.load(f, Loader=yaml.SafeLoader)) # replace - opt.cfg, opt.weights, opt.resume, opt.batch_size, opt.global_rank, opt.local_rank = '', ckpt, True, opt.total_batch_size, *apriori # reinstate - logger.info('Resuming training from %s' % ckpt) + with open(Path(ckpt).parent.parent / 'opt.yaml', errors='ignore') as f: + opt = argparse.Namespace(**yaml.safe_load(f)) # replace + opt.cfg, opt.weights, opt.resume = '', ckpt, True # reinstate + LOGGER.info(f'Resuming training from {ckpt}') else: - # opt.hyp = opt.hyp or ('hyp.finetune.yaml' if opt.weights else 'hyp.scratch.yaml') - opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_file(opt.cfg), check_file(opt.hyp) # check files + opt.data, opt.cfg, opt.hyp, opt.weights, opt.project = \ + check_file(opt.data), check_yaml(opt.cfg), check_yaml(opt.hyp), str(opt.weights), str(opt.project) # checks assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified' - opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size))) # extend to 2 sizes (train, test) - opt.name = 'evolve' if opt.evolve else opt.name - opt.save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok | opt.evolve) # increment run + if opt.evolve: + if opt.project == str(ROOT / 'runs/train'): # if default project name, rename to runs/evolve + opt.project = str(ROOT / 'runs/evolve') + opt.exist_ok, opt.resume = opt.resume, False # pass resume to exist_ok and disable resume + opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # DDP mode - opt.total_batch_size = opt.batch_size device = select_device(opt.device, batch_size=opt.batch_size) - if opt.local_rank != -1: - assert torch.cuda.device_count() > opt.local_rank - torch.cuda.set_device(opt.local_rank) - device = torch.device('cuda', opt.local_rank) - dist.init_process_group(backend='nccl', init_method='env://') # distributed backend - assert opt.batch_size % opt.world_size == 0, '--batch-size must be multiple of CUDA device count' - assert not opt.image_weights, 
'--image-weights argument is not compatible with DDP training' - opt.batch_size = opt.total_batch_size // opt.world_size - - # Hyperparameters - with open(opt.hyp) as f: - hyp = yaml.load(f, Loader=yaml.SafeLoader) # load hyps + if LOCAL_RANK != -1: + msg = 'is not compatible with YOLOv5 Multi-GPU DDP training' + assert not opt.image_weights, f'--image-weights {msg}' + assert not opt.evolve, f'--evolve {msg}' + assert opt.batch_size != -1, f'AutoBatch with --batch-size -1 {msg}, please pass a valid --batch-size' + assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE' + assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' + torch.cuda.set_device(LOCAL_RANK) + device = torch.device('cuda', LOCAL_RANK) + dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") # Train - logger.info(opt) if not opt.evolve: - tb_writer = None # init loggers - if opt.global_rank in [-1, 0]: - prefix = colorstr('tensorboard: ') - logger.info(f"{prefix}Start with 'tensorboard --logdir {opt.project}', view at http://localhost:6006/") - tb_writer = SummaryWriter(opt.save_dir) # Tensorboard - train(hyp, opt, device, tb_writer) + train(opt.hyp, opt, device, callbacks) + if WORLD_SIZE > 1 and RANK == 0: + LOGGER.info('Destroying process group... ') + dist.destroy_process_group() # Evolve hyperparameters (optional) else: @@ -593,23 +571,27 @@ def train(hyp, opt, device, tb_writer=None): 'flipud': (1, 0.0, 1.0), # image flip up-down (probability) 'fliplr': (0, 0.0, 1.0), # image flip left-right (probability) 'mosaic': (1, 0.0, 1.0), # image mixup (probability) - 'mixup': (1, 0.0, 1.0)} # image mixup (probability) - - assert opt.local_rank == -1, 'DDP mode not implemented for --evolve' - opt.notest, opt.nosave = True, True # only test/save final epoch + 'mixup': (1, 0.0, 1.0), # image mixup (probability) + 'copy_paste': (1, 0.0, 1.0)} # segment copy-paste (probability) + + with open(opt.hyp, errors='ignore') as f: + hyp = yaml.safe_load(f) # load hyps dict + if 'anchors' not in hyp: # anchors commented in hyp.yaml + hyp['anchors'] = 3 + opt.noval, opt.nosave, save_dir = True, True, Path(opt.save_dir) # only val/save final epoch # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices - yaml_file = Path(opt.save_dir) / 'hyp_evolved.yaml' # save best result here + evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv' if opt.bucket: - os.system('gsutil cp gs://%s/evolve.txt .' 
% opt.bucket) # download evolve.txt if exists + os.system(f'gsutil cp gs://{opt.bucket}/evolve.csv {evolve_csv}') # download evolve.csv if exists - for _ in range(300): # generations to evolve - if Path('evolve.txt').exists(): # if evolve.txt exists: select best hyps and mutate + for _ in range(opt.evolve): # generations to evolve + if evolve_csv.exists(): # if evolve.csv exists: select best hyps and mutate # Select parent(s) parent = 'single' # parent selection method: 'single' or 'weighted' - x = np.loadtxt('evolve.txt', ndmin=2) + x = np.loadtxt(evolve_csv, ndmin=2, delimiter=',', skiprows=1) n = min(5, len(x)) # number of previous results to consider x = x[np.argsort(-fitness(x))][:n] # top n mutations - w = fitness(x) - fitness(x).min() # weights + w = fitness(x) - fitness(x).min() + 1E-6 # weights (sum > 0) if parent == 'single' or len(x) == 1: # x = x[random.randint(0, n - 1)] # random selection x = x[random.choices(range(n), weights=w)[0]] # weighted selection @@ -620,7 +602,7 @@ def train(hyp, opt, device, tb_writer=None): mp, s = 0.8, 0.2 # mutation probability, sigma npr = np.random npr.seed(int(time.time())) - g = np.array([x[0] for x in meta.values()]) # gains 0-1 + g = np.array([meta[k][0] for k in hyp.keys()]) # gains 0-1 ng = len(meta) v = np.ones(ng) while all(v == 1): # mutate until a change occurs (prevent duplicates) @@ -635,12 +617,27 @@ def train(hyp, opt, device, tb_writer=None): hyp[k] = round(hyp[k], 5) # significant digits # Train mutation - results = train(hyp.copy(), opt, device) - + results = train(hyp.copy(), opt, device, callbacks) + callbacks = Callbacks() # Write mutation results - print_mutation(hyp.copy(), results, yaml_file, opt.bucket) + print_mutation(results, hyp.copy(), save_dir, opt.bucket) # Plot results - plot_evolution(yaml_file) - print(f'Hyperparameter evolution complete. 
Best results saved as: {yaml_file}\n' - f'Command to train a new model with these hyperparameters: $ python train.py --hyp {yaml_file}') \ No newline at end of file + plot_evolve(evolve_csv) + LOGGER.info(f'Hyperparameter evolution finished {opt.evolve} generations\n' + f"Results saved to {colorstr('bold', save_dir)}\n" + f'Usage example: $ python train.py --hyp {evolve_yaml}') + + +def run(**kwargs): + # Usage: import train; train.run(data='coco128.yaml', imgsz=320, weights='yolov5m.pt') + opt = parse_opt(True) + for k, v in kwargs.items(): + setattr(opt, k, v) + main(opt) + return opt + + +if __name__ == "__main__": + opt = parse_opt() + main(opt) diff --git a/tutorial.ipynb b/tutorial.ipynb index 3954feadfcb2..1479a164cd8e 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -6,7 +6,6 @@ "name": "YOLOv5 Tutorial", "provenance": [], "collapsed_sections": [], - "toc_visible": true, "include_colab_link": true }, "kernelspec": { @@ -16,9 +15,10 @@ "accelerator": "GPU", "widgets": { "application/vnd.jupyter.widget-state+json": { - "8815626359d84416a2f44a95500580a4": { + "eb95db7cae194218b3fcefb439b6352f": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", + "model_module_version": "1.5.0", "state": { "_view_name": "HBoxView", "_dom_classes": [], @@ -28,17 +28,19 @@ "_view_count": null, "_view_module_version": "1.5.0", "box_style": "", - "layout": "IPY_MODEL_3b85609c4ce94a74823f2cfe141ce68e", + "layout": "IPY_MODEL_769ecde6f2e64bacb596ce972f8d3d2d", "_model_module": "@jupyter-widgets/controls", "children": [ - "IPY_MODEL_876609753c2946248890344722963d44", - "IPY_MODEL_8abfdd8778e44b7ca0d29881cb1ada05" + "IPY_MODEL_384a001876054c93b0af45cd1e960bfe", + "IPY_MODEL_dded0aeae74440f7ba2ffa0beb8dd612", + "IPY_MODEL_5296d28be75740b2892ae421bbec3657" ] } }, - "3b85609c4ce94a74823f2cfe141ce68e": { + "769ecde6f2e64bacb596ce972f8d3d2d": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", + "model_module_version": "1.2.0", "state": { "_view_name": "LayoutView", "grid_template_rows": null, @@ -87,118 +89,76 @@ "left": null } }, - "876609753c2946248890344722963d44": { + "384a001876054c93b0af45cd1e960bfe": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_view_name": "HTMLView", + "style": "IPY_MODEL_9f09facb2a6c4a7096810d327c8b551c", + "_dom_classes": [], + "description": "", + "_model_name": "HTMLModel", + "placeholder": "​", + "_view_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "value": "100%", + "_view_count": null, + "_view_module_version": "1.5.0", + "description_tooltip": null, + "_model_module": "@jupyter-widgets/controls", + "layout": "IPY_MODEL_25621cff5d16448cb7260e839fd0f543" + } + }, + "dded0aeae74440f7ba2ffa0beb8dd612": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", + "model_module_version": "1.5.0", "state": { "_view_name": "ProgressView", - "style": "IPY_MODEL_78c6c3d97c484916b8ee167c63556800", + "style": "IPY_MODEL_0ce7164fc0c74bb9a2b5c7037375a727", "_dom_classes": [], - "description": "100%", + "description": "", "_model_name": "FloatProgressModel", "bar_style": "success", - "max": 819257867, + "max": 818322941, "_view_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", - "value": 819257867, + "value": 818322941, "_view_count": null, "_view_module_version": "1.5.0", "orientation": "horizontal", "min": 0, "description_tooltip": null, "_model_module": "@jupyter-widgets/controls", - 
"layout": "IPY_MODEL_9dd0f182db5d45378ceafb855e486eb8" + "layout": "IPY_MODEL_c4c4593c10904cb5b8a5724d60c7e181" } }, - "8abfdd8778e44b7ca0d29881cb1ada05": { + "5296d28be75740b2892ae421bbec3657": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", + "model_module_version": "1.5.0", "state": { "_view_name": "HTMLView", - "style": "IPY_MODEL_a3dab28b45c247089a3d1b8b09f327de", + "style": "IPY_MODEL_473371611126476c88d5d42ec7031ed6", "_dom_classes": [], "description": "", "_model_name": "HTMLModel", "placeholder": "​", "_view_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", - "value": " 781M/781M [08:43<00:00, 1.56MB/s]", + "value": " 780M/780M [00:11<00:00, 91.9MB/s]", "_view_count": null, "_view_module_version": "1.5.0", "description_tooltip": null, "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_32451332b7a94ba9aacddeaa6ac94d50" + "layout": "IPY_MODEL_65efdfd0d26c46e79c8c5ff3b77126cc" } }, - "78c6c3d97c484916b8ee167c63556800": { - "model_module": "@jupyter-widgets/controls", - "model_name": "ProgressStyleModel", - "state": { - "_view_name": "StyleView", - "_model_name": "ProgressStyleModel", - "description_width": "initial", - "_view_module": "@jupyter-widgets/base", - "_model_module_version": "1.5.0", - "_view_count": null, - "_view_module_version": "1.2.0", - "bar_color": null, - "_model_module": "@jupyter-widgets/controls" - } - }, - "9dd0f182db5d45378ceafb855e486eb8": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "state": { - "_view_name": "LayoutView", - "grid_template_rows": null, - "right": null, - "justify_content": null, - "_view_module": "@jupyter-widgets/base", - "overflow": null, - "_model_module_version": "1.2.0", - "_view_count": null, - "flex_flow": null, - "width": null, - "min_width": null, - "border": null, - "align_items": null, - "bottom": null, - "_model_module": "@jupyter-widgets/base", - "top": null, - "grid_column": null, - "overflow_y": null, - "overflow_x": null, - "grid_auto_flow": null, - "grid_area": null, - "grid_template_columns": null, - "flex": null, - "_model_name": "LayoutModel", - "justify_items": null, - "grid_row": null, - "max_height": null, - "align_content": null, - "visibility": null, - "align_self": null, - "height": null, - "min_height": null, - "padding": null, - "grid_auto_rows": null, - "grid_gap": null, - "max_width": null, - "order": null, - "_view_module_version": "1.2.0", - "grid_template_areas": null, - "object_position": null, - "object_fit": null, - "grid_auto_columns": null, - "margin": null, - "display": null, - "left": null - } - }, - "a3dab28b45c247089a3d1b8b09f327de": { + "9f09facb2a6c4a7096810d327c8b551c": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", "state": { "_view_name": "StyleView", "_model_name": "DescriptionStyleModel", @@ -210,80 +170,10 @@ "_model_module": "@jupyter-widgets/controls" } }, - "32451332b7a94ba9aacddeaa6ac94d50": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "state": { - "_view_name": "LayoutView", - "grid_template_rows": null, - "right": null, - "justify_content": null, - "_view_module": "@jupyter-widgets/base", - "overflow": null, - "_model_module_version": "1.2.0", - "_view_count": null, - "flex_flow": null, - "width": null, - "min_width": null, - "border": null, - "align_items": null, - "bottom": null, - "_model_module": "@jupyter-widgets/base", - "top": null, - "grid_column": null, - "overflow_y": null, 
- "overflow_x": null, - "grid_auto_flow": null, - "grid_area": null, - "grid_template_columns": null, - "flex": null, - "_model_name": "LayoutModel", - "justify_items": null, - "grid_row": null, - "max_height": null, - "align_content": null, - "visibility": null, - "align_self": null, - "height": null, - "min_height": null, - "padding": null, - "grid_auto_rows": null, - "grid_gap": null, - "max_width": null, - "order": null, - "_view_module_version": "1.2.0", - "grid_template_areas": null, - "object_position": null, - "object_fit": null, - "grid_auto_columns": null, - "margin": null, - "display": null, - "left": null - } - }, - "0fffa335322b41658508e06aed0acbf0": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HBoxModel", - "state": { - "_view_name": "HBoxView", - "_dom_classes": [], - "_model_name": "HBoxModel", - "_view_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_view_count": null, - "_view_module_version": "1.5.0", - "box_style": "", - "layout": "IPY_MODEL_a354c6f80ce347e5a3ef64af87c0eccb", - "_model_module": "@jupyter-widgets/controls", - "children": [ - "IPY_MODEL_85823e71fea54c39bd11e2e972348836", - "IPY_MODEL_fb11acd663fa4e71b041d67310d045fd" - ] - } - }, - "a354c6f80ce347e5a3ef64af87c0eccb": { + "25621cff5d16448cb7260e839fd0f543": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", + "model_module_version": "1.2.0", "state": { "_view_name": "LayoutView", "grid_template_rows": null, @@ -332,56 +222,14 @@ "left": null } }, - "85823e71fea54c39bd11e2e972348836": { - "model_module": "@jupyter-widgets/controls", - "model_name": "FloatProgressModel", - "state": { - "_view_name": "ProgressView", - "style": "IPY_MODEL_8a919053b780449aae5523658ad611fa", - "_dom_classes": [], - "description": "100%", - "_model_name": "FloatProgressModel", - "bar_style": "success", - "max": 22091032, - "_view_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "value": 22091032, - "_view_count": null, - "_view_module_version": "1.5.0", - "orientation": "horizontal", - "min": 0, - "description_tooltip": null, - "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_5bae9393a58b44f7b69fb04816f94f6f" - } - }, - "fb11acd663fa4e71b041d67310d045fd": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HTMLModel", - "state": { - "_view_name": "HTMLView", - "style": "IPY_MODEL_d26c6d16c7f24030ab2da5285bf198ee", - "_dom_classes": [], - "description": "", - "_model_name": "HTMLModel", - "placeholder": "​", - "_view_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "value": " 21.1M/21.1M [00:02<00:00, 9.36MB/s]", - "_view_count": null, - "_view_module_version": "1.5.0", - "description_tooltip": null, - "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_f7767886b2364c8d9efdc79e175ad8eb" - } - }, - "8a919053b780449aae5523658ad611fa": { + "0ce7164fc0c74bb9a2b5c7037375a727": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", + "model_module_version": "1.5.0", "state": { "_view_name": "StyleView", "_model_name": "ProgressStyleModel", - "description_width": "initial", + "description_width": "", "_view_module": "@jupyter-widgets/base", "_model_module_version": "1.5.0", "_view_count": null, @@ -390,9 +238,10 @@ "_model_module": "@jupyter-widgets/controls" } }, - "5bae9393a58b44f7b69fb04816f94f6f": { + "c4c4593c10904cb5b8a5724d60c7e181": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", + "model_module_version": 
"1.2.0", "state": { "_view_name": "LayoutView", "grid_template_rows": null, @@ -441,9 +290,10 @@ "left": null } }, - "d26c6d16c7f24030ab2da5285bf198ee": { + "473371611126476c88d5d42ec7031ed6": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", "state": { "_view_name": "StyleView", "_model_name": "DescriptionStyleModel", @@ -455,9 +305,10 @@ "_model_module": "@jupyter-widgets/controls" } }, - "f7767886b2364c8d9efdc79e175ad8eb": { + "65efdfd0d26c46e79c8c5ff3b77126cc": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", + "model_module_version": "1.2.0", "state": { "_view_name": "LayoutView", "grid_template_rows": null, @@ -523,13 +374,14 @@ { "cell_type": "markdown", "metadata": { - "id": "HvhYZrIZCEyo" + "id": "t6MPjfT5NrKQ" }, "source": [ - "\n", + "\n", + "\n", "\n", - "This is the **official YOLOv5 🚀 notebook** authored by **Ultralytics**, and is freely available for redistribution under the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/). \n", - "For more information please visit https://github.com/ultralytics/yolov5 and https://www.ultralytics.com. Thank you!" + "This is the **official YOLOv5 🚀 notebook** by **Ultralytics**, and is freely available for redistribution under the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/). \n", + "For more information please visit https://github.com/ultralytics/yolov5 and https://ultralytics.com. Thank you!" ] }, { @@ -550,27 +402,26 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "9b022435-4197-41fc-abea-81f86ce857d0" + "outputId": "3809e5a9-dd41-4577-fe62-5531abf7cca2" }, "source": [ - "!git clone https://github.com/ultralytics/yolov5 # clone repo\n", + "!git clone https://github.com/ultralytics/yolov5 # clone\n", "%cd yolov5\n", - "%pip install -qr requirements.txt # install dependencies\n", + "%pip install -qr requirements.txt # install\n", "\n", "import torch\n", - "from IPython.display import Image, clear_output # to display images\n", - "\n", - "clear_output()\n", - "print(f\"Setup complete. Using torch {torch.__version__} ({torch.cuda.get_device_properties(0).name if torch.cuda.is_available() else 'CPU'})\")" + "from yolov5 import utils\n", + "display = utils.notebook_init() # checks" ], "execution_count": null, "outputs": [ { "output_type": "stream", + "name": "stdout", "text": [ - "Setup complete. Using torch 1.8.1+cu101 (Tesla V100-SXM2-16GB)\n" - ], - "name": "stdout" + "YOLOv5 🚀 v6.0-48-g84a8099 torch 1.10.0+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", + "Setup complete ✅ (2 CPUs, 12.7 GB RAM, 42.2/166.8 GB disk)\n" + ] } ] }, @@ -584,7 +435,15 @@ "\n", "`detect.py` runs YOLOv5 inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/detect`. 
Example inference sources are:\n", "\n", - " " + "```shell\n", + "python detect.py --source 0 # webcam\n", + " img.jpg # image \n", + " vid.mp4 # video\n", + " path/ # directory\n", + " path/*.jpg # glob\n", + " 'https://youtu.be/Zgi9g1ksQHc' # YouTube\n", + " 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream\n", + "```" ] }, { @@ -592,58 +451,51 @@ "metadata": { "id": "zR9ZbuQCH7FX", "colab": { - "base_uri": "https://localhost:8080/", - "height": 534 + "base_uri": "https://localhost:8080/" }, - "outputId": "c9a308f7-2216-4805-8003-eca8dd0dc30d" + "outputId": "8f7e6588-215d-4ebd-93af-88b871e770a7" }, "source": [ - "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images/\n", - "Image(filename='runs/detect/exp/zidane.jpg', width=600)" + "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images\n", + "display.Image(filename='runs/detect/exp/zidane.jpg', width=600)" ], "execution_count": null, "outputs": [ { "output_type": "stream", + "name": "stdout", "text": [ - "Namespace(agnostic_nms=False, augment=False, classes=None, conf_thres=0.25, device='', exist_ok=False, img_size=640, iou_thres=0.45, name='exp', project='runs/detect', save_conf=False, save_txt=False, source='data/images/', update=False, view_img=False, weights=['yolov5s.pt'])\n", - "YOLOv5 🚀 v5.0-1-g0f395b3 torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", + "\u001b[34m\u001b[1mdetect: \u001b[0mweights=['yolov5s.pt'], source=data/images, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/detect, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False\n", + "YOLOv5 🚀 v6.0-48-g84a8099 torch 1.10.0+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", "\n", "Fusing layers... \n", - "Model Summary: 224 layers, 7266973 parameters, 0 gradients, 17.0 GFLOPS\n", - "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, Done. (0.008s)\n", - "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, Done. (0.008s)\n", - "Results saved to runs/detect/exp\n", - "Done. 
(0.087)\n"
-      ],
-      "name": "stdout"
-     },
-     {
-      "output_type": "execute_result",
-      "data": {
-       "image/jpeg": "[…base64-encoded JPEG omitted: this is the annotated zidane.jpg detection result rendered inline by the removed notebook output; the encoded blob runs for several hundred kilobytes and carries no readable content…]"
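Editorial note (not part of the patch): the removed `execute_result` above is the inline JPEG produced by `display.Image(filename='runs/detect/exp/zidane.jpg', width=600)` in the detect cell. The same inference can also be reproduced without `detect.py` through YOLOv5's PyTorch Hub interface. A minimal sketch, assuming the `ultralytics/yolov5` hub entry point and the result-object helpers it documents:

```python
# Minimal sketch: zidane.jpg inference via PyTorch Hub instead of detect.py.
# Assumes the ultralytics/yolov5 hub entry point; not part of this patch.
import torch

model = torch.hub.load('ultralytics/yolov5', 'yolov5s')  # auto-downloads yolov5s.pt
model.conf = 0.25  # confidence threshold, matching --conf 0.25 above

results = model('https://ultralytics.com/images/zidane.jpg')
results.print()  # e.g. "2 persons, 2 ties", as in the stdout log above
results.save()   # writes the annotated image under runs/detect/
```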
fY4K1M5+88xpN7purJuGZdybPvfxN/DWlqCOqPMj71X+Hf/FWZdM8e55h95K9Sn72h49Sn7xXX95cDL42/L/wGtXSYx5gOxvl+XbWbD500ibEUsv39tdFo9pj6Sf7FdPL7gUYzlI3NJsfM5+6yt86766rQ9NS4w/k5VWxtasXR7GGVfn/AIm2o1dloenooZ0C72XbXJU7M9mjTlze8WtLsEkZ9k0Y+fau7+9XUabo728afJ95Nybl3baqeHbcQqj3McbBfl3NXV2FmkjPJDcq4Xb96uOpGXxHq06dKUYpEOk6X5ylILZstF95f71emfAXwimreLre21W5YLuXfcfd2rXKabZfZ7h3jmZG+4vyfLur3b9kP4U63488VWc3h7TY79VlXzY2bb827/x6lHlcRY6MY4WTP1u/ZK+FOm/DP4Z6LDbbrq81KwVrJZF+6rfM25q1vGEj+OvHiTWFst5pfhtdlrb26/Leag38Un+zHXGfAP41eJ9e8SS/DeGzaKbSdNaJmX/l3Xbtbb/tNX0V8K/CPhfS/DMNhYRR+YsrS3En3mZvvM1cMv3kryPzapzJyPOND/Zr1KGFdV8SXLXVxskuNXmb/VLIzfLHHu/hWvnX9rjwb4vvvM+HulJbvaK6tcafp6N5cO5vl86Rf9Yzf3fu19SftCfFC+1vTIPA/gIXn2mSfY5tU+VV+7ub+81eaftlfEzwv+zP8K7fTdHS1PiprJVih3+Z9jkZW3TMv8Un93+7UuVKNKXY68LRk6sbbn5i/Gj4av4T1ObR9YS3udXbd5sMO1fssf8AtKvyq3+zXg+gfC3VY9Yge53QrJP/AKU0nzMq/wCzX0BY69rGrLI+sbUnvLhpJ2kXczbv9qsK6h+2X0eiWz7ZvN3TyMn3f92vmamKjf4T7/C5VOlh+aRw3iLwe+patPqttZRwQw7Yk/vNHt+Zqy4/Elh4Z1LzrnSrd0t/me3k+Xc235a9Pvo7DQfDGuGaFmnjt22M38Tf3Vrx3VtB8YeMNHm1u20qOB5k/wBW0u5qKdT23vIwo4flZ4r8ZvE/xI+Nnj648YeJ5ldY38rTbVflgs41/hjX+Hd/E1ZOk+DfEcbxubbaFf8AiT5a9T0P4J+P7qF5byzjTyZdreZL91q6qx/Z0+JbWsVzYaUt5u3b1t7jdt2/w12VqyjGNmdeFwspu7OS8A+A9Y3R6k9hJcS7/kWNl+b/AHq7nxNZ2ek2qasmiTQyKu6Xcn3f+BVZ8N+EfiLoOqeTdeErqFI0/wBX5W7/AL5rtvFmueHpPC+zXka3favm29wu3buryK9aPMe9Rw0eXmRjfDvx/olnshunVE3q21v/AGWvvb9lXXE8SWdto9nIyJIqq8lxtb5f4a+Crz4d+Etb0m21XR7xVbfuT7P91v8Adr65/Yt16aOxSO1n3vHt/wBcm1v92uLESheMkejh4ycHBo+0ZdP8K+FbX+1tYufnX5XZV3eZVvwT4i0r4heJjo+m20zJDtVty7f92l15/wC3PBdnc6rNa/uUVpdrbWZqT4O3GiaP4gj1j+1rWPajOkbS/wANdcKlKP8AhPPrRnGlJxjqe4W/wlbULBJgm0bflWue8Y/DK+0O3juAjKD8r7a73wF4+s9ejCJqNuyBtqqtanjHyrq0XO1l/ir2ZYfAV8LzwPi6ea5nhsdyTPi79pb4dvq2gyv9m+aGJmST/wCKr5n+Bf8AYnh/4sWb6x+7aO82xTL/AAtur9AvGXhWw8WCWxvIcbd33fvV4P4T/ZB0q18YX0NzDcT2011vguNu3y23fKteVhYxjV0OnPeWpSjM+x5YLZvhqNE169WeK5sNsdwv3WXbX4x/8FY/hzo+tf8ACTaDePNHpWl6XNcJJGzfvLr70Kt/s1+vXg+K6+Hvga58H+LTJdRQt5dq6/Mvl7a/O7/gsN8LZ7z4J+J/EPhW5ke3jtfPfy23P975vlr6aMvejA+AnKPt7n4GrcGTyxc7fOZdsrf7VM2J5flvt37/AOGtW+0/7LuhdMP8zfd+bduqs2mtIV2bhEzfPuT5q9eMoQ91nurmqQMy48mdtnl7f93+Gsy6skWOR0RifvJ89b0lqnmL5e1f9plqrdWqFmR3VQ3/AC02U4ygOVPl3ObmtfOV1dFx/svWVqGmw2+R975/4v7tdXNpvkwtDs+Zl+8q1mXGmzLG2+Fm+fbW1OpKWpyVI/ZOXuLeCNy/k/xfdqpcRw7nfZW9dafubY7/AHfl21m3MCRs2/cq11xlzHNyozWj+benT+OvsL/gl1uOi+MmOMG5sduP92evkSZE3b0Rifu7a+uv+CXAxo3jMbcf6VY/+gz1+n+EOvH+F9Kn/puZ9Fwj/wAj+l/29/6SzwP9oyZ/+GgvGK7sgeIrvj/tqaxNNuPL2JN0krZ/aOwfj94yOSCviW6/9GGub0+R2XYzt/srXxmff8jzFf8AXyf/AKUzyMf/AL9V/wAUvzZ1Oj3SNuRHw38DfxVu29w6yLDs27vv1yelzG3l3p81b1nN5jK78bvvN/drxvfOTl5pnSWl4nl7HT5l+V/n+8taVjcfJsSFflf+KsG3mh2+WduWb5P9qtXS5nkYxv5itu+9VRlH4jGUZmxbt+73zR/IvyrJ/FUMlxM0jOkfH3tzJ95qYskm9Rsbfs/75pZLh/nhO5F3fw/N81aR96Bj8I6WaaSMom7aq/eVP/QqryxI20P/AMCqeN5mZk+Vxt3eX/E1OjXd8kPyVUZcxPL9ozJraG3XeifKz/w/xVnXUMO95Htox/drcvLdFtVfYo3LuTbWXdL8q7AzL96plI1jyGJcQozF0P8A31WddSeYp4kEMbrvb+9W7dW7qGfO0N83+zWPdW/mK/f/AHanm5jojLlMy8m3/P5mTsrIuroN877Xdf7v92tPUI0jU+TuG35X+Tbt/wBmsS+XbJshfaf71Ryx5jXmker3lr9nk3pueJfl3VmzW6TM2zaF/grcuLdFHnHcR8qbW/hqo1rDJH/CN391a+PlL7R/QcY83wmTHYuzLM77gv36lW1SSRnhhZSvzfLVo2b+cSnzfd+an+RNC0SeSzOzsrt/DWHtOY648kYFdWeFUR9uG/8AHqkt5nikOxNoX+9822nL5xjy6Kf8/epm5GzD/H/epx973ZHBipR5fdJ4bi5aP9zNHs2Ns/vNVuHZJbNbI7P/ABf7W2qEbfZ2xN827/x2prW+e0be78L/AMtP7td0fhsj4fMOXn9407WSGFfJRGb+H7ta+nq8kHzpn+78+2se1mfcr71/3lq5b6hN5j7JsCRNqMq/LWrj7uh4EvdmbemyJ8qbFRt3zMv8VTLNNJHI/nLuX+L/AJ6L/s1nW/7uz++2/ZuRtv3quBPLg8ya5/g+ZWStIS5dSOX7I+GRI4XSGNgmz7rPVeS4+ZPOYsd/yU6Tfuf7M6xFv4WqnL50kjuky7fK2vC1dPP9oxkWbjUElk8l32t/s/dWoFvILyQJsYfNtX978tVmbbI6Kioqr8zN/FTI7xN2+Ty97L8jfdWueUuccTY+1Jb/ALlJF/dv8isv3m
q/p9x84+eNh/sp8y1z6373EiI6Kr7fmaN6sW00KyK6bi2zduauOfNy+Z0x930Or02b95++mX5f71acepTKzBEXfs3Purn9LbanD7y339yfdrUhZP8Alt137t1cXNzTvI6YxlGBuWt8hhUIJF3fL5i/w/7tWVmSaNfJRti/Ju8373+1WXayJH8+xTtXbEy7mbbV2Jk2Lvs2H8KfPt3V1UafNL3iKlSMYmxpLNMqTecrPHtX5vvfLXXaDbpMrv5Ledv+Xb/dauP01vmVEhVNz/Kyp8tdt4fXzpPIRGR2Vd/l17mFp+6eFiqkoysdDo+mvIzb3b5flZl+7XQ6TpO1o5tmxGXd/stVHQbXdbxecmGh/wCeb10liqM4f92rr8u1Xr1KcYx+E4alTlLuk6D8rPDtQSfNu2LXS6X4f+yyKfJWUtF8q7/u03w3Y+XGyPDH8q/LG33mrqNL02e4bzo4VRVXay10RjGJyc5FpOkww42Qq+77m193zfxVvQ+H08xPJtlZ9m6Jn+XbV7S4YYlR0h3SL/dX5m/vVqRqyjyfJkD+V/q2Sq5UHtOU5280VPtD749n975K5/XtP8m8EM3y+d823+9XeXGzy1eZGd9jb/7tcxrlrbQyhNjFNu7c33aj7ZpzcxyOqWaQ7vOudqeVv/2v92uW1eOGa4a88r5FT91I33v+BV2Wp2brIiJzDI/zyKtcv4gjdZzs3bpPlZm+78tLlluddOfLM4XXo0mh+SHarJ/F8u6vM/G0dtaxvNawqV835tteo641tcb3vHbEbN833a8o8YL/AKU6u6tu/h+6tTLk5Top1JnmnjCOb7P+7mjHz7vl+9trhtek3bofP2oqbv8Aers/FVwlx5mx/nVG3bfmrzHxRdP5hQPzXJyzqG0sRFR94yNU1b7+z7y/L8tZNxq0c0b3Jm2Nv27t1VtavX3702/7y1h3GpPu2O/3f71Vy/ynDWxHtInW22uBdohfa6/Nu/hqZta3N+7fb/7NXFQas67k3/K38TVet9Q8xvlm2bfm+ap9nAx9tI6ttY2YdH3Fko+2Jw6df49rVzK6kYlb+PbUi3ySS/I7f3qIx5ZGUqkpe6dPb3m2RpoX/eL8rbv4a0bfUtzJdO67l/4FXI2eqQtHu3/8C/2qu2epTSOux9n+y3y7q2jH3zM73T9akVkm+8v93bWo2tedHvFyzL937u3a1cTY6g4VUmf52+atG31abDJu2p/erqicnxHZ2/iF5sbPLYqvzM33VqVfESBm2ffX76/3f9quTh1CaPbDH/wGRfu1ajZPl+zTbG/iX+8ta+hh7M7bS9YmkiWGafcjLt2r8rf71bVnrT7kh3yP/F/eX/ZrhrGbzFV3vG37Nrs33VrbsY7xIUe2m83c6szMv8NcdSpynVRpx6Hc2erQx2/2lOX/AIlX5ttWl1yaSETfM/z7f97dXLWN1cw/8eyfeTczL97/AL5rQtdShjhDo+2H+Nm/vV5WIqcsbnsYWjzStymxeeIEjbyYd22Ph2/iqncaw9rHvebCyP8Adb+9WU155032lHXZuZdv96oWZJLN7ab5BG3ybf71ePWxHNE97D4X3jT/ALSe8z9pj2+X8sSr91v96oG1SaTb5235f73/ACz/APiqprdO0QdEXYr/AL1f9mmTXfmMsMPzLs+RtlcVOod0aMfsj5Lp7iGWZ3b5k2/LWFeN5jeT80W7+983zVozXFyI2tt6hG+4zfxVkXjfudm9lZvl3f7Nb06ntNEYVKMY7mdqW+H9z23t8rP91ayLqO5uPkSFmRf4f4mrQuriFoU3fvfn2/NVW4vljk8mGHyjt2tu/vV2c0oxujk5feM37DuZkRMNt2/MlW7PiH/x35aYs26Yp8su5fuq3zLUsLPb3C/Pt2/cqJT5dB06ZetVh3ryq/71aGn/ALuVd/3VrJ3PJIyTJ93/AMdrY0G8Ty/JfzpRHu3tIn3qIRlH3jWUfaaHUaDcJ5ao6MzK27cv3a6zQbyQTNInzO33F2/LXD6bcJGyI74Vfm/2q6Wx1J9ypvkEvy7GjZfu162Hqe8eVWp+7ynfaPJbfZ2dHZ9z7Uh83asf+1W7pupTRstz529lT7rfxVw2lal5kZmd2V12/u2Td/wGtnT7ry1KJMu35v8AZr0adQ8ypR5TsLXVPMs/9c0e19rrIv8AF/s1r2uqXkEYvLOZndfllVotqrXFQ6nthE/ys0fysrPu2/71Os9amaYyC55+VWZq9GnLuedWp8x6FD4lhgaJIXYFlZvlT71Nj8T/AGj9yjr+8f8Ah/iWuGbxI9q7wWz/AC/xs3/oK1HJ4shUqnnMiL9zbXbHklsefKMIzO3m1yFrf7TC7AfMPJ/2qwdY1wrCu/gs3+sV/mrm5vFO3c9n8vz7XaSX5VrMk8UwtDKlzMvnK21l/haoqR5dCffl7xs3esJIX2bnC7d395v9qsbWdYe32b3XzP8Alrtf5dtYN14qgt3e2S5jV2/h/irBvvEk1xGzb1VY1+dt1cdSPNLQ6acvdNnWvEENnHNM8ykxv/C33q4fxDrk0k0ib/l37kkb5t26q+teKkkDvNt3t8zMtcnrHiItN5KBdn3k+b5lrgqfynZHlkbtxrVtZw7/ALTvZv8Almv/ALLWVeat5jApt+Z6xJtS3SI/nZX+61Vprh3kZ4Xj/eN97dXBUj7T4jqjI6i3u8Ks29Szf3flq3a6lJH8+/Yfvbv7y1ydvqEyxKnk7vn+T5qnk1Sa2yjzbdq7vm+9urilT5Ye6ehTqR925uyaqk0b/eZmfa275flqS31NGXzkto/m/hX/ANmrn21iZtuyZU8z5k3U+HWHaSN0Ta6/e3PtVq55VJcvwndTlyy1OvtdQmkX54VTc/zMv3q0oLy2jkV0Tdt/4FXK2OuQ+YryXK7Gf/V7Pmq6usYjeTYqqv3W/vVEeaR1HQxyJ5kSIjH5fut/dq/tcXSbPubPu7/mVv71YdnqXmbd7rhU+dmq/Y6pDcXH2YoyMy/I237vy0hx97c1Ifs3lt5jxjci7Fk+arUMHkqIUdX2oyuuz7v+7WRbt5eyH5Svyt8y7q1GZII9jo21fmb5/l/2adT3fhJjT5viGyabCuy5+YfPu2s9ej6AGT4akO4bFlP8wHXl686mmS6YpDuRFT978/3a9G8PRmP4bCNn2EWU3zD+H738q/a/A6bedZhf/oFqf+lQPo+HKThiattuR/mjzMxww2ryeTMrN/DUN1H50ex3b5fl3L8ys1XZLia8t4kTcwVdvmN8u3/aqhdM/l+RDuVF+ZGV/wDvqv59qVpVDmo4eFMxL63eRv30zBI/l2r81ZV3G8dw/G7y/wDVbf8A2auhmjSGR7v767dvy/xVg6pbTNDvcKCzLuX/AGv4amNSPNytnpU8PzR+E57ULiVpG+RYgrsrN/s1iXk3kuFd1y38X8NdBqNu+2RCnz7sfN/DXO6pGjcPtTb8u7+9XfRlEKmEjuYmsXDyKbeHais/zsz/AHqxNQ3sp3uuV+Xd/DWtqkfyKUm4VtqN/drFvmi2jf8AM/3Xb+GvUo+8eTisH
Lm90x71tqt5Nt8i/K7R1m3ioU/2v4a1Zmdd+zbsZfu1lzInmb3/AIW+7XqU4njVcHyyE02HazeTtb5/u11Gg27tlHeRtqfJ8n3VrG0+F4nDui/c+bbXU+H7fdGzvMxGz+5XVGXvExwvvm7o1v5Koj/OW+589dvoduk0LPs27fvqtc34ft5gqOm0bf4tldvodq7bOF3f7X/oVRWl7vMethaMZGrpdhbeXGnaSLdtZPmWun0nS5oWbeiqJk/1n/stZWiqjXCQ2z/Oy7Xb73y11Oh2VzGzpM7OVddjL/drhlzSPSw+Hjct2tn5cn2beu3dt8tX+Xd/vV9A/sg+IpPBPjCwv4YftU32jbFH91Y/9qvErK1s7gpNbQ5Lf3v/AEKvU/2fY9SXxdbPYbXXzVXc0TbVbctZVPdIzTD82Dkj9UPhb4f8K+A/BOr+P9Kud2qas8lxdXDRbVVW/hX+81dp4W8Za94f8Kvq/wBsbdJaqkULL821l+Zq8+0rxtrGj+C49N8SQ27XF49vE7bf3axt97av92qnib4gWzalcaPol/G6R3HleTC3zR/LXm1KnN7p+Zxp/vfeK/iD46P4D1aPUtEg8/WFuvN+2TS/u7eNV/55/wATV8RftLftA+IfiVql/wCNpr+adpNSkuPMkb/Wfw/NXrfxz1y8s7/WrmbaIbO1ZUXzf9Y235trV8m/EC8/taOysIYmjT/WxKv3dteZi4Q155H0WT041KsTJh1vxPfTPcpcyFW+Z1Z/u7v4Vrfsbi5Wa33wrujRv3itVLw/Y+Zbx22z5P8AZT5lrtPA/gua41KG2s7NZY2fc3mfe/4DXzlTEUuX3T9Fp058nvGF4ivtV1ZTYWGlSNbzNueSH5mrn1+Cfxa8SWKXL6lHo9lv+S6vFZfOX+Kvqe88E/C74b+BZviR8QtTjsbGxTfLH95rhv4Y468d8YfGLxb8WNNi8W63pVn4c8G287JYfal23N5H/u1vl+IpRjKLPPxWFlTfMtDw/wAVfDHTdDhTSrP4/XE100StKrRMqszN/wCg/wC1UvgnwX480WZI/DHxRsZEkb5FuL1o2Zv91mrK8dfED4Pw3kzw6VCgVtssi3DeZIv/ALLWDfeOPhXqlu6aR5ltNIny/vd22niPZyj7pphZOnLnke9Wev8AxR8M3STeJPDbXMcLqzSWvzbl/ibdW/q2peG/iF4Ru5nsLO4h3Ku26i2yx/8AfVeM/CX49X+k3iaVN4umu2hiVU+1bV/75r0Tw78YvB+rR3OiaxZQzxzS7kuF+Vl/vV5EuenP3T6KjUo1qRt+H/hn4Y/sWGawS4tfLf8AdLDtkRmr0j4f6TqXw71C21Kw1u4htm2rtWL5mZmrmNB0XwfcWkL+FdSurdJG/wBSs+75v4vl/u133ibxI+j+HdNsrnxDH5TXmyJVt/m+7/eqJS5pe8VGPs5H0R4T8RaDqnh1LbU7m6kuIVVYlkl/9Crs/h7oqX2oRzL9lCTPuX5l3Kv+1Xgnw50PStWtFv57+a4juIt3+tZdzV7f+zvofh24m87zt6Lu3NcS/M1VCM5T5UKtyRpSPpTwCulWumo14kburfu/n21va9rN3b2EjWc21JP+en8NcXpzeDZzHZwTW6vH8rLHPUXiH+1LGxl/sLU/MZdxSO4fcrf7Ne1KoqMOU+Lng4V8Xzv8SbS9VFzqkqxPld+167f4aWNtqGp3KXNvG6KnyqrfxV5HpWpXKyGa8mjhmX5pVr0v4ReJHW7CfKyzN8zLXHg8VGGIi5bcw8/wMlg3ym3400NLXSp7ZBny923d/Etfmz/wVA+I2n/C34d6lZ+IZpmsdeiazg8n5v8AWfLu/wCA/er9O/ia00OiPc20G9/KZdq/3dtfih/wWy+MP9vXkHwom01ZraGy3/ao22yLN5n3f++a+wVH967S0PzKNP2mIUT8tPE3h1NF1ibR7O586GJ9qXUn3pFrNm092KwyP89dVfaLtuij3O/y/m/vMv8As1VTS4VhV9nyr8u5V/hrr9ty+6fVUaMYwOWmtfL/ANGSFtzVRl0lNjJ8qjf8q118mj7ZGMyMyN8u7Z92qs2kl5CnkqqR/KlEanu2iP2PMchcQzQ/uUdSG+4rLVC8sX5eNNy/3WrrbrS3+1fP8rbN3l7Pu1l6lp8Z+4ny/eaumnU+E5JYeMeaTOJ1axRN2+FW/wBqsK4s8A7U+b/arttYsbYLsfbvZG3LXMahawqzp8wVf4v71dlORw1Iw6mDffd2Oiqf4WWvrP8A4Jehho3jLOcfabHbn/dnr5Ru49y79n3m27v4q+sP+CYMaxaT4zjXtdWX/oM9fqng+78f4T0qf+m5nt8Ke7n9Jf4v/SWfPn7SEu39oDxlC/3f+Eju2/8AIhrk7dkWVPn+9XW/tGr/AMX/APGZ+U/8VHdcf9tGrj7WRFk8z5T/AHK+OzyX/C5iv+vk/wD0pnjY6P8AttX/ABS/NnQafJuby3/hX7396tfTpN2Pk3L/ALX3a5eGTlXP3d+7733q2tNm8sId/wAyvury/fOXlOmt7j5n/fMh+9WrY3TsvnI6qW/hb+9XLw3Dqzl3+992tnT7h1Yfdx91V2VEveM5ROnhk85Vm2fw7W21L8kkPyJn/gf3ayLe8RlZ8MqL99quxzIy7IX2bl3bquMebYwlEsq0fmb5vl3fKn8NTKyeSsKbt0f3t3zbqq28k3k75Pnf7u5fmp8Mjsw/fcbNv+9/tVfKjD+6Pnt8Rp++VdqNs3fd/wB2sq7jcR7Plx975m+9WjeXkyv/AK5cL95WrMvl3Tb97Z/8dVaZZn3TJIwRE+RU/wC+qzrqNI5vItk+9/47WrqEh8z53+X/AGU+asm6fd9//Wt/tfw1MTaJja0tyu1PlZN/97c1c9qW9fuD5mf/AJaV0GoO8at/C/3vmT5WrA1SHG15H/4DUSiax2Pari186RoU+9/tf3qg2/u40d/+BKlaq2e64OxGI2feanSeTEvzp87fKrbP4q+Fl7x/QGDqc0DFa3RXXenzSfN5ar96mfZUmUeSkn+xWpbxzI3+kpG3/jzbabfWPlvvSHaqpuT+Go5oxNvaMxWt03vC+5N3/j1QSQvtVf4FbanzVpXlnub+L/gNVGXb9+Flb7zV0KVzkxlSHIVZLjbuR5MMzbWoWYxt5P2fcNv3mf5ahulhhbek25arxuVkd0dXX727+7/s120o8stT4zMKnMbtlcedMm+ZlVU+6v3a0obx5Lcwo+z/AGv4q5zTpH2h+7VrWbPtP2bbu/jZmreEYx1Z8/KU+c6TSbiZpGT5U/hRm/8AQquXH+sfzkZDsVU/76rCTVtqi2mSN9vyrtq9HqFzND52xVH3fv1EZT5tRSlzaF+ZYcv8izM33tq/d21R+yQzLK6blX7qbX+9RNqDxw/u3yGfbtVvu02PyZ1Z0fay/dp+/wDEEteUGWPc0m9i6/eWRf4qpNHbNvuX+dd/y/PuqzfM6yLNJNv3fxLUK2/ys/kqC38LfLSjKQL3vhK0d9tVoXRVZX3fMtaFnfIwZ3hVf4fMX+KqsduJ2Uum
7cv3d33amk8yFRGj8fe2rRKMZe6VHnOj024eRf3KeYzJ/e21prcfZ1PkupZv4f4VrmNPuts0saQswVdz7v4a2bPY0aB5m2x/fb+Kud0ff1Or2nuWOhs7uGE+ciMq/d+WtNLtJFSzhRnK/Mm2sOxWa4+REUo0W5Pl+Zfmrd0+WzjiCJtfb8y7f4a6adLlncxnJVIWia+myTKrTTJzvVV2r92uv8M3lsu/fufcm3c1clpW+OL/AFO91Tckjfdaun8NyfaNs0yNEq/wqv3q9zC0z57Ec3OegaDM9zCNjxptbdKq/dauq0G1tm3XO9pIm+bbsVVWuF0uZLVY4fIWNGXcit8zbv4a67RrqZpbe58za33mXf8Aeb/dr0adP7RwSkejeH4/O8p0f7ybUX+9XZabawsEtk+QfK21V+9XE+H7jy1T54ztbc6xt8q13nh9vtEf77yzKyqrtH8zL/8AE1rGPLEn/CdBo9n5Nr5Lopdmb7v3dtW5l248ubLKn8TfNVeGa8tYV+fP+z95d1WZCjKX8jY2z96zJ96o5oyKjEqagqR7kd1y23Zt+61cz4gjS0j2XLspaXcvy/dauj1DZHH5z+Yd3zRR7fu1jaozRw/ImWaLd83zbaxlLlNIxOV1hUuml/crCm75FZf8/NXH68sMgEqc/e2fNXXa1CGuDC7+YN3mbo/vfd+WuN8QSQwq5d+d7SfMm3d/wKsuc3jznn3jC68uGaZ9vlbN21fvLXk/jpvMjaG2RVRdzRN/Ev8AwKvVPFU3mJvtodu5PnVfutXl3jC3hjjbZbMsvzebub5WqObqzX4fhPKvFKeTI6JuzIm5F2/erzDxRCity+CvzOuz7tekeKP3kjoiMpX5X3NXm3iCP/Xec+7bu2L/ABUub+Uzqe8cF4imdY5dm37/AN5a5ua4/fbH53J81dF4hWZf+WON392uUvF+zsfm3v8A3a2jyyOWUeUtW93tYo6ZVa0bebdH8j7f/Zq52GTdJ5KDFaNvePGqoNu3b/FWkokRkasF4jbod7K3+zUkd47Mdj/ef7y/3aoQ3Wfvj71WM7vuO3y7ay/xBzSNKxutuzyvm+Xa6slalnIkapM6K53/AHawre5RWWXeq7vlT+9WnYs8f39vzP8AIy1UdiZRN7TbjyFMexfu/wATVpWO+SPjcu59y7v4ax9PWFtvnD73yu1a9nN9onXzplfd8v8Ad+7R7QI0zTt5PM+ffkL/AA/3qvWtr50/nPwsf8Tf+y1Rs4xt2JuRa07ePZIg6IrfLTlW0kyo0zW09Zljih/5Zt8u7+KtuwaaFETep2/3X/8AQqx7P95H8j5WP5XVavxzMg8lHX5trLuavNqVvtHdRw/MblvqEy3SzB13r827ftq39qRd3nP5r/wL/drFVnXZNGy/3fl+ZqveZNMpeEL8rqqM38S15OKqHvYOjJF9Lj5Yk2bVb5tzJ/DTbiTbIkKbmf5m2qvy1VhZ23o42Ps+Vd9NW6muFD79zsu568mpyyqns048seUluZpo23w+WnmIvzbvlaqlxff6O80M+/dx9yo9QYMv8OGX7yp/FWfcaj5sapZwNt/ij3fd/wBqnTlt5DlHlkTtqj/wJhVX7zJVeSRHs9iOzbU+XdUdzsWZEh2/7W56q3kkyqzzI2FX5mX5q6Y8vN7phKPNEgmk2ugmfL/88/4apX0cM0ju7thf++d1TeT9oVXhmb5fmRv4qrXVi8dwqbGK/eZa6pc3wnHKn7pRk7TO7JtfY9WIZJlVkMzMq/Lu/vVNeWLzLv2bAv8ACyUn2eaPYibfv/3flo5eaMSffiTWMKK3yPv+fc6ru+b/AHq0bVbZvkR2iVf4W/vf71Z/lzW7s8PG75dy/dq3ayIbcJ50gl+ZtzfdpKXKXGJtaTqDr883+sVPl/4FW9p1x5OxJpm3Sf3U+bbXKW801ufkm+ZV3Ju/irSt9QS4eCZEkRP41V/vV3U5cxzSox+I7bS9XfSZUSZFZZHZfm/hrZXW9yqyXK5k/wBn+GvOYfESMwSdGYq/7rav3a0ofGEzf6HNMpVfuts+bdXo0ZHnYinGUTvl162WfZbTSEyf7P3qjbWHjjd5h8iy7dq/3a4638SJNImybDLu3Nt+VaZN4mRV/fTfLub5d33mr0acjyalM7bUNYeaPZZ367PvbVrNm8Qf6QzunyR7d9csviHTWmVH3Kjfek3f+y1BJ4ieJtqFZWm3N8v93+HdXRGpy7SOaWH5jevPEkPmTfvt25/733VrNv8AxNczKUebZ/FE2z/0KuevPEXzMjxxmWP+6/3qwtQ8SIv7nfIYv7q0qlaPQy+r8pval4qfZvR1DL8zrs+asjVPEUfktbQzKzb/AJ1j/hrmNQ8SbY9jupVW2uzVlXWseWrQo+3/AHXrmlUlzGscPy7Gtq3iCWNfJedt395U/wDHaw7jUNzMiTKq7/kXfuqpNqTyYTf8y/wtVSaZ4/nwu1nrlrSlI19lylxb658sskisW+b/AHqkjmfzN/nR/L8ybvu1kLeOkbTed8v+zUU2qOzDY/LcNtrm+L3S/hOguNSmZVm+X+79+j+0pto+dUaT+FqwZL4zL8/y7futUkdxN5i/6tv9pvvNXNKX2TpjLlNuPUnkkV5E+RV+epmuvNY7IcbV/ibdurGjvEkUQvDz/vbamt7r5UT7399t9YyjynoU6kZG3DqFyjJ8qsi/N5n/ALLWvp+qPt2bMf3JK5iNfm3puI+8vzVbsL7azb5m+b7sa/w1zSjJHZGR2djqD7t6TfKq/MqtXQaPO6wi6d/vRbdzfxVxWj6nuZUd8MrfJ8m6ug03UvO3Q/ZlO197/wANYylM2p0+Y6K1u/Lj+R2xG3ybvmatGO8fzF+T5Gfb83/oVY9ncJfND88cUknysv8Atf71X4ZPMZIXRW+f5v8AerHm5TaEZy+Iv+ZNuTftfy/vN/Fur0zw65f4Y+YV2E2Mxwe3368sfEjRI/yv/ufLXqnhlU/4Vqixn5TZTY/8er9s8C23nuY3/wCgSp/6VTPqMij+/m/7r/NHnt5H+5itn+5Ii7v7tZ8kMyzSp5KhI22r/d21qSWb3CnenCvuTb/6DULbPOh3w/Kr7Zdz1/PkqnN9o1o4X+6Y95buWNtC+xmTzfl+6y1i6hIixu6bR91nWN/vNXRX1u7M+xNyK21v9paytYVIdsH2Zl2oyp5f3v8Adp0/dPVp0/7pyGuRXMzffkVY2+Zvu/N/7NXO6sfLb7NMjJt/irq9Us90n2l3ztXYism5WrmdS86Ni820p/drvo8vxGv1X3DmL3ZIzIgVl3bflrJuo+S/3Ntb2rQpG2P4W+Xy1rKns3DSfuWAVflr2cPLljc4K2D5YnO3lmlwz7Jtzbty/wC7VWOw/wBKeR/+AVrzWe5m/c/w/dp32fy4N8m3/YbZXfGpKMeU8epg483NIi0+zSZvkTCr9/d/FXS6OszRjeioW+VmWsaxhdFX523b/wC58tdFpMflrsRN27/b/hraNTkMJYXlOi8OxoNls+4r/Ht/irudFt/tHluiMzxoq7WX7v8Au1xuhyO
rIyQqhrufDLPJInd2/vfKtVKXNE2o0Yxl7vU6fQbONVLwwySvHFuVY9q11ulRzeVbzIm3+J1b+GsHQVSSNIYfLZV3bpN38Vdbodm81uj+Su5fmfa+7dWTX2melTpxL+k28zWvz229G+b5f4f92vVf2f2ez8XWSeR5paWPyvMT737z7tcFZ2MK2fnbN7q/7j59qq1dz8KbeaHxZDNYW0yXEm1U2v8Aeb+9Xn4upyYWpM6qeF+uTjQl9o/S74m+G4bT4ZS/EzwZ4psLzWtAtYWk06RvMjXb/s/xf7tfMHwX+Jut/FzxlqtnbPJJqlxPJdXEdvFt3MzfdVa+P/DP7SXxy8E/FbxVHZ63cSaRb6pI1/DNuZY/m27a+z/2Q/i54Bk1Cw+IWgww/wBtLexyrG0G1ZGr4fKM3nJSdXY8XijhChlyl7GXNJamT+1t8IfiX4VvNLstb8PXji4+ZpGT93GzL91q+cLjwref29NDqUKxxWfyRf7392v2s+K8ejeLvA3/AAmfjzSLGWBtOxArL8vnMv8AD/eavzg/aO+FvhvQ7BP7N8yaaS6knlVYvu/8Crtz/FUI0oqG8j5zhTC162KenwnhOmrbWypGkPlSyS7du37tegeFfFPhLQYY45LmMTyP/oqsn3o1/wBZI3+yteQeKtUvrWZLO2hbz1+5I275V/vV5r8VvjRqWh6bqei6Dcs1xfQfYpbpW+aOH+Lb/vV8pTjKpLlW5+jYiUMPDlR6b8fP2sPDHxO8SXOt68kieCvB8XkaXpqvsbVLrd/rmX+7uWvh74/ftWePPi14mmnub+4trCz3Jp1rHL8sa/w/LU/xI8XQ6ho6eG7CHyYN/wA/z7mZv7zV4zqWoPcTTwWyb3j/AIv71fQ5bl8YyakfJ5tjJThaMiDXvjJ4q85oZnbZ/vbv+BVF4d+N17Z3W+a5bLfK67qyNQ025jXzr22x5nzbWasi80mGVftKbVb/AGa+jp4XDOlySjY+TlWxUZX5j3bwZ8XptQmFyb/ey/eVW/8AZq9K8K/Eie/uGmS8k27l/d7q+QdNnv8ATXVra4kT/davR/APxKvLEJvmb/b3fxV5WKy5xu4HuZbm84+7UPqez/ac1v4b6lazJc3gtYdzvCr7vmavXfit+1emqaf4Pe2RY45r1ZZZPNbbuZfu7f71fFs3iiPxJqEWnxzcfe2q9bPxG8bPp+l6BpX2m4LWNw0+3zf4tv8AEteQ8JHSx9HDNpSpvnP1y/ZX+Muj61p6/wBvTbYo/mf978y/L/DXvPwv+IfgzSW/4SrVbOGSzXcqyNPtRWr8PvDf7bHifwHo722j6kyySffZvm3f3qitf+Ch3xyWzv8ARNK8W3Ahuk3RRrb7ttYxwmL2ggxObYaKP308Pfth/s1W/iR/Dd5qFvDcyT/6M0gXbGv+1JXpWkfETwP4igku/BviKzlRfmfbdb1r+Z7wz8Wvj34y1ppn8Q6ldy3Uv+rhT/vpa++v2Nf2nvGHw7htPCXjGwvoYm8tW+1W7Kzf8CrGthsdh4c9SzMMux2ExNW0vdP1Vm1r+0rUXP8Aq3ZvmXb/ABV2nwN8RSw+IvJuQqlW+RVrwvwH8RrbxhoMWq28yusy7lkjr0P4U6tdQ+JYXhdg6tudl+avBWInGpFy/mPezTDwqZdNf3T6A+OHiZvDHg6XXLi5W3tTbMtxMx+Vf7tfzQft1fFi8+M37Sni3xiniS4vIW1JrW1jaX5I1jba3lrX7ff8Fef2lY/hJ+yVcbtSEV3q0v2Ow2ffZtvzMq/7Nfz+XSw6lqCXN/c75vNZmmX5fM3N83/Aq/WsJKNajGbPx7BYb9/KZjw6bDI3/LRmb5v+BVLcaXc+Tsmh3L91mj+7Wva6OnmSp5P7tn3I0laVrpcEf7l7Zm3Ju87ZTqVPZyPoadH2hx7abMtr/o1ssifwf7NULjTX8vztnz7/APV/7VegtYpGzfuV2fwL/drJ1LTobdvMd1VvvP8A7tZKvzR1H9VjGRxmoaVM0e+ZGLr/ABL/ABVl3WmzRsyOWV/7ytXZXCosrJ9mZwz7f7v/AAKsDW7Xb5u75GX7rbN1dVOpI46lOH8xwWpaSFaV5k3N82xv4q5fVNJTaHfhv4a9F1PT4JIX2bS38bVyuuWe0SD+H+7Xo0ec8rER5ZXOA1KzmikL9fn+8tfVH/BMeEQ6T4xwxObmyPP+7NXzZrFvt+d4cbvlWvpj/gmlF5Wm+MRnObiy/wDQZq/V/B//AJL/AAnpU/8ATUz2eFVbPaX/AG9/6Sz5w/aTYx/tC+MJFlVf+Kju+G7/ALw1xm1Fb5E4b/brsv2kZU/4aD8YqzFc+JrsZP8A10NcarJu3u643ba+Lz/3c7xX/Xyf/pTPKxijLF1X/el+bLkM5HyId+3+KtW1vnX5ERWb+81YNrNJyiOrD/ZarsN0I8fw7q8s5ZRhE6WzvH2r8isW/wDHa1NPuJI5C8x3fwp89c1b3m1F8l/4N3zPWnp906/O83y/edWoM6lO51lneH78gX5l2+W3/oVXYbjzId6PJ833/n/9Brmre+jV1m87f/DV23vk3BE+X/e/hq+b+U4qkToo7qFVDmFkOzanzUkl87R8Iylk/wBW1ZC6hu/10i/u/wC7SRapDlPJf7qbZfn3bq0+Iw9maUkkednmbdqbd2/5agkvXVkf+Gqbao8jFEMaj/nntqvJqiOrOm1v4du+p5v5TSnEtXl1DIu/5vubU/hrImussu9Pm/2v4qbc3ifxt95qz5b7zJndnz8nzUe/8RrH3iLVrqHa2+bdJ97/AIFWBfSPJIzuGfd9yr99cPMu8uvy/wALfxVlyS7mPzMP/ZaylI1jyH0ZHHIpVJhnd/dp0dv9sm+SFYgqMzeZ95mqfy/LfyX++su7dv8AvL/DVn7Okcab+m5vlr4ipGXU/X8LjJR90zY4X2/O/wA6tt2r/F/u1DqFqkkbHfuZn2s0n3q2ZrF45IoXhVVjbcirVWbT0kkKJH838C1zxO/23LtI52+t5priUfdVU/76qjdWs3mKjpjcu75XreurOaR22W33v4l/u1i3kcbSedt+VflSuyMTixFaW5j3lq8ay7NvzJtfctZ8kbrI0MyrtrauoxGjb4eP7rVSnt3WTf5Kq+z7391a9Wjzch8lmFT2kiPTZNrL87bW/wBitjTlmmKR/d/vNsqnZ26Ltd+Qu1vm/hrWs49q/fbLfxVtKXszzI+9ISOF5mX7NM29X27q0o98J3vBIh/jWT/2WmR2/k/cSPCvt/2matLT7GGNWffu3fM7M3zVzyqFxjEreT5jCTyWLfN+72bamWzuVXZAjRJ975au2tq6xssbyKV+dV2bvmqS1hh+0M8/mAyOqqv+1S5uxfLzGdeRQtMjvHubft2/xbqhu4fJX54Wbb/Fs+Zq9I+F3wgX4kpfz3GutZm0mQKy2+9n3bv9oY6frXVz/so2lzEUm8dTlyc+Z9iGf/Q6+9yXwu434gy6nmGAwynRnflfPTV7ScXpKSa1T3R6mGyfMcTRVSnC8X5r/M8KNvBGqzP8vy7v9r/danRqkk
qujx71+V1Va9wn/ZLs51Cv45lGO408f/F0xP2RLCMEJ45lGev/ABLx/wDF16i8FPEe93g1/wCDaX/yZ0RyHNV/y7/Ff5njCzOrfuduN3z/AN6tS1aZpPkf/tm3y16on7IumqF3eNpSydHFgAf/AEOr1p+zDbWgIXxrK5PQyWQOP/H60Xgt4jLbBr/wZS/+TD+wM0lvD8V/medaau6MJDuD7/nXZ/DXTaUz7kjS23Mz7flTbXT237OFrby+YfF8rgfdVrQcf+PVq23wXtrZdq+I5znhmEeCR6ferop+DXiItZYRf+DKX/yZl/q/m62p/jH/ADOftVS3l86Z1RP4Fb+L+GtnS7iazmaG5T915S7vL/hb+Fa1bf4YWkUgabUzKigBUaDgY/GrEfgFIp3mXVXw5yR5XP55r0KfhDx/HfCL/wAGUv8A5M4a3DGdTndUv/Jo/wCZd0CRJJNt5M2+F9jR7f8AZ+XbXYaDdPHshSaFW2fxfe/4DXJafoLWEm437SJnPllcDP51p2MjWYJYl3LZ3ZxiupeFHHyVvqi/8GU//kzgnwjnz2pf+TR/zPUPDd150my53YkfZ9/5l3V3mi6slqyQv/yzTam35WZl/vV4bpvje604Nts1YtjcQ+3OPwrdsvjbd2YBHh6JyDk75yc/+O0v+IT8ff8AQKv/AAZT/wDkyY8HZ9H/AJdf+TR/zPfdJuHVU4Xay7nZm3KtX9Nvt1usyIxPlbpV3fKrbq8EtP2jLyzgWGHwnCNvpdsAfw21ZtP2nb604TwdAQeubs5P47azl4Tcfv8A5hF/4Mp//JmkeEs9X/Lr/wAmj/me03rLJthhm3MvzPu+bdWLqG+OFvs20fN92OvS/wBmX9kb9r79qHwRa/ETRfB2ieHdA1KJpNO1XxBrTo14oZl3RxRRPIFypwzhQwwVJBBrlv2vP2af2n/2StD/AOEr+Ifw+0u/8PSXSWw8Q6DrLTQpK4Yqro8aSx52kbmTbkgbskCvk6fDOZ184eVwlSeIvy8ntqV+bbl+Ozknpyp3vpY4aeTYyWL9hePPtbnjv2338tzzzVGRZJd+4fJt+X5dtcR4qmRrP/Q0U7fl2t8275azLr40XVzE6f2GFZjkP9rJI/8AHab4Rl8YfFbxZpvw88FeEX1LWNZvY7TTbKKQbppXOFXnAA9WJAABJIAJr3q/hLx9SpupPCpRSu26tJJJbttz0SPZfC+cQi26aSX96P8AmcT4pkH2VoXfYNvyLs+7XlnjiaHy5cvs/wBrZX6WaP8A8EIP2u/Feix6r4h8deCdDubiP95pdze3Ezwj+6zQwshP+6zD3NfK/wC2P/wTO+PH7KtzZ6V8Y7eCGw1OSVNM1vSZFuLW7ZApZQch0YBgdsiqTyQCASPlsp4dzHOsx+pYGVKpV1tFVqV3bV8t5+9om/dvprscWHy6ri63sqbjKXZSjr6a6/I+I/FXnTzNsdgy/wDLRk+Zq4PXrOaSSWf7T95/vbfmX/Zr7z/Zo/4JCfHX9tnVb1PhJqCfYNNkSLU9b1YLDbWjSBiozuLyNhSSsasRkEgAjPefH7/g2U/at+FPhG68cad8RNI8U2dhayXOoReHlb7RBFGpZmEU/lmXAH3Y9zHoFNXjOFc2yzNVluKlShWdvddakmm9k/ftFu6sm03dW3Ir5ViKNb2FRxU+znH8ddPmfkv4gW1+yPsLNtf+GuM1C3jdnf5v9mvqn42fsd2vgT4d6l45h8evdvpypJ9mk00IJN0ioRuEhx97PQ9K+aNWt1Enku+F/urTzrhzOeFcXHCZlT5JyjzJc0ZaNtXvFtbp+Z5uaZbi8uqqniI2bV909NujZzY3qzJ90U+3abc53s3yf36ffRoku9EZf7tQLNMrK7wq7L/D/erzeb+Y8o0Ybr5WfZ91Ktwyfu/O3s/+7WXDO7ZjRFz975f4a0LV42j+RG3M/wB3+7WUo8poadlImVff/B95a19NLtIm9MfJWTYxwrHsR8j+9/drZsVfcqTOqq1R7TlLjT5zWsfOkKee6o6/drbsGhl+/wAHZ8jKtYtrD5hX7y/PuVvvfLWxp9p+7V49yqv+t3JXPUrGsaPLI17FX++EX/gVbumrcyR702qGTc/+zWdpSPtT59iM3yfL96tK1heRTCm4+Z/FXLUxUfhZ20cLItx2aRqs4mxF/wChNWjZr9l3JNCroy7n/vU6xsfMWONwrL/D81WWRIZsQosu1/7lebiMVGMbHsUcDy2Yy2j+ysnkptLPtXy/mqzHM/l73ePar7kkV6HjdsfI2/8Ah3fxf7tSx2ci/uXRtn8S/ery6laVSMYnr0aPL8IKzw2/32fc25G2/wAP/wATUUN1JJ8nkrs/jaP7tT3UDyRlIdq/Nt2qnzLTlsUhhZLaZd8n3fk+9XO46ndGnymRJMkkv2aa22K25Ytv96qrXjtIh6J8ys2yrsmnvCpd7mb/AK5t/DUE1nebfsxf733ZN+3dXauQ55U+WXu7lNVdZV2Pn5Nvy/8AoVSyRpqDFHRg+/8Ah/8AHqsrp+6Rsortt/e7aks7F5I1S2Rl27d235mat40+YzlGdPczZLGHc8RhZG2KqMq7W/4C1Tw6a8cjb0Vvl3bm+9/u1pNY/wCnEXMLB9n72P8Au/3dtTLpqSSb97Lu+626uj2JyuMfekc7eWu23cSblZk3fL/CtRrpaXDQv+73t/wGtqazS4uFTZt2vt3L/wCzUl1Z7pEdH2BW+Zl+61bRp8sfdOepzSkZsenvFIkMztkfM0kablX/AGaLrTdrb5ZWLr8qL/C1bCw7m/czbh/BItM1TS3WT7Y+35ovkbf/AKv/AIDWEo25Wa049DH8mEBXmf59m5NtWWvHtFSZ3+7tVtqf+g1DfF0ZYU24Vdrs393/AGaz7q4eZUmRGWJV27VraMvshVpx3iasmoIyvGkzIVbd81RLrkLbpk+/I/8AF975ayby6hmX/RnZjGnz1XuL54Y/kRlRv4q9ChL3PePJrUftG/JrUn+u8759vzRqtV217C7HuZP93fXOS6g6sPJf5mf+Kof7QkVmd34X5n+Wu+nPm3OSWF9pqdVHrB8xn+b5du9Wb5adea1tt+rfM33Y22/N/wDE1zUd9cou9JlG6ludSn8nY7/N/Ay1ftIxOmjl85QLuoah5ibE3Yb+Jf71Y+o6lNDGNn3mfa7b9tR3GoPIqfPg1l3DPJuM24/73/oVZe25h/2a6fvcoy81CZWXD4Zn+b5KoTXzzMrojMn3UarjR/aJNjuu7ZVZrErCv3vl3VhUrGjy2XxcpT+0TQ7/AN58zfc3J92hrhGbf/s/8s3qdbPbvd0Ulv7zVHJYPHDvRP8AZ+X7v/Aq5ZVv5jnqYKZXWT5flDbW/hX/ANmpqqjSf3fl2/71Sx27xyI7/KrJteSnN8q/Inzf7lL2nN8Jwyw/LLUhjV45G+66s/3aPMfzG3809ofMK/3dvystRv8AMqRvbbGb+Kl8XMY8vNIkW+eN9m3lv4qtwX0MaCEPt/8AZqztr2snnJMzbqfBNbNMjuF+X5V3fw1lUj7mptTly
zNi1ukZtiI1XtLeKOZnR9qN83ypWRb3HlyK7vub/wBCq5bzO27ft2r/AHa5PflHU9KjU5Tp9NndZt8cyqsjfOtdDZ3Dqv2lNqr93bu21xlnJ5cKzJN8rff3VvWd07Yh+VkjTd83zbqiXw3O6nI7LS7x/PR3+VfvbY62rPyVmCJ0+98yfdrlfDd+P40Zz935Vrp9Pjdo3S68zezLs+X7y152IlLmPQwsYyiX/sM0LDemwTfPFtf71ereHyj/AA7UxggGylxnt96vMrODbcLNM+Sq7drfdWvTvDqqfh+qKMgWkoGT7sK/bPAWTln+ZX/6BKn/AKVTPqMogoTkl2OHWOGORkR5NrbVdW/i+WiS1hGXh2t935f9qrMdvDIqfaXZFVvk2/N81L9l+0R7JkZh83zKjfw1/P0Y+/7p6uDomFfaW8TfOkgDbvljbau6snWo9sZebc+35tv3Wausu4d0zohYWy/KjMv3mrndQhSSN03sWk/vPXXE9Cnh4cxxOrKPnf5lhVNz7V3bf9mud1K12xb32s+3btX7u2uy8Q6PcrHs8tWVvm2q3zVz+paXuj85EZCyb9v92uqnL3YnoQw8eSRx11GkeYd6yOvzN/s1nTWsPlrsds79zru3V0mpW8LRv88YMi7vu/M1Zq26LHv+5tTb9yvSo82nY5a2HhIwJ7PdcbymPOf+GoZLVzJsTdhv4WX5a15LdI3Hk8/xf7VRzfOq7Imc/d2/3a9SjKK+yfP4qjCJSt4Z41+Xbtb+Fq1rH942x4VXb/Ev3apyRpHKZEm3H7q/3qksRtmZ0+9/eX71b/YPGqSinqdX4f2Dakb7v9pq7fQbySRkhuZlii+6rLFXA6Ldx+cYdjI7fLuX7y11uh3zqzJNMz7fl27vvVfLzR94iPaJ6JoN5ZxzvZwOr+Y23ds+aus8P3zNa/Zt671T5IWbbu/2q850fVHh2JGnzr8zbV/8drq9DunY/aXnzufd5e7burm9pynZRqcseU9J0m9eORJpkjQxxbdv3t1epfBfVLex8QLqVz8629rI6bU/2fl2/wC1Xiei6puaO5mmVP4XWvR/hrNc3WoXFhbIsrSRM0TR/wC7XjZ1KUsumo9j18qlzZjBnpXwl+Hfw31b4EXmq+OZlsbnxt4oZX1DULhVk8uNvvL/AHal+Aek/DfwT+1hb/D34deOYde0iNo23W7bo45N33a+bv28viA/hvwT4O8AaHrHlSQ6W1xKsO5fL8xvm+b+9Xt3/BBL9mG7+JXxmvPiLrM8kum6VbLcXjSP93b8y/8AfTV+aZdTxPsddD0uK5UJ81Rn64ftXyxWfwdsdRkhkhghs40SOP8A5Zttr8+/if4sfxZfXOpX7/JH8sUjNtVm2/xf7NfcP7X/AMZbKbwzD4Vhs4/skCHKyL95tvy1+YHxY8YXt14uuf32yHzWVo1Taq114/ERryjGEj53hbAzwuGlUqx5eY6K/wBF0HWLXffw27xxxfPJH8kjN/vfxV8sftHfD/TbWyuU8H3KyXk10yy/aLLb8v8AstX0T8PfEmja1cR2GqpJFa27Mtx5L/NJ/wB9V1Xir4A6b8RtJa80DSo7S3hRmWaZt3mVrl9SDnaZvnEeX3on5CfEiDxDp1vMlzDJFIvyvuSvKrrUPEmk28mxJAsn3pNtfpH4s/Zf0S88SXdt4i+zv5L/ACSSfd+Wvn348fB+z09Z30TQVeL/AJ57PurX2eX4rD/DKJ8DjMLiqseaB8k2uraxq1x5L/O/+1W1rHhXWNJtEuSi/c+7W+3hHQdJ1FLy2hmQszfu2ib5ai8Ua9NeW/2BEXasW3dtr069ZSlGEIniQwteL99nENqn2hfJ8xVK/frT8LyTXVx5KfKfu7qpWHh172++T5gybvlWvVPhT8Jb+6mS8e2ba33NtZ1pUqcC6EatSqd5+zb8Kbnxl4ytdBezkVrh9kU2zcq/7Vev/txfsK+PP2c/h3Z/F3xPpUlvol1PHBFeXDL+8kb7qr/FXa/sj+CX8L+MrDUtSso8Ky/M3ys1fZn/AAW2+DOuftE/8E1vCnirwlF5114T8QQ3l1tk+Zo/L8tm2/7NfIVakp5jGD92LPuJYPlyj2kdT8Qta1zTdPhH2l1X+5urc+GvxM+G+i6hDdaxp8dyyv8Ad3bdy/3q5Lxt8H/G0OrbNS0e4ETfKjSVe+F/7PviHxB4kisH02RRI3zs33a+meAwvsOac+U+XnjatOrFwpcx+lf7E3if9k74svC/gnV7HS9ajfb9jvkVWk/2q+7rL4d+CfHHhX/hG/Emj2895a2+yK6WBVZdtfkX8O/+Ce/xqXULbxJ8GXkhuYXWWJVb5m/y1fol+yjrH7S2m6vZeBvjT4Sm0u8jVVe6jf5bhf4vvfxV8bmmHrUo89KfNE+wy+eGxtK1aHJM9v8Agj4f13wbb3Gj/aZGs/tG1Gkb7te6fB3Xrb/hLLbfGzr9o2su1qw7XwTZw6YNQMLBJnVm3JuZmrT+C3jXSvCfxPx4gt1fT7G3mup7m42r5axqzbq+WhRp1sXCMv5kepXj7PKp/wCE/OL/AILjfteaP8dvjpZ/BPwL4hkuLHwDPJFetDuVvtkn+s/3tvyrXxdp9vDNdeckO/bxLuTd81df8ZtQTxt8cvGHieF28rUPFF9cQSSJ80kckjMvzf7tULfS0ZQ8j4Xf86r/ABV+y0qMaVKMI9D4PBYfmp8xGunpHhHh3jZuSNX+VWq9HYouUTzP91nq1Z2fkybPszOWep44Xabem4I33o9v3awre97p7dOjyx92Jl3Fq726u8K7Nvz/AMVZV5pu5XSYLt+8jKu1q6eaF45Am/5Pu7W/irG1a3SZmd0YN/zz3fdrKj/LIyqUeaGnxHHahZhWabewf7y1ja/HIsy/O23au9ttdTqVrtZ3Tps3O38KrXP6p532hneZnVU2pHtrspx960jyKkfd0OR1a1hk83ZwzNu+WuW1yx3MX/ib5fmru9QsvmkHyotc/qOmja2/j+Fa9GnHlZ5Nan3PP9WsbaOPZtr6O/4J02sdtZeMDEhUNc2Xy9uFm6V4brWk5UnY2N/8X8Ve+f8ABPq2a2sfFYb+K4sz/wCOzV+s+EGvH+F9Kn/puZ6fC6tn9J/4v/SWfLP7TRkX9oHxkEGc+Ibr+H/poa4Rrry/k/ir0X9qC2mT4++LZht2nX7k/wDkQ15xIuJN7pk79tfF59H/AIXMVzf8/J/+lM8fGSi8XV/xS/Nj45ts2+FF2t/Dsq1DeddzrhaobXjX5PmX+9up0M/zbHh+X+GvG5ehz/FL3jbsbxPlLx7l37srV9dSDK3kurN/B8n8Nc5BN5ce+F+W/hWrEdw6yMiblSiXxcxEpcx09nqTzYhhRd33d1Wf7eeNVSba235a5WG8mjP2aN9p+8+2ntfbRlNpP95qfNy/CYVNzsIdaTydjurFv7tDapthVIdvzfNXJLqgaPyZoP8Avlqmh1J2k+WfAZfu1rKRhyo6dtWTeuHwrffpn25IZG8jbtk/vPWJHqXnSLC6KzL83mVMrPIymbbt3/w1EpFRj73u
l+ab94+zafn+bbVdmfyf3m1X+9/stTlL58t0wP4d1P8AJmMj7Id/8O5kqPaGsYlCZdq732qf/QagmtIpMfxbv7ta32Hy1Xjcyt95f4qa1n0TKg/x/L/DWPOaRjy/EfR0djbR3BedN+35fl/iqa4tfMhHkw427tm5fvVYs0hZfkhY/wAKfxNV1bdBahIX2/xOrf8AstfGVJe/7x+k0zG+zpDGk6Bkbf8ANteo5Le2jZ/nYpv3bpPvLWpcWPzbN7Kny7d38VVLuPaqI7r95vlb+Ks5RudEcRymDqASV3869ZPn2/L8y1i3sMZ/49oW+Vf4q6a6s0XY/kyK7fLu27lrLvLV1md3XZ8/8XzV1U/e0MKlTmj7xzF5azec2yHe2z7u6q0lpc28iu6LtX79bdxEjXDud37uX+FflqL7G8m9N7Hd/EtepRqS9lZHgYiPN8Rm2du/2j9yiy/N95q2rG3ufM2Jux/dVKdpeioyvOnlp/e/ire0fS03LJsVQ392nUrHPRo9ypZ2O21Fy9s3zPt2tV6x02GRWx/f+833avx2aSK0MO6VY/mRm+7WhY6TMu534TZuVW/irnlU9nA3p0485mNCkTI8KeU33fvblqxbR3LSfaUTMq/8tK1rjSTI3kum3+8q1J/Zu1fsyI22P7v+9WMqnNE3jT989g/YH+E+qfGT4n2nwi0LU7e0vfEmuWVhBdXgcxxPIzKGfYC2BnsPy61+l0//AAQ68IeBZJV+Nf7amgaAt1fvFoLPpscRvIlxhmE9ymJPmGY0LhePnOePhH/gkfJNYfto+BRbStFIPHuko7RsRkNMVYcdiCQfUGvpL/gs/qOp3v7eviG1vr2aWGz0jTIrKOVyVhjNqjlUB6Au7tgd2J71/VXh/i+KcxyvKMly3G/VacsPWqykqcJybjiZRsudabr5X0vZn0GGnj6lShhMPV9nFwlJvlTek7dfU4z9tf8A4J+fGP8AYo1y2k8VSQ634b1KRl0vxPplvIIGYE4hnDDEExUbgm5gRnazbW29D+xX/wAEyfil+1noFx8UPEHiW28FeBbNn83xFq1sxa6VFYu9uhKLJGhXa8jOqqcgFirKPftN1HUfGf8AwQi1C5+M9/MRpmoCLwfcXczB5I4r6NLdFJUlgCZogORsTG5QMr7h8Q/iH+xz8Iv+Cdnwv0r44eCde8RfD3VtH02GCDRJJZEa4Ft5w+0OksBbLiRsEAF0zsBUY9jMfETi7D5Osuprnxn1qphXVp01LmVNKTnCnKSh7Rp25G+VNS8iK+dZlDDewir1faOnzRSd+VXuk3bma6XtufJ3xy/4I5614d+Fd78X/wBmT496L8S9O0iCWXVrWySOObEYDOIGilljlZUJYxlkbA+XeWC15b+xJ/wTy8d/tweHfF2seCPiBo+jz+GUt0gtdUhlYXc8xYqrOgPlJtjkO8BzuCjZg7h9ifs8/t3/APBO/wCDlr4kP7I37MfxHlvZtJa71fTdI0qa5jlhgDESTBrqVYo13ENKV+VWPXoeW/4I5+Ph4X+Bv7QvxL0HShBc6bAmq20AdRGuy2vpY4wAgAwVIyBjBGFGOZnxh4iZdwnmNSupqrSnQVGpWp04TkqlSMZRnTi5QstlJWunffZPMs7oZdXlNNSi4cspRim+ZpNNJtfM8z+Pv/BKX4Vfs9/CTVvEXjL9t3wvF4w0jTFuJfC01oqmeYgEQRhZmnO4H5W8nnglVXJXS+HH/BGGS28A6b46/ah/ad8O/DxtYto5bHTJ40eSMugfy5XnlhUSqDhkTeAR9418UX/ibxDqviObxhqWt3U+q3F615PqMsxMz3BfeZS/UsW+bPXPNfof4k/bV/Y1/aV8G+GfCP8AwUr/AGffFHh7xXp+jxPYa59iuY0uYJo0P2yIxlJRHKyFwpSRAOVdsmvf4jh4k5Dg8PTpY2piOeUnVnSoUXVglFcqpUnZSjzX5m3KSVvn2Y5Z5g6cIxqud2+ZxhHmWisox0ur77s8I/bI/wCCXvjP9mL4ap8dfBHxS0jxz4Ie4iifVNOjMc0IlJVJGVWeNoi21N6yE7nUbQOapfsVf8Eyfij+1noFx8UPEPia28FeBbNn83xFq1qzNdKisXe3RiivGhXa8jOqqcgFirKPbP2qf2fU8HfsG33j39hL9pnXtd+Dl1qqz+IfCNyFk2EyGOSVZvLSZI1k8rfbOuDnzSTgVo/8FGdR1Hwr/wAEwPgj4Y+FN/N/wh+o2liusTW0zMs8gsxLGkjbRkGXznIO354x8uR8vnYPjPibHZVhcvw+Mi8RiMTOj7aVLknSjCHO1UotKKr9FHWDutbmNLNMfWw9OjCqnOc3HmcbOKSu+aL05/LY4D43/wDBG/xDofwxuvit+y58cdL+KNnpiSHUdO0u2X7U5TaStv5EkyzuFbcYyVbA+UOWC14l+xF+xZ4m/bY+JGr/AA60DxtY6BLpGhS6hLPf2skpcqyxpGFXGAZHQMxOVUkhXI2n2X/ghp4i8f2H7Xt14c8OT3DaJqHhm5fxFArHygsZUwysMEbhKwVScHEjgHkg+0f8Eyrfwvo3/BTn47aF4DuY5dGWHUTaup3/AHdTi4VyoO0FnGBwcDlsBqvN+K+LuFsHm+XV8Sq9bDUYVqVbkjFpTnyuM4pOHMt46arV+TxOY5ll9LE0JVOeUIqUZWS3drNbX7dzitH/AOCH/hnR7XT/AAz8X/2zPDmheMdTT/RdBtLNJVkZmKoIvOnhlmyRjIjXnIGcZPx/+0/+zZ4+/ZP+MF/8GviLNZz3tnDFPDe6dIzQXUEi7kkQsqsO6kEAhlYcjBMHiv4k+OfGX7R1x8TfE/iW6vddn8WLdPqNxJucSLcDZjPAVQqhVHyqqhQAABX1l/wX0WJf2lfB5SCJWbwOpeRYwHb/AEy4ABbGSBjgHgZOOpr6DKcZxhkvFmCwGa41YmGLpVZNKnGCpzp8kvccVeUbS5fe10vozsw1TM8LmNKjiKvtFUjJ/ClyuNnpbda21PhKus+A/gCb4q/Gvwl8NobI3H9ueIrOykhG75o5JlV87SCAFLEkEYAPIrk69C/ZL8Y23w//AGn/AIfeM7y1E0OneMNPlljO77ouEBI2kHIByPcdD0r9JzapiKWVV50PjUJOP+JRdvxPdxLnHDzcN7O3rbQ+sv8Agtb8f/GOgfF/Qv2Y/h7rVzoXhXwz4btpX0nSZGtoZJpM+WpVCAyRxJEEXGFy2OvHSf8ABJr4i+Jf2nPgL8Vf2P8A4sX0viPThoAn0KLWJHn+ziRXjMYZjlVSVYJEAIKNuZcHkeW/8FwfAmp+Gv2zv+EunsnW08R+GrOe2uNp2yPEGgdQTxlfLQkDoGU45ye4/wCCFmh3XhnU/ip8eL6wkOnaH4XS28/Y2JH3NcOi9iQsCkjkjcvTPP4FjMHldHwGw+JoRXtIwpVIySXN7d1I3ae/M5txbve10fHVaWHjwhCpBLmSjJPrz8y6976HwNf2N3pl9Npt/bvFPbytFNFIpDI6kgqQehBBFe7f8Ey/i34E+Cn7aPg/xr8R5YoNLaaexe/nKBL
KS4heFJmLA7VDOAzArtUkk4BB8O1vUv7Y1m71f7OsP2q6km8pCSqbmLbRkk4Gcckn3r68/wCCJnwY8D/FX9qu98QeONFi1FPCfh9tS021urRZYBdmaKOOVt3AZNzMnB+YBgQUFfrvHeLweD4Ix9XHRbp+xkpKLs/eXLZPWzu99Ut9T6TN6lKllNaVVacrTt5q36nuX7Zn/BLT9sb46ftG618U/h78cNMutE1q8WWxi1fXLqCTTYtoxCESN18tDkLsOSOSMk5yf+Cq+saf8EP2HPhx+yN8Q/Hv/CWePIri3vLnUpZBJLHDCkqtMS4LhC0nkxk7WdY2JJ2sp+f/ANpb/gqD+2J41+OGr6r4X+K2s+E9L0zWJotH8P6W4gS2ijkKqs4A/fv8uW8zcNxYABcKPoX4teLf+G8v+CRt7+0F8Y/DMP8Awmvga9aK28QW+moJLpo54UkddoXZFLHKBIq4QSRFgvyKo/FqeU8X5HWyCvxFKlPC06tOEY0kozhOcXGnzvlXNFac6g0m+ktz5aOGzLCSwc8a4unGUUlFWabVo301Xe34lP8AaG8Y+Kf2MP8Agkz8M/h58MzL4b1zx6YrjXL/AE7fBcsssRuZiZAQyyMDBGTnPlqUGFAA8Z/4JJ/tO/EzwD+174e+H954x1K70DxfLJp2paZdXcksRlaMtDMqsSFkEiIN4GdrMOhr1P8A4KM2t18Vv+CZfwG+M2j6Qy22j2ltZXqxK5FuHtFhyck4XzLYLls8svPPPzl/wTD8B6l4/wD25/h9ZWFk8qabrH9qXbqpIijtkaXexHQblReeMsBznB97I8FleN8Mc4rY6EXUnPFyqtpNqcZTtq7u8UouOumljswlLD1chxMqyXM3UcvVN2+7Sx5l/wAFvvhPZfBr4qfF3wZonh6HTdOkvIb7TLO2hKRLBcPDMBGvQKC7AAfKNpAAAwPyc1SHZJ5P3jtZv92v2I/4L7+L7fxv+0H8WZbW2EK6eLHTi43ZkaBLdGY5P94EcYGAPcn8idWs3+d3hYqv8X3d1fkniPiMVVWTzxH8SWCouV97vmvfzfU+T4ldWccLKW7pRv8AichdW7qrP8zCqDWrwr8nylq3Ly1ePbsThqo3Nk24Mj/x18BGpynyHLzGfD5nmM/zYX7+2tO3bC70+Yf7P3qhjhS3kZ/J43Vo2sbybfJ/h/vVEpdzWnT5i3p8aeYqOGbd95dlbdnbvN8k0P7vf8+6s+xj43+R977+1/u1taWs21Y4fm2/+PVyyqR5fdOynRiaFnbv5Zm8lnZfm2763rGPzoD/AKM2xlVmjaszT4UVW2fM29WX5q3tJimaREd8fPtdWavPrVj06eHizY0exdVSHy1Xb9z5vvVsafb7m854WQx/Kn93/gNM0XT0jkRJnYln+Rmeui02z32I3cbn+X+Jlb/eryfrXNLlPZo4X3SPT7VFU/udki/5+Wr66em3Dw/My1YsbFIUe5+ba3y7m+XdV+OxSTc+xldVrgqVuWR6VHD+03MtbPbh34T+L56sJbzKzJ5Lf9dP4WrRktbOOON33L8nz7aj+zzRwo6OyfOy+W3/AKFWMZfDJHVGjy+6Vvsszfvw+xtq7WakXT7byw7/AMTfIy/e3VfWGSSG4/fLsk2/8BqW3s5o4/Jhh+Zvl+V/vVtyzluXGnGPxGNcaXZ3UhdNuxfk/wCBVBL4fh2h7lGLr95W/u/w10v9myKyWy/embdu/wCef+zTZ9Hf7QZptpXZsT/er0aNP3QlGHL7pzVvpaW8nyBi/wAyurfw1ZtdMRYfOSGbK/cjX7zf7Vbk3hmGNVcPt3ff2vuqxBpFtbru82R/Oi2szfer0adGPLoccv5TmJdNfKx/bG27fvTfM1VvsDrtmd/njbb/ABfe/vV2N/p9s80Lw/M2z+Fqp3mlwxx7HRt6t/E1dPs+U4ZU/iuc79khaMo6MvmfM0i/3qh+xJZw+ZCm9Gba3+zW9JY2fmK8MyuG+5uf7tULixdG+S552fNu/hp+z15Tn5ZSkY8apJ/qX3OrfdVf4aLqD9233vv/ADqzfL/s1d8vyZAjvj+H/Zpt3apcKrwuoZvvVlKPKZ8pzerWvlt5zp87feVqwLyaTzR/cVN23Z96uk1SGa3Xf5zFt+1ZG/hrmdUV9xMz87vurXD8J1L3oEEl9MzNB5yoW/26zr7UGVim/wCT+9uq55L7Qj220/w1n3lmdzJM6/f+7W9GW5nWw8qnwkMc3nKju+9Y2/ufepjXTr/rtzFvuL/eqw1nNbqqPbMqyf3n21XmtU3B0h2/7NdUa3N7sTqwuW82g5r55d/z/Pt27qZJNNtVJudv8W6kkXyzvdF2L91dlQv5fzyQpt/i2s33aqVafLofQYfKfcigEyecr7/9ylb98u93kZm/ib7tV13tG3nO29vuMqfLtq9Y277Tc/Nt+61ZSrRjG5p/ZREtm8bbPlG7+89Syae7J86YH+z/ABVct7fzmRNkmf8AZTduq7b6bNMrzvuxGn/fVc0sRp8Q/wCyeWJhyaCkipvfYzfcb+7TJtNjjVvLfjf87f3q66z017pf9Tt+RW+b5qq3mjoJFhhhVW37fuVyfWoy0ZwYvK/d0OVm0hFXG/afN+df9moJLH946P8ANt+VK6W4sUhm2fK3yt/urUP9nJuV5HXDfLWlPERj9o+VxmFlGRzFxZvHs2Q8fdfa21dtM+ywrGqFGb/Z31vSWMKzcpsXZ/y0+7ULaei/OkzFt/8AF91a3lW9w8ephzCm09933Nyt/DvqNbPdcLsT+P8AhrZvLHdI2/gf3qguoXVQibU/iqeaXui+r/zFOOF/vwu3y1djjmkkZ/MVGZ9v+ztqGGHH3/m2v97dWlbx7ZEh2K3ybk/2qr4YGlGM4ljT1aNkTZ97761t6azqph8njeu3/aWsqG12yLsTaWf71benwv5a4hVd3yo33a5vhO+j7ux0Oko/8c0hTf8A6pfl2112n2sO3zo9yvu2vufcu5a5fR03CF4YV2b9svmf3a63T45o1CK+8bvmZvvMzVxVI80z1cLU5Ym3Z2G79z5efutukr0fQURvA6orBla1k57HO6uB0iHy7dZXRW/5616FoMUY8JJDEgKeQ4VVHbLcV+2eA0OTPsy/7BKn/pVM+lyio5VJX7P9DlFtYZdlsjsZNy/u1/harUtrcx/uYfm2uzPtermnx3MM3k3SK39xdvzLU0MPmW/mb921/u/d/wCA1+DU6M+Y3o1pRlfmMDCMqJDbSTIzfeV6wdcs/Ojd3hj/ALvl/wAVdlqEbi3e1RFb/ZVNtYOoWMLQoX+Uqm3/AHq6PZ8p7mHrfaOI1q3fzGuXtlES/L97+Jqwr2zf54fmfd8vmL91a6zWo0jiWF3VQ277v+98tc/q0iRqzvul3Ov8XzV2UqMuU9aOIiclqVrc2qsiQKQzbfuVk6hD98OmHX7y1011++mf5Nv+zu3ViagttbjyfIY7vl+auyEJR6CqVqUTAuF3Sb03CRl2pVFo3t/4Gcs21/71al9a+Wyqi7D/AHaosqBTcv8AIy/fVf4q76cbHy
WYVOb4SKRPs+7em7d/Ev3qLWP7Pdb0Rm/2lpxkfyxvTa7fw0kW6ZneE4X+L/arsjHmifN1q3Ka+lx+ZcBPO2qz/Mzferp9HkfzfOR/Kdfvsv8AFXK2FqHHKbXZflVfvN/tV0elwvHjL7H+6zK3zLUVOaRH1jlOu0u827fJT+LczMtdLpV88dwr3O3/AGGWuN09XjxsdmH93ftZq39Jme4VURF+XaF+f5t1cVSXunXHFR+yd5oeobbgJc/JKvyxMvzK1etfs/68mn+JvOmm+aO1kVI2+7N+7bateG6LdbtUTem54flWTf8A99V6R8I75G8WQwncr3D7P7rV5mYU51sHJHfluK9liozJvil+z/4q/aE0HSn8Kwx3OoWaNbfZY38z5d25Vr7K/wCCPfhbxf8ABT4b+K9N1jQ9Qsrp7uGGeGb5fl3fw/7K1t/8E8vgq/wZ+JjeNvFOgSXelWSyXpdf3mNq7q+gvhh8eP2ZvjNf6v8A8Kp0a5l1+/uZGubSCJlKFW+Zn/2a+FqqnGhyP4j2MZWqVMU3yc0Opyv7TmuTLvR5pHMMS72X7vzV8RePrHTZNeu33ybppfNlaT5lVf8AZr6n/aG1z7VNeW015JB5bMqKrfK22vnpfDqatp5mvIcmR/8AXfd2t/u15MY+yZ6EfdgeWaPq1h4f1R/sd5u/jlVvu7a9L8G/H+wtbaW1v9YmSwVdu2SX+L/Z/u1538WPhzreizM7wSYb51WNN+7dXz58T5vHmk+IrR7aNgnm7v7q/Kv8S16FHDupLmiefiq1CPu1T608XR+GPE1r9s8MeGLi5E0TM9006+Xt/wCBV8+/Gi38Z6Mvnf8ACt45rf7rSQorMse35d1eY/8AC5PivJJ/yEpv3O7fJHLtX/d21et/jl8YLjT/ACZpFltmfbtuk+WRa9WFPFU5RbR48vqM+ZRZ5d8QfiB4J1Czlhm8HtFPDuX5vlryHVrVNcvPs+j6a23eu6OP5q9u8beGbDxhqj3mq6VHEq/xW/8AFUOh+E/Dekx+Ra2y7YW3MzfeZq9ehW9h70nqeFisJKvP3YnK/C74B3OpXiXOqwthm+f+FdtfSfh34Z2FrpsSabCq+Snzxqv3q5bR9SsIY1sLZIUaP7qq/wAzV7L8IY7PXLT7HeXLJcbP3TRrt3VrWxyrQDD5b7HVfEdd8MNP0qz8Jprd4lmj2rR79z/vG3N/yzr9A/2ePCuifGj9mnVPAvifzXsLu1aN4WXcsn93/wAer4kh+Ad3D4fPim5ufJtoUVmjb5VVt33a+0/2H7rxLd/DwaR4ZsxPA0amRi/yxxrXz+Pjy1YSPqsBH2mAqQmfm7+3l+xHqX7OPxWhvPGmiTP4U17b/Z18vyrDJ/drX+Hf7Bt54i0+z8Q/C7xbD5Eksb/Z5Nr7mX7y1+rv7Q3wR8GftB/Aq68DeObBrnZFIbWRl+aFm/ir8vJvg/8AtG/sR/EpLHe2peGI7pnsrqNmZo1/hX/gVaVa05YWNSn7zj8SPNw2GpQxLo1fkz7P/ZV/Z78beEL63v8AxRCv2aGJfK8mBVbd/FX0p4u0nwlfNYQvokbz+asUU0i/Oqt8zfNXzf8ABX9szxCvhmF/E/hhjFJF/C+2SvY/hz4i1LxvqFtqb3LIrbmit5G/1a15H1irK66yPSxGBlH4djtvHsGmeGNBR41by1i3RM38TV8lfHzxpeaP8KfiH4hsLzyn/wCEXvILeRn/AOei7d3+z96voD4+eNg0Y0RrnZ5ab9sf/oNfMX7Ukltp/wCyj47165mbz5LCG3WFU+WRpptu3/vmuLCR9pmsEo7SOmph/Z5VN1Ox+b2j6HMqok1y0h+zr5s38LN/erdsdJhaEQwzKqb9m1k+Vmqa1tUt5gjurKv97+9WppdvMrfvkz/FtX/0Kv2Bw5ocyPicL7vukMOg3nnCZHjDxv8APCr7W27altdPmhV4ZkjWRl+61bsciSSedNbfL8q7m+9/31U91pKLbvMjqNvzIqp935q46lOctz16fuxOKvLab7Q6WzxqV3Km6Lc1Ys2j3KyPNI/m90X/AGq73VtPSS4e5b5Hk/1S7Nq1kXuhw27M6QzHd8zt/do9nLmsKpGEtjgNc0e5jdkmhZP4tqvuXbXK32mv814m4rv3L/s7a9U1bTkmg87fGsWz/dZv96ub1bw35kf2b5Wdfvssu1V/2a7KcYxPDxVP3vdPNNQt5maR5pvut/q1VfmX/erA1ix3L8nzLu+f/Zru77Q5rWE7Nu1k/wCBVgappaRskyIzp954d/8ADXfTlE8StG3unBatbTqr7Eyy/wATfdr2/wDYQt1gsPE+0/entCR/wGWvKNc092X92jbdny7V/wDHa9j/AGJovKsvEeEA3TWpyO/EtfqvhAkuPsLbtU/9NzOzhdWz6nb+9/6Sz5h/acsIJfjT4qk80gtrlyCAv/TQ15VqkOdyJCv3fk+bbXt/7RNktx8YfE+4DB1u4G4L/tmvI9es3Hzuiqq/xfe3V8Xn8eXPMV/18n/6UzwsZ/vdX/FL8zBTfJ1enq3k53puWmXcflNs8tR/u1CjSeXj+Fq8XlOb2hPDJtk/c8t/Aq/xVYW4dlHybf7+191VrdnG7/fqVZHjbKf7vy1X2Be0LH2ieTYibfm/9Bpv79tvbb92mpbzSbZjDll+XdV6Kzj2h33D/gH3aj+6YyjzFaFXZWfzty79tXbW1uZJFcBXFW7DR0kbfs3LWvY6WzfIkXzMn8NVKQombbw+Sq/Ju/3qt2lm8eX6Lv3bv4l/2a2LfQ0bY4tsszf981etdDMa7/vMzf8AjtYSlKUjanH7RlwwwNh3hb5fl21Yt7abd9/buTbt31t2+g7pPnDJt+ba1JJo7wyb9n/AmrnlLlOunT9oYzWc0J2dV/2aRYX2rDtU/wAX7z5a2X0vy03vSeX+8V3s1Xd8vzfw1hGpI19ifQS6S/nSpE67413IqrVlbV7dWCIoHlK21vm3NWvNpf8ApDvhWeFPnbd95qia32SbH2qF2/N/er52pT5T7Wn73xGLcW91IiQwzKh+9u+9WfNa2wxAkK5bcqsyfxbv4q6G+0/zriLZbKPvKzM33WqpeWqbhBsVvn3IrP8ANWPvHZKMOQwLu1mWPZ95F/8AHmrK1COFmX52+Zt3y/w1v6pH5m7bcsPLT+H+9WXdRpNN9z+Hcy7fu10Q92epw1OblkjAlhMl9smfaF+X5vu1DJayWTbJHZyz/dq7eR/6SU8lcb9rrUa7Ps/z/wDHx/Bu/u13c38q3POlHmJNNt4bO186H5tzbtqr81bmkw20kyTJDIyq+3b92sqzWaNwUG6Hzd27+LbXRaTNDHdJsRtirv8A9n/gVRL93qKnH2kuU09L0n7UY02YRv4f/iq1V09422Dy9v3VXO6ordkuFH2ZFbcv71fu/LWlbxpGqeTAqhdq/N/D/tVwyqc0T0I04RGLpqSfP5PzfwK3y7qhbT0t7VbmH5dz/wB/+KrrW+2N/tL79rrs2v8Aw1U1Jd0zJbPhG+7H/dpUypxj/KfTP/BKnTb7Q/2u/hnrk
d2UbUPHOnKqoeVj8/y2B+oZgfY1+nP7dP7TX/BOXTf2gL74b/tefs86jrfiDwzDbfYdZ0+xV/tFvNAk6ozpPE5CtIw8t9yjJIxvYV+TX7M/xc1T4CXvhb42aLpVrqF54Uu11S2s7wt5U8kEhkCsUIbBK9jXm/7e37Y/ib9uH9pjX/2jdV8J23hj+2Ut4E0ay1CScW8UMSxRh5GC+Y+1RuYKgJGQq1/RfEs8s4WoZDWlSqODwSt7Ks6U1OclUk+dKTs+eS5bWtLyLzKjToV8PUkny+z+zLlabd9/mz7m/by/4KOX/wC1LomnfBr4UeCU8G/DbRCn2LQYkiV7to8rC7rGoWFEQgLAhKqcks+F29D+xv8A8FMPC3w8+EUn7LH7XXwyPjn4dyDy7ELDFJPpsWWfy/LcKJlEm1kberxHJVjhFX8in1jU7iRkS8nH+y0hWmT39x5w/wBMZzs+VlnPy1MvE7g+pkEMnWSuNKMueLVdqcam/tFU9nzc9/tX1Wj93Qbx2WPALD+wtFO697VP+a9r38/lsftl4v8A+Cn37Kf7PPwz1XwT/wAE6/gBceG9Z16Ird+JdVto0e0YYCuA7zPclQX2q7KiM27DZZT41+xx+3T4X/Zx+D3xe+H3jTwdqutaj8RdIMNnfWt5GqpO0U8TGXeMqMXDvvG8kqF2gMWH5ZTa7etEiveS7lb518w/LVO+1u7ijdn1C4Xav3vMLM1GD8Q+FKeW1sG8qnU9tKE6k54mUqk3TkpQ5punzWi0rJWVr6XbZjHHZdSw8qbouXM023NuTaaau7X0Psa1urmyuY72zneKaGQPFLG2GRgcggjoQa/QOD/gpp+w9+0x4O0Ww/bv/Zn1DUvEGhWSQR6zpQEwuW2gSOGSWCSIOwLeVl1BPU1+EF3q+oSycajcK23+KU/40+DVbq4fbLeSP5e07fNO2va4i8Vcl4ndKeKy6pCpSbcKlPEOnOPNZStKNNaSSs07jx+f4THcrqUWpRvZxnZq++qXU/aL9rf/AIKSfB/xT+zzL+yR+x38Grnwf4PuLhGv726dIpbiIP5jxCJC5+dwhaR5GZgpUjnNVP2N/wDgph4W+Hnwik/ZY/a6+GR8c/DuQeXYhYYpJ9Niyz+X5bhRMok2sjb1eI5KscIq/j3p/iG+lVzPNKBu3LtkNWo9e1BVd3v597fLuydy15v+vfB8ckeVvKZOLn7XneIk6vtf+fvtOTmU/NO1tLWbRxf2zln1R4f6s2m+a/O+bm/m5rXuftp4t/4Kf/sqfs7/AA01TwV/wTq+AE/hvWdejIu/EurWsavaMMBHAd53uSoL7UdljRmztbLKfEv+Cd37bnh39jz4z+I/ip8RPC+q+IDr2gT2jNY3KCX7Q0qThn8z7wZ4wGbOVDFgrkbT+YsXiO9LjbfOB5X3VJ+Wr9r4ouzMkM08hjblW840Yfj7hTD5RisBLK51Fibe1nPESlVnbbmm4c3u9ErJdtXfOGfZdChUoPDt8/xNzbk+13y306H1g2vW7eMT4n+wv5R1P7V9m84btvmb9m/bjOON233x2r3b/gpD+2d4Q/bZ+K2hePvBvgzUdGg0rw3FYTR6nPG7yS+Y8r7QnAVWkZQxOWADFUJKj86rPxBfLCqfaZWKtuVo87v92tnSdcaILcxXM7L93czHdur2cX435RUzTD5hUyuTq0IzjB+3dkpqKldezs9Irc9iPEmGxGIhWdB80E0ve72v08j3OgEqQynBHQivJbHU5JV3797bs/eP3q1bXXJHZ3ikcfL/AKljtb/arpq/SWo0/wDmVt/9xl/8qPapZ+qqv7P8f+Afpr8Nv+Cnv7NPxi+D2g/B7/goR8Br3xdP4dtxHaeKbUrPPORwHbLxSxOUWMOyyN5hXcwHSsD9qH/gpf8ACS8+AN7+yp+xJ8G7nwP4W1KXGq6nIyQz3cDDEsXlxlzmTbGryvIzMgKEYNfnXJr1zD9xHT91uX95upW8QvHbur+bhvl6/KrV+d0fEvg/D5lDEwyepyxn7SNL61L2Mal786p+z5U76pfCuiPIisrp1VUVN2T5lHnfKnvdR2/Q+6f+CZn7c3wb/Yv1rxPefFP4SXWsSa3aRx2et6RDDJeWwXO62xM6AQyEhmKsDlBlX+XbxXwf/bY1T4B/te6j+078J/hzp2l6dqWoXXn+DYJilt9gnfc1qrKPkIwrKwXaroCE2jZXyJLrBhCwvPI27+JTVJtRu8PDNKy7X3blb+GvWreNXDVTH4zF1smlKWLgoVU8Q3GUUrJKPs7LTqrNbqzbv0vFYGdWrVlSu6iSleTs0vLofq/rf7bH/BIT42anJ8SPjX+yLrVr4nv2MmqiwtgUlmJyzl4LmESsSTl2QM3U15h+2v8A8FLPC/xu+Dlt+y9+zl8HY/BPgG0ukkljJjSW7SNvMSMQxDZCvmZkb5nZ2CnI+YN+bOpXF3DGq211KZPvbdxw1ZN1qF+gbF5KdqblVXPyt/drzMs8S+FsDi6OK/s2tV9i1KlCpjJzhTa2cIunZNLRXvb11POhiMuwtWM3CUuXWKlNtRfkmunQ/TP9h/8A4KPeHvgP8LNU/Zq/aM+GMnjn4eapcb4rFpI5G09WJaVFilG2VGkCSBdybH3OCS1eqt/wUz/YY/Zl8N6q37Cf7L13p/iXWbGSBta1iJIhakjKEs8s8kqK4RjCCiMVHPFfjBquo3IVsXs/8K7mc/erntT1e+l81GnlX5mZN0h+WujHeI3B+b4+pi6uVVEqslKpTjipRpVJK3vTpqCTeivtd6u7bM8Vi8sq1pVJUZe87yiptRk+7VrH1j8UNOk+MVpq8Hj7Vby9m1ydp9TvXn3TzytJ5jSM7A5YvySc5ya8ok/Yk+DskPkG+1sKTnAvY/8A43XgOp6pqjsVGpXGxv70pb+tc/f3eoxK+7Urgtvwi+c3zL+dfT5l4ucK53VjWxuRxqSjHlTdRaRV2kv3e2rNcTneX4uSdbCKTStq+n3H0jJ+wN8D5Dk32vD5cYF/H09P9VUR/wCCfPwJLlzfa/z1H2+LB/8AIVfKt5qd7BIGTVrovv8Al/ft9386z7zUdUZj5eoT7P8Aanbd/OuP/iIfAf8A0T0P/Bi/+VnE8zyR/wDMDH7/AP7U+uD/AME9vgUW3f2n4i4z/wAxGPv/ANsqcP8Agn58Cwwb+0PEBI6f8TCPj/yFXx02p62zB01af+7tadv++utPh1PW5mXfqs4+f+Gdv8ab8QeAv+ieh/4Gv/lZUM0yd7YKP3/8A+y4f2D/AIKQFSmoa9kdzfx8/X91VqD9if4PWxBhvdaGBj/j8j/+N18f2upapt+fVLn5f4vPb/GtvTNSv5Zkht9TuPm+ZlaRvm/WueXiL4fx/wCaep/+DF/8rNlmmU3t9TX3/wDAPrCL9j/4TxcpcatnOcm7T/43Vq3/AGVfhnbSiWO71UkDABukx/6BXzNo93rB/ez6hK3zfIqzN93866LSrq8KrNJdXH93b5priqeJXh4t+G6f/gxf/Kzrp5hlstsKvv8A+AfQ1v8As8eA7crtuNRO0YAa4X/4
irkHwV8IW5UpcX3y9jOvJ9fu14Xpup6hLJ5qX0qFX/e7ifmrdtr2984+TeyOW+bazn5a5ZeJPhwnb/Vmn/4MX/ys7I4/AvfDpfP/AIB67F8IfCkKqoluyEGF3Srx/wCO1MPhh4byCZLrg5H70D+QrzDRdRuNizXV84f7mybPzVeivLg2kkUch3SfLiRi1Yy8TPDfm14Yp/8Agxf/ACo6oYzB8vNGivv/AOAegN8LfDTsrmS63L91xMMgenSpF+G3h5ZFlD3GV6Eup/8AZa4EXUjSR/O+V6tuP3f7tT2M8zxn7I7ff+75h+7SfiT4bf8ARL0//Bi/+VG0MVhmrqkvv/4B2zfDTw65O6S5wwwVEigfotTL4B0FAMedlVwrbxlR7cVyOlw+YiXL3Mof76qzFt3/AAGtW0S6eNXlimX+Dcw+9/tV1UvEfw4n8PDNP/wYv/lYfXMPLel+P/ANqLwHoUTBgZyQcjdIOP0p6+CtGBDP5rkZxvYHGfwqlawySSLsw7N95VX/AMeqza25nb7Xbo6Fn+RWFdtHxB8PJ6Lhymv+4i/+VjlicNGP8Jff/wAAnTwfo0b+ZHG4OMZ3Dp+VKfCOjkY2OD/eBGf5Vet9DF5aCLyxtZvvsdu2nXGkBLpI5rgfd2qqr97/AIFXUvEDgBfDw7T/APBi/wDlZzrFYSX/AC5X3/8AAMs+CtEIUBJBtORgj/CkfwPosgIlMzZILZYckDHpVwaWzXRS5QlNv9/azVQ1LSr+3iUoiDe/zLn5WrT/AIiBwFb/AJJ6n/4MX/ysyni8FF/wF9//AACB/hh4YdizLP8AMCCPMGDn8KZcfCvw1chRLPd/L6SgZ+vy81T1KwnRmeAzbF/5aSSL92svU9MnSNo453lMm1Ym3D5l/vVMuP8AgGGv+r1P/wAGL/5Wccsxy2DusMvv/wCAbI+DPg8MH33m4HO4zjP/AKDQfg14Q3tIj3alv7sq8f8AjtcbNBcIxRbqVmba33Cq7qpXVreBiIWbePmlZnLfLXK/Efw/5uX/AFdp/wDgxf8Aysc8wy6muZ4Zff8A8A7a7+AngW8yZGvVJGCyTKCf/HaoN+zH8OXBV7nUyCcnN0v/AMRXH6qskqhBeNGv3U3OfmrY+Cv7K/7Rf7SviePRvgz8M9b1x3fynktY3WNWX+JpG+VVrN+Ivh7y8z4cp/8Agxf/ACsn+2MsjK0qCXz/AOAasn7Lnw3kIzd6qMYwFuk7f8Aph/ZV+GRJb7TqmSck/ak/+Ir7C+DX/Buz8Xbq3g1v9o7456f4ZhcsZtH00m8uFX/eX5Vr6D8Of8EH/wDgn9ocMaa/4p8ea5IyrumbVBAu72Vf4a8yv4ueF2GdpcPU/wDwYv8A5WaU8ywtX4MI3/Xofl5P+y38Nrn/AF11qhOc5+0pn/0CmSfsp/DGTGbnVB/u3KDP/jlfq/cf8EQf+CdMg8v/AIRnxdDuRv36+KnZt396vOfGf/Bv7+yjrDk+EPjX490hPu7JZYp1rmXjP4VPbh6n/wCDF/8AKz0aGYUlLSg0fm/P+yH8K7mQyy3msFj1P2xP/iKYP2PfhQCWN3rBJOSTdp/8br6q+Lf/AAbwfGrRklvvgh+0Zo3iaBf9VZ+IEktblm/u/L8tfH/x0/YH/bc/ZzkuE+KvwZ1qK2t5f+Qlo+bu2aP+9ujr0YeK3hnV+Hh6n/4MX/ys9nDYzDV95cvqjXX9j74UJ9261f8A8Co//jdSw/sl/C6AbY7rVgM5/wCPtP8A4ivBIJ71L86Wt5NFKv34Z2dW/wC+c1p2IvlYb9Tuf725pDWkvErw6cdeG6f/AIMX/wArPZoYJV43jP8AA9xP7L3w2IIE+pjLZBFwnB/74qZP2bPh4gws+o4/6+E/+IryJEktlBW5lLSfc2yP8zVoQT6nGDDPqE25v4d527a5ZeJ3hx/0TNP/AMGL/wCVms8vqxdnL8D04fs3fDsNkSah7j7QvP8A47RL+zh4Clk8xrzUx7C5THt/BXlqGaSTyLy7ddr8t5x3VQvJrpTsS4lx6+Yf71S/E3w3f/NMU/8AwYv/AJUeXiMOqbbauesS/ss/DSZy73Gp5Y5P+kp/8RTJf2VPhlKQxutVUjptuk/+Irxa4vb6RnH2112t8+1jVSW6vZGb7NezIP73mHbTj4neHD/5pmn/AODF/wDKj5nF1sHTV5UE/n/wD3F/2TPhhJy95qxwMf8AH2nT/vik/wCGS/hbgD7TqvAIz9rTv/wCvFI31G4VVS9uA2cblkO1qe1ver88d9M5X+HzDWs/E/w6pxV+Gqf/AIMX/wArPKWOyxy/3Vff/wAA9n/4ZH+Fe8SG41UnGObmPken+rqB/wBjf4RuxJudYGRggXif/EV43u1JoykM83zbmSTzD8tZd42o5WP7bKHb7zLKf8aqn4n+HU3/AMk1TX/cRf8AysbxeVf9Aq+//gHuo/Yx+EIGDdawRnJBvE6/9+6kT9jz4UIjILzWMNjP+lx9v+2dfOEs9/aXYZdUnQ7vutK3+NSf2pdqWlTUrlpG+bb5p/xreXiT4eKOnDdP/wAGL/5WZRzHKnLTCL7/APgH0jH+yR8LovuXmsfX7Yn/AMRU9r+yx8NLRNkdzqh92uUJz6/cr5xsb/VJbjzprm4jVvmT5zXRaHHeTRPFPdyOVbP+tO1qUvEjw7ir/wCrdP8A8GL/AOVl/wBo5Y/+YVff/wAA98sv2fPAdgu23m1D6m4XP/oNaEPwi8KQLtWS7PuZV6+v3eteUaIbidQq3TN+6YL++KqrVvaLcX1orPcK5eMqPMOdsi1nHxG8OZO64ap/+DF/8rG8zy2l/wAwq+//AIB6LbfDzQrU5jluD6hnU5/8drWttOtrTTxpsW7yghXk84Of8a4eyiu4lSZp8+Z9zaK7DTVdNAVZcKwibJJz681+l+GfFvCecZli6eAyaGFlChOUpKfNzRTjeD9yNk7p3122PVyrMMFiak1SpKLUW9+mmmwg8Naech3lYHHBfHT6AVYGm26xrEC21egAAz+QrJWULue2D+rw5/8AHqtFQkQfcERfv5NfnUfELw7Wi4ap/wDgxf8AyszjmmB6Ul9//AJxoNiu4K0gDMWI3DGT+FVbjwVotyhjlEpBOSN/f8qfLa3Lzec0BdF+bcx27awvEdrKJAyySB4fRvl2tTXiF4dXt/q3T/8ABi/+VnZSzLD20hb5lm8+EfhK92+d9p+X7u2UcfpVKX4DeCpozG9zf4Jz/r1/+Jrltbnlt4HNsSyjcrLuK/8AAq898Q6tco7RR3Eqsv3VEx2tXRR8QPD6e3DlNf8AcRf/ACs64ZlTteKPXpf2afh3KBvuNT4IIP2peo/4DUcn7L3w3lJMl1qhDDBBuUI/9ArwaTV76NvJhurj5fveZKfu1Vm1TUmkXZdSfN8zfvD92uuPHXAD24ep/wDga/8AlZlWzanCN3G/zPfH/ZP+GUgO6+1fLHJP2tM/+gVBL+x/8KpQQ17rPzdSLxOf/IdfPV5rF8JHb7fOuf4vNP8AjWdd6vqjReVLqE+xd3Kyn/GtFxzwGo/
8k9T/APA1/wDKzya2d4KMbulf5/8AAPpOX9jn4UzIEbUNaGO4vI+R6f6vpTof2PfhTbqFjvdYwO32tP8A43Xypca3eyMTHqVx8v8AC0zfL+tOttc1adkji1ScH+FhM3+NJ8ecA/8ARP0//A1/8rPOeeZZf/d19/8AwD6xt/2TvhfbYMdzqvyrgZu0OB/3xVyH9mr4dwjCz6ic9Sblf/iK+Y9F1q+df315cNIv8X2g/wCNdDY6zdzXCma6kVdm5VaQ1jPxC4BTs+Hof+DF/wDKx/23lslf6svv/wCAfQ0HwE8EWyhYbjUBtGF/0heP/Hamg+Cfg+3IaOe9yO5mX/4mvC4b+/jjEiXbyt95f3x+WtTS9YmbMM1wyurbnXJ3Vzz8Q/D9b8OU/wDwYv8A5WdEc2y/ph19/wDwD23T/hh4e01w8FzdkgY+eVTx/wB81raboNnpWowanZySLLbyiSPLDG4fhXjlnq108CJFM7H/AGWNeqfA7xbpug+PLK/1WTbZxyrvMi53L/FWdTxE8P1C3+rdN/8AcRf/ACsl51l8XdYdff8A8A+lfD3/AAUV+Pfhf4fXPw50bTfDkdpd2pt5bo6bIbjYVKnDebgHB9Kwvgp+2h8VfgBoWp6H8OtF8PQtq+BqF/c6c8lzIoOSm/zBhT3wBmvvD4kftGfs1fFP9kbSU+G0Vkuv6IlvLp6vZLHI0kf3scc18taf+zB8bvht+2d4V/aV+NN3aDTvF9x5ljamQPujWP8Au9F/2a+fxniT4Y0EpLhalLT/AJ+JW/8AKR6WV5zg8x5qc6fI77b3fTseReNP2qviZ48uWudatdKUs+4Lb2jqFPtlzWIPjZ4x3ITDZEIcqhhbb7DG7pX0n8fLPSjq19NBaRrKZd8iLEFIj3bvSq/wk0q08QrNNFpKKscW7aYw3/fVeLPxZ8K1HXhKl/4NX/yo+mp0/aHg9v8AtI+MIbt7648NaDcytAYla4sXYID/ABKPMADe9ef+LjaeNdVOr61pluZD0WNCFAznAySf1r7T8QeKvAvgnVEsLzTLfUXKM0qraqyw1xl74i0DVll26RaM3zPueMbYa9DB+KfhhUhePCtKP/cVf/KjzcbKlRnyyhzHyK3gHwizMx0SLLnJPOemOtZ158IPCV4csbpOuPLmAx9OK7f9pP4xSeHrddG0bTHeOXc3mlwBXxr8Q/iDrN1qRtri8m2yMxz5pK7q9GPif4cyjpwvT/8ABi/+VHnzxOAo6ypJf16H0Bc/s8eCLh941HVY/aK6UD/0Cq4/Zn8AgY/tTWMYxzdp/wDEV8j6n4x1J2kha9nP+15p/wAa5a88ZaxPOiR6hdeWzbX3TN/311ran4i+HFf/AJpen/4MX/yo4qmdZfTdlSX3/wDAPubRf2cfAeh3K3Vvf6rIy9pbpcH8kBrudCsbfw7fLf6cmHUABX5HHevFfhx/wUB8P/sw/CbRfDPg+K0vboWbf2neXdos5mZv9+ty9/4Kk6N4n8FC70qO1h1Bfv8A2e3Cttrkn4k+HUXpwnTf/cRf/Kj0qeLy1K9op+v/AAD6D1L42+OdV0X+wLme3FtkYRIyOn/AsfpX05/wS8/aiubT4lR/APxDooZPENrJBpF7ZqQ0U6RvIfO3PypVWAKjIbHGCSPx/P7WfirxL4qPiPxD4gvrmeWXazXU+EZf4V219u/so/FJ/AHxL8HfFVSD9mMVycNgESQkHn6Oa+jyvFeH/HvDmcOlkUMLUwmHnVjOM+aXMoykrWhG1nHXe60sbYephcdSnToqz/qx+hPin9rrwf4B+IF78IvEOpLb3bRbYmml+6u7b92vP/E3jLSfiRpMvhW8mWaxVvkkZfmkb+Fq8E/4Kha54Y1r4Xr+1F4Pv47TWNNv4/tUa/6yaFvvKteN/s2/tOX/AIkuIEu9TuJ5mdVlWR/lb/dr+Rlzyj7WD92R61HD4eUOWovePqr4E/s9+LU8bPNo+pXFxEt/t+zzfd2/wr838NfUd5rGq+GvDcOlXmiRw3bJuna3Tay/w/LVD9kb+yNb0K31Sa2VZWbczRv+93f3mruPjSumqxSKzb5YmX5W+ZqdaMY0HJfEeVWlKOMVM8K8X302pXn/ABMkaTb/ABNXhX7a3iC80f8AZ1ltoYvJs9Y1GGCBZIt3mSK38O7+7X0N4k0220vRvt80bDd96T7rL/s18b/t2eMLzxJ4g0TwBczSS2Ol27XirDcbo1kkX5fl/vUcNYf6xm8ZS+yXneI9nl7ivtHz5a6RebW/fRhlTd+8StbTYXbZDbP+98r723/2anabazW80XmzfIz7WZU+bbV+G33XSuNvnNL/AH/4a/WZRhL3T4vDx93UntbeFbd32eUy/N++/irSW3ddqP8AeZfn8tvlqGGzcRs83lojP8ke6tOxsy37lHX5tq1PsY/EejGXL7plalpcLbpZtqN/EzfN/wACrI1HTXZfOSaTf/Ev8K//AGNdj/Zb+S0I3RFflaNvmqCbQ5mt1mFm3zJ/EnytWlOmpS0OeVT7J51daKjQO/kwzL8r/M+5mrB17QftDNc71j/56/3K9LvtFhj+eEbxIu1FVVrm7/QfMhZUtsKvzN5la06f2jzcRV908m1vQZomaaHb++2/Nv3ba5bWLV4VdEds/d3N92vWde0N47OW58narPu27drL/wDY1xuqaK8Lf8ey7Wbc8a/3a6adOMjyKlM861bSxHCR8zNGv3mf+Jq9S/ZCs/sdrr8ezH7237YzxJXIavo6bXR337m+SNq9B/Zjtmt4tdLoAWnh4H0ev03wjv8A6+4W/ap/6bmdnDUYxz+lb+9/6Sz5v/aAsUk+K3iQgr82sT7lK/7ZryTxBpPkyH522V7v8b9O874k+IZCmVbVLgHcvfca8p8TaS7Yfzm2/wAKtXxnEMf+FvFf9fJ/+lM+czDm+u1Y/wB6X5s801Sz8ubfsU7qghtWkVnxjbW9q2kvGzb3Vd3zKtUo9Jdm2P8AL8n3q8bl5feOMpw2TyZ+fH+1VuK1eGVJH3fd2otatjo825fITen+1WvY+H0umWaZGT/Z2/dqOX3dC5HP2ukzSSbI9zbf4a14NBmk2pD93+NWrpNN8NqyB4flRU+WtPS/DbztFcp87L975avl5/dI+E52z0Py9qeSw/2q2bfR9pXbw395q3F8PiOHe+1/+Bfdq9H4bm83Yib41/5bK/y7qylzco4mJY2tzCqf6Nv2/NtatjTbFJF3um5m+Zdv8NaFnod55vlodqt9/wAz5v8AgNbmm+F3VVZ7b97/ABL/AA1hKMzppmRb6G9xILxE+RZf4aW60d5GfEOP9n+GuysfD77Um8vCw/M1X/8AhG7U/f8Allk+ZGX+7XLU8ztpy5ZaHmFxpEMg8l7ba33vmqhLo26Rnd1G7/VMv8Nejal4XLSP8mX2feb7tYmpeH0t1SFId7b/APdrKPx+6dMZRl7p7akMLSB5tu5fuR7Pl/4FUFxbv9q2743iZfnZfvVsyWr21r9pSLa6syr538S1W8794k0MOz5NzfJurzJUT7SMTGuIUt5DDN8rSf
Mu1fvL/tNWdf2Kecr20ioNm7dJ/DXTahbJcLvSzZhIm5v96srULW2aNYfmZ1/h2/w0nS5dUKpLlic7q0c0cbpZzL/e3L826ufvo0C7PO5b70bV0lxZ7l3ui7du1N3y/LXM+II0s2MyfIrN95VqfYyUrHFUqR+0ZepTfdm379332/u1Ta4/5Y+cp3fxU/Up90jSQp+62/Nuqrat5KfvnjxsVUjVfu/7VbxjynnSlzS8jYs2maNfnyq/3V/hrZ09khh3I7Jt2sjN96uZjvNzeS6cK/8AC3zVu6Sv2p/JRN38XmL/ABVjWjyxKoyjI6zTbxGhS6mm27m2NJWlDqFt/qfmdY93y/drnLOR4YUSE4TYzbf7zVaW8ttxvPJbzW++y1zcvvanVzSNj+0N0LIlsyuqfd/vf7VUby4cR75tzr9146zrjUpmZXeZY23bWjZ/++ai86ZSZpp1x8y7d33WojGZUqkuh7D4eYN8F5GQYzpd1gf9/K+c2uEdlTfsl3fd+8rV9DeF23fAt2kO7Ok3e73/ANZXzVqV1+72I67Nu5l/2q/fvFayyLh6/wD0CQ/9JpnTn7k6OH/w/wCQ/ULhGVjN5jOr/wALVnX+sRhWSxdQn97+KorjUIUVnRFV2+/uesa81ZAqv/e+7X5DCjzHy0sRLk5TZt9SkuNrufmX+791qbc30PmL/pkjt95l/hjrnYdYeOZk879yz7U/u1dutQhmhDw7gm37tdlOPKYSr8pozXUdxGN+3O37yv8Aeao47qaJWS2fZtbayt/FWTDqW24fjejf3m+7T11Kb5od+U/vMtb8suU4albmlc2luEaTZ0H8G5qsLfTKPJR9jf3q52S+e14++f4Ny063v/NV/wC8z/8AAqcpfZFKp7v946iPUsMUd1fd9+rNvqTzM/zrsh+6q1zUNwZG8t3Yj+OT7vzVo2d88jb/ADFwybdy1zVJTN4/zHV2OpbpVCXP3k+Tb8taul6pt2qjybl+bcqfLXJrqCWsMUe9f/Zv9mtXS7yZV3vMrbfm+Vfu/wB6uKodtH4jt7fVJmhT9yoT722N9rVpLrW+3857zczOqvIyf7NcVZ6pDcKyedtVf+ejVYbVHMaOkzMzfNtj+WOvHrLm1Pao1OWB2C6ptVLmNP3jK3zM1Mk1TLM73O/7vmqtc8uvPDZ/67BkTZ/lqkXVpolb/pomzatRGnIupUgdCtw8krI7ttm+5SrdWcarvm+dV+dY03Vi2987xqjvIpV9v+1t/vVJb3CXEh+fzd25Xm27dyrUezI9pD4S3NdJdq800rNE391dtYV800sazW0jJ95XVv71bWyGaFX37hGn+r3feWqN3bvHC7vtZG2/u4/vVcY8vuhJcxzupzX7R/Y3O9933f8AZrm9Qt/mmSaZtq7WRW/irsL6F4W87yZMMn3v9quf1zT0kk8t4d3+0q11UakY+6jklR5tZHL6havDJshdn/2v4dtYmpfKzTbPmX5UbZ8tdRq1r5e77u1fvVgatMgGx+iv8+3+KuuNTmmOOH5o8py19bzbmf7rfwKy1n6hbO8bBPmffu+aty+UyD/U/K3/AI7WXf2TyK3zso+9XbGXumNTD8pjzK8ch9KW1t3jbfNHt3fNVjyUbO/5v4qljtQzbHfedvy1pzcpz+xn8RY09X+byUYN/eb7tdDpdqlrcRbE+b7rMv3aztNtUh2edCwbZu/3q6PS4Ukj+eFW3fcZkrz61aB0U6Muc1dPhRdjpu3fd/2d1dDZxzRsm+RQ+z7ypWPYrCIvJy3zfc2/w1sWcfyMrzM/mfNu/u15sn73M4ndGPL8JrWavHH8k33v4mX+KtXT1DK37rfFGnzMv96s+xaa48uFNp2pt+X+KtSxV2/0ZwuPuurVjKXu8x1R980IUfa8Lw7yzbauTrM0cUybc/d27vu0Wq+XiaaH5fKbbHGn3lp+n2u0lEhb7/3mrl974oHSuWOjkWbPzrVUdEXds+fd81WrNXmZJrZNjbf3sci7lqOzhmhQu8KuJN33m/8AQa19Lsd0iujtsXa77f4V/u1pCXvSOyPN7L3S/pdq8W3y4Y9n3ZW+7/3zW5p+moq/vk+78vy/3araLps1uqrNc74t7NLuXduWugtbNJpFeJFcQ/LXp0o+5eJl8MinHYvJMuxP9Z8vzVp6PpH7v5+rfcXft27atLp6RhU3q5X7kcK7latezsvL2vMjb/7u3+L/ANlrrhKNMcpSlIrf2Z5liNm1U2NvVv4akt9JLfuYduzbu3N/erYt9N87fNMOI2+Vl/ib+7tq3JH9ojX7TtRdi/KyV3R5Kkipe7H3jnV0Pdcb/lZvl27vvVV1DR9sgR0ZQrfvWb5lau1hs7b7Q0yWfnPG219r/wAVMutJdpi8f3lX5GZ/l/4FWsfdkY1pLk948x1Lw7H57hE4X+Ff9r/ZrDvtO8jbbTOsI2/uvlr03UNB8qOW8dP9ptqfe3fxbq5bxBodnJL8/Rv9VtrlxFSPwnnxlCUzzvUNNk8yOKGNVbc3y/e3f7VTeHfh7r3izWo9B0S2a4mvH8q1ht4mkaaT+7tWuo0LwFqXjTWLfw9pWiXE13cOsEEdvFueRmav2S/4Jgf8E0fDH7Nnhiz+J3xLsYb7xdcRLJBG8S7NNVl+6v8A00/2q4oyVSrGETnx2NpYWlJs+f8A9gD/AIINaPdaNafEf9ryBnE0Ucll4ahfDNH95fOb+H/dr9B7X4f+Cvg54XTwV8NPB+n6Ho8UW2Kz0m1WJdv+038VeoMqbCBXnvxm12DRrAtM+0fxf7Vc3EK+rYHT5nzeBrVMXjo855n4x1+3tsrEGl2/8865STxBCzKdi7G+b723a1ZXiv4laJBIYUv4dzK37uR9tcRJ4uttVulvBqXlIr7dqv8AK1fkmI9mpn7Fl2DoRpe8z0m78SQiEfZrjLbf9X/do0/xZpzolvczNE7My/N92vL9Q8bY02Qw3MLlpV23CvuVVqjp/izVbP8A0bUvLzHL961b5WX+GuX2nLC6PVjl9CUfiPZriaCKY3KHzpP+WSxvS/20n2WS2uYVlST5fJkTcrf8BavGbHx5r3h23vtS1XWPt8Mcu6KG3i2yQru+7/tVtW/xOmvI5X8hlVov9HkZvvf7taU6s1Exll8Ho3cpfG79h79i79oBoj8YPgjpbX7RNEuraOn2WdVb+80f3mr4u+N//BB3SNHvH1X9mT43yXULK3kaD4si2tu/hVZl/wDQmr6/1r4qPafZg6NdPJ8sse/ay/8AxVTQfEabT1uEnuY5Ujt2ZPJl3MrV6WGz/HUbwvzGdHDTwsuejOSl+B+O/wAbP2Vfj9+zfNFD8Y/hvfaYm/Yl9Cvm2kkitt/1i/LXDXFuG2wwzKV/jbf91a/dSPxponjrSx4M8c6VY6lpVwn+n6feRK8Uy/8AAq+Mv22f+CS3hHVLCb4xfsS6kbcKjPf+A9Vus+d/e+yP/wC02r6TA5thcauSXuz7Ho0c/qxfs8XHT+ZfqfnrdfuZAg8tlV22M33v/wBmse+WG4bdvVdr/PWz4m0/VfDuvXPhvxVpVxpWo
cifdrsdJvP+EV8NrPDNvuNQVkVv8AnnH/ABNXF6db+beJvDH+9urQvtSfVLpcriKNdkS7/wCGqk/dsxcvvnYaLrczKru+Qv3a9X+Ecf8AamoDzl3K33f9qvEPDsEN1Mi78D/Zr3H4c7NP09fJ+Uqnzs38NO0Ix5jGZ6n4s8ZW1n4fa2dGEdvBtiVfl+b/AOJr468dalc+IvHVxeB9/mS/dWvc/jN4wh0vwu8MN5IrSIy7f9mvDfAtrNqXiVpkhaV22/LULmlVHHlpw5mfrR/wa4/sAab8ff2lj8fvHug/atE8ExR3UUMnzR/bm/1P/fO3dX27/wAHjfxsvPAX/BPTwj8F9NuWjbx94+hhulSTb/o9rG021h/d3bfyr6S/4N/v2U4P2af2CPDd9qGlfZtV8VRLqeoGRfnZWH7vNfnj/wAHqOvvL41/Z/8ABkwZ7fytWvWj7Ft0a104ufKuRfZRllsZVE6j+0zoP+CLH7V9h4L/AGOPDvhjW4ZGjtYFgW3hXa0aqzbfm/iavpPx98dtB8ReHX1LQvEkM5+ZZbW3/dtH/sstfK//AASH+B/h7xB+zDpWg627Mtx5c8U0kH+pZm+b5q+ivjJ/wTu17wr4dvPGHw98etDN/wAfHk3zr5TL/wBdK/DswjTqY2c0ftdCXs8LC/8AKfPXxX+LGlahJND9jVJpnZrrzIvu7f7rV86ePvEtzHcPDo+ox3CQ7mlWOX5lWtL4qXXxR8M+K7zRNe8N3yhk/wBdCm6KRW/iWT7rV5neaD4k1icvdO0PnL95bVmb/gVGHwvu8xt7aMYf3jhviB8SJrqV4ft8flRtuTb97/dZqreBZLzWL93ke4W2uott1Gq7v3f+0teir+zdDqEi6lePHcJMnyq0DR7qztQ8F694Llms9NRltlTcjW6s33f71d3JGnEUJ1/iPObi8+IXgvXGv/DGsTQNbv8A6P5cu3ctcjqHgPwl8Qrj+yvElnDp2rzSs32yP5Vbd93cteueJG/4SzT99gjS6pGm+Bl+Xdt/hZa868YabYXnh5PE9heKt1DKyXVu3ytG3+7WeHqToTfJodWM5sTDX3jyfxF8GvGvhPxh/wAI24USQurRXG/5GVvutR8UJodNs4NKluYXuVddzQvu3Vc8ZeKtY1KEveX8kjxp8rM/8NcNobTeItYWa5mVoo3z+8WvocP7TFctWp9k+HxnLh5+yX2j0Lwvapa+H4Jk/hZmdW+81ak+tOsa22yFl2fut33qzo9ht9j7m3Jt2/w1LHpqeWn7lt0a/JXLWlGM+Y55PlVkT/2X9sjW5Tlv4lVfu1p2Nr5OD83+zu/vVn2P2mz3/IyozLtbf95q2IZEXY77fNb73mferhxNT3TpwcoxLul6lZ6fMuyP55H+bc/3a9t+A/jKG3mjT7Yrt5u542+XdXzrc6k7TCaSHcV/iX+7Wr4T8bPo99Fc7Nrxv8vmP8teRiMLUq0nKJ6+DxlKnP3j9AY7qw8a6b5M0MKrJF/rP7v92nfD34Q/2bqU32O5kuV3K0u77v8AwGvnD4V/tD3LQ29tc3m5lf5o/wCFq+mfgj8XtKVYftLySbhul3N8y7v9mvGlGUY8s/difU0KWHxK5j7F/ZH8NeM2e2sbGzjkMjbrVW3fKtfU8a/Gq50qDTZobGFNjI7R/Nt/u181fs5ftEeD9PW2ub+2ZAqbEa1+Wvq3wP8AFbSvGMca2w+zxhP9ZN827/ZqsP7Bx5eY58Zg6+H/AHihzROS1f4ReG/C/iPTfHnjN49Qk0WKSfTbOQLt+0MvzSf71fm7+0dq3iT9or42eILOG2/126CLTV+Vodv8Py/xfxV+jf7UnxO0fwP4Uub/AMlbqaGLzWWT7u3/AHq/KHxJ+0Po/hv4ral8XdHmtYvLlkllj837237vzf3lrsdOnbkj6m2X04Rh7aqvekdT8Efhanwr+H+qa8eNS0m/VpY9qrL8v8TL/s19ueFvG/w//bK/ZS8Tfs7Xs32qTWPDUkVncRxeYy3SrujZf91lr8yrf9oi/wDjJ4w8R+J9BuVh/tD97f2cKMqtN93d/u7a+tP+CZfxNm+GvxGikuVX+z3u7eFEj+bzGb+7/wB9V6NGNSlWjUUh1qcMVhKkHH/D6nwfY6Tq1nCfDetwzNqOn3ElrdRrFtZZIWZW/wCBfLUyxmRvOSZn/hddm3bX0h/wVZ+Blv8ABP8Abv8AF2l6SPK0bxZaw+INMWP5dxuPlm2t/D81fP8Ap2jpDH5DooRW2o2/7y19soc3xSPhKdaPLdRGWtinzeQ+3b827+L/AL6rSs9NT7ON/wC+f5tjbfmX/ZWnww2zfuYdzo3zLtRv4aljaaS3aF4dnmfL838VKNLm1ibyrRjuXtH8nyZnttsrR7V+Z/u/3q6Gzb93++3J/D+7+b+KsnTLXz40RHZXaVWfbW9bwwrb7/mYLu27vl/3t1enh6cYw944ZSlUkWVW1aHZ9m8vy/ux/dqtJHbKhmRN211by2+81TQ28N5IXm3MFfb5e7crLt+Vt1MuLdIIfv7PM/ib5mrtpxCPNUKGsWsMMrO8youxvmb5vLWsK8WBd0abcrEu7d/Ev8NbeqMzWM1s+5PMX7yurLt/3a526uHmmWFJt/8AFEzL/DXdT+P3ialORQvry2ZjDDu3yJ86t91Wq7bE/wDCMOVBH7iTHGT/ABdqzb7yZlWZEZNsv73cn3v92tCFw/hSVoyQPs0uC45H3utftHg875rjv+waf/pUD2OG4P61Wb/59y/NHlniiSaG1lmg/eu3/wAVXK2qpfXDQQv5vztuWP5trV0Ovb7yXf8AKVb+H+9tqtY29tYxy3l/M0McKs3zfKq/7VfkcvhtE+IrRlHUzvEnh+5sfD83lbVmuIv4U+ZV/iavnz4mWttoqun2lSv97fuauo+LXx8upbyb7Nqv3V8pNv3fLrw7xd4xm166+0u/zf7L1z80uY8r+JqY2r3T3Fxvi2vu+/trofhz8P7/AMTX0KIjE7t33KyvDOivrGqIkaMXkbb8tfV/wb+Ftn4D8Mp4n1VI0laL5d0W6tInNUl7vKjJ0X4W6b4a0dp7xI1dV3KrJ81ef/E3XLDTZJLa2mjY/wANdX8XPi08cbQ2r4Ma7UVUrwXxJ4k/ti4fz3Ynf95qzlU5pl06MY+8JeXj3TGZ5s7v9n71ULpoyrbNxb/dplvIjrsd2P8Au/dqG8Z4s84FVGJp8RFPI7Lv2Y3fw1Ey/KXxuX7zVIoSaQOflVqRIUjZo9mWb7u6jm/lJjsNaPzofP8AJ2rT7e3SSP5E3f8AAaVI23bE+7/HVm12R2+z/wBBqZRK+GAab51rcK6JuZWr2X4V+KJmsVsPlbbuZF215LZwoy7zGylvv12Pw/1r+xdSi3u3lfx/Luaj7PukSjzHpDTPMzTb9zbt21X+7UqyJ5fnec3zPurHvLzbeb4fuN83zVNJdJbqXd/l+9tZqfNKJHLH7J0K3Ds2/YrorL/FU1xdTLy/zbn3Iu/5WrO0G6S7j8lH27vm/d/xVq3FqFkO/bhU2/N/DVEc3L8Rxfxm+x
yaD50Mf+rXa22vIvC+qeTqDJv/AI9tet/GAGTwu6InKu3zMu3dXhen3Xk33/2dTzfZNYy5vdOo+Lmg/wBqaTFrFnCu2GL5mj+83+9XO/C7xpPoN/8AYHmby5Gwyt92u70vyfEHh99NmdTuT+GvJtf02bw9rcsPksu1vkq5R/lLpy3gfQUapfW5vIfmH8DLVO+sXjYYT7v3P9msn4I+Lodc0/8Asq7uVLr8qq1ddeWm7986MtTGRFT+Uw7GMRzKzorD+CrV1dalpOo23jDwxc+Tf2twssUi/K25W3LVe9hNrKPO2/8AAa2PCujJ4ruho803ktJF8kzfdVqqn7szHEcsqR+lDfG7Tf2hf2fvh98TIHT7dLb3lrqyr1E8XkKSfrnIr9fv2R/2ePhb44/4J6eH/h54/wDDlrd6f4w8GW7anFOinzVkgU1/PX+wvD4i0XQvEnhDV7qR7bT9Qiazjb7gLqwdl9m2Kfwr+ij9jfVpvHX7E3gPwxpF2bTVNO8H2CxqOsiiFcH8RX7fxIpx8IcmXapU/wDSqh5b5G218j+cz/gr1/wRl+KP7CWrX3xd8J6LNqHw/vNTkihvrdNy2O5vljkq18X7s6f+wYl2n/LPwlpJH529f0n6z8I/h3+038GPFP7OXxi8PwXtlrNnJBe2VzFuZNy7fMH+0rfNur+dn9vj4Zx/Bn9nvx38HFnEy+FCuiiRh98W13HBn8dlYeHdSnUynPGtGsNK/wD4DPU+/wCBIVIZdj03zL2Tt90tD4s8H+MI76FP3y7f8/LX15+y/wDtr6D8Afgb8R/h74wMl5p/ibwRfWFnY7fMjkuJF2xr/wCPV+eGh69No98uH2bfl/2a9W0HxNba94fNt8ryLF95q/FYylzRPl6lOPLdHpH/AATd8OXHhf49eIdPZR5T+Fd8bAfe/wBIhr6E8P8A7U/j79mr9r2bxF8P9VS0lRreO8Z4VctC9ugbBb7vBrwz/gndqE178ZtahuGJNv4adI8/3ftENZf7UXiWPTP2uNZtGuioW3sy2P4c20dfseeYeniPAnDU57PFP8qhz4SpVp4lSW6R+on7cX7GfwE/bA/Yd8Qftk/BDQ2sPGHg2wW91a3h+ZLrdtVt235vu/NX42+OrOG+0eLUi+4xru3L/E1fY/wD/wCClX7ZP7Ofwn1j4UfBzxho/wDwjviCyZL3S9S05ZdrMu3zN33vu18oeJ7G5utNuE1W8a4nm8x55NqrukZtzV+IYCnUw2HVGfT4X5HQ43rurF7/ABep2nwD1j7ZpJtodpaRFl/3v9qu71DQ7aWTeltJn726vHf2arq5s9ShheGMLHL5TRs+75a+hJtFuftHk71d/u/7q0qkZU5nv4P97SOMutN2nfN8iL8u2qt1oZ2/uYZAv8G75mrvJtH8yApLCrfNtSTZ/wChVWbQ7lY3/cq+1FXc1YnXUoxitDhJvDMcciJ833tzfPt3UQ6LukfZ/vIq/wB2u8Xw680f76237f8Anp/7LSWvhm2hcoE2Ue/GJUaJxi6O8ceyaHbt/harEOl/vmeL5kb5WXb92ulPhkys7/d+b5lbd81EekvDtT5kT7u2sqkToox5ShY2MNuq23k/PJ8r7a09Ntd0f2Z3ZUX+FW/iqS302BJmj2bv4ovOVv8A0KtbTbFPOd3hjKrL93+KvHxEYy5j1sPzdCa1tflWa5hbCrtZY/71eg/D2ForecncQUj2lmz2NcdYw21vJF+//cx/+hNXbeBwFiuI1PygptBXBHBr9E8D4cnihgLdqv8A6ZmfTZQ28VG/n+RTvrFBqczbnLyTOdg7L/eq/pOmwzNvSb+Hd8v3akcr9qnZlUO0hUEt/Dmr+j6f8o2Q4T5mT/er8v4ol/xk2NT/AOftT/0uRvOMXUlbuXNL0+2t5kT75kb5N0X8VW7q1RmP2OH51ZvNZX+7UkPnRqhR40+T5G+8u2p5vJaQQu+15Pmdo12rt/hrwVHmnzEy92HLymLJZvJCXhttiqu75v4qgWJJl+0wvvRvl+V62Psdt/BMrhvl3M/3WqvJYwqkoTcP4fMVdv8A3zXpUKxx1qPumVJG7wtbOm4r8+7Z95qz5rF7qaNLlFXd9xvur/wKthrPyZGe9RUh2bvv1Vms4Vm2Om9JvmRV/ir1aUuWR5EqMKnumRfaXN5LfY/ldv8AlnVCbR3WTzpkVTt+Zl/hrpWtUZl32rQpG23b/eqnqdq6x/YzMqK38TfdX/Zrsp1vd5TJYSMtWc7Jo9t5exAu+b7/AJi7qytS0+GSHYiKp2fJ5fy7f/iq6m40v5Wh2bj/ALTfvKx7+3SOEOm4FW+VVfdu/wCA1p7SfNZHRHB0ub4TktRs5vkheCRS3yxNH8rUklmlvIsPzI/zN5ez7y1u31j5k3z2bPtfcm1tvzVNb2264b7T5cj+V95vvbqqpU933jL+z4+1MG10ua3tTCiSNu+f99825v8AZrV03TXjYTeSybl2vGrfKtX7fSZvLV96qytu+Wun0XTXnsw6Q7fMl3bm+X5a5ec1+p+4WpmS4jDp5au3ypt+bc1QND9sjMMlttdfmZZF+8tXJY/OUyecqtH8q7v4afGyXC+dD5gb7ryN97bXycacoxufZYepGVU5jULVFY/3Ff5mkTdtX+7WBq1htj86Z1RJPu7fm2rXb3EbyQyTTIpSNd33/vVzer2Kbi88SqWXc/8As/7tdUactj6bBy5Th9Q09GbZDYeaaxdWiudzFLNY/MTc275v+A12l5YJ5ZmQMfmbYuysXVNLvFhWabcHk3KrMu1WolTlI+rwMonF30MNvav8m1vuuv8AdrEltUVuX+X5vlZPvV0msWMyxnlSy/N838Vc83O5Nn3UZf8AgVKnzU46HoYinSlQ5jNvrdGhG9FT+/TI18sr5PKt92rs1i80fk71+Z/4aiVXhV/k+98rbv4a9XD1OblPzzOKPUtaXDD5bGGbe393dWrp91bLcFJEYlk2p/eWsRZpoZGh+5uXan95quWeoOrOjpIP4E3bdzV6FHm+I/Ls0lyz5TpbUf6Ps3/vFT5FZ/vf71bWkzblGyFf9uTfXN2epJ5yRzQsqs+FZq6PR7oQ3AR+Il+dG21FVVIwPnqkrz5T2TwiNvw+hBJGLaXJ79WrN0+zM0guYXbZ/ufNWr4YZR4FicsMfZpCT6ctVOykeNY/szxiJduxll+Zv71fsHjK7ZJw6/8AqEj/AOk0z67M6EqmHwvlBfki3bxwthPsyr8nysq/M1WFs0muGCfIjfMqs33VpNJjcsf3zb9+1tzVqw28Ls6TIuFRfmXa25q/BqlT4YxOOOF5feM2PT03J/tf7VU9WtUhSZ4YdzfxeW/y10kbov750XLJtRdnzNWfeNbXEbpCmP4ZY/71XTkKUY09jmrlblo3+zJHv/jaOqElrtY3O9Vf7rfJ8tbt5pLwt51rCrt/zz+7VGRX8kOEUfIypuTdRUpwLoy5TLurPz1KO/y/7P8Ad/vVX+zorGZ4Nj/9M/4a0biGaGPzH8vP3dy/3agby41875kfZtZv726oUfsov23ve8ZF5
HDIu9Fk3btu1fl21R+zwx7/ACXZXVty/PW3fQzR/ubx2YR7vu/+g7qyZrN9rn+Jv+Wcj/dpSjGJfNzEbb7hi6Phtnyxsvzf71W185ZDCjthk3MzL/FVNmeSREm3fL8zqtXdPuoWWVE3F9rMqr95VrmlGZ105R5i5Fa7Zgn3l3/JJ91lrZ0e0SFU/cx5Vv4U+9VfRdkw+RFxt+dpPl2/7S1u2qoWWySGN0jVm3L/ALVc1STjLlOunHmL1jbrbvHM1tG8kf8AyzZ6o+Hfi/oll4yu9E1KzWaOGDY0cifMzVbbVbCxs7i8ufl8u32oq/Krf8CrzDS9JfVtYudVheNdrM3y/KslfX5Dgeb99I+F4pzSL/2SHzO2/aV8O+Bvjf8ADW80rStBji1G1l3W80lrtdmVf7y18d/Dm8vJvDuq/ArxOjW17Hun0hpIv3ny/wANfW+i+JrbQ9WiS5jmeVtu6ORvu15l+118M3+1Wnxj8DWarqOmy+fcLCvyzKv3t1fUfFSPhIynFHj/AMM9WtvFGl3HgPUvmaRWW3kZPm8xf71cf4o0W/8ACuuTQ9DH8jbm+7V/xJqFtofiiLxnoj/u7pVfbH8u1v4lrb+IWqWHjq3t/EKWCxFUXd/tNWcZcpcY8vwnIR+S1mu/5dvzfM26uU8RXELq8L/NW/qF4kJe2+bd/e/hrjdeuIbiR/O521RcYmLfQ7lkTf8ALVPzPl/d7WFXLry1Yon9/wDv1RjX940CJt/vVXxGsRGh3fPv+X7zK1Q3C/3z/wB8ir01u6xhPmcN/DVS6X/pn8y1JRQk3s77D92o5G3NUt0dzbAnNQfOFJf+GgcdwzJt6fK1Sqs27Z8v3ahLZC59akST5vegQ5mVfkAXO371Rec7fJimyLtaljX5hl6B8rF+8Pkpg3rgB8bqmGyM/PRbKZp0THP/ALLREI7lhpfs9nvT77fKm3+7S2uW+fZUF0yNcHZ8392pLePzG/121m/u0fETI6rwyvlus0O1TXq3he4v5rf/AFyqipXlPh9fsixzTOrbf/Hq7qz8TTNZpZ2Vts/4HWnuxiYSjzFX4talusSk0yuy/crc/YH+Ftz8ZP2jPCvgm2T99q3iO1giVvmX/WL8teX/ABE1OaS82TTbj/Ctfa3/AAb9eA/+Eu/4KAfD55oVMVrqn2h2/wB1d1aYWP7xGOLl7PDM/rI+FnhWw8D+BNI8IaZb+VBpmnQ20ar/ALKqtfhJ/wAHqVjMPih8ANV2KYvsGqQfe2nc0kdfvbpF5i1V33D5f4q/FP8A4PMfh5P4m/Z9+Fnxctot0fhrxXNa3kiruCLcL8uW/wCA1jXhOXOb4KtSpxgjD/4Jd+PLbTPgjpDwzNuhtY/3Ky/e+X71fT3jL9qq6ure4s3RbhI7fZ5Myfd3V+Zv/BOX4uPp/wAEdLtdiuY4tr3S/L8v92vXdb+NU11cMlzcssW/a21fmZa/EsXTjHGzifteCq3w8Kh79qGvfCLxBa3P2+2aK5aJvlVFZLf5vl+WvKvF2ofCy0y/+hyyR3CrtVFXd/wGvK/E3xFdbGYQ3k0QkT/lncbdu37teI+OPiFc3WoTXM2sNLMv/H02/bu/u1pRVX4eYKkqHPzSPW/it4/8K6Pb3l9Z2cMRkf70d1uaNl+7tXd8tfNnxI+NV5rDNpVtPMSu4O1vcbPvf3ttYXjj4lX+pRy2cNtazRTf62O4+9/vbq4Vdck+0PeWdqsMXy/LG1ejh6NWXxbHBUxsPhge3fCi68PeH7W08T+MJLdEhTcnmMzL/u7a4f8AaI8YeAPHGuPqvg/wwthffduri1dlW4/3l+7Xn2s+NLieFrN7nA/55791Zmk6sLy8Uvufc3zs3zVpHByjzSbDEZpT9lyQMjxZp9/cafcJsVGX+633q5fwrHDDfLHsY7fvqv8AFXqfiHS7OTT5P9J3S/d2/wCzXCw6L/Z128yHaNu7dXp4WtH6tKJ8pjpSqVec6ixk8u3QQozfw7ZKmbUZo2f/AENVTftT5/u1haHcTKrbIfl3bvvVLc303lt/CjfeZq55UrS1iT7SPL7xrTalCp3/AHCv/oX96ga4kjffyG+VWX/2auea6SaNX6t93/ZqRdSeFlj37l+66r/DWUsPzRsRHFWkb0379Sjncu370f8AeqrNvkuNzv8Ad/hVPvUumXELRomxd27c7bvu1p3K2f2NUWFd0f3q4pS5Jcp0+05/ecg8L69d6bf/AOu+Xcu3c9fQXwZ8fbrpLY37GWR925X/APHa+Zo5vLuN8abt38TV6D8LtWmj1RE38KyrXnZhho1I8x62T5hOnXjGUvdP0s/Zl8aQzSb7+8kRfKZ0jk/iavs34HePptvnXN5IdPj/AHrQyfIsf975q/PD9mjWnjVLZ0kuPlXYq/KrNt+9Xsni79pTR/DujxeG9Nv98ELK2s3DO21v4tsf+7XzFFTniLRP0mlioYjDF/8A4KdftueKviPdj4BfBTU9Lht4dy6veSy7Z5N3/LNf7u2vyk+K0Pj9r6XStV+0TfZ5WiTbuVWZfvf71cj4t/aA1rVv2h/FHjbVNakX+0PEVw8CK+1fJ3fu/wDx1a9x0/44fDfxV4VtodS1JWuo5du2aL7q/wAXzV9xDBf2dGLnHmf8x8x7SljY/u58vL9k8u+Cfxe174b+Plie5kjWaVYrqFn+Vo2/2a/Wn/gnHcw/FL4q6HpNpYeWt226JvK2xLtb5W/2flr8sfixN4G1rUIde0GzjSaOVf3y/wAS/d219/f8EmPi+3hCfTda0+8ju72wRoVj+80a7t21WqMTXoUXGrym+WxxM3Uw/N732T17/gujrGlXH7XvhHQNNljZrDwbJZCTb97y5F3f98tXx9YtDcXRheza23fcX/np/tV1/wDwWR/aB/tP9uH4dWrXDJP/AMI5dXF/5kvzeXNIu3cv/Aa5SxtbmZleMM67Fb5vvN/dr6/AL61ho1v5j4vH/wCxYl4ZfYtzepoQW8yqgRJPm3bFb7rf7NT2un+XI7Ikasr/AHWXdtqWHT0j8uF5l3tu3sv3t392ren2McfyImx2+baybv8AgTV6tPD8sfdOH2kqgabao8aXPV/m3/JtWr9nNlAiIwRXZd3/AKFTbW10+3ZU3yI33VXf8tWLiG2kjPnTMm2XduV/lVa7IU+aPwlQ5ia3ZI9z+TlNm1mZ9v8AwGmX+yS3ZLZ1zs27f+ebU+RUuGFs/wC8/usqblVdtQzWt5HG8xRkeNNzK38S1p7GUT06EY81jD1BtpO5GVv4JGX5dtYt98siwyXLY37f3afMtbmoRoqs++QN96JvK+81Y+pQpJI0zw/Kv/LOR9u7/gVbRlyyOiWHjy3kYd5G6xMsybv4lb/4mteAlvCcmCAfs0oyO33qrahBbMw8jajbW2Rs/wB1v7tXLZQ/hpk6Zt3z9ea/Y/BySeb45/8AUNP/ANKgehklJRxVW38j/NHk2oQ/Zmx1Pzb1X7y15F+1N8TE8N6HH4M012+0t89xcRt/D/davbPESw6dY3GsXnywxxNLLuX5lVa+KviRrl58RfFV
zfwiSX7VKzRK33tv8O6vyLmlI/Os0l7P3TzrWNY1LVrppnfJ/wDZqteH/B2q6xcL9mhZ9332X5q9x+Bv7IHiT4lXyNDpTGPfu3N8y19R+Bf2J/B/gPyrzWPJdoU3y7v4f9n/AHq09nGMfePAeIl8MYnh/wCzL+zNBGqeKvE/7q3hTzW2r83+7Wh+0Z8ZLOzmfRNKvdkcMXlKsfy/u67j9or45WHhHR/+EQ8Lpa26Q7llaH5fu/8As1fFvjjxlf69qT3E0zFv7zfxVjKXtCqceX3iHxN4qv8AVLp5nmb5vlVf4dtc7cTSSMz/AHfmpsl5tkO8sy/7NNWYNL99sN/epRibR5ZFmGR1hZ/PbP8Ad/hpkjea4T7p+9upPM2q33jz/doZkk3eW/z/AMCt/do+H4g5ZRDa8i7H4ZX+RqI18uTyU+f+JGanMvmK+R/wKm/xA7Pm/wA/LVgWfL+YfPuMlTx2IWHZvwf4FaorCT5d6Q4Vf4a0Y5I2kWd03bf9il8IvckQWfnRtsdNo+7/AL1dN4VtUuLhN7qpZ/4nrFe33ZfZyPu7auWqTWNxG6chfm/3amURf4Tvtahm0uxhuUdmP3Xqj/ayNgPcqzf3qZNqlzfeHXhkdsbF/wC+q52TWt1vE7pg/d/8epylHluL4Ze6ej+C9StmnG+ZflrsJms5labyd+19rs3y/LXlHgvUlhvPIQZDOqt8/wAtenSXD3FuHRIwG+VP9r/aq4GFT4zmvirHB/wjUqb8rXzrqMiR6mU/2/lZa+hfilJ9n8LmF0+ZU+bd/FXzjq0/l6kyd9/zVlyG9OPKd58PdWmWRbZ9uGqt8aPCvlj+2LZPlrO8D3nl6hE4/v8Ay16h4p0mbXvDaJs37k3U485Upcsrnivw/wDEU3h3Xo5t7bWZVr6XhZNa8P2+pWwVVkT5NtfLGs2NzompvAybWjb5a+gP2e/Ey654b+xzfO1vtO3+9T+0FSPNHmLV9ZPD9xPvfw1Hod5Na6ojom397uZmb5a6TWNJ87e8Lqy/M0W5vvVzV1C9rl4du5X/AIq0fvaGMZQ6n2P+zVqFjrGl3+q2kmWljtlmQfdVlVxx+GK/az9kb4v+Hdf/AGQvh38W/h1cq954R09fD3iWzjkIaOW3IQlgOu5QG/4FX4hfsbG3n+HdxewS58y4VSmc7ML0/Mmv1R/4JmfCfxF8K/2efHh1lbm1v/El3Z61p9vdndHd2EqiQSRDsRux+FfuWfUpVPCHJVf7dS//AIFUPHxFSNOUklvsforFqlhq+lad8XfC211aJWu1jX/WRt96v50v+Cvk8culfHG4jb5W8XX7KT6HVeK/oG/Yu8RnXfh5eaDc7cWN15aIvZf7tfgx/wAFMPhn4g+Kms/G/wACeENPkub1td126t7eJcs62t3LcsAP9yFq4fDWD+oZ5Se6w8l/5LM/ROCnFZXjqn2XRb/CVz8gbxRJGJk+Xd/47XR/C/WkW+WG6ucqz7flrnWk8u3NtNuRlTbTdFmez1VXR9qK27dv+9X4zKPJM+YjZo+zP2CNJi0743a3PAi7ZvDTNuVs5/0iGvJ/289TbSv2vdYnj72tluO7p/o0dewf8E8rqLUvFuoX7gecNDZSR/dM0RH8q8b/AOCi0JX9prWZliyWs7M/lbx1+1Y7/kx2F5v+gl/lUOKD5cW/Q634U69Dr+iRvM/3k27l+WovFGiyQtNv2vu+4q/w/wC1XmHwF8ZPDfLp83CM/wB3/ar27XI/7S09LhDn5Pk//ar8R5Z851VPd1Ob+E+gz6X4qea2+ZpGV/lf7rV9U2/h/wAyxttV3rNK0S7mX5W/2q+cfA9x/ZviCPzvLTzGXe0n3a+pPDOmw3Wl2z2G1921X8t/lWscV3PbymVoyUjL/sOGSSVP3ij5n3bf4qhm8K7fk8qNv7zN/wAtF/vV2cWj/O8zp8u/5GX/ANmqWHRZo2Donys/z/7P/Aa4Iy97mPW5eY45fDsIHnJbNj+63zfNVa68NzNM3lQ7g3zO33dtd/b6L8/kukmVZv3i/wANQto1ybzeifMsW2X5PvNWsZF8qPN/+EfupJpPO8z737rb/DVabw26t51zD8yvu27/ALtemXnh942Z54WR12/Lt+9VSTQfOje5RPm+Zt3+1/dquXm94qnzROAj0vKtsmU1NZ6fM100c0y/vNvy7PmWuqvNHezj857ZfvbnVl3fLVGaxhWRvL2gN/E33lry8RTnI9XC/AUre38zZZ+TCu6Vn3SLu+Wut8FYEVwocNtKjI/GuY8txtnR5FX/ANlrpfAkkslrO0wG7eOQc561+g+CcZR8T8F6Vf8A01M+jyjl+tRt5/kSecp1WZmfcquVK/3a2Le4gZXRXZVZ9qfN/wCg1zGq25Oq3DRL1fMjf3eatafrVtb2f77cxWXbtZfm3V+T8VR/4yTGt/8AP6p/6XI6pStUfqdZDdeWwhmeMqu0bt27/gTVat7rztsW9lmZ2+bd95a53T9Q3fvkRl+b/vqtK1uEkuw+/P73cjbP/Ha8GnHlj7ope9K8jUmjRt37mOTa/wB6P+KqrRzRr/pLxoI5Wba3zKy1JHev8sNtCsSLufdG/wA26nyTOy75kUbvl2t8yr/wKuqj7xjWiUNzysyFFVJF3eZ5X97+FqpfY9t1strn5Nmf+Bf71bUlu8m3Ztx93cq1Ev2OGRfnjVd/z7ov9mvUp1IxjocFTD/aM2GR928wxumz5tvyt/wGotQhs2s1S/8Al2/Nu/iZqv3UKtGyPbRhpF/5afwrVKS3jmdI25C/89P4a6aco81xRjywMi5tdqMHhaRm2/vPustZM0KLM9480Mpk/uxfdrodQs0jxbXMG9P+Wsbfd2/w1lyQ/vPMh3IrLt+5826unm5jqjTj7pjQqjeY/wAuxtyptpI9PDKqRWzblT52ZtrKtaDWm26aEOzLGnyNIvzf8Cqdbd1uI3+6kn3lqZS5S/Zd2Q6RYI0iPcwrtX5F3f3a6/TbWz8yK2+6v3l3S/K1YNmsLW/nJbRo6xfIyv8AL/u10WiyWduy74fnk+58n3awjHmmRKnyxsZlkqW+/wCzTb337lb/ANmq/CryRiW5hVfLTc26sqORGm+R12Ku1G3bflresY0aWP596yfLtb+7/tVzfVeWPvHRg8RDnMnUNJ3XGy2dWWRd37n7tUb7T4Vgl3vtaZ9qsy7ttdZHapC3yfM0K7UkVfu1TvLNLiObzrVmVvmt41/ho9n7sUfV4PER+ycBcaNC3yfZm/h3x/dasbVNLf7QyTI0iqjNEzS/davQdS0+bavnfKyurbVSse80W1W38mGzW2VpW+9/49WVSj1PqcPiuXU8u1zQ/tFudm0bvm2s1cNq1rDb3R3wqu6LajR/dr17XLGGNXSFFeJW2LJIm2uL8RaTtm2WyY8v+LZ8rVzSjyy5uU9KWOhKHvHE3NujLs2bR/Gy/wAVZ9xJvk2SIyeZ8yK38VbWqWrpcC2Ty0/vs1Zd1bw25NsjszRv8+7+KunDx5veR8Zn1TlgVwqbm877sf3FZ/mWiO923CTeZG3l7vv
fwtUd0IoVZEdUP92qzRzXDPeJ8nybtyr/AA17uHjGUD8mzKXNP3TpdLvNsgLurD/x2up8NapCzJv3I38bSfdrgdNaRYd6Pvf73zPXQaTeos7pNCoGzajbqyqPl0OTB4WdSZ9FeELjz/hrDcu2c2UpJK+7dqxtN1SBoRCky75Nq7tn/oNXvAsjn4OwSZJYabNz7/PXGaffNt2Xn3vlWJlr9d8ZFB5Hw7zf9AkP/SaZ9tisPJ0qKttFL8jvPtzSS/vo2Pz/ACf7VbGntDcYh37VWL5P96uJtdYe3aLzpGKtKqbfvbf9pq6O01aFd/kxeay7WT5tu3/ar8G5bdDnlg5RidVH+7j2TTRgNt+b+L/gNV9Qt/Lh875VVvut/FVZNShmj2PlZJEoW8Ty/M3rtmTDM1aR5ubU4sTheWN+UpahNvjif92VjVll/h+Wsa68ncH3/Lv2/K1at5cQLZsjooSN9qsq/NWJeSPNIH3xhlX7uyuv2cJanj1JSo6lW6vprWSSFEVvm2vt/hrP+2PHy6MXX7m5/lp80iWcjJvVmk+b5j/FVKS8e0kV5kWTav8AqW+5/vVhyolVJT1HzTPclER9+35nXf8AdqnI3mQPeZYOrbdzL91qcuoTXUoRE3N/DGv8VZV5cOofzdw+f56JU/d906I1I815DJFdLpHf52/vK9bGkzbRshh3J/Dt+9/tVh+e8Vwuzcm7/wAd/vNW1prXJlXztpVU2p5bVxVo8ux1YWUZVToLHfCy7H3xSMq7W/8AHVrZtbyGzj+eTyW+ZW21z1jbvcSLbQ7ZFZ1+X+7UHjzxRbaLobfabzykunZIvk/iWsMPh5YutGCOvF42lgcLKozk/ih48v8AUGmsNHvNiwqzIqv823+9Wt+zr4q+1aXqUOqvCvk7fu/eZa4hYbaRWv7x/vK33U+aSsb4f+Jv+Ed0/wAQ2fnSb2fzYlr9KwtOOHoxjA/HsZiJ4qtKc/tHV/EXx9Db+IrqPTXba3+oaT5tv+7Ulx44m8TeGWtvtnmM1vsaFvu/drynVPG0OtSx6qjxujf6pf4VqnZ+NLzR4bh/tO9G+Zlb+H/ZrWMZfaMOXljY57xp4Zv9FYeHtZTat1un05l/u7qX4d3TzWM/h7UtrM25oPk+bdXD+LviXqWueKvO1C8kfy2/dbn+Vf8AZrW0XXNl9Hre/bL/AMtVV6qXJKRUecf4i0W50mR0kfO52b/d/wBmuD8QSI0zJ5fzf3q9c8ZW9nrUKarYP8zLuljryDxtDNb3pR/k/wB2lKMTSPwmZb/LcL91tz02aRFun+6GZ/u/xVHpt2i3A85MfP8AJUszJJqT+S+4bv7lTzcpfuiMybS7hs/w1XuIXjjbZ8zN83zVa+zlpFmR/vfc/wBmlmhdfnkfe1Vyi+EwbqPbJvd9x/u1HIu35CmK1NQs0jf59uG+7WfIzqv+7/eqYFRIFXHJpfu/OvVad96Ty+opu5P8mgYrSPu+Y4+amt/fBprM+Pubv9qlb5iv8NVylcrHFvMHNWLMIkZbOHbj/gNQrJtX7i0I5jk2P81Ll7kiyLtbzEf5qns5JGb+HctRzSIsfyfxfxLV7w/Z+dIsju2Vf5l/vVUdifsmzo2xpP38yt/Ftat7+0o7e12PxtTcrLXOeekcmyGH5P46g1rWkmtRDDNs2/L8tHwklHWNQ+2ah533Ru/75r9NP+DaXQ7O8/bi0HWL+FtlnazTpNv+638Py1+Xm52kXZ8y1+rf/BuDpM0P7R0GsI+1YbBtjK+3c27/ANBrbCx/f8pw5j7tA/ps0rUY7nSVuIZg4ZMqy1+f3/Bf/wCDFt+0D+wF498GtazTX1nYNqOmxr8376H5lavtLQdcubfSVe5h2Lt/4DXjn7S2oaJ4i8M3+n6rbRyi4sJoNsn3W3Ltr0qmH5YTPGp1nTqxkfzX/wDBPv4n3kHgCXw3NcyRSw/Kit91dv3q+ktH1K51Ngkj7/L+Xc33l3V8d6v4d1/9lT9srxj8Fp4dkcetyPZxzL96GRtysrf8Cr2/S/FXjCdg3k4Vvm3K/wB2vx7PsCqWMk4R1kft2S5l7TARS6HfeNNak0zfZujJu+42/wC9XhXjbxRcx3E1mkyxJI26WT+Jq0/GnjzxI0MiXj/vY22xSK+5VrxzxLqGt6g0syOz+Zub79cVDD1Yx946cViox94s+ItbtrxWm+VnV/mZX27lrndS162jzsh3rJ8v36y9RuL/AHohfbu+8u+sxVubpmKI277rV6tHD80uaZ81iMZJz9021v3Wbe83+8rVds9STyx8mxVf5KwbGzv2X7M77trf3a6G10dGZY4UZn2Y27a3kocpz/WJS903YdchvIxa22mqzL8ryb/vVj6tY/Z5C8w3bl/hqy1reWeLZNysvzNI33aZrEn2O1KXM0Zdk+Zt9cs4xpStE29pzRMZm8tVdEb+9tZ6RtQS6xvTG35dtV7rUPPt1SF1A37fmqD7VCqsic/JWnLOpH3jmlU+yTSXO3cnzbPvJTdztN58O5k/u021WSSFvvbtv3WpGZI38nzMj+6v3anlZjzc0TX0q8JBSSFt38Tf3q05LzdGX3rtbb8q1gRq8Mfkw8NsrVtWS4ZH+78n3tny1zVKMPiiXGpP4TQ0mxSa6CIjGvafgT8J9S8W30cMNg2/cv8AD83/AAFq88+Hfgn/AISDVIk/56fd+f8Air3rUPi5pvwv8NJ8MfBu1NSuLX/ibXnm7mt1X+Ff9qvncbOpKfJT3Pey3Dyl70j0bxJ8WtB+F+kxeAPD1ztu9ipcXi/eVv4trVW8L+JLHxFY/wBlQwyR7YGSWNpdzKv97/vmvljxR8SPM8RS3szsfn+9u+61ei/AXxlbah4sT7ZeMqyIq+dD83y/7VdFDK6dGMZH1uHx8f4VI4b9qT9hfVdHjPxF8APNdabfNvWOT/WK38W3/Zr50tfDnimPVF0SFrpWkfbtjf5lav3Q/Zd+Cfhv4xSN4e1J7O5s5NNkb7L9nZpF2/8ALT/ZrmfG3/BG/wCCGh/Gq0+LV/4kXR7O1ZZ5dH2My3TL8zfN/CtfZ4bGYf6rH2j2Pl8VlmK+uSdHm5T8aP7H8ZeCvEU3h7Xtakja3+ae3uH/AHke6vob9kD9r7w9+zA1/r2t63eXiyRfuNNtW/1k38P+7XOf8FbPCNn4F/4KF+OrPTbSOCzvILG6sFh27fLa3Vd3/jtfPVncTRtsRF+X+JvvV6VTJsHj6UZT6nhQzrHZZiZcnxRPRPjn+0H4/wDjJ+0TJ8ffHlypmvtsEUcLNttbeP8A1cdfdXwP8Qv4q8A6Zr03lyzLEsXzf7vytX5z/wBnprmi3Wkv8zsu5Pk/ir7c/wCCefie28YfCtLCbafsq/Pu+9u+61ep9VjToRhD4YnmU8bXxGMlUqvmlI97+xwxI6Im8xtulb+7SWtvcsq7XbLfLt/2au/Y5biSJ4XVGjfY235V20q2t3CyJc3imVW3/wCzV0aMT0JVJhDZ3MMhfqvyq+6tJdNf7Ozz7Qdn+rVfvUSQzTQq9y6p/st/Cv8AwGr0apCsX2l5Jf
MbZtVPl+9XoRp/DE2oVoop2dncttcTR4VNqbU+Zf8Ae/vVLJbOyvD8z7l3bmT7tbOn6PYLIJrNJA7Ozyr/AHamm095oXSH7+791/u0/ZxjM9jDylzcxx+r6SnlmZLbcvlfJXJ3djNCu+HzJUj/ALy7ttei65psMkbTIiq3yqkO9vlb+L5a5bUrU25ZIpo2XYrNIqfxf3f96so0+WV2exH3onF39iJlR/sf75k+Vv8AZq3Cgi0J0I4EL8Z+tbdxpttcMl5NbNv2MyfJ92s+5hit7eSJ8lQh3ce3NfrPg6n/AGxjv+wap/6VA9TKafLWn/hf6HhXx8864+H95pNvcxq940aRbXbcy7vmWsP9nn9iSfUJrTxP4qkjtLPazr9ob5v+BU79pD4oaT4B17RtNeGNyzSTuqp83y/d3Vwt9+29r+rSQeG9NvPJt40VIo93y1+RU5TpxPyLOoyqY6UeY+zm8U+A/hzocWieEobWJ412+ZH/AOzV458bv2gNSjsbjTbaSMBvuSRv/rP9pq4q1+IQk0NNVvNY3Oy/dkl+b/gNeF/Gz4xPfM0drNnd8u3+6tTKU5Hl06cYHM/FjxxeaxqUjvfs3zV5pdaghmb99z/H89R61r1zfXUrvMxO2sxLhNob/vqlGJ08vuFuS6eRm2PgP/E1FvI6gd9tV1b5V2fxVatLZ5Gx/wABar+ySW1kQqrn5m2/99U7a8ap8m41Yt9PeGPyf4lqOZUjYohZT/tfdanLYrm7CKvkr5z8t/s1D9q3RqPvf32qTzP3bfdVmWoYVmmk2TTLto/vEe9KBoWrPMR/Cu/+Kr1qu2M+duB3fJVKzk/d4Sf5v92ryvuj8z+Jf71EthfaL1rskjCZ3VqQxwx4SZF3/wB6sLT7pGZ5P/HW/irZs5hIq+Tt+b/bqJf3Q5jet1SbTXR0yVX5FWuFuL6aOaVJnUlX+7/drrYbx7LdC6NsZa4TxRNNa61LC6bQzfJVe5yExly+6dR4T1ZLeRHTd8zfOtey2d5Mvh2F0+f5Pl3fw18+eH9UeGZN6bm3V7Z4buPtHhuN0f8A2k3VRE+bm5kYnxSvJv7FmR0bKru+Zq+e9ZkdtQdtn8Ve6/Fq8f8AseTCbvn+6teCXs264ft81LlRvT3Ok8Et/pip/DuWvd/DbvqGisg6bPusn3q8C8Gs8c4fzPlr6G+HLPcaLEnzKuz/AL6olsTI8a+N3hN7O4S/httqN/FR+z34lXw94si87lJv3TR16l8YPCsOqWL232bYqpvVq8E0ye58N+I1m+60cu7a1HwwJjLmjyn1pqFnCrM6IvzfdaOuS1yx8uRf3W/crN975a6Pw3qn/CQ+H7TUofnZol+61VtSsdzF3T5Wb5KI8pnLmvyntf7Bs0v9jeJLUnEa3Vs6JnO0lZAf/Qa/oz/Zy+Glh8VP+Cffw+bQfIOsRfDy2g067IwUlFuB5bMOdu9a/nN/YSSaPTvEsc0SqRNacr3+WWv2P/4IZft32euaXqX7LvjHWD52ialcLpIlkXCxeYx2LX7XxLVqUvCHJJw/5+VP/Sqhyxp0qladOps0dD/wSS/bYXx/8RPEHwu8cQw2GvabqU2k6tZru+W4hkZflr51/Zu0LSvEv/BXi58M65ZJPZX/AMQfEttd28vKvG6Xysp+oJqT4paEf2CP+C0Gs+IfGem6hbeDPiVq0epadfabEqotxJ95fm+X733qxv2d/Fh0z/gqefGlphQnj/XLpQ3PykXbYP4Gujw/hF5bnNWP2sNL/wBJmfVcBVWsuzOjU2hTl91pf5H5W/8ABaz/AIJz65+wH+294m+GttpzJ4c1mRtV8H3Cr8klrI27y9396Nvlr41+z3OnybJk3N937v3a/qz/AOC+n7CHhT/goj+w3f8AxZ+HtvFc+Mvh/ZSappDQL+8mhVd00H/fO5q/lv1a18lnhv4eGbbu/iVq/Gq8VUiqq6/F6nymDrulP2Utvs+aPpn/AIJkXjT+ONbjJzjRCc/9to64r9v+xa6/aM1zcoJFpZtF6/8AHuldd/wTEtltviRrscS/INAwD/22jql+3vo89v8AG67161wwltbeOZW6cRLX65jnzeB2F/7Cn+VQ1fL9bfofMnhnVJvDfiCJHb5Gb+9X1D8O9YtdZ0XZ9syzRfdX7tfNPjLQUs5BfwptGzcn92vSP2e/GiMwsLl4x/CjNX4rKMoy5jq5Y1D1DVLFFV33so/vfxV7v+zP4y/4SDS/7HvLlvtdr8qKv3mrxbVLdJUf+JP4mjrQ+EXi7/hC/GVvfzXrQ20cv73/AK51GIp+0pamuDrSoVY2PsS102a3XftVU3/3v/Hmq5DY7vKS9uYWWT+KP5W21f8ADa2eqaXDeWj+Yl0iurf71X5rF/tT+dDH8qtsZv4a8aP2on2kOWMIyMZrENNNcpzM3yptf/2Wlt9LmaT99bK7SfM+35fL/u1t2FvcrIm+2/dN/rW+Xczf7NTx6f5fm/Zn2su7czJuX/erqpx5SpRhL3kc/JpPmMqeSobYu/c/zLVG80G4RmdE2ur7naNPu11dxp6Md8aKPM+bc33WX/ZqrcWaLMszoxMbfJteteXrEv2fNvscNr2jw27bLYfe+bbXI6tZ201w/nOqlf7tejeIo/LV0tvur8/+7urg9cX+C5SPfv3Sqv3mrhxETqwtNxlpsZUy7NronzL8v+ztre8GmX/ShNFtYMoxtxxzisGSTbb7LN2WNW+9I3zLW14DkaRLrc+4goC2c5ODX33grGS8TMFftV/9NTPpsqjJYuLfn+RS1S8c6jcxoR8s5VAG6tUFveOqyzPN95tyeW3zLt/vVU8Q3ccetXLLF0uWUr75+9UcdxZxr5c0yp8zN8v3q/KuK1y8QY3/AK+1P/S2U5fvZerOh02+hiZH87ezfNu+6tbFhqSNMyQzbiv3F+7trjdJuIfOl3uu2P5f9qtWO8SO3i2fON7eUv8AEtfOxpxK5jpYby5W2U20OxtzM8jNtVqtrdItqsPks0n3du/+GuSW8fbsSZYxu+dZP4f92rENwkimeZGRV/h3blZa6KcvZil73wnVf2l9nWSN3b5W3LHH/wAs6hkuobi4dPO3DZuij8r7zbvm+asdbqFYVENmv7z5vvfeqaPU3877NNHtdX2p8v3lrshU6nPF+9yl9FhXek3mMPvJt+9S3kiTSIXmYbdvzVUW88xt6chfuSf7NPhkhZBsf5t/9z7q11U+Y0jThLcbLb7b5vMg+Zf+Wn8NUZrW/uJnxwW/2/vf7VaS+TcKfOkk27vkaR6j8l3lH2l8n5vmZq29pFnTTp8xktb7Wi+8LdWZXb73zVFbr5kcT3KSebH99vvVqzW8NsoSFNir8yKq/KtUJp0j+/OqM0vzt/e/2axliPsm3seWXNIfDHCqq8kflq1XNP1JGmTEysFf5V+7urMmvIWxvnVEj+9tbatQ2+rWCs298v8AfXd93bUU6nvcxliKfNEsaTH5jeTI6lvN3I38P/Aq6rT/ADiv2nYp8v5XXbXI6DdWrXw/cq0K/cZfl/3a6XTZJobYpvyixfJ/e
3bq9mOFPksHjuV6m5NFcrG6Q2ak7l/75qK4Xy13xbWG/b5bfw1HHcJDGv77D/3ldvvUbnkZYftKn5NzbazlThy6H2WBx0YxKGsWrzWjJ5LM7fN+7f7tZGpaXDcRt/rFaNPkVfm3N/eZq37eHbuM8Mm6Rvnkb7tI+jp5jPDNx/47XLUpxie9RzCR59qGjzMuy/SMp/C1c3rWhwtG++Ndm/7rfLXoN9o8y3CvcopRXb5VT5WrN1bS0+z7Hhyu/wCbd91q4akYfaOx4zqeLa1oNzDOERF2/wC0n8NYOpaO7SeZbJIyf7ler+INHhZm+9tX/VN/E1cxqGivIj+SmI9u5P7y1zU6kPhPMzHEe2gedSabud5pkYFf71VPsfnTL53Cbv4a7XUvDcLRiRPut99Wesq60eaMb4YVO37td1PFRUeU+MlhZyqmE1uI22Q7VZfv1cs7mSOLe8Pytt3r/wCzVNLZ/ff5Vaoo7d1Xe6ct9/8Au1MsR7h7mW5Tyy5mfQ3w/d5PgbE6Mdx0y52nvnMledaHs+0IsKyMjJub/er0T4flk+BUXmDaV0u5BAPTBkrzfR7xIrgyfN8ybfv1+0+Mkr5Bw5/2CQ/9Jpn1uCwUa3MpLbQ6KzaS33TO7B/9n5q6PTNSfcJndnXZtdWX7tcYt0I4xv8A3Urffb+6tbGjXU0ca2c028Q/fb/er8HlsXiMvhGB2NvfQtZpcwuxaR2/dt96pZL5/MPnWys/ytBGv93/AOKrI0/UHZoVe5kxJFslkjf/AFa1ejuJoZEuXT5N7bv7zf3aujKcZe8fMYzC+zEupHYB3fcW+bb/ALNZ1zFCv+kpM0L793y/Nuq9cMFk/fP8qxfKzVnMYbiNLkeZiNWr0cPKEY8x8Zjo+8Z+qIkyvCk251bc6/3t1ZO65t4fJRIy8j7fmrXuLX7RcCaZ1Yfdba+1qpLazeQ/nPtlX7qsu6meVGN5cqKK/u5ilu+197DdH826qV1C/mbEePYrfvVVdzf7taFv9pdvO+b938y+X/FUN5Cl1I6IjIfveYv97+61ZVJcp10/eM+GNIGCXMLSIr/Ku75V3VrW7Iy7O2//AHaoNC8mwb2Dq/yzNV6z/wBVvmdVZV3O2371cU/fnyx0OyjPkgbenWd75rzJu+Vfn2/dX5vlavL/AIja1f8AiLXDc3kzOti7LBD95P8Aer1TVtesPDPw9ub+5eNb24XZEq/ejX+9Xh9xq25pnfa7N/rf71fVZTlqwseefxHw+e5tPG1/ZQ+CJLca1Dx9mflV+f8AhauB8QaxNayav5N/+8ktWb5k21sXGsQ/an875dv3WkXburjvE+oPJcSOU37lZNu3+Gva5keDGXMc74X8TyX2mvZu+3y33LVbxJr01vZvDC+3cm165LQ9S+y63cWzuqrv+bb/AA1Z1nVHm3Ojtt+7U/Ea8v2jntSkK3hmL7h/drQ0fxE9rsff9779Y+rXEzMfT/x6q0Nw8Mmz7o/vVXvDPSl8bTTWqIk/8G3atcR4q1Z7i6Z3m37XrO+3urDfM33/AOGq11cb2MzPv+ep+yVFdSezkjmmCdBVv54p2fft3N93+9Wfp94isf3K/e/iq5DcJNcStvXf91F/hWqiEi6sPnAp8zL97cvy/NTrhvOJR0bd92ls5t3yI2P4an2/aF8nDfN92iRJlX0ICj5GO37tULyPZIX2bS3/AHzWpJI8LOm//gNUbqNt29ujfw0cvuDiUWjEY3r/AHf/AB6omVBtf+7Usy7JsO7fLTX+6amBYxsK3+9TY1SRfmFO2p5fzPxtoVkA/eL92q/wmhLbxou6SnyRoyq+aiWfbJj+KpFkRmbZ8q/wU/hMyGRvmCfMK19PuPslq+yT52X71ZTHkO/PzVJJIVX5Pl20vhAueXMq+cz7T/HuqrcR/vPv7v8AdomumkkV3fK1Esg3BE+7Rze+KI61jMjq7v8A71fr1/wbr6GF+Ik9/Mv+rtY1Vt/3vmr8hbFWkulR143V+y3/AAbo2MMnifU0eNUElvCu6Rvut/Cq11YL+KeZm3N7DQ/d+G+vG8M9Wc+Uvy79zbq8H/aCh1K40+5hEMgDJt2s3/jtetabrW3R4ZvtKv8ALt+X+KvOvihrFtNG6Xm5EZGXaqfM1etWlGUbHz655H4Uf8F0PgjeeD/HnhX9pnRIdzLL/ZustGm3y/8AnnIzf+O14z8P/ipqWuaFDZ2t+vmbdz+X8tfp3/wUi+Fvhv4+fA/xX8N0hWaa4sJGsNy/NHNGu6P/AMeWvxh+B/iCbQbiXwxrULQ3lncNBP5n3lZflZa+Dz7CxrR5o/ZP0HhvHyivZSkeo+Ntems7x7OZFdmRX/2f/wBquI1rxEn2fZD8rf8AoNdL4sv5ri1d0jVw38X8S1wGqSbpN77lG7+Kvmox93lkfS4itzakEl0j/vl/4EzVWs7qbzC7u2yopo2bckKfNv8Am3PSSXDxxhNn3U+fbW0Y3PHqS5joNL1az8z/AFKsN/ztXT6fq0PmK8KcbNu6vNPOdWZ0f5fvfera0G+nuF8n7SxH91WqpU5Sjy9DOMonS+JvGlhHCkKQ7m+5uX5mZq5TVr57pdjou+P77Vt3Glww/wCu8sbvubfvVlappVsvHkbhJ8qbayjGJpUqS+Ex1keNvs2PmX5qmhyyqMr5rf3qmaxKqnyfN/ufepjW8xXfNt++yuq1fuSMSb5Gz2Zflp8MTsfLSLPz/wAX3ahjjdJmdPu7P4f4astJbSMqu7Db8yMrferP/CVH3S3Zw/vGz/F8tdHpOhvcXUNnHD5u75vl/irDs1e4/wBG2Km5Fauz8LskNiPJttr7/vbvu1wYyc40rxOnD8kp+8dVJqln4B8Oslncr9rZFVdq/NG1cHrXiSaxie5v5mNzM7PLM332b/4mtLxE2pahJ5yWy7Y1+9/eavK/iL4uTQ9Sks9Sm33iou23j+7H/vVz5Xl1Ss/5mz0q2KlGPLD4S5Lr1zcTPc314w3P/DXf/D7xF4k8MtBqulWczN95dybVZa+en13VdVvA8shyW/dKteqeBNd+Jul6cdSe+kks44NryXS/u41/3q+ixeXVI0uWJhTxWIpS5oM+w/gd/wAFfPGf7H3iKy1RfhbFqDRov+kQX/ltt/iXb/EtfYPgf/gv/wDsX/tDwPZfGfwvdeD7hYFi3TKTHNub5tzCvxK8UeNptc1BpjMs25PvL93/AIDWV9smuG+fbtb+7So8NwrYflm3GR6dPjKrhf4kFNn0X/wVd+Onwo/aG/bq8Q/EL4Faw174Yh0uzsLC6aLarNHH823+8tfPI3huZNy1FGr7lfzF/wCA1Lbwo0jpvbb96vrcNRVChCn/ACnxGKxEsZip1nG3MzqfA+oPa3SoNvzJ/F/DX2D/AME69DfT18R6U9s0aRtut/n+VfM+6y18XaLdQ299E7plPl+Va/RH9h/w2lj4FufEKJGEvoo03bPvbf4d1dfN7vKclH+PE9ks7P7Ooebc6L/Ft3Nuqe3t7OOTf5y7Puouz5v96rl1aw2Nos1m8m1U+dlp8ln9om85HbKpu2/w
7qdOPunr8w/TdJTzXQ3KqjfNEq/erRtYUkuG2o22NP4k+bbUelf6tUR+WTa+162NO094pt7ou77sSt/FXXTjbQ2oykP0ezhjtVdEZ0k+bzF/i/2qufYZrhWeZ13Mu3y4027f+BVdsVSSNXZNn/TPZUqKiqs3zJu+5Ry8vvHu4eUzmdY0XaoeFGx8v75vux1yGoabCyzpvxGzs+7Z95q9D1RfMt/9Yynd80a/d/2WrjvEFrjMPy7vv7m/iaspS6nvYVc0oxOaax3bfs0TKNi/L/eWsHXrdYtTnt4yFGQAc9MgV1Fn9p85POf5W/8AHa5vxYEXWbkwtuXAKk9/kFfqfg3K+a49f9Q1T/0qB9JhMP7Jyfkz83/2zvH02tfHTWNKhvWa30eKOziXZ/F95q8z+Gun3OueKLazQbjNKqqzfdq1+0Brb6p8fPF04fcs2syfN/u/LTvAMkOi2dzr1y+Ps8X7pf70lfkPwn4bjeaWKn/iOu+MXjz7DeTaPpV5ugt08r92/wAu5a8f1zXJtQkMzuzNTvEWuPqF89y82fM+ashm8xi+/NL4viMOUb5j+WXfrUkK+Yi92/u0kcLyJn73+ytaulaO9ww2Qtmr+IciGx07ciuXZt1bWn6SzSb0Tcu2tzRfB7rD5zwbl/2q1W01NNVvMRc/3a15eWPKY/FLmOcuLN7VS4RtzVmzruk3gfdrX1q6RlZ4Wwyp92sCaWaSRn3rtrKUiox5iG4mkZhsRm/2mpbNXVmR0/2makaT92u8MWp0Nu7HY7712bqPhH9g09PX7SypvVVX+8lbDafM1rvh+Yt9+sXT7jbPs8v5P42rpLOS2EOzftDVXMTLYzPsc0LDzE2urf8AfVaeks63I+78r/OtLdJDx5PPz7X+f7tNjkh3L5Pyms4+8HLHqei6Xoej6tp6bJtpVNvy14/8YNPfR/Ewtndl+X/erudHuHjtmSzdlZdyv8+5WrhfjJNcTX1tNO6lvK2s1IcY++Zfhi58y62TP/Hur3jwTNu8PiPzFYLt+WvnTQbt4boP8pr3X4bXf2rw7M/zDyU3Oy/eq/hCXPsY/wAWtQdtLebqjbv+AtXicm+SYu235q9M+MmpbbYW3n8Nu+X+9XmKcMKZdOPLG50Xg9Q1xGm/Zu+81fQ3wzkeTR1hhTPyfNtr5/8AB8byTRun3d/8VfQ3w3t4Y9PfaPk8r5f9qjm+yYy+Mf4wvIZIz8kgMa7NrfxV4d8QvDL3DS6lbWzZVq9j8TR3OoTM9ykg+fbuaslvCr3y/Zns2f8AiWTZTjyGXvc/MWP2cdck1Dw7LpT3i+Zb7WWNv7td1qVhuje5R8n+JVT7teT+BY38A/E6G3mdhDePt8xvuq1eztcuF2b/AOPb838S1Pw+6aS5Ze8ep/sSwywW/iZXl3KZrQp7fLLxVT4X/tKeL/2Z/wBqnVfHnhm8ljFv4hmaeKNv9bH5p3LWt+xzapbJ4meOYMJLm3baP4OJPlrxj4vgp8YvEUpkwx1mcYP93ea/aOJkv+IN5Kv+nlT/ANKqHDDm9vI/oq8afD74W/8ABZf9hDSvEXhnVhF4m0y1+1aDqqsvmQ3ar91tv3dzLtr8/P2aNW1zwB+1rZ3vj7TrifVLK71WDWLeGLMhuTa3MUuF9Q7E49q80/4IH/8ABTC5/ZL/AGiYvgJ8SdY2eEPEk+2CaaX/AI85m/8AZa96+HGNd/4KZ6jNpl2TBcfEHXJWlgj8wvb77pn2juTHux9RUeFNWdTKs4ozfurDyt6NSPtOFqUaeBzSsvidGX4KVmfcn/BPr4z2HinxBefDfxBdE22u6cYfsrtuVm2sv8X+zX8zH7UXw70TRf2jPif4M0dFa20Hx/qlratH93y1uG2qtfvx49h1T9jb44WXxavrdbDQZory98OtM/7xLVYW27v9qv56de8ZXniT4yeJ/EOpXLSN4g1y8vJWZNvzSTM3/s1flmNjLD1H/LI+Cy+pCtTjzfFG56f/AME1bO5sPipr9tJjYNBbaSuD/r4q6L9sXRIdX8Y62hhYyiG2MbKM/wDLJaq/sCWhtfi7rpOfm0E8s2T/AK+Kt/8AaHla5+MGp6YzrseGAYbt+5Wv1nG/8mPwv/YU/wAqh0yl+/b8j5LmtodW06awuUZXj3LuauY8GapL4T8VGGZ8bZfl3V3HjDT30HxhP8i+XcPtRV/u/wB2uF+IGlfYNQXV7ZPk3/O392vxeXve6dlGUubmPp3w7ep4g8PR3jvhW2/Kv/oVVNQheO4kmh/hX+H+7XD/AAF8YPqGlrYPNv8A9lm2/LXfaxE9quxE3tt+8r1HN7tgqR5Zcx9Zfsb/ABSfxl4Jbw9eXO6fTX2eX95mj2/LXsPluxdPL+Zvu+dXxB+y/wCPH+HvxSs7y8m8uyuv3V02/wC7u+7ur7qlKXE3mJNHLbyIv7yNflk/u1w1o8srH1WU1vaUOWQ7S4Zm80ImV2VPbJeMpMKfMvy/7y1ZsYYVj2bNif3d9WLXTUjXfCjK+/am2iK5j1PaS92JnXdrDJhJLXarfxM+1V21QvLV1kkSa23ity+t3l2o6K4VvkXb92q11Z/u33zttb5vMrojT6oqNWXNyxOG8RLZxyfacNmRfvLXnPiK6htbgw2yMXb5kaSvSfE0CKyPCkgSFG2RyL8u7/erzPWrGaEn/Vum9tvzfNu/3q4sRGHKephfeOcnWa3uNkL7xu+eRf8A0Gum+G0iML5Yx8u9GB9c7v8ACuUure23TI9zMiKu6VVf/wBmrf8Ag87GG/jJyFaIqc5yCGr7nwXa/wCIm4JLtV/9NTPfyqP+0xl6/kZfiWbbrl7HK7H/AEhiMduazobwrMwhf52T+JN26ovGN3cL4nvoowNpu3ViW461jyakJpvO+bbGzKrL8qtX5lxPHmz/AB3/AF9qf+ls5qvu15erNuORI9QzvX5tzfLVyPWIfM2JJIzL8yfw/wC61chca48K/uXwyttT5vvUN4mgaHY8qpNs/h+bbXgxp8xh7Q7NtY2qJppv3rbt6/e+b+KrNj4iRnb9yymOL91Mz7V215/N4gFxGib/AN60Xz+X8u6mf8JFNDt+dn3fKnz7qJU5S0COK5NT0218STblT5UWPdvb+9/u1Ja6tNIzu9yu1vk3L8zbq81t/FkLKttM/wD9jWp/wk7283yJGw3bf3b/AHmrf2cuoU8RCUz0nT9Wha4EKOzpGm3+7tX+9VzT9YSS3k8l2Ks33d9eZ2/ix4ZXMG4rt3ff/wDHWrQ0vxM/meek23b8zK33ttZ806Z3UK0JTuekfbHmjVN//LL5lb+GrEkltfWfmQux3fd2/wB2uJsfE9t9ohd7zajL821tzNV+38WJbyLsfYnlfeb5W20qeI5XoejTj7xuXnzSI+zcv3fvVk6lNbSTR2LTK7fNt+Xbt/2t1Yt94geQM9lMq7n+Ztm6sW+8aeavyTN+7fa6/drKpU97midsY81K5ratrXmQtbIG+VPn3Rfe/wCBVlRa55wi8mZmTZtRW/hrB1TxVNc7kR1Z1T/V79u3c1ZU3ibbEUNzsRfmRv8
Aarpw9Y8vFU+U9M8O6pbbYrZHUv8Ae/3q7DR9W+7C8yoGX5mX5vmryXwzr0LN8833f4f7tdrpuqboWRHX5m3V9nKn7vMfklPFTjI6231Sbz2SaaRfn/e+Yvy7f71aSTJcSRBPu/e2x/LurlrS83eZs8xl+6jTVrWt15kavM7Yjbcu37q1jKjGWx7mDzCrE342+0XT3PneaGi2su/7tTeWkjOiOu3725U/8dqto86Ru8z2ysv3X3fdZams45pLhb+Ha6L91furt/irzqlO1z6fD5lLlTKF9awy3Hzv/D8rfdrF1C1s5ISly7EN/E1buofvl37F279qN93bWXcKkcyzP8z/ADLt3V4mIjyyPYp4z91c43WtP2Mu92f+4uysebR1uFWbZsGza7L91q6y+jRpFs03D97ubdUH2G2TciPu3M3zL/FXn1JcoU63tjg9U0F5GaZ/3S/elZvmXdWLfaLtgZ9mG+7tWu+1O3mjjeF03KqfKrL95t33qxdW0ubzC8yKFhT/AFap93dR7T3Tow9OMp3OH1TR0t5Mum1fl+WqbaS5Zk2SJ+9+7t3V2OoaeJGXz02fw7f4lrOmsU3b33fN8yNRzS5OU+xy+nCMT07wTbfZ/g0tsw6afcjBP+1JXmmm6fM0e/ycsr7VVm2/NXqnhWHb8LVh8sD/AEKcbR9XrgdG0/zPkhfHz/Osj/dr948ZZcuQ8N/9gkP/AEmmejlKjz1v8X+Y3T7GaVk851cqu35vu1o6bbzRscOxDfKm77tWIdNMcEcMNtt/i3f/ABVWlsfOzYPM2N+1vL+9/wABr8EjU5TfGU/c5mSaWzyQqVRUDJt2yferRVXjs1R3VV3qz7m/h3feqvp+l7Wb/WMPuurLV6GFJDsRI2+7t3fd2/71dXtGfDZhLlG3Vim24mmnzt+baq1R8x1VHhh3+Z8m5V2/8Cati6bzGbZHJKrJtb/Zb+7WZfLM3zzOzKu1fLZtrbq7aM48tuU+JzD4uaJl3W+a8e2Ta5V/3W19rVn/ALmSR5vmDx/MjK3zSNWrJCjMkybWLPt27Pu/8Cqmtv5l0yJCyMz7VZv7taSlzQ908fl+0MZXt5vtk07R+XtX5V/1n+9UV5HbXUY3+ZFt+8rfdZq0ZrRGhXy9u/ft3bGakWxm8vZ8ruvzfu12+WtcNSTlqjro0zHa1RZkSF2H+z/Cv/Aas6LYPdagltNM0sXm7pd38S1alt4W3u/mJ8/3m+81P8A2r+MtW1+wsJlf+ybJnn2t91v7q/7Vd+W4f22J16Hl5xiPquG5Y/aPOfid44e81SfSrPaI7d2VV2feb/ZrgdLvB5k1s8y+Yz/dan+KLpLfxNf23zEtu+VvlZa5KPUraHWvs1zuUbN26vs4x9nG58DzTlPmZU8Xao9rM+98fPt3N/D/ALtY2oap/aWn79/zbdu5Xql481aO81BpERm3bvmrmrfUJrXd3T7v3vu1EZfZOj4jl/E8n9n+IpJId21vvL/tU2TUppoT2/2dlM8YNuvldI+W+Zv9mqlrcbY97vurWOxXMNuJMzP/AAlX2tVa4mhY7HT7tLcSOzM4f/Z+aqsr7lFL7Q4j5JnEe9Pu/wB6o5Wdfn/ipIW+Y8fL/dpxUbd79f71Ei/hFs5N0y1d0tHlmkjT+9urPs5Ns/P96r+jzeXeGZd33/m20v7opGpDHtkK9NvzVZjkfOUm27m+X+9UM0YwNnP+1Sx3XlxmBP4fv0pcsTP4hLyFJG3pu+X+L+JqpTyeaV/ib+7sq150zfPs+RU+838VQSKh3eT95qAM+4jT5nwuWqm0bj+PdWj5O5m39F/iqrNG+3eiUR900K6HaPu/8BoLbWb5P+A0/wDiaPf/ALtRSfe3ZzTjIBwERG807zHHyJwP7tRxkg5xkU5sKcZpAO3Sffcf8CoaTzDv3/71RMH6sKVW+UjtQBatI5bmeO1QBjO4RGJ7k4FfaXw1/wCCDX7a3xg8Ka94u+GsGjaxp3ha0F3r13Ym4ZLWP8YgZGxltiAttVmxtUkfGehyb9YskZP+XuP5v+BCv66/+CXf7HHxG8I/se+PrjV/Emgv/wALb8NhdBFjqBuFtAbe6hBneNSoOZlJVC5XDA4YFR9jklPhrD8OYzH5mlKpCpQhTi5Simpyl7T4Wm3GCcuyt12cz9q6sYx21v8Aofzy/s9f8EJv2tP2kPivp3wp+F3i3wndapfszkyXNzHDbxKMvNK/k/KijqcEnICgsQD+hf7M/wCyR8av+CK/xTfwJ+0uLLVRqNnHdWN94TuWuLa8iB2lo2nWJhhgVKsqsMZxggnvPEP7MP7Tf7Hn7Yfhr4R+AfHWlf8ACeXNzbHw9qXh7WU275/kVJBKFMeclWSVQHU8B1YZwP277b9pnSv2jdV8O/tZeNY9e8V6fBDE17a3EbWxtyu+IwpGiLEhDbtmxDliSoJOf3LBeGPBuYcUYaWAr05YKpQdRQ55+2k+ZJTjrbkV0nfW91ZvWPkV26tBxqJ3Tt5f8OfSdv8A8FYfgxDYx2Y+HnilNhydi2/P/kWuJ+In/BRb4a+Mdy6f4S8RQBgAWcQ54+khqP4Of8EWP2uPip4Mg8Z65deHvCSXkaS2en+ILuX7U8TKGV2SCOQRZBHyuQ4IIZRXh37TX7I/xy/ZI8WQ+FfjL4VFqt4JG0rVLSYTWmoIjbWaKQdxwSjBXUMpZRuGfUy3hPwXznNJZdgsTGpXjf3Y1m27b8vSVuvK3bqcs8FOFPmlFpGp4q/aF8J+I7meVdE1CNZHJXCoCM/Rq+BPjt+wZr/jH456p8TPhT4i0vTtM1ZxPcWWpNKJFuD99hsRhhvrX05SxxvLIsUalmYgKB3Ne/V8D/D6qrSoz/8ABkjTDV6mFmpU3qeS/Av/AII2ftwftRPIvwc8N2eswQyGG51JWkhsoZAqsUa4lVYw+GU7N27BBxzS/tC/8EF/2+P2d9NbxF8WvB9jZaSpUyavaztd2kW5gqiSWBXWIlmCgORkkAZr9f8A9uz4z+J/+Ce37Lnww/Zf/ZvvX8Malq2lNe6/qdowN2Nqp5pDkZDyzyOxcYKiIKu1eBm/8Er/ANrf4g/tQ+JfFH7In7UniGbxnoniLw3cS2j61JvnXaQs0PmDDsGRy4JOUMQKkZr+eqnh3ldbJ6nFeHwEHlsJS9x1av1iVKE+SVRO/s09HJRa2W70v9DLM8RKaoyn73orX7dz8LJf+CeHxPdht8ZeH9oOdpef/wCN1ufC7/gkj+0r8cPH+n/DT4V3+j6trWpSFLSxt5JRnAyzMzRhURQCWZiFUAkkV9g/FXwXL8OPif4j+H0ySK2h65d2BEzAt+5maPkgDJ+XrgfQVneGvE3iLwbr1p4p8Ja7d6ZqdhOs1lqFhcNFNBIOjo6kFSPUGv2qfgF4d4nL3VwVKXNKN4OVSbjdq8XJJptbXSadtmeV/aWK5/ef4HFP/wAGq/8AwVPcH/infCPK4P8AxVkHP61R8Tf8GzH/AAU9+F3ha98b694Q8Oz2GlwNcXi6br0dzMsajLMsUeXfA5woJx2r6Qj/AG7P21J5Vh
h/ad8dO7sAiJ4hnJYnoAA1faP7b3xj+KP7Lf8AwTz8L/Ab4g/EjVtY+I3xEtWfxJeanqMk9xbWjYe4iDFjtUBktsdGBkPXNfkGaeDeLyXNsvwVeOHqTxVXkUYfWOZQiuapPWpZRhHffVrQ7YY/nhKSurLy+XQ/Gj4Jf8Elf2qf2iPEraD8G/D0HiG7twgumshMIbYPnaZpWQRxA7WwXYZ2nHSvUPGv/BtV/wAFQfDemXHiOf4f6LdW8C7ja6XrkV1OB/sxRku59lBPtX6jf8EqLnXvEn7AHxL8D/s2a1Zab8Uo9UmlS4nRQ/7yGMWzbnJGCI50RiAqvkkdWaz+xr8Kf+Cv2kftGaJqPxq8XeIIPCdnen/hIh4i8SQXtvcW+07kjjWVyztgBXUDaSCTjOfL4h4K4Vwmb5lTw/1fDwwTt7PEVqqrVrQUrwSlFWne0LKTel+hrTxeIcIXu79UlZH4K+Of2Bviz8PbTV5vEGsaZb3GhxTtfafOs8U8bwhvMiZXjBVwVIw2MEYOK+f7r5rjZ/ef/vqv2f8A+C0Ou/D7xJ+038Wbz4d+Q1vHpUtvqUtsuEkvo7PZcEfMQSJAVYgLllbgnLN+L16rSMmw4bbt+Vq+a8UOFMh4ew2U4rLKEqP1qj7ScJycnFvldtddL28+x0YKvVrc6m78rsSSSQy4REVf4qZHDBJcb+nyfP8AL/47UCyQ2+1Eh3t91/nq1YW9zeTLGkLbm+7X5LGMTv8AiNfwrYzX2qJbJtb/AGWf5q9Hg0dtNsW2Ju+ba23+Ks7wB4PvLOFdVv7ZovtCMq/J91f4trV0nibXrPRdFfVbny38uLbFG3y/NXkYqt+95YRuejhcPzayNT9nzwC/xB+IiabebZrPT7W41G/h27ttvbwtIzf+O18YeINRuPGni3UPEz8ve38ku1U+6u75V/75r9MP+CTXwr8T/EjxB4/1vwT4ek1XWm8IXVrYWqqzfvJvl2rWh/wW6/YD+HH7N/w6+BfjPw/8N9P8L+KtWt7yy8U2emuqrcLDGrLI0f8Ae3My7q9XKcdQw9aVGXxM9rH5VVnTw/s/tHxH+yP+zxqPxe+IFnYP91pfkVk3LurY/bk+J/hnVviI/wAHPhRZW9poHhVFtb+4s5dy6pfKv72T/dVvurXtvhLw/afs4fsSeKv2gNURYNVmiXSvDTRsySNdXHy7o/8AdXc1fDtmXkgyzMXZ90srdWb+Jv8Aer38u5sTUlWnsvhDi7D4fJcJRwcP4so80v0QCN45Nny7Vq1DCi/cfb/F8tJGqL8n3y3+zUjRvu2Rpt/v7vu17R+cS2FaTj+L/eqS3O4+Xt+9/Fv+7UO6Ers8n5Vb7y1csLczQ5RFpykKPPEkST7LNH5Ltu+9X0B+zV+1V4w+BPjTwx9p17b4P1SdrfxHbzLuW13femX+7tr5+aNFmVs7i3y1r+Ire5vvh5Klna+dLb3Cskkf3lVvvU+Xmiac0r+6fr/4Z1nwr4ys01P4e63b6xYXn+qutPuFkWRdu7d96rjOlvMl46Mo+7t/9mr8XPDvjXX/AIa6na674Y8SalZ6la/8eraffNH5Pzfwqrba+0fgR/wVM0Sz+FM2m/G/SvtXibS0/wBAmtU2/wBoRt/z0/uyLV060afxHXCUJH2/p8KTTfJIpMjbXb7rKu371b+k5b7+5Qz/ALpVT5mrxD9l39pXwB+0locuseG7mSw1G3+a60e8lVZ1/wBpf7y17ZY6gjTJNc+Yjr8iR7a1jW9p8J6NGMpQN2zi81pbmG23/L87bvu06SJ45tiIv7x/n8z5VjqGxkmhjf8AiWT5tzP8qr/EtTFopv3yPlVT5l27t1axkerh+bl0KWpWsJkKPuCfd3L83zVyniSzSHfvmVvn+T+9Xa6hCgs3+X5GTdt/irifE1y821Nnmqv3I2+X/gVctapLofU5a+aRg6aqfaGRIVZ2bd8qferkvGsXl6/dQmU8Kg3rxgeWvNdx4bt3a4MKW22Vk/h+Zf8AvquR8axMfG9xEF2s0sfB7Eqtfq3g075zj/8AsGqf+lQPpsPOLlJeTPx++NENxafHjxLaAfN/bEykt/vVB4q1aHT9Ft9EQbWj+dv96us/aY8OyaX+1H4qgvDwNRa4DL/EteZeINQ+338k+zf89fkkfgPwnF/71Nf3inL+9lO+iGNGb2pY7V5GxXTeG/Cd5qEyeTbb/wC8uyqjHmOWU4xIPD+g/aGX5flr0nwn4JRYVuZoVC/w/wC1Wn4H+Hr2savdIr7vm+Zfu1Z8UeKLbw/F9jR18yNdqsy/drb4fdMeaVSXKiLVprCxt/J8lV/hdq5TXdcj2l3uWO7ms3WvFlzfM3nfxP8AeV/vVjXl49xGOxrPmmXy+5oQ6lePJcvM53bv7tVGmIXZTppvm2eXz0qPy3ib95Ux934iveFEkZk+5xtpbNvm8j73yfPTWj3b9n+7VixjHmK+/bt/8eqhRl9ks26zM5T7q1tadI7Qqj7cR/3f4qorbzXEf7lNm3+L+9V2FXhjGxKCZe8WWt5pmZ0RlLfe21DIs0JPkp82/b8tamm/MuX+bb/e/iqxHo6TSLCkjAs+6l/dJ/wlPSdSns22IWxXO/FWXz7aGb/prXZ3Hhm5tY2eHcR/47XE/En5bZEmVi6t/wB81HL7xpCWpxtmSlyAf71ez/C3VHXR7iz87/WRbvlrxWP7w5zXqPwzvoYdKldH/wCWXyrV83LEqsc58Vr0TaosL7fl+/XKWse+4WtLxhfPeaxJv+Yq+3dVXSLd7ifYlMfwxOy+H+nvNcfOmNu1v92vVLfxZpvhuNYXudu377R/NXnmhwzaRpiuIVYqn3lrO1bULm6lZ9/8X3t1RL+6ZfEeo3nxQ02YN8m59m7buqBvihc3H/Hgiwp/d2V5jDHeTTD52+Vfuqta1qHsVZJH5VN1Eeb4iuX3eUl+IHiK5kvLS/uXb9zLvVY/4a9v8JeIn17wpa6lsWVvKVWZVrwrUoX1axkTZtCruf5a6T4C+OvsdnP4Yv5stC/7j/ZWnEnl9w+yv2OZHe38ReZtz51sfkGB0krxn4yxBfit4jYwHadYnLEt1+c167+xTefbLfxIxfcVktMnGO0teSfFBHvPjD4mijAcprVx8p/3zX7TxNHm8HclX/Typ/6VUOBXjXkcT4g1DUvDuoWnifR0aO5t3X9591q/Wn/gklrU3i/9sf4Va94luN82riSW8kkPLyTadOWJ9yzfrX5ReJrF9Q02aF/m2xbkVf7tfb3wJ+PGqfsx+E/BPx50dz9o8NWel3I5xlSsUbj8Vdh+NYeGKSyrO7f9A0v/AEmZ97wk+bBZh/16f5SP1N/bF/Z21X4w/Anx38GoL+W48Z6TfNFoytulurq3b7sca/8APHa3/jtfzMfG/wAE+LPgr8bNS+HvjLTZrO/0nVJLWaGZNvzK22v6pviX40uv2gPgt4b/AGxP2d/GE9nJr2jLpeuzaay+aqyfd+b/AJZsrfxf7Vfjn/wXK/4JreIfCWpeF/Gfh1LfUvF2rJJJqmh6fO11eLGu399Lt3NuZ
mr4fmpY7KOaUo80du/mj8jo8+BzbkjGXLL7vI+cv2Dtk3xC1G8yC7+Hzlh0/wBdF09qT9ou7Ft8e9UYEsRBbfIP+uKV6N+x7+xN+1R8CfBa/G340/B7VvD3h/Vov7N06+1S1MH2i4YiXaqNhsbI3OSAOK+z/wBnP/ggzYfttaVbftReOv2g7Tw9o+sTNDbaZaaY010jW7GAl2JCAExkjnoa+8zKrCj4F4WUnp9af5VD6SnRnWxbhBa2PyC+Lnh/7dpf9pQ7t8Ls+7bXB6hp7+IND3+SzfuvnVkr+mzwZ/wbbf8ABN3whpEt/wDER/F/ijam+X7RqXkR/wC1+7jWqOp/8EYf+CFc6p4Mvvgr/Z0906+VcR+IbiOTc33drM3/ALLX4U84wSlqz1qWW42rH93HY/mO+FmsTeHfFSQyrgM2Pmr6JhP9pQwukOVuIN22P5q/fn4Of8G7/wDwRTl1GbxZ4b+EGp66ltcTQSrqniOaWDdH95tq7a6HUPBX/BFb9kS7m8Mah+z54K0+e0kVbO0bTWvLiX+795mqK2a4Kjyzb0kdOHyXMsZzU4QcpR8j+e/w/wCC/FuoTJN4b8PahcPHLtRrGzkkZf8AgKrX3f8As96X8VPH3w5017z4e+JP7Rtbf7PKraDcbpmX+Lbtr9Y779uH9lr4K+EtG1lfhfoXhuXWIWk0nw9a6NCmoeXu2qzxov7v/gVc18Lv+CvPhXXfGWseGtf8JQwJYzqbeaGRctH/AMBrircQ5fGav+R7+X8J57Ti5wht5o+KfC/7OP7RviqHzNK+Bvii4RbfdK0mjSL5n+7Xf+Ff+Cfv7WPiSFUh+COrWisyr5l48ce1f+BNX3Ha/wDBUT4Jy2rTCKYvGjHyU4b/AL5rjfit/wAFk/g74B0K6ns9JuJ7rZ/o0ef4v9qpjn+W8t0/wNZZHxDKfJ7K3zR4FD/wSg/a9mj8xNC0NE27vLutcXzWb/gK7azfEH/BKb9shLNifBWj3Kf88bXXo9y/7X+01Ubj/g4D16bR45H0LTneKWRZWjuPmb5v7tcR47/4OFfiJLpV1b+HNHs7WVp/3V1v3PGv+0rVhLiOlKN4wZ6EeG84pytOcIlTWv8Agmt+3RNbtbJ8AL+UrKyxbb23b/gX3q4DxV/wS4/bvsmd5/2YNZnC/cks7iF//Hd1TaT/AMHB3xe0fxn/AGjc30dzA1nJb+U27/WN92SqviH/AILu/tAeKrZdHsdcl0rdKrNfW8i7v8tWE88jKPvUpHpUcgx8Ze7Whb5ngnxY/Z6/aE+EvmH4nfAfxho0Sy48+88PTeWrfxfMqsu2sT4Nzw3K6pLBdRyATopC/eTG75W96+vvAX/BbD496bMLfVvFlprFsqq0qX0aybl/iVt3y1zX7W/xp+Gf7QEvh74m+D/hvoeg61eQ3KeIp9EsFgF6wMZiaQJwzKGk56/Ng9K/T/BLGUK/iZgoqNpWq/8ApqZ9Fgcpx+FqRqycZQW7Utb27Hxd48vZI/FOowLKwBvpDk/7xrmLrWtkeYXx/Duatfx5Pb3HizVpba4Yj7fNGy7v4hIQ35EA/jXG61JtZXR1+X+Fmr4HiaF+I8Zp/wAvan/pbPCxk25ya7siuvEm3akMLN/Cn+1VaTxVbJu/c43fxLWHq15tkOxGXd96suaSaWNdiZSP/b21xQw8JRPna2InE6qPxY+0+TNtbdt3N8u6nN4wn2o7pGqfdRv4mrjbSWZ2L/N+7+Vfn/hqyrzLMnnfxN825K1jh4bHH9cqyOwtdcS5jffc7d3zff3Vbj8RPHJE6bnWNv4f4a46NraFv3PmO/8Ae2fdq8rTMrPv+9/C38VRUpyidFPEc0Tq4/FTyMyCbf8Axbd23a1WrDxVM22Hev8At/3v++q4/hY96J977m6pLe4fcYXfYdy/eauGpRnL3j28LiPh7noGl+Lrlplttit/d8v5ttaK+ILmaNUvE8z+6u75t1ee2s1yrGazT5t+1GV/lrVtdUufLbfNw3y7d+7/AIDXBKPLM+lw8jpr7XJVWZMyQt5W92X+GsnUtYmmxG958u35P9qmRzSWrND8yxtt2bvvL/e3VBqEf7sbIdv9xWX71KXKej7SMYlGS6htpPOd8bn+bbUH9pJIy/O2Ff8A1bfxU7UkTbvhmbd8rfN92qMjeTIsJRsbPn8v+Gt6MeaR4OMxHLzHXaHq0yyN53l4ZNz7f4t1dr4f1SaNVd3V/mX7r/NtrxrSdcms45PORj/stXa+Hdc3Wqs7shZPm/2f7tfcU5e6fj8j1vR9aE0gfylXy2+7I/zf7tbWj300zb0uWi85vu/e2steb6DrUCx+TD0bbvb7vzf3q7DQdVdS77Iy/m/d/vNRKUEdOHrS2kdtY3H2NUTezvH80si/8tP+A1ajvN0z3UIbzJnVdqv/ALP92sOLUppLPZt2yNuZ2/h+9VhpraNneH50XazV5tan9o9zC4qX2S3qV55bAOm4/d3L81QNcTXNwHmgjlC/61W+8rUyRnaMW0PDsm6JpKfG1zLCba5K/Ku5mX+9Xz2IVKR9JRrVeSJVWDzpBNDCv32V/nqNY87ntl/2fLb+9WmljM23emxVXdt+7uZqtLp/+jojou/71edWjCOqPVw8u5xWoaa8P3E3sqM3zbt22si+09Li55MgMiqz12mqWSQt/pO1X+4zf3axb6z2/OEmabb+93P/AA1lGXMerRl72pyN1Yuryu6eavyqnyfNVObT4YVbyVb/AGFrp7rT4Zd7w7o3/vN826svULFI0/vHerbttLm98+oweKOp8MRGP4bpEyuSLKUEN1PLVxmkw/vPnTK/88/4q7vTNp8DsEcsPskoDL1P3q4Kxknt2H2aFsfNu3N826v3fxoV8g4a/wCwOH/pFM9TKqsFOrfq/wDM3bWN47dUS22Oyt95ttSwh2mP7vZt/wCWiL8rf8Cqpp90ZoTvRtn3t38W6rthv8sI77Nz/PGzfK23+7X4PzcpOZYqPL7pbsYUk3lJmi3PufdVpY4Vj3Wz7V/gVU+7UCx7YHme4h2r/CqfxVYj85WZ7aFVVvlrWMpx94+IxmI9tLlBf3Hl/afvtudVqpcWe6P7Z/Gvyo38Natvbu0fFsrPHuWKTduamXFujWqO8MyBvmfzH+9/wGto1re8fM4inKpPYwbu32q6XKfJJ8yMtQR6akr/ACIzDYuxd/yrWncWLyLIdjJtZvmk+WrGm6K9vH9pdMrNt+7/AMtGWn9Yly+8cv1ecZGfDZ5V7aaZsbN+3+7/AHasNp7tiF0ZG/iZmrVXT/lXcixKz/xJ8u3/AHquJpMMjNC8yq2/dEv8LVwe0fPv7pvToy5rM5C80mGO3e5mO1dvzRs/3q4j4P8AjWHR/EnjuZ4WtlaeFd0O1lbcu1f91q6v4nao+k6hHpsKMrLEz/u3/u186eG/FD6b4o8SWcyTFr61ZvLjl/5aK3y19vklCpTw3tX9o+D4grxni/Zx+yVvilefZ/GVzC8ckXmSs3mSfeavPvFkj2N9FeQ+Ydzbf3lbXjzWvtlxb63skzt2StI+7c39
6uf1q4m1Kz+0vMpRkZtv97/Zr3IylI8L3TlPF2oTTaozptCsn3qzrySFbYzTbfl+4rfxUniK4VZN7p8+2ud1rVnkh8k7g397+9V/aKH+Il84iZ0wrf3axIpvJZofmrZgc3uijzDny2rMkj3Sb4PvUfCEfeIpP3f393+9TJB5kf3OP71SXCBl+5yv8NQTcMqb9w/urRI05eaRAx29ak2Oyq9RuN5yaVfu7PMpc0S+VCx7Nxy3SrWmSPHL9/738NU6ktWCzLu6UhSidJbSSeTs7L/FTZFeNm2fNu+9UdnM8kOxNtP3eThFTKf7VP3dzEb947M7W/vNUF0zxr8nB2U9piy4mk+X71MkV5FP75WWl8Og5EDL5ihEfbJtqG4DpDh91WrhflR4X+eqszO6N87FVo+If2dSvI2G+cq1RMoZ99Sts279nzVFMvaq+EuO42HO7bu2nNSbUZN9Mt13ueean2umUdFqhy3I8pko9NfZ/DUjR7V87fTRJkYC4NZklvQ5HOuWeHyPtcf/AKEK/qX/AOCSd7eL+yJ+0aq3coEHhjdABIf3Z+wX5yvoeB09K/mI+CHw08TfF/4p6P4D8JQK95d3itmR1VY40+eSQkkcKis2OpxgZJAr+iH/AIJf/tqfD39lvxV4m8EfG6PUJPBvjSwjt717OIyrazKSnmOgIbYY5JAxTL8LhW7fsXBeS5tmnh5nDwdCVR+0w0opLWbpVPaTjHvJRtp5pdTlq1IQxEOZ23/E8y/Ycurm9/bV+GF1eXEksr+OtNLySuWZj9oTqT1r7R8ZeCfDfjr/AILyWtl4muFWOws7TUbWFkQia4g0tJIl+cjGGAf5QzZToOWX5/8AHF3+wH+zj+1j8M/iV+zN8VvEniLQdI1221HxPHPYGQWqxTqwELyLCzsVByhU4AB3knaMz9rj9srQ9Y/4KETftY/s5akb2HS7mxk0q41TT3jiumgt0hfMZKyeU4VhzsfDHhTX7Dm+BzTi7P3i8BRq0YVsuxFKMqlOUHCpKpFRjJNe63a67x95HJCUKNLlk07ST07H2n+2frP/AATy+JfxmutN/aG/bE8aaJrXh5ltv+Ec0rULiC106QKCWREtGG9shjJuYngZwqgeMf8ABT/9rL9kn4sfsseGPg58KvixfeOvEGj6vBJbazqFrM1xFBHE8bvPO8cQd3DKCQrFipLAHDVq+M/jF/wSS/bumsvjF+0HrGs+A/GcdtFBrlvAJkN6URcZeKKaOZF5RZMRylQAwACgeK/8FAf2yv2f/ih4C8O/syfsp/C+y07wP4TmZ7bWL3TNt1LLkr/o7OzSJE4w7vJiWVtu4Dad3wnBHC1aGcZTQq4bHKphJXmqvs4Yei1FqThNU71YzltGMryTvKWmu9eqnCbTjZ9r3f8AkfJ9XPDupR6N4gsNYlEhW0vIpmEMmx8K4b5WwcHjg4OKp0V/VU4xnBxezPJPvb/gu3bya545+GXxNsHkfS9Z8JzJZvvymVlWXIGOpWdMnPIA9OeB/wCCJ/hzUda/bgs9WsxL5OkeG9Qubso2F2MiwANxyN0q8eoB7V3XwR/bd/ZH/aS/Zn0b9l3/AIKEf2pDeeHpQmieLrW3c7Io02Qu0kO6RZgjNGd0bI6orMSxrbuf2wf2A/2DPhV4m8M/sHXWq+I/G/iKzEUfia/tnkjtmBIRpHnSMYjDu6pHGVZlUPxyP5spy4ky3gKrwLHLa0sU1OhCoof7O6c5u1V1dklCWqa5rrZX09N+yliFiOZW3t1v2sfHv7ZXi7T/AB3+1f8AEXxZpTyNbXnjC/aBpJN5ZBMygg4HBA4HYYHOM15pXv3/AAT8+JH7Kfg/9oKfxZ+2j4d/tnSrmxmNpealYtf2sF6zAma5twrtPuXeAdr4ZgdpOGTjv2wfE3wB8YftDeIfEP7MvhiXSfB9xcKbC1eMxIz7R5kkUR5hiZ9zLGfug9EGEX9pynH1cFmkMgjhKqp0aMGq7S9lK1o8ile/NbW1r6O6Ss3xTipQ9pdXb26nq/8AwSV/Zph+O/7Tdv4z8UWit4Z8BRrrGqyTD9286k/ZomPu6mQ54KwsD1rgv2/P2lp/2qf2nNf+I1pdtJottL/Z3htCeFsYSQjAdvMYvKe4MmO1en+Bf2yPgL+z7/wTR1z4SfDHW72D4jeLr2ePxXPd2HlJa2jcSSrPynlfZ18tRu3h2dyqjBPxKvxk+ELkBPir4bOemNct/wD4uvmsno1MXxtjc9zVeyVP/Z8NGp7r5ItOpVSe6qTsoyX2Y22ZrN2oRpw1vq/0XyP0/wD2V7/wB/wT4/4Jvj9tXSPCdnrfjzxrM1lp1zc+YY4g08iRW7cqVjUQPK+zaZGAXdgIy8N+z7/wWr/aTg+L2n23xxOj634W1XUY4NRtbbSEgmsYXbaXgaPBbbkHbJv3BcZBO4c9+xh/wU1/Yn8Qfsy/8MS/toatBqPh4XZ/sXWdP1JJxBEZDKquIn86No5MlHjD5V9pUKp3eg29t/wQt/ZYuNM+O2r/ABm1rXRa3qvoVhq7z+Rd3cZDqsYe3gjldSAdrybP7wIr8jxv+rVHMM2p8S5ZVxmKr1ajpVYRVVOk9KMadRStScFo9murey64uo4w9lNRSSuttet11PBv+C4X7M3w/wD2cPihr0HwysYdP0jxT4KudVXSIGfbZzMJ45QgbIWNmTeqg4XcygKoUV+Is1vMq74du77rV+sn/BUj/gob4C/bQ8X+JviDYeLdNsdHtPDFxp/hrSbjWoJJlgEch3sqMR5sjsWKrnGVTLbQT+UckyTWju4+Rf8AvqvzDxhlmNPJshoZjVU8TChJVPeU2nzKyk03eSVk3d3aer3O/L+VzqOK0voU7PT3mkPz7G/j/wBqun8C+GbrVtUi022Te7Ovy/erC0eJJPn+7/F8v3q90/Z48MwtcjUprBnb7z7f4VX+LdX4TiJyp0pXPbw9P21WJT8Sa5b6Pp8Vgk0bvZp8/l/wrXk/j7xdc65Jt85hDG3yba6r49al/ZesXFjbTY85md12/dX+7XllxeJdRNt3Y+7t/irmwWFjpPc76uKlT/dn6ef8G/Pxavvh3458QajaWE00MGkfariTz9v+r+8qrTP2wtJ/aB/4KJftXTfFfx/4buJfC2jxNZaXpNnudLGz3f6zb97dI33q+e/+CP37QulfCL9qLSdP8Q3VnDp2oBrW8W++6yt/DX7R+HvFnwd/ZMste/aS+J3xK8JaL4L0pLjUPKSeMz3m1d0UMafxfN8tTSwl8zcXufreR43J6WSfW6qvUhH3f8j8Xf8AguDceHvhr45+H/7IXgPdHYeEfDMes65DG25f7Qul+Xd/tLGv/j1fDW/d8mzc392vU/2rv2mL/wDbA/af8fftIeIdPW2HjLxBNeWdqv8Ay72/3Yo/+ArtrzC4037O29/+AV+hYOjGhQjA/C8+zKpm+ZzxNSWrC3Z2X5+v8dTySf6xPl2L9yoY4/3ex3w396rG1/4NrfJt+aurlieSNXZuXYjfe+dmrY021eWFk2Lj+HbWNJN91Jkwu7/gNdj4P0lL5VQ
r8zfw0yJe6ZN5Yzww7/vvt/uVom4+x/DvVbn5d8dvub+Fq2te0V41/cpu/h+9WT4zP2X4S6mEh5by1bd/D81ZyKpnlEep/Z4mvZ5llmb7sbVd0+SZVa5cfeffuasKxtZrqUMseRW62+KFkz/wGg1+E7f4d+Ptb8J6tBrGg63cWN5ayq0Vxay7W3f/ABNfdn7MP/BU12mh8MftIaas8TMqReJLFfnVW+VfMj/2a/N+x1SaFldE2stdR4f8RTLCN/8Avbdm6plH+U1w+Iq09j90tB1vSvF3hu28YeDb+G/0q4+a3vLeVWVv9lv7rf7NSyTTR3O9zIiMm/cv8Nfkz+zX+1l8YP2edSe8+GfiFYra4+a8028XzbSb/aaP+Fq+pvhp/wAFWJrqaHTfip8K7X7NM/m3F9oNw0bbv91v4f4qX1iUdGj3cHmFCMfe0Z9htffavuQ/Lt/hbbWBqVj5ly+9F2fLsZpfmaofhz8X/AHxq0GLxF8NPE9vfpJ80Vm21ZYf95avSRvJCLZztk83c21Pl/3azrVos+owNRSipxkV9Nt92zybZY2b76r91q868cxovxPnijXj7XDgE9PlTivVNBtZvtSQfZpNy/Lub7teZePFeP4uyhhgi9t+q/7Kdq/X/Be39sY9L/oGqf8ApUD6fL6/tKko/wB1/ofmd/wUi0v/AIRv9qLxBd20OwX1lC33Nv3lr540rRrzU50jRGYv83ypX2p+3R8HfEnxs/a+m03R7Dzo7fSYV8uFN25v71dT8Gf+CZ+q6W0Oq+PYfscEn31+8yrX5VTgvtH4pm9aNPMJwXc+QvA/wR1vXJoXhs5m8xtrMqfdr2/wj8CbPwtYrea3/ooVW3s33t1fUXi7Sf2df2edGmRPJvJIYtqxyfI33fu/LXxh+0J+09N4o1Saw8PWy21uv/PP/wBBqpVI/ZPN9nOp6D/id8StN0WH+x9A2rt+Z5P4q8c17xJLqFyzu+//AHqx9W1q51SZnvJmZmb7rNSRq8j/AO03+392op+8b8vL7xN5010yuif8BpWh2wt3ZqmsbJI4w7uy/wC1U81qir02/N8jVRPxe8Zd1Dvfd977vzVJ85I3sodv4anmt4Vh2bF2/wDj1QvIki/+PPS+2VKMSNlO5v8Ae+8tWYV2zb3Vcf3lpkaom5U+633P4qtMx8pMIv8Ad/4FSXuy94Udje8Pyw3MCo/8P+xV6exTzNkJYLsX7tY/h9d1xsfcxb+61dPb2bpyibv7rK1HLGQ5PlM6xaSOTe7sqf3Wb71alncPHdfO7Mjfd/2agnsY2kV03SM33l/u0jW7rIH2Nt/h20fCTHmlA6/TptNmj2Pc7mX5nXZXlfxskha6RLbjc27bXVWupTWbbPOYGuF+Klx9ouYn3/71OPMVT+I5BPvCu88D332XQ5nd8FU/hrgq6jR5vsPh65ldP4dtEo8xrU2Od1Kd7q8kkk+9vre8G6bPNMjJ/wACZv4a5+CJ7ib/AHmr0Lwvpz2dj9p2fw/dojsKp8Jf1aVLa1CQvjctYyxwzN8/K0mtaptkaJPmLNurPt752Xe7/Lup/ZMeX7R0FvdQpCqJ/D8qN/FUsbSXUmdn/fVUrFXmVT5O2tyxtvs8fz/Lu/hWlGH2QlV5S7pOmpHbSuE/gavPJ9Wm8M+NHmhm2rv+Za9Ek1ISN9jS5UL/AHf4qwtH+BHxd+MXi6LQfhj8OtW1q+updsEOn2DSyTN/sqtVKIqcoyPpr9jj9obwj4HmvLDxhKba11ZI2S+VWdYXjD4VlVSSG3dexHvke2R/GH9lLVr+S8XUNCnuZmLyzNojF3Y9WLGLJPua6b9gz/g2G/4KA/GfTIdY+MOnQ+ANEuHV0k1yfbc+X/1xX5lr9Hvgl/wam/skeBraGf4nfGbxRr10sOydbHy7aNm/8eZq/TOHfFrOeHclp5WqFGrTptuPtIttczba0klu272vra9rGM8JzzumfmI3j/8AZhkh85xoLJ0DHRePp/qq9D8JeDR8UH07wZ4O8JHXF1ZY49M0iz08zfaFIBjVIQpyMAEDHGO2K/UyT/g2u/4J0NJbFIvFgSDbvj/tv/Wf+O/LXyx+wv4P0L4a/wDBV3RPAHhpHi0zQPG2s6fp6yuWZIIIbyJASepCqOe9fqPB/iZjc/y/Mq9TC0IfV6MqiUItKTSk7TvJ3jpsrdT7bhDCOng8wTe9Jr8JEHgz9gD/AIKeeE/CX/CKeBPhb4u0bRbhNz6Rp/iSG0gYEdGgW4UA+xXNPtv2Bf8Agp/4dvW16x+HXiuyuUT5ryDxZbxyBf8AeW5BxX6t/F39qv4b/CK3P9vazGsm/atfGn7U/wDwVv04eHLrRfh5f2s1wZZF3LL8zR7fu7f71fk1f6RuYUk1DL8K/wDuHL/5M8jA8JV8XZttL+vI+Ividpv7VWuasfhr8WPFXiHWLnT5g40nVPFP24QSYKhgpmdQcEjI7E1+nH/BOH4S6J8JP2ftD07xd8SYZ3RZZvsvmCNIHkkaRoypJzgsRnvjOBnFfj54i/a0m8J6hc/Ga5uYbwX11Ikscy/6THJu/irFs/8AgqT4ztbyO2sL+4jRnZkVW2sv+z/tV+ecb+MnEvHGVU8txOGo0qMJ8/LSi43lZpN80pbJva1763srfYYHg/AYWTlSqyUmrcztovKyR/R5B418HwKlomsW0ny9pFNc78TvC37O2seG5/EvxI0HQprSzj81ry6hQFQvo33q/Cr4S/8ABVHxnNcW1lrfiS4V5rqO3gj3MzNIzfw16/8AtOftvfEvwHotnpXjDUo5XhiW4TS7iJpFmbbujZl/2a/MqWb1KfuzpnYuEKcZc1Osz7y0f43fCLT/AAZe/B74ZWt14Z0zV3kS3ubOVnut0jfeVW+7ur83v2j/ANnj41/sMftTXfxd+MXiS38Z6P4gtW/4QjxBq1vtttNb+Lz4/wDn4VfurW5+xT+2xpXjjxY2sa3eMbi4ut3mMnzR/wC7/dr6p/aUvvg1+018DtX+AvjCdkttQ/fadqN7tlls7xfmjm/76/hrhp47nlKNZ/4fI+qw2AlgpxlhvhfxefzPz1174ueA9e1a88f+KvEk1zPqC/uLrULpnvL7/dX/AJZx/wCzXl9n8Rpv+FlQ6r4A84W0zsjMvyqy1698Cf8Agk74z0mbUfiB+118XdPs9Nsb2RLVtLl8+W8j3bo/L/hjXbUn7R3jz4LfDmzTQfgD8E76+TR4Ge41S8ibc3+03y1206PNFa83MdNbMqVGteH2e5Mmm/FTS5X1vxJqSwwMi+RC3ysyt/EzV86/tGfEzXrdryCz1hXdXZd0b7mVa9W1LxN4w+K2i2sPijxzefYJrJXSz01Vj2qy/L81ZGj/AAA+C0dxFc3+iXl+6/LE2qXrPub/AGlX71d8MjxMve0R8niOJI+1l7OVz4lvPiN4q+2LDC7YklZf3KMzSf8AfP8AFWjbt8TtcUPpvgzXLtZH+9b6XM3zf981+gPhXwj4A8Jag03hvwHoth5nzOtvYRqv+y3zV00etXMby+TqrRLt/wBXb/Km3+KvYp
ZXhqceWR5VbHY7Ee85H5wN8Ef2h9S8nUtK+D/iS88yX7sdlt2r/wACp/iL4S/tUaPH51/8EPFEUUe3fItluVW/h+61fo7NeXlzcD55pXjT+/taobi+vDCUS5uFXr5fm7f/AB6r+p4WJnGtjPszPy+vrj496PeJDeeFPEln/wBM/wCzZG+bd/u19Vfsp+J/GviPwLcR+MtKu7VrW6CW32yDY0i7eWx+Ar3bXPtLrNeQXLYb/W/PuZv+BVyFpuN7cyMWILLhn6nrzX6V4NYShDxMwVSO6VX/ANNTPpMhxWNjifZVJ8ylf8Fc8H8f6Vb6X4p1ee3yTJqcszMDkeYzN8v5EVxGpYM/z+Xhm/hr0b4uW0za3qDEzAG6bH93Ga87urWaaHyfJ+b+Dd/DX53xJH/jIsU/+ntT/wBLZz4uUnOXq/zMC+s3mco7sy7/AJ/k+as24s9rGzTzPm/h/irrxpe5lfYqj/po1TLoO1mSHan8btt3K3+zXBTqR2PnMRTlI5X+x08svv2bdv3qaunXMbG5mm3pv3J/FXeWPhl5YXmmtm2bN3yp92kbwjcrIf3Oz+L5l+9WntoRkc31WUoKSOMsrF0jhd3bLfNuVf8A0Kr9vo811MEhdtypu3bN22tubQEb5NjKGWpF059yO/y7vllVflpVKkJS94ujRnExV09/7/8AHtfzPu/8Bp/2F23zTJsRf4mSuh+x+X5UL2zbG+R2ZPu1Jb6Hc3DFPup91q4KlaEo8qPawuFn8Rg2envb7Psybov+ee/7v+7WlYwQsQwTD/Mvlr97/gVXLfQ4ZIfMdGX+H+6y/wC7RHbvbs88Ls6L8vlt8rNXBI+jwdOUR9vCW3zOjDbtZ2kf7tOuIXW1cuNw/jX+7Vux01JpvJ2b/LX51b+Kn3Gn+dZiHY0Qb5vlqeX7J7NGnzR0OaurF2mKImxFi+9J826siaz/AHgld5Ei/iWN/mautbSXaPZM6n+Gs/8Asl51dHeMBVbbXRRlA8PGYOXPzM4C11IzXDed8pb5k/urW5o+seYyo9+wH8Fcku9pN7vmTf8AJtT71La6k8fz71xX2EJfyn5BKPL8R6xpuvcFPOVlZF3+Y+3ctdz4f8Q2ybZkmZo2b5WWvDNC1nYyeT8/95pH+9/s13Gh+KnXc/n4ST76q/3WqK2Il8I6cep7HpmvJdSK+y4cs+3y4327f9qt/T7xNrJcozOzfw/xV5Z4f1y2lX/XNlX2/K1dZo+tM0eyG5YOrr5sleTiMRPWCPcwtP4ZHb2mzkzQs5mX+Fv9Wq1fhazaNd75ZnVU/i3Vzel6k74hS/8A3bff2/xV0ujzukZd3X5v4V/hrw8RufUYXXc1Ft0aGO2eTe7ff8xflWrs32beuEZF/hbZ/wCg1UgkEcYdH2/N8zbPvUv9oeYuxH8xl+Xatcso80PdPToylza/CZWpx2crN9mTj+Ld97/gVc3eW9nawsiTTZV/4vmZv/sa6HUrzyYVm3xn+KX/AL6rB1a8hVXmmlVmkl2p8v8AF/CtEf7x206kYGVdM8OLnzl3Mnz7vlZV/wBqszUJJ5rX5J98X92tS8+xpvmT55pPlf5t3/Aa56+kmkjdESRdv/LNaz66HqUcV7OZ2ukBIvARCsSq2koy3turgIJJmtTcwOvzP8m77u2u+0qRH+H7OCxX7HNyep+9XmU0bvtRH3fP97f92v3rxnhKeQ8Npf8AQJD/ANJpnVPMZYRp9zoLFprGFn+3rs8pWZtm1a1rWOG4zJvyyv8ALtf7q/3q5/TZvtMiQv8AMipt2tW5Z/uLlLx32wr/AAtX4DKM+blOCtmntomlYyPCG3jeqv8AIv3VZa0VsUmZ/nZWkT5P97/dqlZzQ3C+dC+NqbmVl/8AHau2sc0g2O6yLIny7k+Vf/sqXN7usjy5VOaRPb27x7NjruX+L+H/AIFSHT0m2wwpJhW2qrP8q/8AfVXrDTLm3gRNi/K3977y1qw2dtcRibZHvZP4v4a56dYytzfZMRtI+1Mr+TIFb7zb/wDx2thtLtmjj+zI2xflT5PutWla6L5bRvCilWfdtWtKGzkuLcW03yfP/q2f/wBmqeaMmk5Fxo+5zSObXTZvsYWZ/MG7ay7PvU29s4dNs2d9sSKrPLN975dtdCump88MKK6N9xWf7teeftReLD4F+Hd48Lq8twqwRRr/AHm/+xruoQlWxEYHNi5Rw+FlUfRHkln4k/4Trx7rGpbFW3tbVktfOuNy7dv3q+dfGmoTaB48a5SZQs0zI7Rvt+Wux8D+NE0PWNVsIEWJLi3VZZF+Zo683+L0iXX+nwo2/czK1fpdGn7OjGkfj1WpPEV5TcjM1jVJLy4vNGm+XvAv8O2sHS9chjmbTbybake7ZVG41aa8VLxOXX5XXd/drnteuJluH1KB8Kz7tu+tRR/lL/i6NzcS3ML7g38NcfdTPdKUf5TH9yt268Rf2lpOxNu9fuVzl1Huk3pu2f7VHvyHEv6PdJGrQ3L5Rk+6v8NVJLhIZsmZvl/hqKNvs7B9/wAtMnBlcuE20FlopDcfvkf5f7tVriNFZvKTaGqOKV4n21Z+1QyITs5quYPhKW35s05PvClk+VmSm1RXxCP901ImSy1G/wB008fIyf3anlCRr6e0a437l/vMtWZFh+dN7f738NVdNb5dm/duq1cyiSEp0VU/hrOUp/CZ+5zkEkkbbtn3qj85Gbp/wH+9SPOnl/u3yf8AZpi7Np+fbVFD92z7gxUVxGjSNIU/3/npzbNuzp/tU/anlsjv/wAC/vUE/aKkzfvOOPk/76qu/wB01Zmjzl/u7aqfwfjTj7pcYj7VtswOauTWs0kn3/vVTsXzcL8ma6RYEmtg+za+3alVGMxS5UYFwkiybJui1C0bL9zpWtdWqeWibMP/ABLVDy3iIx12/PUC5j27/gnAhH7W3h8t1+yX3/pLLXr3/BS34y/Fj4b/ABZ0LSvh/wDEbWNGtpvDommt9Ov3iR38+VdxCnk4AGfavI/+CcygftbeHc9fsl9/6SS12v8AwVjx/wALr8OFv+hWX/0pmr9zyfE4nBeCWLqUJuEvrS1i2nqqXVanFNqWNV+xyHwH/aF/aE8V+MV0/VfjJ4iuIlhZmSXVpSCdvpmvY7/4rfGmwjW5HjnVJUQZfF6/6814H+yppqXmvX135O6OG1Xd/e+Zq+gVs4W2xp92RG2K38VfiWN4mz6ElbGVf/Bk/wDM48Xyqpoj7g/4J8+LvCfxP8G2R8ceH4NSvA5jke5t1keRgvQ5HWvtfw98CvgvqGnxNc/CnQmnfh0TTo+P0r8q/wBiv4np8M/HEWgvNNFb3E+9I/u/N/F81fpz4b+PHw38G/De58VeP/GWn6Pp1nBuuta1KXbFbr/Erf3m/wBmvWwHFedTw1p4qpdf9PJf5nl1FUlU5YFzxx8B/g7pMLsPhxosA2ZVVsEDH9K+U/2v/jt+yr+yjo6X3xNk0iwvJ4WMGg29sr30hH3Ssajcqt/eavCv21v+C6Wr+Mrq/wDhj+xtYf2fZSRNa3XxC1aBvtd0v3Wayhb/AFa/7
T/arY8nerSPu/4DVKSxQSB0+dl+8tfkEf5j4otW6pcSM8LqP7+6pBdTRtvd9rbqr2cscVxsdKuXEaXH93a38W37tMDb0HWiu37y/wtu/iq/faemqQsj/e+98v3a5O3kmt1Z4U3bfuLu+9XTaHfboxvTKf+zUS5ufUzMC40/7DdeSU+Xd8yr96tBmM1mYfJX5k/u/NWrqVjDeKzoiod21f71ZM0b2Hzypz/tUGhZ+H/iq58L+LrHWLP/j5sb+G5t/96Nlav1A/aW+GOifGz9o3RfiQibLTxp4FsdSguPtSqnneTtZW/wB1lr8mtUvIIboO6fumb52av0V+C3ijXvif+yL8LvGdheNLd+Edem0S4kmb/ljt3Rrt/u104KX+0rzOfFQ/dXPPrXS5pJrzwvMkcvlyyReZs+9tb7y11U3h2bwj4dtIfsawwtFu8mT+Jq2bzw/C3iK/s5ns0v5rprhFVWXcrfwrW58VNah8Xfs86Df23hVrKXwveTWWrXHm/wDH00jfKzf7q1eMp8tWRWHqc1L4jyDXtas7VPMm+VlTdtX+KuL1jWvLmdJI96f3m/vVc17VJo2m877zPtiXfu2r/erkLy+/0gb+f95qwp+8RXqSjHQtnUN27zivzfeq7Z3X74RhGVfu7q5xtQRZH3+W6N/eSr9jceWuwv8Ae/u1t/hOCUTqNPvkkjaEcf7X96vZ/wBm+4e5sdUkZifmhxk57PXhFnfJ8kPyv83zLXuH7MVx9ostXbYikPACEOez1+i+FH/Je4X0qf8ApuZ9XwGv+MooPyn/AOkSOO8fiCPx3qcpMrML+bA37UDbj1rNjm8uZUR9hZWXcv3dtWfH95s8f6y7xnZHfy7gejfMaz4rp45P3zq5V9yK3ytXx+ef8jrE/wDXyf8A6Uz5/NZR/tKt/jl+bL8mF+S1dnRUXdu/vVb3JH88cLNui2/N95azYblJI96Jlo9zL/DtqeO4uUtV3v8A7XzfeVf7teZynncvN8I3VFSONnR+Nu35qx7pkht22W2/5d26N/u7q0by4RbeVHRsfMzeZ/7LWPcXD+UgR4y33dzfxVz1OY9LBy/eFS8kEjbEtVRvupVWOP8A0jM0LP8ANtSprpoZJld/n2/LuX5d1VppN0nkvt+/uZV/iWuOUox90+gp7D4ZHmZ0R2yr7dtbfhqR1m8m5dVVfuKz/ern1aCaTYny7U3Ve0nUHtXV32ttf+5u+Wsai933Tfm5T2nwPdPHAr3k0bOrr93+Jf8Aar2/4f3jtDC8zq7r8/mL826vnHwVrSRn7Mkyq0f8Tfxbq9i8BeIEt5beF5mKSfN8rfKq1zy55aky+E9f028e6uPs1zbbh977QqVttbzsn+pZyvzRM3/s1cf4d1bTbjY8Ny25ZdiNv/zuro9J1KFWCPbbX81t0jfLurGXN7r5Ti900bLZuPnSf7/+zSRxyQzM6TZH3Io/4abcXDzQqtt8v3vmqhqWtvaw+S80bSbd237q1p7SO4HxTqV86M1sjrtZ/nZv4aRbd764M0zttjXdKy7W3NWGNSubhfkuVKs/3f4maug8O28ilLl4fuvufy28xVav1WOK90+Sp4X3bmro+j7mxsV12r8rV0mn+HY1VLl0j3Kv3aZo9kk8yuvPlp88kny7q6fTbHbGHe2Up/z0/wDHq5a2OlKOkjslhfhM5fCdtcbHjtmCR/N/wKluPBaLl7k7dr/LG33lZv7tdpo+nzTQiZ9zCRdyN93/AMdom0+2jLQgqo+VFb733fvbq4pZhKMr8xjLCRied3mhwxxpMSxM25Pm+9D/AL1Zl7pKNcD7HujVvusz7v4a7/UdLdpGtofMCbdysy/K26sTUtL+x5eG2xTWYe03kTHCyhscsrw29wiOiuF/vfeZqI7rb5jv5ip/spVu6s5rWSbY/wA80u5Gb+7WdeTW0kB8n7irtddv3qUsV7xp9T7l6x1p9Pxv3b/uvuf+GrNn4mS3kfZNuH91q5K4uUt7dbVNu2P+6jfL/u1A2tOsaRP8m75dypWksdzbGUML3O9bxck0buHXau35aY3i6CzYhNzfPtZt/wDFXANrnlRs+9l2p8jNWbJ4s2xrc75Nky7mVqr65zlLDyie76ffGbwob6J2Yi3kKljzkZ/wrh7zxJbTMZkmZjJ/ra2/Cupi4+Cp1MfNjTLlsDvt8zj9K8lsfEFzcQh5J4wzL91v4q/T/EGtGGUZQn1oR/8ASYH3nGtKTwOW8vSivyidsupQzM0z3Mm1V+b5vvLUc+o/MPnZ/MTbtV/u1y1jrDqu+2mZvM3Iysn/AKDWjb3KbldPLYb1X5flZm2/xV+XSxnLHlR8LHDXleZo7nkjTzvM/uszfN5dI2+RmTezbU+X5f4qhs2hk23KTf3kb5/lqyykSI/y7Y4tu5fu1nUxnKdEcPKRUuI4XbeifL8v3m3fNXAfHqNP+FT63M6b/LspG+VNteiX0fmRpBD8zMjfvF+7XFfGyz+0fCnXYU3FI9NkeVW/i2/3ayxGM9pQ5eY2o4WMalz4w1aPzrFfmZtyKyKy/wCzXLXEL2txvT+9Xa60v2PTYpvMbay/Lu/3a4bULjzpSf4t1fN+6elH4gh1SaF1dZm+Wul8P/EfX9PmR4dSk3L9xt1cgqFm21a0+3dm2PuA/vVJXLA9Qs/i54rmiENzqTTRbG/d3C+Zt/76ouPE2j6gz/2xoNjcq0XybYtv/oNcJ500asiPytWre6f5WTnd/d/hqoy5ZGUtjfk0nwNffvrbRJrfam793P8Adq7DcW0lm1rDuddu35k+asS1mdx9zarfM22tPS5nWffPM2F/urVxFKX8pDrnhW8uoYtiK0ez7rL95q4/VvBmq6fKdttJtX+7822vWtWW21zRdnzI+35GV9rLXneral4k8OzSWs037v8AvL/F/vVPwjjKfMcoVmt2+fhlp9xdeeNkyfMvetr/AISawuTs1LSo2/2l+9WdrTWAYNbJyy/w/wANHNE2+IoUUUVBY3y/elVdtLRVfCAUUUU47AFFFFMAooooAKKKKACiiilyoBVXd3re0yaa30kum3bWCpw3z/NXQWUciCNIduNq8GiUTGqfRf8AwTv0+bTfFmp+PEhX7THZ/ZbC4b70MjNuZv8Avla/qG+HHxph+OP7O/hX4j6HrCy22reFbeK4h8r5o5o41jk/8er+cfwL8Nb/APZ30/w34J1V9uoXmkQ6tfwtFtaH7Qu5V/7521+y/wDwQ/8AilD4/wDg34v+Dt5qqyXHhe6t9RtbVvm22s3+sZW/3v4a6MPLlmfPYqpKpLQ7j/go28nhv9ljU/D1/JiTUUspoiW4l2XcQOPzrwD9gXwFD4o0W41eTTVmNrrEqh40BlUmCIjGe3Wvd/8AgrzbXD/CXSbm2gZreCURNJt4UNIjD9RXhP7Hvxu8DfAH9nTxJ498XeLo9NlTX5FsbdU3T3cgtosJEP73Nft2EqRpeCmKl/1Er8qZzU4ylTsdvq3ijwB4CttS1jXtehhms7qRZVa4VfJ+b5VZa+H/ANrD9rzxh8bLq58N+Cb1rXQt7f
arj7slx8rf6v8AurXP/Gb4yXPxW8V3usanbSWNrfXX2r7HJ96Rt3ytI396vKvHnjaw0vRZ/I2ysyyNtVfm+VW+Zq/nrG4+cp+69DelGV+U+MfCljNeeINYuXDFW1eb+P73zV2Nu3kr5PzfL/DXG/DeN76zmmmfa81/I+5v9pmrs/JTcmE3Oq/Jz8q/71d9L4D2X1HzQvJDv2LiT5fmqNti/I8KuPu/7tTXB3Q/J/wLbVJo9u/zkZlb7u2r+ID1/wCAPijw34K8N+IfEPiGGOW4j+zrpas3zeZu3NtrjPiV4yvPF2sXPiHUrny0+Z3j3fKq1Q0CRDYsj7di/wB3/wBmrzD4+/Eh7iY+EtKmUL/y9NH/AOg14lajKti7HPHD+2q/3TjPiR40fxZq7JbP/osPyxD+9/tVzZOeTRSAY716tOMacOWJ60YxhHlifpD+z3/yjcT/ALEvWP8A0K6r83y23mv0g/Z7/wCUbif9iXrH/oV1X5vMu6v2nxY/5EuQf9gsf/SYHHhPjqeotFFFfjHMdwUUUnz+1UAtFFFLlQBQG3c0qfeFG1FUbPvUogNUYGK0PD1++n3n2mPrt21QrY8EeHb3xN4httD02NXuLqVYoFZto3M22nzcvvEVPehY1ZtcvdS1RLmaZs7l+996vtD4Z2HwK8dfC/4b6r+1pD48ufBPgV7hvsvh3UVb/RZJPMlhjWT7rSN/EtfNmp/BLTvh/wDFr/hWvjT4neH4bu3ljWe80+6+1wRyN823cv3v7rV6N+1Z8U/Fev8AhDSv2fPDXhHQ4LrS7VZLy68P3O77Rb/dVdv97+Ks69eMrQW7OWitbs9P/wCCnmp/szftAfBHRfj3+zx8SNB8F+EdEvF0nwH8DrOXzbqys/8AlpdXLI3/AB9SN+8Zm/2V3V8CS793FO1CyvNNvHsL+2aGaN9ssci7WVqazJxvpxjKJ2jWG2ShndpN6CkY5bipLdfmOf4aCPhNfwjp/wDaXiTTdNT5TdX9vF/31Iq1/S94nWG1tbXTXEzJDpFnEsf8Py28a1/Od+zb4dufE3x58EeHkh859Q8X6bEkK/e/4+F+7X9E3jq++x+ILy237mWVkRm+b5V+Va5cQ/hic1bocpdXiTXDQvbMnl/3m2rXO6o00bMifLu3N81buoNJI0r3KRqu3ayr8zVg603mLvfcSy/JJ91q5pe7EuPxnH+JLf7VbhHKoq7Wfd821q4bxEsy70tnb/b3fdr0XXFhXf5wjCRovm7vvf8AAq4HxEtz572yJ5nztv8A7rLXJU5pyO+jE4fVJHtWRJkZPM+dvkqnJdTHfJs5V/mX71XtYbdutoUkdNzfKvzf8B+asuFZtyzQo27ftlVvvVzyjM7oyLFvNNG3mujS+Z/D/dqzZ3ELRs7pJsWXbuX/ANBqpH5PlrcvbSRyebtTbT4Vh+a2Tdne0rx/3m/2amP940lHmiWlv3jOxJpHZX+Vf7u6ofHTG4+G2uG4JGdHuw5PGP3bg1VWSRZn2IzBmVW3fwr/AL1XdYhF78P9Qt5WGJdNuEYjnqrCv23wLnzZ5mH/AGC1P/SoH0fCitiq/wD17l+aPyq1PVHabf5e3/ZqlDdvDumR9n8NX/E2jvZ39zZzSNuhuGX5vl/iqlPapCvzphW+ZK/OYfAfnsiRdYmjX55mqpJrE03Dvn/ZaqcxfCp33fJTJB8+08/3v9mr5fcJujXtdchjVUfcP9qtSPxFprQ/fX73yf7Vcltdm2fdX+Gnx71XZ/7JU/ZGdO2oWDbd6LuX+JaZJqkLLsRFT+JdtYVv8o+/81Wo28ydXd2qpS7GZPNqTIu/fuNVbrULmTdJvbC/d21JJH5iu7v8u/atWLXTrZnH2x9gpf3So7EfhfxlrGg3QubUZVfvV1+m+PPD0twXm0eTfI/zNI9VdH0bw3Lb/JD+9X7/AM/3qmuNN0f7Tss4W/22kquX+UOZHq3hTQvCvjDw+88KNE3lMssbbf8AO6vD/iR4TfwbrUkKcozfumr174WtNpuj3Ezw7UX+H+9XL/GzS5Nf0ptZCZ8v7jLTl7xl76nc8Wu5nuJi70z+H/2aiT74/wB+koOvoFFFFBoAXbxRRSbflxQA5V+b79G35s7KSljUSHZQTzMlLbgP9mpI49zLzhqjVDJIz/3as2qozLv6fx0+Yk1tFtst52xSqt8tafiS8Sx0V0L/ADSf7dQ6XZou35N/8VZPjS+Sa6FjC/yx/eWp5kZKPNMw6WNcsOPlpKn022+1XaJ/tUcyOk63QbEW+i7OvmfNuWmSQ7/uJvq1YzIzLZu/lIvy/wD2VOmhfcyo+F/gb+9Sic0tinNbv9kVH2/M/wDFX318aW8n9gPKk8eENKA597evgib/AJ47/u/xV96/HBHf/gn+yIdpPhDSucdObev1/wAMFfKc8X/UNL/0mZ9lwpf6nj7/APPp/lI+HLeTzIQ/3m/36WSNPMTyf9ZWbYXzw/u3f5v462I7q2k2I8K/c3J8lfkX2D4rnKiw+XP52cLvq3C0O4/40k0fytsTKstNsVeORpN//jtKOw/8JLGu1fubd1WdLvvsrbPO2DfuqJ4U4m3sN1ElqjbRsyy/N81HxQFzWOjW4S6t/wBy6qV/iasrVofs8jfP5u75qsaPcRtGE+4zfeWjVoy3zp97ZVxIt0OQ8SMWtpfn+Zv4Wr7P/wCCYuvf8LC+CvxF+DP9pNHqENhHrehqv/PSH/Wbf+A18Ya03mQzpNDtb+CvWv8Agmd8crP4NftUeGdT151OnX1xJpuqRyPtT7PMu3c3+yrVVKUoz5hVqfNStE+spPGniG61jQbnxVC1tHDE2y4ktfmm/ut/u16Notnbap8MPG3gPVdS+2Pq1m15YKz+WsMy/N5i/wC18tfoqP2Qv2b/ANuf9lfQrDTbLT9M8V+H7C4t7DWLS32RfL93d/vfLXwNpnwr8efsu/GSz8JfE62jtP8AiZfZ7ea4bcskf3Wk+avpcdg41sNGvT/7eifP4bEVKdX2M9+h8R+JNWdbrfv3qybfm+XdXOXmpJCwR3+993/Zrv8A9srwqnwx/aQ8U+CbaRnt7e/+0WDbFXdbyfNG22vIpdUhLDfuY/3a+e+E9SN5Q5TVk1KGPdDhnVv4lqzY6gmVdN29v4d1c1JqTtJ+5dfvfKtS2usPDI7+c33/AO792lGRUoSXuo72HUodzlLZl/uRrXvf7JtytzZ646k/6y34PbiSvlmx14xxoiTN8zfw/er6S/YmuRdab4iffuPn22T+ElfpHhN/yXmF9Kn/AKbmfU8DQtxLRf8Ai/8ASJHNfEeVf+Fga3G0zZ/tCYnaM/LvPy1kx3dvcKEdPmbavmfxbqg+KGqiH4na+EkUeXq04I2/7ZrNXU0m3+d8oj+Z1/hr4vPdM7xX/Xyf/pTPnM0p/wDCnW/xy/NnSR3iKoRHjba6ttqU6g8cm/zt7K/3a5y3voVZE8xVWP7q7P8Ax6rJ1RGtzPMiqm35GZtrf7teZzTjE4o0/dNDUL7zl865mX5V+633q5261RBMYflG1t21VqtqGsfu8fKr/
d+b+H/gVY9xrDtcb0mXc3y7qzlI6cHHlnqbDaluUoibE3fxfxUf2jD80r7drOqoqr826seK68yRZN+4/e+WnySbVaPzP9Z8ytv+7XJL4j3qfMbFxcJHGv8ApOwf3l/9BpY754pPk+Td/CrVmNdPbqmxFbbtV1+9uqbdMzbEkjwu5nbZUcvKbSkdv4V137O3lvcq4avTPBfipIbUQzXPz7PlWT723dXg+l6s8cImh+Tb83/Aq6jSfEyKqvc7V+X/AFi/N81RKP2omVSUeU+nfC/irav2bzsoyf6tvuq38NddpviosuLm5aQRtuSNW/h/ir5v0Px15P7uabarL8rbvmrstL8b/MiQzfd+dZG/iqJR7nJKXvHtf/CaPDZmCFFkT70C79rN/s1j6t4yd45X8tdjff8A4v4fu159N44eRhsust/e37VrD1b4geXE01zMsTN95fNaspU+aIuaETyPR9Q8ySPekar/ABx7vlWu18MyRxxkpHGi7trQx/Lu/wBpa800Wazjjaaab93v2/L95t1eheF2+zLE+/7vy7pP4v8Aer6ytiuX4TCnh4/Cdz4d+SFYRGqbk+633q67QZEwyOnlt92KPZXFaXHDPIJi6zbfvyb/AOH+7XTaXctDGk118qK25Pm3Nt/hrjlipS1NpUYna6PNMzf6TN/d2SbdtXZrVGzcwhdk27zV3/8Aj1Zmi3Ft9k3wTNM0fzOv8NaUdul5tSY4LJu8tfurXFUxQ40jP1SzdVR9m8wozbVfcrVgavbPJ9y2kVpNvy/7Ndmqp5aRu/735l8lkrH1izdfNtoZt25/3Uf/ANlXN9ejGRUcLzS0PPPE0aSs1zbbU/e7Ny/NXL6jNNtaBPlP97ZXc65ZwwK8fnKzb2b7u5d1crqFrB8s0Ls0rfK3y/K27/aqvr0u4SwvvfCcdq19DF/y7KzxptaT5l/4FXMzaw7SKn2lsruV2+6tdN4ktUjt2hd2UL8u1v4q8/1z/Rrp0+Zn+9tZ/lX5a6qOM9oc9ShKJZuvEU1uqO7/AL1W2/L/AOhVj6lrl3H+8eZm+f7tUZtcmt7j5EX7nz7qydb1qGWNnfqtdUcR/KYSo8p9V/Da43/s1rcMTj+x708+zS14fod9H5aO+7Z95Fkr2P4S3Ak/ZPjudxIOg3zZ/wCBTV4P4T1Kfy/J87Lt9xWT7tfrfiXU5cpyT/sHj/6TA+04shfB5d/16X5ROys5kjZHv3VU/vL/AA7q19Pb7Psh/wBY33l8z+7WDp80/wBoieaZXXZufalb1hdeS3nO/P8Ad2fxV+PyrcsviPkYx/mNjS/JUqn3nk+9D/s/3qstskZn2KVb7y/w/wCzVOxWa4Znfa0W3d8v8NXo2RYWm/5d1/u/e3f7tc0sZL7RtTpkcnkRxb0h2S/ebbu2r/s1x/xRhhuPhvroebLNpsibv7rN/s/3a7ia4eOzZ0hYfL/FXD/FRobL4Y69fpCskq2DN/d8tdy/NWf1qUjWVHlPjH4qX0Nnb29hCjfKvzf71cAzfx1t+PdZfV9ZeZHyKy9O0+a+uRFGjGrjEmPuxuFjb+fKBt71vW+nvDb8HczVs6D4Ff7LvkTa7VDr7Q6XCyIMndtq+XlM+aMjG/fecyO6k7/u1oaTapu+eFqw7jVEZjsRvlpIvE15byB0fil8Ivfkd5Z6I8n+p2hPvbv4v92pI9Njt1i33Knb8zs38NcpY/EK/DeTO+1W++y1c1rTLnV4vPsdaDI23YgpylzClHlOuF5YSL9j/tKEj/rrUV9pMOuWv2aYK8ez5JF+avO5vDWuxMXhDOF/iV6m0xPHdsv+hw3RVfm2/wANTHnKjGO5V8S6DcaLqLwn7u75aypGfOxq3Nb1nUrgbNVsSHVf+WifxVhzO8jbmpGsRaKRWzwaWr+Iob9/2xSMu2nKu2hl3VAC0UUVcZAFFIrZ4NLRHYApeVNN53b91LUAAbdzRSKu2lrQAooooAfHHukXf0avW/2VvA2m/EX46eFvBmq7fsE2qRy37M/3YY28xv8A0GvKLH95IN/G2vev2WfDepWV5P4wsJZEmX91ayMn3W/i2/8AAamXu+8ceKkoRPrL9rbWP+Em+M0/iq2mV7aZVit2V/uwrtVV/wCA7a+xf+Df74jWfhL9q7WvDd5qvlL4i8HzQMsn7xZGjbctfn7pdrc3V5/aXiR8xwp92RPvN/er3f8AYB/aAh/Z7+Plt8WraFbiHSbK4/0eR9qzNJHtVaxp1LS55HiOXOfqr/wU9+I3grW/2cZ9GbWoY9Qk1G1Sytj9+4dHBcD2VQx/Cvys+Lut61Dd2elWDEpHG00fmyfJFIx2Fwvc7Rg+wFbni74q/E34/fHKX4n/ABL8fxXGZ5hpPh6zi2W1jAUIAX+8395q4r47WllN4itZ7y8nVRp5QxxvgYLNz9a/YqWJ9t4C4yf/AFFJfhSHFezkctdWaPdP/aXiFX3Kqsqy/Kv+7WJ461DwloHgvVHSaNy1rM26NNzM3l1Pb6f4es1KfY1Xau5dz7ty1zPxq8QWui/DPVb+zhVCulzRbWX5fmXbX8+r95Vi0VR96rFHzp8M7d4/DsE3k8ybm3N/vV1Mm+HOxPl+9838Vc/4H/0HwzZwpD/rIl2bWraWZ5Y/33yn+61fUwjoerU+ImjjS3jaZJmK7F2rVSS48kF5Plb723+GoLrXLaPMP2nZtfbtasa+8RIvyQ7Sn96l9rQXLGRqeIviU/g3wfdJbJH9ouG/dTfxL/u14TeXc1/dSXly7O8jbnZq6D4iatLfXsMH2reiqx2BuFaubqY04xlKR2UafLEKKKKo0P0g/Z7/AOUbif8AYl6x/wChXVfm/X6Qfs9/8o3E/wCxL1j/ANCuq/N+v2XxY/5EuQf9gsf/AEmBw4T46nqFFFFfix3CMMjiloorQBFXbS0UFd3FT9oAooZfmye1FPlQCt+7au1+BWr6bo/jb7Zf7d/2C4W1Zv8AlnN5fytXE0b3Vg6Nyv8AdqZRJ5UbUWnzLdPc3k2597M0m/7zf3quW8F42rrq763J5i7dszP8/wD31WENVuQhTPBqM31y38bCr9zlMOSrzXub3xDvbLVtcTUbYbpZrZWum37t0n96sBd67t9G4NIzvTJF+bmp5TeO4L8nCPU0bKmE6bv71RbTu+SrEMM0mfumpJaufS3/AASn8K/8Jh+338LdKmto5IYfEa3T+Z/0xjaT/wBlr9w/Fd1CLyWZ33edcMySL/eavyN/4IS+CpNY/besfEkyRvD4b8L6hes0n/LNmj8uP/gXzV+sd9Ntt/kfeJPvKz/xf71ediJe8Yv3nymRdN+7d/m+V/7n3qyNSie4ZY3s22sv975lrS1BrZbqSGO8Zwvzbf8A2VarXiw3UaTfbP3f/LKNflaSubm6s6oR945XVrVxG6PbZf8A5a7n+Vv96uS1LSUhYiZ23yP8vlv81d/qcJaRke2XLbvNb+Jq5i8sZppJSzyJMqfJtiX5f9ms5e9E6qcTzTVNHcRo6Iqtt2urL92sSTT3jZ7Peqn7zbl/75r0DUrFBueZY5TMrK23
+GsO6tLZQ1s88hRtvzNV/YOnlOVW3uo4/J2fMsv72T+FarFZlZJ98ed27zFb5q3NUtUWHfbQt838O/5WrNuP3yt9p3IJNv3fm2tXLKjKUtS170feM2SZJVdEdkZpd25futt/hrYMYk8HTwncN1nKPlPPRunvWTJHtaVBMuyN9y/P/wCPVtGQjwxNLvPFtIQynkAA4r9p8DoyWf5jf/oFqf8ApUD6fhWFsTWl/wBO5fmj8z/jp4bfwv8AEi/heFkt7iff+8ri9auNtqnloo2/w19V/tUfClPFnhd9dsH3zwt8y+V8zf8AAq+SNehubeQWdyjK8bbZa/MsNW54nwWIozo1PeKD75GX5/4ql8t2XYNtQK22T/Z/u1Pbx/effk/3a7Obmkc/2B8caeZ845+7UbMiyN89SeZ5cZb+Gq/yeYetMB5zHJvdKuWsbzN8ifMv391Vlj/eM7u2Nn/j1aFnC8O19jb/ALz7amMeYiUSXb9nVt6bvk3VWvbx5JlRH+T/AGaTUL6ZZGh34Zvvr/dqpG0zNwn+/RER0Ol3lyq70euo8P6XNfTRo8n3tv8AHXJaLE8zhN+0f7Ner/DnR4fJ8z7Kqbfli3J/49T5eUcubkN1NN8nSbews9rH/lrtqa+8DzSaTNvhVk8r/d+b/ZrrvB/hWHb9qmddy/N8qrtatPxBbJcM9sj7Pl/u/LWspGEZfzHxF4r0z+yNeutN+b93K33qz67j4/aGmj+PJtnzCRPmb/arh6zO+n8IUUitupaUdiwpfmWkpFbIpgKW280/bvxsprIFApwY5L0uZAEP3q09LhSZvnXH8Py1ST7wrc0O3TztjvtpmMjXhaGztn+fbtT564nUbp7y8eY/xPXS+Lrp7XTvJR23M38X92uTUEDBoCnHqxa1vDlm6yNN/Eq7k3VmW6+dNsrqtN0/ybXydm5/vfNVfEVU5ugkbfZ2+fcf4q1Ix9ot/kRhu+b5qz5V27UgjbG/73/stW7G8Ty9nzGp5eU5/iJLq12oHTlv92vu343K3/DAzIDtP/CJaUPpzb18NSRo0P3GX/gVfc/xtLL+wW23kjwlpf8AO3r9d8L/APkTZ5/2DS/9JmfacKf7nj/+vT/KR8BTxvCyom1j/eqxYXE02Wf5T92msJvM/efKrUscZjbfvr8kifHmowk3B4fvfdTdQJplk2b1H+z/AA1Bp9w6N5fysy/xM1W1jSTLvH83+zR/hM/cFt7hJJFhcM396rEcyXEOzO35dvy/e21Vh+WPdv2r/eqZW24dEZ1/j/hpa/ET8USW2vvst4IUT/crZuv32ns/3dq/erHWRJFXCfd/iq3HdTSW/k/xL99f71WP4fhOX1uHbcOkLt937zVzeg6hJo+vLdrMyvG+4bf7y/MtdR4i3qx/2v8Ax2uEupdt57K3zNWZpT94/pW/4Ik/tBQ+NP2c4YfOZ7q4sFVoZvlVZNu2voT9rD9mbw9+1d8HY9N8SaD9j1zT1ZdD1KHbvaRfuqzV+Q//AAQp+P1/pug6n4S1LW/Ljjuo2TddfMq7fl2x1+3n7Hvjf/hZel6n4Ytk+3JZ3SvdNJ96Hcvy/wDAa9rDY2q7QlL3Tx62Ejyua+I/Bb/gsB4BuvCPi7wN42udNjtru60OTSNZXdul+0W7fK0n+8tfFz6om77mx2r9qP8Ag5A+Adgfg/qHi3QdKkF7perR39vNDb7vLj+7Krf/ABVfh7PeeZJvd2wq/IzferjxFOVKVgwdT2t2/iNBtQT75TD0+TVv3Y3sy/32WsRrzaF3v8rURX/y7C6tXLKXKd/L7x0On+IBGyfI2zd8rV9Wf8E+9QW/0rxQMcx3FoC2c5+WWvi+O8eP597I33v9lq+uv+CaV19q0fxa2zG2eyGfX5Zq/SPCX/kvcL6VP/Tcz6rgqMVxFRt/e/8ASZHB/GTxA9n8ZPEsSsny6xcDkZ/5aGs/T9e8+HZcj5W+ba33v92sL496zJbfHPxWqP8AKPENyG+X0kNYsfiiaGH/AFO4/wC196vjc+/5HeKS/wCfk/8A0pnzWZ6ZlW/xS/NnokesI1u8aPiKOXcu2ql54utoY2mdFdV+b71cTL4gu5G2P9xvufPUcEm6b53Vf71eVLl5jijGXLdHR6h4oe8uNiPsVvurVdbpGl3u+1ldfl+9WRHMkb70RmLfK9SrJD5g2fK38TNUy5eh10ZRibsNxuZHtn5bd8uz5asRzKJFV3z8rfwVjreTf67+997b/dq3b3CQwvshbd/Buf8Ahrm5UejTNSNvP3Ike7b9z5qk855EWabav9xVqlDcboymz/f/ANqprfybnL/ad+5v87aXLzQOnlRZWbyY49n3G+/u/vVLDrU1vJFcw7flfbtVqrx3Dx24heaNdy/Nt+am/Isfr/srRTiYVNzsNL8VJJCH3qfm+9u+9WzY+NJYZEdvM2TfxM/3a82t5prfaJLZm+b5FVPmWtG3vH8tf3Db2Rv4/wDx6rjRjKJ5lao6cj0abxw8cH7l/mVNr/Nu+WsHVvHE11sthcsWX+L+7XL/ANoXkm5C/wAkabfv1QvLp9y/PsX5tu2q+rmPtub4jc8N3/nN8k22Vfm87dtr0Lw3q20JeWyKjN9/a+7c396vFdAvkvJV3/IzP95a9G8LXkcMm9LmTcqbUVfutUylLlPa5Y/ZPXdNvoJLdJkmxNvb7vzfKv8Ae/u102g3iKySJu/haVY/m215roerPCyPbTN5jbV27vvf3q7DQdY09Llkjm/e71Xyf4vmrmlKrGI+U9N8P3kMkiO53fLu2r/FW7p8kNuqTI7B/NZ9sP8AF/vVxnhfVEVQ7uqNHuZP727+7XU2sk21XdN25lZNvy/99V52IxH2Tqp04yjHlNS8/eIHmST94+7csVZetNc3Mb7HjhibcvmSf7P92r81/wDZ4mmhdn2/M+77tZOoQwXG25SGQ/P8kO/7teRPERp/ZPQo4X4XGJzmrW6CZC80kT/di3fdkrnNW0n7K32a5Tzl3s8Xz7lX/arttSW2uI186Ft0jbVWN/u1gaxYpbwCFEZF+98vzK1Y/XPaRtc7f7N5o8x5Z4s0/wA6N/k37dzN5ny/NXmXiC1ma8dPO/g+evZPEmiu0ZkmTbL/ABqv3a848QaDMscySIy7U2v8v8NevhMRyzj7x5OKwMtzzbXN8DbHTcy/NuWuZ1i+/ct+/bGz7qpXba9pqLan7yuvyu1cL4is33M6fJ5nzV7lGp7/ACnhVKfLLQ+t/g5M7fsbxzE8jw5qP6NPXzn4LuHZk3zMDv8AkZv7tfRPwZVl/YwjWTk/8I3qOf8AvqevnDwbavJIkL7sL/47X7T4mK+UZH/2DR/9JgfV8XK+Cy//AK9L8onovh+6haYQu8jJ/A23+Kut0lkW1/fIu/c3mqz7vl/3a5bw3ZpJMqIq4rr9NjtoZFTDSmZdrMv+zX4zWj7x8jGVjSs1+yx70Thv4d33alWN2kZ/J+6/mI3+zRDawtc/Pcq7xp86r/6DWlY/vI1mdP8AVozbv4v92vPqc0TriVdszW8n7najPuikkfd97+GvKv2
nNQfR/gz4lmSZUT7EqIv+823bXr+pSQTWCbHkQt8yx7N1fP8A+29qkP8AwrtvB+murI1wtxesq/N5m75Vb/0KscLGftb/AGS6ko8p8c2ts+oXXzx16b8O/hskdv8A2lfosQX7nmL96rPwp+Ef9pSf2xqSYgjfd/vVsfFr4haP4djOiaJcruhWvaj7vvHDLmloYPjDxRZ6DZvDCMbfl+X+KvLtV1i51O586SZsfw0/Wtdn1q486Zv+A1TjjeWTatH95m0Y8sRA27mlVNxwtaGneG7i8RrmUNFDH/rZGX7tLcLa225LBPN/2mo98Ob+UzmVwv3KvaNr2q6PKv2a5YJv3eX/AAtTFjUE/aZsf7NT2MkNq29LbNKQuc2JPiB4tkj/ANG2xjbt+WKoY/EHjCST7S+q3CfwsqtTH1CTyRDsX5vm2rUtja3OoTrDIjEyfcpxjzGXNyxOh8F3H/CQLcaf4ks47kMnySMvzf8AfVc94w8DxafC2q6RIrx5+eFfvR11ENna+HdP+zWzs1zIn72T+6v92orfTWmt/wDSXWGGT77NTFGU+c8yTr+FOZc8irWtWq6fqk1sj5Ct8jVU8z2rM6h1FFFXyoAoLbeaRjgcUn3/AGxS+0A6iiijlAKKRjgcUK26iIC0UUVQBS/wfjQGK06P5uBQBs+DdHu9Z1mPT7OzeWWZ1ijRU3FpG+VV2/71ftV4F/4J3+BvhT8B/BeiXnj+1stStdDt5/EelzWSvI11N80nzfe3Krba+Lf+CBP7H9h+1f8At6eGPDviSzkm0Tw3FJ4l1lVi3L5dr80as38O6TbX7GftOfA/wR8bNcuLl7mTRdRsb/fLdWfyrdL/AA7l/wBmt6NOXJzI+czKvzVeQ+Nvjh+zL8KNDtYJ9H8yRI0/dfKqrIrfxNXlX/CpdE0+3lhh1JoYvvrDGi/99LXrfxw+E/jbQfEt14bm8QzSwqi/Zdz/ACsq/wAVeUa94V8VaOxSa83n/lky/wDs1eXWlVcruJy0Y04x0K3gjRLPTPH9qYZixEUm3Emc/Iai+OFkLjxNav5W4/YAuP8AgbUvw90i/tPG1pPezksEk3Koyv3D3qf4z6bNf69a7GYKtqudrYx87c1+uYRSl4AY5f8AUWvypGp57Do/zJN5zSBVrzn9qu6Sz+D2ozIP9c8cDbf4dzV7JZ6LNHNvdFG35XZl+aSvFv27o4dL+F+n2aO2+81mON1ZPuqvzV+EYelz4qJthOapVPGbXUP7NsYFs03pDEqv/vbap614m2/6h8Lt+bc9Y9xqD/ZfOSZv9nbWbJcvNIru+6vqua8D1Ix5ZNl3UteeZV/iP/oVYeoaxcySMiOyt/6DT7q4eBf9YrLWbeXCMv8As1jKXc0jsZeoSGS6+/uqKiT/AFx+lFaHTH4QooorMZ+kH7PP/KN1P+xL1j/0K6r836/SD9nn/lG6n/Yl6x/6FdV+b9ftfit/yJcg/wCwWP8A6TA4cJ8dT1Ciiivxg7gpVXc2z86bt+bNDLupR2AWkZc8GlpCd33KYC0UcAUUAHBFFFIq7aAFoop0XQ/Sp5gE2utJRS7RxUk8yBPvCrunrukGU3D+6tVkV+Plx81amhWvmXsSf8CoJP02/wCCAPgN4bP4pfFqaHZtgsdGtZlT+83mSL/3ztr781K4hW1eF32bX3bVr58/4JBeAZPh7+wPot/qttHFceMtcvNWuFZPm8lW8uJm/wCArXvF9N8zIm11/ut8qqv+9Xj1qjlVlEr2P2ipIsDEW0a/NGjN/ebb/tVTmZ/mTyYWeFG8pmX5lp02oom+FIdv8PzN/C1V7q6ka4MPk8bf9Yr/AC1P2TeMfeKGpW/nbP3zfu/71ZWoWaTKfveZ97zP4q244/OuFR4coqbnbf8A53VBdW8zQyTTfw/xRtu21nP2h20eXVo4rWtNn3O8KNv3/LHt+XbWPcaGvkul5tz8uyNlruJrNbpnRz/B91U+9/wKsjU9M3Wr/I27ay7Vf722r5eZeZrLb3jgtc8P+Swv97Db9yNfmrIvrfbGJkhkTsrL8u1a7W6t91v9pghXzVT51k+VlrCurSGS18lHVn3/AN37y0f3pGManL7pyTabbWcjbIWU/eVmrSEAtfDckLsJMWzkkchsgn+tTXlnbW1x/pKK0km75mXau2mFEXQXRWJX7O205xwQcc/Sv1/wPSjxBmLX/QJU/wDSqZ9fwo08TW/wP80eP6to6NbvZzRM6zf61f4d3+zXxX+1F4Rs/C/i50sLZk86VvN3fdr7zks5lmltvOVlbc27+Kvl/wDbS8FfbLdNehhwGdvu/wCzX43ga0frNuY+azCj7TC866HyzjZIJuOPm+arVuyM3nPJz/s1DJsjkZHTd/vUscaRtw9fRR2PnV8I+4bco2f+PU6GDaocbWbZUW794EdN1aAhRV3pDllT5FqeXsIYsfl7UT5lqVrhId3ko3/fVKtv5m5If++qRbcx/wCyNnzL/FQKUftFSRTN87jeW/vU+GF1+TZ/F/DVq3hTaqI/8e75kqaOHMjb0Xb/AHquOxMvM0fDlvuugiOzru/hr13wbeJp6hHRSsfzfNXmXhGFGukT5VP8FepaT4fudQjRzD5QVPvf89KvlhIUvdgeg+F/HWmnT3TyVRvuouz5q1F1ZNSVPs1rv2/Ku6vO9N8O38d8qGaT+Jm3V1+m3Ft4f0nfdXKs+z5VZvm3Ue7E55e8eKftdeG3VrfW0h27fldv71eGV9G/Hi4m8QfD+91KbbvjZW2/7NfOVKR3UZe4IwyOKWm5+7Tl+/8AlSNgpf4PxpvCr9KWgApyLtGKbUi/eCPSlsTLcsQp5kmE2/7y103h+12tvf5f96sDS4ftEjJs2LXSTSf2fpst191fK21BlLnOe8XXz3WqPDvXEPy/LWTSySeZKZH/AIvmojj81tnrWhtH3YmhoVr5lwH+b6V0sDeZu+fB/iVazdPh+zWq7E5b+Kp4ZHT92nCt9xqXwnPKRbbZIvkdNvzbaqLMkbHzH+b7qLV+P97HvR+dv/fVU5oUti00yKxZ/k3fw0c32SY/CXrO4eTYPOwy/Ltr72+M4P8AwwiwVhx4T0zBb629fnzHqLxyZfa3+zX6B/GNz/wwX5mCT/wiGlnp3/0ev1/ww/5E+ef9g0v/AEmZ9pwp/uWPf/Tp/lI+EJIfOc7/AOL+Jvu1UVvvI7tlfu1o3UHmKiPDtDL/AOPVWkh8uTzkRTt/u1+QR2Pih1oqxMUT5T/FWlZzWytsebcW/wDHay2aGQbNnzN/d+9uqeONI5hv+/IlEtgNONi0eyFMj+Kk8yHy9/RFqrHJ+8+TduX5amEM3yJDyjfeanHmDkLFvIi/xq39yjy9w853Zf8AeqBYHjff53Ozd/u1ZjuIW3v5e5W+/T5gMDXm8yR1PCx/+PVw998t0/b5q7XXJjJcSw7GVV/ib+H/AGa43U+bpuMfWpl8WhrTjyn0x/wTC+Kt18PPj3ZNE0ZS6XY/mf3v4a/oJ/4JLfFm/b9o3WvDd/Nutdc0tVXd8q+Yv8VfzHfAHxs/gD4k6V4kVG
cWl/DK6r/d3Lur+h79gnXtN1CbRPjHoOpXUdvZ3Czu0e3/AFLL/F/7LXRTqRjGSZ5GZ1pYaqp/ZPq7/grP8B4fjJ8C9Z0q003znuLCSB2V/lk3Lt2tX8p/j/wnqvw98bax4E15GS70fUZrO4Xbt+ZW/wDQa/sNuNc0r4nfD3UtGmmW5+0WEn2K8ki/ds235Wr+U3/go98MfE/w5/aw8VXPiRGE2rapNdbvK2fxba9CvaphYzj0OTBuEMT/AIjwncF++/y/w/xULM5ZUd9oaoVkeRt4jwF+5T1VJG2F2+X+9Xl8x7XwlppH3Nv+5/dr65/4Jhn/AIknjBf7tzZD/wAdmr5B3Oy/O+1v9mvr/wD4JjBho3jAnvc2WP8Avmav0fwj/wCS8wvpU/8ATcz6jgxW4jov/F/6RI8K/aFkd/j14vTZwviG6+b/ALaGuYhuk2tJNwyptSun/aGL/wDC9vGEQ/j8R3Q/8iGuVt43ZdkKfKtfHZ7/AMjvFf8AXyf/AKUz57M/ezCt/il+bL0M27B/vf3vvVI1xMuVT7jffaoFd1Vn8lS1I0yKoR32mT5f+BV5RxRiXvMdlR0+UfxrvqzDN8xm8lXl+XYtVF+aRX8nPy/dWrkJ8tvJh2nd81TI2pxhGZcw6/6SkLNtT/V76uRs7Qr5gZd3/fVVFRNu9H2t/tVcFvuX/WY3J96uf3YnoU48pat5oY1XyNyfI2/d825qtQpCu1IUw7fw7arWscPkjhm/3atWak4KTNt+7tb+GseaZ2x5uXmRP9nf7OcP935fmpV2Qr8n3vvbaSDf86J/vf71OY3PmB4YW3Knz/PXQc9YVmk8zzng+Rvlfa9TW8iRzK/2n5I0bcuymNC0IfznwG+43+zUkEky7Rv37fmfd92toxPExHx8shsk1s1uZvmXd/EqVQuoyJF2eYq7/uird1HuZdm1U/2fl+b+KqepSTbdnnb/APgH3avl5Tm5vsnPafevJMiQ8FW/1jV23h/XE8lkd9jL825f4a8u02abzF+dv95a6CzvgzB9+4158ZfzH0R7N4b8UeXHFvuV+Zf4fvf71dx4X1u2WcOkqpuX591eB+H/ABQlqu+YqpVPkZa6jRfHASZvMm3fPudW/iWolzSjoVHl+0fSXh3xNDJHGiPHsVVbds+81ddpWseZBKiJHs/56fxLXzjofxAbzBNNMzpu3RQ/3WrqtK+IlzJMjw/Id/zs38S14uKjPm5kexg4xPaZPED2zLDpupRsrNtljb5mZf71Nm1aGa9DpDDlbfDbXb7v/wAVXndr4yeScv5yn+Hcv3m/2q0rPWnvo1fZsVW+fd8vzfw14lapy+9I+nwuHv8ACdW115qx3LvsK2/zq38P+9Ve4jcK+z96qt86slVLNk8s2yQswk+/N/eqz50z/Om5VV/lXf8AeWuD20paQPTjh6X2jntctdyp+6+Vvlf5vu/7VcLrGl3MzSpbOp8ttu7/AOKr0XULX7RE0bwsgkfcn+9XMalY2saumxlddyyqsXzbq9PBS9vM8TMKMactjx3xho/l7vJ2qfNbzd33a898Sab5TPEX3BX+Rtle8a5ocLK7ujOm3b8y/wDj1ee+KPDbrumRFLLuVGb+7/dr6/C+9HU+IxUZc/wntHwstXh/ZB+yuo3Dw7qIIH1nr52+H9rDJfKl4/l/JsXd/u19M/Dm3Mf7MYt2AH/EkvgQT0yZa8C8G6XMs2/tvXYtfuniW7ZNkn/YNH/0mB7nGLf1LL7f8+l+UTsvDum+T5bP5Y2/Krf3q7Xw7Z2zWZSNJDufbukT7y1j+H4XaSOGEfO33a7LQ9NeO6Z3RSZF3f7tfjMj4uPu6Faz0+ETNsfcJN2+rEMKRwzQ2z+bM21UZn2rtrSuLN2meF4V2fd3L92orC1ht9014uyH7zSMm7btrjq04y0Z2U5cseYk8RWb+E/Ccvjm5RWWH91b7l+9J/u18q/Ha8m1XQ7m/vE+0brhXlbb8zfN/FX0N+1Jqzt/Ynh2N5vsn2fz/LVtqyfL8rV88/FCF5PBt8lh9/yv3Sr8zVth6cIxM/bSqS0+E8w8TfEKbQ/DZtdKfZuXbtjryG8tda1u9+1TCSRpG/irqm17TftEUOp8ruXzVb/0GvY/hd4+/Zp0eEP4q8K3F5L5W1FjZV2/7taR5VK8hy9rH4D5/wBN+HPiHULhE+xvhm27tlddL8ONF+H1n9v8eSeTNs/0ezX5pJG/vN/dr174hftIfD3SbO4sPg98PbW2m8rbb3l187r/ALq/3q+bvFF14h8QatLqus3M000j7mkmatfaR2gKn7WWtQl1zxN/bV4ttDttrb+COH7tQMqbWS2/76rJWN9x+RhRHJcq2yN2qNfiNuX+UvJZlW+d1ct/eqwtvDCux32t96qEM0zN8/y7fldqvW7PMy/e2/3m/iqjOXulu3td0Y8zrXcfDXQ4Lq4abZ86xMyLt/irkbE2y/O78t/DXf8Age6/s2MO/wAiM3zs1KK5SeX2m4y88P21jm81KZlTzd27dXDazrU2u6t9gs5pPJjf5F/hWuh+JniIaxeyab4c3GST7+1/lVawrfwvqXhvw9N4hubZi4X5G/u0x/DI5vxQ0J1hvI/hVQ/+9WbvX1p80jyuzzNlmfczVEy7aDoQ+iims2G49KBjqKKKXxAIq45NLRRR8QBRRRRyoBFXbS0qru5TpQy7TimAY249f4qlt4/3io/FRLwu+rOnIJJgnks5Zv4aXMiJS5UftF/waW6JeeHfi18RfGGzamqeDZrJmZFZfLh2yfe/vbmr76+OUk2jeLnvHdvKml2QMqbVVq+PP+Dea0s/g58OvGOoaq7Qy2/h+3tftEa/euriTzGj/wC/arX1d8aviJ4eutLXUrq8hfyWZoo2dV3V2U6sPZHyWKcqlY+WP2uPFmm2vjCz+23W77VB+9jh/wBn+KvFtQ8ZWF9uhO1Sqfwv/D/u1e/aY+JWleNfHxtobCREt4ttvMqbl+Zvm2tXlmqeIobXc8L/ACqrbG2fM1eLWxHNPlN4e6jsvCd79p8XQBJlK5l4Axn5TT/idOsWt2wKZLW4CnH+0a5X4TXsl14+thKQCEkxj+IbGrc+MG9vEVoI5FTZaZZm92YD9RX69gqkl9H/ABjf/QWvypBF8xl2947TN8/mlX3bm/8AQa+bf+CgGqRyr4Y0dbmRna8mnlhZ9y/d+Vq9xm1pLW1W5mbfKvyytH/F/tV8sftha1c6t8QtKtJm+W3tZGi+fd8rN96vxHCyhLERR2YOP708xuptyhPu1VuLpFxAn3vvfLRcSOqtvfj+7uqtdS+XGHSGvbl7x6luWVitqF5uXf2rPvJtu3f92rV1Nt++/wArf3apNvaPe6bl2VXulRKv/LWik2/Nmlo+E3CiiijlA/SD9nn/AJRup/2Jesf+hXVfm/X6Qfs8/wDKN1P+xL1j/wBCuq/N1G7H8K/afFZXyXIP+wWP/pMDhwnx1PUdRQw3daK/FDuCimqMrinFd3FABRRRV8qAKKC23mkVdtMBeCKXadu6hW29qAGOMrml8JMgY7jSU
m75sUtMoKXcdu2hhtXFC4b5O9Zkx3JLdXDL/tVv+HLCbUb6Kws+biaVYItv8TM23/2asGNEUAua+hP+Ccvwgs/jP+1p4F8IahCz2f8Abcd7qP7rcvk2/wC8b/0FaitUjTpyl2FGMqlWMUftB8LPDKfCn4K+CfhpDbLEmg+FLOzlb7vzeXub5f8AearGrao8m5ESMxfeXa235aZ4s1ya61y5nv8Ac4kut0S71/1bfd21zepXG75Em3H+H+9/wKvlI1faT5n1PWq0fZqxY+1TSTK7vG6r8rL/AHv7tC3xe4TyZmX+F12fK1ZbTJdQt5txk72+ValtZPKuFR9vyr8kn96vQjLml7xzxp8psxs8lps2YfZtiqOZfs6ukn+ysvz/ACtUUN00y/67c6/xL/DUUc1sqrv5dnbey/MrUc0fiOjl/lHNC/kult+7WT5m/vKtZVxax3VvK/3fkbYzVof6M2XgTcV+XduqrfQwrvnTb9751/u1VPYVSXKc1eWexm3zbmZ1bayf6vbXPajpqPm2T91tl3eZ/tV1OrfKuxAzs33Fb5WZf9muY1SN4VZz5mJG3RNu3L/wKum3LHU4+b3vdMO+LtL0YozbU8z/ANlqs8Z/s+SJ4wuY2BUdB14q/fX1tJI1tNcqrRxfJ8+2qJIFi5WTOEb5j+NfrvgnCEc7zCy1+q1P/SoH23CElLE1tf8Al2/zRxDQ7bib7NZqh835/wDppXln7Tngv/hIvBNzctCryRqz/wC6teyzKtxdBP4Vi3blTduaub8SaDYa1o89teJJ/pETJLGybvL+WvwGMpRqxcTzJU41sLyH5ma1pP8AZuoTWbx7vLb726qGHVjv+Xb/ALFegfHTwj/wi/iq5h+7ulb5dteeyN+89q+zpfvIRZ8hKPJPlZF5iLGe/wA33qt2uqBCUf5l2VR2hW+T71OVZNy7Dz96nylG5b3yNj58f3Vp7SR+cz7Mlqxo45t3lp/vbqtWt1tk2TOqlv4qqJlyovbZuyZ/2qmWR2Vdgz/C6tUEVx91P7z1PD/pDeWtUHuG14ZvktbpXdMbf4a9k8H+NIVhS2ePLRpuVVrwuzjeGZHM25v9l66bRNUurVvMfdj/AGmpR934jOpHm+E9d1bxheXV150MKqq/NtX+9WPdapqOrXBmmdid21I/7tc3H4301secW3t/Ev3Vrb0L4g+G4VSaZFkdX+dvu0+axPw/ZJPiJpd5cfDe9sPIYLJB95k+avmaWNo5WRhyvDV9jTeLPD3izwq+lWd1Gr7Gby2/vV8n+ONGl0XxNd2Uibdtw22g2oe7LlMeiiig6QopGOBxS0AKn3hUsKuxb/dqJRuNTwr90VmRLc1dBgeRlTZ/uVP4vvHhtEsy/Lffq14ZjRlVJk27v4mrn/El4L3VJJE4VflWq90iPvFGtDQ7dJJWebd/sbapRQPM42c1p6aqJMLZ+mfvVRVSX2TV/wBav31X/wBmqu37xt/k/df5KvrDHJG1Zl1vt5tnzfN/DS5oxiY+zNfS5kklG9MBvlqe8t/MjCPtcqn8P8NZek3yeds+6W/vVrwsk0Z+fYd3zNRHYqUTFuoXjuVdPut99a/QT4yyFP2AVkB5/wCEP0nn/wABq+D7q1RszI27/ar7t+O+6L/gn3J6r4Q0r+dtX694YO+U55/2DS/9JmfYcJv/AGPHr/p0/wAmfDdrfPIoSZP/ALKpWjSTbcom3a27bWHZ322b+Iqvzbm+7W1FeQSRkQzZLLX5DzcsT4zl5ZleGN4WPnJ96rS7WjDpub/e/hpVt3ZmTeq7v/HabHHPCzb/AJt3yoq/w0fEHLyj44ZIPn61ZhWaSPbs2/J8lQpHcsE2dP7tTRs63P32/wCBU5e6RERm8tvJ27mZPmajzNsbDG35PmZadNHM0iu83H8LMtRXEyeS6Tvhv71KS5g+EwdY2bpX35Lf3a5S7YSNsT/x6um1aZ41O9Np2ferl7hjIx3n+Kj/AAm1Pcn0a4+yX6TbsfNX7L/8Ek/2jE1r4A/8Il/as00iq1vdNH8rfL8y1+Lu5lO9etfaf/BJH40P4Y+KqeD7yRtmobVijVvvSf8A7Nc9enOpSlynnZ5hnXwM0j93f2c/2nIfB9mnhLx/rW3TpGVF/vQ7v4t1flv/AMF+vhDpWofErVPiL4PuY7m2s71ZUa1+ZWt5vl3V9k+NNHmttmq6PMxh2xs/z/Kzf/FV4R+1X4Bf4neHdS0G7f5NY0aRJZJt3ysvzLt/2t1edk2ZV6K+qVz4PJM0q/WFQqfZPx2kVFdvkZX/ALrU+Nk279jbm/hqbWNL1DSdSutKvE2y2c7QOv8AtK22ooY3/ubW2f8AfNe7KPQ/Q4y5o3LEapIzBOd1fYH/AATLVRo3i9lGM3Nl/wCgzV8iRqiqd4ytfXv/AATQTZo/i4KwK/aLLaR/uzV+keEqkuPcL6VP/Tcz63g124jor/F/6Szwr9oREPx58WsIW3f8JFdfN/20Ncqruql9nzf3Vrrf2g02/HjxUSvH/CRXTf8AkQ1y/l/N5ao3zPuRq+Ozu39uYpf9PJ/+lM+bzPm/tCt/il+bEhk2rvTd8zfdp7N5279xt2/xNU0UE3yps2/xfL/dohgPzP8ANhv738NeV9g4/thDE7TMd+1fvPWhZq8knyIu1fuVWjt3j++6/wB7dVm2ZCwT74ao+yaxj7+hoRnyY9nys392rlmz7hv2tt+bczVRtm3SfIm1v4GarUNvtkCXO5mX77LXPKPMenSiXY1Rt8ny7W+6qtViz+aHzH3I235lb5t1VYY3WRZoYf49taNuzs2/f8v3flT71R7stjsUuYkt0RvKmcfJs+6v8VSJavIxcXjOsKfOuz7tLHC6xo6fOf4dvzbf9mp4ftKr5zn5G/iV/vf71MwqR5veIpLXzJPkTcu7+L5adHbw/Nbfd/ubamkj8lmd02bfvfxUNb7pN6Ozf3G2bflropy+yeNiqc+a5VlRVjXcfl3Ns3VRvFRYWfYvzVpXSo2zzNrfN91X/iqheQpbyfu9u2T+Gteb3fdOWMfe9483jk2t53nNuZ/lWrVnceZGzzIw8tdu7f8AerN+0TSSI/ULVu1k3R/I7Lt/iavLj8J7cDZs7942CbF2qv8AC33f96r1neeSyzJM27+8r1gWd0/nNbOn+sXdWjaNtwkMfH3dv8VL7HKb05cx2Gk+JHk+RJpCy/drtvDupagVXZIuJPl2t/DXm/h9X85/Mh3f8D212vhu68tt8FzGjq6/eWvKxkf5T6DA+9a56N4dupvM2TXLLtTbF/d3V3Hh2J7yQv8AbN8qrtddm5WrzzQ7pFUeWi7mlX7RJIm5v+A16H4UmT90j7lfd8nlrXzWIp8sZcx9Zg5fCjsdPheSNHm4Vl+RVT5d1aCwokiw7GU/eT/ZWovDtu8ccXnD5l+bb/eroY4Zl+fKurfN5a/w15sOaM7HrcseXmObvrFIIlmtkVZF3fvJHrA1C38u3E15bfvmdmVlbd/FXY6pp0LM38C7d33N22sfWLWFYRD9mUuq/eVfvLXtYX93seHmC5tbHCa1
b7reZNi+az7fvfK1cT4i0O2mhZPJVPM+7t+bbXpfibSUtZNk0Pzt/CzbfLrmr6xhW3eG2jYyt9z+Kvq8JKPLzI+Hx0eaXvHWeDbZIfgObZwNv9lXYOemCZK8b0nSU8zZ5Klm+ZG/h2/w17l4bt1X4Sm1miCD+zrhXXsPv5ryzTNPSGIPCih2/i/h21+8+JbTybJE/wDoGj/6TA6uNLrC5e1/z6X5RNzw/pbIsdy6LFu/8d/2q63TbObc3yW+9UVd275mrnNLaGHykdN275d0f/s1dHpMiXDBLZ2bd/49/u1+Oy/unxMXyllYxeKZssiL8sqr8u3bXJeK/HGm6xcXOm6NNI0FjtWWOOX/AFjfxf8AAqPi38RoPAfh+Y2dzCb+4iZIoV/5Z/L96vO/hGZodH/tW88wtqkrPEzfLu/vNWEufnOipL3Trvj1O+seE/B3jmGZjp+tadvi+0LuaNl3Lt/2du2vCPFGpJ5MlhMnzN81er+LtSe8+C9/4G1PUma58G6tNPp27/lpbyfNtX/ZrwyS6e6U6leJuVk+Vfu7qJfETTly+6zwf4j6Fc6X4onhSNtn3kb/AHq5zzrhR99gK9X8dXmm6lqmblNzfd/3VrmtQ8BwszTWsy+Uy/eD1fLM6+Y5O31a8t2DpMwrptB+ImkC3Fh4k0fzkb780Z+asi88I3NuzbHyn8LVQm0ieKTy/MUk1oP3ep3LXHw11lv9GuFt2b+Gaib4f6VJH52m6layqz/djlrgJLaaNv8AVtt/vVNCupIuYJW/4C9HNL4ZC9mdPefD+/hdvJhU/wDA6rN4XvIVR3TarJ/frGTWtXtFBS8k3f7TU0a3qTf8vLf3vmalze6HL/MdFa2cNuyPNcqrK3zr96tqGR7zZD5jOFX+/trirXVn8xZJpuV/vVv+HfE0K6pG7/Ntf5lb+Kjm5hcvuHoOj6Homh2f2m88tJWVW8vZWV4k8TG8Y6b9jj+zN9+P+HbVu8l03XLhZodWjSST5dsj7aIfDum2sJmv7nzP7qq25v8AdojymSl/dOOl8KeGdVsZFgdoLn70S/w1xF3aTWd08EowyttNeu61oNna2ralZvHEf4l3/Mq1wHjaSw1K8a7sCvmR8S7f4qJfEbU5HOAE9Kcq7aFXbQzY4FL4TYWgNu5oYbutIq7akBaKRjgcUtXHYAoopdjelMA/g/GlaR2WlWPb99PmpfLf7j9aXKjMYoI+VDx3rtvgl4bh1zxnbSXO1orX9/KrDcrKv8NcfBD8wXNfQn7NXw5tlsV17UIZEEj7tzL8rL/drGtUjRhqc+KrezpSPsT4K/tTeMPgp8H7jwX4SSFJ9a1ePUri6b70e2Py1j/4DVKT40fFH4hak954q8W3XlRsyxQtP+72t95tteZWlvNfXCb9uyN2Tds+bb/dWuhhjMNq6JNHskdfNbZ8y14ft6tSR89GpKT94t+INavJr4F7ld6t96Nv4W/9mpVDzfJMm1t38TVWhhsLaZEuRHMytuSNl3bv96pri6kmuGS2s1iT70rNUSlyxL5TrPhOnk+NrOPaD8k2GK4P3DWj8apnXxHbwRKNzaf95ug+dqzfhUoHjqyO5WzDJtO7LY2Gr/xyuDF4js0W3Z/9Dy2JdoI3txX7dlf/ACj3jeb/AKC1+VIPhjocfJpthYyNc397uYJ86qny18k/tYapZ6h8cJrW2+VLPTo4k/8AQq+n9e1D7Pbb5n/hZU+f+H/4qvjn4uXqap8U9Zu4Yfl81URWbcy7Vr8Xy2PNX5jtwP8AF5jAl+b5KhvIXaFkRMLv3f71XI7dCux9oP8AeqvfXibWRP4fvf7Ve/yo9P8AxGRNb7c1HI22H50w1PupH3NDs3f7v8NU5pnk3I75qYxNIkH8W+iiiqlsbBSL8u40tFKIH6Qfs8/8o3U/7EvWP/Qrqvzfr9IP2ef+Ubqf9iXrH/oV1X5v1+0+K3/IlyD/ALBY/wDpMDhwnx1PUIztOXGRQ43nJpvXbTq/F47HcG7c2+ikVdtLTAKKKKzAR/umnK3bZndTX+6aFXHAoAcVY9qP4/xoY5bikKbuDxitCNmFFFL91PrWZYbG9KGb5VoVttG3a3I+Wq+EB9vvZxF8vzfxV+kH/BDX4O3lv4l8V/H54Gb+xdLj0nTm3/KtxcfNJ/5DWvzp0S0lvNQiVOBvX5ttfth/wTn+E/8AwpT9jfw3Z6lCsWoeIribW9WVdysvmfLEv/fK/wDj1eRnGI9jhH5npZPhfrOM9D2i+iSTe8zqvmJ8vy7q564VFbek0b/wsy/w/wC9WjqFwkrMjo0bK38T/My1l/aobVmfepLP83y18jha04e9I+ixWFK3lo0ium3K/wAS/LuqWOGDKb/m8v8AhptxMjYh3xosn3I9/wA1Ekc23y9igfdXb81evQlzazPFqU+WZFJqU0V0qTcIqMqMvy/99VHcalOWhdHYKr/Ky/7vzLTbqRvu/Y9vlptfc/8ArP8AarMmZLaTZbTKBvZZWb+Gu2jHm91HPL92bK6h51u00M21v7rfe/2qguNWhmV2srnczJ95vlrJtr2G1mebC7PurJJ95qikvN8b3EyMPL/5ZtXdTpzicUqnMTXEyTKby2TfIyfPu/h/2qwNTXzpnR7yPa3y/wCzVu9voZIVkdGiVUVtv8SrWNqV15as7w+Zt+Z2+7t/+KrflMoyKF02mx75kRS391lVmZf726q0eZdIbY28tE2CR1JzVHWrx5IHhhmt2RX2fu/4atWEijQvNReBE5APtmv1vwVhFZ1mHL/0C1P/AEqB9twfK+KrP/p3L80YDTTRwxrsZlVGVFj/AIW/2qxtckuLyGR9jJIvzMsfyrt21cmaG43pM7I7MrLtes7WryaOxmyVZ9jKys/zV+A+zlGem55NKtFQPjL9qyxhvtWluUfL+a33a8EmaRZG2c19EfG7R/t2qXaIkbM27ZXgeqWr29w8KH5l+X/Zr6ihHlpRPm6kuarLmM5bd3z/AHv9mrcMChc7GojVmb9y6/Kn3aGmeNtnzf7u6to+8TLm5hsjeSqpDTIVQyF/4qV1dm5p3lPtH8IpBIsQs7Kp2fN/erSs5DC2/ZuaqFu3SP7wZdtW4WSNU3/+O1UiTSsZE8xYXdd33t1dJo+n/wBqYh8ln3cKq1ws2oeQ2U5K/wAVdH4K8bPpd0iO+4fd21PxBL3Tb1D4d62rb7aGRU/grJvvC2vab99G/vLuSvXtH+IlneadDD+53fxbv4qW48TaVds/nabC6q/z7YqKcomMubmPIdL1zWdHul4YfP8AK392tTxxodt470p9VgRVvIV+bd/y0rubzw34M8SSBLbbaT/e2yVb0v4S3Nq2+0v43j/uxtTj8IlLld+U+XpoJreZ45kwV+VqZnJ612/x08G/8Ij4sZIn3JcJv/3Wrh1XbWnKdsZc0RQ27mikVdtLSKFVX3Vf0/5mEOzP8W6qEbDdnexrd8NQxzSMmzlv4amRlM1ZvJ0/RXmLsr+V8rVxrM7MXfq1dH43uBbwx6bF/F80tc/b2/mZfY2F/u0+ZFR90uaX5MJCTYN
XJIyJt43bfvfLWXHHNHMuz738O6tKKRmjG/70dHvmcpRNfTZHaHzmh+X5dtQ6pbuqibY25vut/dqzpsyCLe/z/wAPy1FqkbqvD7xt/v8A3aI/3jP4fhMmS4fbnfyv8VbmkyOzfI+Rt+7WE3yts7/xVoabePbrscUw5u5u3EMzKP7lfcnx4Tzf2AJY1bGfCOl4I+tvXwzZz+fC3z79yfxV92fGdFk/YLKBcg+EdLwP/Aev1zwud8pzv/sGl/6TM+z4V0weP/69P8pH55tG8bfK/H92rGlyfZ5EfDLt/wBqrF9a/MxRFX+7uqlI3lrv2NivyM+LjLmN9bxJI/kjbc38VSrJNNJsT/vmsSxurlWZ/lxWnZ3+66SEv87fM9A5fEX2WONw7o26mf6tfMfduqe6kmjVU37hsqHb5g+/8mzc0jPQOQ/c8ny+c2GT5VaqepL5MZLpu/2t9WZWeM/PD5x+6rf7NZ2qt5a+c52qz/dpx5+UmXLI57WLwtv4Ynf96sNvmYvWxrVx95E/i/hrGpG9PYK7/wDZv8ay+CfizomvB1RYb+Nvm+796uAq1pN09nfxzI+Nrbt1AVI88LH9I/wh8P6l8Qv2WdJ+MGm20NxYNKsEskfytGzL8skleaeMvAt/dabd6zoFy0otWZ/MjbdXD/8ABGP9paH4ofst6j8B/FuvNCjQSQXC/wAW5V/dN/s/erQ+HPxI1X4P/EK8+EXxIvPtFvHcNAl5JF8qx/7VKrk0cTR+s4de9H4j8rzfBU8vzRTjpc/Kr9sfwOngv9obxCiWzQ299dfaLWNn3N8y/M3/AH1ury+H/VHen++1fdn/AAVo+Dtm0cPxL0HTY9kN1Iktxu+aaNvustfDNrCjf6N8oVf7zVvJNwiz7nLsRDFYWLH6evmN5bpsZfu7q+v/APgmmjx6L4tV8Em4sjke6zV8kWquzNsO5t33m/u19df8E2Tu0fxa3rc2fH/AZq/RvCX/AJL3C+lT/wBNzPtuClbiSj/29/6RI8Q+Puz/AIXj4udl3ga/c5T/ALaGues7W28tZndlMn3K6347Wu743+KF2L83iG5bcf8Aroa5lYUVgmzmT5V/vV8dnvL/AG3iv+vk/wD0pnz+ZytjqzX88vzZFJFuVk6fPtZv4qfDG8ah9/zbPu7PvVI1m+7+Jtvy7qdItzHGqQo2Y/uNXly5Dh5ve1K0mRF9xgu75FqW1VFXL8fxf71KqpMrb933/m3Utvb7d0kL7t3y7aykbU5e9EuW7ozJ91P/AGWrluPMk+d2Ct8q1BZ2/wAo3ov+0tXrPfErO+5v/Hq5/dPVpyLdtHDG6QpM23b91qsiGG3xsk3Fm+dt9V7fzmwiSLtZdv8A9jVq1je4bfInG77tZ8vKdnN7vulm2berQxvsff8AMy/dqzFb+ZD+8TaqtVO3CQt86YVW3ffq1GUWFfOfPybmZf4qqP8AeOeQeWjTOk24r/Cu/wD9Bp6yPFtZ9zJs+6vzf99UkLQvJ5zx+Wnlfd/i3f7NSQySLC8O9Q7fcbfW9P3ZHjYzmlsVpPmKOm5h93b93bVC+mSNf4dsO75t1X5Fe52vMmfl+9/DuqnqUNmpZHdWRtu9fvKrV0nHGPL8R5Uv2m3/AIN2779WbX94uxw3/AabIqLIGj6VYhx5yuifK38P+1Xi8yPoIx5iza26FfMjjw7fKlaNqmZEfHy7vnqpDCkaq6SM21vvN/FV21hmkf5Pvf7VRLm+ydtGJs6KqW7KmMszfP8APXXaLJCtx5Pk/LtXczfwrXJ6Xa7sfZnzI332/u12Whx7I0y6su/5mrzcRKZ7uFj2O38P3Ft5kaI/Ej133hmaFpvs0ybfLb5Nz/NXnGiyQqyRyQsoX+9/E3+zXX6FI8zR3MN78y/wsu5m/wCBV4FT3uY+jw8pRPVvDdwY40hm+5G+1tv3q7W3jE8KXMzttk+WJa8z8P3SWsjW8KN53yyvul3K3y13fh+dJIUciP8Ady/Iv8S15vLGnL3T2Iy5oamm0LtbvMm3cq/3Kw9XtIZVR32n5fmkX5drV0Sf6n5EVir7tu75mrO1KztmWR96/Km5vk+Vfmr1MJ73unkYzlOLvtPtriTznSRvMRt6yfeWsTUNDtre1kSGTe3/ADzX5q6rUtPuZpGfzl+X+FnrEu2dGNnBDv8A4UZV2qu7/ar6bCRltFnxWYe9zOxpaZbKngk2rDj7JKDg+u6vOv7L+zq9ym0Q7ti7q9Mso3fww0ccQDGCQKue/wA2K4u40+aRdkyNG2zcyr/DX774lTtlGRf9g0f/AEmB18YQ5sFgP+vS/KJm6P50MSQwt80n3dvy1f1jxJbeGdMa/mudrqn7r/ab/ZqnfWv2PF4qM38TfJ92vKPil44fULoWENyxSNPlXf8Aw1+Pz+I+Epx98wfFniDUvGXjTfePubdti2/N8v8AFXex6lDoun6H9js1kSO/aK6VU/1e5fl/4DXGeCbWG3sTquxvNm/1Uezd8taOoeJLO10G60q5dkmuIN1qu/5lkX7rUpR5Y8pfNzfCHxU1qw0fxBBrdy7Q2mof6LqNuybo9u75Wb/0GvFvirqiaDeTW2muzWDfNZf7K/71d54q16z8UeG5X1iFsN+7aPd8zMv3q8Z8UagmszS6VNNJ/ovyru/i/u1n7ppGP8xyt1cTSSPc3LttZ/4arWfiLU9LVw/zQ/3m/ho1C6muJnt/ubf4Veq1vIkjPbTfP5ny1fLzGxsQ69DeQ75nU/3qq3C2cm14UVNtYOoWt5pswTe3ltSQ6xJ8qSfrSDl/lNCSFAweb/gG2qupahDBH5EMK/7bfxU2S6RkZ8sStUJ980hffndVy+EqPvfERySNI2+kyGHBWpI7V2Vn9qlW12rh0o5UXzRKx+4PrTo5HVg6NytTpa+YSWX/AIDUn2FF+4agRu+GfEkc0KWV583zfJu/hre+y6k0zPp82V+9XBw27rcfI+K7zwnqTxwoknzv/C1BlL+6QXi63dK9hczMEZP4lrNTwy9vvkfaVVf4l+9Xd6hqVh5av5G41SuLVLiT9ynzN/DVxjMiXus8jnjkinaKRMYblabXXeOPCr731K3++v3465Gj/EdUZcwUUUUe+UFFFFHMgClCljhaWLofpTmyozioJluKu9k+ROf4mpHV2P8AdajG1dvyt/srVizs5riZIURvm/8AQqrmROx0/wALfh5qXjzxFDpVnDuXer3Df3Vr6+8H+B007T4dBsvM8mPb8v3d1Zv7IPwt0HwP4Q/tjxPpM1xqGobZfMX7sK/wq1ewx+KPDEcM8KabHskXam5f/QWrxMVW9tK0Tw8RW9tU5bnLNoOsRKYXSOLa/wArf3f9qpbXw7DbyKb+WR3/AI/9n/vmtS81rSmkRHePYy7nX+Hbu+WmyXln5zbPlK7m3L/drzpfvI2OL3Y+6VLeGzhVvJhWU793zf8AoNQNcuN3nbd0n3vL+7Vm4VJmZzMyKvzbt/zNUDKnzOY127t21azlyx3kXGXue8dP8I5JpvH9tK+zYUl2gdR8jVc+P8xi8QWvlJukNiNo/wCBtV
H4PFG8fW0kM8mDBIGjZePuGtH46zRQeKbKWcttWxyAvc72r96y3ll9HvGf9ha/KkF1Y4G38OJNG9/fuyBn+Ztv8VfEvjG8e88fa9eRupaTVptu1dv8VfaXjjxVZ+G/D9zf/bGxJEzvG0v+rbbXxHDsvNQurxPmWaeSRmb/AGmr8ey+MT0sD8LZA0jtu86b73/jtQyWPmR74d2f9pq01sxJGziHb/D9ynR6bMI9mF/4FXsnb8PKYjaTCy797fLWZqNktv8AMiY9q7D+x3klDuMLs3bW/irH8Uaa8No03b+GlKPKXTlzHN0UUVB0Cb19aWiitAP0g/Z5/wCUbqf9iXrH/oV1X5v1+kH7PP8AyjdT/sS9Y/8AQrqvzeZscCv2fxW/5EuQf9gsf/SYHDhPjqeotFFFfi/947gooopgFFFIzeiVmA5RuNCfeFNf7ppY8pQAqv8ALx+FJIibsRmlYfNj1pKqQCLv705/vGkop8qAKXc8h+akUN61JCuXAc/epe6Znr/7F/wUufjf8efDfw9SFtmralGkr/wrGrbpP/Ha/cXVobC12aboO2Gxt4o7eyhVfljjjXaq/wDjtfn7/wAEX/gz9jXWvjfqulRuLGBtO06SZNv7yT7zbv7yrX3tcSTRwmYwN/u/3a+Bz3FyqYzk+yj9A4cy/wBnhPbS+0Zd5dFf9GmdX3S7E3N826s+4uHjLQ2ybv4n3fdq1fTPCsyJCwT5WfcnzbqqTL5jOmyOXdt/3vu/drxvby5/7p6lbCxqcyZPZw/MP9Tv/wDQastH9li/f7RG3937y1nxslvMieS33dztv/iq4l1ugX7TDtT7zru+7XbTx/tDyamB5djPvvlaO58jcGfbtZvmZa5y8jtlhUuV3+a3y7/vbm+7urqdcuraSMzTP/HtXb/DXK6tdQq0mLzlfuLs/wDHq97A4jueLjsM47le6kh+z7+rRv8AKq/w/wCzVZtReONv9Zuk+/UF5qVszF3diknzI38LLWHfaw8dx5KO2N+1vl+Va9unUj/MeBUjOJfvtWCyeT5zI+z/AJ6/NWJq2rbiYQiq+7d/rdytWTNfP5zfafl3S7dq/d21l3195d2yfaV/6ZK3/wAVWr2MeYtaheHb5lsixuy/vdvzbq2tJmz4OE20nFtIcMc5xurhptYDbUuU2M275V+V91dnoMsZ8BiQfdW1lHJ7AsP6V+w+DEEs5x9v+gWp/wClQPs+C5Xxlf8A69y/NHHXF8kMgjAyqqzP/stVC+urmPTbl3TzX8r/AIF/vNTI7yG4mMz/ACrJubav3Y6y/Fl4lnodzfpc7HZGX5X2/L/dr8QnTjz/AAnzNOtywPl79orxYmizSwr/AK64VlRm/wCWf+7XjGqR/ao47nfncm7dW9+0J4o/tzxlJDCu2KNvk+euc0u4e403yX/hr1+blhGxwy973ilGvlMU2fx0SRjdvd8VbktNqPsRmP8AHt/hqGRfupsz/tVp7nxEkDfu8fxUsMjtJv8AO+X+7UEkjqp7/PUTSOq1BUY8xr6bG810uybhv4a2JtLd/wDVpXPaXefZ5ld3xXVaXrltLb+X90/3qr7RnLYzbjQn+d0Rv+BVW/s28hdc7d/+zXSeYny/xf7NTR29sy/6j5lp8qDmMLTdU1iz2p5zKyv8ldNo/jbVYfkmDAs/zs3zVnNBCy79i/K/3Wpyy+T9yH733KOWURSkd7peoWesR+TdN5TyLteRflauk0qy1u2kjis79pQybfv/AC/8Cry/T/tPnJ/e+9uZq9b8B6x/Y+hvquq/c2/Kv/stHwwJly83Mef/ALSvha5m0i21sbWMPyy7W+7/AL1eG19A+NvFln4j02/hvHzDMrLEv3tteATBFmfZ93dT5uY3pjaKKKDYlgP7xQ4rpfDS/Z5Xnd9u1Nz7f4a5+xh3Scjd/erpppIdL8OvKj/Oy7drLU/FIwlvoc3rF9/aWqS3Mjs3bNWdJk8hPJcKwb5ttZ0a/NvFaenw/aGXen/AafxDkJdKizDZDt/2t1TJvjZf92ri6Wkm7ftbbS/2eVXGzHyVXwkcyHWd5/y28j733lqSZluY2hdGVKgt4fL++mV+7uVasrC6t8ny1HL9kPd5inJp6s2+Onra+WRvmqeON5ptj/LUv2fcuxIfm/vNT+EYiK8b7Iptvz/ItfffxpkC/sEGQN/zKOlkH/wHr4E+zuuU+Yn/ANBr75+NikfsCldhJHhHShgfW3r9f8MP+RTnn/YNL/0mZ9hwp/ueP/69P8pHwVc3HmTeS6bv92qzKm0O6NtWrKo64kmjxt/u1Ktv5keOu75ttfkMvePjI+6VIYfMO/qKuW8yWr/6nll/75oht1jbYE+XdTVh23D/ACMF/wBqlzIcpcxM19959/zbPu05pn8vYlRRRw/65x937tS/IzB3RlH96q5Y/ET78Szbtt43/wC1/wACrN1Ro2V/kZt3y7anbzlUun3W6bWrPvr4Q2+zf/F/FSBRly2MHVJELkbPutVCb5tz7PvVevpEkbekP3qz5Gz/AANQbQG1NbofMXZ96oMfvAuKt29u7SeZ5O6lzIuW59k/8Et/jvN8IfiNaSzXMf8ApE+147hvlZv4a+0/2kfiFpHxO+IVvr2laCti8lmv2xd3yNcf3lr8r/hZdTaRHFqlg7QyRvueRU+avuv9m39qnwl8TtDtfA3xIeG2v7WLZa3UkSqzNXtZLj6WCr+/8Mj5DiDKpY+HND4j2ib4C+Lfjt+zbqr6xon2qw2TW9vMqs22RVb5Wr8qNa8O3/hnWrvQdSt/Jns52ieH723a1fv9/wAE4Lyw0vUPEHwP8c38b6R4os9thNIq7d23crK1fkh/wVO/Z3f9n/8AbC8SaPb2fl2GoXTTRMv/AI83/Aq681p4ed50jmya+H5KUtP8z5xjV2kZIfl/3a+tf+Cb8ax6L4rC8j7RZ8/8Bmr5PtY3yHL4WvrD/gm/s/sTxV5aYH2iz5PU/LNX1XhN/wAl5hfSp/6bmfqXBcubiSj/ANvf+kSPH/jox/4XT4phdWA/t65ZX/u/vDXNeX+8TyeS33Gk/wDHq6X46KX+N3ikAKSNdudo/wC2hrmbeSWEbETb/CzN/DXx2ef8jvFf9fJ/+lM+czPTMK3+KX5smaP94d78L/yzqKZf33nI+x/4lo+eSTyelOaP94H35aRNvy15EY8pzS+EZ5PmL5z+Wdz0+1j8uOSZPlLP96nRqkmLZ4fu/fVf4aFLxx5Taqfx7qXxRF7seUnhj8pw/nfe+9V9bvy32Q3OF/2UrP8AOdV2I6/N8u6pI7h4/wDRn+f+F/n+9WMonqUfdgasMnkso2fP/A1WfOeXY/k8bvnjX+GsyG4+YR79qr/eepIWQqyJ5gMn/LSN/u1h9s64yNX9yqu7ybwrfd+7Uy3HnSMU3Afd2t92syGTcu9JtwZ/96rDXD+c6Ptx975aOb7JFSXMi5DNMriF0+6/zsv3ammkSSP7m1N38X3lqrDMi5jmPLL92rMcz7i/7tg3y/NXRH+U8rEcvLoElvJJE3+kfKv91KgvFT5/J3bWS
rZt3+zo6Pv3fwx/w1XuGfbvNzt3ffVlrWP8qPN+H4jziS38lvs2/wC98u3Z/FT7ddzYdPm/2qmmDyXXz7nH8bbP4v71TLbozoifc+9uavO9n7vvH1FGUZFm1tHhUb0q6tvLEyYT73/jtJZ2qSRpv4b7y7q1bO1hkXycNt/ibbXFU54nsYenzajrGFFy6csy/Pt/vV02m27QwpNsUs38X92su1tUj2wpIv8Ad8xvlWtnTfkXY7/xfNtrzMRKUtj2sPTtD3jf0ffeTRpC7blX/gK12uk3Dqrwv+7Rvm8z/ZrjNHj8pjcv8is+1Nr/AHq6XSrh2aJNi5b5WXf8qrXk1pc0j0MPH3dTuPD+oGNkd412qnySfxV2+i6l5LJvlVopPm8z7rV5npvnW7LM7sqb/wCH7q11mk6o7SDZ8qL/AHk3bq8+VP3uU9WnU5oHpFjqFtIrP1SN/mk+781OuLx7iMpD99lVmjkSub0nVEa4/wBGufkX+Fl/hrQbWkuoRDPc52/eaP8Air1cLyR908nGe0KOsN5as8fzyt8rbfvVgTR3NvdSO6Llfm2s/wAv/fNauuXEKRsm/ak33Grm7i4fznhR8L/Fur6DDxjGPMj5XHRkdLZO0vhws7qSYHBK9O4rlNQeHy1+zbm+7v8AmrpdMkYeEPNVcH7LIQD+Nedav4gfT/MmmmVEW33PG3y7a/c/E+dsnyFr/oGj/wCkwPQ4sV8Hgb/8+1+UTI+J3ij+zdNTQoVZp5FZmk/2a8Zjs5ta1x7Z7aN2kf5ZGbayrV/UvGVz4m1S7ufOzE3zW+5/ur/dqxoumx28L39y7O+35G/551+UUY+6fBS+PmiaV00el6fsRPu2+1FVvu153qF9NqV00zyeWsabUZvmrd8SapczXImtpv3XlbW/vVwPjLxFNbw/Y4PkLbmZvvUv8IfDEg8aa88+pBNNmZ45G/f7V+61ch4wuIWmZLN95X5dyptqaG4CwmF9z+Z/t/drDuoprC7aG8m2/eZG3U+VGvumJq+yS3+0pDsm31jCZxJvR/m37q2bhrm6mZ/3bNvrKuoEhbzk/wCBKtHwmkYwN+3WHXNNRJpl3Knz/LXP6lphs3x/dq7o+rJHebPJ+X7u6ta80+G8h3w/NL/GtEthfCcms0ynDpuX+61TwTQ/LvRfl+bbUmp6bNbybmh2lv4aqbnjYj7rKtLlLi+Y0LeRC2/Zj/ZpfLRVV3+9Wesz7Vbdt/2hUv2p41+/u21JPKX1lRYfkeopJ4Wb7nH3qprcbv4G25p3mfMU/wDHq0KLPmJ5n31Vv/Qq3dC1B4WTY+0qtcyq7vnT+GrtrcvHJ/wDa9TzGfunWzao80yeYdqfera0uZJFb5Pu/c3f3a4+G4eTYm/dtrs/D8Pmafv3qZPvfN/dp/4SZDtUt7aS1Kb12f7VeceMPDK6XMt3bPuWT+Fa7LxRq0Ufyw7vlTa9c1Oj6pb+e4bbt20xxlynKUUs0bwzMjpgrSUHSFFFKoy3NKWwDo8fwVIsacv2qFW29qmjhdvv8/3afL9ozGLHuY8fLXuf7LXwQfxtqEfirUrZvsVnKuxWT/WSV518NPh7ceMdWFu7+VbxsrXEzf3f9mvqf4P65Z+D0l8N2yKLaNldVZPmas6kvZnBjK3ucsT6T8M+B9N0nQXcW3nSyRbtu35VWsW48J2GqaeiJprW5+9KsyVf8P8AjKfVvDdvMkzErAq7o2+9838VLdatquqQmF7/AHvu+Xy02t/u158qdOOi2PI5eWJyN94NtmZprBG/i/h+9WXN4fubWT5/MSZk2vt+bbXYw6s9vdPD9jZ1Vfmk/i2/xUNqGmyXzH7G0x3ruaP+7XP9XpSYbHEfPAsqO7Hc2x/M+9Un2tFl8lN27Z95q6S+sdHuL5U2SKGfCKyfMtMutC0pXb99t2/M/wDs1wVsLLnL5eUf8HXup/HdrIV+RVlB/wC/bVr/AByis18Q2l5dTqoWwIw/T7zc0z4avp9v43htYDGkjCQnauDJ8hri/wBsvxJe6b4m07SLVwBNpRc+o/eMM/pX7tlaUPo+4tf9Ra/KkONPmlyniH7RXjhNQ0u5ttKdhCsTbG/i214V4V0vdpaXOyRvnVV+eu8+K0k1v4XuIZgrSySqqNu+7WF4RtdukpC+0D7rfw1+R4GnyxZ6mHjy0hI7JFPkum4/7P8AFT00lJJvnRf+BPWs0KblgHzbaoSW80k29ywNehzS2NY+7rIhuLP7sezlf+BVz/jK2hbSZnSHdtT/AL5rr7PZIp/c43PtrH8eafDHo92+xtixMybaPekX9o8nf7ppaKKZ1BRQrIV96KiMQP0g/Z7/AOUbif8AYl6x/wChXVfm/X6Qfs+cf8E21x/0Jes/+hXVfm8rbq/avFb/AJEuQf8AYLH/ANJgcOE+Op6i0UUV+MHcIfmbfmlDbuaF2c7aRV20viARVw3PpTqKKXwgFKvQ/SkoqQE37mNLRSMu6r+GQC0Ab+1FLyppgCr8xra8DaNNrXiC20yC2aZ2lXbGv8XzVip94V9K/wDBM/4JXPxd/aO0W2dF+zWc/wBovWk+6sa/Nub/AGa48XWjh8PKb6GmHoyr4iMP5j9O/wBkr4V2vwh+AOg+D7Z/Ku5rL7Vfq3/LOSRf7v8Au16ZJYTRtFvdWeP5nk37d1TtY/6Y+91fa+1WVPl2/wCzU50/7Ux3pIrruVm/vf7tfk+JxXtsRKUj9mwuHjh8NGPYwptPudqfaEZf3rb2j/iX+GqFxptzbskdskjf34d/zf71dra6enmGZIV/3W+61V9W8PvI2/ZM3mRN8y/wqv8ADurnVb3kE8PS5eZnE/Z3hk2dW3/O3+zUkk025Sibm/jaRflkro4fDcPkh3h8ot/e/iWs28sfLXyU+8sW7d/Eta0anN7xy+x5o80jmtUmdeX2q/3lVfmX/drk9WvLP7cIQ/kzSbldfu11niJYbhvvsj7dybk+b5a4XxNdQzaeZHjUFX+ZtnzN/tV7uWytK72PCzDD+7oZeoX6XCv9jdl8t9vmLWRdXVzMzO9yxMb7k+fbU91ePj7mFVNqqv8Ae/vVzepas8MzTecu5l2Ov8K19Lh6nQ+NxlPlkWNUmh8tn+b5U3Osf3t396ub1a5xb+dvYlt3zN/Ey1JqXiBJo3tkRS0ifejasC+1yGRt6bmk2/xP8terT5pcp5FTkJr7VHmjjmh2tti+9/Etej+GcN8L1/e7wbCb589fv140upQyfcdt3+y/y1654PlVfg0JnyoGnXJOeoGZK/afBtWzjHf9g1T/ANKgfW8Eu+PxD/6dS/OJ5nb3PmbPs025ZpW+X+7XLfFnVJofDNz9jfbui2/7taUN4beM/eUtu2bf/Qq8++KXiqG3aTTZtrvMm7y2/wB2vxrllzHx8ZSkfKPjpXk1y4uX++0rfM1UNGvPs0zF/utWp423yaxK7/cZ91c+spinVk+YK+6uiMfd5TSJ0/kyRq7+Tjd/erKvpHjwj7sf7NaEN4l1Ypvm+9/drMvPm3OjsW/u1X90jl98pyNlvdqjkx/B92pJJHZgmxai
2nd8lSaRBt7SeY74NWLXUJoW3+c23+7UTQzMN/3qU27r8nf/AGqr/EBuWHih0Ub03D/arb0/xG8jNCm1d1cRHG7fJsartr5sbLNsb/dpc3KTI7LzPtDb9iq33dtT2VruzNlX/wBmsfR7xPIX52P+9W3p94gbyUT738S1rzc0TE0/Dun/AG64+zbNnzqq7v7tdf8AED+0o9JttEtrWQJDFulb/wBBrm/Cd3DHqUKXO1Pm2uzf71eqXzaa2jtqqQ/bC0Sqys9PmI5o854Rr032e1l+TCbPmXbXnEzFpi+zG6vc/H8fhvVrVLaGwktpGXd5f3q8e8ReH5tKuN6Qt5TfNUfD8RtTlcyqVWfdspKdCqCRd+6g6TX8O2vmP8g5/iq14wmEccNmlzuRfm21P4YtkjUzO642bvu1h65fPeX7vvVlZ/vLS+2Y8vNMpk+Y1aOnzTx252P81Z8Y/eYetnSVhWNhs/4E1QEi9pd472phd97tTdSmubfds+cVFbqnmHyX+Zal1L99OIUdt+z/AIDQR8PvIr6fq1zcN5P3f4fmq/dXiW8bPs2/w7lqO3sYbWMyVHfMkkJhfk/e+WtBc3vjrfUt0iu8y4b+GtixTdDLNC7FFrnbfT3umXCfdrqtBmezt3R0UIyfMq0B8RmyX32X3VvvV96/Gtt/7AxdCBnwhpZH/kvXwpqljbM29I2279v3a+7PjXb7v2CDbK2P+KS0pQfxt6/X/DHXKc7/AOwaX/pMz7LhR3weP/69P8pHwGusJ5nlu+dv96tGHe8P2npu+5WZb6SjNs+Ulfv/AN2tGO4e2jCPtx93bX5DKMYyufG/ZJbfevM0O8f3qkkWFmR3TBk+Wq011OJN6Ju3f3f4aiWaaS4SF0YBv4qQSjyli4t/9tg23/vmq8av5e/zmI/jp/2h1uPv/dfbTG+VVTfuT+81L3ugvd5hJGfOx5lP/stZ2qKqyfc3bf4lrRkt0uN2zctMbQ55ov4f71EolnNzWbtJ8nA/vVCbGZ12bN3y/wANdP8A8I/5Kqj/APA6kbwn5S74X4b7v+zTjEz5uU5C006a4uvISNifate80+bT1jieHYy/f3Vu/D/Rba68eQ2Tur/Oqv8A3a9C/ah1rwR4i1fw94a8B+A4dGHh/S2ttW1D7b5rapcM27zP9lVX7q1nKITqX5TA8G2v/EjV/vf31/urWnazzaTcLqtm+2aP/VN/dqv4NV5ND2bMbX2tuq9cRyRtKfJ+X7u5a3p7GNSMZaH2D+xb+3NqVjJZ+D/G2vNaTQp/oGpebtbd/Cq12/8AwVO0HVfjj8O2+MupaU02o6TErPNbxf66Pb95mr4A0+6vNJukubZ2/c/Mu371fUfwN/bAfxF8M9S+Dnj+/jZ7rTWt4rq63bWX/a/2q1jWnS0+ycFXDxn7/U+R/L8ldnzff/iSvq7/AIJzNnRvFKkYIns8/wDfM1fMOs6b9h1i5tra5V0hnZFZX3Ky7q+n/wDgnTn+yvFmQB/pNn0/3Zq/RvCb/kvML6VP/Tcz7bgdy/1ho3/vf+kSPG/jkUT42+KlMTBv7duWV/X94a5qO385ld41zv3bv71dL8dlQ/GfxSWbaf7eufm9P3hrnY1/eNs+6v8Adevjs8/5HeK/6+T/APSmfO5jJf2hW/xy/NiBXW4EMnyt/dX5ql2pI3k7GLbfu79u2ntC7R74H27fu0ohLQ7P3j7fvfJ8zV4/2DkjKW8iNdm5k37fm+9UCypCVRPm+fazbNy1buFQ7odm1tm5Vaq7RC3dnfzAI/4mquZBH4rC+Z5f+kiH5lptoySKuz+9/FUUzbf3Xk//ABNSbdtwyJJt3JtrGR3x2LMNw/2dkeDf821G3/dqeOaHaPn+X+CqTFNqu7t8y7akVkaFpEdW/uVly/aOqPOaX2tI4fJ6bl/herNjL5kaTP8AN8rbt1ZUf+qTfyV+bdVyN/JZXR2X5Nv+zS5UKUpGtbSecud/3f4f71WLdtyvNMija/y7X3Vm2s27Y7/LuT5NtXbObazO5VGb5mVnreJx1uXoaFjMVVdj7n2bVjX5aZcrBsKOjMZPlpkbAyIkKYXZu3Sfw0jMis2987fl8tfu/wC9V/4Tg92XunGX1r5LtM+7bu/v1LYrvj87yVbd9z/aWrGqRp882/I2fdp1vHtVY1Tjb8jKlR7GfKe3Rlyk9rbpM2Xh/h/75rVtWeFl/fbl/u1ShjeO3/cup/2a0bVfm2eTtG2vPrUZRPawtT/wIu2Me5Vab/lp/Ey/MtaOm3HnS7PJj+X7q/d3L/8AFVm29ykbF96/991dtZEutjwpsMnzfc+XdXj4inL7J7NOtKUuVyOg02R45g+xSzP/ABfwrXRabfJHCiI+1tm7d/8AFVyVi32eOKa5Crt3KzK33mrXjvNuJpLnai/NuVP9mvJqe9I9Gm4xOxtbra2938zcyq67/wCGug0vUPs00XnQ7k3bn3VxOl655bLv/wBIWRP3rK+1l+X5Vrd0nUplji2PDjeq7ZG+7XLKPKd8akTvtJ1GG3YvN0/ur/tVpLq1hG0iJtfy4vnk3bf4ttcTp+uOZGdEVjGjNt3/ADNWjJqkMLI7p/rE3fe+Wrw/PGRjiJQkaeuXEP2hEuZtiNKyfN93/erntQ1K2hm8mN8Ov8Wz5WqXWL+Z42hfaRvVkZvm/wDHqwdUvEW382ZtzKny7vvV7uFre57x8xi6fNJyPSdHS4vvAnlWjhpZbSVYiG/iO4Dn615V4p+E3xVutFns9F0dTLcjbMXvIhuHrktVnwz8UfEPhG1XSozHdCeTdELndtiz1AwRgd8Vz/iH9srxRp2uTadpmhaVJDDN5bSSCTJx94jD8iv6Mnn/AIZ8WZNl9PNqtenVw9KNO0EraJJu/LK9+W620eup62MxvDWY4SgsbOcZU4qPur08n2OftP2YvjQlwh/sC3jVW6m/iPHpw1dHqfwG+J4tvI07w/Gw27SrXsQ4/wC+qvWX7VPjCWyS6utA0zdIcqsaScD8XqjqX7YPi+0lkih8O6XlO8iyf/F1z/UfBiMr/WsT9y/+VnlQocD30rVfw/8AkTlLz9mH44OS0XhuMgggKmpQjb+b9K5PWP2Nv2h7243x+DoWG/duOrW4/wDZ66rVv2//AIj6fN5UPg/Q5CPvLtmyP/IlYdx/wUp+J0RITwR4f47Ms/8A8cpfUfBff61ifuX/AMrGqPA3/P2r9y/+ROef9iP9pBFaRPA0DMynC/2xbfL/AOP1jX37Bf7UupzNLP4KtVI+4TrNsf8A2euwX/gpn8WiQT4E8OYPQ7Lj/wCO0y6/4KefFS3GR4F8On5f7k/3v+/lV9T8GJe99axP3L/5WEcNwN0q1fw/+ROGb9gD9qcWjQjwBaFy2Q39tWv/AMcqhP8A8E8P2ry+6P4f2h/7jlr/APHK78f8FQ/jGyGT/hAPDIA6gpcZP/kWqkv/AAVV+MiqHT4eeGcHqClxkf8AkWo+oeC//QVifuX/AMrNPYcEW/i1fu/+1OKj/wCCdv7WET7ovAVoM/8AUbtfl/8AIlbuhfsHftRwZGo
eA7RQRgkazbE/pJWmf+CrnxnySvw88LlR32XP/wAdqWD/AIKpfGl0WWb4deGVRjgER3H/AMdp/UfBeOv1rE/cv/lZLocDf8/av3f/AGpR1X9gH9oS6QmHwbakj7v/ABNLcZ/8frmr/wD4JzftTOxMPge0ceg1q2H83r1jwb/wUs8f+IZxbaj4O0GFs4IQTc/nJXW6l+2t8Tre386x8MaDJu+6WSbA+v7yksD4LS0WKxP3L/5WQqPA1P3vbVfuX/yJ85r/AME6f2smTa3gG0GemdctTt/8iUi/8E5f2slBA8CWgz6a5a//AByvVPEP/BSP446KzLH8PvDL7Wxylx/8drAk/wCCrnxpiQs/w78Lgjtsuf8A47Q8v8F4/wDMVifuX/ys0jS4HltVq/d/9qcYn/BOj9q8gq/gG2A3ZGNctf8A45SJ/wAE6f2s1BUeAbTP95tctf8A45XZD/grD8Zud3w58MDH+xcf/HaVv+Cr/wAZVfb/AMK88L/98XP/AMdo+o+C9v8AesT9y/8AlZXsOCf+ftX7l/8AInIJ/wAE7v2shFg+A7UMPu41u1/+OVLD/wAE8v2r1H7zwHabv7w1u1/+OV1p/wCCrXxmwGHw98L4P+xc/wDx2g/8FW/jKOB8PPDBPpsuP/jtH9n+C/8A0FYn7l/8rEqHBEdqtX7l/wDInP2f7AH7VEGC/gW1POcf21a//HK6HT/2JP2l4bcRT+B7ZMDGI9Zt/wD4uprT/gqh8Y7ghH+HfhoMfRLj/wCO1dP/AAVD+KkcZkn8DeGxt64W45/8i01gfBf/AKCsT9y/+Vk+w4H/AOftX7l/8icxqf7Bv7UF1K0ieBrZjuyrDWbYf+1Kji/YE/acEarJ4HthtGcLrNt97/vuunt/+CoPxjuX/d/D/wANY7jZcZ/9G1dP/BTL4rKGZ/BHhtQq5O5Ljn2/1vWj6j4L2/3rE/cv/lZLocC/8/av3L/5E8w1T/gnV+1bcXHnQeArQ7upGt2o/nJVX/h3J+1r/wBE+tP/AAeWv/xyvSj/AMFRvjC0vlxfDzw37FluMf8Ao2lvf+CoHxnjtzPZfD/wyxX7yOlxn/0bS+p+C8v+YrE/cv8A5Waxo8E/8/av3f8A2p5r/wAO4/2tP+hAtP8AweWv/wAco/4dx/taf9CBaf8Ag8tf/jldr/w9j+NP/ROfC/8A3xc//HaP+Hsfxp/6Jz4X/wC+Ln/47VfUfBj/AKCsT9y/+Vl+w4K/5+1fuX/yJxcf/BOb9rQct8PrP/weWv8A8cq7pn/BOb9qKS7jXUPBNtFGWXfINatjt/APXoHhL/gpj8evF+qx6Rpvw28MvI/UrHcYH/kWvXtG/av+ItzGBqfh3RVkC/P5SSgE+gy5rKdDwUpL3sXifuX/AMrOar/qJD3ZVqv3f/ann/hf9jP4teGNJj0y08K2o2jMji/h+Zv++q11/Zg+M0Uonh8NQqw6bdQhx/6FXZN+1Z4xTIbQdK3B9pBEn/xdMm/au8dJsRfDOlbmGcEydP8Avqud4bwQlvi8T9y/+VnO6PAL3rVfuX/yJ1nw18AeONC0RtM1/SFVpBkk3EbAH8GroYvBuqrIZ2i+cH5DuXgfnXMfCv47+KPHN3NbapoVqoiAO+zjfHP+8xrsZfGOpqp2WkJbOVVsjK/3utS8B4HdcXifuX/ys5o4bw8V0q1b7l/8gcpqHw18bx3sk2nxKyOrLhJlThvqaSDwL8QbcqsmkrIsabV2XSLn9a27/wCJup2dzHGLCBkkGMhWyD+dE3xN1NWXyLO2fPVBu3fhzWNTKvA7ri8V9y/+VFPCeH0d61b7l/8AIHOyeAfiXI27+yo1bbgMLiPj/wAeqpJ8OvinNGI20CDcFI3tdx//ABVdM/xS12P5ZbSyRiMpuD4P/j1D/Fy5t4Q86WjOekcatz+OamOWeBdv97xX3L/5UDwnh7/z+rfcv/kDO+Gfws8WeHfG0HiTxAwZIg4yJlOMxlegPqa539qj4NfEb4neMdN1PwXo8dxbQab5M8pu442VvMc4w5GeCK7zwf8AEzUfEniaHRrjT4EhmD4eIMWBCFuTnA6VoeP9R+KOnTrH8PfClpqCG33NJd3CpiTJ+XBde2Ofev1nJci4FzXw1r4HLnia2D9veXLByre0Sg7KMab91Llb917vUaoeHqldVqv3L/5A+PviL+xb+0Z4hhtrHSvBkEkUcu+RpdXtwT+b0ul/sU/tFWcYR/CNsNn3R/atv/8AF17rcfED9usayLa2/Z+0A2eObhtXhz+X2rP6VqWvjL9sJ3H2r4M6Ii98alHn/wBKK+fo+HXA8YWjh8x+dCf/AMpOlU+Abfx6n4f/ACJ4Kv7Gn7QDgmTwvbKSMfLqUHyj/vurQ/Y0+NJCRt4TtgqptyNRhz/6FXuq+Lv2tyxDfB/RQAeD/aEfI/8AAipovFf7Ve0mb4S6PnPAW/j6f9/60/4h3wT/ANA+Yf8Agif/AMpE4cAPevV/D/5E+fj+xj8b443SDwpD935M6nB/8XWP4y/Yn/aP1TQJ7TS/B1u08yBdp1e3GM9eS9fTg8VftS7xn4T6Tgrk/wCnx8H0/wBfWV4x8cftoWGkmfwZ8DdEvrzeAIZ9UhVcdzk3K/zo/wCIecE2/wB3zD/wRP8A+UlRhwCmrV6n4f8AyJ8Z/wDDuP8Aa0/6EC0/8Hlr/wDHKP8Ah3H+1p/0IFp/4PLX/wCOV9P/APC0/wDgpZ/0a14X/wDB7b//ACbR/wALT/4KWf8ARrXhf/we2/8A8m0v+Ie8E/8AQPmP/gif/wApOj/jBP8An/U/D/5E+YP+Hcf7Wn/QgWn/AIPLX/45Qf8AgnH+1oeD4AtP/B5a/wDxyvp//haf/BS3/o1rwv8A+D23/wDk2j/haf8AwUs/6Na8L/8Ag9t//k2n/wAQ94J/6B8x/wDBE/8A5SH/ABgn/P8Aqfh/8idj8HvhP448JfsVr8G9d0yOLxAPDOpWZtFuUZfOlM/lrvBK8715zgZ5r4tb/gnF+1qeR8P7T/weWv8A8cr72tvHXxI8Pfs66h8Tfil4TtNK8S6VoF9f3+kwTCWGN4FldF3JI+4MqIThyfmPQ8D5A/4ex/Gn/onPhf8A74uf/jtfQcf4DgOGEy2hndStT5KKjTUVaXIlFfvE4NqWiurKzvoZYahwLeTp1qr112/+ROK/4dx/taf9E/tP/B5a/wDxyk/4dyfta/8ARPrT/wAHlr/8crtv+Hsfxp/6Jz4X/wC+Ln/47R/w9j+NP/ROvC//AHxc/wDx2vzf6j4Lv/mKxP3L/wCVnV7Dgr/n7U/r/t04r/h3H+1p/wBCBaf+Dy1/+OU3/h3D+1r1PgC0P/cdtf8A45Xb/wDD2P40/wDROfC//fFz/wDHaP8Ah7H8af8AonPhf/vi5/8AjtV9R8GP+grE/cv/AJWHsOCv+ftX7l/8icV/w7j/AGtP+hAtP/B5a/8Axyj/AIdx/taf9CBaf+Dy1/8Ajldr/wAPY/jT/wBE58L/APfFz/8AHaP+Hsfxp/6Jz4X/AO+Ln/47S+peC/8A0F
Yn7l/8rD2HBX/P2r9y/wDkTiv+Hcf7Wn/QgWn/AIPLX/45R/w7j/a0/wChAtP/AAeWv/xyu0b/AIKyfGkDP/CufC//AHxc/wDx2hv+CsnxpAz/AMK58L/98XP/AMdp/UfBj/oKxP3L/wCVh7Dgr/n7V+5f/InF/wDDuP8Aa0/6EC0/8Hlr/wDHKP8Ah3H+1p/0IFp/4PLX/wCOV2v/AA9j+NP/AETnwv8A98XP/wAdo/4ex/Gn/onPhf8A74uf/jtH1HwY/wCgrE/cv/lYew4K/wCftX7l/wDInE/8O4v2tN27/hAbT/weWv8A8cpf+Hcf7Wn/AEIFp/4PLX/45XpGjf8ABUn4u63GbWDwJ4ZS8P8AqY3S42yH0B83rVC4/wCCrHxvtpmt5vhv4XR0bayslz/8dpfU/Bf/AKCsT9y/+Vi9hwX/AM/av4f/ACJw8f8AwTj/AGsiw8z4f2gHf/ieWv8A8cr7Z/4JvfAy8/Zo0HV9Y+JNvb2mr6gUt47aPbMRCRl2Lx5GcgcV87+Af+Clnx48eeI7bw7p3wz8OSS3MojRYorjJY9uZa+9/D/hS3v7Czk1K6cTyWiNeCIbVjlK7iozk4rxc6oeBEKPssVjMUk+yV//AE0z18nwHC9XEe0w85trvt/6Sj0TTPi74GtExLfyE55PkPkj8q1bP41/DJZWF1qjsrY+ZrSTt9Fri9I+EmgX8ImuNRvFHcIU5/8AHa6LTP2dfCN64WXWdSXK5XDxjn/vmvjHlv0a5R1x2N+5f/KT7hUsO4rc6OL46fCKJ939uyHnAP2GXgf981ZuPj58GpQXj12QF2GV/s+XgHr/AA1n237JPgqeHzz4i1XA7Bosn/xyrUf7H3gGRjjxRqxA7AxZH/jlZrL/AKNMf+Y7G/cv/lJoqNGPulS++NvwqeV2t9dkI2lVb7DJu29v4awdU+KvgK7RWg1hg38ebST5v/Ha6GX9kTwaCUh1/VmY/c5ix+PyVzOofs9+GLOZoF8QXuVJG5gmAR2Py9a2p5b9G37OOxv3L/5SRUpUOXW5zOr+KvC95OXTUJHGSQTAw/pXH6syXzq8NxsVRt2qnOM5rrNd+HWj6ZOI7bUpnUHErMV+X9K5q+0tbZ3FuzPtkKYbg5FejQy76O9PWONxnzS/+UnlVsLlc7qTl/XyOQ1XRNbmDC3t1kOc7vMALt/eOaxNT8GeMLmdwLZGiKfuhFIibW9+ea6XVvEOoabIyJaI+DgYycn0+tcnqfxi16wLL/ZVrlBkhg33f++q97DYDwHfvQxeK+aX/wAqPm8VhuGHJqpOfy//AGTL1X4Z+PrjIs9LQEr95rpMZ9etY0/wY+J1w29tMiVmfc7LdRnH+7k1b1D9pjxJaMUTSNNBVcvv8zj/AMerPH7Vvi3d5baBpeexUSEf+hV7NLL/AAV5fdxWJ+5f/Kzx6tDgm3vVav3L/wCRF/4Ut8TyVjXRYlj/AIl+2R5/PdXqPhjw5q9h8MB4ZvoFS8+wzxGPzAwDMXxyOO4rzSD9qHxbI6o3h/TTnrt8z/4qrtv+0j4klG+TQ7HGcYUSE5/76r6fhrOPCfhnFVa2ExFZupB03zRuuVtN2tBa6Lv6HTleO4MyitOpRq1G5RcXzK+jt2itdChdfAv4iGPFpYwLuXDBrpcr+Oa808dfsl/HzxH4lS+stDsjbxwldzahGCT9M16v4g/aa8RaLa/aV8NWLARlizyuAMV45bf8FNPHl74kvNHtfhzoxhtmISVriXLY9ea+fjl/gnusViPu/wDuZxww3AnLpVqfd/8Aannmv/8ABPD9p+/unktfDmnMrNkZ1iIf1rHb/gmv+1WTn/hF9M/8HUP+Nehar/wVU+JdhI8cfwy0FinUNPMP/Zqpf8PaPigeR8K9A/8AAif/AOKq/qPgp/0FYj7n/wDKzWGG4H6Van9f9unO6J/wTr/agtYXiu/DGmKD0H9sRH+tR3H/AATo/amlfJ8N6a3zZ3DWYR/Wu60n/gqT8TNRT5/hloSv6Ceb/Gi//wCCpHxNsjtHwy0Nj6edP/8AFVH1PwT/AOgrEfc//lZDocDc2tWp93/2p52P+CbX7U3O/wAL6Yc/9RqH/GlH/BNz9qf/AKFXSh9NZh/xru1/4KqfE9ip/wCFZeH8H7x+0T8f+PVKn/BU34myjKfDPQfu5/183/xVH1HwT/6CsR9z/wDlZXseBv8An7U+7/7U4P8A4dwftR5x/wAItpeP+wzF/jUh/wCCb/7TZw58N6duH/UZi/xrtx/wVO+JaDfP8NNAVT91/tE2D+tRt/wVX+JQGV+GGhdcczzf40/7P8FP+grEfd/9zF7HgaP/AC9qfd/9qceP+CcX7TOf+Ra00D21iL/GlT/gnV+1FGqxDwzppVf+ozD/AI11b/8ABV74lBii/DHQQf8Aanm/+KpU/wCCrXxPZtrfC7QR/wBt5/8A4qmsD4Kf9BWI+5//ACsaocD9KtT7v/tTnIP+Ce37TsChV8Madgdv7Yh/xrQtv2B/2loAWHhnTQx6/wDE2i/xrXX/AIKr/Ekjc3wx0PHtPN/8VV7Tv+CoHxGu5Ak/wy0ZQ33Cs03zfrT+o+Cn/QViPuf/AMrJ+r8Df8/an3f/AGpT0/8AYZ+P0TB5/DlgrbcM39qRn+tdT4a/Ze/aH0mOS3u9As3jP3VGqR4P61d8O/t/+O9bIWTwJpCE9Assv+Ndhp37VPxC1BFmPhPSYomztlkkk28fjSWB8E+mKxH3P/5WZyw/AcdHVqfd/wDamG37KXiy/iB1HwXYCXfw63qDav8Ad4Nc1rv7B/i/V/Nh/si0CSAgH7avFel3H7V2t2G2O80HTncpuYwzOV/nViH9p7xDPp7XqeHLFSMHa0j9D361p9R8F9vrWI+7/wC5h7LgP/n9U+7/AO1Pk/Xv+CaX7SsWpyroeh6bNb7v3UjatEpx7gmoLT/gm5+1OkgE/hbTNobOf7ah/wAa+hPGn7cHxE0ATnR/AmkzCEZ3TSy4I/A159B/wVG+KjTGGb4V6GpHpPN/8VUPL/BTrisR9z/+VmkafAvL/Gqfd/8AanNr/wAE/P2l7fT3hh8NaY0hTaudWi/xrnp/+Cbn7VLS+ZH4V0zHp/bUP+NelXf/AAVI+IUFwbdPhvoeV++Wnm4/WoJP+CpfxQCmSL4Y6CVHVjPN/wDFUfUfBT/oKxH3P/5WP2PAv/P2p93/ANqeeL/wTY/ao3iQ+GNMyGz/AMhqH/GtCL/gnX+0+iHd4X03J7LrMX+NdY//AAVV+KkbfP8AC7QMeouJ/wD4qr0P/BUP4lSQea3wz0IfS5m/xpPA+CnXFYj7n/8AKwdHgbrVqfd/9qcAf+CdX7U4kJHhjTto+7/xOof8asRf8E8v2pduZfDWmA/9hiL/ABrtl/4KifEpsEfDLRBnsZ5v8aQ/8FQ/iiEDn4YaFjGT+/m4/Wn9R8FI/wDMViPuf/ysPY8DSf8AFqfd/wDanJQf8E9/2mo1O/w3p59F/teH/Gqk3/BPD9qWWUlfDGmKD1xrMP8AjXbD/gqP8Tdm9vhjoQ9B9om5/
WlP/BUn4mbcj4XaHk/d/wBIm+b9aj6j4J/9BWI+5/8AysPq/A0f+XtT7v8A7U5PTv8Agnv+03asHfw1p2QMc6vEf61r2/7Bf7Q4t2iuPDlhknII1WL/ABrYtf8AgqD8T7iLzT8MNDX2M83+NWB/wU88fjHm/DjRhn0mm/xo+o+Cf/QViPuf/wArF9X4G/5+1Pu/+1Obuf2CP2jnO+PQLHOM4GrRfe/OvqL4nfDLxh4k/ZSb4WaVp6Ta1/YFja/Z/tCqpliMO8b2IXA2NznnFfP3/Dz/AOIJGV+HOife/wCe83T161E//BTn4rTz7bX4daBGgHJladifycV9FkuceEvDuGxVLC4ms1iIOnK8W3Zpr3fcVnq97+h6OAxnB+W0qsKVWbVSPK7ro77e6tdTlk/YP/acjYlfBVr0x/yF7b/4ukf9g/8Aab2/L4EtCf8AsMW3/wAXXYJ/wUr+LAjDT+AvDwJ9Fn6f9/Ken/BSj4rlA58A6Bg8nCz8D1/1lfOfUfBn/oKxP3L/AOVnlfVuB/8An7V/D/5E4qP9gv8AadVh/wAUVajHrrFt/wDF0+T9gv8AaXaZXHgq2wv/AFGLb/4uu1/4eS/FLdtHgjw8SBlsJPx/5Epo/wCClHxRMXmHwR4eHttn/wDjlT9R8F/+grE/cv8A5WDo8DL/AJe1fuX/AMicdH+wX+0kUKz+B7Y4+7/xOLb/AOLpI/2Cv2lUO4eDbUD+6dXt/wD4uuxH/BSb4stEZh4D8PADswnyf/IlKv8AwUi+Lhj81vA/hwBl3KAlx/8AHKSwPgt0xWJ+5f8Aysf1fgjl/i1fuX/yJyqfsJftGiPL+CLVn/7C1v8A/F0L+w3+0qpYnwJbEEYx/bFt/wDF11K/8FJviyzbT4F8PAj7wKT/APxyh/8AgpL8WowrnwL4eKlsfLHcf/Haby/wY+J4rE/cv/lYex4Ij/y9q/cv/kTlh+w5+0vIY9/gG0XH3ydYtj/7PWh/wxF+0N9mMf8Awhlru9f7Vt//AIutpP8AgpN8VS7B/A/h0DOFG2fJ/wDIlXV/4KJfFA2T3h8G+HhsjLY2z9v+2lNYPwYlLTFYn7l/8rJlh+ButWr9y/8AkTh/A37CH7R2k+I7jVNW8GW0SnPkumr25z+T1qax+w78fLstLF4Rt5ZC+4FtUgH/ALPXQeA/+Ci3xV8V209ze+CPD0YjbagiE/Pp1kNa2oft8/Eq1hD2/gvQ3IOHY+dgf+P1DwPgv/0FYn7l/wDKwlhuBuZXq1fuX/yJyvh/9jP9oCxsXt7rwhboWkzj+1IDn8nqxP8AsdfHtovLj8J25P8AeOpwf/F10+nft5fEW8gSWXwnoa7unyzcj/v5Sz/t3/EqJiB4O0TAGcFZs/8AoyrjgfBi3+9Yn7l/8rM5YfgWUuZ1qv3L/wCROPk/Yx+Pi/6vwhA3GOdUg/8Ai6gP7GP7RSsWi8IW6jbjaNXg/wDi660ft+fE8u0f/CH6BlRlhib/AOOVH/w8D+J+WU+DNBBHTib/AOOVp9S8GtvrOJ+5f/KzH6rwDzfxqv3L/wCROUH7Ff7QxVI38HW+M5b/AImtv/8AF17z+x18G/Hvwg07XrTxzpUdqb2a3a18u4jk3BRIGzsJx94da8zX/goF8TGQk+DNCDDsVm/+OVDe/t/fE+6spbW38LaPBLLEypLHFKWjJGNwzJjI6816+QZl4TcNZpDMcJiK8qkOaylG6d4uL2guj01Wp6WW4ngrJsXHE0atRyjeya01TX8q79zg/jjIg+NvipHG7/ieXPHp+8Nc9b/vG6qn99tn3qq3WqX+qajNqmoX73F1dStJLPMS0kjk5LMTySTzmpbeTy1/fPvVf4tlfjGOxMcbj6uItZTlKSXa7b/U/PcVWWIxM5r7Tb+93Lp+zLgumz5Pvb6W6VPLEyf3P4XqD7QjNveFWRvl3MlRXVw7Rs6Pg/7P3dtcXvfaM/djSJbi73SfIkY/hRv4ttQXEaQ4HzbG/vNSec7M8Lop+T5G/vVHJNMsafI2F/hZaUpS+FBTp83vC/aEnymxTt+Z1amfaEjZv3O7+61OuJIYo1R0Xc3zVXa42/PMm5vvIq/xVjLY76dvtEse+SQzJ8n/AEzZvvVOvkNDs/h/2ap/aIZF3v8AeqZZkbaiD51/8dqDcvQt9nXeNqr/ABrvq5bzSRyb3TerfLtas6FvtDbLqFfm/iq/FJsk+zTbWVvuVO0+YUv7pdguizG2+zK4Xa27f/FWhbSJHMCiMrN/Cq/NurNh7b/lbd93ZVyO481vvsn8Tt1rWMebc4K0pRloaq3CeSqeTlf4l/ipitP+72TbG/g+Xa23/aqKznk3Nvfeuzcn+zTmuIfOSaZNzr8r7XrXl5fhOTm94//Z\n", - "text/plain": [ - "" - ] - }, - "metadata": { - "tags": [], - "image/jpeg": { - "width": 600 - } - }, - "execution_count": 38 + "Model Summary: 213 layers, 7225885 parameters, 0 gradients\n", + "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, Done. (0.007s)\n", + "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 1 tie, Done. (0.007s)\n", + "Speed: 0.5ms pre-process, 6.9ms inference, 1.3ms NMS per image at shape (1, 3, 640, 640)\n", + "Results saved to \u001b[1mruns/detect/exp\u001b[0m\n" + ] } ] }, + { + "cell_type": "markdown", + "metadata": { + "id": "hkAzDWJ7cWTr" + }, + "source": [ + "        \n", + "" + ] + }, { "cell_type": "markdown", "metadata": { "id": "0eq1SMWl6Sfn" }, "source": [ - "# 2. Test\n", - "Test a model's accuracy on [COCO](https://cocodataset.org/#home) val or test-dev datasets. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag. 
Note that `pycocotools` metrics may be ~1% better than the equivalent repo metrics, as is visible below, due to slight differences in mAP computation." + "# 2. Validate\n", + "Validate a model's accuracy on [COCO](https://cocodataset.org/#home) val or test-dev datasets. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag. Note that `pycocotools` metrics may be ~1% better than the equivalent repo metrics, as is visible below, due to slight differences in mAP computation." ] }, { @@ -652,7 +504,7 @@ "id": "eyTZYGgRjnMc" }, "source": [ - "## COCO val2017\n", + "## COCO val\n", "Download [COCO val 2017](https://github.com/ultralytics/yolov5/blob/74b34872fdf41941cddcf243951cdb090fbac17b/data/coco.yaml#L14) dataset (1GB - 5000 images), and test model accuracy." ] }, @@ -662,24 +514,27 @@ "id": "WQPtK1QYVaD_", "colab": { "base_uri": "https://localhost:8080/", - "height": 65, + "height": 48, "referenced_widgets": [ - "8815626359d84416a2f44a95500580a4", - "3b85609c4ce94a74823f2cfe141ce68e", - "876609753c2946248890344722963d44", - "8abfdd8778e44b7ca0d29881cb1ada05", - "78c6c3d97c484916b8ee167c63556800", - "9dd0f182db5d45378ceafb855e486eb8", - "a3dab28b45c247089a3d1b8b09f327de", - "32451332b7a94ba9aacddeaa6ac94d50" + "eb95db7cae194218b3fcefb439b6352f", + "769ecde6f2e64bacb596ce972f8d3d2d", + "384a001876054c93b0af45cd1e960bfe", + "dded0aeae74440f7ba2ffa0beb8dd612", + "5296d28be75740b2892ae421bbec3657", + "9f09facb2a6c4a7096810d327c8b551c", + "25621cff5d16448cb7260e839fd0f543", + "0ce7164fc0c74bb9a2b5c7037375a727", + "c4c4593c10904cb5b8a5724d60c7e181", + "473371611126476c88d5d42ec7031ed6", + "65efdfd0d26c46e79c8c5ff3b77126cc" ] }, - "outputId": "81521192-cf67-4a47-a4cc-434cb0ebc363" + "outputId": "bcf9a448-1f9b-4a41-ad49-12f181faf05a" }, "source": [ - "# Download COCO val2017\n", - "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017val.zip', 'tmp.zip')\n", - "!unzip -q tmp.zip -d ../ && rm tmp.zip" + "# Download COCO val\n", + "torch.hub.download_url_to_file('https://ultralytics.com/assets/coco2017val.zip', 'tmp.zip')\n", + "!unzip -q tmp.zip -d ../datasets && rm tmp.zip" ], "execution_count": null, "outputs": [ @@ -687,24 +542,15 @@ "output_type": "display_data", "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "8815626359d84416a2f44a95500580a4", + "model_id": "eb95db7cae194218b3fcefb439b6352f", "version_minor": 0, "version_major": 2 }, "text/plain": [ - "HBox(children=(FloatProgress(value=0.0, max=819257867.0), HTML(value='')))" + " 0%| | 0.00/780M [00:00
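For reference, the `pycocotools` numbers come from running COCO's own evaluator on the JSON detections that the val script writes when `--save-json` is passed. A minimal sketch, assuming a COCO-format ground-truth file and a predictions file named after the weights (both paths below are illustrative, not fixed by this repo):

```python
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

# Ground-truth annotations and the predictions JSON written by --save-json;
# adjust both paths to your dataset layout and run directory.
anno = COCO('../datasets/coco/annotations/instances_val2017.json')
pred = anno.loadRes('runs/val/exp/yolov5s_predictions.json')

coco_eval = COCOeval(anno, pred, 'bbox')  # evaluate box detections
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()  # prints mAP@0.5:0.95, mAP@0.5, AR, etc.
```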

\n", + "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", + "

\n", + "\n", + "Train a YOLOv5s model on the [COCO128](https://www.kaggle.com/ultralytics/coco128) dataset with `--data coco128.yaml`, starting from pretrained `--weights yolov5s.pt`, or from randomly initialized `--weights '' --cfg yolov5s.yaml`.\n", + "\n", + "- **Pretrained [Models](https://github.com/ultralytics/yolov5/tree/master/models)** are downloaded\n", + "automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\n", + "- **[Datasets](https://github.com/ultralytics/yolov5/tree/master/data)** available for autodownload include: [COCO](https://github.com/ultralytics/yolov5/blob/master/data/coco.yaml), [COCO128](https://github.com/ultralytics/yolov5/blob/master/data/coco128.yaml), [VOC](https://github.com/ultralytics/yolov5/blob/master/data/VOC.yaml), [Argoverse](https://github.com/ultralytics/yolov5/blob/master/data/Argoverse.yaml), [VisDrone](https://github.com/ultralytics/yolov5/blob/master/data/VisDrone.yaml), [GlobalWheat](https://github.com/ultralytics/yolov5/blob/master/data/GlobalWheat2020.yaml), [xView](https://github.com/ultralytics/yolov5/blob/master/data/xView.yaml), [Objects365](https://github.com/ultralytics/yolov5/blob/master/data/Objects365.yaml), [SKU-110K](https://github.com/ultralytics/yolov5/blob/master/data/SKU-110K.yaml).\n", + "- **Training Results** are saved to `runs/train/` with incrementing run directories, i.e. `runs/train/exp2`, `runs/train/exp3` etc.\n", + "

\n", "\n", - "All training results are saved to `runs/train/` with incrementing run directories, i.e. `runs/train/exp2`, `runs/train/exp3` etc.\n" + "## Train on Custom Data with Roboflow 🌟 NEW\n", + "\n", + "[Roboflow](https://roboflow.com/?ref=ultralytics) enables you to easily **organize, label, and prepare** a high quality dataset with your own custom data. Roboflow also makes it easy to establish an active learning pipeline, collaborate with your team on dataset improvement, and integrate directly into your model building workflow with the `roboflow` pip package.\n", + "\n", + "- Custom Training Example: [https://blog.roboflow.com/how-to-train-yolov5-on-a-custom-dataset/](https://blog.roboflow.com/how-to-train-yolov5-on-a-custom-dataset/?ref=ultralytics)\n", + "- Custom Training Notebook: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/roboflow-ai/yolov5-custom-training-tutorial/blob/main/yolov5-custom-training.ipynb)\n", + "
\n", + "\n", + "

Label images lightning fast (including with model-assisted labeling)" ] }, { @@ -917,37 +719,37 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "e715d09c-5d93-4912-a0df-9da0893f2014" + "outputId": "8724d13d-6711-4a12-d96a-1c655e5c3549" }, "source": [ "# Train YOLOv5s on COCO128 for 3 epochs\n", - "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --nosave --cache" + "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache" ], "execution_count": null, "outputs": [ { "output_type": "stream", + "name": "stdout", "text": [ + "\u001b[34m\u001b[1mtrain: \u001b[0mweights=yolov5s.pt, cfg=, data=coco128.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, adam=False, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, linear_lr=False, label_smoothing=0.0, patience=100, freeze=0, save_period=-1, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=-1, artifact_alias=latest\n", "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", - "YOLOv5 🚀 v5.0-2-g54d6516 torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", + "YOLOv5 🚀 v6.0-48-g84a8099 torch 1.10.0+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", "\n", - "Namespace(adam=False, artifact_alias='latest', batch_size=16, bbox_interval=-1, bucket='', cache_images=True, cfg='', data='./data/coco128.yaml', device='', entity=None, epochs=3, evolve=False, exist_ok=False, global_rank=-1, hyp='data/hyp.scratch.yaml', image_weights=False, img_size=[640, 640], label_smoothing=0.0, linear_lr=False, local_rank=-1, multi_scale=False, name='exp', noautoanchor=False, nosave=True, notest=False, project='runs/train', quad=False, rect=False, resume=False, save_dir='runs/train/exp', save_period=-1, single_cls=False, sync_bn=False, total_batch_size=16, upload_dataset=False, weights='yolov5s.pt', workers=8, world_size=1)\n", - "\u001b[34m\u001b[1mtensorboard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n", - "2021-04-12 10:29:58.539457: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\n", - "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.2, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0\n", - "\u001b[34m\u001b[1mwandb: \u001b[0mInstall Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)\n", + "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.1, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n", + "\u001b[34m\u001b[1mWeights & Biases: \u001b[0mrun 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs (RECOMMENDED)\n", + 
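The Weights & Biases hint above is the only setup tracking needs; a sketch of the same run driven from Python (arguments mirror the printed train: line; a wandb account is an assumption):

# Optional: pip install wandb && wandb login, then retrain to log this run
import train  # yolov5/train.py exposes run(**kwargs)

train.run(data='coco128.yaml', weights='yolov5s.pt', imgsz=640, batch_size=16, epochs=3, cache='ram')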
"\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n", "\n", " from n params module arguments \n", - " 0 -1 1 3520 models.common.Focus [3, 32, 3] \n", + " 0 -1 1 3520 models.common.Conv [3, 32, 6, 2, 2] \n", " 1 -1 1 18560 models.common.Conv [32, 64, 3, 2] \n", " 2 -1 1 18816 models.common.C3 [64, 64, 1] \n", " 3 -1 1 73984 models.common.Conv [64, 128, 3, 2] \n", - " 4 -1 1 156928 models.common.C3 [128, 128, 3] \n", + " 4 -1 2 115712 models.common.C3 [128, 128, 2] \n", " 5 -1 1 295424 models.common.Conv [128, 256, 3, 2] \n", - " 6 -1 1 625152 models.common.C3 [256, 256, 3] \n", + " 6 -1 3 625152 models.common.C3 [256, 256, 3] \n", " 7 -1 1 1180672 models.common.Conv [256, 512, 3, 2] \n", - " 8 -1 1 656896 models.common.SPP [512, 512, [5, 9, 13]] \n", - " 9 -1 1 1182720 models.common.C3 [512, 512, 1, False] \n", + " 8 -1 1 1182720 models.common.C3 [512, 512, 1] \n", + " 9 -1 1 656896 models.common.SPPF [512, 512, 5] \n", " 10 -1 1 131584 models.common.Conv [512, 256, 1, 1] \n", " 11 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", " 12 [-1, 6] 1 0 models.common.Concat [1] \n", @@ -963,43 +765,121 @@ " 22 [-1, 10] 1 0 models.common.Concat [1] \n", " 23 -1 1 1182720 models.common.C3 [512, 512, 1, False] \n", " 24 [17, 20, 23] 1 229245 models.yolo.Detect [80, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n", - "Model Summary: 283 layers, 7276605 parameters, 7276605 gradients, 17.1 GFLOPS\n", + "Model Summary: 270 layers, 7235389 parameters, 7235389 gradients, 16.5 GFLOPs\n", "\n", - "Transferred 362/362 items from yolov5s.pt\n", + "Transferred 349/349 items from yolov5s.pt\n", "Scaled weight_decay = 0.0005\n", - "Optimizer groups: 62 .bias, 62 conv.weight, 59 other\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '../coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 796544.38it/s]\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 176.73it/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 500812.42it/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 134.10it/s]\n", + "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD with parameter groups 57 weight, 60 weight (no decay), 60 bias\n", + "\u001b[34m\u001b[1malbumentations: \u001b[0mversion 1.0.3 required by YOLOv5, but version 0.1.12 is currently installed\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '../datasets/coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00" + "
[image: Weights & Biases training dashboard]
" ] }, { @@ -1035,67 +915,25 @@ "source": [ "## Local Logging\n", "\n", - "All results are logged by default to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc. View train and test jpgs to see mosaics, labels, predictions and augmentation effects. Note a **Mosaic Dataloader** is used for training (shown below), a new concept developed by Ultralytics and first featured in [YOLOv4](https://arxiv.org/abs/2004.10934)." - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "riPdhraOTCO0" - }, - "source": [ - "Image(filename='runs/train/exp/train_batch0.jpg', width=800) # train batch 0 mosaics and labels\n", - "Image(filename='runs/train/exp/test_batch0_labels.jpg', width=800) # test batch 0 labels\n", - "Image(filename='runs/train/exp/test_batch0_pred.jpg', width=800) # test batch 0 predictions" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "OYG4WFEnTVrI" - }, - "source": [ - "> \n", + "All results are logged by default to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc. View train and val jpgs to see mosaics, labels, predictions and augmentation effects. Note an Ultralytics **Mosaic Dataloader** is used for training (shown below), which combines 4 images into 1 mosaic during training.\n", + "\n", + "> \n", "`train_batch0.jpg` shows train batch 0 mosaics and labels\n", "\n", - "> \n", - "`test_batch0_labels.jpg` shows test batch 0 labels\n", + "> \n", + "`test_batch0_labels.jpg` shows val batch 0 labels\n", "\n", - "> \n", - "`test_batch0_pred.jpg` shows test batch 0 _predictions_\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "7KN5ghjE6ZWh" - }, - "source": [ - "Training losses and performance metrics are also logged to [Tensorboard](https://www.tensorflow.org/tensorboard) and a custom `results.txt` logfile which is plotted as `results.png` (below) after training completes. Here we show YOLOv5s trained on COCO128 to 300 epochs, starting from scratch (blue), and from pretrained `--weights yolov5s.pt` (orange)." - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "MDznIqPF7nk3" - }, - "source": [ + "> \n", + "`test_batch0_pred.jpg` shows val batch 0 _predictions_\n", + "\n", + "Training results are automatically logged to [Tensorboard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) as `results.csv`, which is plotted as `results.png` (below) after training completes. You can also plot any `results.csv` file manually:\n", + "\n", + "```python\n", "from utils.plots import plot_results \n", - "plot_results(save_dir='runs/train/exp') # plot all results*.txt as results.png\n", - "Image(filename='runs/train/exp/results.png', width=800)" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "lfrEegCSW3fK" - }, - "source": [ - "\n" + "plot_results('path/to/results.csv') # plot 'results.csv' as 'results.png'\n", + "```\n", + "\n", + "\"COCO128" ] }, { @@ -1124,7 +962,7 @@ "\n", "![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg)\n", "\n", - "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. 
CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([test.py](https://github.com/ultralytics/yolov5/blob/master/test.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/models/export.py)) on MacOS, Windows, and Ubuntu every 24 hours and on every commit.\n" + "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on MacOS, Windows, and Ubuntu every 24 hours and on every commit.\n" ] }, { @@ -1138,20 +976,6 @@ "Optional extras below. Unit tests validate repo functionality and should be run on any PRs submitted.\n" ] }, - { - "cell_type": "code", - "metadata": { - "id": "gI6NoBev8Ib1" - }, - "source": [ - "# Re-clone repo\n", - "%cd ..\n", - "%rm -rf yolov5 && git clone https://github.com/ultralytics/yolov5\n", - "%cd yolov5" - ], - "execution_count": null, - "outputs": [] - }, { "cell_type": "code", "metadata": { @@ -1159,9 +983,9 @@ }, "source": [ "# Reproduce\n", - "for x in 'yolov5s', 'yolov5m', 'yolov5l', 'yolov5x':\n", - " !python test.py --weights {x}.pt --data coco.yaml --img 640 --conf 0.25 --iou 0.45 # speed\n", - " !python test.py --weights {x}.pt --data coco.yaml --img 640 --conf 0.001 --iou 0.65 # mAP" + "for x in 'yolov5n', 'yolov5s', 'yolov5m', 'yolov5l', 'yolov5x':\n", + " !python val.py --weights {x}.pt --data coco.yaml --img 640 --task speed # speed\n", + " !python val.py --weights {x}.pt --data coco.yaml --img 640 --conf 0.001 --iou 0.65 # mAP" ], "execution_count": null, "outputs": [] @@ -1179,7 +1003,7 @@ "model = torch.hub.load('ultralytics/yolov5', 'yolov5s')\n", "\n", "# Images\n", - "dir = 'https://github.com/ultralytics/yolov5/raw/master/data/images/'\n", + "dir = 'https://ultralytics.com/images/'\n", "imgs = [dir + f for f in ('zidane.jpg', 'bus.jpg')] # batch of images\n", "\n", "# Inference\n", @@ -1195,23 +1019,23 @@ "id": "FGH0ZjkGjejy" }, "source": [ - "# Unit tests\n", + "# CI Checks\n", "%%shell\n", "export PYTHONPATH=\"$PWD\" # to run *.py. 
files in subdirectories\n", - "\n", "rm -rf runs # remove runs/\n", - "for m in yolov5s; do # models\n", - " python train.py --weights $m.pt --epochs 3 --img 320 --device 0 # train pretrained\n", - " python train.py --weights '' --cfg $m.yaml --epochs 3 --img 320 --device 0 # train scratch\n", + "for m in yolov5n; do # models\n", + " python train.py --img 64 --batch 32 --weights $m.pt --epochs 1 --device 0 # train pretrained\n", + " python train.py --img 64 --batch 32 --weights '' --cfg $m.yaml --epochs 1 --device 0 # train scratch\n", " for d in 0 cpu; do # devices\n", + " python val.py --weights $m.pt --device $d # val official\n", + " python val.py --weights runs/train/exp/weights/best.pt --device $d # val custom\n", " python detect.py --weights $m.pt --device $d # detect official\n", " python detect.py --weights runs/train/exp/weights/best.pt --device $d # detect custom\n", - " python test.py --weights $m.pt --device $d # test official\n", - " python test.py --weights runs/train/exp/weights/best.pt --device $d # test custom\n", " done\n", " python hubconf.py # hub\n", - " python models/yolo.py --cfg $m.yaml # inspect\n", - " python models/export.py --weights $m.pt --img 640 --batch 1 # export\n", + " python models/yolo.py --cfg $m.yaml # build PyTorch model\n", + " python models/tf.py --weights $m.pt # build TensorFlow model\n", + " python export.py --img 64 --batch 1 --weights $m.pt --include torchscript onnx # export\n", "done" ], "execution_count": null, @@ -1224,11 +1048,11 @@ }, "source": [ "# Profile\n", - "from utils.torch_utils import profile \n", + "from utils.torch_utils import profile\n", "\n", "m1 = lambda x: x * torch.sigmoid(x)\n", "m2 = torch.nn.SiLU()\n", - "profile(x=torch.randn(16, 3, 640, 640), ops=[m1, m2], n=100)" + "results = profile(input=torch.randn(16, 3, 640, 640), ops=[m1, m2], n=100)" ], "execution_count": null, "outputs": [] @@ -1253,11 +1077,26 @@ }, "source": [ "# VOC\n", - "for b, m in zip([64, 48, 32, 16], ['yolov5s', 'yolov5m', 'yolov5l', 'yolov5x']): # zip(batch_size, model)\n", - " !python train.py --batch {b} --weights {m}.pt --data voc.yaml --epochs 50 --cache --img 512 --nosave --hyp hyp.finetune.yaml --project VOC --name {m}" + "for b, m in zip([64, 64, 32, 16], ['yolov5s', 'yolov5m', 'yolov5l', 'yolov5x']): # zip(batch_size, model)\n", + " !python train.py --batch {b} --weights {m}.pt --data VOC.yaml --epochs 50 --cache --img 512 --nosave --hyp hyp.VOC.yaml --project VOC --name {m}" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "VTRwsvA9u7ln" + }, + "source": [ + "# TensorRT \n", + "# https://docs.nvidia.com/deeplearning/tensorrt/install-guide/index.html#installing-pip\n", + "!pip install -U nvidia-tensorrt --index-url https://pypi.ngc.nvidia.com # install\n", + "!python export.py --weights yolov5s.pt --include engine --imgsz 640 640 --device 0 # export\n", + "!python detect.py --weights yolov5s.engine --imgsz 640 640 --device 0 # inference" ], "execution_count": null, "outputs": [] } ] -} \ No newline at end of file +} diff --git a/utility.py b/utility.py deleted file mode 100644 index 520ba3d3a576..000000000000 --- a/utility.py +++ /dev/null @@ -1,49 +0,0 @@ -""" -Utility functions for working with the YOLOv3 models. - -################ -Command Help: -usage: utility.py [-h] {strip} ... 
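The strip subcommand documented here was a thin wrapper; the same behaviour remains available in one line via utils.general (a sketch, checkpoint path illustrative):

from utils.general import strip_optimizer

strip_optimizer('runs/train/exp/weights/best.pt')  # drop optimizer/EMA state for a lean, inference-ready checkpoint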
- -Utility functions for working with the YOLOv3 models - -positional arguments: - {strip} - -optional arguments: - -h, --help show this help message and exit - -################ -Strip Command Help: -usage: utility.py strip [-h] weights - -Strip the extra information from a models checkpoint for training from scratch - -positional arguments: - weights weights path - -optional arguments: - -h, --help show this help message and exit -""" - -import argparse - -from utils.general import strip_optimizer - -STRIP_COMMAND = "strip" - -if __name__ == '__main__': - parser = argparse.ArgumentParser(description="Utility functions for working with the YOLOv3 models") - subparsers = parser.add_subparsers(dest="command") - strip_subparser = subparsers.add_parser( - STRIP_COMMAND, - description="Strip the extra information from a models checkpoint for training from scratch", - ) - strip_subparser.add_argument('weights', type=str, help='weights path') - args = parser.parse_args() - - if args.command == STRIP_COMMAND: - print(f"stripping extras from {args.weights}") - strip_optimizer(args.weights) - else: - raise ValueError(f"unknown command given of {args.command}") diff --git a/utils/__init__.py b/utils/__init__.py index e69de29bb2d1..a63c473a4340 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -0,0 +1,36 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +utils/initialization +""" + + +def notebook_init(verbose=True): + # Check system software and hardware + print('Checking setup...') + + import os + import shutil + + from utils.general import check_requirements, emojis, is_colab + from utils.torch_utils import select_device # imports + + check_requirements(('psutil', 'IPython')) + import psutil + from IPython import display # to display images and clear console output + + if is_colab(): + shutil.rmtree('/content/sample_data', ignore_errors=True) # remove colab /sample_data directory + + # System info + if verbose: + gb = 1 << 30 # bytes to GiB (1024 ** 3) + ram = psutil.virtual_memory().total + total, used, free = shutil.disk_usage("/") + display.clear_output() + s = f'({os.cpu_count()} CPUs, {ram / gb:.1f} GB RAM, {(total - free) / gb:.1f}/{total / gb:.1f} GB disk)' + else: + s = '' + + select_device(newline=False) + print(emojis(f'Setup complete ✅ {s}')) + return display diff --git a/utils/activations.py b/utils/activations.py index 07e864bcd241..a4ff789cf336 100644 --- a/utils/activations.py +++ b/utils/activations.py @@ -1,29 +1,13 @@ -# Activation functions +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Activation functions +""" import torch import torch.nn as nn import torch.nn.functional as F -def is_activation(mod, act_types=None): - if not act_types: - act_types = (nn.ELU, nn.Hardshrink, nn.Hardsigmoid, nn.Hardtanh, nn.Hardswish, nn.LeakyReLU, - nn.LogSigmoid, nn.PReLU, nn.ReLU, nn.ReLU6, nn.RReLU, nn.SELU, nn.CELU, nn.GELU, - nn.Sigmoid, nn.SiLU, nn.Softplus, nn.Softshrink, nn.Softsign, nn.Tanh, nn.Tanhshrink, - SiLU, Hardswish, Mish, MemoryEfficientMish, FReLU) - - return isinstance(mod, act_types) - - -def replace_activations(mod, act, act_types=None): - for name, child in mod.named_children(): - if is_activation(child, act_types): - child_act = act if not isinstance(act, str) else eval(act)() - setattr(mod, name, child_act) - else: - replace_activations(child, act, act_types) - - # SiLU https://arxiv.org/pdf/1606.08415.pdf ---------------------------------------------------------------------------- class SiLU(nn.Module): # export-friendly version of nn.SiLU() @staticmethod 
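The removed replace_activations() helper can be approximated in a few lines whenever an export-friendly activation is needed; a sketch, not part of this patch:

import torch.nn as nn

from utils.activations import SiLU  # export-friendly x * sigmoid(x)

def swap_silu(module):
    # Recursively replace nn.SiLU modules, e.g. before ONNX/CoreML export
    for name, child in module.named_children():
        if isinstance(child, nn.SiLU):
            setattr(module, name, SiLU())
        else:
            swap_silu(child)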
@@ -34,8 +18,8 @@ def forward(x): class Hardswish(nn.Module): # export-friendly version of nn.Hardswish() @staticmethod def forward(x): - # return x * F.hardsigmoid(x) # for torchscript and CoreML - return x * F.hardtanh(x + 3, 0., 6.) / 6. # for torchscript, CoreML and ONNX + # return x * F.hardsigmoid(x) # for TorchScript and CoreML + return x * F.hardtanh(x + 3, 0.0, 6.0) / 6.0 # for TorchScript, CoreML and ONNX # Mish https://github.com/digantamisra98/Mish -------------------------------------------------------------------------- diff --git a/utils/augmentations.py b/utils/augmentations.py new file mode 100644 index 000000000000..0311b97b63db --- /dev/null +++ b/utils/augmentations.py @@ -0,0 +1,277 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Image augmentation functions +""" + +import math +import random + +import cv2 +import numpy as np + +from utils.general import LOGGER, check_version, colorstr, resample_segments, segment2box +from utils.metrics import bbox_ioa + + +class Albumentations: + # YOLOv5 Albumentations class (optional, only used if package is installed) + def __init__(self): + self.transform = None + try: + import albumentations as A + check_version(A.__version__, '1.0.3', hard=True) # version requirement + + self.transform = A.Compose([ + A.Blur(p=0.01), + A.MedianBlur(p=0.01), + A.ToGray(p=0.01), + A.CLAHE(p=0.01), + A.RandomBrightnessContrast(p=0.0), + A.RandomGamma(p=0.0), + A.ImageCompression(quality_lower=75, p=0.0)], + bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels'])) + + LOGGER.info(colorstr('albumentations: ') + ', '.join(f'{x}' for x in self.transform.transforms if x.p)) + except ImportError: # package not installed, skip + pass + except Exception as e: + LOGGER.info(colorstr('albumentations: ') + f'{e}') + + def __call__(self, im, labels, p=1.0): + if self.transform and random.random() < p: + new = self.transform(image=im, bboxes=labels[:, 1:], class_labels=labels[:, 0]) # transformed + im, labels = new['image'], np.array([[c, *b] for c, b in zip(new['class_labels'], new['bboxes'])]) + return im, labels + + +def augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5): + # HSV color-space augmentation + if hgain or sgain or vgain: + r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains + hue, sat, val = cv2.split(cv2.cvtColor(im, cv2.COLOR_BGR2HSV)) + dtype = im.dtype # uint8 + + x = np.arange(0, 256, dtype=r.dtype) + lut_hue = ((x * r[0]) % 180).astype(dtype) + lut_sat = np.clip(x * r[1], 0, 255).astype(dtype) + lut_val = np.clip(x * r[2], 0, 255).astype(dtype) + + im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))) + cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR, dst=im) # no return needed + + +def hist_equalize(im, clahe=True, bgr=False): + # Equalize histogram on BGR image 'im' with im.shape(n,m,3) and range 0-255 + yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV) + if clahe: + c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8)) + yuv[:, :, 0] = c.apply(yuv[:, :, 0]) + else: + yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0]) # equalize Y channel histogram + return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB) # convert YUV image to RGB + + +def replicate(im, labels): + # Replicate labels + h, w = im.shape[:2] + boxes = labels[:, 1:].astype(int) + x1, y1, x2, y2 = boxes.T + s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels) + for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices + x1b, y1b, x2b, y2b = boxes[i] + bh, bw = y2b - 
y1b, x2b - x1b + yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y + x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh] + im[y1a:y2a, x1a:x2a] = im[y1b:y2b, x1b:x2b] # im4[ymin:ymax, xmin:xmax] + labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0) + + return im, labels + + +def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32): + # Resize and pad image while meeting stride-multiple constraints + shape = im.shape[:2] # current shape [height, width] + if isinstance(new_shape, int): + new_shape = (new_shape, new_shape) + + # Scale ratio (new / old) + r = min(new_shape[0] / shape[0], new_shape[1] / shape[1]) + if not scaleup: # only scale down, do not scale up (for better val mAP) + r = min(r, 1.0) + + # Compute padding + ratio = r, r # width, height ratios + new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) + dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding + if auto: # minimum rectangle + dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding + elif scaleFill: # stretch + dw, dh = 0.0, 0.0 + new_unpad = (new_shape[1], new_shape[0]) + ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios + + dw /= 2 # divide padding into 2 sides + dh /= 2 + + if shape[::-1] != new_unpad: # resize + im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR) + top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) + left, right = int(round(dw - 0.1)), int(round(dw + 0.1)) + im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border + return im, ratio, (dw, dh) + + +def random_perspective(im, targets=(), segments=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, + border=(0, 0)): + # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(0.1, 0.1), scale=(0.9, 1.1), shear=(-10, 10)) + # targets = [cls, xyxy] + + height = im.shape[0] + border[0] * 2 # shape(h,w,c) + width = im.shape[1] + border[1] * 2 + + # Center + C = np.eye(3) + C[0, 2] = -im.shape[1] / 2 # x translation (pixels) + C[1, 2] = -im.shape[0] / 2 # y translation (pixels) + + # Perspective + P = np.eye(3) + P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y) + P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x) + + # Rotation and Scale + R = np.eye(3) + a = random.uniform(-degrees, degrees) + # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations + s = random.uniform(1 - scale, 1 + scale) + # s = 2 ** random.uniform(-scale, scale) + R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s) + + # Shear + S = np.eye(3) + S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg) + S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg) + + # Translation + T = np.eye(3) + T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels) + T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels) + + # Combined rotation matrix + M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT + if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed + if perspective: + im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114)) + else: # affine + im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114)) + + # 
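    # Usage sketch for the letterbox() helper above (shapes illustrative, not from this patch):
    #   import numpy as np
    #   from utils.augmentations import letterbox
    #   im0 = np.zeros((480, 640, 3), np.uint8)              # HWC BGR frame
    #   im, ratio, (dw, dh) = letterbox(im0, new_shape=640)  # pad to a stride-multiple rectangle
    #   # detections on `im` map back to `im0` via `ratio` and the (dw, dh) padding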
Visualize + # import matplotlib.pyplot as plt + # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel() + # ax[0].imshow(im[:, :, ::-1]) # base + # ax[1].imshow(im2[:, :, ::-1]) # warped + + # Transform label coordinates + n = len(targets) + if n: + use_segments = any(x.any() for x in segments) + new = np.zeros((n, 4)) + if use_segments: # warp segments + segments = resample_segments(segments) # upsample + for i, segment in enumerate(segments): + xy = np.ones((len(segment), 3)) + xy[:, :2] = segment + xy = xy @ M.T # transform + xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2] # perspective rescale or affine + + # clip + new[i] = segment2box(xy, width, height) + + else: # warp boxes + xy = np.ones((n * 4, 3)) + xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1 + xy = xy @ M.T # transform + xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine + + # create new boxes + x = xy[:, [0, 2, 4, 6]] + y = xy[:, [1, 3, 5, 7]] + new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T + + # clip + new[:, [0, 2]] = new[:, [0, 2]].clip(0, width) + new[:, [1, 3]] = new[:, [1, 3]].clip(0, height) + + # filter candidates + i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10) + targets = targets[i] + targets[:, 1:5] = new[i] + + return im, targets + + +def copy_paste(im, labels, segments, p=0.5): + # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy) + n = len(segments) + if p and n: + h, w, c = im.shape # height, width, channels + im_new = np.zeros(im.shape, np.uint8) + for j in random.sample(range(n), k=round(p * n)): + l, s = labels[j], segments[j] + box = w - l[3], l[2], w - l[1], l[4] + ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area + if (ioa < 0.30).all(): # allow 30% obscuration of existing labels + labels = np.concatenate((labels, [[l[0], *box]]), 0) + segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1)) + cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED) + + result = cv2.bitwise_and(src1=im, src2=im_new) + result = cv2.flip(result, 1) # augment segments (flip left-right) + i = result > 0 # pixels to replace + # i[:, :] = result.max(2).reshape(h, w, 1) # act over ch + im[i] = result[i] # cv2.imwrite('debug.jpg', im) # debug + + return im, labels, segments + + +def cutout(im, labels, p=0.5): + # Applies image cutout augmentation https://arxiv.org/abs/1708.04552 + if random.random() < p: + h, w = im.shape[:2] + scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction + for s in scales: + mask_h = random.randint(1, int(h * s)) # create random masks + mask_w = random.randint(1, int(w * s)) + + # box + xmin = max(0, random.randint(0, w) - mask_w // 2) + ymin = max(0, random.randint(0, h) - mask_h // 2) + xmax = min(w, xmin + mask_w) + ymax = min(h, ymin + mask_h) + + # apply random color mask + im[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)] + + # return unobscured labels + if len(labels) and s > 0.03: + box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32) + ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area + labels = labels[ioa < 0.60] # remove >60% obscured labels + + return labels + + +def mixup(im, labels, im2, labels2): + # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf + r = np.random.beta(32.0, 32.0) # mixup ratio, 
alpha=beta=32.0 + im = (im * r + im2 * (1 - r)).astype(np.uint8) + labels = np.concatenate((labels, labels2), 0) + return im, labels + + +def box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n) + # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio + w1, h1 = box1[2] - box1[0], box1[3] - box1[1] + w2, h2 = box2[2] - box2[0], box2[3] - box2[1] + ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio + return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates diff --git a/utils/autoanchor.py b/utils/autoanchor.py index 87dc394c832e..77518abe9889 100644 --- a/utils/autoanchor.py +++ b/utils/autoanchor.py @@ -1,28 +1,32 @@ -# Auto-anchor utils +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +AutoAnchor utils +""" + +import random import numpy as np import torch import yaml from tqdm import tqdm -from utils.general import colorstr +from utils.general import LOGGER, colorstr, emojis + +PREFIX = colorstr('AutoAnchor: ') def check_anchor_order(m): # Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary - a = m.anchor_grid.prod(-1).view(-1) # anchor area + a = m.anchors.prod(-1).mean(-1).view(-1) # mean anchor area per output layer da = a[-1] - a[0] # delta a ds = m.stride[-1] - m.stride[0] # delta s - if da.sign() != ds.sign(): # same order - print('Reversing anchor order') + if da and (da.sign() != ds.sign()): # same order + LOGGER.info(f'{PREFIX}Reversing anchor order') m.anchors[:] = m.anchors.flip(0) - m.anchor_grid[:] = m.anchor_grid.flip(0) def check_anchors(dataset, model, thr=4.0, imgsz=640): # Check anchor fit to data, recompute if necessary - prefix = colorstr('autoanchor: ') - print(f'\n{prefix}Analyzing anchors... ', end='') m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1] # Detect() shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True) scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1)) # augment scale @@ -30,39 +34,42 @@ def check_anchors(dataset, model, thr=4.0, imgsz=640): def metric(k): # compute metric r = wh[:, None] / k[None] - x = torch.min(r, 1. / r).min(2)[0] # ratio metric + x = torch.min(r, 1 / r).min(2)[0] # ratio metric best = x.max(1)[0] # best_x - aat = (x > 1. / thr).float().sum(1).mean() # anchors above threshold - bpr = (best > 1. / thr).float().mean() # best possible recall + aat = (x > 1 / thr).float().sum(1).mean() # anchors above threshold + bpr = (best > 1 / thr).float().mean() # best possible recall return bpr, aat - anchors = m.anchor_grid.clone().cpu().view(-1, 2) # current anchors - bpr, aat = metric(anchors) - print(f'anchors/target = {aat:.2f}, Best Possible Recall (BPR) = {bpr:.4f}', end='') - if bpr < 0.98: # threshold to recompute - print('. Attempting to improve anchors, please wait...') - na = m.anchor_grid.numel() // 2 # number of anchors + stride = m.stride.to(m.anchors.device).view(-1, 1, 1) # model strides + anchors = m.anchors.clone() * stride # current anchors + bpr, aat = metric(anchors.cpu().view(-1, 2)) + s = f'\n{PREFIX}{aat:.2f} anchors/target, {bpr:.3f} Best Possible Recall (BPR). 
' + if bpr > 0.98: # threshold to recompute + LOGGER.info(emojis(f'{s}Current anchors are a good fit to dataset ✅')) + else: + LOGGER.info(emojis(f'{s}Anchors are a poor fit to dataset ⚠️, attempting to improve...')) + na = m.anchors.numel() // 2 # number of anchors try: anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False) except Exception as e: - print(f'{prefix}ERROR: {e}') + LOGGER.info(f'{PREFIX}ERROR: {e}') new_bpr = metric(anchors)[0] if new_bpr > bpr: # replace anchors anchors = torch.tensor(anchors, device=m.anchors.device).type_as(m.anchors) - m.anchor_grid[:] = anchors.clone().view_as(m.anchor_grid) # for inference - m.anchors[:] = anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1) # loss - check_anchor_order(m) - print(f'{prefix}New anchors saved to model. Update model *.yaml to use these anchors in the future.') + m.anchors[:] = anchors.clone().view_as(m.anchors) + check_anchor_order(m) # must be in pixel-space (not grid-space) + m.anchors /= stride + s = f'{PREFIX}Done ✅ (optional: update model *.yaml to use these anchors in the future)' else: - print(f'{prefix}Original anchors better than new anchors. Proceeding with original anchors.') - print('') # newline + s = f'{PREFIX}Done ⚠️ (original anchors better than new anchors, proceeding with original anchors)' + LOGGER.info(emojis(s)) -def kmean_anchors(path='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True): +def kmean_anchors(dataset='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True): """ Creates kmeans-evolved anchors from training dataset Arguments: - path: path to dataset *.yaml, or a loaded dataset + dataset: path to data.yaml, or a loaded dataset n: number of anchors img_size: image size used for training thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0 @@ -77,12 +84,12 @@ def kmean_anchors(path='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=10 """ from scipy.cluster.vq import kmeans - thr = 1. / thr - prefix = colorstr('autoanchor: ') + npr = np.random + thr = 1 / thr def metric(k, wh): # compute metrics r = wh[:, None] / k[None] - x = torch.min(r, 1. 
/ r).min(2)[0] # ratio metric + x = torch.min(r, 1 / r).min(2)[0] # ratio metric # x = wh_iou(wh, torch.tensor(k)) # iou metric return x, x.max(1)[0] # x, best_x @@ -90,24 +97,24 @@ def anchor_fitness(k): # mutation fitness _, best = metric(torch.tensor(k, dtype=torch.float32), wh) return (best * (best > thr).float()).mean() # fitness - def print_results(k): + def print_results(k, verbose=True): k = k[np.argsort(k.prod(1))] # sort small to large x, best = metric(k, wh0) bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n # best possible recall, anch > thr - print(f'{prefix}thr={thr:.2f}: {bpr:.4f} best possible recall, {aat:.2f} anchors past thr') - print(f'{prefix}n={n}, img_size={img_size}, metric_all={x.mean():.3f}/{best.mean():.3f}-mean/best, ' - f'past_thr={x[x > thr].mean():.3f}-mean: ', end='') + s = f'{PREFIX}thr={thr:.2f}: {bpr:.4f} best possible recall, {aat:.2f} anchors past thr\n' \ + f'{PREFIX}n={n}, img_size={img_size}, metric_all={x.mean():.3f}/{best.mean():.3f}-mean/best, ' \ + f'past_thr={x[x > thr].mean():.3f}-mean: ' for i, x in enumerate(k): - print('%i,%i' % (round(x[0]), round(x[1])), end=', ' if i < len(k) - 1 else '\n') # use in *.cfg + s += '%i,%i, ' % (round(x[0]), round(x[1])) + if verbose: + LOGGER.info(s[:-2]) return k - if isinstance(path, str): # *.yaml file - with open(path) as f: + if isinstance(dataset, str): # *.yaml file + with open(dataset, errors='ignore') as f: data_dict = yaml.safe_load(f) # model dict from utils.datasets import LoadImagesAndLabels dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True) - else: - dataset = path # dataset # Get label wh shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True) @@ -116,19 +123,22 @@ def print_results(k): # Filter i = (wh0 < 3.0).any(1).sum() if i: - print(f'{prefix}WARNING: Extremely small objects found. 
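    # Usage sketch for this anchor-evolution routine (mirrors the docstring above; scipy required):
    #   from utils.autoanchor import kmean_anchors
    #   anchors = kmean_anchors('./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000)
    #   # returns an (n, 2) array of pixel-space anchor (w, h) pairs, sorted small to large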
{i} of {len(wh0)} labels are < 3 pixels in size.') + LOGGER.info(f'{PREFIX}WARNING: Extremely small objects found: {i} of {len(wh0)} labels are < 3 pixels in size') wh = wh0[(wh0 >= 2.0).any(1)] # filter > 2 pixels - # wh = wh * (np.random.rand(wh.shape[0], 1) * 0.9 + 0.1) # multiply by random scale 0-1 - - # Kmeans calculation - print(f'{prefix}Running kmeans for {n} anchors on {len(wh)} points...') - s = wh.std(0) # sigmas for whitening - k, dist = kmeans(wh / s, n, iter=30) # points, mean distance - assert len(k) == n, print(f'{prefix}ERROR: scipy.cluster.vq.kmeans requested {n} points but returned only {len(k)}') - k *= s - wh = torch.tensor(wh, dtype=torch.float32) # filtered - wh0 = torch.tensor(wh0, dtype=torch.float32) # unfiltered - k = print_results(k) + # wh = wh * (npr.rand(wh.shape[0], 1) * 0.9 + 0.1) # multiply by random scale 0-1 + + # Kmeans init + try: + LOGGER.info(f'{PREFIX}Running kmeans for {n} anchors on {len(wh)} points...') + assert n <= len(wh) # apply overdetermined constraint + s = wh.std(0) # sigmas for whitening + k = kmeans(wh / s, n, iter=30)[0] * s # points + assert n == len(k) # kmeans may return fewer points than requested if wh is insufficient or too similar + except Exception: + LOGGER.warning(f'{PREFIX}WARNING: switching strategies from kmeans to random init') + k = np.sort(npr.rand(n * 2)).reshape(n, 2) * img_size # random init + wh, wh0 = (torch.tensor(x, dtype=torch.float32) for x in (wh, wh0)) + k = print_results(k, verbose=False) # Plot # k, d = [None] * 20, [None] * 20 @@ -143,19 +153,18 @@ def print_results(k): # fig.savefig('wh.png', dpi=200) # Evolve - npr = np.random f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1 # fitness, generations, mutation prob, sigma - pbar = tqdm(range(gen), desc=f'{prefix}Evolving anchors with Genetic Algorithm:') # progress bar + pbar = tqdm(range(gen), bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar for _ in pbar: v = np.ones(sh) while (v == 1).all(): # mutate until a change occurs (prevent duplicates) - v = ((npr.random(sh) < mp) * npr.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0) + v = ((npr.random(sh) < mp) * random.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0) kg = (k.copy() * v).clip(min=2.0) fg = anchor_fitness(kg) if fg > f: f, k = fg, kg.copy() - pbar.desc = f'{prefix}Evolving anchors with Genetic Algorithm: fitness = {f:.4f}' + pbar.desc = f'{PREFIX}Evolving anchors with Genetic Algorithm: fitness = {f:.4f}' if verbose: - print_results(k) + print_results(k, verbose) return print_results(k) diff --git a/utils/autobatch.py b/utils/autobatch.py new file mode 100644 index 000000000000..e53b4787b87d --- /dev/null +++ b/utils/autobatch.py @@ -0,0 +1,58 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Auto-batch utils +""" + +from copy import deepcopy + +import numpy as np +import torch +from torch.cuda import amp + +from utils.general import LOGGER, colorstr +from utils.torch_utils import profile + + +def check_train_batch_size(model, imgsz=640): + # Check YOLOv5 training batch size + with amp.autocast(): + return autobatch(deepcopy(model).train(), imgsz) # compute optimal batch size + + +def autobatch(model, imgsz=640, fraction=0.9, batch_size=16): + # Automatically estimate best batch size to use `fraction` of available CUDA memory + # Usage: + # import torch + # from utils.autobatch import autobatch + # model = torch.hub.load('ultralytics/yolov5', 'yolov5s', autoshape=False) + # print(autobatch(model)) + + prefix = colorstr('AutoBatch: ') + LOGGER.info(f'{prefix}Computing optimal 
batch size for --imgsz {imgsz}') + device = next(model.parameters()).device # get model device + if device.type == 'cpu': + LOGGER.info(f'{prefix}CUDA not detected, using default CPU batch-size {batch_size}') + return batch_size + + gb = 1 << 30 # bytes to GiB (1024 ** 3) + d = str(device).upper() # 'CUDA:0' + properties = torch.cuda.get_device_properties(device) # device properties + t = properties.total_memory / gb # (GiB) + r = torch.cuda.memory_reserved(device) / gb # (GiB) + a = torch.cuda.memory_allocated(device) / gb # (GiB) + f = t - (r + a) # free inside reserved + LOGGER.info(f'{prefix}{d} ({properties.name}) {t:.2f}G total, {r:.2f}G reserved, {a:.2f}G allocated, {f:.2f}G free') + + batch_sizes = [1, 2, 4, 8, 16] + try: + img = [torch.zeros(b, 3, imgsz, imgsz) for b in batch_sizes] + y = profile(img, model, n=3, device=device) + except Exception as e: + LOGGER.warning(f'{prefix}{e}') + + y = [x[2] for x in y if x] # memory [2] + batch_sizes = batch_sizes[:len(y)] + p = np.polyfit(batch_sizes, y, deg=1) # first degree polynomial fit + b = int((f * fraction - p[1]) / p[0]) # y intercept (optimal batch size) + LOGGER.info(f'{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%)') + return b diff --git a/utils/aws/resume.py b/utils/aws/resume.py index 4b0d4246b594..b21731c979a1 100644 --- a/utils/aws/resume.py +++ b/utils/aws/resume.py @@ -8,7 +8,10 @@ import torch import yaml -sys.path.append('./') # to run '$ python *.py' files in subdirectories +FILE = Path(__file__).resolve() +ROOT = FILE.parents[2] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH port = 0 # --master_port path = Path('').resolve() @@ -18,7 +21,7 @@ continue # Load opt.yaml - with open(last.parent.parent / 'opt.yaml') as f: + with open(last.parent.parent / 'opt.yaml', errors='ignore') as f: opt = yaml.safe_load(f) # Get device count @@ -28,7 +31,7 @@ if ddp: # multi-GPU port += 1 - cmd = f'python -m torch.distributed.launch --nproc_per_node {nd} --master_port {port} train.py --resume {last}' + cmd = f'python -m torch.distributed.run --nproc_per_node {nd} --master_port {port} train.py --resume {last}' else: # single-GPU cmd = f'python train.py --resume {last}' diff --git a/utils/aws/userdata.sh b/utils/aws/userdata.sh index 890606b76a06..5fc1332ac1b0 100644 --- a/utils/aws/userdata.sh +++ b/utils/aws/userdata.sh @@ -7,9 +7,9 @@ cd home/ubuntu if [ ! -d yolov5 ]; then echo "Running first-time script." # install dependencies, download COCO, pull Docker - git clone https://github.com/ultralytics/yolov5 && sudo chmod -R 777 yolov5 + git clone https://github.com/ultralytics/yolov5 -b master && sudo chmod -R 777 yolov5 cd yolov5 - bash data/scripts/get_coco.sh && echo "Data done." & + bash data/scripts/get_coco.sh && echo "COCO done." & sudo docker pull ultralytics/yolov5:latest && echo "Docker done." & python -m pip install --upgrade pip && pip install -r requirements.txt && python detect.py && echo "Requirements done." & wait && echo "All tasks done." 
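The autobatch() helper added above can also be called directly; a usage sketch mirroring its own docstring (a CUDA device is assumed):

import torch

from utils.autobatch import autobatch

model = torch.hub.load('ultralytics/yolov5', 'yolov5s', autoshape=False)
print(autobatch(model, imgsz=640, fraction=0.9))  # largest batch size fitting ~90% of CUDA memory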
# finish background tasks diff --git a/utils/benchmarks.py b/utils/benchmarks.py new file mode 100644 index 000000000000..446248c03f68 --- /dev/null +++ b/utils/benchmarks.py @@ -0,0 +1,104 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Run YOLOv5 benchmarks on all supported export formats + +Format | `export.py --include` | Model +--- | --- | --- +PyTorch | - | yolov5s.pt +TorchScript | `torchscript` | yolov5s.torchscript +ONNX | `onnx` | yolov5s.onnx +OpenVINO | `openvino` | yolov5s_openvino_model/ +TensorRT | `engine` | yolov5s.engine +CoreML | `coreml` | yolov5s.mlmodel +TensorFlow SavedModel | `saved_model` | yolov5s_saved_model/ +TensorFlow GraphDef | `pb` | yolov5s.pb +TensorFlow Lite | `tflite` | yolov5s.tflite +TensorFlow Edge TPU | `edgetpu` | yolov5s_edgetpu.tflite +TensorFlow.js | `tfjs` | yolov5s_web_model/ + +Requirements: + $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu # CPU + $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow # GPU + $ pip install -U nvidia-tensorrt --index-url https://pypi.ngc.nvidia.com # TensorRT + +Usage: + $ python utils/benchmarks.py --weights yolov5s.pt --img 640 +""" + +import argparse +import sys +import time +from pathlib import Path + +import pandas as pd + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[1] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +# ROOT = ROOT.relative_to(Path.cwd()) # relative + +import export +import val +from utils import notebook_init +from utils.general import LOGGER, print_args +from utils.torch_utils import select_device + + +def run(weights=ROOT / 'yolov5s.pt', # weights path + imgsz=640, # inference size (pixels) + batch_size=1, # batch size + data=ROOT / 'data/coco128.yaml', # dataset.yaml path + device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu + half=False, # use FP16 half-precision inference + ): + y, t = [], time.time() + formats = export.export_formats() + device = select_device(device) + for i, (name, f, suffix, gpu) in formats.iterrows(): # index, (name, file, suffix, gpu-capable) + try: + if device.type != 'cpu': + assert gpu, f'{name} inference not supported on GPU' + if f == '-': + w = weights # PyTorch format + else: + w = export.run(weights=weights, imgsz=[imgsz], include=[f], device=device, half=half)[-1] # all others + assert suffix in str(w), 'export failed' + result = val.run(data, w, batch_size, imgsz, plots=False, device=device, task='benchmark', half=half) + metrics = result[0] # metrics (mp, mr, map50, map, *losses(box, obj, cls)) + speeds = result[2] # times (preprocess, inference, postprocess) + y.append([name, round(metrics[3], 4), round(speeds[1], 2)]) # mAP, t_inference + except Exception as e: + LOGGER.warning(f'WARNING: Benchmark failure for {name}: {e}') + y.append([name, None, None]) # mAP, t_inference + + # Print results + LOGGER.info('\n') + parse_opt() + notebook_init() # print system info + py = pd.DataFrame(y, columns=['Format', 'mAP@0.5:0.95', 'Inference time (ms)']) + LOGGER.info(f'\nBenchmarks complete ({time.time() - t:.2f}s)') + LOGGER.info(str(py)) + return py + + +def parse_opt(): + parser = argparse.ArgumentParser() + parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='weights path') + parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)') + parser.add_argument('--batch-size', type=int, default=1, help='batch size') + parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') + parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') + parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') + opt = parser.parse_args() + print_args(FILE.stem, opt) + return opt + + +def main(opt): + run(**vars(opt)) + + +if __name__ == "__main__": + opt = parse_opt() + main(opt) diff --git a/utils/callbacks.py b/utils/callbacks.py new file mode 100644 index 000000000000..c51c268f20d6 --- /dev/null +++ b/utils/callbacks.py @@ -0,0 +1,78 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Callback utils +""" + + +class Callbacks: + """" + Handles all registered callbacks for YOLOv5 Hooks + """ + + def __init__(self): + # Define the available callbacks + self._callbacks = { + 'on_pretrain_routine_start': [], + 'on_pretrain_routine_end': [], + + 'on_train_start': [], + 'on_train_epoch_start': [], + 'on_train_batch_start': [], + 'optimizer_step': [], + 'on_before_zero_grad': [], + 'on_train_batch_end': [], + 'on_train_epoch_end': [], + + 'on_val_start': [], + 'on_val_batch_start': [], + 'on_val_image_end': [], + 'on_val_batch_end': [], + 'on_val_end': [], + + 'on_fit_epoch_end': [], # fit = train + val + 'on_model_save': [], + 'on_train_end': [], + 'on_params_update': [], + 'teardown': [], + } + self.stop_training = False # set True to interrupt training + + def register_action(self, hook, name='', callback=None): + """ + Register a new action to a callback hook + + Args: + hook The callback hook name to register the action to + name The name of the action for later reference + callback The callback to fire + """ + assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}" + assert callable(callback), f"callback '{callback}' is not callable" + self._callbacks[hook].append({'name': name, 'callback': callback}) + + def get_registered_actions(self, hook=None): + """" + Returns all the registered actions by callback hook + + Args: + hook The name of the hook to check, defaults to all + """ + if hook: + return self._callbacks[hook] + else: + return self._callbacks + + def run(self, hook, *args, **kwargs): + """ + Loop through the registered actions and fire all callbacks + + Args: + hook The name of the hook to check, defaults to all + args Arguments to receive from YOLOv5 + kwargs Keyword Arguments to receive from YOLOv5 + """ + + assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}" + + for logger in self._callbacks[hook]: + logger['callback'](*args, **kwargs) diff --git a/utils/datasets.py b/utils/datasets.py index 36416b14e138..f212e54633be 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -1,34 +1,45 @@ -# Dataset utils and dataloaders +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Dataloaders and dataset utils +""" import glob -import logging +import hashlib +import json import math import os import random import shutil import time from itertools import repeat -from multiprocessing.pool import ThreadPool +from multiprocessing.pool import Pool, ThreadPool from pathlib import Path from threading import Thread +from urllib.parse import urlparse +from zipfile import ZipFile import cv2 import numpy as np import torch import torch.nn.functional as F -from PIL import Image, ExifTags -from torch.utils.data import Dataset +import yaml +from PIL import ExifTags, Image, ImageOps +from torch.utils.data import DataLoader, Dataset, dataloader, distributed from tqdm import tqdm -from utils.general import check_requirements, xyxy2xywh, xywh2xyxy, xywhn2xyxy, xyn2xy, segment2box, segments2boxes, \ - resample_segments, 
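The Callbacks registry added above is deliberately small; a sketch of registering and firing a hook (hook names come from the class definition):

from utils.callbacks import Callbacks

callbacks = Callbacks()
callbacks.register_action('on_train_end', name='notify', callback=lambda *args, **kwargs: print('training finished'))
callbacks.run('on_train_end')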
clean_str +from utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective +from utils.general import (DATASETS_DIR, LOGGER, NUM_THREADS, check_dataset, check_requirements, check_yaml, clean_str, + segments2boxes, xyn2xy, xywh2xyxy, xywhn2xyxy, xyxy2xywhn) from utils.torch_utils import torch_distributed_zero_first +# Remap +cv2.imread = lambda x: cv2.imdecode(np.fromfile(x, np.uint8), cv2.IMREAD_COLOR) # for Chinese filenames + # Parameters -help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' -img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo'] # acceptable image suffixes -vid_formats = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes -logger = logging.getLogger(__name__) +HELP_URL = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' +IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp' # include image suffixes +VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes +BAR_FORMAT = '{l_bar}{bar:10}{r_bar}{bar:-10b}' # tqdm bar format # Get orientation exif tag for orientation in ExifTags.TAGS.keys(): @@ -36,9 +47,12 @@ break -def get_hash(files): - # Returns a single hash value of a list of files - return sum(os.path.getsize(f) for f in files if os.path.isfile(f)) +def get_hash(paths): + # Returns a single hash value of a list of paths (files or dirs) + size = sum(os.path.getsize(p) for p in paths if os.path.exists(p)) # sizes + h = hashlib.md5(str(size).encode()) # hash sizes + h.update(''.join(paths).encode()) # hash paths + return h.hexdigest() # return hash def exif_size(img): @@ -50,42 +64,70 @@ def exif_size(img): s = (s[1], s[0]) elif rotation == 8: # rotation 90 s = (s[1], s[0]) - except: + except Exception: pass return s -def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False, - rank=-1, world_size=1, workers=8, image_weights=False, quad=False, prefix=''): - # Make sure only the first process in DDP process the dataset first, and the following others can use the cache - with torch_distributed_zero_first(rank): +def exif_transpose(image): + """ + Transpose a PIL image accordingly if it has an EXIF Orientation tag. + Inplace version of https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py exif_transpose() + + :param image: The image to transpose. + :return: An image. 
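+
+    Example (illustrative)::
+
+        from PIL import Image
+        im = exif_transpose(Image.open('photo.jpg'))  # pixels rotated upright, Orientation tag cleared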
+ """ + exif = image.getexif() + orientation = exif.get(0x0112, 1) # default 1 + if orientation > 1: + method = {2: Image.FLIP_LEFT_RIGHT, + 3: Image.ROTATE_180, + 4: Image.FLIP_TOP_BOTTOM, + 5: Image.TRANSPOSE, + 6: Image.ROTATE_270, + 7: Image.TRANSVERSE, + 8: Image.ROTATE_90, + }.get(orientation) + if method is not None: + image = image.transpose(method) + del exif[0x0112] + image.info["exif"] = exif.tobytes() + return image + + +def create_dataloader(path, imgsz, batch_size, stride, single_cls=False, hyp=None, augment=False, cache=False, pad=0.0, + rect=False, rank=-1, workers=8, image_weights=False, quad=False, prefix='', shuffle=False): + if rect and shuffle: + LOGGER.warning('WARNING: --rect is incompatible with DataLoader shuffle, setting shuffle=False') + shuffle = False + with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP dataset = LoadImagesAndLabels(path, imgsz, batch_size, - augment=augment, # augment images - hyp=hyp, # augmentation hyperparameters - rect=rect, # rectangular training + augment=augment, # augmentation + hyp=hyp, # hyperparameters + rect=rect, # rectangular batches cache_images=cache, - single_cls=opt.single_cls, + single_cls=single_cls, stride=int(stride), pad=pad, image_weights=image_weights, prefix=prefix) batch_size = min(batch_size, len(dataset)) - nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers]) # number of workers - sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None - loader = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader - # Use torch.utils.data.DataLoader() if dataset.properties will update during training else InfiniteDataLoader() - dataloader = loader(dataset, - batch_size=batch_size, - num_workers=nw, - sampler=sampler, - pin_memory=True, - collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn) - return dataloader, dataset - - -class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader): + nd = torch.cuda.device_count() # number of CUDA devices + nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) # number of workers + sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) + loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates + return loader(dataset, + batch_size=batch_size, + shuffle=shuffle and sampler is None, + num_workers=nw, + sampler=sampler, + pin_memory=True, + collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn), dataset + + +class InfiniteDataLoader(dataloader.DataLoader): """ Dataloader that reuses workers Uses same syntax as vanilla DataLoader @@ -104,7 +146,7 @@ def __iter__(self): yield next(self.iterator) -class _RepeatSampler(object): +class _RepeatSampler: """ Sampler that repeats forever Args: @@ -119,9 +161,10 @@ def __iter__(self): yield from iter(self.sampler) -class LoadImages: # for inference - def __init__(self, path, img_size=640, stride=32): - p = str(Path(path).absolute()) # os-agnostic absolute path +class LoadImages: + # YOLOv5 image/video dataloader, i.e. 
`python detect.py --source image.jpg/vid.mp4` + def __init__(self, path, img_size=640, stride=32, auto=True): + p = str(Path(path).resolve()) # os-agnostic absolute path if '*' in p: files = sorted(glob.glob(p, recursive=True)) # glob elif os.path.isdir(p): @@ -131,8 +174,8 @@ def __init__(self, path, img_size=640, stride=32): else: raise Exception(f'ERROR: {p} does not exist') - images = [x for x in files if x.split('.')[-1].lower() in img_formats] - videos = [x for x in files if x.split('.')[-1].lower() in vid_formats] + images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS] + videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS] ni, nv = len(images), len(videos) self.img_size = img_size @@ -141,12 +184,13 @@ def __init__(self, path, img_size=640, stride=32): self.nf = ni + nv # number of files self.video_flag = [False] * ni + [True] * nv self.mode = 'image' + self.auto = auto if any(videos): self.new_video(videos[0]) # new video else: self.cap = None assert self.nf > 0, f'No images or videos found in {p}. ' \ - f'Supported formats are:\nimages: {img_formats}\nvideos: {vid_formats}' + f'Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}' def __iter__(self): self.count = 0 @@ -161,7 +205,7 @@ def __next__(self): # Read video self.mode = 'video' ret_val, img0 = self.cap.read() - if not ret_val: + while not ret_val: self.count += 1 self.cap.release() if self.count == self.nf: # last video @@ -172,23 +216,23 @@ def __next__(self): ret_val, img0 = self.cap.read() self.frame += 1 - print(f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: ', end='') + s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: ' else: # Read image self.count += 1 img0 = cv2.imread(path) # BGR - assert img0 is not None, 'Image Not Found ' + path - print(f'image {self.count}/{self.nf} {path}: ', end='') + assert img0 is not None, f'Image Not Found {path}' + s = f'image {self.count}/{self.nf} {path}: ' # Padded resize - img = letterbox(img0, self.img_size, stride=self.stride)[0] + img = letterbox(img0, self.img_size, stride=self.stride, auto=self.auto)[0] # Convert - img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 + img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB img = np.ascontiguousarray(img) - return path, img, img0, self.cap + return path, img, img0, self.cap, s def new_video(self, path): self.frame = 0 @@ -200,18 +244,12 @@ def __len__(self): class LoadWebcam: # for inference + # YOLOv5 local webcam dataloader, i.e. 
`python detect.py --source 0` def __init__(self, pipe='0', img_size=640, stride=32): self.img_size = img_size self.stride = stride - - if pipe.isnumeric(): - pipe = eval(pipe) # local camera - # pipe = 'rtsp://192.168.1.64/1' # IP camera - # pipe = 'rtsp://username:password@192.168.1.64/1' # IP camera with login - # pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera - - self.pipe = pipe - self.cap = cv2.VideoCapture(pipe) # video capture object + self.pipe = eval(pipe) if pipe.isnumeric() else pipe + self.cap = cv2.VideoCapture(self.pipe) # video capture object self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size def __iter__(self): @@ -226,45 +264,36 @@ def __next__(self): raise StopIteration # Read frame - if self.pipe == 0: # local camera - ret_val, img0 = self.cap.read() - img0 = cv2.flip(img0, 1) # flip left-right - else: # IP camera - n = 0 - while True: - n += 1 - self.cap.grab() - if n % 30 == 0: # skip frames - ret_val, img0 = self.cap.retrieve() - if ret_val: - break + ret_val, img0 = self.cap.read() + img0 = cv2.flip(img0, 1) # flip left-right # Print assert ret_val, f'Camera Error {self.pipe}' img_path = 'webcam.jpg' - print(f'webcam {self.count}: ', end='') + s = f'webcam {self.count}: ' # Padded resize img = letterbox(img0, self.img_size, stride=self.stride)[0] # Convert - img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 + img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB img = np.ascontiguousarray(img) - return img_path, img, img0, None + return img_path, img, img0, None, s def __len__(self): return 0 -class LoadStreams: # multiple IP or RTSP cameras - def __init__(self, sources='streams.txt', img_size=640, stride=32): +class LoadStreams: + # YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams` + def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True): self.mode = 'stream' self.img_size = img_size self.stride = stride if os.path.isfile(sources): - with open(sources, 'r') as f: + with open(sources) as f: sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())] else: sources = [sources] @@ -272,43 +301,50 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32): n = len(sources) self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n self.sources = [clean_str(x) for x in sources] # clean source names for later + self.auto = auto for i, s in enumerate(sources): # index, source # Start thread to read frames from video stream - print(f'{i + 1}/{n}: {s}... ', end='') - if 'youtube.com/' in s or 'youtu.be/' in s: # if source is YouTube video - check_requirements(('pafy', 'youtube_dl')) + st = f'{i + 1}/{n}: {s}... ' + if urlparse(s).hostname in ('youtube.com', 'youtu.be'): # if source is YouTube video + check_requirements(('pafy', 'youtube_dl==2020.12.2')) import pafy s = pafy.new(s).getbest(preftype="mp4").url # YouTube URL s = eval(s) if s.isnumeric() else s # i.e. 
s = '0' local webcam cap = cv2.VideoCapture(s) - assert cap.isOpened(), f'Failed to open {s}' + assert cap.isOpened(), f'{st}Failed to open {s}' w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) - self.fps[i] = max(cap.get(cv2.CAP_PROP_FPS) % 100, 0) or 30.0 # 30 FPS fallback + fps = cap.get(cv2.CAP_PROP_FPS) # warning: may return 0 or nan self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf') # infinite stream fallback + self.fps[i] = max((fps if math.isfinite(fps) else 0) % 100, 0) or 30 # 30 FPS fallback _, self.imgs[i] = cap.read() # guarantee first frame - self.threads[i] = Thread(target=self.update, args=([i, cap]), daemon=True) - print(f" success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)") + self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True) + LOGGER.info(f"{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)") self.threads[i].start() - print('') # newline + LOGGER.info('') # newline # check for common shapes - s = np.stack([letterbox(x, self.img_size, stride=self.stride)[0].shape for x in self.imgs], 0) # shapes + s = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0].shape for x in self.imgs]) self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal if not self.rect: - print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.') + LOGGER.warning('WARNING: Stream shapes differ. For optimal performance supply similarly-shaped streams.') - def update(self, i, cap): + def update(self, i, cap, stream): # Read stream `i` frames in daemon thread - n, f = 0, self.frames[i] + n, f, read = 0, self.frames[i], 1 # frame number, frame array, inference every 'read' frame while cap.isOpened() and n < f: n += 1 # _, self.imgs[index] = cap.read() cap.grab() - if n % 4: # read every 4th frame + if n % read == 0: success, im = cap.retrieve() - self.imgs[i] = im if success else self.imgs[i] * 0 + if success: + self.imgs[i] = im + else: + LOGGER.warning('WARNING: Video stream unresponsive, please check your IP camera connection.') + self.imgs[i] = np.zeros_like(self.imgs[i]) + cap.open(stream) # re-open stream if signal was lost time.sleep(1 / self.fps[i]) # wait time def __iter__(self): @@ -323,28 +359,31 @@ def __next__(self): # Letterbox img0 = self.imgs.copy() - img = [letterbox(x, self.img_size, auto=self.rect, stride=self.stride)[0] for x in img0] + img = [letterbox(x, self.img_size, stride=self.stride, auto=self.rect and self.auto)[0] for x in img0] # Stack img = np.stack(img, 0) # Convert - img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416 + img = img[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW img = np.ascontiguousarray(img) - return self.sources, img, img0, None + return self.sources, img, img0, None, '' def __len__(self): - return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years + return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years def img2label_paths(img_paths): # Define label paths as a function of image paths sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings - return ['txt'.join(x.replace(sa, sb, 1).rsplit(x.split('.')[-1], 1)) for x in img_paths] + return [sb.join(x.rsplit(sa, 1)).rsplit('.', 1)[0] + '.txt' for x in img_paths] + +class LoadImagesAndLabels(Dataset): + # YOLOv5 train_loader/val_loader, loads images and labels for training and 
validation + cache_version = 0.6 # dataset labels *.cache version -class LoadImagesAndLabels(Dataset): # for training/testing def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False, cache_images=False, single_cls=False, stride=32, pad=0.0, prefix=''): self.img_size = img_size @@ -356,6 +395,7 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r self.mosaic_border = [-img_size // 2, -img_size // 2] self.stride = stride self.path = path + self.albumentations = Albumentations() if augment else None try: f = [] # image files @@ -363,50 +403,47 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r p = Path(p) # os-agnostic if p.is_dir(): # dir f += glob.glob(str(p / '**' / '*.*'), recursive=True) - # f = list(p.rglob('**/*.*')) # pathlib + # f = list(p.rglob('*.*')) # pathlib elif p.is_file(): # file - with open(p, 'r') as t: + with open(p) as t: t = t.read().strip().splitlines() parent = str(p.parent) + os.sep f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path # f += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib) else: raise Exception(f'{prefix}{p} does not exist') - self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in img_formats]) - # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in img_formats]) # pathlib - assert self.img_files, f'{prefix}No images found' + self.im_files = sorted(x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS) + # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS]) # pathlib + assert self.im_files, f'{prefix}No images found' except Exception as e: - raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {help_url}') + raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {HELP_URL}') # Check cache - self.label_files = img2label_paths(self.img_files) # labels - cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') # cached labels - if cache_path.is_file(): - cache, exists = torch.load(cache_path), True # load - if cache['hash'] != get_hash(self.label_files + self.img_files) or 'version' not in cache: # changed - cache, exists = self.cache_labels(cache_path, prefix), False # re-cache - else: + self.label_files = img2label_paths(self.im_files) # labels + cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') + try: + cache, exists = np.load(cache_path, allow_pickle=True).item(), True # load dict + assert cache['version'] == self.cache_version # same version + assert cache['hash'] == get_hash(self.label_files + self.im_files) # same hash + except Exception: cache, exists = self.cache_labels(cache_path, prefix), False # cache # Display cache - nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupted, total + nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupt, total if exists: - d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted" - tqdm(None, desc=prefix + d, total=n, initial=n) # display cache results - assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {help_url}' + d = f"Scanning '{cache_path}' images and labels... 
{nf} found, {nm} missing, {ne} empty, {nc} corrupt" + tqdm(None, desc=prefix + d, total=n, initial=n, bar_format=BAR_FORMAT) # display cache results + if cache['msgs']: + LOGGER.info('\n'.join(cache['msgs'])) # display warnings + assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {HELP_URL}' # Read cache - cache.pop('hash') # remove hash - cache.pop('version') # remove version + [cache.pop(k) for k in ('hash', 'version', 'msgs')] # remove items labels, shapes, self.segments = zip(*cache.values()) self.labels = list(labels) self.shapes = np.array(shapes, dtype=np.float64) - self.img_files = list(cache.keys()) # update + self.im_files = list(cache.keys()) # update self.label_files = img2label_paths(cache.keys()) # update - if single_cls: - for x in self.labels: - x[:, 0] = 0 - n = len(shapes) # number of images bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index nb = bi[-1] + 1 # number of batches @@ -414,13 +451,27 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r self.n = n self.indices = range(n) + # Update labels + include_class = [] # filter labels to include only these classes (optional) + include_class_array = np.array(include_class).reshape(1, -1) + for i, (label, segment) in enumerate(zip(self.labels, self.segments)): + if include_class: + j = (label[:, 0:1] == include_class_array).any(1) + self.labels[i] = label[j] + if segment: + self.segments[i] = segment[j] + if single_cls: # single-class training, merge all classes into 0 + self.labels[i][:, 0] = 0 + if segment: + self.segments[i][:, 0] = 0 + # Rectangular Training if self.rect: # Sort by aspect ratio s = self.shapes # wh ar = s[:, 1] / s[:, 0] # aspect ratio irect = ar.argsort() - self.img_files = [self.img_files[i] for i in irect] + self.im_files = [self.im_files[i] for i in irect] self.label_files = [self.label_files[i] for i in irect] self.labels = [self.labels[i] for i in irect] self.shapes = s[irect] # wh @@ -438,79 +489,62 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride - # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM) - self.imgs = [None] * n + # Cache images into RAM/disk for faster training (WARNING: large datasets may exceed system resources) + self.ims = [None] * n + self.npy_files = [Path(f).with_suffix('.npy') for f in self.im_files] if cache_images: gb = 0 # Gigabytes of cached images - self.img_hw0, self.img_hw = [None] * n, [None] * n - results = ThreadPool(8).imap(lambda x: load_image(*x), zip(repeat(self), range(n))) # 8 threads - pbar = tqdm(enumerate(results), total=n) + self.im_hw0, self.im_hw = [None] * n, [None] * n + fcn = self.cache_images_to_disk if cache_images == 'disk' else self.load_image + results = ThreadPool(NUM_THREADS).imap(fcn, range(n)) + pbar = tqdm(enumerate(results), total=n, bar_format=BAR_FORMAT) for i, x in pbar: - self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # img, hw_original, hw_resized = load_image(self, i) - gb += self.imgs[i].nbytes - pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB)' + if cache_images == 'disk': + gb += self.npy_files[i].stat().st_size + else: # 'ram' + self.ims[i], self.im_hw0[i], self.im_hw[i] = x # im, hw_orig, hw_resized = load_image(self, i) + gb += self.ims[i].nbytes + pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB {cache_images})' pbar.close() def 
cache_labels(self, path=Path('./labels.cache'), prefix=''): # Cache dataset labels, check images and read shapes x = {} # dict - nm, nf, ne, nc = 0, 0, 0, 0 # number missing, found, empty, duplicate - pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files)) - for i, (im_file, lb_file) in enumerate(pbar): - try: - # verify images - im = Image.open(im_file) - im.verify() # PIL verify - shape = exif_size(im) # image size - segments = [] # instance segments - assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels' - assert im.format.lower() in img_formats, f'invalid image format {im.format}' - - # verify labels - if os.path.isfile(lb_file): - nf += 1 # label found - with open(lb_file, 'r') as f: - l = [x.split() for x in f.read().strip().splitlines()] - if any([len(x) > 8 for x in l]): # is segment - classes = np.array([x[0] for x in l], dtype=np.float32) - segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in l] # (cls, xy1...) - l = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh) - l = np.array(l, dtype=np.float32) - if len(l): - assert l.shape[1] == 5, 'labels require 5 columns each' - assert (l >= 0).all(), 'negative labels' - assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels' - assert np.unique(l, axis=0).shape[0] == l.shape[0], 'duplicate labels' - else: - ne += 1 # label empty - l = np.zeros((0, 5), dtype=np.float32) - else: - nm += 1 # label missing - l = np.zeros((0, 5), dtype=np.float32) - x[im_file] = [l, shape, segments] - except Exception as e: - nc += 1 - logging.info(f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}') - - pbar.desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels... " \ - f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted" - pbar.close() + nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupt, messages + desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels..." + with Pool(NUM_THREADS) as pool: + pbar = tqdm(pool.imap(verify_image_label, zip(self.im_files, self.label_files, repeat(prefix))), + desc=desc, total=len(self.im_files), bar_format=BAR_FORMAT) + for im_file, lb, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar: + nm += nm_f + nf += nf_f + ne += ne_f + nc += nc_f + if im_file: + x[im_file] = [lb, shape, segments] + if msg: + msgs.append(msg) + pbar.desc = f"{desc}{nf} found, {nm} missing, {ne} empty, {nc} corrupt" + pbar.close() + if msgs: + LOGGER.info('\n'.join(msgs)) if nf == 0: - logging.info(f'{prefix}WARNING: No labels found in {path}. See {help_url}') - - x['hash'] = get_hash(self.label_files + self.img_files) - x['results'] = nf, nm, ne, nc, i + 1 - x['version'] = 0.1 # cache version + LOGGER.warning(f'{prefix}WARNING: No labels found in {path}. 
See {HELP_URL}') + x['hash'] = get_hash(self.label_files + self.im_files) + x['results'] = nf, nm, ne, nc, len(self.im_files) + x['msgs'] = msgs # warnings + x['version'] = self.cache_version # cache version try: - torch.save(x, path) # save for next time - logging.info(f'{prefix}New cache created: {path}') + np.save(path, x) # save cache for next time + path.with_suffix('.cache.npy').rename(path) # remove .npy suffix + LOGGER.info(f'{prefix}New cache created: {path}') except Exception as e: - logging.info(f'{prefix}WARNING: Cache directory {path.parent} is not writeable: {e}') # path not writeable + LOGGER.warning(f'{prefix}WARNING: Cache directory {path.parent} is not writeable: {e}') # not writeable return x def __len__(self): - return len(self.img_files) + return len(self.im_files) # def __iter__(self): # self.count = -1 @@ -525,19 +559,16 @@ def __getitem__(self, index): mosaic = self.mosaic and random.random() < hyp['mosaic'] if mosaic: # Load mosaic - img, labels = load_mosaic(self, index) + img, labels = self.load_mosaic(index) shapes = None - # MixUp https://arxiv.org/pdf/1710.09412.pdf + # MixUp augmentation if random.random() < hyp['mixup']: - img2, labels2 = load_mosaic(self, random.randint(0, self.n - 1)) - r = np.random.beta(8.0, 8.0) # mixup ratio, alpha=beta=8.0 - img = (img * r + img2 * (1 - r)).astype(np.uint8) - labels = np.concatenate((labels, labels2), 0) + img, labels = mixup(img, labels, *self.load_mosaic(random.randint(0, self.n - 1))) else: # Load image - img, (h0, w0), (h, w) = load_image(self, index) + img, (h0, w0), (h, w) = self.load_image(index) # Letterbox shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape @@ -548,9 +579,7 @@ def __getitem__(self, index): if labels.size: # normalized xywh to pixel xyxy format labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1]) - if self.augment: - # Augment imagespace - if not mosaic: + if self.augment: img, labels = random_perspective(img, labels, degrees=hyp['degrees'], translate=hyp['translate'], @@ -558,442 +587,234 @@ def __getitem__(self, index): shear=hyp['shear'], perspective=hyp['perspective']) - # Augment colorspace - augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v']) + nl = len(labels) # number of labels + if nl: + labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1E-3) - # Apply cutouts - # if random.random() < 0.9: - # labels = cutout(img, labels) + if self.augment: + # Albumentations + img, labels = self.albumentations(img, labels) + nl = len(labels) # update after albumentations - nL = len(labels) # number of labels - if nL: - labels[:, 1:5] = xyxy2xywh(labels[:, 1:5]) # convert xyxy to xywh - labels[:, [2, 4]] /= img.shape[0] # normalized height 0-1 - labels[:, [1, 3]] /= img.shape[1] # normalized width 0-1 + # HSV color-space + augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v']) - if self.augment: - # flip up-down + # Flip up-down if random.random() < hyp['flipud']: img = np.flipud(img) - if nL: + if nl: labels[:, 2] = 1 - labels[:, 2] - # flip left-right + # Flip left-right if random.random() < hyp['fliplr']: img = np.fliplr(img) - if nL: + if nl: labels[:, 1] = 1 - labels[:, 1] - labels_out = torch.zeros((nL, 6)) - if nL: + # Cutouts + # labels = cutout(img, labels, p=0.5) + # nl = len(labels) # update after cutout + + labels_out = torch.zeros((nl, 6)) + if nl: labels_out[:, 1:] = torch.from_numpy(labels) # Convert - img = 
img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 + img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB img = np.ascontiguousarray(img) - return torch.from_numpy(img), labels_out, self.img_files[index], shapes + return torch.from_numpy(img), labels_out, self.im_files[index], shapes + + def load_image(self, i): + # Loads 1 image from dataset index 'i', returns (im, original hw, resized hw) + im, f, fn = self.ims[i], self.im_files[i], self.npy_files[i], + if im is None: # not cached in RAM + if fn.exists(): # load npy + im = np.load(fn) + else: # read image + im = cv2.imread(f) # BGR + assert im is not None, f'Image Not Found {f}' + h0, w0 = im.shape[:2] # orig hw + r = self.img_size / max(h0, w0) # ratio + if r != 1: # if sizes are not equal + im = cv2.resize(im, + (int(w0 * r), int(h0 * r)), + interpolation=cv2.INTER_LINEAR if (self.augment or r > 1) else cv2.INTER_AREA) + return im, (h0, w0), im.shape[:2] # im, hw_original, hw_resized + else: + return self.ims[i], self.im_hw0[i], self.im_hw[i] # im, hw_original, hw_resized + + def cache_images_to_disk(self, i): + # Saves an image as an *.npy file for faster loading + f = self.npy_files[i] + if not f.exists(): + np.save(f.as_posix(), cv2.imread(self.im_files[i])) + + def load_mosaic(self, index): + # YOLOv5 4-mosaic loader. Loads 1 image + 3 random images into a 4-image mosaic + labels4, segments4 = [], [] + s = self.img_size + yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border) # mosaic center x, y + indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices + random.shuffle(indices) + for i, index in enumerate(indices): + # Load image + img, _, (h, w) = self.load_image(index) + + # place img in img4 + if i == 0: # top left + img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles + x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image) + x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image) + elif i == 1: # top right + x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc + x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h + elif i == 2: # bottom left + x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h) + x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h) + elif i == 3: # bottom right + x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h) + x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h) + + img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] + padw = x1a - x1b + padh = y1a - y1b + + # Labels + labels, segments = self.labels[index].copy(), self.segments[index].copy() + if labels.size: + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format + segments = [xyn2xy(x, w, h, padw, padh) for x in segments] + labels4.append(labels) + segments4.extend(segments) + + # Concat/clip labels + labels4 = np.concatenate(labels4, 0) + for x in (labels4[:, 1:], *segments4): + np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() + # img4, labels4 = replicate(img4, labels4) # replicate + + # Augment + img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp['copy_paste']) + img4, labels4 = random_perspective(img4, labels4, segments4, + degrees=self.hyp['degrees'], + translate=self.hyp['translate'], + scale=self.hyp['scale'], + shear=self.hyp['shear'], + perspective=self.hyp['perspective'], + 
border=self.mosaic_border) # border to remove + + return img4, labels4 + + def load_mosaic9(self, index): + # YOLOv5 9-mosaic loader. Loads 1 image + 8 random images into a 9-image mosaic + labels9, segments9 = [], [] + s = self.img_size + indices = [index] + random.choices(self.indices, k=8) # 8 additional image indices + random.shuffle(indices) + hp, wp = -1, -1 # height, width previous + for i, index in enumerate(indices): + # Load image + img, _, (h, w) = self.load_image(index) + + # place img in img9 + if i == 0: # center + img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles + h0, w0 = h, w + c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates + elif i == 1: # top + c = s, s - h, s + w, s + elif i == 2: # top right + c = s + wp, s - h, s + wp + w, s + elif i == 3: # right + c = s + w0, s, s + w0 + w, s + h + elif i == 4: # bottom right + c = s + w0, s + hp, s + w0 + w, s + hp + h + elif i == 5: # bottom + c = s + w0 - w, s + h0, s + w0, s + h0 + h + elif i == 6: # bottom left + c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h + elif i == 7: # left + c = s - w, s + h0 - h, s, s + h0 + elif i == 8: # top left + c = s - w, s + h0 - hp - h, s, s + h0 - hp + + padx, pady = c[:2] + x1, y1, x2, y2 = (max(x, 0) for x in c) # allocate coords + + # Labels + labels, segments = self.labels[index].copy(), self.segments[index].copy() + if labels.size: + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady) # normalized xywh to pixel xyxy format + segments = [xyn2xy(x, w, h, padx, pady) for x in segments] + labels9.append(labels) + segments9.extend(segments) + + # Image + img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax] + hp, wp = h, w # height, width previous + + # Offset + yc, xc = (int(random.uniform(0, s)) for _ in self.mosaic_border) # mosaic center x, y + img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s] + + # Concat/clip labels + labels9 = np.concatenate(labels9, 0) + labels9[:, [1, 3]] -= xc + labels9[:, [2, 4]] -= yc + c = np.array([xc, yc]) # centers + segments9 = [x - c for x in segments9] + + for x in (labels9[:, 1:], *segments9): + np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() + # img9, labels9 = replicate(img9, labels9) # replicate + + # Augment + img9, labels9 = random_perspective(img9, labels9, segments9, + degrees=self.hyp['degrees'], + translate=self.hyp['translate'], + scale=self.hyp['scale'], + shear=self.hyp['shear'], + perspective=self.hyp['perspective'], + border=self.mosaic_border) # border to remove + + return img9, labels9 @staticmethod def collate_fn(batch): - img, label, path, shapes = zip(*batch) # transposed - for i, l in enumerate(label): - l[:, 0] = i # add target image index for build_targets() - return torch.stack(img, 0), torch.cat(label, 0), path, shapes + im, label, path, shapes = zip(*batch) # transposed + for i, lb in enumerate(label): + lb[:, 0] = i # add target image index for build_targets() + return torch.stack(im, 0), torch.cat(label, 0), path, shapes @staticmethod def collate_fn4(batch): img, label, path, shapes = zip(*batch) # transposed n = len(shapes) // 4 - img4, label4, path4, shapes4 = [], [], path[:n], shapes[:n] + im4, label4, path4, shapes4 = [], [], path[:n], shapes[:n] - ho = torch.tensor([[0., 0, 0, 1, 0, 0]]) - wo = torch.tensor([[0., 0, 1, 0, 0, 0]]) - s = torch.tensor([[1, 1, .5, .5, .5, .5]]) # scale + ho = torch.tensor([[0.0, 0, 0, 1, 0, 0]]) + wo = torch.tensor([[0.0, 0, 1, 0, 0, 0]]) + s = torch.tensor([[1, 1, 0.5, 0.5, 0.5, 0.5]]) 
# scale for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW i *= 4 if random.random() < 0.5: - im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2., mode='bilinear', align_corners=False)[ + im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2.0, mode='bilinear', align_corners=False)[ 0].type(img[i].type()) - l = label[i] + lb = label[i] else: im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2) - l = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s - img4.append(im) - label4.append(l) + lb = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s + im4.append(im) + label4.append(lb) - for i, l in enumerate(label4): - l[:, 0] = i # add target image index for build_targets() + for i, lb in enumerate(label4): + lb[:, 0] = i # add target image index for build_targets() - return torch.stack(img4, 0), torch.cat(label4, 0), path4, shapes4 + return torch.stack(im4, 0), torch.cat(label4, 0), path4, shapes4 # Ancillary functions -------------------------------------------------------------------------------------------------- -def load_image(self, index): - # loads 1 image from dataset, returns img, original hw, resized hw - img = self.imgs[index] - if img is None: # not cached - path = self.img_files[index] - img = cv2.imread(path) # BGR - assert img is not None, 'Image Not Found ' + path - h0, w0 = img.shape[:2] # orig hw - r = self.img_size / max(h0, w0) # ratio - if r != 1: # if sizes are not equal - img = cv2.resize(img, (int(w0 * r), int(h0 * r)), - interpolation=cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR) - return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized - else: - return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized - - -def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5): - r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains - hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV)) - dtype = img.dtype # uint8 - - x = np.arange(0, 256, dtype=np.int16) - lut_hue = ((x * r[0]) % 180).astype(dtype) - lut_sat = np.clip(x * r[1], 0, 255).astype(dtype) - lut_val = np.clip(x * r[2], 0, 255).astype(dtype) - - img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype) - cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed - - -def hist_equalize(img, clahe=True, bgr=False): - # Equalize histogram on BGR image 'img' with img.shape(n,m,3) and range 0-255 - yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV) - if clahe: - c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8)) - yuv[:, :, 0] = c.apply(yuv[:, :, 0]) - else: - yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0]) # equalize Y channel histogram - return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB) # convert YUV image to RGB - - -def load_mosaic(self, index): - # loads images in a 4-mosaic - - labels4, segments4 = [], [] - s = self.img_size - yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y - indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices - for i, index in enumerate(indices): - # Load image - img, _, (h, w) = load_image(self, index) - - # place img in img4 - if i == 0: # top left - img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles - x1a, y1a, x2a, y2a = max(xc 
- w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image) - x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image) - elif i == 1: # top right - x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc - x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h - elif i == 2: # bottom left - x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h) - x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h) - elif i == 3: # bottom right - x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h) - x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h) - - img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] - padw = x1a - x1b - padh = y1a - y1b - - # Labels - labels, segments = self.labels[index].copy(), self.segments[index].copy() - if labels.size: - labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format - segments = [xyn2xy(x, w, h, padw, padh) for x in segments] - labels4.append(labels) - segments4.extend(segments) - - # Concat/clip labels - labels4 = np.concatenate(labels4, 0) - for x in (labels4[:, 1:], *segments4): - np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() - # img4, labels4 = replicate(img4, labels4) # replicate - - # Augment - img4, labels4 = random_perspective(img4, labels4, segments4, - degrees=self.hyp['degrees'], - translate=self.hyp['translate'], - scale=self.hyp['scale'], - shear=self.hyp['shear'], - perspective=self.hyp['perspective'], - border=self.mosaic_border) # border to remove - - return img4, labels4 - - -def load_mosaic9(self, index): - # loads images in a 9-mosaic - - labels9, segments9 = [], [] - s = self.img_size - indices = [index] + random.choices(self.indices, k=8) # 8 additional image indices - for i, index in enumerate(indices): - # Load image - img, _, (h, w) = load_image(self, index) - - # place img in img9 - if i == 0: # center - img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles - h0, w0 = h, w - c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates - elif i == 1: # top - c = s, s - h, s + w, s - elif i == 2: # top right - c = s + wp, s - h, s + wp + w, s - elif i == 3: # right - c = s + w0, s, s + w0 + w, s + h - elif i == 4: # bottom right - c = s + w0, s + hp, s + w0 + w, s + hp + h - elif i == 5: # bottom - c = s + w0 - w, s + h0, s + w0, s + h0 + h - elif i == 6: # bottom left - c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h - elif i == 7: # left - c = s - w, s + h0 - h, s, s + h0 - elif i == 8: # top left - c = s - w, s + h0 - hp - h, s, s + h0 - hp - - padx, pady = c[:2] - x1, y1, x2, y2 = [max(x, 0) for x in c] # allocate coords - - # Labels - labels, segments = self.labels[index].copy(), self.segments[index].copy() - if labels.size: - labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady) # normalized xywh to pixel xyxy format - segments = [xyn2xy(x, w, h, padx, pady) for x in segments] - labels9.append(labels) - segments9.extend(segments) - - # Image - img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax] - hp, wp = h, w # height, width previous - - # Offset - yc, xc = [int(random.uniform(0, s)) for _ in self.mosaic_border] # mosaic center x, y - img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s] - - # Concat/clip labels - labels9 = np.concatenate(labels9, 0) - labels9[:, [1, 3]] -= xc - labels9[:, [2, 4]] -= yc - c = np.array([xc, yc]) # centers - segments9 = [x - c for x in 
segments9] - - for x in (labels9[:, 1:], *segments9): - np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() - # img9, labels9 = replicate(img9, labels9) # replicate - - # Augment - img9, labels9 = random_perspective(img9, labels9, segments9, - degrees=self.hyp['degrees'], - translate=self.hyp['translate'], - scale=self.hyp['scale'], - shear=self.hyp['shear'], - perspective=self.hyp['perspective'], - border=self.mosaic_border) # border to remove - - return img9, labels9 - - -def replicate(img, labels): - # Replicate labels - h, w = img.shape[:2] - boxes = labels[:, 1:].astype(int) - x1, y1, x2, y2 = boxes.T - s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels) - for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices - x1b, y1b, x2b, y2b = boxes[i] - bh, bw = y2b - y1b, x2b - x1b - yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y - x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh] - img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] - labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0) - - return img, labels - - -def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32): - # Resize and pad image while meeting stride-multiple constraints - shape = img.shape[:2] # current shape [height, width] - if isinstance(new_shape, int): - new_shape = (new_shape, new_shape) - - # Scale ratio (new / old) - r = min(new_shape[0] / shape[0], new_shape[1] / shape[1]) - if not scaleup: # only scale down, do not scale up (for better test mAP) - r = min(r, 1.0) - - # Compute padding - ratio = r, r # width, height ratios - new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) - dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding - if auto: # minimum rectangle - dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding - elif scaleFill: # stretch - dw, dh = 0.0, 0.0 - new_unpad = (new_shape[1], new_shape[0]) - ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios - - dw /= 2 # divide padding into 2 sides - dh /= 2 - - if shape[::-1] != new_unpad: # resize - img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR) - top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) - left, right = int(round(dw - 0.1)), int(round(dw + 0.1)) - img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border - return img, ratio, (dw, dh) - - -def random_perspective(img, targets=(), segments=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, - border=(0, 0)): - # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10)) - # targets = [cls, xyxy] - - height = img.shape[0] + border[0] * 2 # shape(h,w,c) - width = img.shape[1] + border[1] * 2 - - # Center - C = np.eye(3) - C[0, 2] = -img.shape[1] / 2 # x translation (pixels) - C[1, 2] = -img.shape[0] / 2 # y translation (pixels) - - # Perspective - P = np.eye(3) - P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y) - P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x) - - # Rotation and Scale - R = np.eye(3) - a = random.uniform(-degrees, degrees) - # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations - s = random.uniform(1 - scale, 1 + scale) - # s = 2 ** random.uniform(-scale, scale) - R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s) - - # Shear - S 
= np.eye(3) - S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg) - S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg) - - # Translation - T = np.eye(3) - T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels) - T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels) - - # Combined rotation matrix - M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT - if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed - if perspective: - img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114)) - else: # affine - img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114)) - - # Visualize - # import matplotlib.pyplot as plt - # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel() - # ax[0].imshow(img[:, :, ::-1]) # base - # ax[1].imshow(img2[:, :, ::-1]) # warped - - # Transform label coordinates - n = len(targets) - if n: - use_segments = any(x.any() for x in segments) - new = np.zeros((n, 4)) - if use_segments: # warp segments - segments = resample_segments(segments) # upsample - for i, segment in enumerate(segments): - xy = np.ones((len(segment), 3)) - xy[:, :2] = segment - xy = xy @ M.T # transform - xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2] # perspective rescale or affine - - # clip - new[i] = segment2box(xy, width, height) - - else: # warp boxes - xy = np.ones((n * 4, 3)) - xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1 - xy = xy @ M.T # transform - xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine - - # create new boxes - x = xy[:, [0, 2, 4, 6]] - y = xy[:, [1, 3, 5, 7]] - new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T - - # clip - new[:, [0, 2]] = new[:, [0, 2]].clip(0, width) - new[:, [1, 3]] = new[:, [1, 3]].clip(0, height) - - # filter candidates - i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10) - targets = targets[i] - targets[:, 1:5] = new[i] - - return img, targets - - -def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n) - # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio - w1, h1 = box1[2] - box1[0], box1[3] - box1[1] - w2, h2 = box2[2] - box2[0], box2[3] - box2[1] - ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio - return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates - - -def cutout(image, labels): - # Applies image cutout augmentation https://arxiv.org/abs/1708.04552 - h, w = image.shape[:2] - - def bbox_ioa(box1, box2): - # Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. 
boxes are x1y1x2y2 - box2 = box2.transpose() - - # Get the coordinates of bounding boxes - b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] - b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] - - # Intersection area - inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \ - (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0) - - # box2 area - box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16 - - # Intersection over box2 area - return inter_area / box2_area - - # create random masks - scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction - for s in scales: - mask_h = random.randint(1, int(h * s)) - mask_w = random.randint(1, int(w * s)) - - # box - xmin = max(0, random.randint(0, w) - mask_w // 2) - ymin = max(0, random.randint(0, h) - mask_h // 2) - xmax = min(w, xmin + mask_w) - ymax = min(h, ymin + mask_h) - - # apply random color mask - image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)] - - # return unobscured labels - if len(labels) and s > 0.03: - box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32) - ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area - labels = labels[ioa < 0.60] # remove >60% obscured labels - - return labels - - def create_folder(path='./new'): # Create folder if os.path.exists(path): @@ -1001,23 +822,22 @@ def create_folder(path='./new'): os.makedirs(path) # make new output folder -def flatten_recursive(path='../coco128'): +def flatten_recursive(path=DATASETS_DIR / 'coco128'): # Flatten a recursive directory by bringing all files to top level - new_path = Path(path + '_flat') + new_path = Path(str(path) + '_flat') create_folder(new_path) for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)): shutil.copyfile(file, new_path / Path(file).name) -def extract_boxes(path='../coco128/'): # from utils.datasets import *; extract_boxes('../coco128') +def extract_boxes(path=DATASETS_DIR / 'coco128'): # from utils.datasets import *; extract_boxes() # Convert detection dataset into classification dataset, with one directory per class - path = Path(path) # images dir shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None # remove existing files = list(path.rglob('*.*')) n = len(files) # number of files for im_file in tqdm(files, total=n): - if im_file.suffix[1:] in img_formats: + if im_file.suffix[1:] in IMG_FORMATS: # image im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB h, w = im.shape[:2] @@ -1025,7 +845,7 @@ def extract_boxes(path='../coco128/'): # from utils.datasets import *; extract_ # labels lb_file = Path(img2label_paths([str(im_file)])[0]) if Path(lb_file).exists(): - with open(lb_file, 'r') as f: + with open(lb_file) as f: lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels for j, x in enumerate(lb): @@ -1044,24 +864,179 @@ def extract_boxes(path='../coco128/'): # from utils.datasets import *; extract_ assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}' -def autosplit(path='../coco128', weights=(0.9, 0.1, 0.0), annotated_only=False): +def autosplit(path=DATASETS_DIR / 'coco128/images', weights=(0.9, 0.1, 0.0), annotated_only=False): """ Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files - Usage: from utils.datasets import *; autosplit('../coco128') + Usage: from utils.datasets import *; autosplit() Arguments - path: Path to images directory - weights: Train, val, test weights 
(list) - annotated_only: Only use images with an annotated txt file + path: Path to images directory + weights: Train, val, test weights (list, tuple) + annotated_only: Only use images with an annotated txt file """ path = Path(path) # images dir - files = sum([list(path.rglob(f"*.{img_ext}")) for img_ext in img_formats], []) # image files only + files = sorted(x for x in path.rglob('*.*') if x.suffix[1:].lower() in IMG_FORMATS) # image files only n = len(files) # number of files + random.seed(0) # for reproducibility indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files - [(path / x).unlink() for x in txt if (path / x).exists()] # remove existing + [(path.parent / x).unlink(missing_ok=True) for x in txt] # remove existing print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only) for i, img in tqdm(zip(indices, files), total=n): if not annotated_only or Path(img2label_paths([str(img)])[0]).exists(): # check label - with open(path / txt[i], 'a') as f: - f.write(str(img) + '\n') # add image to txt file + with open(path.parent / txt[i], 'a') as f: + f.write('./' + img.relative_to(path.parent).as_posix() + '\n') # add image to txt file + + +def verify_image_label(args): + # Verify one image-label pair + im_file, lb_file, prefix = args + nm, nf, ne, nc, msg, segments = 0, 0, 0, 0, '', [] # number (missing, found, empty, corrupt), message, segments + try: + # verify images + im = Image.open(im_file) + im.verify() # PIL verify + shape = exif_size(im) # image size + assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels' + assert im.format.lower() in IMG_FORMATS, f'invalid image format {im.format}' + if im.format.lower() in ('jpg', 'jpeg'): + with open(im_file, 'rb') as f: + f.seek(-2, 2) + if f.read() != b'\xff\xd9': # corrupt JPEG + ImageOps.exif_transpose(Image.open(im_file)).save(im_file, 'JPEG', subsampling=0, quality=100) + msg = f'{prefix}WARNING: {im_file}: corrupt JPEG restored and saved' + + # verify labels + if os.path.isfile(lb_file): + nf = 1 # label found + with open(lb_file) as f: + lb = [x.split() for x in f.read().strip().splitlines() if len(x)] + if any(len(x) > 6 for x in lb): # is segment + classes = np.array([x[0] for x in lb], dtype=np.float32) + segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in lb] # (cls, xy1...) 
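+                    # segments2boxes collapses each polygon to its enclosing xywh box,
+                    # so segment-annotated datasets train with ordinary box targets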
+ lb = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh) + lb = np.array(lb, dtype=np.float32) + nl = len(lb) + if nl: + assert lb.shape[1] == 5, f'labels require 5 columns, {lb.shape[1]} columns detected' + assert (lb >= 0).all(), f'negative label values {lb[lb < 0]}' + assert (lb[:, 1:] <= 1).all(), f'non-normalized or out of bounds coordinates {lb[:, 1:][lb[:, 1:] > 1]}' + _, i = np.unique(lb, axis=0, return_index=True) + if len(i) < nl: # duplicate row check + lb = lb[i] # remove duplicates + if segments: + segments = segments[i] + msg = f'{prefix}WARNING: {im_file}: {nl - len(i)} duplicate labels removed' + else: + ne = 1 # label empty + lb = np.zeros((0, 5), dtype=np.float32) + else: + nm = 1 # label missing + lb = np.zeros((0, 5), dtype=np.float32) + return im_file, lb, shape, segments, nm, nf, ne, nc, msg + except Exception as e: + nc = 1 + msg = f'{prefix}WARNING: {im_file}: ignoring corrupt image/label: {e}' + return [None, None, None, None, nm, nf, ne, nc, msg] + + +def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False, profile=False, hub=False): + """ Return dataset statistics dictionary with images and instances counts per split per class + To run in parent directory: export PYTHONPATH="$PWD/yolov5" + Usage1: from utils.datasets import *; dataset_stats('coco128.yaml', autodownload=True) + Usage2: from utils.datasets import *; dataset_stats('path/to/coco128_with_yaml.zip') + Arguments + path: Path to data.yaml or data.zip (with data.yaml inside data.zip) + autodownload: Attempt to download dataset if not found locally + verbose: Print stats dictionary + """ + + def round_labels(labels): + # Update labels to integer class and 6 decimal place floats + return [[int(c), *(round(x, 4) for x in points)] for c, *points in labels] + + def unzip(path): + # Unzip data.zip TODO: CONSTRAINT: path/to/abc.zip MUST unzip to 'path/to/abc/' + if str(path).endswith('.zip'): # path is data.zip + assert Path(path).is_file(), f'Error unzipping {path}, file not found' + ZipFile(path).extractall(path=path.parent) # unzip + dir = path.with_suffix('') # dataset directory == zip name + return True, str(dir), next(dir.rglob('*.yaml')) # zipped, data_dir, yaml_path + else: # path is data.yaml + return False, None, path + + def hub_ops(f, max_dim=1920): + # HUB ops for 1 image 'f': resize and save at reduced quality in /dataset-hub for web/app viewing + f_new = im_dir / Path(f).name # dataset-hub image filename + try: # use PIL + im = Image.open(f) + r = max_dim / max(im.height, im.width) # ratio + if r < 1.0: # image too large + im = im.resize((int(im.width * r), int(im.height * r))) + im.save(f_new, 'JPEG', quality=75, optimize=True) # save + except Exception as e: # use OpenCV + print(f'WARNING: HUB ops PIL failure {f}: {e}') + im = cv2.imread(f) + im_height, im_width = im.shape[:2] + r = max_dim / max(im_height, im_width) # ratio + if r < 1.0: # image too large + im = cv2.resize(im, (int(im_width * r), int(im_height * r)), interpolation=cv2.INTER_AREA) + cv2.imwrite(str(f_new), im) + + zipped, data_dir, yaml_path = unzip(Path(path)) + with open(check_yaml(yaml_path), errors='ignore') as f: + data = yaml.safe_load(f) # data dict + if zipped: + data['path'] = data_dir # TODO: should this be dir.resolve()? 
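+    # check_dataset() below resolves the YAML's train/val/test paths and, with
+    # autodownload=True, attempts to fetch the dataset if it is missing locally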
+ check_dataset(data, autodownload) # download dataset if missing + hub_dir = Path(data['path'] + ('-hub' if hub else '')) + stats = {'nc': data['nc'], 'names': data['names']} # statistics dictionary + for split in 'train', 'val', 'test': + if data.get(split) is None: + stats[split] = None # i.e. no test set + continue + x = [] + dataset = LoadImagesAndLabels(data[split]) # load dataset + for label in tqdm(dataset.labels, total=dataset.n, desc='Statistics'): + x.append(np.bincount(label[:, 0].astype(int), minlength=data['nc'])) + x = np.array(x) # shape(128x80) + stats[split] = {'instance_stats': {'total': int(x.sum()), 'per_class': x.sum(0).tolist()}, + 'image_stats': {'total': dataset.n, 'unlabelled': int(np.all(x == 0, 1).sum()), + 'per_class': (x > 0).sum(0).tolist()}, + 'labels': [{str(Path(k).name): round_labels(v.tolist())} for k, v in + zip(dataset.im_files, dataset.labels)]} + + if hub: + im_dir = hub_dir / 'images' + im_dir.mkdir(parents=True, exist_ok=True) + for _ in tqdm(ThreadPool(NUM_THREADS).imap(hub_ops, dataset.im_files), total=dataset.n, desc='HUB Ops'): + pass + + # Profile + stats_path = hub_dir / 'stats.json' + if profile: + for _ in range(1): + file = stats_path.with_suffix('.npy') + t1 = time.time() + np.save(file, stats) + t2 = time.time() + x = np.load(file, allow_pickle=True) + print(f'stats.npy times: {time.time() - t2:.3f}s read, {t2 - t1:.3f}s write') + + file = stats_path.with_suffix('.json') + t1 = time.time() + with open(file, 'w') as f: + json.dump(stats, f) # save stats *.json + t2 = time.time() + with open(file) as f: + x = json.load(f) # load hyps dict + print(f'stats.json times: {time.time() - t2:.3f}s read, {t2 - t1:.3f}s write') + + # Save, print and return + if hub: + print(f'Saving {stats_path.resolve()}...') + with open(stats_path, 'w') as f: + json.dump(stats, f) # save stats.json + if verbose: + print(json.dumps(stats, indent=2, sort_keys=False)) + return stats diff --git a/utils/google_utils.py b/utils/downloads.py similarity index 56% rename from utils/google_utils.py rename to utils/downloads.py index 5c5f52170268..d7b87cb2cadd 100644 --- a/utils/google_utils.py +++ b/utils/downloads.py @@ -1,10 +1,15 @@ -# Google utils: https://cloud.google.com/storage/docs/reference/libraries +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Download utils +""" import os import platform import subprocess import time +import urllib from pathlib import Path +from zipfile import ZipFile import requests import torch @@ -16,52 +21,67 @@ def gsutil_getsize(url=''): return eval(s.split(' ')[0]) if len(s) else 0 # bytes -def attempt_download(file, repo='ultralytics/yolov5'): - if not isinstance(file, str) or file.startswith("zoo:"): - return - +def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''): + # Attempts to download file from url or url2, checks and removes incomplete downloads < min_bytes + file = Path(file) + assert_msg = f"Downloaded file '{file}' does not exist or size is < min_bytes={min_bytes}" + try: # url1 + print(f'Downloading {url} to {file}...') + torch.hub.download_url_to_file(url, str(file)) + assert file.exists() and file.stat().st_size > min_bytes, assert_msg # check + except Exception as e: # url2 + file.unlink(missing_ok=True) # remove partial downloads + print(f'ERROR: {e}\nRe-attempting {url2 or url} to {file}...') + os.system(f"curl -L '{url2 or url}' -o '{file}' --retry 3 -C -") # curl download, retry and resume on fail + finally: + if not file.exists() or file.stat().st_size < min_bytes: # check + 
file.unlink(missing_ok=True) # remove partial downloads + print(f"ERROR: {assert_msg}\n{error_msg}") + print('') + + +def attempt_download(file, repo='ultralytics/yolov5'): # from utils.downloads import *; attempt_download() # Attempt file download if does not exist file = Path(str(file).strip().replace("'", '')) if not file.exists(): + # URL specified + name = Path(urllib.parse.unquote(str(file))).name # decode '%2F' to '/' etc. + if str(file).startswith(('http:/', 'https:/')): # download + url = str(file).replace(':/', '://') # Pathlib turns :// -> :/ + file = name.split('?')[0] # parse authentication https://url.com/file.txt?auth... + if Path(file).is_file(): + print(f'Found {url} locally at {file}') # file already exists + else: + safe_download(file=file, url=url, min_bytes=1E5) + return file + + # GitHub assets file.parent.mkdir(parents=True, exist_ok=True) # make parent dir (if required) try: response = requests.get(f'https://api.github.com/repos/{repo}/releases/latest').json() # github api assets = [x['name'] for x in response['assets']] # release assets, i.e. ['yolov5s.pt', 'yolov5m.pt', ...] tag = response['tag_name'] # i.e. 'v1.0' - except: # fallback plan - assets = ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt', - 'yolov5s6.pt', 'yolov5m6.pt', 'yolov5l6.pt', 'yolov5x6.pt'] + except Exception: # fallback plan + assets = ['yolov5n.pt', 'yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt', + 'yolov5n6.pt', 'yolov5s6.pt', 'yolov5m6.pt', 'yolov5l6.pt', 'yolov5x6.pt'] try: tag = subprocess.check_output('git tag', shell=True, stderr=subprocess.STDOUT).decode().split()[-1] - except: - tag = 'v5.0' # current release + except Exception: + tag = 'v6.0' # current release - name = file.name if name in assets: - msg = f'{file} missing, try downloading from https://github.com/{repo}/releases/' - redundant = False # second download option - try: # GitHub - url = f'https://github.com/{repo}/releases/download/{tag}/{name}' - print(f'Downloading {url} to {file}...') - torch.hub.download_url_to_file(url, file) - assert file.exists() and file.stat().st_size > 1E6 # check - except Exception as e: # GCP - print(f'Download error: {e}') - assert redundant, 'No secondary mirror' - url = f'https://storage.googleapis.com/{repo}/ckpt/{name}' - print(f'Downloading {url} to {file}...') - os.system(f"curl -L '{url}' -o '{file}' --retry 3 -C -") # curl download, retry and resume on fail - finally: - if not file.exists() or file.stat().st_size < 1E6: # check - file.unlink(missing_ok=True) # remove partial downloads - print(f'ERROR: Download failure: {msg}') - print('') - return + safe_download(file, + url=f'https://github.com/{repo}/releases/download/{tag}/{name}', + # url2=f'https://storage.googleapis.com/{repo}/ckpt/{name}', # backup url (optional) + min_bytes=1E5, + error_msg=f'{file} missing, try downloading from https://github.com/{repo}/releases/') + + return str(file) def gdrive_download(id='16TiPfZj7htmTyhntwcZyEEAejOUxuT6m', file='tmp.zip'): - # Downloads a file from Google Drive. from yolov5.utils.google_utils import *; gdrive_download() + # Downloads a file from Google Drive. from yolov5.utils.downloads import *; gdrive_download() t = time.time() file = Path(file) cookie = Path('cookie') # gdrive cookie @@ -88,8 +108,8 @@ def gdrive_download(id='16TiPfZj7htmTyhntwcZyEEAejOUxuT6m', file='tmp.zip'): # Unzip if archive if file.suffix == '.zip': print('unzipping... 
', end='') - os.system(f'unzip -q {file}') # unzip - file.unlink() # remove zip to free space + ZipFile(file).extractall(path=file.parent) # unzip + file.unlink() # remove zip print(f'Done ({time.time() - t:.1f}s)') return r @@ -102,6 +122,9 @@ def get_token(cookie="./cookie"): return line.split()[-1] return "" +# Google utils: https://cloud.google.com/storage/docs/reference/libraries ---------------------------------------------- +# +# # def upload_blob(bucket_name, source_file_name, destination_blob_name): # # Uploads a file to a bucket # # https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python diff --git a/utils/flask_rest_api/README.md b/utils/flask_rest_api/README.md index 324c2416dcd9..a726acbd9204 100644 --- a/utils/flask_rest_api/README.md +++ b/utils/flask_rest_api/README.md @@ -1,9 +1,13 @@ # Flask REST API -[REST](https://en.wikipedia.org/wiki/Representational_state_transfer) [API](https://en.wikipedia.org/wiki/API)s are commonly used to expose Machine Learning (ML) models to other services. This folder contains an example REST API created using Flask to expose the YOLOv5s model from [PyTorch Hub](https://pytorch.org/hub/ultralytics_yolov5/). + +[REST](https://en.wikipedia.org/wiki/Representational_state_transfer) [API](https://en.wikipedia.org/wiki/API)s are +commonly used to expose Machine Learning (ML) models to other services. This folder contains an example REST API +created using Flask to expose the YOLOv5s model from [PyTorch Hub](https://pytorch.org/hub/ultralytics_yolov5/). ## Requirements [Flask](https://palletsprojects.com/p/flask/) is required. Install with: + ```shell $ pip install Flask ``` @@ -19,7 +23,7 @@ $ python3 restapi.py --port 5000 Then use [curl](https://curl.se/) to perform a request: ```shell -$ curl -X POST -F image=@zidane.jpg 'http://localhost:5000/v1/object-detection/yolov5s'` +$ curl -X POST -F image=@zidane.jpg 'http://localhost:5000/v1/object-detection/yolov5s' ``` The model inference results are returned as a JSON response: @@ -65,4 +69,5 @@ The model inference results are returned as a JSON response: ] ``` -An example python script to perform inference using [requests](https://docs.python-requests.org/en/master/) is given in `example_request.py` +An example python script to perform inference using [requests](https://docs.python-requests.org/en/master/) is given +in `example_request.py` diff --git a/utils/flask_rest_api/restapi.py b/utils/flask_rest_api/restapi.py index a54e2309715c..b93ad16a0f58 100644 --- a/utils/flask_rest_api/restapi.py +++ b/utils/flask_rest_api/restapi.py @@ -5,8 +5,8 @@ import io import torch -from PIL import Image from flask import Flask, request +from PIL import Image app = Flask(__name__) diff --git a/utils/general.py b/utils/general.py index 7e0ac772bb03..b0c5e9d69ab7 100755 --- a/utils/general.py +++ b/utils/general.py @@ -1,5 +1,9 @@ -# YOLOv5 general utils +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +General utils +""" +import contextlib import glob import logging import math @@ -7,11 +11,16 @@ import platform import random import re -import subprocess +import shutil +import signal import time +import urllib +from datetime import datetime from itertools import repeat from multiprocessing.pool import ThreadPool from pathlib import Path +from subprocess import check_output +from zipfile import ZipFile import cv2 import numpy as np @@ -21,29 +30,157 @@ import torchvision import yaml -from utils.google_utils import gsutil_getsize -from utils.metrics import fitness -from 
utils.torch_utils import init_torch_seeds +from utils.downloads import gsutil_getsize +from utils.metrics import box_iou, fitness # Settings +FILE = Path(__file__).resolve() +ROOT = FILE.parents[1] # YOLOv5 root directory +DATASETS_DIR = ROOT.parent / 'datasets' # YOLOv5 datasets directory +NUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads +VERBOSE = str(os.getenv('YOLOv5_VERBOSE', True)).lower() == 'true' # global verbose mode +FONT = 'Arial.ttf' # https://ultralytics.com/assets/Arial.ttf + torch.set_printoptions(linewidth=320, precision=5, profile='long') np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5 pd.options.display.max_columns = 10 cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader) -os.environ['NUMEXPR_MAX_THREADS'] = str(min(os.cpu_count(), 8)) # NumExpr max threads +os.environ['NUMEXPR_MAX_THREADS'] = str(NUM_THREADS) # NumExpr max threads +os.environ['OMP_NUM_THREADS'] = str(NUM_THREADS) # OpenMP max threads (PyTorch and SciPy) + + +def is_kaggle(): + # Is environment a Kaggle Notebook? + try: + assert os.environ.get('PWD') == '/kaggle/working' + assert os.environ.get('KAGGLE_URL_BASE') == 'https://www.kaggle.com' + return True + except AssertionError: + return False + + +def is_writeable(dir, test=False): + # Return True if directory has write permissions, test opening a file with write permissions if test=True + if test: # method 1 + file = Path(dir) / 'tmp.txt' + try: + with open(file, 'w'): # open file with write permissions + pass + file.unlink() # remove file + return True + except OSError: + return False + else: # method 2 + return os.access(dir, os.R_OK) # possible issues on Windows + + +def set_logging(name=None, verbose=VERBOSE): + # Sets level and returns logger + if is_kaggle(): + for h in logging.root.handlers: + logging.root.removeHandler(h) # remove all handlers associated with the root logger object + rank = int(os.getenv('RANK', -1)) # rank in world for Multi-GPU trainings + logging.basicConfig(format="%(message)s", level=logging.INFO if (verbose and rank in (-1, 0)) else logging.WARNING) + return logging.getLogger(name) + + +LOGGER = set_logging('yolov5') # define globally (used in train.py, val.py, detect.py, etc.) + + +def user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'): + # Return path of user configuration directory. Prefer environment variable if exists. Make dir if required. 
+ env = os.getenv(env_var) + if env: + path = Path(env) # use environment variable + else: + cfg = {'Windows': 'AppData/Roaming', 'Linux': '.config', 'Darwin': 'Library/Application Support'} # 3 OS dirs + path = Path.home() / cfg.get(platform.system(), '') # OS-specific config dir + path = (path if is_writeable(path) else Path('/tmp')) / dir # GCP and AWS lambda fix, only /tmp is writeable + path.mkdir(exist_ok=True) # make if required + return path + + +CONFIG_DIR = user_config_dir() # Ultralytics settings dir + + +class Profile(contextlib.ContextDecorator): + # Usage: @Profile() decorator or 'with Profile():' context manager + def __enter__(self): + self.start = time.time() + + def __exit__(self, type, value, traceback): + print(f'Profile results: {time.time() - self.start:.5f}s') + + +class Timeout(contextlib.ContextDecorator): + # Usage: @Timeout(seconds) decorator or 'with Timeout(seconds):' context manager + def __init__(self, seconds, *, timeout_msg='', suppress_timeout_errors=True): + self.seconds = int(seconds) + self.timeout_message = timeout_msg + self.suppress = bool(suppress_timeout_errors) + + def _timeout_handler(self, signum, frame): + raise TimeoutError(self.timeout_message) + + def __enter__(self): + if platform.system() != 'Windows': # not supported on Windows + signal.signal(signal.SIGALRM, self._timeout_handler) # Set handler for SIGALRM + signal.alarm(self.seconds) # start countdown for SIGALRM to be raised + + def __exit__(self, exc_type, exc_val, exc_tb): + if platform.system() != 'Windows': + signal.alarm(0) # Cancel SIGALRM if it's scheduled + if self.suppress and exc_type is TimeoutError: # Suppress TimeoutError + return True -def set_logging(rank=-1, verbose=True): - logging.basicConfig( - format="%(message)s", - level=logging.INFO if (verbose and rank in [-1, 0]) else logging.WARN) +class WorkingDirectory(contextlib.ContextDecorator): + # Usage: @WorkingDirectory(dir) decorator or 'with WorkingDirectory(dir):' context manager + def __init__(self, new_dir): + self.dir = new_dir # new dir + self.cwd = Path.cwd().resolve() # current dir + + def __enter__(self): + os.chdir(self.dir) + + def __exit__(self, exc_type, exc_val, exc_tb): + os.chdir(self.cwd) + + +def try_except(func): + # try-except function. 
Usage: @try_except decorator + def handler(*args, **kwargs): + try: + func(*args, **kwargs) + except Exception as e: + print(e) + + return handler + + +def methods(instance): + # Get class/instance methods + return [f for f in dir(instance) if callable(getattr(instance, f)) and not f.startswith("__")] + + +def print_args(name, opt): + # Print argparser arguments + LOGGER.info(colorstr(f'{name}: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items())) def init_seeds(seed=0): - # Initialize random number generator (RNG) seeds + # Initialize random number generator (RNG) seeds https://pytorch.org/docs/stable/notes/randomness.html + # cudnn seed 0 settings are slower and more reproducible, else faster and less reproducible + import torch.backends.cudnn as cudnn random.seed(seed) np.random.seed(seed) - init_torch_seeds(seed) + torch.manual_seed(seed) + cudnn.benchmark, cudnn.deterministic = (False, True) if seed == 0 else (True, False) + + +def intersect_dicts(da, db, exclude=()): + # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values + return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape} def get_latest_run(search_dir='.'): @@ -53,80 +190,130 @@ def get_latest_run(search_dir='.'): def is_docker(): - # Is environment a Docker container + # Is environment a Docker container? return Path('/workspace').exists() # or Path('/.dockerenv').exists() def is_colab(): - # Is environment a Google Colab instance + # Is environment a Google Colab instance? try: import google.colab return True - except Exception as e: + except ImportError: return False +def is_pip(): + # Is file in a pip package? + return 'site-packages' in Path(__file__).resolve().parts + + +def is_ascii(s=''): + # Is string composed of all ASCII (no UTF) characters? (note str().isascii() introduced in python 3.7) + s = str(s) # convert list, tuple, None, etc. to str + return len(s.encode().decode('ascii', 'ignore')) == len(s) + + +def is_chinese(s='人工智能'): + # Is string composed of any Chinese characters? + return True if re.search('[\u4e00-\u9fff]', str(s)) else False + + def emojis(str=''): # Return platform-dependent emoji-safe version of string return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str -def file_size(file): - # Return file size in MB - return Path(file).stat().st_size / 1e6 +def file_age(path=__file__): + # Return days since last file update + dt = (datetime.now() - datetime.fromtimestamp(Path(path).stat().st_mtime)) # delta + return dt.days # + dt.seconds / 86400 # fractional days + + +def file_update_date(path=__file__): + # Return human-readable file modification date, i.e. '2021-3-26' + t = datetime.fromtimestamp(Path(path).stat().st_mtime) + return f'{t.year}-{t.month}-{t.day}' + + +def file_size(path): + # Return file/dir size (MB) + mb = 1 << 20 # bytes to MiB (1024 ** 2) + path = Path(path) + if path.is_file(): + return path.stat().st_size / mb + elif path.is_dir(): + return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file()) / mb + else: + return 0.0 def check_online(): # Check internet connectivity import socket try: - socket.create_connection(("1.1.1.1", 443), 5) # check host accesability + socket.create_connection(("1.1.1.1", 443), 5) # check host accessibility return True except OSError: return False +def git_describe(path=ROOT): # path must be a directory + # Return human-readable git description, i.e. 
v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe + try: + return check_output(f'git -C {path} describe --tags --long --always', shell=True).decode()[:-1] + except Exception: + return '' + + +@try_except +@WorkingDirectory(ROOT) def check_git_status(): # Recommend 'git pull' if code is out of date - print(colorstr('github: '), end='') - try: - assert Path('.git').exists(), 'skipping check (not a git repository)' - assert not is_docker(), 'skipping check (Docker image)' - assert check_online(), 'skipping check (offline)' - - cmd = 'git fetch && git config --get remote.origin.url' - url = subprocess.check_output(cmd, shell=True).decode().strip().rstrip('.git') # github repo url - branch = subprocess.check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out - n = int(subprocess.check_output(f'git rev-list {branch}..origin/master --count', shell=True)) # commits behind - if n > 0: - s = f"⚠️ WARNING: code is out of date by {n} commit{'s' * (n > 1)}. " \ - f"Use 'git pull' to update or 'git clone {url}' to download latest." - else: - s = f'up to date with {url} ✅' - print(emojis(s)) # emoji-safe - except Exception as e: - print(e) + msg = ', for updates see https://github.com/ultralytics/yolov5' + s = colorstr('github: ') # string + assert Path('.git').exists(), s + 'skipping check (not a git repository)' + msg + assert not is_docker(), s + 'skipping check (Docker image)' + msg + assert check_online(), s + 'skipping check (offline)' + msg + + cmd = 'git fetch && git config --get remote.origin.url' + url = check_output(cmd, shell=True, timeout=5).decode().strip().rstrip('.git') # git fetch + branch = check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out + n = int(check_output(f'git rev-list {branch}..origin/master --count', shell=True)) # commits behind + if n > 0: + s += f"⚠️ YOLOv5 is out of date by {n} commit{'s' * (n > 1)}. Use `git pull` or `git clone {url}` to update." + else: + s += f'up to date with {url} ✅' + LOGGER.info(emojis(s)) # emoji-safe -def check_python(minimum='3.7.0', required=True): +def check_python(minimum='3.6.2'): # Check current python version vs. required python version - current = platform.python_version() - result = pkg.parse_version(current) >= pkg.parse_version(minimum) - if required: - assert result, f'Python {minimum} required by YOLOv5, but Python {current} is currently installed' + check_version(platform.python_version(), minimum, name='Python ', hard=True) + + +def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False, verbose=False): + # Check version vs. 
required version + current, minimum = (pkg.parse_version(x) for x in (current, minimum)) + result = (current == minimum) if pinned else (current >= minimum) # bool + s = f'{name}{minimum} required by YOLOv5, but {name}{current} is currently installed' # string + if hard: + assert result, s # assert min requirements met + if verbose and not result: + LOGGER.warning(s) return result -def check_requirements(requirements='requirements.txt', exclude=()): +@try_except +def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True): # Check installed dependencies meet requirements (pass *.txt file or list of packages) prefix = colorstr('red', 'bold', 'requirements:') check_python() # check python version if isinstance(requirements, (str, Path)): # requirements.txt file file = Path(requirements) - if not file.exists(): - print(f"{prefix} {file.resolve()} not found, check failed.") - return - requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(file.open()) if x.name not in exclude] + assert file.exists(), f"{prefix} {file.resolve()} not found, check failed." + with file.open() as f: + requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(f) if x.name not in exclude] else: # list or tuple of packages requirements = [x for x in requirements if x not in exclude] @@ -134,26 +321,34 @@ def check_requirements(requirements='requirements.txt', exclude=()): for r in requirements: try: pkg.require(r) - except Exception as e: # DistributionNotFound or VersionConflict if requirements not met - n += 1 - print(f"{prefix} {r} not found and is required by YOLOv5, attempting auto-update...") - try: - print(subprocess.check_output(f"pip install '{r}'", shell=True).decode()) - except Exception as e: - print(f'{prefix} {e}') + except Exception: # DistributionNotFound or VersionConflict if requirements not met + s = f"{prefix} {r} not found and is required by YOLOv5" + if install: + LOGGER.info(f"{s}, attempting auto-update...") + try: + assert check_online(), f"'pip install {r}' skipped (offline)" + LOGGER.info(check_output(f"pip install '{r}'", shell=True).decode()) + n += 1 + except Exception as e: + LOGGER.warning(f'{prefix} {e}') + else: + LOGGER.info(f'{s}. Please install and rerun your command.') if n: # if packages updated source = file.resolve() if 'file' in locals() else requirements s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \ f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n" - print(emojis(s)) # emoji-safe + LOGGER.info(emojis(s)) -def check_img_size(img_size, s=32): - # Verify img_size is a multiple of stride s - new_size = make_divisible(img_size, int(s)) # ceil gs-multiple - if new_size != img_size: - print('WARNING: --img-size %g must be multiple of max stride %g, updating to %g' % (img_size, s, new_size)) +def check_img_size(imgsz, s=32, floor=0): + # Verify image size is a multiple of stride s in each dimension + if isinstance(imgsz, int): # integer i.e. img_size=640 + new_size = max(make_divisible(imgsz, int(s)), floor) + else: # list i.e. 
img_size=[640, 480] + new_size = [max(make_divisible(x, int(s)), floor) for x in imgsz] + if new_size != imgsz: + LOGGER.warning(f'WARNING: --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}') return new_size @@ -168,64 +363,146 @@ def check_imshow(): cv2.waitKey(1) return True except Exception as e: - print(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image displays\n{e}') + LOGGER.warning(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image displays\n{e}') return False -def check_file(file): - # Search for file if not found - if Path(file).is_file() or file == '': +def check_suffix(file='yolov5s.pt', suffix=('.pt',), msg=''): + # Check file(s) for acceptable suffix + if file and suffix: + if isinstance(suffix, str): + suffix = [suffix] + for f in file if isinstance(file, (list, tuple)) else [file]: + s = Path(f).suffix.lower() # file suffix + if len(s): + assert s in suffix, f"{msg}{f} acceptable suffix is {suffix}" + + +def check_yaml(file, suffix=('.yaml', '.yml')): + # Search/download YAML file (if necessary) and return path, checking suffix + return check_file(file, suffix) + + +def check_file(file, suffix=''): + # Search/download file (if necessary) and return path + check_suffix(file, suffix) # optional + file = str(file) # convert to str() + if Path(file).is_file() or file == '': # exists return file - else: - files = glob.glob('./**/' + file, recursive=True) # find file - assert len(files), f'File Not Found: {file}' # assert file was found + elif file.startswith(('http:/', 'https:/')): # download + url = str(Path(file)).replace(':/', '://') # Pathlib turns :// -> :/ + file = Path(urllib.parse.unquote(file).split('?')[0]).name # '%2F' to '/', split https://url.com/file.txt?auth + if Path(file).is_file(): + LOGGER.info(f'Found {url} locally at {file}') # file already exists + else: + LOGGER.info(f'Downloading {url} to {file}...') + torch.hub.download_url_to_file(url, file) + assert Path(file).exists() and Path(file).stat().st_size > 0, f'File download failed: {url}' # check + return file + else: # search + files = [] + for d in 'data', 'models', 'utils': # search directories + files.extend(glob.glob(str(ROOT / d / '**' / file), recursive=True)) # find file + assert len(files), f'File not found: {file}' # assert file was found assert len(files) == 1, f"Multiple files match '{file}', specify exact path: {files}" # assert unique return files[0] # return file -def check_dataset(dict): - # Download dataset if not found locally - val, s = dict.get('val'), dict.get('download') - if val and len(val): +def check_font(font=FONT): + # Download font to CONFIG_DIR if necessary + font = Path(font) + if not font.exists() and not (CONFIG_DIR / font.name).exists(): + url = "https://ultralytics.com/assets/" + font.name + LOGGER.info(f'Downloading {url} to {CONFIG_DIR / font.name}...') + torch.hub.download_url_to_file(url, str(font), progress=False) + + +def check_dataset(data, autodownload=True): + # Download and/or unzip dataset if not found locally + # Usage: https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128_with_yaml.zip + + # Download (optional) + extract_dir = '' + if isinstance(data, (str, Path)) and str(data).endswith('.zip'): # i.e. 
gs://bucket/dir/coco128.zip + download(data, dir=DATASETS_DIR, unzip=True, delete=False, curl=False, threads=1) + data = next((DATASETS_DIR / Path(data).stem).rglob('*.yaml')) + extract_dir, autodownload = data.parent, False + + # Read yaml (optional) + if isinstance(data, (str, Path)): + with open(data, errors='ignore') as f: + data = yaml.safe_load(f) # dictionary + + # Resolve paths + path = Path(extract_dir or data.get('path') or '') # optional 'path' default to '.' + if not path.is_absolute(): + path = (ROOT / path).resolve() + for k in 'train', 'val', 'test': + if data.get(k): # prepend path + data[k] = str(path / data[k]) if isinstance(data[k], str) else [str(path / x) for x in data[k]] + + # Parse yaml + assert 'nc' in data, "Dataset 'nc' key missing." + if 'names' not in data: + data['names'] = [f'class{i}' for i in range(data['nc'])] # assign class names if missing + train, val, test, s = (data.get(x) for x in ('train', 'val', 'test', 'download')) + if val: val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path if not all(x.exists() for x in val): - print('\nWARNING: Dataset not found, nonexistent paths: %s' % [str(x) for x in val if not x.exists()]) - if s and len(s): # download script + LOGGER.info(emojis('\nDataset not found ⚠️, missing paths %s' % [str(x) for x in val if not x.exists()])) + if s and autodownload: # download script + t = time.time() + root = path.parent if 'path' in data else '..' # unzip directory i.e. '../' if s.startswith('http') and s.endswith('.zip'): # URL f = Path(s).name # filename - print(f'Downloading {s} ...') + LOGGER.info(f'Downloading {s} to {f}...') torch.hub.download_url_to_file(s, f) - r = os.system(f'unzip -q {f} -d ../ && rm {f}') # unzip + Path(root).mkdir(parents=True, exist_ok=True) # create root + ZipFile(f).extractall(path=root) # unzip + Path(f).unlink() # remove zip + r = None # success elif s.startswith('bash '): # bash script - print(f'Running {s} ...') + LOGGER.info(f'Running {s} ...') r = os.system(s) else: # python script - r = exec(s) # return None - print('Dataset autodownload %s\n' % ('success' if r in (0, None) else 'failure')) # print result + r = exec(s, {'yaml': data}) # return None + dt = f'({round(time.time() - t, 1)}s)' + s = f"success ✅ {dt}, saved to {colorstr('bold', root)}" if r in (0, None) else f"failure {dt} ❌" + LOGGER.info(emojis(f"Dataset download {s}")) else: - raise Exception('Dataset not found.') + raise Exception(emojis('Dataset not found ❌')) + + return data # dictionary + + +def url2file(url): + # Convert URL to filename, i.e. 
https://url.com/file.txt?auth -> file.txt + url = str(Path(url)).replace(':/', '://') # Pathlib turns :// -> :/ + file = Path(urllib.parse.unquote(url)).name.split('?')[0] # '%2F' to '/', split https://url.com/file.txt?auth + return file def download(url, dir='.', unzip=True, delete=True, curl=False, threads=1): - # Multi-threaded file download and unzip function + # Multi-threaded file download and unzip function, used in data.yaml for autodownload def download_one(url, dir): # Download 1 file f = dir / Path(url).name # filename - if not f.exists(): - print(f'Downloading {url} to {f}...') + if Path(url).is_file(): # exists in current path + Path(url).rename(f) # move to dir + elif not f.exists(): + LOGGER.info(f'Downloading {url} to {f}...') if curl: os.system(f"curl -L '{url}' -o '{f}' --retry 9 -C -") # curl download, retry and resume on fail else: - torch.hub.download_url_to_file(url, f, progress=True) # torch download + torch.hub.download_url_to_file(url, f, progress=threads == 1) # torch download if unzip and f.suffix in ('.zip', '.gz'): - print(f'Unzipping {f}...') + LOGGER.info(f'Unzipping {f}...') if f.suffix == '.zip': - s = f'unzip -qo {f} -d {dir} && rm {f}' # unzip -quiet -overwrite + ZipFile(f).extractall(path=dir) # unzip elif f.suffix == '.gz': - s = f'tar xfz {f} --directory {f.parent}' # unzip - if delete: # delete zip file after unzip - s += f' && rm {f}' - os.system(s) + os.system(f'tar xfz {f} --directory {f.parent}') # unzip + if delete: + f.unlink() # remove zip dir = Path(dir) dir.mkdir(parents=True, exist_ok=True) # make directory @@ -235,12 +512,14 @@ def download_one(url, dir): pool.close() pool.join() else: - for u in tuple(url) if isinstance(url, str) else url: + for u in [url] if isinstance(url, (str, Path)) else url: download_one(u, dir) def make_divisible(x, divisor): - # Returns x evenly divisible by divisor + # Returns nearest x divisible by divisor + if isinstance(divisor, torch.Tensor): + divisor = int(divisor.max()) # to int return math.ceil(x / divisor) * divisor @@ -250,7 +529,7 @@ def clean_str(s): def one_cycle(y1=0.0, y2=1.0, steps=100): - # lambda function for sinusoidal ramp from y1 to y2 + # lambda function for sinusoidal ramp from y1 to y2 https://arxiv.org/pdf/1812.01187.pdf return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1 @@ -348,6 +627,18 @@ def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0): return y +def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0): + # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] normalized where xy1=top-left, xy2=bottom-right + if clip: + clip_coords(x, (h - eps, w - eps)) # warning: inplace clip + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = ((x[:, 0] + x[:, 2]) / 2) / w # x center + y[:, 1] = ((x[:, 1] + x[:, 3]) / 2) / h # y center + y[:, 2] = (x[:, 2] - x[:, 0]) / w # width + y[:, 3] = (x[:, 3] - x[:, 1]) / h # height + return y + + def xyn2xy(x, w=640, h=640, padw=0, padh=0): # Convert normalized segments into pixel segments, shape (n,2) y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) @@ -398,90 +689,16 @@ def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None): return coords -def clip_coords(boxes, img_shape): +def clip_coords(boxes, shape): # Clip bounding xyxy bounding boxes to image shape (height, width) - boxes[:, 0].clamp_(0, img_shape[1]) # x1 - boxes[:, 1].clamp_(0, img_shape[0]) # y1 - boxes[:, 2].clamp_(0, img_shape[1]) # x2 - boxes[:, 3].clamp_(0, img_shape[0]) # y2 - - -def bbox_iou(box1, box2, x1y1x2y2=True, 
GIoU=False, DIoU=False, CIoU=False, eps=1e-7): - # Returns the IoU of box1 to box2. box1 is 4, box2 is nx4 - box2 = box2.T - - # Get the coordinates of bounding boxes - if x1y1x2y2: # x1, y1, x2, y2 = box1 - b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] - b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] - else: # transform from xywh to xyxy - b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2 - b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2 - b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2 - b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2 - - # Intersection area - inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \ - (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0) - - # Union Area - w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps - w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps - union = w1 * h1 + w2 * h2 - inter + eps - - iou = inter / union - if GIoU or DIoU or CIoU: - cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width - ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height - if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1 - c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared - rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + - (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center distance squared - if DIoU: - return iou - rho2 / c2 # DIoU - elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 - v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2) - with torch.no_grad(): - alpha = v / (v - iou + (1 + eps)) - return iou - (rho2 / c2 + v * alpha) # CIoU - else: # GIoU https://arxiv.org/pdf/1902.09630.pdf - c_area = cw * ch + eps # convex area - return iou - (c_area - union) / c_area # GIoU - else: - return iou # IoU - - -def box_iou(box1, box2): - # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py - """ - Return intersection-over-union (Jaccard index) of boxes. - Both sets of boxes are expected to be in (x1, y1, x2, y2) format. - Arguments: - box1 (Tensor[N, 4]) - box2 (Tensor[M, 4]) - Returns: - iou (Tensor[N, M]): the NxM matrix containing the pairwise - IoU values for every element in boxes1 and boxes2 - """ - - def box_area(box): - # box = 4xn - return (box[2] - box[0]) * (box[3] - box[1]) - - area1 = box_area(box1.T) - area2 = box_area(box2.T) - - # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2) - inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2) - return inter / (area1[:, None] + area2 - inter) # iou = inter / (area1 + area2 - inter) - - -def wh_iou(wh1, wh2): - # Returns the nxm IoU matrix. 
wh1 is nx2, wh2 is mx2 - wh1 = wh1[:, None] # [N,1,2] - wh2 = wh2[None] # [1,M,2] - inter = torch.min(wh1, wh2).prod(2) # [N,M] - return inter / (wh1.prod(2) + wh2.prod(2) - inter) # iou = inter / (area1 + area2 - inter) + if isinstance(boxes, torch.Tensor): # faster individually + boxes[:, 0].clamp_(0, shape[1]) # x1 + boxes[:, 1].clamp_(0, shape[0]) # y1 + boxes[:, 2].clamp_(0, shape[1]) # x2 + boxes[:, 3].clamp_(0, shape[0]) # y2 + else: # np.array (faster grouped) + boxes[:, [0, 2]] = boxes[:, [0, 2]].clip(0, shape[1]) # x1, x2 + boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, shape[0]) # y1, y2 def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False, @@ -500,7 +717,7 @@ def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=Non assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0' # Settings - min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height + min_wh, max_wh = 2, 7680 # (pixels) minimum and maximum box width and height max_nms = 30000 # maximum number of boxes into torchvision.ops.nms() time_limit = 10.0 # seconds to quit after redundant = True # require redundant detections @@ -511,16 +728,16 @@ def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=Non output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0] for xi, x in enumerate(prediction): # image index, image inference # Apply constraints - # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height + x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height x = x[xc[xi]] # confidence # Cat apriori labels if autolabelling if labels and len(labels[xi]): - l = labels[xi] - v = torch.zeros((len(l), nc + 5), device=x.device) - v[:, :4] = l[:, 1:5] # box + lb = labels[xi] + v = torch.zeros((len(lb), nc + 5), device=x.device) + v[:, :4] = lb[:, 1:5] # box v[:, 4] = 1.0 # conf - v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls + v[range(len(lb)), lb[:, 0].long() + 5] = 1.0 # cls x = torch.cat((x, v), 0) # If none remain process next image @@ -572,7 +789,7 @@ def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=Non output[xi] = x[i] if (time.time() - t) > time_limit: - print(f'WARNING: NMS time limit {time_limit}s exceeded') + LOGGER.warning(f'WARNING: NMS time limit {time_limit}s exceeded') break # time limit exceeded return output @@ -583,52 +800,62 @@ def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_op x = torch.load(f, map_location=torch.device('cpu')) if x.get('ema'): x['model'] = x['ema'] # replace model with ema - for k in 'optimizer', 'training_results', 'wandb_id', 'ema', 'updates': # keys + for k in 'optimizer', 'best_fitness', 'wandb_id', 'ema', 'updates': # keys x[k] = None x['epoch'] = -1 - pickled = isinstance(x['model'], torch.nn.Module) - if pickled: - x['model'].half() # to FP16 - for p in x['model'].parameters(): - p.requires_grad = False + x['model'].half() # to FP16 + for p in x['model'].parameters(): + p.requires_grad = False torch.save(x, s or f) mb = os.path.getsize(s or f) / 1E6 # filesize - print(f"Optimizer stripped from {f},{(' saved as %s,' % s) if s else ''} {mb:.1f}MB") + LOGGER.info(f"Optimizer stripped from {f},{(' saved as %s,' % s) if s else ''} {mb:.1f}MB") -def print_mutation(hyp, results, yaml_file='hyp_evolved.yaml', bucket=''): - # Print mutation results to evolve.txt (for use with train.py --evolve) - a = '%10s' 
* len(hyp) % tuple(hyp.keys()) # hyperparam keys - b = '%10.3g' * len(hyp) % tuple(hyp.values()) # hyperparam values - c = '%10.4g' * len(results) % results # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3) - print('\n%s\n%s\nEvolved fitness: %s\n' % (a, b, c)) +def print_mutation(results, hyp, save_dir, bucket, prefix=colorstr('evolve: ')): + evolve_csv = save_dir / 'evolve.csv' + evolve_yaml = save_dir / 'hyp_evolve.yaml' + keys = ('metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', + 'val/box_loss', 'val/obj_loss', 'val/cls_loss') + tuple(hyp.keys()) # [results + hyps] + keys = tuple(x.strip() for x in keys) + vals = results + tuple(hyp.values()) + n = len(keys) + # Download (optional) if bucket: - url = 'gs://%s/evolve.txt' % bucket - if gsutil_getsize(url) > (os.path.getsize('evolve.txt') if os.path.exists('evolve.txt') else 0): - os.system('gsutil cp %s .' % url) # download evolve.txt if larger than local + url = f'gs://{bucket}/evolve.csv' + if gsutil_getsize(url) > (evolve_csv.stat().st_size if evolve_csv.exists() else 0): + os.system(f'gsutil cp {url} {save_dir}') # download evolve.csv if larger than local - with open('evolve.txt', 'a') as f: # append result - f.write(c + b + '\n') - x = np.unique(np.loadtxt('evolve.txt', ndmin=2), axis=0) # load unique rows - x = x[np.argsort(-fitness(x))] # sort - np.savetxt('evolve.txt', x, '%10.3g') # save sort by fitness + # Log to evolve.csv + s = '' if evolve_csv.exists() else (('%20s,' * n % keys).rstrip(',') + '\n') # add header + with open(evolve_csv, 'a') as f: + f.write(s + ('%20.5g,' * n % vals).rstrip(',') + '\n') # Save yaml - for i, k in enumerate(hyp.keys()): - hyp[k] = float(x[0, i + 7]) - with open(yaml_file, 'w') as f: - results = tuple(x[0, :7]) - c = '%10.4g' * len(results) % results # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3) - f.write('# Hyperparameter Evolution Results\n# Generations: %g\n# Metrics: ' % len(x) + c + '\n\n') - yaml.safe_dump(hyp, f, sort_keys=False) + with open(evolve_yaml, 'w') as f: + data = pd.read_csv(evolve_csv) + data = data.rename(columns=lambda x: x.strip()) # strip keys + i = np.argmax(fitness(data.values[:, :4])) # + generations = len(data) + f.write('# YOLOv5 Hyperparameter Evolution Results\n' + + f'# Best generation: {i}\n' + + f'# Last generation: {generations - 1}\n' + + '# ' + ', '.join(f'{x.strip():>20s}' for x in keys[:7]) + '\n' + + '# ' + ', '.join(f'{x:>20.5g}' for x in data.values[i, :7]) + '\n\n') + yaml.safe_dump(data.loc[i][7:].to_dict(), f, sort_keys=False) + + # Print to screen + LOGGER.info(prefix + f'{generations} generations finished, current result:\n' + + prefix + ', '.join(f'{x.strip():>20s}' for x in keys) + '\n' + + prefix + ', '.join(f'{x:20.5g}' for x in vals) + '\n\n') if bucket: - os.system('gsutil cp evolve.txt %s gs://%s' % (yaml_file, bucket)) # upload + os.system(f'gsutil cp {evolve_csv} {evolve_yaml} gs://{bucket}') # upload def apply_classifier(x, model, img, im0): - # Apply a second stage classifier to yolo outputs + # Apply a second stage classifier to YOLO outputs + # Example model = torchvision.models.__dict__['efficientnet_b0'](pretrained=True).to(device).eval() im0 = [im0] if isinstance(im0, np.ndarray) else im0 for i, d in enumerate(x): # per image if d is not None and len(d): @@ -649,11 +876,11 @@ def apply_classifier(x, model, img, im0): for j, a in enumerate(d): # per item cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])] im = cv2.resize(cutout, (224, 224)) # BGR - # cv2.imwrite('test%i.jpg' % j, 
cutout) + # cv2.imwrite('example%i.jpg' % j, cutout) im = im[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 im = np.ascontiguousarray(im, dtype=np.float32) # uint8 to float32 - im /= 255.0 # 0 - 255 to 0.0 - 1.0 + im /= 255 # 0 - 255 to 0.0 - 1.0 ims.append(im) pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1) # classifier prediction @@ -662,33 +889,20 @@ def apply_classifier(x, model, img, im0): return x -def save_one_box(xyxy, im, file='image.jpg', gain=1.02, pad=10, square=False, BGR=False, save=True): - # Save image crop as {file} with crop size multiple {gain} and {pad} pixels. Save and/or return crop - xyxy = torch.tensor(xyxy).view(-1, 4) - b = xyxy2xywh(xyxy) # boxes - if square: - b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # attempt rectangle to square - b[:, 2:] = b[:, 2:] * gain + pad # box wh * gain + pad - xyxy = xywh2xyxy(b).long() - clip_coords(xyxy, im.shape) - crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2]), ::(1 if BGR else -1)] - if save: - cv2.imwrite(str(increment_path(file, mkdir=True).with_suffix('.jpg')), crop) - return crop - - def increment_path(path, exist_ok=False, sep='', mkdir=False): # Increment file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc. path = Path(path) # os-agnostic if path.exists() and not exist_ok: - suffix = path.suffix - path = path.with_suffix('') + path, suffix = (path.with_suffix(''), path.suffix) if path.is_file() else (path, '') dirs = glob.glob(f"{path}{sep}*") # similar paths matches = [re.search(rf"%s{sep}(\d+)" % path.stem, d) for d in dirs] i = [int(m.groups()[0]) for m in matches if m] # indices n = max(i) + 1 if i else 2 # increment number - path = Path(f"{path}{sep}{n}{suffix}") # update path - dir = path if path.suffix == '' else path.parent # directory - if not dir.exists() and mkdir: - dir.mkdir(parents=True, exist_ok=True) # make directory + path = Path(f"{path}{sep}{n}{suffix}") # increment path + if mkdir: + path.mkdir(parents=True, exist_ok=True) # make directory return path + + +# Variables +NCOLS = 0 if is_docker() else shutil.get_terminal_size().columns # terminal window size for tqdm diff --git a/utils/google_app_engine/additional_requirements.txt b/utils/google_app_engine/additional_requirements.txt index 5fcc30524a59..42d7ffc0eed8 100644 --- a/utils/google_app_engine/additional_requirements.txt +++ b/utils/google_app_engine/additional_requirements.txt @@ -1,4 +1,4 @@ # add these requirements in your app on top of the existing ones -pip==18.1 +pip==21.1 Flask==1.0.2 gunicorn==19.9.0 diff --git a/utils/google_app_engine/app.yaml b/utils/google_app_engine/app.yaml index ac29d104b144..5056b7c1186d 100644 --- a/utils/google_app_engine/app.yaml +++ b/utils/google_app_engine/app.yaml @@ -11,4 +11,4 @@ manual_scaling: resources: cpu: 1 memory_gb: 4 - disk_size_gb: 20 \ No newline at end of file + disk_size_gb: 20 diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py new file mode 100644 index 000000000000..ff6722ecd48a --- /dev/null +++ b/utils/loggers/__init__.py @@ -0,0 +1,171 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Logging utils +""" + +import os +import warnings +from threading import Thread + +import pkg_resources as pkg +import torch +from torch.utils.tensorboard import SummaryWriter + +from utils.general import colorstr, emojis +from utils.loggers.wandb.wandb_utils import WandbLogger +from utils.plots import plot_images, plot_results +from utils.torch_utils import de_parallel + +LOGGERS = ('csv', 'tb', 'wandb') # 
text-file, TensorBoard, Weights & Biases +RANK = int(os.getenv('RANK', -1)) + +try: + import wandb + + assert hasattr(wandb, '__version__') # verify package import not local dir + if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.2') and RANK in [0, -1]: + try: + wandb_login_success = wandb.login(timeout=30) + except wandb.errors.UsageError: # known non-TTY terminal issue + wandb_login_success = False + if not wandb_login_success: + wandb = None +except (ImportError, AssertionError): + wandb = None + + +class Loggers(): + # YOLOv5 Loggers class + def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, include=LOGGERS): + self.save_dir = save_dir + self.weights = weights + self.opt = opt + self.hyp = hyp + self.logger = logger # for printing results to console + self.include = include + self.keys = ['train/box_loss', 'train/obj_loss', 'train/cls_loss', # train loss + 'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', # metrics + 'val/box_loss', 'val/obj_loss', 'val/cls_loss', # val loss + 'x/lr0', 'x/lr1', 'x/lr2'] # params + self.best_keys = ['best/epoch', 'best/precision', 'best/recall', 'best/mAP_0.5', 'best/mAP_0.5:0.95'] + for k in LOGGERS: + setattr(self, k, None) # init empty logger dictionary + self.csv = True # always log to csv + + # Message + if not wandb: + prefix = colorstr('Weights & Biases: ') + s = f"{prefix}run 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs (RECOMMENDED)" + self.logger.info(emojis(s)) + + # TensorBoard + s = self.save_dir + if 'tb' in self.include and not self.opt.evolve: + prefix = colorstr('TensorBoard: ') + self.logger.info(f"{prefix}Start with 'tensorboard --logdir {s.parent}', view at http://localhost:6006/") + self.tb = SummaryWriter(str(s)) + + # W&B + if wandb and 'wandb' in self.include: + wandb_artifact_resume = isinstance(self.opt.resume, str) and self.opt.resume.startswith('wandb-artifact://') + run_id = torch.load(self.weights).get('wandb_id') if self.opt.resume and not wandb_artifact_resume else None + self.opt.hyp = self.hyp # add hyperparameters + self.wandb = WandbLogger(self.opt, run_id) + else: + self.wandb = None + + def on_pretrain_routine_end(self): + # Callback runs on pre-train routine end + paths = self.save_dir.glob('*labels*.jpg') # training labels + if self.wandb: + self.wandb.log({"Labels": [wandb.Image(str(x), caption=x.name) for x in paths]}) + + def on_train_batch_end(self, ni, model, imgs, targets, paths, plots, sync_bn): + # Callback runs on train batch end + if plots: + if ni == 0: + if not sync_bn: # tb.add_graph() --sync known issue https://github.com/ultralytics/yolov5/issues/3754 + with warnings.catch_warnings(): + warnings.simplefilter('ignore') # suppress jit trace warning + self.tb.add_graph(torch.jit.trace(de_parallel(model), imgs[0:1], strict=False), []) + if ni < 3: + f = self.save_dir / f'train_batch{ni}.jpg' # filename + Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start() + if self.wandb and ni == 10: + files = sorted(self.save_dir.glob('train*.jpg')) + self.wandb.log({'Mosaics': [wandb.Image(str(f), caption=f.name) for f in files if f.exists()]}) + + def on_train_epoch_end(self, epoch): + # Callback runs on train epoch end + if self.wandb: + self.wandb.current_epoch = epoch + 1 + + def on_val_image_end(self, pred, predn, path, names, im): + # Callback runs on val image end + if self.wandb: + self.wandb.val_one_image(pred, predn, path, names, im) + + def on_val_end(self): + # Callback 
runs on val end + if self.wandb: + files = sorted(self.save_dir.glob('val*.jpg')) + self.wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in files]}) + + def on_fit_epoch_end(self, vals, epoch, best_fitness, fi): + # Callback runs at the end of each fit (train+val) epoch + x = {k: v for k, v in zip(self.keys, vals)} # dict + if self.csv: + file = self.save_dir / 'results.csv' + n = len(x) + 1 # number of cols + s = '' if file.exists() else (('%20s,' * n % tuple(['epoch'] + self.keys)).rstrip(',') + '\n') # add header + with open(file, 'a') as f: + f.write(s + ('%20.5g,' * n % tuple([epoch] + vals)).rstrip(',') + '\n') + + if self.tb: + for k, v in x.items(): + self.tb.add_scalar(k, v, epoch) + + if self.wandb: + if best_fitness == fi: + best_results = [epoch] + vals[3:7] + for i, name in enumerate(self.best_keys): + self.wandb.wandb_run.summary[name] = best_results[i] # log best results in the summary + self.wandb.log(x) + self.wandb.end_epoch(best_result=best_fitness == fi) + + def on_model_save(self, last, epoch, final_epoch, best_fitness, fi): + # Callback runs on model save event + if self.wandb: + if ((epoch + 1) % self.opt.save_period == 0 and not final_epoch) and self.opt.save_period != -1: + self.wandb.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi) + + def on_train_end(self, last, best, plots, epoch, results): + # Callback runs on training end + if plots: + plot_results(file=self.save_dir / 'results.csv') # save results.png + files = ['results.png', 'confusion_matrix.png', *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))] + files = [(self.save_dir / f) for f in files if (self.save_dir / f).exists()] # filter + + if self.tb: + import cv2 + import numpy as np + + cv2.imread = lambda x: cv2.imdecode(np.fromfile(x, np.uint8), cv2.IMREAD_COLOR) # remap for Chinese files + for f in files: + self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats='HWC') + + if self.wandb: + self.wandb.log({k: v for k, v in zip(self.keys[3:10], results)}) # log best.pt val results + self.wandb.log({"Results": [wandb.Image(str(f), caption=f.name) for f in files]}) + # Calling wandb.log. TODO: Refactor this into WandbLogger.log_model + if not self.opt.evolve: + wandb.log_artifact(str(best if best.exists() else last), type='model', + name='run_' + self.wandb.wandb_run.id + '_model', + aliases=['latest', 'best', 'stripped']) + self.wandb.finish_run() + + def on_params_update(self, params): + # Update hyperparams or configs of the experiment + # params: A dict containing {param: value} pairs + if self.wandb: + self.wandb.wandb_run.config.update(params, allow_val_change=True) diff --git a/utils/loggers/wandb/README.md b/utils/loggers/wandb/README.md new file mode 100644 index 000000000000..63d999859e6d --- /dev/null +++ b/utils/loggers/wandb/README.md @@ -0,0 +1,152 @@ +📚 This guide explains how to use **Weights & Biases** (W&B) with YOLOv5 🚀. UPDATED 29 September 2021. +* [About Weights & Biases](#about-weights-&-biases) +* [First-Time Setup](#first-time-setup) +* [Viewing runs](#viewing-runs) +* [Disabling wandb](#disabling-wandb) +* [Advanced Usage: Dataset Versioning and Evaluation](#advanced-usage) +* [Reports: Share your work with the world!](#reports) + +## About Weights & Biases +Think of [W&B](https://wandb.ai/site?utm_campaign=repo_yolo_wandbtutorial) like GitHub for machine learning models. 
With a few lines of code, save everything you need to debug, compare and reproduce your models — architecture, hyperparameters, git commits, model weights, GPU usage, and even datasets and predictions.
+
+Used by top researchers including teams at OpenAI, Lyft, GitHub, and MILA, W&B is part of the new standard of best practices for machine learning. How W&B can help you optimize your machine learning workflows:
+
+ * [Debug](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#Free-2) model performance in real time
+ * [GPU usage](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#System-4) visualized automatically
+ * [Custom charts](https://wandb.ai/wandb/customizable-charts/reports/Powerful-Custom-Charts-To-Debug-Model-Peformance--VmlldzoyNzY4ODI) for powerful, extensible visualization
+ * [Share insights](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#Share-8) interactively with collaborators
+ * [Optimize hyperparameters](https://docs.wandb.com/sweeps) efficiently
+ * [Track](https://docs.wandb.com/artifacts) datasets, pipelines, and production models
+
+## First-Time Setup
+
+When you first train, W&B will prompt you to create a new account and will generate an **API key** for you. If you are an existing user you can retrieve your key from https://wandb.ai/authorize. This key is used to tell W&B where to log your data. You only need to supply your key once, and then it is remembered on the same device.
+
+W&B will create a cloud **project** (default is 'YOLOv5') for your training runs, and each new training run will be provided a unique run **name** within that project as project/name. You can also manually set your project and run name as:
+
+ ```shell
+ $ python train.py --project ... --name ...
+ ```
+
+YOLOv5 notebook example: Open In Colab | Open In Kaggle
+
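+On a fresh machine, first-time setup typically reduces to the following (a minimal sketch; `wandb login` prompts for the API key from the link above, and the `train.py` flags are the usual YOLOv5 arguments):
+
+ ```shell
+ $ pip install wandb    # install the W&B client
+ $ wandb login          # paste your API key once; it is remembered on this device
+ $ python train.py --data coco128.yaml --weights yolov5s.pt --img 640  # this run is now logged to W&B
+ ```
+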
+## Viewing Runs
+
+Run information streams from your environment to the W&B cloud console as you train. This allows you to monitor and even cancel runs in real time. All important information is logged:
+
+ * Training & Validation losses
+ * Metrics: Precision, Recall, mAP@0.5, mAP@0.5:0.95
+ * Learning Rate over time
+ * A bounding box debugging panel, showing the training progress over time
+ * GPU: Type, **GPU Utilization**, power, temperature, **CUDA memory usage**
+ * System: Disk I/O, CPU utilization, RAM memory usage
+ * Your trained model as a W&B Artifact
+ * Environment: OS and Python types, Git repository and state, **training command**
+
+*(Screenshot: the Weights & Biases dashboard for a training run.)*
+
+## Disabling wandb
+
+* Training after running `wandb disabled` inside that directory creates no wandb run
+![Screenshot (84)](https://user-images.githubusercontent.com/15766192/143441777-c780bdd7-7cb4-4404-9559-b4316030a985.png)
+
+* To enable wandb again, run `wandb online`
+![Screenshot (85)](https://user-images.githubusercontent.com/15766192/143441866-7191b2cb-22f0-4e0f-ae64-2dc47dc13078.png)
+
+## Advanced Usage
+
+You can leverage W&B artifacts and Tables integration to easily visualize and manage your datasets, models and training evaluations. Here are some quick examples to get you started.
+
+### 1: Train and Log Evaluation simultaneously
+
+This is an extension of the previous section, but it will also start training after uploading the dataset, and will log an **Evaluation Table**. The Evaluation Table compares your predictions and ground truths across the validation set for each epoch. It uses references to the already uploaded datasets, so no images will be uploaded from your system more than once.
+
+**Usage**
+
+ ```shell
+ $ python train.py --upload_data val
+ ```
+
+![Screenshot from 2021-11-21 17-40-06](https://user-images.githubusercontent.com/15766192/142761183-c1696d8c-3f38-45ab-991a-bb0dfd98ae7d.png)
+
+### 2: Visualize and Version Datasets
+
+Log, visualize, dynamically query, and understand your data with W&B Tables. You can use the following command to log your dataset as a W&B Table. This will generate a `{dataset}_wandb.yaml` file which can be used to train from the dataset artifact.
+
+**Usage**
+
+ ```shell
+ $ python utils/loggers/wandb/log_dataset.py --project ... --name ... --data ...
+ ```
+
+![Screenshot (64)](https://user-images.githubusercontent.com/15766192/128486078-d8433890-98a3-4d12-8986-b6c0e3fc64b9.png)
+
+### 3: Train using dataset artifact
+
+When you upload a dataset as described in the first section, you get a new config file with `_wandb` added to its name. This file contains the information needed to train a model directly from the dataset artifact. This also logs the evaluation described in section 1.
+
+**Usage**
+
+ ```shell
+ $ python train.py --data {data}_wandb.yaml
+ ```
+
+![Screenshot (72)](https://user-images.githubusercontent.com/15766192/128979739-4cf63aeb-a76f-483f-8861-1c0100b938a5.png)
+
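+For illustration, the generated `{dataset}_wandb.yaml` replaces local train/val paths with W&B artifact references using the `wandb-artifact://` prefix (a hypothetical sketch; entity, project and artifact names are placeholders):
+
+ ```yaml
+ # hypothetical coco128_wandb.yaml
+ train: wandb-artifact://<entity>/<project>/train_dataset  # training set artifact reference
+ val: wandb-artifact://<entity>/<project>/val_dataset      # validation set artifact reference
+ nc: 80                               # number of classes, carried over from the source data.yaml
+ names: ['person', 'bicycle', 'car']  # class names, truncated here for illustration
+ ```
+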
+### 4: Save model checkpoints as artifacts
+
+To enable saving and versioning checkpoints of your experiment, pass `--save_period n` with the base command, where `n` represents the checkpoint interval in epochs. You can also log both the dataset and model checkpoints simultaneously. If `--save_period` is not passed, only the final model will be logged.
+
+**Usage**
+
+ ```shell
+ $ python train.py --save_period 1
+ ```
+
+![Screenshot (68)](https://user-images.githubusercontent.com/15766192/128726138-ec6c1f60-639d-437d-b4ee-3acd9de47ef3.png)
+
+### 5: Resume runs from checkpoint artifacts
+
+Any run can be resumed using artifacts if the `--resume` argument starts with the `wandb-artifact://` prefix followed by the run path, i.e. `wandb-artifact://username/project/runid`. This doesn't require the model checkpoint to be present on the local system.
+
+**Usage**
+
+ ```shell
+ $ python train.py --resume wandb-artifact://{run_path}
+ ```
+
+![Screenshot (70)](https://user-images.githubusercontent.com/15766192/128728988-4e84b355-6c87-41ae-a591-14aecf45343e.png)
+
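+Under the hood, the run path is mapped to a model artifact named `run_{run_id}_model` and `last.pt` is downloaded from it (see `check_wandb_resume` in `utils/loggers/wandb/wandb_utils.py`). A minimal sketch of the same lookup via the W&B public API, with placeholder names:
+
+ ```python
+ import wandb
+ from pathlib import Path
+
+ # 'entity/project/run_id' below are placeholders for your own run path
+ api = wandb.Api()
+ artifact = api.artifact('entity/project/run_run_id_model:latest')  # model artifact for the run
+ model_dir = artifact.download()             # download checkpoint files locally
+ weights = str(Path(model_dir) / 'last.pt')  # resume training from this checkpoint
+ ```
+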
+### 6: Resume runs from dataset artifact & checkpoint artifacts
+
+Local datasets or model checkpoints are not required, so this can be used to resume runs directly on a different device. The syntax is the same as in the previous section, but you'll need to log both the dataset and model checkpoints as artifacts, i.e. either set `--upload_dataset` or train from a `_wandb.yaml` file, and set `--save_period`.
+
+**Usage**
+
+ ```shell
+ $ python train.py --resume wandb-artifact://{run_path}
+ ```
+
+![Screenshot (70)](https://user-images.githubusercontent.com/15766192/128728988-4e84b355-6c87-41ae-a591-14aecf45343e.png)
+
+## Reports
+
+W&B Reports can be created from your saved runs for sharing online. Once a report is created you will receive a link you can use to publicly share your results. Here is an example report created from the COCO128 tutorial trainings of all four YOLOv5 models ([link](https://wandb.ai/glenn-jocher/yolov5_tutorial/reports/YOLOv5-COCO128-Tutorial-Results--VmlldzozMDI5OTY)).
+
+*(Screenshot: an example Weights & Biases report.)*
+
+
+## Environments
+
+YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):
+
+- **Google Colab and Kaggle** notebooks with free GPU: Open In Colab | Open In Kaggle
+- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)
+- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)
+- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) Docker Pulls
+
+
+## Status
+
+![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg)
+
+If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), validation ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on MacOS, Windows, and Ubuntu every 24 hours and on every commit. 
diff --git a/__init__.py b/utils/loggers/wandb/__init__.py similarity index 100% rename from __init__.py rename to utils/loggers/wandb/__init__.py diff --git a/utils/wandb_logging/log_dataset.py b/utils/loggers/wandb/log_dataset.py similarity index 61% rename from utils/wandb_logging/log_dataset.py rename to utils/loggers/wandb/log_dataset.py index f45a23011f15..06e81fb69307 100644 --- a/utils/wandb_logging/log_dataset.py +++ b/utils/loggers/wandb/log_dataset.py @@ -1,16 +1,16 @@ import argparse -import yaml - from wandb_utils import WandbLogger +from utils.general import LOGGER + WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' def create_dataset_artifact(opt): - with open(opt.data) as f: - data = yaml.safe_load(f) # data dict - logger = WandbLogger(opt, '', None, data, job_type='Dataset Creation') + logger = WandbLogger(opt, None, job_type='Dataset Creation') # TODO: return value unused + if not logger.wandb: + LOGGER.info("install wandb using `pip install wandb` to log the dataset") if __name__ == '__main__': @@ -18,6 +18,9 @@ def create_dataset_artifact(opt): parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path') parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset') parser.add_argument('--project', type=str, default='YOLOv5', help='name of W&B Project') + parser.add_argument('--entity', default=None, help='W&B entity') + parser.add_argument('--name', type=str, default='log dataset', help='name of W&B run') + opt = parser.parse_args() opt.resume = False # Explicitly disallow resume check for dataset upload job diff --git a/utils/loggers/wandb/sweep.py b/utils/loggers/wandb/sweep.py new file mode 100644 index 000000000000..206059bc30bf --- /dev/null +++ b/utils/loggers/wandb/sweep.py @@ -0,0 +1,41 @@ +import sys +from pathlib import Path + +import wandb + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[3] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH + +from train import parse_opt, train +from utils.callbacks import Callbacks +from utils.general import increment_path +from utils.torch_utils import select_device + + +def sweep(): + wandb.init() + # Get hyp dict from sweep agent + hyp_dict = vars(wandb.config).get("_items") + + # Workaround: get necessary opt args + opt = parse_opt(known=True) + opt.batch_size = hyp_dict.get("batch_size") + opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve)) + opt.epochs = hyp_dict.get("epochs") + opt.nosave = True + opt.data = hyp_dict.get("data") + opt.weights = str(opt.weights) + opt.cfg = str(opt.cfg) + opt.data = str(opt.data) + opt.hyp = str(opt.hyp) + opt.project = str(opt.project) + device = select_device(opt.device, batch_size=opt.batch_size) + + # train + train(hyp_dict, opt, device, callbacks=Callbacks()) + + +if __name__ == "__main__": + sweep() diff --git a/utils/loggers/wandb/sweep.yaml b/utils/loggers/wandb/sweep.yaml new file mode 100644 index 000000000000..688b1ea0285f --- /dev/null +++ b/utils/loggers/wandb/sweep.yaml @@ -0,0 +1,143 @@ +# Hyperparameters for training +# To set range- +# Provide min and max values as: +# parameter: +# +# min: scalar +# max: scalar +# OR +# +# Set a specific list of search space- +# parameter: +# values: [scalar1, scalar2, scalar3...] 
+# +# You can use grid, bayesian and hyperopt search strategy +# For more info on configuring sweeps visit - https://docs.wandb.ai/guides/sweeps/configuration + +program: utils/loggers/wandb/sweep.py +method: random +metric: + name: metrics/mAP_0.5 + goal: maximize + +parameters: + # hyperparameters: set either min, max range or values list + data: + value: "data/coco128.yaml" + batch_size: + values: [64] + epochs: + values: [10] + + lr0: + distribution: uniform + min: 1e-5 + max: 1e-1 + lrf: + distribution: uniform + min: 0.01 + max: 1.0 + momentum: + distribution: uniform + min: 0.6 + max: 0.98 + weight_decay: + distribution: uniform + min: 0.0 + max: 0.001 + warmup_epochs: + distribution: uniform + min: 0.0 + max: 5.0 + warmup_momentum: + distribution: uniform + min: 0.0 + max: 0.95 + warmup_bias_lr: + distribution: uniform + min: 0.0 + max: 0.2 + box: + distribution: uniform + min: 0.02 + max: 0.2 + cls: + distribution: uniform + min: 0.2 + max: 4.0 + cls_pw: + distribution: uniform + min: 0.5 + max: 2.0 + obj: + distribution: uniform + min: 0.2 + max: 4.0 + obj_pw: + distribution: uniform + min: 0.5 + max: 2.0 + iou_t: + distribution: uniform + min: 0.1 + max: 0.7 + anchor_t: + distribution: uniform + min: 2.0 + max: 8.0 + fl_gamma: + distribution: uniform + min: 0.0 + max: 4.0 + hsv_h: + distribution: uniform + min: 0.0 + max: 0.1 + hsv_s: + distribution: uniform + min: 0.0 + max: 0.9 + hsv_v: + distribution: uniform + min: 0.0 + max: 0.9 + degrees: + distribution: uniform + min: 0.0 + max: 45.0 + translate: + distribution: uniform + min: 0.0 + max: 0.9 + scale: + distribution: uniform + min: 0.0 + max: 0.9 + shear: + distribution: uniform + min: 0.0 + max: 10.0 + perspective: + distribution: uniform + min: 0.0 + max: 0.001 + flipud: + distribution: uniform + min: 0.0 + max: 1.0 + fliplr: + distribution: uniform + min: 0.0 + max: 1.0 + mosaic: + distribution: uniform + min: 0.0 + max: 1.0 + mixup: + distribution: uniform + min: 0.0 + max: 1.0 + copy_paste: + distribution: uniform + min: 0.0 + max: 1.0 diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py new file mode 100644 index 000000000000..786e58a19972 --- /dev/null +++ b/utils/loggers/wandb/wandb_utils.py @@ -0,0 +1,562 @@ +"""Utilities and tools for tracking runs with Weights & Biases.""" + +import logging +import os +import sys +from contextlib import contextmanager +from pathlib import Path +from typing import Dict + +import yaml +from tqdm import tqdm + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[3] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH + +from utils.datasets import LoadImagesAndLabels, img2label_paths +from utils.general import LOGGER, check_dataset, check_file + +try: + import wandb + + assert hasattr(wandb, '__version__') # verify package import not local dir +except (ImportError, AssertionError): + wandb = None + +RANK = int(os.getenv('RANK', -1)) +WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' + + +def remove_prefix(from_string, prefix=WANDB_ARTIFACT_PREFIX): + return from_string[len(prefix):] + + +def check_wandb_config_file(data_config_file): + wandb_config = '_wandb.'.join(data_config_file.rsplit('.', 1)) # updated data.yaml path + if Path(wandb_config).is_file(): + return wandb_config + return data_config_file + + +def check_wandb_dataset(data_file): + is_trainset_wandb_artifact = False + is_valset_wandb_artifact = False + if check_file(data_file) and data_file.endswith('.yaml'): + with open(data_file, 
errors='ignore') as f: + data_dict = yaml.safe_load(f) + is_trainset_wandb_artifact = (isinstance(data_dict['train'], str) and + data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX)) + is_valset_wandb_artifact = (isinstance(data_dict['val'], str) and + data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX)) + if is_trainset_wandb_artifact or is_valset_wandb_artifact: + return data_dict + else: + return check_dataset(data_file) + + +def get_run_info(run_path): + run_path = Path(remove_prefix(run_path, WANDB_ARTIFACT_PREFIX)) + run_id = run_path.stem + project = run_path.parent.stem + entity = run_path.parent.parent.stem + model_artifact_name = 'run_' + run_id + '_model' + return entity, project, run_id, model_artifact_name + + +def check_wandb_resume(opt): + process_wandb_config_ddp_mode(opt) if RANK not in [-1, 0] else None + if isinstance(opt.resume, str): + if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): + if RANK not in [-1, 0]: # For resuming DDP runs + entity, project, run_id, model_artifact_name = get_run_info(opt.resume) + api = wandb.Api() + artifact = api.artifact(entity + '/' + project + '/' + model_artifact_name + ':latest') + modeldir = artifact.download() + opt.weights = str(Path(modeldir) / "last.pt") + return True + return None + + +def process_wandb_config_ddp_mode(opt): + with open(check_file(opt.data), errors='ignore') as f: + data_dict = yaml.safe_load(f) # data dict + train_dir, val_dir = None, None + if isinstance(data_dict['train'], str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX): + api = wandb.Api() + train_artifact = api.artifact(remove_prefix(data_dict['train']) + ':' + opt.artifact_alias) + train_dir = train_artifact.download() + train_path = Path(train_dir) / 'data/images/' + data_dict['train'] = str(train_path) + + if isinstance(data_dict['val'], str) and data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX): + api = wandb.Api() + val_artifact = api.artifact(remove_prefix(data_dict['val']) + ':' + opt.artifact_alias) + val_dir = val_artifact.download() + val_path = Path(val_dir) / 'data/images/' + data_dict['val'] = str(val_path) + if train_dir or val_dir: + ddp_data_path = str(Path(val_dir) / 'wandb_local_data.yaml') + with open(ddp_data_path, 'w') as f: + yaml.safe_dump(data_dict, f) + opt.data = ddp_data_path + + +class WandbLogger(): + """Log training runs, datasets, models, and predictions to Weights & Biases. + + This logger sends information to W&B at wandb.ai. By default, this information + includes hyperparameters, system configuration and metrics, model metrics, + and basic data metrics and analyses. + + By providing additional command line arguments to train.py, datasets, + models and predictions can also be logged. 
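+
+    For example (flags assumed from this repository's train.py): passing `--upload_dataset`
+    uploads the dataset as a W&B artifact, and `--bbox_interval N` logs bounding-box
+    debug images every N epochs.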
+
+    For more on how this logger is used, see the Weights & Biases documentation:
+    https://docs.wandb.com/guides/integrations/yolov5
+    """
+
+    def __init__(self, opt, run_id=None, job_type='Training'):
+        """
+        - Initialize WandbLogger instance
+        - Upload dataset if opt.upload_dataset is True
+        - Set up training processes if job_type is 'Training'
+
+        arguments:
+        opt (namespace) -- Commandline arguments for this run
+        run_id (str) -- Run ID of W&B run to be resumed
+        job_type (str) -- To set the job_type for this run
+
+        """
+        # Pre-training routine --
+        self.job_type = job_type
+        self.wandb, self.wandb_run = wandb, None if not wandb else wandb.run
+        self.val_artifact, self.train_artifact = None, None
+        self.train_artifact_path, self.val_artifact_path = None, None
+        self.result_artifact = None
+        self.val_table, self.result_table = None, None
+        self.bbox_media_panel_images = []
+        self.val_table_path_map = None
+        self.max_imgs_to_log = 16
+        self.wandb_artifact_data_dict = None
+        self.data_dict = None
+        # It's more elegant to stick to one wandb.init call,
+        # but useful config data is overwritten in the WandbLogger's wandb.init call
+        if isinstance(opt.resume, str):  # checks resume from artifact
+            if opt.resume.startswith(WANDB_ARTIFACT_PREFIX):
+                entity, project, run_id, model_artifact_name = get_run_info(opt.resume)
+                model_artifact_name = WANDB_ARTIFACT_PREFIX + model_artifact_name
+                assert wandb, 'install wandb to resume wandb runs'
+                # Resume wandb-artifact:// runs here | workaround for not overwriting wandb.config
+                self.wandb_run = wandb.init(id=run_id,
+                                            project=project,
+                                            entity=entity,
+                                            resume='allow',
+                                            allow_val_change=True)
+                opt.resume = model_artifact_name
+        elif self.wandb:
+            self.wandb_run = wandb.init(config=opt,
+                                        resume="allow",
+                                        project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem,
+                                        entity=opt.entity,
+                                        name=opt.name if opt.name != 'exp' else None,
+                                        job_type=job_type,
+                                        id=run_id,
+                                        allow_val_change=True) if not wandb.run else wandb.run
+        if self.wandb_run:
+            if self.job_type == 'Training':
+                if opt.upload_dataset:
+                    if not opt.resume:
+                        self.wandb_artifact_data_dict = self.check_and_upload_dataset(opt)
+
+                if opt.resume:
+                    # resume from artifact
+                    if isinstance(opt.resume, str) and opt.resume.startswith(WANDB_ARTIFACT_PREFIX):
+                        self.data_dict = dict(self.wandb_run.config.data_dict)
+                    else:  # local resume
+                        self.data_dict = check_wandb_dataset(opt.data)
+                else:
+                    self.data_dict = check_wandb_dataset(opt.data)
+                    self.wandb_artifact_data_dict = self.wandb_artifact_data_dict or self.data_dict
+
+                    # write data_dict to config. Useful for resuming from artifacts. Do this only when not resuming.
+                    self.wandb_run.config.update({'data_dict': self.wandb_artifact_data_dict},
+                                                 allow_val_change=True)
+                self.setup_training(opt)
+
+            if self.job_type == 'Dataset Creation':
+                self.wandb_run.config.update({"upload_dataset": True})
+                self.data_dict = self.check_and_upload_dataset(opt)
+
+    def check_and_upload_dataset(self, opt):
+        """
+        Check if the dataset format is compatible and upload it as a W&B artifact
+
+        arguments:
+        opt (namespace) -- Commandline arguments for current run
+
+        returns:
+        Updated dataset info dictionary where local dataset paths are replaced by WANDB_ARTIFACT_PREFIX links.
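+        A hypothetical example for illustration: a local path such as '../datasets/coco128'
+        would be replaced by 'wandb-artifact://YOLOv5/train' (see log_dataset_artifact below).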
+ """ + assert wandb, 'Install wandb to upload dataset' + config_path = self.log_dataset_artifact(opt.data, + opt.single_cls, + 'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem) + with open(config_path, errors='ignore') as f: + wandb_data_dict = yaml.safe_load(f) + return wandb_data_dict + + def setup_training(self, opt): + """ + Setup the necessary processes for training YOLO models: + - Attempt to download model checkpoint and dataset artifacts if opt.resume stats with WANDB_ARTIFACT_PREFIX + - Update data_dict, to contain info of previous run if resumed and the paths of dataset artifact if downloaded + - Setup log_dict, initialize bbox_interval + + arguments: + opt (namespace) -- commandline arguments for this run + + """ + self.log_dict, self.current_epoch = {}, 0 + self.bbox_interval = opt.bbox_interval + if isinstance(opt.resume, str): + modeldir, _ = self.download_model_artifact(opt) + if modeldir: + self.weights = Path(modeldir) / "last.pt" + config = self.wandb_run.config + opt.weights, opt.save_period, opt.batch_size, opt.bbox_interval, opt.epochs, opt.hyp, opt.imgsz = str( + self.weights), config.save_period, config.batch_size, config.bbox_interval, config.epochs,\ + config.hyp, config.imgsz + data_dict = self.data_dict + if self.val_artifact is None: # If --upload_dataset is set, use the existing artifact, don't download + self.train_artifact_path, self.train_artifact = self.download_dataset_artifact(data_dict.get('train'), + opt.artifact_alias) + self.val_artifact_path, self.val_artifact = self.download_dataset_artifact(data_dict.get('val'), + opt.artifact_alias) + + if self.train_artifact_path is not None: + train_path = Path(self.train_artifact_path) / 'data/images/' + data_dict['train'] = str(train_path) + if self.val_artifact_path is not None: + val_path = Path(self.val_artifact_path) / 'data/images/' + data_dict['val'] = str(val_path) + + if self.val_artifact is not None: + self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") + columns = ["epoch", "id", "ground truth", "prediction"] + columns.extend(self.data_dict['names']) + self.result_table = wandb.Table(columns) + self.val_table = self.val_artifact.get("val") + if self.val_table_path_map is None: + self.map_val_table_path() + if opt.bbox_interval == -1: + self.bbox_interval = opt.bbox_interval = (opt.epochs // 10) if opt.epochs > 10 else 1 + if opt.evolve: + self.bbox_interval = opt.bbox_interval = opt.epochs + 1 + train_from_artifact = self.train_artifact_path is not None and self.val_artifact_path is not None + # Update the the data_dict to point to local artifacts dir + if train_from_artifact: + self.data_dict = data_dict + + def download_dataset_artifact(self, path, alias): + """ + download the model checkpoint artifact if the path starts with WANDB_ARTIFACT_PREFIX + + arguments: + path -- path of the dataset to be used for training + alias (str)-- alias of the artifact to be download/used for training + + returns: + (str, wandb.Artifact) -- path of the downladed dataset and it's corresponding artifact object if dataset + is found otherwise returns (None, None) + """ + if isinstance(path, str) and path.startswith(WANDB_ARTIFACT_PREFIX): + artifact_path = Path(remove_prefix(path, WANDB_ARTIFACT_PREFIX) + ":" + alias) + dataset_artifact = wandb.use_artifact(artifact_path.as_posix().replace("\\", "/")) + assert dataset_artifact is not None, "'Error: W&B dataset artifact doesn\'t exist'" + datadir = dataset_artifact.download() + return datadir, dataset_artifact + 
+        return None, None
+
+    def download_model_artifact(self, opt):
+        """
+        Download the model checkpoint artifact if the resume path starts with WANDB_ARTIFACT_PREFIX
+
+        arguments:
+        opt (namespace) -- Commandline arguments for this run
+        """
+        if opt.resume.startswith(WANDB_ARTIFACT_PREFIX):
+            model_artifact = wandb.use_artifact(remove_prefix(opt.resume, WANDB_ARTIFACT_PREFIX) + ":latest")
+            assert model_artifact is not None, 'Error: W&B model artifact doesn\'t exist'
+            modeldir = model_artifact.download()
+            # epochs_trained = model_artifact.metadata.get('epochs_trained')
+            total_epochs = model_artifact.metadata.get('total_epochs')
+            is_finished = total_epochs is None
+            assert not is_finished, 'training is finished, can only resume incomplete runs.'
+            return modeldir, model_artifact
+        return None, None
+
+    def log_model(self, path, opt, epoch, fitness_score, best_model=False):
+        """
+        Log the model checkpoint as a W&B artifact
+
+        arguments:
+        path (Path) -- Path of directory containing the checkpoints
+        opt (namespace) -- Command line arguments for this run
+        epoch (int) -- Current epoch number
+        fitness_score (float) -- fitness score for current epoch
+        best_model (boolean) -- Boolean representing if the current checkpoint is the best yet.
+        """
+        model_artifact = wandb.Artifact('run_' + wandb.run.id + '_model', type='model', metadata={
+            'original_url': str(path),
+            'epochs_trained': epoch + 1,
+            'save period': opt.save_period,
+            'project': opt.project,
+            'total_epochs': opt.epochs,
+            'fitness_score': fitness_score
+        })
+        model_artifact.add_file(str(path / 'last.pt'), name='last.pt')
+        wandb.log_artifact(model_artifact,
+                           aliases=['latest', 'last', 'epoch ' + str(self.current_epoch), 'best' if best_model else ''])
+        LOGGER.info(f"Saving model artifact on epoch {epoch + 1}")
+
+    def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=False):
+        """
+        Log the dataset as a W&B artifact and return the new data file with W&B links
+
+        arguments:
+        data_file (str) -- the .yaml file with information about the dataset like path, classes etc.
+        single_cls (boolean) -- train multi-class data as single-class
+        project (str) -- project name. Used to construct the artifact path
+        overwrite_config (boolean) -- overwrites the data.yaml file if set to True, otherwise creates a new
+        file with a _wandb postfix, e.g. data_wandb.yaml
+
+        returns:
+        the new .yaml file with artifact links; it can be used to start training directly from artifacts
+        """
+        upload_dataset = self.wandb_run.config.upload_dataset
+        log_val_only = isinstance(upload_dataset, str) and upload_dataset == 'val'
+        self.data_dict = check_dataset(data_file)  # parse and check
+        data = dict(self.data_dict)
+        nc, names = (1, ['item']) if single_cls else (int(data['nc']), data['names'])
+        names = {k: v for k, v in enumerate(names)}  # to index dictionary
+
+        # log train set
+        if not log_val_only:
+            self.train_artifact = self.create_dataset_table(LoadImagesAndLabels(
+                data['train'], rect=True, batch_size=1), names, name='train') if data.get('train') else None
+            if data.get('train'):
+                data['train'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'train')
+
+        self.val_artifact = self.create_dataset_table(LoadImagesAndLabels(
+            data['val'], rect=True, batch_size=1), names, name='val') if data.get('val') else None
+        if data.get('val'):
+            data['val'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'val')
+
+        path = Path(data_file)
+        # create a _wandb.yaml file with artifact links if both train and test set are logged
+        if not log_val_only:
+            path = (path.stem if overwrite_config else path.stem + '_wandb') + '.yaml'  # updated data.yaml path
+            path = ROOT / 'data' / path
+            data.pop('download', None)
+            data.pop('path', None)
+            with open(path, 'w') as f:
+                yaml.safe_dump(data, f)
+                LOGGER.info(f"Created dataset config file {path}")
+
+        if self.job_type == 'Training':  # builds correct artifact pipeline graph
+            if not log_val_only:
+                self.wandb_run.log_artifact(
+                    self.train_artifact)  # calling use_artifact downloads the dataset. NOT NEEDED!
+            self.wandb_run.use_artifact(self.val_artifact)
+            self.val_artifact.wait()
+            self.val_table = self.val_artifact.get('val')
+            self.map_val_table_path()
+        else:
+            self.wandb_run.log_artifact(self.train_artifact)
+            self.wandb_run.log_artifact(self.val_artifact)
+        return path
+
+    def map_val_table_path(self):
+        """
+        Map the validation dataset Table: name of file -> its id in the W&B Table.
+        Useful for referencing artifacts for evaluation.
+        """
+        self.val_table_path_map = {}
+        LOGGER.info("Mapping dataset")
+        for i, data in enumerate(tqdm(self.val_table.data)):
+            self.val_table_path_map[data[3]] = data[0]
+
+    def create_dataset_table(self, dataset: LoadImagesAndLabels, class_to_id: Dict[int, str], name: str = 'dataset'):
+        """
+        Create and return a W&B artifact containing a W&B Table of the dataset.
+
+        arguments:
+        dataset -- instance of LoadImagesAndLabels class used to iterate over the data to build Table
+        class_to_id -- hash map that maps class ids to labels
+        name -- name of the artifact
+
+        returns:
+        dataset artifact to be logged or used
+        """
+        # TODO: explore multiprocessing to split this loop in parallel; this is essential for speeding up the logging
+        artifact = wandb.Artifact(name=name, type="dataset")
+        img_files = tqdm([dataset.path]) if isinstance(dataset.path, str) and Path(dataset.path).is_dir() else None
+        img_files = tqdm(dataset.im_files) if not img_files else img_files
+        for img_file in img_files:
+            if Path(img_file).is_dir():
+                artifact.add_dir(img_file, name='data/images')
+                labels_path = 'labels'.join(dataset.path.rsplit('images', 1))
+                artifact.add_dir(labels_path, name='data/labels')
+            else:
+                artifact.add_file(img_file, name='data/images/' + Path(img_file).name)
+                label_file = Path(img2label_paths([img_file])[0])
+                artifact.add_file(str(label_file),
+                                  name='data/labels/' + label_file.name) if label_file.exists() else None
+        table = wandb.Table(columns=["id", "train_image", "Classes", "name"])
+        class_set = wandb.Classes([{'id': id, 'name': name} for id, name in class_to_id.items()])
+        for si, (img, labels, paths, shapes) in enumerate(tqdm(dataset)):
+            box_data, img_classes = [], {}
+            for cls, *xywh in labels[:, 1:].tolist():
+                cls = int(cls)
+                box_data.append({"position": {"middle": [xywh[0], xywh[1]], "width": xywh[2], "height": xywh[3]},
+                                 "class_id": cls,
+                                 "box_caption": "%s" % (class_to_id[cls])})
+                img_classes[cls] = class_to_id[cls]
+            boxes = {"ground_truth": {"box_data": box_data, "class_labels": class_to_id}}  # inference-space
+            table.add_data(si, wandb.Image(paths, classes=class_set, boxes=boxes), list(img_classes.values()),
+                           Path(paths).name)
+        artifact.add(table, name)
+        return artifact
+
+    def log_training_progress(self, predn, path, names):
+        """
+        Build evaluation Table. Uses reference from validation dataset table.
+
+        arguments:
+        predn (list): list of predictions in the native space in the format - [xmin, ymin, xmax, ymax, confidence, class]
+        path (str): local path of the current evaluation image
+        names (dict(int, str)): hash map that maps class ids to labels
+        """
+        class_set = wandb.Classes([{'id': id, 'name': name} for id, name in names.items()])
+        box_data = []
+        avg_conf_per_class = [0] * len(self.data_dict['names'])
+        pred_class_count = {}
+        for *xyxy, conf, cls in predn.tolist():
+            if conf >= 0.25:
+                cls = int(cls)
+                box_data.append(
+                    {"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
+                     "class_id": cls,
+                     "box_caption": f"{names[cls]} {conf:.3f}",
+                     "scores": {"class_score": conf},
+                     "domain": "pixel"})
+                avg_conf_per_class[cls] += conf
+
+                if cls in pred_class_count:
+                    pred_class_count[cls] += 1
+                else:
+                    pred_class_count[cls] = 1
+
+        for pred_class in pred_class_count.keys():
+            avg_conf_per_class[pred_class] = avg_conf_per_class[pred_class] / pred_class_count[pred_class]
+
+        boxes = {"predictions": {"box_data": box_data, "class_labels": names}}  # inference-space
+        id = self.val_table_path_map[Path(path).name]
+        self.result_table.add_data(self.current_epoch,
+                                   id,
+                                   self.val_table.data[id][1],
+                                   wandb.Image(self.val_table.data[id][1], boxes=boxes, classes=class_set),
+                                   *avg_conf_per_class
+                                   )
+
+    def val_one_image(self, pred, predn, path, names, im):
+        """
+        Log validation data for one image: updates the result Table if the validation dataset is uploaded
+        and logs the bbox media panel
+
+        arguments:
+        pred (list): list of scaled predictions in the format - [xmin, ymin, xmax, ymax, confidence, class]
+        predn (list): list of predictions in the native space - [xmin, ymin, xmax, ymax, confidence, class]
+        path (str): local path of the current evaluation image
+        """
+        if self.val_table and self.result_table:  # Log Table if Val dataset is uploaded as artifact
+            self.log_training_progress(predn, path, names)
+
+        if len(self.bbox_media_panel_images) < self.max_imgs_to_log and self.current_epoch > 0:
+            if self.current_epoch % self.bbox_interval == 0:
+                box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
+                             "class_id": int(cls),
+                             "box_caption": f"{names[int(cls)]} {conf:.3f}",
+                             "scores": {"class_score": conf},
+                             "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()]
+                boxes = {"predictions": {"box_data": box_data, "class_labels": names}}  # inference-space
+                self.bbox_media_panel_images.append(wandb.Image(im, boxes=boxes, caption=path.name))
+
+    def log(self, log_dict):
+        """
+        Save the metrics to the logging dictionary
+
+        arguments:
+        log_dict (Dict) -- metrics/media to be logged in current step
+        """
+        if self.wandb_run:
+            for key, value in log_dict.items():
+                self.log_dict[key] = value
+
+    def end_epoch(self, best_result=False):
+        """
+        Commit the log_dict, model artifacts and Tables to W&B and flush the log_dict.
+
+        arguments:
+        best_result (boolean): Boolean representing if the result of this evaluation is best or not
+        """
+        if self.wandb_run:
+            with all_logging_disabled():
+                if self.bbox_media_panel_images:
+                    self.log_dict["BoundingBoxDebugger"] = self.bbox_media_panel_images
+                try:
+                    wandb.log(self.log_dict)
+                except BaseException as e:
+                    LOGGER.info(
+                        f"An error occurred in wandb logger. The training will proceed without interruption. More info\n{e}")
+                    self.wandb_run.finish()
+                    self.wandb_run = None
+
+                self.log_dict = {}
+                self.bbox_media_panel_images = []
+            if self.result_artifact:
+                self.result_artifact.add(self.result_table, 'result')
+                wandb.log_artifact(self.result_artifact, aliases=['latest', 'last', 'epoch ' + str(self.current_epoch),
+                                                                  ('best' if best_result else '')])
+
+                wandb.log({"evaluation": self.result_table})
+                columns = ["epoch", "id", "ground truth", "prediction"]
+                columns.extend(self.data_dict['names'])
+                self.result_table = wandb.Table(columns)
+                self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation")
+
+    def finish_run(self):
+        """
+        Log metrics if any and finish the current W&B run
+        """
+        if self.wandb_run:
+            if self.log_dict:
+                with all_logging_disabled():
+                    wandb.log(self.log_dict)
+            wandb.run.finish()
+
+
+@contextmanager
+def all_logging_disabled(highest_level=logging.CRITICAL):
+    """ source - https://gist.github.com/simon-weber/7853144
+    A context manager that will prevent any logging messages triggered during the body from being processed.
+    :param highest_level: the maximum logging level in use.
+      This would only need to be changed if a custom level greater than CRITICAL is defined.
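+
+    Illustrative usage (an assumed example, not from the original gist):
+        with all_logging_disabled():
+            logging.warning('this message will not be processed')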
+ """ + previous_level = logging.root.manager.disable + logging.disable(highest_level) + try: + yield + finally: + logging.disable(previous_level) diff --git a/utils/loss.py b/utils/loss.py index 9e78df17fdf3..bf9b592d4ad2 100644 --- a/utils/loss.py +++ b/utils/loss.py @@ -1,10 +1,13 @@ -# Loss functions +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Loss functions +""" import torch import torch.nn as nn -from utils.general import bbox_iou -from utils.torch_utils import is_parallel +from utils.metrics import bbox_iou +from utils.torch_utils import de_parallel def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441 @@ -15,7 +18,7 @@ def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#iss class BCEBlurWithLogitsLoss(nn.Module): # BCEwithLogitLoss() with reduced missing label effects. def __init__(self, alpha=0.05): - super(BCEBlurWithLogitsLoss, self).__init__() + super().__init__() self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none') # must be nn.BCEWithLogitsLoss() self.alpha = alpha @@ -32,7 +35,7 @@ def forward(self, pred, true): class FocalLoss(nn.Module): # Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): - super(FocalLoss, self).__init__() + super().__init__() self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() self.gamma = gamma self.alpha = alpha @@ -62,7 +65,7 @@ def forward(self, pred, true): class QFocalLoss(nn.Module): # Wraps Quality focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): - super(QFocalLoss, self).__init__() + super().__init__() self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() self.gamma = gamma self.alpha = alpha @@ -86,9 +89,10 @@ def forward(self, pred, true): class ComputeLoss: + sort_obj_iou = False + # Compute losses def __init__(self, model, autobalance=False): - super(ComputeLoss, self).__init__() device = next(model.parameters()).device # get model device h = model.hyp # hyperparameters @@ -104,42 +108,53 @@ def __init__(self, model, autobalance=False): if g > 0: BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g) - det = model.module.model[-1] if is_parallel(model) else model.model[-1] # Detect() module - self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, .02]) # P3-P7 - self.ssi = list(det.stride).index(16) if autobalance else 0 # stride 16 index - self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, model.gr, h, autobalance - for k in 'na', 'nc', 'nl', 'anchors': - setattr(self, k, getattr(det, k)) - - def __call__(self, p, targets): # predictions, targets, model - device = targets.device - lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device) + m = de_parallel(model).model[-1] # Detect() module + self.balance = {3: [4.0, 1.0, 0.4]}.get(m.nl, [4.0, 1.0, 0.25, 0.06, 0.02]) # P3-P7 + self.ssi = list(m.stride).index(16) if autobalance else 0 # stride 16 index + self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance + self.na = m.na # number of anchors + self.nc = m.nc # number of classes + self.nl = m.nl # number of layers + self.anchors = m.anchors + self.device = device + + def __call__(self, p, targets): # predictions, targets + lcls = torch.zeros(1, device=self.device) # class loss + lbox = torch.zeros(1, 
device=self.device) # box loss + lobj = torch.zeros(1, device=self.device) # object loss tcls, tbox, indices, anchors = self.build_targets(p, targets) # targets # Losses for i, pi in enumerate(p): # layer index, layer predictions b, a, gj, gi = indices[i] # image, anchor, gridy, gridx - tobj = torch.zeros_like(pi[..., 0], device=device) # target obj + tobj = torch.zeros(pi.shape[:4], dtype=pi.dtype, device=self.device) # target obj n = b.shape[0] # number of targets if n: - ps = pi[b, a, gj, gi] # prediction subset corresponding to targets + # pxy, pwh, _, pcls = pi[b, a, gj, gi].tensor_split((2, 4, 5), dim=1) # faster, requires torch 1.8.0 + pxy, pwh, _, pcls = pi[b, a, gj, gi].split((2, 2, 1, self.nc), 1) # target-subset of predictions # Regression - pxy = ps[:, :2].sigmoid() * 2. - 0.5 - pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i] + pxy = pxy.sigmoid() * 2 - 0.5 + pwh = (pwh.sigmoid() * 2) ** 2 * anchors[i] pbox = torch.cat((pxy, pwh), 1) # predicted box iou = bbox_iou(pbox.T, tbox[i], x1y1x2y2=False, CIoU=True) # iou(prediction, target) lbox += (1.0 - iou).mean() # iou loss # Objectness - tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * iou.detach().clamp(0).type(tobj.dtype) # iou ratio + iou = iou.detach().clamp(0).type(tobj.dtype) + if self.sort_obj_iou: + j = iou.argsort() + b, a, gj, gi, iou = b[j], a[j], gj[j], gi[j], iou[j] + if self.gr < 1: + iou = (1.0 - self.gr) + self.gr * iou + tobj[b, a, gj, gi] = iou # iou ratio # Classification if self.nc > 1: # cls loss (only if multiple classes) - t = torch.full_like(ps[:, 5:], self.cn, device=device) # targets + t = torch.full_like(pcls, self.cn, device=self.device) # targets t[range(n), tcls[i]] = self.cp - lcls += self.BCEcls(ps[:, 5:], t) # BCE + lcls += self.BCEcls(pcls, t) # BCE # Append targets to text file # with open('targets.txt', 'a') as file: @@ -157,22 +172,21 @@ def __call__(self, p, targets): # predictions, targets, model lcls *= self.hyp['cls'] bs = tobj.shape[0] # batch size - loss = lbox + lobj + lcls - return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach() + return (lbox + lobj + lcls) * bs, torch.cat((lbox, lobj, lcls)).detach() def build_targets(self, p, targets): # Build targets for compute_loss(), input targets(image,class,x,y,w,h) na, nt = self.na, targets.shape[0] # number of anchors, targets tcls, tbox, indices, anch = [], [], [], [] - gain = torch.ones(7, device=targets.device) # normalized to gridspace gain - ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt) + gain = torch.ones(7, device=self.device) # normalized to gridspace gain + ai = torch.arange(na, device=self.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt) targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2) # append anchor indices g = 0.5 # bias off = torch.tensor([[0, 0], [1, 0], [0, 1], [-1, 0], [0, -1], # j,k,l,m # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm - ], device=targets.device).float() * g # offsets + ], device=self.device).float() * g # offsets for i in range(self.nl): anchors = self.anchors[i] @@ -183,15 +197,15 @@ def build_targets(self, p, targets): if nt: # Matches r = t[:, :, 4:6] / anchors[:, None] # wh ratio - j = torch.max(r, 1. 
/ r).max(2)[0] < self.hyp['anchor_t'] # compare + j = torch.max(r, 1 / r).max(2)[0] < self.hyp['anchor_t'] # compare # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2)) t = t[j] # filter # Offsets gxy = t[:, 2:4] # grid xy gxi = gain[[2, 3]] - gxy # inverse - j, k = ((gxy % 1. < g) & (gxy > 1.)).T - l, m = ((gxi % 1. < g) & (gxi > 1.)).T + j, k = ((gxy % 1 < g) & (gxy > 1)).T + l, m = ((gxi % 1 < g) & (gxi > 1)).T j = torch.stack((torch.ones_like(j), j, k, l, m)) t = t.repeat((5, 1, 1))[j] offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j] @@ -200,14 +214,12 @@ def build_targets(self, p, targets): offsets = 0 # Define - b, c = t[:, :2].long().T # image, class - gxy = t[:, 2:4] # grid xy - gwh = t[:, 4:6] # grid wh + bc, gxy, gwh, a = t.unsafe_chunk(4, dim=1) # (image, class), grid xy, grid wh, anchors + a, (b, c) = a.long().view(-1), bc.long().T # anchors, image, class gij = (gxy - offsets).long() - gi, gj = gij.T # grid xy indices + gi, gj = gij.T # grid indices # Append - a = t[:, 6].long() # anchor indices indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1))) # image, anchor, grid indices tbox.append(torch.cat((gxy - gij, gwh), 1)) # box anch.append(anchors[a]) # anchors diff --git a/utils/metrics.py b/utils/metrics.py index 323c84b6c873..857fa5d81f91 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -1,13 +1,16 @@ -# Model validation metrics +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Model validation metrics +""" +import math +import warnings from pathlib import Path import matplotlib.pyplot as plt import numpy as np import torch -from . import general - def fitness(x): # Model fitness as a weighted combination of metrics @@ -15,7 +18,7 @@ def fitness(x): return (x[:, :4] * w).sum(1) -def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=()): +def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=(), eps=1e-16): """ Compute the average precision, given the recall and precision curves. Source: https://github.com/rafaelpadilla/Object-Detection-Metrics. 
# Arguments @@ -34,7 +37,7 @@ def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names tp, conf, pred_cls = tp[i], conf[i], pred_cls[i] # Find unique classes - unique_classes = np.unique(target_cls) + unique_classes, nt = np.unique(target_cls, return_counts=True) nc = unique_classes.shape[0] # number of classes, number of detections # Create Precision-Recall curve and compute AP for each class @@ -42,7 +45,7 @@ def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000)) for ci, c in enumerate(unique_classes): i = pred_cls == c - n_l = (target_cls == c).sum() # number of labels + n_l = nt[ci] # number of labels n_p = i.sum() # number of predictions if n_p == 0 or n_l == 0: @@ -53,7 +56,7 @@ def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names tpc = tp[i].cumsum(0) # Recall - recall = tpc / (n_l + 1e-16) # recall curve + recall = tpc / (n_l + eps) # recall curve r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0) # negative x, xp because xp decreases # Precision @@ -67,7 +70,9 @@ def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names py.append(np.interp(px, mrec, mpre)) # precision at mAP@0.5 # Compute F1 (harmonic mean of precision and recall) - f1 = 2 * p * r / (p + r + 1e-16) + f1 = 2 * p * r / (p + r + eps) + names = [v for k, v in names.items() if k in unique_classes] # list: only classes that have data + names = {i: v for i, v in enumerate(names)} # to dict if plot: plot_pr_curve(px, py, ap, Path(save_dir) / 'PR_curve.png', names) plot_mc_curve(px, f1, Path(save_dir) / 'F1_curve.png', names, ylabel='F1') @@ -75,7 +80,10 @@ def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names plot_mc_curve(px, r, Path(save_dir) / 'R_curve.png', names, ylabel='Recall') i = f1.mean(0).argmax() # max F1 index - return p[:, i], r[:, i], ap, f1[:, i], unique_classes.astype('int32') + p, r, f1 = p[:, i], r[:, i], f1[:, i] + tp = (r * nt).round() # true positives + fp = (tp / (p + eps) - tp).round() # false positives + return tp, fp, p, r, f1, ap, unique_classes.astype('int32') def compute_ap(recall, precision): @@ -88,8 +96,8 @@ def compute_ap(recall, precision): """ # Append sentinel values to beginning and end - mrec = np.concatenate(([0.], recall, [recall[-1] + 0.01])) - mpre = np.concatenate(([1.], precision, [0.])) + mrec = np.concatenate(([0.0], recall, [1.0])) + mpre = np.concatenate(([1.0], precision, [0.0])) # Compute the precision envelope mpre = np.flip(np.maximum.accumulate(np.flip(mpre))) @@ -127,7 +135,7 @@ def process_batch(self, detections, labels): detections = detections[detections[:, 4] > self.conf] gt_classes = labels[:, 0].int() detection_classes = detections[:, 5].int() - iou = general.box_iou(labels[:, 1:], detections[:, :4]) + iou = box_iou(labels[:, 1:], detections[:, :4]) x = torch.where(iou > self.iou_thres) if x[0].shape[0]: @@ -157,30 +165,139 @@ def process_batch(self, detections, labels): def matrix(self): return self.matrix - def plot(self, save_dir='', names=()): + def tp_fp(self): + tp = self.matrix.diagonal() # true positives + fp = self.matrix.sum(1) - tp # false positives + # fn = self.matrix.sum(0) - tp # false negatives (missed detections) + return tp[:-1], fp[:-1] # remove background class + + def plot(self, normalize=True, save_dir='', names=()): try: import seaborn as sn - array = self.matrix / (self.matrix.sum(0).reshape(1, self.nc + 1) + 1E-6) # normalize + 
array = self.matrix / ((self.matrix.sum(0).reshape(1, -1) + 1E-9) if normalize else 1) # normalize columns array[array < 0.005] = np.nan # don't annotate (would appear as 0.00) fig = plt.figure(figsize=(12, 9), tight_layout=True) - sn.set(font_scale=1.0 if self.nc < 50 else 0.8) # for label size - labels = (0 < len(names) < 99) and len(names) == self.nc # apply names to ticklabels - sn.heatmap(array, annot=self.nc < 30, annot_kws={"size": 8}, cmap='Blues', fmt='.2f', square=True, - xticklabels=names + ['background FP'] if labels else "auto", - yticklabels=names + ['background FN'] if labels else "auto").set_facecolor((1, 1, 1)) + nc, nn = self.nc, len(names) # number of classes, names + sn.set(font_scale=1.0 if nc < 50 else 0.8) # for label size + labels = (0 < nn < 99) and (nn == nc) # apply names to ticklabels + with warnings.catch_warnings(): + warnings.simplefilter('ignore') # suppress empty matrix RuntimeWarning: All-NaN slice encountered + sn.heatmap(array, annot=nc < 30, annot_kws={"size": 8}, cmap='Blues', fmt='.2f', square=True, vmin=0.0, + xticklabels=names + ['background FP'] if labels else "auto", + yticklabels=names + ['background FN'] if labels else "auto").set_facecolor((1, 1, 1)) fig.axes[0].set_xlabel('True') fig.axes[0].set_ylabel('Predicted') fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250) + plt.close() except Exception as e: - pass + print(f'WARNING: ConfusionMatrix plot failure: {e}') def print(self): for i in range(self.nc + 1): print(' '.join(map(str, self.matrix[i]))) +def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7): + # Returns the IoU of box1 to box2. box1 is 4, box2 is nx4 + box2 = box2.T + + # Get the coordinates of bounding boxes + if x1y1x2y2: # x1, y1, x2, y2 = box1 + b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] + b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] + else: # transform from xywh to xyxy + b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2 + b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2 + b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2 + b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2 + + # Intersection area + inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \ + (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0) + + # Union Area + w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps + w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps + union = w1 * h1 + w2 * h2 - inter + eps + + iou = inter / union + if CIoU or DIoU or GIoU: + cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width + ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height + if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1 + c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared + rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center distance squared + if CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 + v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2) + with torch.no_grad(): + alpha = v / (v - iou + (1 + eps)) + return iou - (rho2 / c2 + v * alpha) # CIoU + return iou - rho2 / c2 # DIoU + c_area = cw * ch + eps # convex area + return iou - (c_area - union) / c_area # GIoU https://arxiv.org/pdf/1902.09630.pdf + return iou # IoU + + +def box_iou(box1, box2): + # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py + 
""" + Return intersection-over-union (Jaccard index) of boxes. + Both sets of boxes are expected to be in (x1, y1, x2, y2) format. + Arguments: + box1 (Tensor[N, 4]) + box2 (Tensor[M, 4]) + Returns: + iou (Tensor[N, M]): the NxM matrix containing the pairwise + IoU values for every element in boxes1 and boxes2 + """ + + def box_area(box): + # box = 4xn + return (box[2] - box[0]) * (box[3] - box[1]) + + area1 = box_area(box1.T) + area2 = box_area(box2.T) + + # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2) + inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2) + return inter / (area1[:, None] + area2 - inter) # iou = inter / (area1 + area2 - inter) + + +def bbox_ioa(box1, box2, eps=1E-7): + """ Returns the intersection over box2 area given box1, box2. Boxes are x1y1x2y2 + box1: np.array of shape(4) + box2: np.array of shape(nx4) + returns: np.array of shape(n) + """ + + box2 = box2.transpose() + + # Get the coordinates of bounding boxes + b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] + b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] + + # Intersection area + inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \ + (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0) + + # box2 area + box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + eps + + # Intersection over box2 area + return inter_area / box2_area + + +def wh_iou(wh1, wh2): + # Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2 + wh1 = wh1[:, None] # [N,1,2] + wh2 = wh2[None] # [1,M,2] + inter = torch.min(wh1, wh2).prod(2) # [N,M] + return inter / (wh1.prod(2) + wh2.prod(2) - inter) # iou = inter / (area1 + area2 - inter) + + # Plots ---------------------------------------------------------------------------------------------------------------- def plot_pr_curve(px, py, ap, save_dir='pr_curve.png', names=()): @@ -201,6 +318,7 @@ def plot_pr_curve(px, py, ap, save_dir='pr_curve.png', names=()): ax.set_ylim(0, 1) plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left") fig.savefig(Path(save_dir), dpi=250) + plt.close() def plot_mc_curve(px, py, save_dir='mc_curve.png', names=(), xlabel='Confidence', ylabel='Metric'): @@ -221,3 +339,4 @@ def plot_mc_curve(px, py, save_dir='mc_curve.png', names=(), xlabel='Confidence' ax.set_ylim(0, 1) plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left") fig.savefig(Path(save_dir), dpi=250) + plt.close() diff --git a/utils/plots.py b/utils/plots.py index 8313ef210f90..a30c0faf962a 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -1,26 +1,29 @@ -# Plotting utils +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Plotting utils +""" -import glob import math import os -import random from copy import copy from pathlib import Path +from urllib.error import URLError import cv2 import matplotlib import matplotlib.pyplot as plt import numpy as np import pandas as pd -import seaborn as sns +import seaborn as sn import torch -import yaml from PIL import Image, ImageDraw, ImageFont -from utils.general import xywh2xyxy, xyxy2xywh +from utils.general import (CONFIG_DIR, FONT, LOGGER, Timeout, check_font, check_requirements, clip_coords, + increment_path, is_ascii, is_chinese, try_except, xywh2xyxy, xyxy2xywh) from utils.metrics import fitness # Settings +RANK = int(os.getenv('RANK', -1)) matplotlib.rc('font', **{'size': 11}) matplotlib.use('Agg') # for writing to files only @@ -46,6 +49,106 @@ def hex2rgb(h): # rgb order (PIL) colors = Colors() # create instance for 'from utils.plots 
import colors' +def check_pil_font(font=FONT, size=10): + # Return a PIL TrueType Font, downloading to CONFIG_DIR if necessary + font = Path(font) + font = font if font.exists() else (CONFIG_DIR / font.name) + try: + return ImageFont.truetype(str(font) if font.exists() else font.name, size) + except Exception: # download if missing + try: + check_font(font) + return ImageFont.truetype(str(font), size) + except TypeError: + check_requirements('Pillow>=8.4.0') # known issue https://github.com/ultralytics/yolov5/issues/5374 + except URLError: # not online + return ImageFont.load_default() + + +class Annotator: + if RANK in (-1, 0): + check_pil_font() # download TTF if necessary + + # YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations + def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'): + assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.' + self.pil = pil or not is_ascii(example) or is_chinese(example) + if self.pil: # use PIL + self.im = im if isinstance(im, Image.Image) else Image.fromarray(im) + self.draw = ImageDraw.Draw(self.im) + self.font = check_pil_font(font='Arial.Unicode.ttf' if is_chinese(example) else font, + size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12)) + else: # use cv2 + self.im = im + self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2) # line width + + def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)): + # Add one xyxy box to image with label + if self.pil or not is_ascii(label): + self.draw.rectangle(box, width=self.lw, outline=color) # box + if label: + w, h = self.font.getsize(label) # text width, height + outside = box[1] - h >= 0 # label fits outside box + self.draw.rectangle((box[0], + box[1] - h if outside else box[1], + box[0] + w + 1, + box[1] + 1 if outside else box[1] + h + 1), fill=color) + # self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls') # for PIL>8.0 + self.draw.text((box[0], box[1] - h if outside else box[1]), label, fill=txt_color, font=self.font) + else: # cv2 + p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3])) + cv2.rectangle(self.im, p1, p2, color, thickness=self.lw, lineType=cv2.LINE_AA) + if label: + tf = max(self.lw - 1, 1) # font thickness + w, h = cv2.getTextSize(label, 0, fontScale=self.lw / 3, thickness=tf)[0] # text width, height + outside = p1[1] - h - 3 >= 0 # label fits outside box + p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3 + cv2.rectangle(self.im, p1, p2, color, -1, cv2.LINE_AA) # filled + cv2.putText(self.im, label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2), 0, self.lw / 3, txt_color, + thickness=tf, lineType=cv2.LINE_AA) + + def rectangle(self, xy, fill=None, outline=None, width=1): + # Add rectangle to image (PIL-only) + self.draw.rectangle(xy, fill, outline, width) + + def text(self, xy, text, txt_color=(255, 255, 255)): + # Add text to image (PIL-only) + w, h = self.font.getsize(text) # text width, height + self.draw.text((xy[0], xy[1] - h + 1), text, fill=txt_color, font=self.font) + + def result(self): + # Return annotated image as array + return np.asarray(self.im) + + +def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')): + """ + x: Features to be visualized + module_type: Module type + stage: Module stage within model + n: Maximum number of feature maps to plot + save_dir: Directory to save results + """ + if 'Detect' 
not in module_type: + batch, channels, height, width = x.shape # batch, channels, height, width + if height > 1 and width > 1: + f = save_dir / f"stage{stage}_{module_type.split('.')[-1]}_features.png" # filename + + blocks = torch.chunk(x[0].cpu(), channels, dim=0) # select batch index 0, block by channels + n = min(n, channels) # number of plots + fig, ax = plt.subplots(math.ceil(n / 8), 8, tight_layout=True) # 8 rows x n/8 cols + ax = ax.ravel() + plt.subplots_adjust(wspace=0.05, hspace=0.05) + for i in range(n): + ax[i].imshow(blocks[i].squeeze()) # cmap='gray' + ax[i].axis('off') + + LOGGER.info(f'Saving {f}... ({n}/{channels})') + plt.savefig(f, dpi=300, bbox_inches='tight') + plt.close() + np.save(str(f.with_suffix('.npy')), x[0].cpu().numpy()) # npy save + + def hist2d(x, y, n=100): # 2d histogram used in labels.png and evolve.png xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n) @@ -68,54 +171,6 @@ def butter_lowpass(cutoff, fs, order): return filtfilt(b, a, data) # forward-backward filter -def plot_one_box(x, im, color=(128, 128, 128), label=None, line_thickness=3): - # Plots one bounding box on image 'im' using OpenCV - assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to plot_on_box() input image.' - tl = line_thickness or round(0.002 * (im.shape[0] + im.shape[1]) / 2) + 1 # line/font thickness - c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3])) - cv2.rectangle(im, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA) - if label: - tf = max(tl - 1, 1) # font thickness - t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0] - c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3 - cv2.rectangle(im, c1, c2, color, -1, cv2.LINE_AA) # filled - cv2.putText(im, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA) - - -def plot_one_box_PIL(box, im, color=(128, 128, 128), label=None, line_thickness=None): - # Plots one bounding box on image 'im' using PIL - im = Image.fromarray(im) - draw = ImageDraw.Draw(im) - line_thickness = line_thickness or max(int(min(im.size) / 200), 2) - draw.rectangle(box, width=line_thickness, outline=color) # plot - if label: - font = ImageFont.truetype("Arial.ttf", size=max(round(max(im.size) / 40), 12)) - txt_width, txt_height = font.getsize(label) - draw.rectangle([box[0], box[1] - txt_height + 4, box[0] + txt_width, box[1]], fill=color) - draw.text((box[0], box[1] - txt_height + 1), label, fill=(255, 255, 255), font=font) - return np.asarray(im) - - -def plot_wh_methods(): # from utils.plots import *; plot_wh_methods() - # Compares the two methods for width-height anchor multiplication - # https://github.com/ultralytics/yolov3/issues/168 - x = np.arange(-4.0, 4.0, .1) - ya = np.exp(x) - yb = torch.sigmoid(torch.from_numpy(x)).numpy() * 2 - - fig = plt.figure(figsize=(6, 3), tight_layout=True) - plt.plot(x, ya, '.-', label='YOLOv3') - plt.plot(x, yb ** 2, '.-', label='YOLOv5 ^2') - plt.plot(x, yb ** 1.6, '.-', label='YOLOv5 ^1.6') - plt.xlim(left=-4, right=4) - plt.ylim(bottom=0, top=6) - plt.xlabel('input') - plt.ylabel('output') - plt.grid() - plt.legend() - fig.savefig('comparison.png', dpi=200) - - def output_to_target(output): # Convert model output to target format [batch_id, class_id, x, y, w, h, conf] targets = [] @@ -125,82 +180,65 @@ def output_to_target(output): return np.array(targets) -def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=640, max_subplots=16): +def plot_images(images, targets, 
paths=None, fname='images.jpg', names=None, max_size=1920, max_subplots=16): # Plot image grid with labels - if isinstance(images, torch.Tensor): images = images.cpu().float().numpy() if isinstance(targets, torch.Tensor): targets = targets.cpu().numpy() - - # un-normalise if np.max(images[0]) <= 1: - images *= 255 - - tl = 3 # line thickness - tf = max(tl - 1, 1) # font thickness + images *= 255 # de-normalise (optional) bs, _, h, w = images.shape # batch size, _, height, width bs = min(bs, max_subplots) # limit plot images ns = np.ceil(bs ** 0.5) # number of subplots (square) - # Check if we should resize - scale_factor = max_size / max(h, w) - if scale_factor < 1: - h = math.ceil(scale_factor * h) - w = math.ceil(scale_factor * w) - + # Build Image mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init - for i, img in enumerate(images): + for i, im in enumerate(images): if i == max_subplots: # if last batch has fewer images than we expect break - - block_x = int(w * (i // ns)) - block_y = int(h * (i % ns)) - - img = img.transpose(1, 2, 0) - if scale_factor < 1: - img = cv2.resize(img, (w, h)) - - mosaic[block_y:block_y + h, block_x:block_x + w, :] = img + x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin + im = im.transpose(1, 2, 0) + mosaic[y:y + h, x:x + w, :] = im + + # Resize (optional) + scale = max_size / ns / max(h, w) + if scale < 1: + h = math.ceil(scale * h) + w = math.ceil(scale * w) + mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h))) + + # Annotate + fs = int((h + w) * ns * 0.01) # font size + annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True, example=names) + for i in range(i + 1): + x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin + annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders + if paths: + annotator.text((x + 5, y + 5 + h), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames if len(targets) > 0: - image_targets = targets[targets[:, 0] == i] - boxes = xywh2xyxy(image_targets[:, 2:6]).T - classes = image_targets[:, 1].astype('int') - labels = image_targets.shape[1] == 6 # labels if no conf column - conf = None if labels else image_targets[:, 6] # check for confidence presence (label vs pred) + ti = targets[targets[:, 0] == i] # image targets + boxes = xywh2xyxy(ti[:, 2:6]).T + classes = ti[:, 1].astype('int') + labels = ti.shape[1] == 6 # labels if no conf column + conf = None if labels else ti[:, 6] # check for confidence presence (label vs pred) if boxes.shape[1]: if boxes.max() <= 1.01: # if normalized with tolerance 0.01 boxes[[0, 2]] *= w # scale to pixels boxes[[1, 3]] *= h - elif scale_factor < 1: # absolute coords need scale if image scales - boxes *= scale_factor - boxes[[0, 2]] += block_x - boxes[[1, 3]] += block_y - for j, box in enumerate(boxes.T): - cls = int(classes[j]) + elif scale < 1: # absolute coords need scale if image scales + boxes *= scale + boxes[[0, 2]] += x + boxes[[1, 3]] += y + for j, box in enumerate(boxes.T.tolist()): + cls = classes[j] color = colors(cls) cls = names[cls] if names else cls if labels or conf[j] > 0.25: # 0.25 conf thresh - label = '%s' % cls if labels else '%s %.1f' % (cls, conf[j]) - plot_one_box(box, mosaic, label=label, color=color, line_thickness=tl) - - # Draw image filename labels - if paths: - label = Path(paths[i]).name[:40] # trim to 40 char - t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0] - cv2.putText(mosaic, label, (block_x + 5, block_y + t_size[1] + 5), 
0, tl / 3, [220, 220, 220], thickness=tf, - lineType=cv2.LINE_AA) - - # Image border - cv2.rectangle(mosaic, (block_x, block_y), (block_x + w, block_y + h), (255, 255, 255), thickness=3) - - if fname: - r = min(1280. / max(h, w) / ns, 1.0) # ratio to limit image size - mosaic = cv2.resize(mosaic, (int(ns * w * r), int(ns * h * r)), interpolation=cv2.INTER_AREA) - # cv2.imwrite(fname, cv2.cvtColor(mosaic, cv2.COLOR_BGR2RGB)) # cv2 save - Image.fromarray(mosaic).save(fname) # PIL save - return mosaic + label = f'{cls}' if labels else f'{cls} {conf[j]:.1f}' + annotator.box_label(box, label, color=color) + annotator.im.save(fname) # save def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''): @@ -220,9 +258,9 @@ def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''): plt.close() -def plot_test_txt(): # from utils.plots import *; plot_test() - # Plot test.txt histograms - x = np.loadtxt('test.txt', dtype=np.float32) +def plot_val_txt(): # from utils.plots import *; plot_val() + # Plot val.txt histograms + x = np.loadtxt('val.txt', dtype=np.float32) box = xyxy2xywh(x[:, :4]) cx, cy = box[:, 0], box[:, 1] @@ -244,29 +282,32 @@ def plot_targets_txt(): # from utils.plots import *; plot_targets_txt() fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True) ax = ax.ravel() for i in range(4): - ax[i].hist(x[i], bins=100, label='%.3g +/- %.3g' % (x[i].mean(), x[i].std())) + ax[i].hist(x[i], bins=100, label=f'{x[i].mean():.3g} +/- {x[i].std():.3g}') ax[i].legend() ax[i].set_title(s[i]) plt.savefig('targets.jpg', dpi=200) -def plot_study_txt(path='', x=None): # from utils.plots import *; plot_study_txt() - # Plot study.txt generated by test.py - fig, ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True) - # ax = ax.ravel() +def plot_val_study(file='', dir='', x=None): # from utils.plots import *; plot_val_study() + # Plot file=study.txt generated by val.py (or plot all study*.txt in dir) + save_dir = Path(file).parent if file else Path(dir) + plot2 = False # plot additional results + if plot2: + ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)[1].ravel() fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True) - # for f in [Path(path) / f'study_coco_{x}.txt' for x in ['yolov5s6', 'yolov5m6', 'yolov5l6', 'yolov5x6']]: - for f in sorted(Path(path).glob('study*.txt')): + # for f in [save_dir / f'study_coco_{x}.txt' for x in ['yolov5n6', 'yolov5s6', 'yolov5m6', 'yolov5l6', 'yolov5x6']]: + for f in sorted(save_dir.glob('study*.txt')): y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T x = np.arange(y.shape[1]) if x is None else np.array(x) - s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_inference (ms/img)', 't_NMS (ms/img)', 't_total (ms/img)'] - # for i in range(7): - # ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8) - # ax[i].set_title(s[i]) + if plot2: + s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_preprocess (ms/img)', 't_inference (ms/img)', 't_NMS (ms/img)'] + for i in range(7): + ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8) + ax[i].set_title(s[i]) j = y[3].argmax() + 1 - ax2.plot(y[6, 1:j], y[3, 1:j] * 1E2, '.-', linewidth=2, markersize=8, + ax2.plot(y[5, 1:j], y[3, 1:j] * 1E2, '.-', linewidth=2, markersize=8, label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO')) ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5], @@ -275,22 +316,26 @@ def plot_study_txt(path='', x=None): # from utils.plots import *; plot_study_tx ax2.grid(alpha=0.2) 
ax2.set_yticks(np.arange(20, 60, 5)) ax2.set_xlim(0, 57) - ax2.set_ylim(30, 55) + ax2.set_ylim(25, 55) ax2.set_xlabel('GPU Speed (ms/img)') ax2.set_ylabel('COCO AP val') ax2.legend(loc='lower right') - plt.savefig(str(Path(path).name) + '.png', dpi=300) + f = save_dir / 'study.png' + print(f'Saving {f}...') + plt.savefig(f, dpi=300) -def plot_labels(labels, names=(), save_dir=Path(''), loggers=None): +@try_except # known issue https://github.com/ultralytics/yolov5/issues/5395 +@Timeout(30) # known issue https://github.com/ultralytics/yolov5/issues/5611 +def plot_labels(labels, names=(), save_dir=Path('')): # plot dataset labels - print('Plotting labels... ') + LOGGER.info(f"Plotting labels to {save_dir / 'labels.jpg'}... ") c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes nc = int(c.max() + 1) # number of classes x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height']) # seaborn correlogram - sns.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9)) + sn.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9)) plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200) plt.close() @@ -298,15 +343,18 @@ def plot_labels(labels, names=(), save_dir=Path(''), loggers=None): matplotlib.use('svg') # faster ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel() y = ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8) - # [y[2].patches[i].set_color([x / 255 for x in colors(i)]) for i in range(nc)] # update colors bug #3195 + try: # color histogram bars by class + [y[2].patches[i].set_color([x / 255 for x in colors(i)]) for i in range(nc)] # known issue #3195 + except Exception: + pass ax[0].set_ylabel('instances') if 0 < len(names) < 30: ax[0].set_xticks(range(len(names))) ax[0].set_xticklabels(names, rotation=90, fontsize=10) else: ax[0].set_xlabel('classes') - sns.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9) - sns.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9) + sn.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9) + sn.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9) # rectangles labels[:, 1:3] = 0.5 # center @@ -325,34 +373,58 @@ def plot_labels(labels, names=(), save_dir=Path(''), loggers=None): matplotlib.use('Agg') plt.close() - # loggers - for k, v in loggers.items() or {}: - if k == 'wandb' and v: - v.log({"Labels": [v.Image(str(x), caption=x.name) for x in save_dir.glob('*labels*.jpg')]}, commit=False) - -def plot_evolution(yaml_file='data/hyp.finetune.yaml'): # from utils.plots import *; plot_evolution() - # Plot hyperparameter evolution results in evolve.txt - with open(yaml_file) as f: - hyp = yaml.safe_load(f) - x = np.loadtxt('evolve.txt', ndmin=2) +def plot_evolve(evolve_csv='path/to/evolve.csv'): # from utils.plots import *; plot_evolve() + # Plot evolve.csv hyp evolution results + evolve_csv = Path(evolve_csv) + data = pd.read_csv(evolve_csv) + keys = [x.strip() for x in data.columns] + x = data.values f = fitness(x) - # weights = (f - f.min()) ** 2 # for weighted results + j = np.argmax(f) # max fitness index plt.figure(figsize=(10, 12), tight_layout=True) matplotlib.rc('font', **{'size': 8}) - for i, (k, v) in enumerate(hyp.items()): - y = x[:, i + 7] - # mu = (y * weights).sum() / weights.sum() # best weighted result - mu = y[f.argmax()] # best single result + print(f'Best results from row {j} of {evolve_csv}:') + for i, k in enumerate(keys[7:]): + v = x[:, 7 + i] + mu = v[j] # best 
single result plt.subplot(6, 5, i + 1) - plt.scatter(y, f, c=hist2d(y, f, 20), cmap='viridis', alpha=.8, edgecolors='none') + plt.scatter(v, f, c=hist2d(v, f, 20), cmap='viridis', alpha=.8, edgecolors='none') plt.plot(mu, f.max(), 'k+', markersize=15) - plt.title('%s = %.3g' % (k, mu), fontdict={'size': 9}) # limit to 40 characters + plt.title(f'{k} = {mu:.3g}', fontdict={'size': 9}) # limit to 40 characters if i % 5 != 0: plt.yticks([]) - print('%15s: %.3g' % (k, mu)) - plt.savefig('evolve.png', dpi=200) - print('\nPlot saved as evolve.png') + print(f'{k:>15}: {mu:.3g}') + f = evolve_csv.with_suffix('.png') # filename + plt.savefig(f, dpi=200) + plt.close() + print(f'Saved {f}') + + +def plot_results(file='path/to/results.csv', dir=''): + # Plot training results.csv. Usage: from utils.plots import *; plot_results('path/to/results.csv') + save_dir = Path(file).parent if file else Path(dir) + fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True) + ax = ax.ravel() + files = list(save_dir.glob('results*.csv')) + assert len(files), f'No results.csv files found in {save_dir.resolve()}, nothing to plot.' + for fi, f in enumerate(files): + try: + data = pd.read_csv(f) + s = [x.strip() for x in data.columns] + x = data.values[:, 0] + for i, j in enumerate([1, 2, 3, 4, 5, 8, 9, 10, 6, 7]): + y = data.values[:, j] + # y[y == 0] = np.nan # don't show zero values + ax[i].plot(x, y, marker='.', label=f.stem, linewidth=2, markersize=8) + ax[i].set_title(s[j], fontsize=12) + # if j in [8, 9, 10]: # share train and val loss y axes + # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5]) + except Exception as e: + LOGGER.info(f'Warning: Plotting error for {f}: {e}') + ax[1].legend() + fig.savefig(save_dir / 'results.png', dpi=200) + plt.close() def profile_idetection(start=0, stop=0, labels=(), save_dir=''): @@ -381,66 +453,24 @@ def profile_idetection(start=0, stop=0, labels=(), save_dir=''): else: a.remove() except Exception as e: - print('Warning: Plotting error for %s; %s' % (f, e)) - + print(f'Warning: Plotting error for {f}; {e}') ax[1].legend() plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200) -def plot_results_overlay(start=0, stop=0): # from utils.plots import *; plot_results_overlay() - # Plot training 'results*.txt', overlaying train and val losses - s = ['train', 'train', 'train', 'Precision', 'mAP@0.5', 'val', 'val', 'val', 'Recall', 'mAP@0.5:0.95'] # legends - t = ['Box', 'Objectness', 'Classification', 'P-R', 'mAP-F1'] # titles - for f in sorted(glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')): - results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T - n = results.shape[1] # number of rows - x = range(start, min(stop, n) if stop else n) - fig, ax = plt.subplots(1, 5, figsize=(14, 3.5), tight_layout=True) - ax = ax.ravel() - for i in range(5): - for j in [i, i + 5]: - y = results[j, x] - ax[i].plot(x, y, marker='.', label=s[j]) - # y_smooth = butter_lowpass_filtfilt(y) - # ax[i].plot(x, np.gradient(y_smooth), marker='.', label=s[j]) - - ax[i].set_title(t[i]) - ax[i].legend() - ax[i].set_ylabel(f) if i == 0 else None # add filename - fig.savefig(f.replace('.txt', '.png'), dpi=200) - - -def plot_results(start=0, stop=0, bucket='', id=(), labels=(), save_dir=''): - # Plot training 'results*.txt'. 
from utils.plots import *; plot_results(save_dir='runs/train/exp') - fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True) - ax = ax.ravel() - s = ['Box', 'Objectness', 'Classification', 'Precision', 'Recall', - 'val Box', 'val Objectness', 'val Classification', 'mAP@0.5', 'mAP@0.5:0.95'] - if bucket: - # files = ['https://storage.googleapis.com/%s/results%g.txt' % (bucket, x) for x in id] - files = ['results%g.txt' % x for x in id] - c = ('gsutil cp ' + '%s ' * len(files) + '.') % tuple('gs://%s/results%g.txt' % (bucket, x) for x in id) - os.system(c) - else: - files = list(Path(save_dir).glob('results*.txt')) - assert len(files), 'No results.txt files found in %s, nothing to plot.' % os.path.abspath(save_dir) - for fi, f in enumerate(files): - try: - results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T - n = results.shape[1] # number of rows - x = range(start, min(stop, n) if stop else n) - for i in range(10): - y = results[i, x] - if i in [0, 1, 2, 5, 6, 7]: - y[y == 0] = np.nan # don't show zero loss values - # y /= y[0] # normalize - label = labels[fi] if len(labels) else f.stem - ax[i].plot(x, y, marker='.', label=label, linewidth=2, markersize=8) - ax[i].set_title(s[i]) - # if i in [5, 6, 7]: # share train and val loss y axes - # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5]) - except Exception as e: - print('Warning: Plotting error for %s; %s' % (f, e)) - - ax[1].legend() - fig.savefig(Path(save_dir) / 'results.png', dpi=200) +def save_one_box(xyxy, im, file=Path('im.jpg'), gain=1.02, pad=10, square=False, BGR=False, save=True): + # Save image crop as {file} with crop size multiple {gain} and {pad} pixels. Save and/or return crop + xyxy = torch.tensor(xyxy).view(-1, 4) + b = xyxy2xywh(xyxy) # boxes + if square: + b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # attempt rectangle to square + b[:, 2:] = b[:, 2:] * gain + pad # box wh * gain + pad + xyxy = xywh2xyxy(b).long() + clip_coords(xyxy, im.shape) + crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2]), ::(1 if BGR else -1)] + if save: + file.parent.mkdir(parents=True, exist_ok=True) # make directory + f = str(increment_path(file).with_suffix('.jpg')) + # cv2.imwrite(f, crop) # https://github.com/ultralytics/yolov5/issues/7007 chroma subsampling issue + Image.fromarray(cv2.cvtColor(crop, cv2.COLOR_BGR2RGB)).save(f, quality=95, subsampling=0) + return crop diff --git a/utils/sparse.py b/utils/sparse.py deleted file mode 100644 index e0f53c186443..000000000000 --- a/utils/sparse.py +++ /dev/null @@ -1,139 +0,0 @@ -import math - -from sparsezoo import Zoo -from sparseml.pytorch.optim import ScheduledModifierManager -from sparseml.pytorch.utils import SparsificationGroupLogger - -from utils.torch_utils import is_parallel - - -def _get_model_framework_file(model, path): - transfer_request = 'recipe_type=transfer' in path - checkpoint_available = any([file.checkpoint for file in model.framework_files]) - final_available = any([not file.checkpoint for file in model.framework_files]) - - if transfer_request and checkpoint_available: - # checkpoints are saved for transfer learning use cases, - # return checkpoint if avaiable and requested - return [file for file in model.framework_files if file.checkpoint][0] - elif final_available: - # default to returning final state, if available - return [file for file in model.framework_files if not file.checkpoint][0] - - raise ValueError(f"Could not find a valid framework file for {path}") - - -def check_download_sparsezoo_weights(path): - 
if isinstance(path, str): - if path.startswith("zoo:"): - # load model from the SparseZoo and override the path with the new download - model = Zoo.load_model_from_stub(path) - file = _get_model_framework_file(model, path) - path = file.downloaded_path() - - return path - - if isinstance(path, list): - return [check_download_sparsezoo_weights(p) for p in path] - - return path - - -class SparseMLWrapper(object): - def __init__(self, model, recipe): - self.enabled = bool(recipe) - self.model = model.module if is_parallel(model) else model - self.recipe = recipe - self.manager = ScheduledModifierManager.from_yaml(recipe) if self.enabled else None - self.logger = None - - def state_dict(self): - return { - 'recipe': str(self.manager) if self.enabled else None, - } - - def apply(self): - if not self.enabled: - return - - self.manager.apply(self.model) - - def initialize(self, start_epoch): - if not self.enabled: - return - - self.manager.initialize(self.model, start_epoch) - - def initialize_loggers(self, logger, tb_writer, wandb_logger, rank): - self.logger = logger - - if not self.enabled or rank not in [-1, 0]: - return - - def _logging_lambda(log_tag, log_val, log_vals, step, walltime): - if not wandb_logger or not wandb_logger.wandb: - return - - if log_val is not None: - wandb_logger.log({log_tag: log_val}) - - if log_vals: - wandb_logger.log(log_vals) - - self.manager.initialize_loggers([ - SparsificationGroupLogger( - lambda_func=_logging_lambda, - tensorboard=tb_writer, - ) - ]) - - if wandb_logger.wandb: - artifact = wandb_logger.wandb.Artifact('recipe', type='recipe') - with artifact.new_file('recipe.yaml') as file: - file.write(str(self.manager)) - wandb_logger.wandb.log_artifact(artifact) - - def modify(self, scaler, optimizer, model, dataloader): - if not self.enabled: - return scaler - - return self.manager.modify(model, optimizer, steps_per_epoch=len(dataloader), wrap_optim=scaler) - - def check_lr_override(self, scheduler): - # Override lr scheduler if recipe makes any LR updates - if self.enabled and self.manager.learning_rate_modifiers: - self.logger.info('Disabling LR scheduler, managing LR using SparseML recipe') - scheduler = None - - return scheduler - - def check_epoch_override(self, epochs): - # Override num epochs if recipe explicitly modifies epoch range - if self.enabled and self.manager.epoch_modifiers and self.manager.max_epochs: - epochs = self.manager.max_epochs or epochs # override num_epochs - self.logger.info(f'Overriding number of epochs from SparseML manager to {epochs}') - - return epochs - - def qat_active(self, epoch): - if not self.enabled or not self.manager.quantization_modifiers: - return False - - qat_start = min([mod.start_epoch for mod in self.manager.quantization_modifiers]) - - return qat_start < epoch + 1 - - def reset_best(self, epoch): - if not self.enabled: - return False - - # if pruning is active or quantization just started, need to reset best checkpoint - # this is in case the pruned and/or quantized model do not fully recover - pruning_start = math.floor(max([mod.start_epoch for mod in self.manager.pruning_modifiers])) \ - if self.manager.pruning_modifiers else -1 - pruning_end = math.ceil(max([mod.end_epoch for mod in self.manager.pruning_modifiers])) \ - if self.manager.pruning_modifiers else -1 - qat_start = math.floor(max([mod.start_epoch for mod in self.manager.quantization_modifiers])) \ - if self.manager.quantization_modifiers else -1 - - return (pruning_start <= epoch <= pruning_end) or epoch == qat_start diff --git 
a/utils/torch_utils.py b/utils/torch_utils.py index 36360136e891..72f8a0fd1659 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -1,145 +1,152 @@ -# YOLOv5 PyTorch utils +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +PyTorch utils +""" -import datetime -import logging import math import os import platform import subprocess import time +import warnings from contextlib import contextmanager from copy import deepcopy from pathlib import Path import torch -import torch.backends.cudnn as cudnn +import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F -import torchvision + +from utils.general import LOGGER, file_update_date, git_describe try: - import thop # for FLOPS computation + import thop # for FLOPs computation except ImportError: thop = None -logger = logging.getLogger(__name__) + +# Suppress PyTorch warnings +warnings.filterwarnings('ignore', message='User provided device_type of \'cuda\', but CUDA is not available. Disabling') @contextmanager def torch_distributed_zero_first(local_rank: int): - """ - Decorator to make all processes in distributed training wait for each local_master to do something. - """ + # Decorator to make all processes in distributed training wait for each local_master to do something if local_rank not in [-1, 0]: - torch.distributed.barrier() + dist.barrier(device_ids=[local_rank]) yield if local_rank == 0: - torch.distributed.barrier() - + dist.barrier(device_ids=[0]) -def init_torch_seeds(seed=0): - # Speed-reproducibility tradeoff https://pytorch.org/docs/stable/notes/randomness.html - torch.manual_seed(seed) - if seed == 0: # slower, more reproducible - cudnn.benchmark, cudnn.deterministic = False, True - else: # faster, less reproducible - cudnn.benchmark, cudnn.deterministic = True, False - -def date_modified(path=__file__): - # return human-readable file modification date, i.e. '2021-3-26' - t = datetime.datetime.fromtimestamp(Path(path).stat().st_mtime) - return f'{t.year}-{t.month}-{t.day}' - - -def git_describe(path=Path(__file__).parent): # path must be a directory - # return human-readable git description, i.e. v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe - s = f'git -C {path} describe --tags --long --always' +def device_count(): + # Returns number of CUDA devices available. Safe version of torch.cuda.device_count(). Only works on Linux. 
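The torch_distributed_zero_first() context manager above is the standard guard for one-time work in DDP training: rank 0 runs the expensive step first, every other local rank blocks on the barrier and then reuses the cached result. A usage sketch, assuming a DDP launch where LOCAL_RANK is set, dist.init_process_group() has already run, and the dataset name is illustrative:

import os

from utils.general import check_dataset
from utils.torch_utils import torch_distributed_zero_first

LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1))
with torch_distributed_zero_first(LOCAL_RANK):
    data_dict = check_dataset('coco128.yaml')  # download/cache happens once, on rank 0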
+ assert platform.system() == 'Linux', 'device_count() function only works on Linux' try: - return subprocess.check_output(s, shell=True, stderr=subprocess.STDOUT).decode()[:-1] - except subprocess.CalledProcessError as e: - return '' # not a git repository + cmd = 'nvidia-smi -L | wc -l' + return int(subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1]) + except Exception: + return 0 -def select_device(device='', batch_size=None): +def select_device(device='', batch_size=0, newline=True): # device = 'cpu' or '0' or '0,1,2,3' - s = f'YOLOv5 🚀 {git_describe() or date_modified()} torch {torch.__version__} ' # string - cpu = device.lower() == 'cpu' + s = f'YOLOv5 🚀 {git_describe() or file_update_date()} torch {torch.__version__} ' # string + device = str(device).strip().lower().replace('cuda:', '') # to string, 'cuda:0' to '0' + cpu = device == 'cpu' if cpu: os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False elif device: # non-cpu device requested - os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable - assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested' # check availability + os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable - must be before assert is_available() + assert torch.cuda.is_available() and torch.cuda.device_count() >= len(device.replace(',', '')), \ + f"Invalid CUDA '--device {device}' requested, use '--device cpu' or pass valid CUDA device(s)" cuda = not cpu and torch.cuda.is_available() if cuda: - devices = device.split(',') if device else range(torch.cuda.device_count()) # i.e. 0,1,6,7 + devices = device.split(',') if device else '0' # range(torch.cuda.device_count()) # i.e. 0,1,6,7 n = len(devices) # device count - if n > 1 and batch_size: # check batch_size is divisible by device_count + if n > 1 and batch_size > 0: # check batch_size is divisible by device_count assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}' - space = ' ' * len(s) + space = ' ' * (len(s) + 1) for i, d in enumerate(devices): p = torch.cuda.get_device_properties(i) - s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2}MB)\n" # bytes to MB + s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / (1 << 20):.0f}MiB)\n" # bytes to MB else: s += 'CPU\n' - logger.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe + if not newline: + s = s.rstrip() + LOGGER.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe return torch.device('cuda:0' if cuda else 'cpu') -def time_synchronized(): - # pytorch-accurate time +def time_sync(): + # PyTorch-accurate time if torch.cuda.is_available(): torch.cuda.synchronize() return time.time() -def profile(x, ops, n=100, device=None): - # profile a pytorch module or list of modules. 
Example usage: - # x = torch.randn(16, 3, 640, 640) # input +def profile(input, ops, n=10, device=None): + # YOLOv5 speed/memory/FLOPs profiler + # + # Usage: + # input = torch.randn(16, 3, 640, 640) # m1 = lambda x: x * torch.sigmoid(x) # m2 = nn.SiLU() - # profile(x, [m1, m2], n=100) # profile speed over 100 iterations - - device = device or torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') - x = x.to(device) - x.requires_grad = True - print(torch.__version__, device.type, torch.cuda.get_device_properties(0) if device.type == 'cuda' else '') - print(f"\n{'Params':>12s}{'GFLOPS':>12s}{'forward (ms)':>16s}{'backward (ms)':>16s}{'input':>24s}{'output':>24s}") - for m in ops if isinstance(ops, list) else [ops]: - m = m.to(device) if hasattr(m, 'to') else m # device - m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m # type - dtf, dtb, t = 0., 0., [0., 0., 0.] # dt forward, backward - try: - flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # GFLOPS - except: - flops = 0 - - for _ in range(n): - t[0] = time_synchronized() - y = m(x) - t[1] = time_synchronized() + # profile(input, [m1, m2], n=100) # profile over 100 iterations + + results = [] + device = device or select_device() + print(f"{'Params':>12s}{'GFLOPs':>12s}{'GPU_mem (GB)':>14s}{'forward (ms)':>14s}{'backward (ms)':>14s}" + f"{'input':>24s}{'output':>24s}") + + for x in input if isinstance(input, list) else [input]: + x = x.to(device) + x.requires_grad = True + for m in ops if isinstance(ops, list) else [ops]: + m = m.to(device) if hasattr(m, 'to') else m # device + m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m + tf, tb, t = 0, 0, [0, 0, 0] # dt forward, backward try: - _ = y.sum().backward() - t[2] = time_synchronized() - except: # no backward method - t[2] = float('nan') - dtf += (t[1] - t[0]) * 1000 / n # ms per op forward - dtb += (t[2] - t[1]) * 1000 / n # ms per op backward + flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # GFLOPs + except Exception: + flops = 0 - s_in = tuple(x.shape) if isinstance(x, torch.Tensor) else 'list' - s_out = tuple(y.shape) if isinstance(y, torch.Tensor) else 'list' - p = sum(list(x.numel() for x in m.parameters())) if isinstance(m, nn.Module) else 0 # parameters - print(f'{p:12}{flops:12.4g}{dtf:16.4g}{dtb:16.4g}{str(s_in):>24s}{str(s_out):>24s}') + try: + for _ in range(n): + t[0] = time_sync() + y = m(x) + t[1] = time_sync() + try: + _ = (sum(yi.sum() for yi in y) if isinstance(y, list) else y).sum().backward() + t[2] = time_sync() + except Exception: # no backward method + # print(e) # for debug + t[2] = float('nan') + tf += (t[1] - t[0]) * 1000 / n # ms per op forward + tb += (t[2] - t[1]) * 1000 / n # ms per op backward + mem = torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0 # (GB) + s_in = tuple(x.shape) if isinstance(x, torch.Tensor) else 'list' + s_out = tuple(y.shape) if isinstance(y, torch.Tensor) else 'list' + p = sum(list(x.numel() for x in m.parameters())) if isinstance(m, nn.Module) else 0 # parameters + print(f'{p:12}{flops:12.4g}{mem:>14.3f}{tf:14.4g}{tb:14.4g}{str(s_in):>24s}{str(s_out):>24s}') + results.append([p, flops, mem, tf, tb, s_in, s_out]) + except Exception as e: + print(e) + results.append(None) + torch.cuda.empty_cache() + return results def is_parallel(model): + # Returns True if model is of type DP or DDP return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel) 
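The rewritten profile() now returns a per-op results list, each entry [params, GFLOPs, GPU_mem, t_forward, t_backward, shape_in, shape_out] or None on failure; a small usage sketch with inputs sized so it also runs on CPU:

import torch
import torch.nn as nn

from utils.torch_utils import profile

x = torch.randn(8, 3, 64, 64)  # small batch keeps the sketch fast without a GPU
results = profile(x, [nn.SiLU(), nn.Hardswish()], n=10)
for r in results:
    if r is not None:  # None is appended when an op fails to profile
        p, flops, mem, tf, tb, s_in, s_out = r
        print(f'{p} params, {flops:.3f} GFLOPs, {tf:.2f}/{tb:.2f} ms fwd/bwd, in={s_in}')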
-def intersect_dicts(da, db, exclude=()): - # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values - return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape} +def de_parallel(model): + # De-parallelize a model: returns single-GPU model if model is of type DP or DDP + return model.module if is_parallel(model) else model def initialize_weights(model): @@ -150,7 +157,7 @@ def initialize_weights(model): elif t is nn.BatchNorm2d: m.eps = 1e-3 m.momentum = 0.03 - elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6]: + elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]: m.inplace = True @@ -161,7 +168,7 @@ def find_modules(model, mclass=nn.Conv2d): def sparsity(model): # Return global model sparsity - a, b = 0., 0. + a, b = 0, 0 for p in model.parameters(): a += p.numel() b += (p == 0).sum() @@ -180,7 +187,7 @@ def prune(model, amount=0.3): def fuse_conv_and_bn(conv, bn): - # Fuse convolution and batchnorm layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/ + # Fuse Conv2d() and BatchNorm2d() layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/ fusedconv = nn.Conv2d(conv.in_channels, conv.out_channels, kernel_size=conv.kernel_size, @@ -189,12 +196,12 @@ def fuse_conv_and_bn(conv, bn): groups=conv.groups, bias=True).requires_grad_(False).to(conv.weight.device) - # prepare filters + # Prepare filters w_conv = conv.weight.clone().view(conv.out_channels, -1) w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var))) fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape)) - # prepare spatial bias + # Prepare spatial bias b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps)) fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn) @@ -207,46 +214,28 @@ def model_info(model, verbose=False, img_size=640): n_p = sum(x.numel() for x in model.parameters()) # number parameters n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients if verbose: - print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma')) + print(f"{'layer':>5} {'name':>40} {'gradient':>9} {'parameters':>12} {'shape':>20} {'mu':>10} {'sigma':>10}") for i, (name, p) in enumerate(model.named_parameters()): name = name.replace('module_list.', '') print('%5g %40s %9s %12g %20s %10.3g %10.3g' % (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std())) - try: # FLOPS + try: # FLOPs from thop import profile stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32 img = torch.zeros((1, model.yaml.get('ch', 3), stride, stride), device=next(model.parameters()).device) # input - flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2 # stride GFLOPS + flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2 # stride GFLOPs img_size = img_size if isinstance(img_size, list) else [img_size, img_size] # expand if int/float - fs = ', %.1f GFLOPS' % (flops * img_size[0] / stride * img_size[1] / stride) # 640x640 GFLOPS + fs = ', %.1f GFLOPs' % (flops * img_size[0] / stride * img_size[1] / stride) # 640x640 GFLOPs except (ImportError, Exception): fs = '' - logger.info(f"Model Summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}") - - -def 
load_classifier(name='resnet101', n=2): - # Loads a pretrained model reshaped to n-class output - model = torchvision.models.__dict__[name](pretrained=True) - - # ResNet model properties - # input_size = [3, 224, 224] - # input_space = 'RGB' - # input_range = [0, 1] - # mean = [0.485, 0.456, 0.406] - # std = [0.229, 0.224, 0.225] - - # Reshape output to n classes - filters = model.fc.weight.shape[1] - model.fc.bias = nn.Parameter(torch.zeros(n), requires_grad=True) - model.fc.weight = nn.Parameter(torch.zeros(n, filters), requires_grad=True) - model.fc.out_features = n - return model + name = Path(model.yaml_file).stem.replace('yolov5', 'YOLOv5') if hasattr(model, 'yaml_file') else 'Model' + LOGGER.info(f"{name} summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}") def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416) - # scales img(bs,3,y,x) by ratio constrained to gs-multiple + # Scales img(bs,3,y,x) by ratio constrained to gs-multiple if ratio == 1.0: return img else: @@ -254,7 +243,7 @@ def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416) s = (int(h * ratio), int(w * ratio)) # new size img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize if not same_shape: # pad/crop img - h, w = [math.ceil(x * ratio / gs) * gs for x in (h, w)] + h, w = (math.ceil(x * ratio / gs) * gs for x in (h, w)) return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean @@ -267,65 +256,57 @@ def copy_attr(a, b, include=(), exclude=()): setattr(a, k, v) +class EarlyStopping: + # YOLOv5 simple early stopper + def __init__(self, patience=30): + self.best_fitness = 0.0 # i.e. mAP + self.best_epoch = 0 + self.patience = patience or float('inf') # epochs to wait after fitness stops improving to stop + self.possible_stop = False # possible stop may occur next epoch + + def __call__(self, epoch, fitness): + if fitness >= self.best_fitness: # >= 0 to allow for early zero-fitness stage of training + self.best_epoch = epoch + self.best_fitness = fitness + delta = epoch - self.best_epoch # epochs without improvement + self.possible_stop = delta >= (self.patience - 1) # possible stop may occur next epoch + stop = delta >= self.patience # stop training if patience exceeded + if stop: + LOGGER.info(f'Stopping training early as no improvement observed in last {self.patience} epochs. ' + f'Best results observed at epoch {self.best_epoch}, best model saved as best.pt.\n' + f'To update EarlyStopping(patience={self.patience}) pass a new patience value, ' + f'i.e. `python train.py --patience 300` or use `--patience 0` to disable EarlyStopping.') + return stop + + class ModelEMA: - """ Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models - Keep a moving average of everything in the model state_dict (parameters and buffers). - This is intended to allow functionality like - https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage - A smoothed version of the weights is necessary for some training schemes to perform well. - This class is sensitive where it is initialized in the sequence of model init, - GPU assignment and distributed training wrappers. 
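The new EarlyStopping class added above tracks the best fitness seen so far and signals a stop after `patience` epochs without improvement; a driving-loop sketch, where train_and_validate() is a hypothetical stand-in for one train plus val epoch returning a fitness scalar:

from utils.torch_utils import EarlyStopping

stopper = EarlyStopping(patience=30)
for epoch in range(300):
    fi = train_and_validate(epoch)  # hypothetical helper returning fitness, e.g. weighted mAP
    if stopper(epoch, fi):          # True once `patience` epochs pass with no new best fitness
        break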
+ """ Updated Exponential Moving Average (EMA) from https://github.com/rwightman/pytorch-image-models + Keeps a moving average of everything in the model state_dict (parameters and buffers) + For EMA details see https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage """ - def __init__(self, model, decay=0.9999, updates=0, enabled=True): + def __init__(self, model, decay=0.9999, tau=2000, updates=0): # Create EMA - self._model = model - self._ema = deepcopy(model.module if is_parallel(model) else model).eval() # FP32 EMA + self.ema = deepcopy(de_parallel(model)).eval() # FP32 EMA # if next(model.parameters()).device.type != 'cpu': # self.ema.half() # FP16 EMA self.updates = updates # number of EMA updates - self.decay = lambda x: decay * (1 - math.exp(-x / 2000)) # decay exponential ramp (to help early epochs) - self.enabled = enabled - for p in self._ema.parameters(): + self.decay = lambda x: decay * (1 - math.exp(-x / tau)) # decay exponential ramp (to help early epochs) + for p in self.ema.parameters(): p.requires_grad_(False) - @property - def ema(self): - if not self.enabled: - return deepcopy(self._model.module if is_parallel(self._model) else self._model).eval() - return self._ema - - def state_dict(self, pickle=True): - ema = deepcopy(self.ema).float() - return { - 'ema': ema if pickle else ema.state_dict(), - 'updates': self.updates, - } - - def load_state_dict(self, state_dict): - if not self.enabled: - return - pickled = isinstance(state_dict['ema'], nn.Module) - self.ema.load_state_dict(state_dict['ema'].float().state_dict() if pickled else state_dict['ema']) - self.updates = state_dict['updates'] - def update(self, model): - self._model = model - if not self.enabled: - return - + # Update EMA parameters with torch.no_grad(): - msd = model.module.state_dict() if is_parallel(model) else model.state_dict() # model state_dict self.updates += 1 d = self.decay(self.updates) + msd = de_parallel(model).state_dict() # model state_dict for k, v in self.ema.state_dict().items(): if v.dtype.is_floating_point: - mv = msd[k].detach() v *= d - v += (1. 
- d) * mv - v *= mv != 0 # preserve pruned parameters in model (equal to 0) + v += (1 - d) * msd[k].detach() def update_attr(self, model, include=(), exclude=('process_group', 'reducer')): # Update EMA attributes - copy_attr(self.ema, model, include, exclude) \ No newline at end of file + copy_attr(self.ema, model, include, exclude) diff --git a/utils/wandb_logging/__init__.py b/utils/wandb_logging/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/utils/wandb_logging/wandb_utils.py b/utils/wandb_logging/wandb_utils.py deleted file mode 100644 index 76b91aa11291..000000000000 --- a/utils/wandb_logging/wandb_utils.py +++ /dev/null @@ -1,320 +0,0 @@ -"""Utilities and tools for tracking runs with Weights & Biases.""" -import json -import sys -from pathlib import Path - -import torch -import yaml -from tqdm import tqdm - -sys.path.append(str(Path(__file__).parent.parent.parent)) # add utils/ to path -from utils.datasets import LoadImagesAndLabels -from utils.datasets import img2label_paths -from utils.general import colorstr, xywh2xyxy, check_dataset, check_file - -try: - import wandb - from wandb import init, finish -except ImportError: - wandb = None - -WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' - - -def remove_prefix(from_string, prefix=WANDB_ARTIFACT_PREFIX): - return from_string[len(prefix):] - - -def check_wandb_config_file(data_config_file): - wandb_config = '_wandb.'.join(data_config_file.rsplit('.', 1)) # updated data.yaml path - if Path(wandb_config).is_file(): - return wandb_config - return data_config_file - - -def get_run_info(run_path): - run_path = Path(remove_prefix(run_path, WANDB_ARTIFACT_PREFIX)) - run_id = run_path.stem - project = run_path.parent.stem - entity = run_path.parent.parent.stem - model_artifact_name = 'run_' + run_id + '_model' - return entity, project, run_id, model_artifact_name - - -def check_wandb_resume(opt): - process_wandb_config_ddp_mode(opt) if opt.global_rank not in [-1, 0] else None - if isinstance(opt.resume, str): - if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): - if opt.global_rank not in [-1, 0]: # For resuming DDP runs - entity, project, run_id, model_artifact_name = get_run_info(opt.resume) - api = wandb.Api() - artifact = api.artifact(entity + '/' + project + '/' + model_artifact_name + ':latest') - modeldir = artifact.download() - opt.weights = str(Path(modeldir) / "last.pt") - return True - return None - - -def process_wandb_config_ddp_mode(opt): - with open(check_file(opt.data)) as f: - data_dict = yaml.safe_load(f) # data dict - train_dir, val_dir = None, None - if isinstance(data_dict['train'], str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX): - api = wandb.Api() - train_artifact = api.artifact(remove_prefix(data_dict['train']) + ':' + opt.artifact_alias) - train_dir = train_artifact.download() - train_path = Path(train_dir) / 'data/images/' - data_dict['train'] = str(train_path) - - if isinstance(data_dict['val'], str) and data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX): - api = wandb.Api() - val_artifact = api.artifact(remove_prefix(data_dict['val']) + ':' + opt.artifact_alias) - val_dir = val_artifact.download() - val_path = Path(val_dir) / 'data/images/' - data_dict['val'] = str(val_path) - if train_dir or val_dir: - ddp_data_path = str(Path(val_dir) / 'wandb_local_data.yaml') - with open(ddp_data_path, 'w') as f: - yaml.safe_dump(data_dict, f) - opt.data = ddp_data_path - - -class WandbLogger(): - """Log training runs, datasets, models, and predictions to Weights & Biases. 
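The EMA update above uses a warmup-ramped decay, d = decay * (1 - exp(-updates / tau)), so the average tracks the raw weights closely early in training and only later approaches the nominal 0.9999; the ramp in numbers:

import math

decay, tau = 0.9999, 2000
for updates in (1, 100, 2000, 10000):
    d = decay * (1 - math.exp(-updates / tau))
    print(f'update {updates:>5}: d = {d:.4f}')  # 0.0005, 0.0488, 0.6321, 0.9932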
- - This logger sends information to W&B at wandb.ai. By default, this information - includes hyperparameters, system configuration and metrics, model metrics, - and basic data metrics and analyses. - - By providing additional command line arguments to train.py, datasets, - models and predictions can also be logged. - - For more on how this logger is used, see the Weights & Biases documentation: - https://docs.wandb.com/guides/integrations/yolov5 - """ - def __init__(self, opt, name, run_id, data_dict, job_type='Training'): - # Pre-training routine -- - self.job_type = job_type - self.wandb, self.wandb_run, self.data_dict = wandb, None if not wandb else wandb.run, data_dict - # It's more elegant to stick to 1 wandb.init call, but useful config data is overwritten in the WandbLogger's wandb.init call - if isinstance(opt.resume, str): # checks resume from artifact - if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): - entity, project, run_id, model_artifact_name = get_run_info(opt.resume) - model_artifact_name = WANDB_ARTIFACT_PREFIX + model_artifact_name - assert wandb, 'install wandb to resume wandb runs' - # Resume wandb-artifact:// runs here| workaround for not overwriting wandb.config - self.wandb_run = wandb.init(id=run_id, project=project, entity=entity, resume='allow') - opt.resume = model_artifact_name - elif self.wandb: - self.wandb_run = wandb.init(config=opt, - resume="allow", - project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem, - entity=opt.entity, - name=name, - job_type=job_type, - id=run_id) if not wandb.run else wandb.run - if self.wandb_run: - if self.job_type == 'Training': - if not opt.resume: - wandb_data_dict = self.check_and_upload_dataset(opt) if opt.upload_dataset else data_dict - # Info useful for resuming from artifacts - self.wandb_run.config.update({ - 'opt': vars(opt), - 'data_dict': wandb_data_dict - }, allow_val_change=True) - self.data_dict = self.setup_training(opt, data_dict) - if self.job_type == 'Dataset Creation': - self.data_dict = self.check_and_upload_dataset(opt) - else: - prefix = colorstr('wandb: ') - print(f"{prefix}Install Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)") - - def check_and_upload_dataset(self, opt): - assert wandb, 'Install wandb to upload dataset' - check_dataset(self.data_dict) - config_path = self.log_dataset_artifact(check_file(opt.data), - opt.single_cls, - 'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem) - print("Created dataset config file ", config_path) - with open(config_path) as f: - wandb_data_dict = yaml.safe_load(f) - return wandb_data_dict - - def setup_training(self, opt, data_dict): - self.log_dict, self.current_epoch, self.log_imgs = {}, 0, 16 # Logging Constants - self.bbox_interval = opt.bbox_interval - if isinstance(opt.resume, str): - modeldir, _ = self.download_model_artifact(opt) - if modeldir: - self.weights = Path(modeldir) / "last.pt" - config = self.wandb_run.config - opt.weights, opt.save_period, opt.batch_size, opt.bbox_interval, opt.epochs, opt.hyp = str( - self.weights), config.save_period, config.total_batch_size, config.bbox_interval, config.epochs, \ - config.opt['hyp'] - data_dict = dict(self.wandb_run.config.data_dict) # eliminates the need for config file to resume - if 'val_artifact' not in self.__dict__: # If --upload_dataset is set, use the existing artifact, don't download - self.train_artifact_path, self.train_artifact = self.download_dataset_artifact(data_dict.get('train'), - opt.artifact_alias) - 
self.val_artifact_path, self.val_artifact = self.download_dataset_artifact(data_dict.get('val'), - opt.artifact_alias) - self.result_artifact, self.result_table, self.val_table, self.weights = None, None, None, None - if self.train_artifact_path is not None: - train_path = Path(self.train_artifact_path) / 'data/images/' - data_dict['train'] = str(train_path) - if self.val_artifact_path is not None: - val_path = Path(self.val_artifact_path) / 'data/images/' - data_dict['val'] = str(val_path) - self.val_table = self.val_artifact.get("val") - self.map_val_table_path() - if self.val_artifact is not None: - self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") - self.result_table = wandb.Table(["epoch", "id", "prediction", "avg_confidence"]) - if opt.bbox_interval == -1: - self.bbox_interval = opt.bbox_interval = (opt.epochs // 10) if opt.epochs > 10 else 1 - return data_dict - - def download_dataset_artifact(self, path, alias): - if isinstance(path, str) and path.startswith(WANDB_ARTIFACT_PREFIX): - artifact_path = Path(remove_prefix(path, WANDB_ARTIFACT_PREFIX) + ":" + alias) - dataset_artifact = wandb.use_artifact(artifact_path.as_posix()) - assert dataset_artifact is not None, "'Error: W&B dataset artifact doesn\'t exist'" - datadir = dataset_artifact.download() - return datadir, dataset_artifact - return None, None - - def download_model_artifact(self, opt): - if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): - model_artifact = wandb.use_artifact(remove_prefix(opt.resume, WANDB_ARTIFACT_PREFIX) + ":latest") - assert model_artifact is not None, 'Error: W&B model artifact doesn\'t exist' - modeldir = model_artifact.download() - epochs_trained = model_artifact.metadata.get('epochs_trained') - total_epochs = model_artifact.metadata.get('total_epochs') - is_finished = total_epochs is None - assert not is_finished, 'training is finished, can only resume incomplete runs.' 
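For context on the resume logic being deleted here: get_run_info() recovered entity, project and run id purely from the components of a wandb-artifact:// path. The convention, sketched with a hypothetical entity/project/run id:

from pathlib import Path

run_path = Path('wandb-artifact://my-team/YOLOv5/3z1abc99'.replace('wandb-artifact://', ''))
run_id = run_path.stem                       # '3z1abc99'
project = run_path.parent.stem               # 'YOLOv5'
entity = run_path.parent.parent.stem         # 'my-team'
model_artifact_name = f'run_{run_id}_model'  # 'run_3z1abc99_model'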
- return modeldir, model_artifact - return None, None - - def log_model(self, path, opt, epoch, fitness_score, best_model=False): - model_artifact = wandb.Artifact('run_' + wandb.run.id + '_model', type='model', metadata={ - 'original_url': str(path), - 'epochs_trained': epoch + 1, - 'save period': opt.save_period, - 'project': opt.project, - 'total_epochs': opt.epochs, - 'fitness_score': fitness_score - }) - model_artifact.add_file(str(path / 'last.pt'), name='last.pt') - wandb.log_artifact(model_artifact, - aliases=['latest', 'last', 'epoch ' + str(self.current_epoch), 'best' if best_model else '']) - print("Saving model artifact on epoch ", epoch + 1) - - def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=False): - with open(data_file) as f: - data = yaml.safe_load(f) # data dict - nc, names = (1, ['item']) if single_cls else (int(data['nc']), data['names']) - names = {k: v for k, v in enumerate(names)} # to index dictionary - self.train_artifact = self.create_dataset_table(LoadImagesAndLabels( - data['train'], rect=True, batch_size=1), names, name='train') if data.get('train') else None - self.val_artifact = self.create_dataset_table(LoadImagesAndLabels( - data['val'], rect=True, batch_size=1), names, name='val') if data.get('val') else None - if data.get('train'): - data['train'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'train') - if data.get('val'): - data['val'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'val') - path = data_file if overwrite_config else '_wandb.'.join(data_file.rsplit('.', 1)) # updated data.yaml path - data.pop('download', None) - with open(path, 'w') as f: - yaml.safe_dump(data, f) - - if self.job_type == 'Training': # builds correct artifact pipeline graph - self.wandb_run.use_artifact(self.val_artifact) - self.wandb_run.use_artifact(self.train_artifact) - self.val_artifact.wait() - self.val_table = self.val_artifact.get('val') - self.map_val_table_path() - else: - self.wandb_run.log_artifact(self.train_artifact) - self.wandb_run.log_artifact(self.val_artifact) - return path - - def map_val_table_path(self): - self.val_table_map = {} - print("Mapping dataset") - for i, data in enumerate(tqdm(self.val_table.data)): - self.val_table_map[data[3]] = data[0] - - def create_dataset_table(self, dataset, class_to_id, name='dataset'): - # TODO: Explore multiprocessing to slpit this loop parallely| This is essential for speeding up the the logging - artifact = wandb.Artifact(name=name, type="dataset") - img_files = tqdm([dataset.path]) if isinstance(dataset.path, str) and Path(dataset.path).is_dir() else None - img_files = tqdm(dataset.img_files) if not img_files else img_files - for img_file in img_files: - if Path(img_file).is_dir(): - artifact.add_dir(img_file, name='data/images') - labels_path = 'labels'.join(dataset.path.rsplit('images', 1)) - artifact.add_dir(labels_path, name='data/labels') - else: - artifact.add_file(img_file, name='data/images/' + Path(img_file).name) - label_file = Path(img2label_paths([img_file])[0]) - artifact.add_file(str(label_file), - name='data/labels/' + label_file.name) if label_file.exists() else None - table = wandb.Table(columns=["id", "train_image", "Classes", "name"]) - class_set = wandb.Classes([{'id': id, 'name': name} for id, name in class_to_id.items()]) - for si, (img, labels, paths, shapes) in enumerate(tqdm(dataset)): - box_data, img_classes = [], {} - for cls, *xywh in labels[:, 1:].tolist(): - cls = int(cls) - box_data.append({"position": {"middle": [xywh[0], xywh[1]], "width": xywh[2], 
"height": xywh[3]}, - "class_id": cls, - "box_caption": "%s" % (class_to_id[cls])}) - img_classes[cls] = class_to_id[cls] - boxes = {"ground_truth": {"box_data": box_data, "class_labels": class_to_id}} # inference-space - table.add_data(si, wandb.Image(paths, classes=class_set, boxes=boxes), json.dumps(img_classes), - Path(paths).name) - artifact.add(table, name) - return artifact - - def log_training_progress(self, predn, path, names): - if self.val_table and self.result_table: - class_set = wandb.Classes([{'id': id, 'name': name} for id, name in names.items()]) - box_data = [] - total_conf = 0 - for *xyxy, conf, cls in predn.tolist(): - if conf >= 0.25: - box_data.append( - {"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, - "class_id": int(cls), - "box_caption": "%s %.3f" % (names[cls], conf), - "scores": {"class_score": conf}, - "domain": "pixel"}) - total_conf = total_conf + conf - boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space - id = self.val_table_map[Path(path).name] - self.result_table.add_data(self.current_epoch, - id, - wandb.Image(self.val_table.data[id][1], boxes=boxes, classes=class_set), - total_conf / max(1, len(box_data)) - ) - - def log(self, log_dict): - if self.wandb_run: - for key, value in log_dict.items(): - self.log_dict[key] = value - - def end_epoch(self, best_result=False): - if self.wandb_run: - wandb.log(self.log_dict) - self.log_dict = {} - if self.result_artifact: - train_results = wandb.JoinedTable(self.val_table, self.result_table, "id") - self.result_artifact.add(train_results, 'result') - wandb.log_artifact(self.result_artifact, aliases=['latest', 'last', 'epoch ' + str(self.current_epoch), - ('best' if best_result else '')]) - self.result_table = wandb.Table(["epoch", "id", "prediction", "avg_confidence"]) - self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") - - def finish_run(self): - if self.wandb_run: - if self.log_dict: - wandb.log(self.log_dict) - wandb.run.finish() diff --git a/val.py b/val.py new file mode 100644 index 000000000000..2dd2aec679f9 --- /dev/null +++ b/val.py @@ -0,0 +1,381 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Validate a trained YOLOv5 model accuracy on a custom dataset + +Usage: + $ python path/to/val.py --weights yolov5s.pt --data coco128.yaml --img 640 + +Usage - formats: + $ python path/to/val.py --weights yolov5s.pt # PyTorch + yolov5s.torchscript # TorchScript + yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn + yolov5s.xml # OpenVINO + yolov5s.engine # TensorRT + yolov5s.mlmodel # CoreML (MacOS-only) + yolov5s_saved_model # TensorFlow SavedModel + yolov5s.pb # TensorFlow GraphDef + yolov5s.tflite # TensorFlow Lite + yolov5s_edgetpu.tflite # TensorFlow Edge TPU +""" + +import argparse +import json +import os +import sys +from pathlib import Path +from threading import Thread + +import numpy as np +import torch +from tqdm import tqdm + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[0] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative + +from models.common import DetectMultiBackend +from utils.callbacks import Callbacks +from utils.datasets import create_dataloader +from utils.general import (LOGGER, box_iou, check_dataset, check_img_size, check_requirements, check_yaml, + coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, print_args, + scale_coords, xywh2xyxy, 
xyxy2xywh) +from utils.metrics import ConfusionMatrix, ap_per_class +from utils.plots import output_to_target, plot_images, plot_val_study +from utils.torch_utils import select_device, time_sync + + +def save_one_txt(predn, save_conf, shape, file): + # Save one txt result + gn = torch.tensor(shape)[[1, 0, 1, 0]] # normalization gain whwh + for *xyxy, conf, cls in predn.tolist(): + xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh + line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format + with open(file, 'a') as f: + f.write(('%g ' * len(line)).rstrip() % line + '\n') + + +def save_one_json(predn, jdict, path, class_map): + # Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236} + image_id = int(path.stem) if path.stem.isnumeric() else path.stem + box = xyxy2xywh(predn[:, :4]) # xywh + box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner + for p, b in zip(predn.tolist(), box.tolist()): + jdict.append({'image_id': image_id, + 'category_id': class_map[int(p[5])], + 'bbox': [round(x, 3) for x in b], + 'score': round(p[4], 5)}) + + +def process_batch(detections, labels, iouv): + """ + Return correct predictions matrix. Both sets of boxes are in (x1, y1, x2, y2) format. + Arguments: + detections (Array[N, 6]), x1, y1, x2, y2, conf, class + labels (Array[M, 5]), class, x1, y1, x2, y2 + Returns: + correct (Array[N, 10]), for 10 IoU levels + """ + correct = torch.zeros(detections.shape[0], iouv.shape[0], dtype=torch.bool, device=iouv.device) + iou = box_iou(labels[:, 1:], detections[:, :4]) + x = torch.where((iou >= iouv[0]) & (labels[:, 0:1] == detections[:, 5])) # IoU above threshold and classes match + if x[0].shape[0]: + matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() # [label, detection, iou] + if x[0].shape[0] > 1: + matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 1], return_index=True)[1]] + # matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 0], return_index=True)[1]] + matches = torch.from_numpy(matches).to(iouv.device) + correct[matches[:, 1].long()] = matches[:, 2:3] >= iouv + return correct + + +@torch.no_grad() +def run(data, + weights=None, # model.pt path(s) + batch_size=32, # batch size + imgsz=640, # inference size (pixels) + conf_thres=0.001, # confidence threshold + iou_thres=0.6, # NMS IoU threshold + task='val', # train, val, test, speed or study + device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu + workers=8, # max dataloader workers (per RANK in DDP mode) + single_cls=False, # treat as single-class dataset + augment=False, # augmented inference + verbose=False, # verbose output + save_txt=False, # save results to *.txt + save_hybrid=False, # save label+prediction hybrid results to *.txt + save_conf=False, # save confidences in --save-txt labels + save_json=False, # save a COCO-JSON results file + project=ROOT / 'runs/val', # save to project/name + name='exp', # save to project/name + exist_ok=False, # existing project/name ok, do not increment + half=True, # use FP16 half-precision inference + dnn=False, # use OpenCV DNN for ONNX inference + model=None, + dataloader=None, + save_dir=Path(''), + plots=True, + callbacks=Callbacks(), + compute_loss=None, + ): + # Initialize/load model and set device + training = model is not None + if training: # called by train.py + device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model + half &= device.type != 'cpu' # half precision only supported on CUDA + model.half() if half else model.float() + else: # called directly + device = select_device(device, batch_size=batch_size) + + # Directories + save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run + (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir + + # Load model + model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) + stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine + imgsz = check_img_size(imgsz, s=stride) # check image size + half = model.fp16 # FP16 supported on limited backends with CUDA + if engine: + batch_size = model.batch_size + else: + device = model.device + if not (pt or jit): + batch_size = 1 # export.py models default to batch-size 1 + LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models') + + # Data + data = check_dataset(data) # check + + # Configure + model.eval() + cuda = device.type != 'cpu' + is_coco = isinstance(data.get('val'), str) and data['val'].endswith('coco/val2017.txt') # COCO dataset + nc = 1 if single_cls else int(data['nc']) # number of classes + iouv = torch.linspace(0.5, 0.95, 10, device=device) # iou vector for mAP@0.5:0.95 + niou = iouv.numel() + + # Dataloader + if not training: + model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz)) # warmup + pad = 0.0 if task in ('speed', 'benchmark') else 0.5 + rect = False if task == 'benchmark' else pt # square inference for benchmarks + task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images + dataloader = create_dataloader(data[task], imgsz, batch_size, stride, single_cls, pad=pad, rect=rect, + workers=workers, prefix=colorstr(f'{task}: '))[0] + + seen = 0 + confusion_matrix = ConfusionMatrix(nc=nc) + names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)} + class_map = coco80_to_coco91_class() if is_coco else list(range(1000)) + s = ('%20s' + '%11s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95') + dt, p, r, f1, mp, mr, map50, map = [0.0, 0.0, 0.0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 + loss = torch.zeros(3, device=device) + jdict, stats, ap, ap_class = [], [], [], [] + pbar = tqdm(dataloader, desc=s, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar + for batch_i, (im, targets, paths, shapes) in enumerate(pbar): + t1 = time_sync() + if cuda: + im = 
im.to(device, non_blocking=True) + targets = targets.to(device) + im = im.half() if half else im.float() # uint8 to fp16/32 + im /= 255 # 0 - 255 to 0.0 - 1.0 + nb, _, height, width = im.shape # batch size, channels, height, width + t2 = time_sync() + dt[0] += t2 - t1 + + # Inference + out, train_out = model(im) if training else model(im, augment=augment, val=True) # inference, loss outputs + dt[1] += time_sync() - t2 + + # Loss + if compute_loss: + loss += compute_loss([x.float() for x in train_out], targets)[1] # box, obj, cls + + # NMS + targets[:, 2:] *= torch.tensor((width, height, width, height), device=device) # to pixels + lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling + t3 = time_sync() + out = non_max_suppression(out, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls) + dt[2] += time_sync() - t3 + + # Metrics + for si, pred in enumerate(out): + labels = targets[targets[:, 0] == si, 1:] + nl = len(labels) + tcls = labels[:, 0].tolist() if nl else [] # target class + path, shape = Path(paths[si]), shapes[si][0] + seen += 1 + + if len(pred) == 0: + if nl: + stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls)) + continue + + # Predictions + if single_cls: + pred[:, 5] = 0 + predn = pred.clone() + scale_coords(im[si].shape[1:], predn[:, :4], shape, shapes[si][1]) # native-space pred + + # Evaluate + if nl: + tbox = xywh2xyxy(labels[:, 1:5]) # target boxes + scale_coords(im[si].shape[1:], tbox, shape, shapes[si][1]) # native-space labels + labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels + correct = process_batch(predn, labelsn, iouv) + if plots: + confusion_matrix.process_batch(predn, labelsn) + else: + correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool) + stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls)) # (correct, conf, pcls, tcls) + + # Save/log + if save_txt: + save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / (path.stem + '.txt')) + if save_json: + save_one_json(predn, jdict, path, class_map) # append to COCO-JSON dictionary + callbacks.run('on_val_image_end', pred, predn, path, names, im[si]) + + # Plot images + if plots and batch_i < 3: + f = save_dir / f'val_batch{batch_i}_labels.jpg' # labels + Thread(target=plot_images, args=(im, targets, paths, f, names), daemon=True).start() + f = save_dir / f'val_batch{batch_i}_pred.jpg' # predictions + Thread(target=plot_images, args=(im, output_to_target(out), paths, f, names), daemon=True).start() + + # Compute metrics + stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy + if len(stats) and stats[0].any(): + tp, fp, p, r, f1, ap, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names) + ap50, ap = ap[:, 0], ap.mean(1) # AP@0.5, AP@0.5:0.95 + mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean() + nt = np.bincount(stats[3].astype(np.int64), minlength=nc) # number of targets per class + else: + nt = torch.zeros(1) + + # Print results + pf = '%20s' + '%11i' * 2 + '%11.3g' * 4 # print format + LOGGER.info(pf % ('all', seen, nt.sum(), mp, mr, map50, map)) + + # Print results per class + if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats): + for i, c in enumerate(ap_class): + LOGGER.info(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i])) + + # Print speeds + t = tuple(x / seen * 1E3 for x in dt) # speeds per image + if not training: + shape = (batch_size, 3, imgsz, imgsz) + LOGGER.info(f'Speed: 
%.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t) + + # Plots + if plots: + confusion_matrix.plot(save_dir=save_dir, names=list(names.values())) + callbacks.run('on_val_end') + + # Save JSON + if save_json and len(jdict): + w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights + anno_json = str(Path(data.get('path', '../coco')) / 'annotations/instances_val2017.json') # annotations json + pred_json = str(save_dir / f"{w}_predictions.json") # predictions json + LOGGER.info(f'\nEvaluating pycocotools mAP... saving {pred_json}...') + with open(pred_json, 'w') as f: + json.dump(jdict, f) + + try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb + check_requirements(['pycocotools']) + from pycocotools.coco import COCO + from pycocotools.cocoeval import COCOeval + + anno = COCO(anno_json) # init annotations api + pred = anno.loadRes(pred_json) # init predictions api + eval = COCOeval(anno, pred, 'bbox') + if is_coco: + eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.im_files] # image IDs to evaluate + eval.evaluate() + eval.accumulate() + eval.summarize() + map, map50 = eval.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5) + except Exception as e: + LOGGER.info(f'pycocotools unable to run: {e}') + + # Return results + model.float() # for training + if not training: + s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' + LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") + maps = np.zeros(nc) + map + for i, c in enumerate(ap_class): + maps[c] = ap[i] + return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t + + +def parse_opt(): + parser = argparse.ArgumentParser() + parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') + parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model.pt path(s)') + parser.add_argument('--batch-size', type=int, default=32, help='batch size') + parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)') + parser.add_argument('--conf-thres', type=float, default=0.001, help='confidence threshold') + parser.add_argument('--iou-thres', type=float, default=0.6, help='NMS IoU threshold') + parser.add_argument('--task', default='val', help='train, val, test, speed or study') + parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') + parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') + parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset') + parser.add_argument('--augment', action='store_true', help='augmented inference') + parser.add_argument('--verbose', action='store_true', help='report mAP by class') + parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') + parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt') + parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') + parser.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file') + parser.add_argument('--project', default=ROOT / 'runs/val', help='save to project/name') + parser.add_argument('--name', default='exp', help='save to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') + parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') + opt = parser.parse_args() + opt.data = check_yaml(opt.data) # check YAML + opt.save_json |= opt.data.endswith('coco.yaml') + opt.save_txt |= opt.save_hybrid + print_args(FILE.stem, opt) + return opt + + +def main(opt): + check_requirements(requirements=ROOT / 'requirements.txt', exclude=('tensorboard', 'thop')) + + if opt.task in ('train', 'val', 'test'): # run normally + if opt.conf_thres > 0.001: # https://github.com/ultralytics/yolov5/issues/1466 + LOGGER.info(f'WARNING: confidence threshold {opt.conf_thres} >> 0.001 will produce invalid mAP values.') + run(**vars(opt)) + + else: + weights = opt.weights if isinstance(opt.weights, list) else [opt.weights] + opt.half = True # FP16 for fastest results + if opt.task == 'speed': # speed benchmarks + # python val.py --task speed --data coco.yaml --batch 1 --weights yolov5n.pt yolov5s.pt... + opt.conf_thres, opt.iou_thres, opt.save_json = 0.25, 0.45, False + for opt.weights in weights: + run(**vars(opt), plots=False) + + elif opt.task == 'study': # speed vs mAP benchmarks + # python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n.pt yolov5s.pt... 
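+ # Row layout (a sketch, inferred from run()'s return value above): each y row saved by
+ # np.savetxt below holds the seven metric values (mp, mr, map50, map, box/obj/cls val
+ # losses) followed by the three per-image speeds in ms, one row per image size in x.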
+ for opt.weights in weights: + f = f'study_{Path(opt.data).stem}_{Path(opt.weights).stem}.txt' # filename to save to + x, y = list(range(256, 1536 + 128, 128)), [] # x axis (image sizes), y axis + for opt.imgsz in x: # img-size + LOGGER.info(f'\nRunning {f} --imgsz {opt.imgsz}...') + r, _, t = run(**vars(opt), plots=False) + y.append(r + t) # results and times + np.savetxt(f, y, fmt='%10.4g') # save + os.system('zip -r study.zip study_*.txt') + plot_val_study(x=x) # plot + + +if __name__ == "__main__": + opt = parse_opt() + main(opt) diff --git a/weights/download_weights.sh b/weights/download_weights.sh deleted file mode 100755 index 43c8e31d80fd..000000000000 --- a/weights/download_weights.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash -# Download latest models from https://github.com/ultralytics/yolov5/releases -# Usage: -# $ bash weights/download_weights.sh - -python - < Date: Fri, 8 Apr 2022 19:50:38 +0100 Subject: [PATCH 755/757] Update SparseML Integration to V6.1 (#26) * SparseML integration * Add SparseML dependency * Update: add missing files * Update requirements.txt * Update: sparseml-nightly support * Update: remove model versioning * Partial update for multi-stage recipes * Update: multi-stage recipe support * Update: remove sparseml dep * Fix: multi-stage recipe handling * Fix: multi stage support * Fix: non-recipe runs * Add: legacy hyperparam files * Fix: add copy-paste to hyps * Fix: nit * apply structure fixes --- data/hyps/hyp.finetune.yaml | 39 +++++++ data/hyps/hyp.scratch.yaml | 34 ++++++ detect.py | 3 +- export.py | 173 ++++++++++++++++++++++++++--- models/common.py | 48 ++++---- models/yolo.py | 16 ++- models_v5.0/yolov5l.yaml | 48 ++++++++ models_v5.0/yolov5m.yaml | 48 ++++++++ models_v5.0/yolov5s.yaml | 48 ++++++++ models_v5.0/yolov5x.yaml | 48 ++++++++ requirements.txt | 1 + train.py | 130 ++++++++++++++-------- utils/activations.py | 17 +++ utils/downloads.py | 3 + utils/general.py | 12 +- utils/loggers/__init__.py | 5 +- utils/loggers/wandb/wandb_utils.py | 4 + utils/sparse.py | 151 +++++++++++++++++++++++++ utils/torch_utils.py | 30 ++++- val.py | 3 +- 20 files changed, 758 insertions(+), 103 deletions(-) create mode 100644 data/hyps/hyp.finetune.yaml create mode 100644 data/hyps/hyp.scratch.yaml create mode 100644 models_v5.0/yolov5l.yaml create mode 100644 models_v5.0/yolov5m.yaml create mode 100644 models_v5.0/yolov5s.yaml create mode 100644 models_v5.0/yolov5x.yaml create mode 100644 utils/sparse.py diff --git a/data/hyps/hyp.finetune.yaml b/data/hyps/hyp.finetune.yaml new file mode 100644 index 000000000000..3aa1923f78a6 --- /dev/null +++ b/data/hyps/hyp.finetune.yaml @@ -0,0 +1,39 @@ +# Hyperparameters for VOC finetuning +# python train.py --batch 64 --weights yolov5m.pt --data voc.yaml --img 512 --epochs 50 +# See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials + + +# Hyperparameter Evolution Results +# Generations: 306 +# P R mAP.5 mAP.5:.95 box obj cls +# Metrics: 0.6 0.936 0.896 0.684 0.0115 0.00805 0.00146 + +lr0: 0.0032 +lrf: 0.12 +momentum: 0.843 +weight_decay: 0.00036 +warmup_epochs: 2.0 +warmup_momentum: 0.5 +warmup_bias_lr: 0.05 +box: 0.0296 +cls: 0.243 +cls_pw: 0.631 +obj: 0.301 +obj_pw: 0.911 +iou_t: 0.2 +anchor_t: 2.91 +# anchors: 3.63 +fl_gamma: 0.0 +hsv_h: 0.0138 +hsv_s: 0.664 +hsv_v: 0.464 +degrees: 0.373 +translate: 0.245 +scale: 0.898 +shear: 0.602 +perspective: 0.0 +flipud: 0.00856 +fliplr: 0.5 +mosaic: 1.0 +mixup: 0.243 +copy_paste: 0.0
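+# Note: train.py (below) reads these hyp files with PyYAML's SafeLoader; a minimal
+# sketch of loading one for inspection, assuming only PyYAML (values are from this file):
+#
+#   import yaml
+#   with open('data/hyps/hyp.finetune.yaml', errors='ignore') as f:
+#       hyp = yaml.load(f, Loader=yaml.SafeLoader)
+#   print(hyp['lr0'], hyp['momentum'])  # 0.0032 0.843
diff --git a/data/hyps/hyp.scratch.yaml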
b/data/hyps/hyp.scratch.yaml new file mode 100644 index 000000000000..e10b9893dd50 --- /dev/null +++ b/data/hyps/hyp.scratch.yaml @@ -0,0 +1,34 @@ +# Hyperparameters for COCO training from scratch +# python train.py --batch 40 --cfg yolov5m.yaml --weights '' --data coco.yaml --img 640 --epochs 300 +# See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials + + +lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3) +lrf: 0.2 # final OneCycleLR learning rate (lr0 * lrf) +momentum: 0.937 # SGD momentum/Adam beta1 +weight_decay: 0.0005 # optimizer weight decay 5e-4 +warmup_epochs: 3.0 # warmup epochs (fractions ok) +warmup_momentum: 0.8 # warmup initial momentum +warmup_bias_lr: 0.1 # warmup initial bias lr +box: 0.05 # box loss gain +cls: 0.5 # cls loss gain +cls_pw: 1.0 # cls BCELoss positive_weight +obj: 1.0 # obj loss gain (scale with pixels) +obj_pw: 1.0 # obj BCELoss positive_weight +iou_t: 0.20 # IoU training threshold +anchor_t: 4.0 # anchor-multiple threshold +# anchors: 3 # anchors per output layer (0 to ignore) +fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5) +hsv_h: 0.015 # image HSV-Hue augmentation (fraction) +hsv_s: 0.7 # image HSV-Saturation augmentation (fraction) +hsv_v: 0.4 # image HSV-Value augmentation (fraction) +degrees: 0.0 # image rotation (+/- deg) +translate: 0.1 # image translation (+/- fraction) +scale: 0.5 # image scale (+/- gain) +shear: 0.0 # image shear (+/- deg) +perspective: 0.0 # image perspective (+/- fraction), range 0-0.001 +flipud: 0.0 # image flip up-down (probability) +fliplr: 0.5 # image flip left-right (probability) +mosaic: 1.0 # image mosaic (probability) +mixup: 0.0 # image mixup (probability) +copy_paste: 0.0 diff --git a/detect.py b/detect.py index ccb9fbf5103f..559b3414f506 100644 --- a/detect.py +++ b/detect.py @@ -45,6 +45,7 @@ increment_path, non_max_suppression, print_args, scale_coords, strip_optimizer, xyxy2xywh) from utils.plots import Annotator, colors, save_one_box from utils.torch_utils import select_device, time_sync +from export import load_checkpoint @torch.no_grad() @@ -89,7 +90,7 @@ def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s) # Load model device = select_device(device) - model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) + model, extras = load_checkpoint(type_='val', weights=weights, device=device) # load FP32 model stride, names, pt = model.stride, model.names, model.pt imgsz = check_img_size(imgsz, s=stride) # check image size diff --git a/export.py b/export.py index 2d4a68e62f89..f489aaa28f07 100644 --- a/export.py +++ b/export.py @@ -43,6 +43,7 @@ """ import argparse +from copy import deepcopy import json import os import platform @@ -57,20 +58,26 @@ import torch.nn as nn from torch.utils.mobile_optimizer import optimize_for_mobile +from sparseml.pytorch.utils import ModuleExporter +from sparseml.pytorch.sparsification.quantization import skip_onnx_input_quantize + FILE = Path(__file__).resolve() ROOT = FILE.parents[0] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative -from models.common import Conv +from models.common import Conv, DetectMultiBackend from models.experimental import attempt_load -from models.yolo import Detect +from models.yolo import Detect, Model from utils.activations import SiLU from utils.datasets import LoadImages from utils.general import (LOGGER, check_dataset, check_img_size, check_requirements, 
check_version, colorstr, - file_size, print_args, url2file) -from utils.torch_utils import select_device + file_size, print_args, url2file, intersect_dicts) +from utils.torch_utils import select_device, torch_distributed_zero_first, is_parallel +from utils.downloads import attempt_download +from utils.sparse import SparseMLWrapper, check_download_sparsezoo_weights + def export_formats(): @@ -118,14 +125,33 @@ def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorst LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...') f = file.with_suffix('.onnx') - torch.onnx.export(model, im, f, verbose=False, opset_version=opset, - training=torch.onnx.TrainingMode.TRAINING if train else torch.onnx.TrainingMode.EVAL, - do_constant_folding=not train, - input_names=['images'], - output_names=['output'], - dynamic_axes={'images': {0: 'batch', 2: 'height', 3: 'width'}, # shape(1,3,640,640) - 'output': {0: 'batch', 1: 'anchors'} # shape(1,25200,85) - } if dynamic else None) + # export through SparseML so quantized and pruned graphs can be corrected + save_dir = f.parent.absolute() + save_name = str(f).split(os.path.sep)[-1] + + # get the number of outputs so we know how to name and change dynamic axes + # nested outputs can be returned if model is exported with dynamic + def _count_outputs(outputs): + count = 0 + if isinstance(outputs, list) or isinstance(outputs, tuple): + for out in outputs: + count += _count_outputs(out) + else: + count += 1 + return count + + outputs = model(im) + num_outputs = _count_outputs(outputs) + input_names = ['input'] + output_names = [f'out_{i}' for i in range(num_outputs)] + dynamic_axes = {k: {0: 'batch'} for k in (input_names + output_names)} if dynamic else None + exporter = ModuleExporter(model, save_dir) + exporter.export_onnx(im, name=save_name, convert_qat=True, + input_names=input_names, output_names=output_names, dynamic_axes=dynamic_axes) + try: + skip_onnx_input_quantize(f, f) + except Exception: + pass # Checks model_onnx = onnx.load(f) # load onnx model @@ -407,6 +433,115 @@ def export_tfjs(keras_model, im, file, prefix=colorstr('TensorFlow.js:')): except Exception as e: LOGGER.info(f'\n{prefix} export failure: {e}') +def create_checkpoint(epoch, model, optimizer, ema, sparseml_wrapper, **kwargs): + pickle = not sparseml_wrapper.qat_active(epoch) # qat does not support pickled exports + ckpt_model = deepcopy(model.module if is_parallel(model) else model).float() + yaml = ckpt_model.yaml + if not pickle: + ckpt_model = ckpt_model.state_dict() + + return {'epoch': epoch, + 'model': ckpt_model, + 'optimizer': optimizer.state_dict(), + 'yaml': yaml, + 'hyp': model.hyp, + **ema.state_dict(pickle), + **sparseml_wrapper.state_dict(), + **kwargs} + +def load_checkpoint( + type_, + weights, + device, + cfg=None, + hyp=None, + nc=None, + data=None, + dnn=False, + half=False, + recipe=None, + resume=None, + rank=-1 + ): + with torch_distributed_zero_first(rank): + # download if not found locally or from sparsezoo if stub + weights = attempt_download(weights) or check_download_sparsezoo_weights(weights) + ckpt = torch.load(weights[0] if isinstance(weights, list) or isinstance(weights, tuple) + else weights, map_location="cpu") # load checkpoint + start_epoch = ckpt['epoch'] + 1 if 'epoch' in ckpt else 0 + pickled = isinstance(ckpt['model'], nn.Module) + train_type = type_ == 'train' + ensemble_type = type_ == 'ensemble' + val_type = type_ == 'val' +
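+ # The three modes, summarized (this matches the branches just below): 'ensemble' loads
+ # pickled weights via attempt_load, 'val' wraps the result in DetectMultiBackend, and
+ # 'train' rebuilds the Model from cfg/yaml and restores its state_dict.
+ if pickled and ensemble_type: + cfg = None + if ensemble_type: + model = 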
attempt_load(weights, map_location=device) # load ensemble using pickled + state_dict = model.state_dict() + elif val_type: + model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) + state_dict = model.model.state_dict() + else: + # load model from config and weights + cfg = cfg or (ckpt['yaml'] if 'yaml' in ckpt else None) or \ + (ckpt['model'].yaml if pickled else None) + model = Model(cfg, ch=3, nc=ckpt['nc'] if ('nc' in ckpt and not nc) else nc, + anchors=hyp.get('anchors') if hyp else None).to(device) + model_key = 'ema' if (not train_type and 'ema' in ckpt and ckpt['ema']) else 'model' + state_dict = ckpt[model_key].float().state_dict() if pickled else ckpt[model_key] + if val_type: + model = DetectMultiBackend(model=model, device=device, dnn=dnn, data=data, fp16=half) + + # turn gradients for params back on in case they were removed + for p in model.parameters(): + p.requires_grad = True + + # load sparseml recipe for applying pruning and quantization + checkpoint_recipe = train_recipe = None + if resume: + train_recipe = ckpt['recipe'] if ('recipe' in ckpt) else None + elif ckpt['recipe'] or recipe: + train_recipe, checkpoint_recipe = recipe, ckpt['recipe'] + + sparseml_wrapper = SparseMLWrapper(model.model if val_type else model, checkpoint_recipe, train_recipe) + exclude_anchors = train_type and (cfg or hyp.get('anchors')) and not resume + loaded = False + + sparseml_wrapper.apply_checkpoint_structure(float("inf")) + if train_type: + # initialize the recipe for training and restore the weights before if no quantized weights + quantized_state_dict = any([name.endswith('.zero_point') for name in state_dict.keys()]) + if not quantized_state_dict: + state_dict = load_state_dict(model, state_dict, train=True, exclude_anchors=exclude_anchors) + loaded = True + sparseml_wrapper.initialize(start_epoch) + + if not loaded: + state_dict = load_state_dict(model, state_dict, train=train_type, exclude_anchors=exclude_anchors) + + model.float() + report = 'Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights) + + return model, { + 'ckpt': ckpt, + 'state_dict': state_dict, + 'sparseml_wrapper': sparseml_wrapper, + 'report': report, + } + + +def load_state_dict(model, state_dict, train, exclude_anchors): + # fix older state_dict names not porting to the new model setup + state_dict = {key if not key.startswith("module.") else key[7:]: val for key, val in state_dict.items()} + + if train: + # load any missing weights from the model + state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=['anchor'] if exclude_anchors else []) + + model.load_state_dict(state_dict, strict=not train) # load + + return state_dict
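+ # Example call site (a sketch mirroring detect.py/val.py in this patch; the weights
+ # path is illustrative): + # model, extras = load_checkpoint(type_='val', weights='yolov5s.pt', device=device)
+ # LOGGER.info(extras['report'])  # 'Transferred x/y items from yolov5s.pt'
@torch.no_grad() def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' @@ -414,7 +549,7 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' imgsz=(640, 640), # image (height, width) batch_size=1, # batch size device='cpu', # cuda device, i.e. 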
0 or 0,1,2,3 or cpu - include=('torchscript', 'onnx'), # include formats + include=('onnx',), # include formats (single-element tuple; a bare ('onnx') would iterate characters) half=False, # FP16 half-precision export inplace=False, # set YOLOv5 Detect() inplace=True train=False, # model.train() mode @@ -430,7 +565,8 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' topk_per_class=100, # TF.js NMS: topk per class to keep topk_all=100, # TF.js NMS: topk for all classes to keep iou_thres=0.45, # TF.js NMS: IoU threshold - conf_thres=0.25 # TF.js NMS: confidence threshold + conf_thres=0.25, # TF.js NMS: confidence threshold + remove_grid=False, ): t = time.time() include = [x.lower() for x in include] # to lowercase @@ -443,8 +579,9 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' # Load PyTorch model device = select_device(device) assert not (device.type == 'cpu' and half), '--half only compatible with GPU export, i.e. use --device 0' - model = attempt_load(weights, map_location=device, inplace=True, fuse=True) # load FP32 model - nc, names = model.nc, model.names # number of classes, class names + model, extras = load_checkpoint(type_='ensemble', weights=weights, device=device) # load FP32 model + sparseml_wrapper = extras['sparseml_wrapper'] + nc, names = extras["ckpt"]["nc"], model.names # number of classes, class names # Checks imgsz *= 2 if len(imgsz) == 1 else 1 # expand @@ -469,6 +606,7 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' m.onnx_dynamic = dynamic if hasattr(m, 'forward_export'): m.forward = m.forward_export # assign custom forward (optional) + model.model[-1].export = not remove_grid # set Detect() layer grid export for _ in range(2): y = model(im) # dry runs @@ -541,6 +679,7 @@ def parse_opt(): parser.add_argument('--topk-all', type=int, default=100, help='TF.js NMS: topk for all classes to keep') parser.add_argument('--iou-thres', type=float, default=0.45, help='TF.js NMS: IoU threshold') parser.add_argument('--conf-thres', type=float, default=0.25, help='TF.js NMS: confidence threshold') + parser.add_argument("--remove-grid", action="store_true", help="remove export of Detect() layer grid") parser.add_argument('--include', nargs='+', default=['torchscript', 'onnx'], help='torchscript, onnx, openvino, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs') @@ -556,4 +695,4 @@ def main(opt): if __name__ == "__main__": opt = parse_opt() - main(opt) + main(opt) \ No newline at end of file diff --git a/models/common.py b/models/common.py index 115e3c3145ff..e0b783f55033 100644 --- a/models/common.py +++ b/models/common.py @@ -31,7 +31,7 @@ def autopad(k, p=None): # kernel, padding # Pad to 'same' if p is None: - p = k // 2 if isinstance(k, int) else (x // 2 for x in k) # auto-pad + p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad return p @@ -121,7 +121,7 @@ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, nu def forward(self, x): y1 = self.cv3(self.m(self.cv1(x))) y2 = self.cv2(x) - return self.cv4(self.act(self.bn(torch.cat((y1, y2), 1)))) + return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1)))) class C3(nn.Module): @@ -131,12 +131,12 @@ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, nu c_ = int(c2 * e) # hidden channels self.cv1 = Conv(c1, c_, 1, 1) self.cv2 = Conv(c1, c_, 1, 1) - self.cv3 = Conv(2 * c_, c2, 1) # optional act=FReLU(c2) + self.cv3 = Conv(2 * c_, c2, 1) # act=FReLU(c2) self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n))) - # self.m = 
nn.Sequential(*(CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n))) + # self.m = nn.Sequential(*[CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)]) def forward(self, x): - return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), 1)) + return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), dim=1)) class C3TR(C3): @@ -194,7 +194,7 @@ def forward(self, x): warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning y1 = self.m(x) y2 = self.m(y1) - return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1)) + return self.cv2(torch.cat([x, y1, y2, self.m(y2)], 1)) class Focus(nn.Module): @@ -205,7 +205,7 @@ def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, k # self.contract = Contract(gain=2) def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2) - return self.conv(torch.cat((x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]), 1)) + return self.conv(torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1)) # return self.conv(self.contract(x)) @@ -219,7 +219,7 @@ def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, s def forward(self, x): y = self.cv1(x) - return torch.cat((y, self.cv2(y)), 1) + return torch.cat([y, self.cv2(y)], 1) class GhostBottleneck(nn.Module): @@ -277,7 +277,7 @@ def forward(self, x): class DetectMultiBackend(nn.Module): # YOLOv5 MultiBackend class for python inference on various backends - def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, data=None, fp16=False): + def __init__(self, weights='yolov5s.pt', model=None, device=torch.device('cpu'), dnn=False, data=None, fp16=False): # Usage: # PyTorch: weights = *.pt # TorchScript: *.torchscript @@ -303,11 +303,11 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, names = yaml.safe_load(f)['names'] # class names if pt: # PyTorch - model = attempt_load(weights if isinstance(weights, list) else w, map_location=device) + model = model or (attempt_load(weights if isinstance(weights, list) else w, map_location=device)) stride = max(int(model.stride.max()), 32) # model stride names = model.module.names if hasattr(model, 'module') else model.names # get class names model.half() if fp16 else model.float() - self.model = model # explicitly assign for to(), cpu(), cuda(), half() + self.model = model.model # explicitly assign for to(), cpu(), cuda(), half() elif jit: # TorchScript LOGGER.info(f'Loading {w} for TorchScript inference...') extra_files = {'config.txt': ''} # model metadata @@ -527,7 +527,7 @@ def forward(self, imgs, size=640, augment=False, profile=False): p = next(self.model.parameters()) if self.pt else torch.zeros(1) # for device and type autocast = self.amp and (p.device.type != 'cpu') # Automatic Mixed Precision (AMP) inference if isinstance(imgs, torch.Tensor): # torch - with amp.autocast(autocast): + with amp.autocast(enabled=autocast): return self.model(imgs.to(p.device).type_as(p), augment, profile) # inference # Pre-process @@ -550,19 +550,19 @@ def forward(self, imgs, size=640, augment=False, profile=False): shape1.append([y * g for y in s]) imgs[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update shape1 = [make_divisible(x, self.stride) if self.pt else size for x in np.array(shape1).max(0)] # inf shape - x = [letterbox(im, shape1, auto=False)[0] for im in imgs] # pad + x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs] # pad x = 
np.ascontiguousarray(np.array(x).transpose((0, 3, 1, 2))) # stack and BHWC to BCHW x = torch.from_numpy(x).to(p.device).type_as(p) / 255 # uint8 to fp16/32 t.append(time_sync()) - with amp.autocast(autocast): + with amp.autocast(enabled=autocast): # Inference y = self.model(x, augment, profile) # forward t.append(time_sync()) # Post-process - y = non_max_suppression(y if self.dmb else y[0], self.conf, self.iou, self.classes, self.agnostic, - self.multi_label, max_det=self.max_det) # NMS + y = non_max_suppression(y if self.dmb else y[0], self.conf, iou_thres=self.iou, classes=self.classes, + agnostic=self.agnostic, multi_label=self.multi_label, max_det=self.max_det) # NMS for i in range(n): scale_coords(shape1, y[i][:, :4], shape0[i]) @@ -589,7 +589,7 @@ def __init__(self, imgs, pred, files, times=(0, 0, 0, 0), names=None, shape=None self.t = tuple((times[i + 1] - times[i]) * 1000 / self.n for i in range(3)) # timestamps (ms) self.s = shape # inference BCHW shape - def display(self, pprint=False, show=False, save=False, crop=False, render=False, labels=True, save_dir=Path('')): + def display(self, pprint=False, show=False, save=False, crop=False, render=False, save_dir=Path('')): crops = [] for i, (im, pred) in enumerate(zip(self.imgs, self.pred)): s = f'image {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} ' # string @@ -606,7 +606,7 @@ def display(self, pprint=False, show=False, save=False, crop=False, render=False crops.append({'box': box, 'conf': conf, 'cls': cls, 'label': label, 'im': save_one_box(box, im, file=file, save=save)}) else: # all others - annotator.box_label(box, label if labels else '', color=colors(cls)) + annotator.box_label(box, label, color=colors(cls)) im = annotator.im else: s += '(no detections)' @@ -633,19 +633,19 @@ def print(self): LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' % self.t) - def show(self, labels=True): - self.display(show=True, labels=labels) # show results + def show(self): + self.display(show=True) # show results - def save(self, labels=True, save_dir='runs/detect/exp'): + def save(self, save_dir='runs/detect/exp'): save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) # increment save_dir - self.display(save=True, labels=labels, save_dir=save_dir) # save results + self.display(save=True, save_dir=save_dir) # save results def crop(self, save=True, save_dir='runs/detect/exp'): save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) if save else None return self.display(crop=True, save=save, save_dir=save_dir) # crop results - def render(self, labels=True): - self.display(render=True, labels=labels) # render results + def render(self): + self.display(render=True) # render results return self.imgs def pandas(self): diff --git a/models/yolo.py b/models/yolo.py index 9f4701c49f9d..f08d41ce1585 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -19,6 +19,7 @@ from models.common import * from models.experimental import * +from utils.activations import replace_activations from utils.autoanchor import check_anchor_order from utils.general import LOGGER, check_version, check_yaml, make_divisible, print_args from utils.plots import feature_visualization @@ -33,6 +34,7 @@ class Detect(nn.Module): stride = None # strides computed during build onnx_dynamic = False # ONNX export parameter + export = True # onnx export def __init__(self, nc=80, anchors=(), ch=(), inplace=True): # detection layer super().__init__() @@ -53,7 +55,7 @@ 
def forward(self, x): bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() - if not self.training: # inference + if not self.training and self.export: # inference if self.onnx_dynamic or self.grid[i].shape[2:4] != x[i].shape[2:4]: self.grid[i], self.anchor_grid[i] = self._make_grid(nx, ny, i) @@ -67,7 +69,7 @@ def forward(self, x): y = torch.cat((xy, wh, y[..., 4:]), -1) z.append(y.view(bs, -1, self.no)) - return x if self.training else (torch.cat(z, 1), x) + return x if self.training or not self.export else (torch.cat(z, 1), x) def _make_grid(self, nx=20, ny=20, i=0): d = self.anchors[i].device @@ -291,7 +293,15 @@ def parse_model(d, ch): # model_dict, input_channels(3) if i == 0: ch = [] ch.append(c2) - return nn.Sequential(*layers), sorted(save) + + model = nn.Sequential(*layers) + + # override all activations in model if provided in config + if 'act' in d: + LOGGER.info(f'overriding activations in model to {d["act"]}') + replace_activations(model, d["act"]) + + return model, sorted(save) if __name__ == '__main__': diff --git a/models_v5.0/yolov5l.yaml b/models_v5.0/yolov5l.yaml new file mode 100644 index 000000000000..71ebf86e5791 --- /dev/null +++ b/models_v5.0/yolov5l.yaml @@ -0,0 +1,48 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 1, SPP, [1024, [5, 9, 13]]], + [-1, 3, C3, [1024, False]], # 9 + ] + +# YOLOv5 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/models_v5.0/yolov5m.yaml b/models_v5.0/yolov5m.yaml new file mode 100644 index 000000000000..3c749c916246 --- /dev/null +++ b/models_v5.0/yolov5m.yaml @@ -0,0 +1,48 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 0.67 # model depth multiple +width_multiple: 0.75 # layer channel multiple + +# anchors +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 1, SPP, [1024, [5, 9, 13]]], + [-1, 3, C3, [1024, False]], # 9 + ] + +# YOLOv5 head +head: + [[-1, 1, Conv, [512, 1, 1]], + 
[-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/models_v5.0/yolov5s.yaml b/models_v5.0/yolov5s.yaml new file mode 100644 index 000000000000..aca669d60d8b --- /dev/null +++ b/models_v5.0/yolov5s.yaml @@ -0,0 +1,48 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 0.33 # model depth multiple +width_multiple: 0.50 # layer channel multiple + +# anchors +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 1, SPP, [1024, [5, 9, 13]]], + [-1, 3, C3, [1024, False]], # 9 + ] + +# YOLOv5 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/models_v5.0/yolov5x.yaml b/models_v5.0/yolov5x.yaml new file mode 100644 index 000000000000..d3babdf7baf0 --- /dev/null +++ b/models_v5.0/yolov5x.yaml @@ -0,0 +1,48 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.33 # model depth multiple +width_multiple: 1.25 # layer channel multiple + +# anchors +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 1, SPP, [1024, [5, 9, 13]]], + [-1, 3, C3, [1024, False]], # 9 + ] + +# YOLOv5 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, 
[1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/requirements.txt b/requirements.txt index 96fc9d1a1f32..36f39017d6af 100755 --- a/requirements.txt +++ b/requirements.txt @@ -35,3 +35,4 @@ seaborn>=0.11.0 # pycocotools>=2.0 # COCO mAP # roboflow thop # FLOPs computation +sparseml[torch,torchvision] >= 0.12 \ No newline at end of file diff --git a/train.py b/train.py index 60be962d447f..738155ad1f77 100644 --- a/train.py +++ b/train.py @@ -40,6 +40,7 @@ import val # for end-of-epoch mAP from models.experimental import attempt_load +from export import load_checkpoint, create_checkpoint from models.yolo import Model from utils.autoanchor import check_anchors from utils.autobatch import check_train_batch_size @@ -56,6 +57,7 @@ from utils.metrics import fitness from utils.plots import plot_evolve, plot_labels from utils.torch_utils import EarlyStopping, ModelEMA, de_parallel, select_device, torch_distributed_zero_first +from utils.sparse import SparseMLWrapper LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html RANK = int(os.getenv('RANK', -1)) @@ -85,9 +87,9 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Save run settings if not evolve: with open(save_dir / 'hyp.yaml', 'w') as f: - yaml.safe_dump(hyp, f, sort_keys=False) + yaml.dump(hyp, f, sort_keys=False) with open(save_dir / 'opt.yaml', 'w') as f: - yaml.safe_dump(vars(opt), f, sort_keys=False) + yaml.dump(vars(opt), f, sort_keys=False) # Loggers data_dict = None @@ -105,6 +107,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Config plots = not evolve # create plots cuda = device.type != 'cpu' + half_precision = cuda init_seeds(1 + RANK) with torch_distributed_zero_first(LOCAL_RANK): data_dict = data_dict or check_dataset(data) # check if None @@ -115,20 +118,27 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary is_coco = isinstance(val_path, str) and val_path.endswith('coco/val2017.txt') # COCO dataset # Model - check_suffix(weights, '.pt') # check weights - pretrained = weights.endswith('.pt') + check_suffix(weights, ['.pt', '.pth']) # check weights + pretrained = weights.endswith('.pt') or weights.endswith('.pth') or weights.startswith('zoo:') if pretrained: - with torch_distributed_zero_first(LOCAL_RANK): - weights = attempt_download(weights) # download if not found locally - ckpt = torch.load(weights, map_location='cpu') # load checkpoint to CPU to avoid CUDA memory leak - model = Model(cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create - exclude = ['anchor'] if (cfg or hyp.get('anchors')) and not resume else [] # exclude keys - csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 - csd = intersect_dicts(csd, model.state_dict(), exclude=exclude) # intersect - model.load_state_dict(csd, strict=False) # load - LOGGER.info(f'Transferred {len(csd)}/{len(model.state_dict())} items from {weights}') # report + model, extras = load_checkpoint( + type_ = 'train', + weights=weights, + device=device, + cfg=opt.cfg, + hyp=hyp, + nc=nc, + recipe=opt.recipe, + resume=opt.resume, + rank=LOCAL_RANK + ) + ckpt, state_dict, sparseml_wrapper = extras['ckpt'], extras['state_dict'], extras['sparseml_wrapper'] + LOGGER.info(extras['report']) else: model = Model(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create + sparseml_wrapper = SparseMLWrapper(model, None, opt.recipe) + sparseml_wrapper.initialize(start_epoch=0) + ckpt = None # Freeze freeze = 
[f'model.{x}.' for x in (freeze if len(freeze) > 1 else range(freeze[0]))] # layers to freeze @@ -183,11 +193,21 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) # plot_lr_scheduler(optimizer, scheduler, epochs) # EMA - ema = ModelEMA(model) if RANK in [-1, 0] else None + ema = ModelEMA(model, enabled=not opt.disable_ema) if RANK in [-1, 0] else None # Resume - start_epoch, best_fitness = 0, 0.0 + start_epoch = sparseml_wrapper.start_epoch or 0 + best_fitness = 0.0 if pretrained: + if opt.resume: + assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % (weights, epochs) + if epochs < start_epoch: + LOGGER.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' % + (weights, start_epoch-1, epochs)) + epochs += start_epoch # finetune additional epochs + if sparseml_wrapper.qat_active(start_epoch): + ema.enabled = False + # Optimizer if ckpt['optimizer'] is not None: optimizer.load_state_dict(ckpt['optimizer']) @@ -198,15 +218,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary ema.ema.load_state_dict(ckpt['ema'].float().state_dict()) ema.updates = ckpt['updates'] - # Epochs - start_epoch = ckpt['epoch'] + 1 - if resume: - assert start_epoch > 0, f'{weights} training to {epochs} epochs is finished, nothing to resume.' - if epochs < start_epoch: - LOGGER.info(f"{weights} has been trained for {ckpt['epoch']} epochs. Fine-tuning for {epochs} more epochs.") - epochs += ckpt['epoch'] # finetune additional epochs - - del ckpt, csd + del ckpt # DP mode if cuda and RANK == -1 and torch.cuda.device_count() > 1: @@ -247,7 +259,6 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Anchors if not opt.noautoanchor: check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz) - model.half().float() # pre-reduce anchor precision callbacks.run('on_pretrain_routine_end') @@ -273,15 +284,29 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary last_opt_step = -1 maps = np.zeros(nc) # mAP per class results = (0, 0, 0, 0, 0, 0, 0) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls) - scheduler.last_epoch = start_epoch - 1 # do not move - scaler = amp.GradScaler(enabled=cuda) + if scheduler: + scheduler.last_epoch = start_epoch - 1 # do not move + scaler = amp.GradScaler(enabled=half_precision) stopper = EarlyStopping(patience=opt.patience) compute_loss = ComputeLoss(model) # init loss class LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n' f'Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n' f"Logging results to {colorstr('bold', save_dir)}\n" f'Starting training for {epochs} epochs...') + + # SparseML Integration + if RANK in [-1, 0]: + sparseml_wrapper.initialize_loggers(loggers.logger, loggers.tb, loggers.wandb) + scaler = sparseml_wrapper.modify(scaler, optimizer, model, train_loader) + scheduler = sparseml_wrapper.check_lr_override(scheduler, RANK) + epochs = sparseml_wrapper.check_epoch_override(epochs, RANK) + for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------ + if sparseml_wrapper.qat_active(epoch): + LOGGER.info('Disabling half precision and EMA, QAT scheduled to run') + half_precision = False + scaler._enabled = False + ema.enabled = False model.train() # Update image weights (optional, single-GPU only) @@ -313,7 +338,8 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round())
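# Warmup math (a worked example using the hyp.scratch.yaml values above): as ni goes
# from 0 to nw, np.interp ramps the bias group's lr from warmup_bias_lr=0.1 down to
# lr0*lf(epoch), ramps the other groups up from 0.0, and ramps momentum 0.8 -> 0.937.
for j, x in 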
enumerate(optimizer.param_groups): # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0 - x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch)]) + if scheduler: + x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch)]) if 'momentum' in x: x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']]) @@ -326,7 +352,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary imgs = nn.functional.interpolate(imgs, size=ns, mode='bilinear', align_corners=False) # Forward - with amp.autocast(enabled=cuda): + with amp.autocast(enabled=half_precision): pred = model(imgs) # forward loss, loss_items = compute_loss(pred, targets.to(device)) # loss scaled by batch_size if RANK != -1: @@ -345,6 +371,11 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary if ema: ema.update(model) last_opt_step = ni + elif hasattr(scaler, "emulated_step"): + # Call for SparseML integration since the number of steps per epoch can vary + # This keeps the number of steps per epoch equivalent to the number of batches per epoch + # Does not step the scaler or the optimizer + scaler.emulated_step() # Log if RANK in [-1, 0]: @@ -359,7 +390,8 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Scheduler lr = [x['lr'] for x in optimizer.param_groups] # for loggers - scheduler.step() + if scheduler: + scheduler.step() if RANK in [-1, 0]: # mAP @@ -376,25 +408,23 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary save_dir=save_dir, plots=False, callbacks=callbacks, - compute_loss=compute_loss) + compute_loss=compute_loss, + half=half_precision) # Update best mAP fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95] - if fi > best_fitness: + if fi > best_fitness or sparseml_wrapper.reset_best(epoch): best_fitness = fi log_vals = list(mloss) + list(results) + lr callbacks.run('on_fit_epoch_end', log_vals, epoch, best_fitness, fi) # Save model - if (not nosave) or (final_epoch and not evolve): # if save - ckpt = {'epoch': epoch, - 'best_fitness': best_fitness, - 'model': deepcopy(de_parallel(model)).half(), - 'ema': deepcopy(ema.ema).half(), - 'updates': ema.updates, - 'optimizer': optimizer.state_dict(), - 'wandb_id': loggers.wandb.wandb_run.id if loggers.wandb else None, - 'date': datetime.now().isoformat()} + if (not opt.nosave) or (final_epoch and not opt.evolve): # if save + ckpt_extras = {'nc': nc, + 'best_fitness': best_fitness, + 'wandb_id': loggers.wandb.wandb_run.id if loggers.wandb else None, + 'date': datetime.now().isoformat()} + ckpt = create_checkpoint(epoch, model, optimizer, ema, sparseml_wrapper, **ckpt_extras) # Save last, best and delete torch.save(ckpt, last) @@ -422,7 +452,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # end epoch ---------------------------------------------------------------------------------------------------- # end training ----------------------------------------------------------------------------------------------------- if RANK in [-1, 0]: - LOGGER.info(f'\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.') + LOGGER.info(f'\n{epochs - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.') for f in last, best: if f.exists(): strip_optimizer(f) # strip optimizers @@ -431,7 +461,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary results, _, _ = val.run(data_dict, batch_size=batch_size // WORLD_SIZE * 2, imgsz=imgsz, - model=attempt_load(f, 
device).half(), + model=load_checkpoint(type_='ensemble', weights=best, device=device)[0], iou_thres=0.65 if is_coco else 0.60, # best pycocotools results at 0.65 single_cls=single_cls, dataloader=val_loader, @@ -440,7 +470,8 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary verbose=True, plots=True, callbacks=callbacks, - compute_loss=compute_loss) # val best model with plots + compute_loss=compute_loss, # val best model with plots + half=half_precision) if is_coco: callbacks.run('on_fit_epoch_end', list(mloss) + list(results) + lr, epoch, best_fitness, fi) @@ -491,6 +522,9 @@ def parse_opt(known=False): parser.add_argument('--upload_dataset', nargs='?', const=True, default=False, help='W&B: Upload data, "val" option') parser.add_argument('--bbox_interval', type=int, default=-1, help='W&B: Set bounding-box image logging interval') parser.add_argument('--artifact_alias', type=str, default='latest', help='W&B: Version of dataset artifact to use') + parser.add_argument('--recipe', type=str, default=None, help='Path to a sparsification recipe, ' + 'see https://github.com/neuralmagic/sparseml for more information') + parser.add_argument('--disable-ema', action='store_true', help='Disable EMA model updates (enabled by default)') opt = parser.parse_known_args()[0] if known else parser.parse_args() return opt @@ -508,7 +542,7 @@ def main(opt, callbacks=Callbacks()): ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run() # specified or most recent path assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist' with open(Path(ckpt).parent.parent / 'opt.yaml', errors='ignore') as f: - opt = argparse.Namespace(**yaml.safe_load(f)) # replace + opt = argparse.Namespace(**yaml.load(f, Loader=yaml.SafeLoader)) # replace opt.cfg, opt.weights, opt.resume = '', ckpt, True # reinstate LOGGER.info(f'Resuming training from {ckpt}') else: @@ -518,8 +552,8 @@ def main(opt, callbacks=Callbacks()): if opt.evolve: if opt.project == str(ROOT / 'runs/train'): # if default project name, rename to runs/evolve opt.project = str(ROOT / 'runs/evolve') - opt.exist_ok, opt.resume = opt.resume, False # pass resume to exist_ok and disable resume - opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) + opt.exist_ok, opt.resume = opt.resume, False # pass resume to exist_ok and disable resume + opt.save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok) # increment run # DDP mode device = select_device(opt.device, batch_size=opt.batch_size) @@ -575,7 +609,7 @@ def main(opt, callbacks=Callbacks()): 'copy_paste': (1, 0.0, 1.0)} # segment copy-paste (probability) with open(opt.hyp, errors='ignore') as f: - hyp = yaml.safe_load(f) # load hyps dict + hyp = yaml.load(f, Loader=yaml.SafeLoader) # load hyps dict if 'anchors' not in hyp: # anchors commented in hyp.yaml hyp['anchors'] = 3 opt.noval, opt.nosave, save_dir = True, True, Path(opt.save_dir) # only val/save final epoch diff --git a/utils/activations.py b/utils/activations.py index a4ff789cf336..b119d915e54c 100644 --- a/utils/activations.py +++ b/utils/activations.py @@ -7,6 +7,23 @@ import torch.nn as nn import torch.nn.functional as F +def is_activation(mod, act_types=None): + if not act_types: + act_types = (nn.ELU, nn.Hardshrink, nn.Hardsigmoid, nn.Hardtanh, nn.Hardswish, nn.LeakyReLU, + nn.LogSigmoid, nn.PReLU, nn.ReLU, nn.ReLU6, nn.RReLU, nn.SELU, nn.CELU, nn.GELU, + nn.Sigmoid, nn.SiLU, nn.Softplus, nn.Softshrink, nn.Softsign, nn.Tanh, nn.Tanhshrink, + SiLU, Hardswish, Mish, 
MemoryEfficientMish, FReLU) + + return isinstance(mod, act_types) + + +def replace_activations(mod, act, act_types=None): + for name, child in mod.named_children(): + if is_activation(child, act_types): + child_act = act if not isinstance(act, str) else eval(act)() + setattr(mod, name, child_act) + else: + replace_activations(child, act, act_types) # SiLU https://arxiv.org/pdf/1606.08415.pdf ---------------------------------------------------------------------------- class SiLU(nn.Module): # export-friendly version of nn.SiLU() diff --git a/utils/downloads.py b/utils/downloads.py index d7b87cb2cadd..714ffb2a0452 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -42,6 +42,9 @@ def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''): def attempt_download(file, repo='ultralytics/yolov5'): # from utils.downloads import *; attempt_download() # Attempt file download if does not exist + if not isinstance(file, (Path, str)) or str(file).startswith("zoo:"): + return + file = Path(str(file).strip().replace("'", '')) if not file.exists(): diff --git a/utils/general.py b/utils/general.py index b0c5e9d69ab7..dcdbf95ddca1 100755 --- a/utils/general.py +++ b/utils/general.py @@ -319,6 +319,10 @@ def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), insta n = 0 # number of packages updates for r in requirements: + if r.startswith("sparseml"): + version = r.split("sparseml")[1] + if pkg.working_set.find(pkg.Requirement("sparseml-nightly" + version)): + continue try: pkg.require(r) except Exception: # DistributionNotFound or VersionConflict if requirements not met @@ -803,9 +807,11 @@ def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_op for k in 'optimizer', 'best_fitness', 'wandb_id', 'ema', 'updates': # keys x[k] = None x['epoch'] = -1 - x['model'].half() # to FP16 - for p in x['model'].parameters(): - p.requires_grad = False + pickled = isinstance(x['model'], torch.nn.Module) + if pickled: + x['model'].half() # to FP16 + for p in x['model'].parameters(): + p.requires_grad = False torch.save(x, s or f) mb = os.path.getsize(s or f) / 1E6 # filesize LOGGER.info(f"Optimizer stripped from {f},{(' saved as %s,' % s) if s else ''} {mb:.1f}MB") diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index ff6722ecd48a..3b2230c02a14 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -87,7 +87,10 @@ def on_train_batch_end(self, ni, model, imgs, targets, paths, plots, sync_bn): if not sync_bn: # tb.add_graph() --sync known issue https://github.com/ultralytics/yolov5/issues/3754 with warnings.catch_warnings(): warnings.simplefilter('ignore') # suppress jit trace warning - self.tb.add_graph(torch.jit.trace(de_parallel(model), imgs[0:1], strict=False), []) + try: + self.tb.add_graph(torch.jit.trace(de_parallel(model), imgs[0:1], strict=False), []) + except Exception: + warnings.warn("Couldn't create quantized graph for Tensorboard") if ni < 3: f = self.save_dir / f'train_batch{ni}.jpg' # filename Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start() diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index 786e58a19972..a2c7102bce14 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -169,6 +169,10 @@ def __init__(self, opt, run_id=None, job_type='Training'): if opt.upload_dataset: if not opt.resume: self.wandb_artifact_data_dict = self.check_and_upload_dataset(opt) + self.wandb_run.config.update({ + 'opt': vars(opt), 
+ 'data_dict': self.wandb_artifact_data_dict + }, allow_val_change=True) if opt.resume: # resume from artifact diff --git a/utils/sparse.py b/utils/sparse.py new file mode 100644 index 000000000000..59b4640756f2 --- /dev/null +++ b/utils/sparse.py @@ -0,0 +1,151 @@ +import math + +from sparsezoo import Zoo +from sparseml.pytorch.optim import ScheduledModifierManager +from sparseml.pytorch.utils import SparsificationGroupLogger + +from utils.torch_utils import is_parallel + + +def _get_model_framework_file(model, path): + transfer_request = 'recipe_type=transfer' in path + checkpoint_available = any([file.checkpoint for file in model.framework_files]) + final_available = any([not file.checkpoint for file in model.framework_files]) + + if transfer_request and checkpoint_available: + # checkpoints are saved for transfer learning use cases, + # return checkpoint if available and requested + return [file for file in model.framework_files if file.checkpoint][0] + elif final_available: + # default to returning final state, if available + return [file for file in model.framework_files if not file.checkpoint][0] + + raise ValueError(f"Could not find a valid framework file for {path}") + + +def check_download_sparsezoo_weights(path): + if isinstance(path, str): + if path.startswith("zoo:"): + # load model from the SparseZoo and override the path with the new download + model = Zoo.load_model_from_stub(path) + file = _get_model_framework_file(model, path) + path = file.downloaded_path() + + return path + + if isinstance(path, list): + return [check_download_sparsezoo_weights(p) for p in path] + + return path + + +class SparseMLWrapper(object): + def __init__(self, model, checkpoint_recipe, train_recipe): + self.enabled = bool(checkpoint_recipe or train_recipe) + self.model = model.module if is_parallel(model) else model + self.checkpoint_manager = ScheduledModifierManager.from_yaml(checkpoint_recipe) if checkpoint_recipe else None + self.manager = ScheduledModifierManager.from_yaml(train_recipe) if train_recipe else None + self.logger = None + self.start_epoch = None + + def state_dict(self): + if self.checkpoint_manager: + manager = ScheduledModifierManager.compose_staged(self.checkpoint_manager, self.manager) + else: + manager = self.manager + return { + 'recipe': str(manager) if self.enabled else None, + } + + def apply_checkpoint_structure(self, epoch): + if not self.enabled: + return + + if epoch < 0: + epoch = math.inf + + if self.checkpoint_manager: + self.checkpoint_manager.apply_structure(self.model, epoch) + + def initialize(self, start_epoch): + if not self.enabled: + return + + self.manager.initialize(self.model, start_epoch) + self.start_epoch = start_epoch + + def initialize_loggers(self, logger, tb_writer, wandb_logger): + self.logger = logger + + if not self.enabled: + return + + def _logging_lambda(tag, value, values, step, wall_time, level): + if not wandb_logger or not wandb_logger.wandb: + return + + if value is not None: + wandb_logger.log({tag: value}) + + if values: + wandb_logger.log(values) + + self.manager.initialize_loggers([ + SparsificationGroupLogger( + lambda_func=_logging_lambda, + tensorboard=tb_writer, + ) + ]) + + if wandb_logger and wandb_logger.wandb: + artifact = wandb_logger.wandb.Artifact('recipe', type='recipe') + with artifact.new_file('recipe.yaml') as file: + file.write(str(self.manager)) + wandb_logger.wandb.log_artifact(artifact) + + def modify(self, scaler, optimizer, model, dataloader): + if not self.enabled: + return scaler +
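+ # manager.modify wraps the GradScaler/optimizer so the recipe's modifiers run on each
+ # optimizer step; the wrapped scaler is what train.py probes for emulated_step() to
+ # keep steps-per-epoch constant when a real step is skipped.
+ return 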
diff --git a/utils/torch_utils.py b/utils/torch_utils.py
index 72f8a0fd1659..02698e656481 100644
--- a/utils/torch_utils.py
+++ b/utils/torch_utils.py
@@ -285,27 +285,47 @@ class ModelEMA:
     For EMA details see https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
     """
 
-    def __init__(self, model, decay=0.9999, tau=2000, updates=0):
+    def __init__(self, model, decay=0.9999, tau=2000, updates=0, enabled=True):
         # Create EMA
-        self.ema = deepcopy(de_parallel(model)).eval()  # FP32 EMA
+        self._model = model
+        self._ema = deepcopy(de_parallel(model)).eval()  # FP32 EMA
         # if next(model.parameters()).device.type != 'cpu':
         #     self.ema.half()  # FP16 EMA
         self.updates = updates  # number of EMA updates
         self.decay = lambda x: decay * (1 - math.exp(-x / tau))  # decay exponential ramp (to help early epochs)
-        for p in self.ema.parameters():
+        self.enabled = enabled
+        for p in self._ema.parameters():
             p.requires_grad_(False)
 
+    @property
+    def ema(self):
+        if not self.enabled:
+            return deepcopy(self._model.module if is_parallel(self._model) else self._model).eval()
+        return self._ema
+
+    def state_dict(self, pickle=True):
+        ema = deepcopy(self.ema).float()
+        return {
+            'ema': ema if pickle else ema.state_dict(),
+            'updates': self.updates,
+        }
+
     def update(self, model):
+        self._model = model
+        if not self.enabled:
+            return
         # Update EMA parameters
         with torch.no_grad():
+            msd = model.module.state_dict() if is_parallel(model) else model.state_dict()  # model state_dict
             self.updates += 1
             d = self.decay(self.updates)
-            msd = de_parallel(model).state_dict()  # model state_dict
             for k, v in self.ema.state_dict().items():
                 if v.dtype.is_floating_point:
+                    mv = msd[k].detach()
                     v *= d
-                    v += (1 - d) * msd[k].detach()
+                    v += (1. - d) * mv
+                    v *= mv != 0  # preserve pruned parameters in model (equal to 0)
 
     def update_attr(self, model, include=(), exclude=('process_group', 'reducer')):
         # Update EMA attributes
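The key change to ModelEMA.update() above is the trailing `v *= mv != 0`: a weight the recipe has pruned to exactly zero in the live model is zeroed in the EMA copy immediately, instead of decaying toward zero over thousands of updates and breaking the sparsity pattern. A minimal numeric sketch of that masking step in isolation (standalone, not the ModelEMA class itself):

import torch

d = 0.999                              # EMA decay for one update
v = torch.tensor([0.50, 0.30, 0.20])   # EMA shadow weights
mv = torch.tensor([0.48, 0.00, 0.19])  # live model weights; index 1 was pruned to zero

v *= d
v += (1. - d) * mv
v *= mv != 0  # pruned position snaps to exactly 0.0 instead of lingering at 0.2997
print(v)      # tensor([0.5000, 0.0000, 0.2000]) (approximately)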
diff --git a/val.py b/val.py
index 2dd2aec679f9..a7503a50f247 100644
--- a/val.py
+++ b/val.py
@@ -35,6 +35,7 @@
 sys.path.append(str(ROOT))  # add ROOT to PATH
 ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative
 
+from export import load_checkpoint
 from models.common import DetectMultiBackend
 from utils.callbacks import Callbacks
 from utils.datasets import create_dataloader
@@ -135,7 +136,7 @@ def run(data,
     (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir
 
     # Load model
-    model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
+    model, extras = load_checkpoint(type_='val', weights=weights, device=device)  # load FP32 model
     stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine
     imgsz = check_img_size(imgsz, s=stride)  # check image size
     half = model.fp16  # FP16 supported on limited backends with CUDA

From ddd5851339bbbff5dcf395070f8f1b47ff2b9305 Mon Sep 17 00:00:00 2001
From: Konstantin
Date: Fri, 8 Apr 2022 15:03:27 -0400
Subject: [PATCH 756/757] manager fixes

---
 export.py       |  2 +-
 utils/sparse.py | 17 ++++++-----------
 2 files changed, 7 insertions(+), 12 deletions(-)

diff --git a/export.py b/export.py
index bdf579768908..67a79c672b6a 100644
--- a/export.py
+++ b/export.py
@@ -508,7 +508,7 @@ def load_checkpoint(
     exclude_anchors = train_type and (cfg or hyp.get('anchors')) and not resume
     loaded = False
 
-    sparseml_wrapper.apply(ckpt['epoch'] if 'epoch' in ckpt else 0)
+    sparseml_wrapper.apply_checkpoint()
     if train_type:
         # initialize the recipe for training and restore the weights before if no quantized weights
         quantized_state_dict = any([name.endswith('.zero_point') for name in state_dict.keys()])
diff --git a/utils/sparse.py b/utils/sparse.py
index d624554a9ca6..e10b7d2930b8 100644
--- a/utils/sparse.py
+++ b/utils/sparse.py
@@ -41,7 +41,7 @@ def check_download_sparsezoo_weights(path):
 
 class SparseMLWrapper(object):
     def __init__(self, model, checkpoint_recipe, train_recipe):
-        self.enabled = bool(checkpoint_recipe or train_recipe)
+        self.enabled = bool(train_recipe)
         self.model = model.module if is_parallel(model) else model
         self.checkpoint_manager = ScheduledModifierManager.from_yaml(checkpoint_recipe) if checkpoint_recipe else None
         self.manager = ScheduledModifierManager.from_yaml(train_recipe) if train_recipe else None
@@ -49,28 +49,23 @@ def __init__(self, model, checkpoint_recipe, train_recipe):
         self.start_epoch = None
 
     def state_dict(self):
-        if self.checkpoint_manager:
-            manager = ScheduledModifierManager.compose_staged(self.checkpoint_manager, self.manager)
-        else:
-            manager = self.manager
+        manager = (ScheduledModifierManager.compose_staged(self.checkpoint_manager, self.manager)
+                   if self.checkpoint_manager and self.enabled else self.manager)
+
         return {
             'recipe': str(manager) if self.enabled else None,
         }
 
-    def apply_checkpoint_structure(self, epoch):
+    def apply_checkpoint(self):
         if not self.enabled:
             return
 
-        if epoch < 0:
-            epoch = math.inf
-
         if self.checkpoint_manager:
-            self.checkpoint_manager.apply_structure(self.model, epoch)
+            self.checkpoint_manager.apply_structure(self.model, math.inf)
 
     def initialize(self, start_epoch):
         if not self.enabled:
             return
-
         self.manager.initialize(self.model, start_epoch)
         self.start_epoch = start_epoch
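Patch 756 changes two behaviors: the wrapper now counts as enabled only when a training recipe is present, and the checkpoint recipe's structure is always applied at `math.inf` (fully applied) rather than at a saved epoch. The composed recipe that `state_dict()` serializes can be reproduced directly with SparseML; a hedged sketch of that composition (both recipe paths are hypothetical):

from sparseml.pytorch.optim import ScheduledModifierManager

checkpoint_manager = ScheduledModifierManager.from_yaml('pruning_recipe.yaml')
train_manager = ScheduledModifierManager.from_yaml('quant_recipe.yaml')

# compose_staged concatenates the two recipes into one staged recipe, so a
# later run can re-apply the pruning stage before resuming quantization.
staged = ScheduledModifierManager.compose_staged(checkpoint_manager, train_manager)
ckpt_entry = {'recipe': str(staged)}  # stored next to the weights, as in SparseMLWrapper.state_dict()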
From dd1ad4152ce93100c671e8067acc4ca83e49e5b9 Mon Sep 17 00:00:00 2001
From: Konstantin
Date: Fri, 8 Apr 2022 15:05:12 -0400
Subject: [PATCH 757/757] Update function name

---
 export.py       | 2 +-
 utils/sparse.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/export.py b/export.py
index 67a79c672b6a..bcbf84a90077 100644
--- a/export.py
+++ b/export.py
@@ -508,7 +508,7 @@ def load_checkpoint(
     exclude_anchors = train_type and (cfg or hyp.get('anchors')) and not resume
     loaded = False
 
-    sparseml_wrapper.apply_checkpoint()
+    sparseml_wrapper.apply_checkpoint_structure()
     if train_type:
         # initialize the recipe for training and restore the weights before if no quantized weights
         quantized_state_dict = any([name.endswith('.zero_point') for name in state_dict.keys()])
diff --git a/utils/sparse.py b/utils/sparse.py
index e10b7d2930b8..6004223e2cc4 100644
--- a/utils/sparse.py
+++ b/utils/sparse.py
@@ -56,7 +56,7 @@ def state_dict(self):
             'recipe': str(manager) if self.enabled else None,
         }
 
-    def apply_checkpoint(self):
+    def apply_checkpoint_structure(self):
         if not self.enabled:
             return
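After patch 757 the entry point is `apply_checkpoint_structure()`, which reduces to a single SparseML call: applying the checkpoint recipe's structural modifiers (pruning masks, quantization stubs) at epoch `math.inf`, as if the recipe had fully run, so the saved state_dict loads onto a matching architecture. A minimal sketch under that assumption (the model and recipe path are placeholders, not the repo's own objects):

import math
import torch.nn as nn
from sparseml.pytorch.optim import ScheduledModifierManager

model = nn.Sequential(nn.Conv2d(3, 16, 3), nn.ReLU())  # stand-in for the loaded YOLOv5 module
manager = ScheduledModifierManager.from_yaml('checkpoint_recipe.yaml')  # hypothetical recipe file
manager.apply_structure(model, math.inf)  # rebuild pruned/quantized structure before loading weights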