diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 000000000000..5d7fa0864607
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,7 @@
+include *.md
+include LICENSE
+include setup.py
+recursive-include yolov5/data *
+recursive-include yolov5/models *
+recursive-include yolov5/models_v5.0 *
+include yolov5/requirements.txt
diff --git a/build_wheel.sh b/build_wheel.sh
new file mode 100755
index 000000000000..d22717596c27
--- /dev/null
+++ b/build_wheel.sh
@@ -0,0 +1,11 @@
+python3 -m pip install --upgrade build
+mkdir yolov5
+cp -r data models models_v5.0 utils .pre-commit-config.yaml $(ls *.py) requirements.txt yolov5/
+grep --include=\*.py -rnl 'yolov5/' -e "from models" | xargs -i@ sed -i 's/from models/from yolov5.models/g' @
+grep --include=\*.py -rnl 'yolov5/' -e "from utils" | xargs -i@ sed -i 's/from utils/from yolov5.utils/g' @
+sed -i '$d' yolov5/requirements.txt
+cat > yolov5/__init__.py << EOF
+from yolov5.utils.general import NM_INTEGRATED
+EOF
+python3 -m build
+rm -r yolov5
diff --git a/export.py b/export.py
index bcbf84a90077..2a035736d1f2 100644
--- a/export.py
+++ b/export.py
@@ -500,9 +500,9 @@ def load_checkpoint(
     # load sparseml recipe for applying pruning and quantization
     checkpoint_recipe = train_recipe = None
     if resume:
-        train_recipe = ckpt['recipe'] if ('recipe' in ckpt) else None
-    elif ckpt['recipe'] or recipe:
-        train_recipe, checkpoint_recipe = recipe, ckpt['recipe']
+        train_recipe = ckpt.get('recipe')
+    elif recipe or ckpt.get('recipe'):
+        train_recipe, checkpoint_recipe = recipe, ckpt.get('recipe')

     sparseml_wrapper = SparseMLWrapper(model.model if val_type else model, checkpoint_recipe, train_recipe)
     exclude_anchors = train_type and (cfg or hyp.get('anchors')) and not resume
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 000000000000..b0f076532a06
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,3 @@
+[build-system]
+requires = ["setuptools>=42"]
+build-backend = "setuptools.build_meta"
diff --git a/requirements.txt b/requirements.txt
index 36f39017d6af..c6a19e789077 100755
--- a/requirements.txt
+++ b/requirements.txt
@@ -35,4 +35,4 @@ seaborn>=0.11.0
 # pycocotools>=2.0  # COCO mAP
 # roboflow
 thop  # FLOPs computation
-sparseml[torch,torchvision] >= 0.12
\ No newline at end of file
+sparseml[torch,torchvision] >= 0.12
diff --git a/setup.py b/setup.py
new file mode 100644
index 000000000000..09083e299981
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,32 @@
+import setuptools
+import os
+
+def read_requirements():
+    build_dir = os.path.abspath(os.path.dirname(__file__))
+    with open(os.path.join(build_dir, "yolov5", "requirements.txt")) as f:
+        return f.read().splitlines()
+
+setuptools.setup(
+    name="yolov5",
+    version='6.1.0',
+    author="",
+    long_description=open("README.md", "r", encoding="utf-8").read(),
+    long_description_content_type="text/markdown",
+    url="https://github.com/ultralytics/yolov5",
+    packages=['yolov5', 'yolov5.models', 'yolov5.utils'],
+    python_requires=">=3.6",
+    install_requires=read_requirements(),
+    include_package_data=True,
+    package_data={'': ['yolov5/models/*.yaml', 'yolov5/data/*']},
+    classifiers=[
+        "Development Status :: 5 - Production/Stable",
+        "License :: OSI Approved :: GNU General Public License (GPL)",
+        "Operating System :: OS Independent",
+        "Intended Audience :: Developers",
+        "Intended Audience :: Science/Research",
+        "Programming Language :: Python :: 3",
+        "Topic :: Software Development :: Libraries :: Python Modules",
+        "Topic :: Education",
+        "Topic :: Scientific/Engineering :: Artificial Intelligence",
+    ]
+)
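For context, a minimal sketch of how the wheel produced by `build_wheel.sh` is meant to be consumed. This is an illustration, not part of the diff: it assumes the wheel has been built and installed (e.g. `pip install dist/yolov5-6.1.0-py3-none-any.whl`, the name `python3 -m build` would produce for this setup.py), and that `models/yolo.py` still defines the upstream `Model` class after the sed rewrite above re-homes it under `yolov5.models`:

```python
# Assumption: `bash build_wheel.sh` has run and the resulting wheel is installed.
import yolov5  # __init__.py (written by build_wheel.sh) re-exports NM_INTEGRATED

print(yolov5.NM_INTEGRATED)  # True -- marker that this integrated fork is installed

# Repo-root imports like `from models.yolo import Model` become package imports:
from yolov5.models.yolo import Model  # Model is defined upstream in models/yolo.py
```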
Intelligence", + ] +) diff --git a/train.py b/train.py index 738155ad1f77..8ec3c4533a47 100644 --- a/train.py +++ b/train.py @@ -87,9 +87,9 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Save run settings if not evolve: with open(save_dir / 'hyp.yaml', 'w') as f: - yaml.dump(hyp, f, sort_keys=False) + yaml.safe_dump(hyp, f, sort_keys=False) with open(save_dir / 'opt.yaml', 'w') as f: - yaml.dump(vars(opt), f, sort_keys=False) + yaml.safe_dump(vars(opt), f, sort_keys=False) # Loggers data_dict = None @@ -492,7 +492,11 @@ def parse_opt(known=False): parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch') parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)') parser.add_argument('--rect', action='store_true', help='rectangular training') - parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training') + parser.add_argument('--resume', + nargs='?', + const=True, + default=False, + help='resume most recent training. When true, ignores --recipe arg and re-uses saved recipe (if exists)') parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') parser.add_argument('--noval', action='store_true', help='only validate final epoch') parser.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor') @@ -542,7 +546,7 @@ def main(opt, callbacks=Callbacks()): ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run() # specified or most recent path assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist' with open(Path(ckpt).parent.parent / 'opt.yaml', errors='ignore') as f: - opt = argparse.Namespace(**yaml.load(f, Loader=yaml.SafeLoader)) # replace + opt = argparse.Namespace(**yaml.safe_load(f)) # replace opt.cfg, opt.weights, opt.resume = '', ckpt, True # reinstate LOGGER.info(f'Resuming training from {ckpt}') else: @@ -553,7 +557,7 @@ def main(opt, callbacks=Callbacks()): if opt.project == str(ROOT / 'runs/train'): # if default project name, rename to runs/evolve opt.project = str(ROOT / 'runs/evolve') opt.exist_ok, opt.resume = opt.resume, False # pass resume to exist_ok and disable resume - opt.save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok) # increment run + opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run # DDP mode device = select_device(opt.device, batch_size=opt.batch_size) diff --git a/utils/general.py b/utils/general.py index dcdbf95ddca1..f0a7859ff78a 100755 --- a/utils/general.py +++ b/utils/general.py @@ -40,6 +40,7 @@ NUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads VERBOSE = str(os.getenv('YOLOv5_VERBOSE', True)).lower() == 'true' # global verbose mode FONT = 'Arial.ttf' # https://ultralytics.com/assets/Arial.ttf +NM_INTEGRATED=True torch.set_printoptions(linewidth=320, precision=5, profile='long') np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5 @@ -405,7 +406,11 @@ def check_file(file, suffix=''): return file else: # search files = [] + if "models_v5.0/" in file: + files.extend(glob.glob(str(ROOT / '**' / file), recursive=True)) for d in 'data', 'models', 'utils': # search directories + if file.startswith(d + os.path.sep): + file = file[len(d)+1:] files.extend(glob.glob(str(ROOT / d / '**' / file), recursive=True)) # find file assert len(files), f'File 
diff --git a/utils/general.py b/utils/general.py
index dcdbf95ddca1..f0a7859ff78a 100755
--- a/utils/general.py
+++ b/utils/general.py
@@ -40,6 +40,7 @@
 NUM_THREADS = min(8, max(1, os.cpu_count() - 1))  # number of YOLOv5 multiprocessing threads
 VERBOSE = str(os.getenv('YOLOv5_VERBOSE', True)).lower() == 'true'  # global verbose mode
 FONT = 'Arial.ttf'  # https://ultralytics.com/assets/Arial.ttf
+NM_INTEGRATED = True

 torch.set_printoptions(linewidth=320, precision=5, profile='long')
 np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format})  # format short g, %precision=5
@@ -405,7 +406,11 @@ def check_file(file, suffix=''):
         return file
     else:  # search
         files = []
+        if "models_v5.0/" in file:
+            files.extend(glob.glob(str(ROOT / '**' / file), recursive=True))
         for d in 'data', 'models', 'utils':  # search directories
+            if file.startswith(d + os.path.sep):
+                file = file[len(d) + 1:]
             files.extend(glob.glob(str(ROOT / d / '**' / file), recursive=True))  # find file
         assert len(files), f'File not found: {file}'  # assert file was found
         assert len(files) == 1, f"Multiple files match '{file}', specify exact path: {files}"  # assert unique
@@ -707,12 +712,13 @@ def clip_coords(boxes, shape):

 def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,
                         labels=(), max_det=300):
-    """Runs Non-Maximum Suppression (NMS) on inference results
+    """Non-Maximum Suppression (NMS) on inference results to reject overlapping bounding boxes

     Returns:
          list of detections, on (n,6) tensor per image [xyxy, conf, cls]
     """

+    bs = prediction.shape[0]  # batch size
     nc = prediction.shape[2] - 5  # number of classes
     xc = prediction[..., 4] > conf_thres  # candidates

@@ -721,18 +727,19 @@ def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=Non
     assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0'

     # Settings
-    min_wh, max_wh = 2, 7680  # (pixels) minimum and maximum box width and height
+    # min_wh = 2  # (pixels) minimum box width and height
+    max_wh = 7680  # (pixels) maximum box width and height
     max_nms = 30000  # maximum number of boxes into torchvision.ops.nms()
-    time_limit = 10.0  # seconds to quit after
+    time_limit = 0.030 * bs  # seconds to quit after
     redundant = True  # require redundant detections
     multi_label &= nc > 1  # multiple labels per box (adds 0.5ms/img)
     merge = False  # use merge-NMS

     t = time.time()
-    output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]
+    output = [torch.zeros((0, 6), device=prediction.device)] * bs
     for xi, x in enumerate(prediction):  # image index, image inference
         # Apply constraints
-        x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0  # width-height
+        # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0  # width-height
         x = x[xc[xi]]  # confidence

         # Cat apriori labels if autolabelling
@@ -793,7 +800,7 @@ def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=Non

         output[xi] = x[i]
         if (time.time() - t) > time_limit:
-            LOGGER.warning(f'WARNING: NMS time limit {time_limit}s exceeded')
+            LOGGER.warning(f'WARNING: NMS time limit {time_limit:.3f}s exceeded')
             break  # time limit exceeded

     return output
diff --git a/utils/sparse.py b/utils/sparse.py
index 6004223e2cc4..34a55029a8b6 100644
--- a/utils/sparse.py
+++ b/utils/sparse.py
@@ -57,9 +57,6 @@ def state_dict(self):
         }

     def apply_checkpoint_structure(self):
-        if not self.enabled:
-            return
-
         if self.checkpoint_manager:
             self.checkpoint_manager.apply_structure(self.model, math.inf)
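To illustrate the batch-scaled NMS budget introduced above (`time_limit = 0.030 * bs` instead of a flat 10 s), a sketch with random predictions. The shapes follow the usual 640-pixel YOLOv5 head output (25200 boxes, 5 + 80 COCO channels), and the `yolov5.*` import path assumes the wheel built by `build_wheel.sh` is installed:

```python
import torch

from yolov5.utils.general import non_max_suppression  # packaged path per this diff

pred = torch.rand(8, 25200, 85)  # batch of 8; 85 = xywh + obj conf + 80 class scores
out = non_max_suppression(pred, conf_thres=0.25, iou_thres=0.45)
print(len(out))  # 8 -- one (n, 6) tensor [xyxy, conf, cls] per image
# internal budget is time_limit = 0.030 * 8 = 0.240 s, logged as '0.240s' if exceeded
```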