From e8a41e8916a9372709a09a0b1c538c6425fed778 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 16 Jan 2021 01:15:38 -0800 Subject: [PATCH 001/254] prevent check_git_status() in docker images (#1951) * prevent check_git_status() running docker images * Update general.py --- utils/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index 3247b66da0ce..92cf683c9d48 100755 --- a/utils/general.py +++ b/utils/general.py @@ -60,7 +60,7 @@ def check_git_status(): # Recommend 'git pull' if code is out of date print(colorstr('github: '), end='') try: - if Path('.git').exists() and check_online(): + if Path('.git').exists() and not Path('/.dockerenv').exists() and check_online(): url = subprocess.check_output( 'git fetch && git config --get remote.origin.url', shell=True).decode('utf-8')[:-1] n = int(subprocess.check_output( From b26a2f624286a4b6cfb8fd3f9a97ba95b356db62 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 17 Jan 2021 11:55:25 -0800 Subject: [PATCH 002/254] check_git_status() when not exist /workspace (#1966) --- utils/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index 92cf683c9d48..23dbc93ec1c4 100755 --- a/utils/general.py +++ b/utils/general.py @@ -60,7 +60,7 @@ def check_git_status(): # Recommend 'git pull' if code is out of date print(colorstr('github: '), end='') try: - if Path('.git').exists() and not Path('/.dockerenv').exists() and check_online(): + if Path('.git').exists() and not Path('/workspace').exists() and check_online(): # not exist '/.dockerenv' url = subprocess.check_output( 'git fetch && git config --get remote.origin.url', shell=True).decode('utf-8')[:-1] n = int(subprocess.check_output( From 3a42abd18a892cfc4942ad63d51a1744916b7a9f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 17 Jan 2021 13:04:16 -0800 Subject: [PATCH 003/254] Created using Colaboratory --- tutorial.ipynb | 256 
+++++++++++++++++++++++++------------------------ 1 file changed, 131 insertions(+), 125 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 853f42f196d8..e60e546c53a2 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -16,7 +16,7 @@ "accelerator": "GPU", "widgets": { "application/vnd.jupyter.widget-state+json": { - "02ac0588602847eea00a0205f87bcce2": { + "811fd52fef65422c8267bafcde8a2c3d": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "state": { @@ -28,15 +28,15 @@ "_view_count": null, "_view_module_version": "1.5.0", "box_style": "", - "layout": "IPY_MODEL_c472ea49806447a68b5a9221a4ddae85", + "layout": "IPY_MODEL_8f41b90117224eef9133a9c3a103dbba", "_model_module": "@jupyter-widgets/controls", "children": [ - "IPY_MODEL_091fdf499bd44a80af7281d16da4aa93", - "IPY_MODEL_c79f69c959de4427ba102a87a9f46d80" + "IPY_MODEL_ca2fb37af6ed43d4a74cdc9f2ac5c4a5", + "IPY_MODEL_29419ae5ebb9403ea73f7e5a68037bdd" ] } }, - "c472ea49806447a68b5a9221a4ddae85": { + "8f41b90117224eef9133a9c3a103dbba": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -87,12 +87,12 @@ "left": null } }, - "091fdf499bd44a80af7281d16da4aa93": { + "ca2fb37af6ed43d4a74cdc9f2ac5c4a5": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "state": { "_view_name": "ProgressView", - "style": "IPY_MODEL_c42ae5af74a0491187827d0a1fc259bb", + "style": "IPY_MODEL_6511b4dfb10b48d1bc98bcfb3987bfa0", "_dom_classes": [], "description": "100%", "_model_name": "FloatProgressModel", @@ -107,30 +107,30 @@ "min": 0, "description_tooltip": null, "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_5a90f72d3a2d46cb9ad915daa3ead8b4" + "layout": "IPY_MODEL_64f0badf1a8f489885aa984dd62d37dc" } }, - "c79f69c959de4427ba102a87a9f46d80": { + "29419ae5ebb9403ea73f7e5a68037bdd": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "state": { "_view_name": "HTMLView", - "style": 
"IPY_MODEL_2a7ed6611da34662b10e37fd4f4e4438", + "style": "IPY_MODEL_f569911c5cfc4d81bb1bdfa83447afc8", "_dom_classes": [], "description": "", "_model_name": "HTMLModel", "placeholder": "​", "_view_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", - "value": " 781M/781M [00:23<00:00, 35.1MB/s]", + "value": " 781M/781M [00:23<00:00, 34.2MB/s]", "_view_count": null, "_view_module_version": "1.5.0", "description_tooltip": null, "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_fead0160658445bf9e966daa4481cad0" + "layout": "IPY_MODEL_84943ade566440aaa2dcf3b3b27e7074" } }, - "c42ae5af74a0491187827d0a1fc259bb": { + "6511b4dfb10b48d1bc98bcfb3987bfa0": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "state": { @@ -145,7 +145,7 @@ "_model_module": "@jupyter-widgets/controls" } }, - "5a90f72d3a2d46cb9ad915daa3ead8b4": { + "64f0badf1a8f489885aa984dd62d37dc": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -196,7 +196,7 @@ "left": null } }, - "2a7ed6611da34662b10e37fd4f4e4438": { + "f569911c5cfc4d81bb1bdfa83447afc8": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "state": { @@ -210,7 +210,7 @@ "_model_module": "@jupyter-widgets/controls" } }, - "fead0160658445bf9e966daa4481cad0": { + "84943ade566440aaa2dcf3b3b27e7074": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -261,7 +261,7 @@ "left": null } }, - "cf1ab9fde7444d3e874fcd407ba8f0f8": { + "8501ed1563e4452eac9df6b7a66e8f8c": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "state": { @@ -273,15 +273,15 @@ "_view_count": null, "_view_module_version": "1.5.0", "box_style": "", - "layout": "IPY_MODEL_9ee03f9c85f34155b2645e89c9211547", + "layout": "IPY_MODEL_d2bb96801e1f46f4a58e02534f7026ff", "_model_module": "@jupyter-widgets/controls", "children": [ - "IPY_MODEL_933ebc451c09490aadf71afbbb3dff2a", - 
"IPY_MODEL_8e7c55cbca624432a84fa7ad8f3a4016" + "IPY_MODEL_468a796ef06b4a24bcba6fbd4a0a8db5", + "IPY_MODEL_42ad5c1ea7be4835bffebf90642178f1" ] } }, - "9ee03f9c85f34155b2645e89c9211547": { + "d2bb96801e1f46f4a58e02534f7026ff": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -332,50 +332,50 @@ "left": null } }, - "933ebc451c09490aadf71afbbb3dff2a": { + "468a796ef06b4a24bcba6fbd4a0a8db5": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "state": { "_view_name": "ProgressView", - "style": "IPY_MODEL_dd62d83b35d04a178840772e82bd2f2e", + "style": "IPY_MODEL_c58b5536d98f4814831934e9c30c4d78", "_dom_classes": [], "description": "100%", "_model_name": "FloatProgressModel", "bar_style": "success", - "max": 22090455, + "max": 22091032, "_view_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", - "value": 22090455, + "value": 22091032, "_view_count": null, "_view_module_version": "1.5.0", "orientation": "horizontal", "min": 0, "description_tooltip": null, "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_d5c4f3d1c8b046e3a163faaa6b3a51ab" + "layout": "IPY_MODEL_505597101151486ea29e9ab754544d27" } }, - "8e7c55cbca624432a84fa7ad8f3a4016": { + "42ad5c1ea7be4835bffebf90642178f1": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "state": { "_view_name": "HTMLView", - "style": "IPY_MODEL_78d1da8efb504b03878ca9ce5b404006", + "style": "IPY_MODEL_de6e7b4b4a1c408c9f89d89b07a13bcd", "_dom_classes": [], "description": "", "_model_name": "HTMLModel", "placeholder": "​", "_view_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", - "value": " 21.1M/21.1M [00:01<00:00, 16.9MB/s]", + "value": " 21.1M/21.1M [00:01<00:00, 18.2MB/s]", "_view_count": null, "_view_module_version": "1.5.0", "description_tooltip": null, "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_d28208ba1213436a93926a01d99d97ae" + "layout": 
"IPY_MODEL_f5cc9c7d4c274b2d81327ba3163c43fd" } }, - "dd62d83b35d04a178840772e82bd2f2e": { + "c58b5536d98f4814831934e9c30c4d78": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "state": { @@ -390,7 +390,7 @@ "_model_module": "@jupyter-widgets/controls" } }, - "d5c4f3d1c8b046e3a163faaa6b3a51ab": { + "505597101151486ea29e9ab754544d27": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -441,7 +441,7 @@ "left": null } }, - "78d1da8efb504b03878ca9ce5b404006": { + "de6e7b4b4a1c408c9f89d89b07a13bcd": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "state": { @@ -455,7 +455,7 @@ "_model_module": "@jupyter-widgets/controls" } }, - "d28208ba1213436a93926a01d99d97ae": { + "f5cc9c7d4c274b2d81327ba3163c43fd": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -550,7 +550,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "888d5c41-00e9-47d8-d230-dded99325bea" + "outputId": "c6ad57c2-40b7-4764-b07d-19ee2ceaabaf" }, "source": [ "!git clone https://github.com/ultralytics/yolov5 # clone repo\n", @@ -563,7 +563,7 @@ "clear_output()\n", "print('Setup complete. 
Using torch %s %s' % (torch.__version__, torch.cuda.get_device_properties(0) if torch.cuda.is_available() else 'CPU'))" ], - "execution_count": null, + "execution_count": 3, "outputs": [ { "output_type": "stream", @@ -670,32 +670,32 @@ "id": "WQPtK1QYVaD_", "colab": { "base_uri": "https://localhost:8080/", - "height": 66, + "height": 65, "referenced_widgets": [ - "02ac0588602847eea00a0205f87bcce2", - "c472ea49806447a68b5a9221a4ddae85", - "091fdf499bd44a80af7281d16da4aa93", - "c79f69c959de4427ba102a87a9f46d80", - "c42ae5af74a0491187827d0a1fc259bb", - "5a90f72d3a2d46cb9ad915daa3ead8b4", - "2a7ed6611da34662b10e37fd4f4e4438", - "fead0160658445bf9e966daa4481cad0" + "811fd52fef65422c8267bafcde8a2c3d", + "8f41b90117224eef9133a9c3a103dbba", + "ca2fb37af6ed43d4a74cdc9f2ac5c4a5", + "29419ae5ebb9403ea73f7e5a68037bdd", + "6511b4dfb10b48d1bc98bcfb3987bfa0", + "64f0badf1a8f489885aa984dd62d37dc", + "f569911c5cfc4d81bb1bdfa83447afc8", + "84943ade566440aaa2dcf3b3b27e7074" ] }, - "outputId": "780d8f5f-766e-4b99-e370-11f9b884c27a" + "outputId": "59a7a546-8492-492e-861d-70a2c85a6794" }, "source": [ "# Download COCO val2017\n", "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017val.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../ && rm tmp.zip" ], - "execution_count": null, + "execution_count": 6, "outputs": [ { "output_type": "display_data", "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "02ac0588602847eea00a0205f87bcce2", + "model_id": "811fd52fef65422c8267bafcde8a2c3d", "version_minor": 0, "version_major": 2 }, @@ -723,56 +723,58 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "013935a5-ba81-4810-b723-0cb01cf7bc79" + "outputId": "427c211e-e283-4e87-f7b3-7b8dfb11a4a5" }, "source": [ "# Run YOLOv5x on COCO val2017\n", "!python test.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65" ], - "execution_count": null, + "execution_count": 7, "outputs": [ { "output_type": "stream", "text": 
[ - "Namespace(augment=False, batch_size=32, conf_thres=0.001, data='./data/coco.yaml', device='', exist_ok=False, img_size=640, iou_thres=0.65, name='exp', project='runs/test', save_conf=False, save_json=True, save_txt=False, single_cls=False, task='val', verbose=False, weights=['yolov5x.pt'])\n", - "Using torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16130MB)\n", + "Namespace(augment=False, batch_size=32, conf_thres=0.001, data='./data/coco.yaml', device='', exist_ok=False, img_size=640, iou_thres=0.65, name='exp', project='runs/test', save_conf=False, save_hybrid=False, save_json=True, save_txt=False, single_cls=False, task='val', verbose=False, weights=['yolov5x.pt'])\n", + "YOLOv5 v4.0-21-gb26a2f6 torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16130.5MB)\n", "\n", - "Downloading https://github.com/ultralytics/yolov5/releases/download/v3.1/yolov5x.pt to yolov5x.pt...\n", - "100% 170M/170M [00:05<00:00, 32.6MB/s]\n", + "Downloading https://github.com/ultralytics/yolov5/releases/download/v4.0/yolov5x.pt to yolov5x.pt...\n", + "100% 168M/168M [00:05<00:00, 31.9MB/s]\n", "\n", "Fusing layers... \n", - "Model Summary: 484 layers, 88922205 parameters, 0 gradients\n", - "Scanning labels ../coco/labels/val2017.cache (4952 found, 0 missing, 48 empty, 0 duplicate, for 5000 images): 5000it [00:00, 14785.71it/s]\n", - " Class Images Targets P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:30<00:00, 1.74it/s]\n", - " all 5e+03 3.63e+04 0.409 0.754 0.672 0.484\n", - "Speed: 5.9/2.1/7.9 ms inference/NMS/total per 640x640 image at batch-size 32\n", + "Model Summary: 476 layers, 87730285 parameters, 0 gradients, 218.8 GFLOPS\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco/labels/val2017' for images and labels... 
4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:01<00:00, 2791.81it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: ../coco/labels/val2017.cache\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco/labels/val2017.cache' for images and labels... 4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:00<00:00, 13332180.55it/s]\n", + " Class Images Targets P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:30<00:00, 1.73it/s]\n", + " all 5e+03 3.63e+04 0.419 0.765 0.68 0.486\n", + "Speed: 5.2/2.0/7.2 ms inference/NMS/total per 640x640 image at batch-size 32\n", "\n", "Evaluating pycocotools mAP... saving runs/test/exp/yolov5x_predictions.json...\n", "loading annotations into memory...\n", - "Done (t=0.43s)\n", + "Done (t=0.41s)\n", "creating index...\n", "index created!\n", "Loading and preparing results...\n", - "DONE (t=4.67s)\n", + "DONE (t=5.26s)\n", "creating index...\n", "index created!\n", "Running per image evaluation...\n", "Evaluate annotation type *bbox*\n", - "DONE (t=92.11s).\n", + "DONE (t=93.97s).\n", "Accumulating evaluation results...\n", - "DONE (t=13.24s).\n", - " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.492\n", - " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.676\n", - " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.534\n", - " Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.318\n", - " Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.541\n", - " Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.633\n", - " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.376\n", - " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.617\n", - " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.670\n", - " Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.493\n", - " Average Recall (AR) @[ IoU=0.50:0.95 | 
area=medium | maxDets=100 ] = 0.723\n", - " Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.812\n", + "DONE (t=15.06s).\n", + " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.501\n", + " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.687\n", + " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.544\n", + " Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.338\n", + " Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.548\n", + " Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.637\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.378\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.628\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.680\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.520\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.729\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.826\n", "Results saved to runs/test/exp\n" ], "name": "stdout" @@ -833,37 +835,37 @@ "id": "Knxi2ncxWffW", "colab": { "base_uri": "https://localhost:8080/", - "height": 66, + "height": 65, "referenced_widgets": [ - "cf1ab9fde7444d3e874fcd407ba8f0f8", - "9ee03f9c85f34155b2645e89c9211547", - "933ebc451c09490aadf71afbbb3dff2a", - "8e7c55cbca624432a84fa7ad8f3a4016", - "dd62d83b35d04a178840772e82bd2f2e", - "d5c4f3d1c8b046e3a163faaa6b3a51ab", - "78d1da8efb504b03878ca9ce5b404006", - "d28208ba1213436a93926a01d99d97ae" + "8501ed1563e4452eac9df6b7a66e8f8c", + "d2bb96801e1f46f4a58e02534f7026ff", + "468a796ef06b4a24bcba6fbd4a0a8db5", + "42ad5c1ea7be4835bffebf90642178f1", + "c58b5536d98f4814831934e9c30c4d78", + "505597101151486ea29e9ab754544d27", + "de6e7b4b4a1c408c9f89d89b07a13bcd", + "f5cc9c7d4c274b2d81327ba3163c43fd" ] }, - "outputId": 
"59f9a94b-21e1-4626-f36a-a8e1b1e5c8f6" + "outputId": "c68a3db4-1314-46b4-9e52-83532eb65749" }, "source": [ "# Download COCO128\n", "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../ && rm tmp.zip" ], - "execution_count": null, + "execution_count": 4, "outputs": [ { "output_type": "display_data", "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "cf1ab9fde7444d3e874fcd407ba8f0f8", + "model_id": "8501ed1563e4452eac9df6b7a66e8f8c", "version_minor": 0, "version_major": 2 }, "text/plain": [ - "HBox(children=(FloatProgress(value=0.0, max=22090455.0), HTML(value='')))" + "HBox(children=(FloatProgress(value=0.0, max=22091032.0), HTML(value='')))" ] }, "metadata": { @@ -923,86 +925,90 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "138f2d1d-364c-405a-cf13-ea91a2aff915" + "outputId": "6af7116a-01ab-4b94-e5d7-b37c17dc95de" }, "source": [ "# Train YOLOv5s on COCO128 for 3 epochs\n", "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --nosave --cache" ], - "execution_count": null, + "execution_count": 5, "outputs": [ { "output_type": "stream", "text": [ - "Using torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16130MB)\n", + "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", + "YOLOv5 v4.0-21-gb26a2f6 torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16130.5MB)\n", "\n", - "Namespace(adam=False, batch_size=16, bucket='', cache_images=True, cfg='', data='./data/coco128.yaml', device='', epochs=3, evolve=False, exist_ok=False, global_rank=-1, hyp='data/hyp.scratch.yaml', image_weights=False, img_size=[640, 640], local_rank=-1, log_imgs=16, multi_scale=False, name='exp', noautoanchor=False, nosave=True, notest=False, project='runs/train', rect=False, resume=False, save_dir='runs/train/exp', single_cls=False, sync_bn=False, total_batch_size=16, 
weights='yolov5s.pt', workers=8, world_size=1)\n", + "Namespace(adam=False, batch_size=16, bucket='', cache_images=True, cfg='', data='./data/coco128.yaml', device='', epochs=3, evolve=False, exist_ok=False, global_rank=-1, hyp='data/hyp.scratch.yaml', image_weights=False, img_size=[640, 640], local_rank=-1, log_artifacts=False, log_imgs=16, multi_scale=False, name='exp', noautoanchor=False, nosave=True, notest=False, project='runs/train', quad=False, rect=False, resume=False, save_dir='runs/train/exp', single_cls=False, sync_bn=False, total_batch_size=16, weights='yolov5s.pt', workers=8, world_size=1)\n", + "\u001b[34m\u001b[1mwandb: \u001b[0mInstall Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)\n", "Start Tensorboard with \"tensorboard --logdir runs/train\", view at http://localhost:6006/\n", - "2020-11-20 11:45:17.042357: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcudart.so.10.1\n", - "Hyperparameters {'lr0': 0.01, 'lrf': 0.2, 'momentum': 0.937, 'weight_decay': 0.0005, 'warmup_epochs': 3.0, 'warmup_momentum': 0.8, 'warmup_bias_lr': 0.1, 'box': 0.05, 'cls': 0.5, 'cls_pw': 1.0, 'obj': 1.0, 'obj_pw': 1.0, 'iou_t': 0.2, 'anchor_t': 4.0, 'fl_gamma': 0.0, 'hsv_h': 0.015, 'hsv_s': 0.7, 'hsv_v': 0.4, 'degrees': 0.0, 'translate': 0.1, 'scale': 0.5, 'shear': 0.0, 'perspective': 0.0, 'flipud': 0.0, 'fliplr': 0.5, 'mosaic': 1.0, 'mixup': 0.0}\n", - "Downloading https://github.com/ultralytics/yolov5/releases/download/v3.1/yolov5s.pt to yolov5s.pt...\n", - "100% 14.5M/14.5M [00:01<00:00, 14.8MB/s]\n", + "2021-01-17 19:56:03.945851: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.10.1\n", + "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.2, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, 
fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0\n", + "Downloading https://github.com/ultralytics/yolov5/releases/download/v4.0/yolov5s.pt to yolov5s.pt...\n", + "100% 14.1M/14.1M [00:00<00:00, 15.8MB/s]\n", "\n", "\n", " from n params module arguments \n", " 0 -1 1 3520 models.common.Focus [3, 32, 3] \n", " 1 -1 1 18560 models.common.Conv [32, 64, 3, 2] \n", - " 2 -1 1 19904 models.common.BottleneckCSP [64, 64, 1] \n", + " 2 -1 1 18816 models.common.C3 [64, 64, 1] \n", " 3 -1 1 73984 models.common.Conv [64, 128, 3, 2] \n", - " 4 -1 1 161152 models.common.BottleneckCSP [128, 128, 3] \n", + " 4 -1 1 156928 models.common.C3 [128, 128, 3] \n", " 5 -1 1 295424 models.common.Conv [128, 256, 3, 2] \n", - " 6 -1 1 641792 models.common.BottleneckCSP [256, 256, 3] \n", + " 6 -1 1 625152 models.common.C3 [256, 256, 3] \n", " 7 -1 1 1180672 models.common.Conv [256, 512, 3, 2] \n", " 8 -1 1 656896 models.common.SPP [512, 512, [5, 9, 13]] \n", - " 9 -1 1 1248768 models.common.BottleneckCSP [512, 512, 1, False] \n", + " 9 -1 1 1182720 models.common.C3 [512, 512, 1, False] \n", " 10 -1 1 131584 models.common.Conv [512, 256, 1, 1] \n", " 11 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", " 12 [-1, 6] 1 0 models.common.Concat [1] \n", - " 13 -1 1 378624 models.common.BottleneckCSP [512, 256, 1, False] \n", + " 13 -1 1 361984 models.common.C3 [512, 256, 1, False] \n", " 14 -1 1 33024 models.common.Conv [256, 128, 1, 1] \n", " 15 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", " 16 [-1, 4] 1 0 models.common.Concat [1] \n", - " 17 -1 1 95104 models.common.BottleneckCSP [256, 128, 1, False] \n", + " 17 -1 1 90880 models.common.C3 [256, 128, 1, False] \n", " 18 -1 1 147712 models.common.Conv [128, 128, 3, 2] \n", " 19 [-1, 14] 1 0 models.common.Concat [1] \n", - " 20 -1 1 313088 models.common.BottleneckCSP [256, 256, 1, False] \n", 
+ " 20 -1 1 296448 models.common.C3 [256, 256, 1, False] \n", " 21 -1 1 590336 models.common.Conv [256, 256, 3, 2] \n", " 22 [-1, 10] 1 0 models.common.Concat [1] \n", - " 23 -1 1 1248768 models.common.BottleneckCSP [512, 512, 1, False] \n", + " 23 -1 1 1182720 models.common.C3 [512, 512, 1, False] \n", " 24 [17, 20, 23] 1 229245 models.yolo.Detect [80, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n", - "Model Summary: 283 layers, 7468157 parameters, 7468157 gradients\n", + "Model Summary: 283 layers, 7276605 parameters, 7276605 gradients, 17.1 GFLOPS\n", "\n", - "Transferred 370/370 items from yolov5s.pt\n", - "Optimizer groups: 62 .bias, 70 conv.weight, 59 other\n", - "Scanning images: 100% 128/128 [00:00<00:00, 5395.63it/s]\n", - "Scanning labels ../coco128/labels/train2017.cache (126 found, 0 missing, 2 empty, 0 duplicate, for 128 images): 128it [00:00, 13972.28it/s]\n", - "Caching images (0.1GB): 100% 128/128 [00:00<00:00, 173.55it/s]\n", - "Scanning labels ../coco128/labels/train2017.cache (126 found, 0 missing, 2 empty, 0 duplicate, for 128 images): 128it [00:00, 8693.98it/s]\n", - "Caching images (0.1GB): 100% 128/128 [00:00<00:00, 133.30it/s]\n", - "NumExpr defaulting to 2 threads.\n", + "Transferred 362/362 items from yolov5s.pt\n", + "Scaled weight_decay = 0.0005\n", + "Optimizer groups: 62 .bias, 62 conv.weight, 59 other\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '../coco128/labels/train2017' for images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 2647.74it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: ../coco128/labels/train2017.cache\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '../coco128/labels/train2017.cache' for images and labels... 
128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 1503840.09it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 176.03it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco128/labels/train2017.cache' for images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 24200.82it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:01<00:00, 123.25it/s]\n", + "Plotting labels... \n", "\n", - "Analyzing anchors... anchors/target = 4.26, Best Possible Recall (BPR) = 0.9946\n", + "\u001b[34m\u001b[1mautoanchor: \u001b[0mAnalyzing anchors... anchors/target = 4.26, Best Possible Recall (BPR) = 0.9946\n", "Image sizes 640 train, 640 test\n", "Using 2 dataloader workers\n", "Logging results to runs/train/exp\n", "Starting training for 3 epochs...\n", "\n", " Epoch gpu_mem box obj cls total targets img_size\n", - " 0/2 5.24G 0.04202 0.06745 0.01503 0.1245 194 640: 100% 8/8 [00:03<00:00, 2.01it/s]\n", - " Class Images Targets P R mAP@.5 mAP@.5:.95: 100% 8/8 [00:03<00:00, 2.40it/s]\n", - " all 128 929 0.404 0.758 0.701 0.45\n", + " 0/2 3.27G 0.04357 0.06779 0.01869 0.1301 207 640: 100% 8/8 [00:04<00:00, 1.95it/s]\n", + " Class Images Targets P R mAP@.5 mAP@.5:.95: 100% 8/8 [00:05<00:00, 1.36it/s]\n", + " all 128 929 0.392 0.732 0.657 0.428\n", "\n", " Epoch gpu_mem box obj cls total targets img_size\n", - " 1/2 5.12G 0.04461 0.05874 0.0169 0.1202 142 640: 100% 8/8 [00:01<00:00, 4.14it/s]\n", - " Class Images Targets P R mAP@.5 mAP@.5:.95: 100% 8/8 [00:01<00:00, 5.75it/s]\n", - " all 128 929 0.403 0.772 0.703 0.453\n", + " 1/2 7.47G 0.04308 0.06636 0.02083 0.1303 227 640: 100% 8/8 [00:02<00:00, 3.88it/s]\n", + " Class Images Targets P R mAP@.5 mAP@.5:.95: 100% 8/8 [00:01<00:00, 5.07it/s]\n", + " all 128 929 0.387 0.737 0.657 0.432\n", "\n", " Epoch gpu_mem box obj cls total targets img_size\n", - " 2/2 5.12G 0.04445 0.06545 0.01667 0.1266 149 
640: 100% 8/8 [00:01<00:00, 4.15it/s]\n", - " Class Images Targets P R mAP@.5 mAP@.5:.95: 100% 8/8 [00:06<00:00, 1.18it/s]\n", - " all 128 929 0.395 0.767 0.702 0.452\n", - "Optimizer stripped from runs/train/exp/weights/last.pt, 15.2MB\n", - "3 epochs completed in 0.006 hours.\n", + " 2/2 7.48G 0.04461 0.06864 0.01866 0.1319 191 640: 100% 8/8 [00:02<00:00, 3.57it/s]\n", + " Class Images Targets P R mAP@.5 mAP@.5:.95: 100% 8/8 [00:02<00:00, 2.82it/s]\n", + " all 128 929 0.385 0.742 0.658 0.431\n", + "Optimizer stripped from runs/train/exp/weights/last.pt, 14.8MB\n", + "3 epochs completed in 0.007 hours.\n", "\n" ], "name": "stdout" From 18c25889dc080d819b4ce29e26d059ede4b5a0b4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 17 Jan 2021 13:11:28 -0800 Subject: [PATCH 004/254] Update tutorial.ipynb --- tutorial.ipynb | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index e60e546c53a2..7fc9b37a31dc 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -604,15 +604,15 @@ { "output_type": "stream", "text": [ - "Namespace(agnostic_nms=False, augment=False, classes=None, conf_thres=0.25, device='', img_size=640, iou_thres=0.45, save_conf=False, save_dir='runs/detect', save_txt=False, source='data/images/', update=False, view_img=False, weights=['yolov5s.pt'])\n", - "Using torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16130MB)\n", + "Namespace(agnostic_nms=False, augment=False, classes=None, conf_thres=0.25, device='', exist_ok=False, img_size=640, iou_thres=0.45, name='exp', project='runs/detect', save_conf=False, save_txt=False, source='data/images/', update=False, view_img=False, weights=['yolov5s.pt'])\n", + "YOLOv5 v4.0-21-gb26a2f6 torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16130.5MB)\n", "\n", "Fusing layers... \n", - "Model Summary: 232 layers, 7459581 parameters, 0 gradients\n", - "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 buss, 1 skateboards, Done. 
(0.012s)\n", - "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, Done. (0.012s)\n", + "Model Summary: 224 layers, 7266973 parameters, 0 gradients, 17.0 GFLOPS\n", + "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 buss, 1 skateboards, Done. (0.011s)\n", + "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, Done. (0.011s)\n", "Results saved to runs/detect/exp\n", - "Done. (0.113s)\n" + "Done. (0.110s)\n" ], "name": "stdout" }, From b5d851d653a974e48c47b9fac7699360747c3520 Mon Sep 17 00:00:00 2001 From: "huntr.dev | the place to protect open source" Date: Sun, 17 Jan 2021 22:10:16 +0000 Subject: [PATCH 005/254] Security Fix for Arbitrary Code Execution - huntr.dev (#1962) Co-authored-by: Anon-Artist <61599526+Anon-Artist@users.noreply.github.com> Co-authored-by: Jamie Slome --- train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/train.py b/train.py index 43cae4b31d06..f403a3c65b05 100644 --- a/train.py +++ b/train.py @@ -59,7 +59,7 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): cuda = device.type != 'cpu' init_seeds(2 + rank) with open(opt.data) as f: - data_dict = yaml.load(f, Loader=yaml.FullLoader) # data dict + data_dict = yaml.load(f, Loader=yaml.SafeLoader) # data dict with torch_distributed_zero_first(rank): check_dataset(data_dict) # check train_path = data_dict['train'] From 35400dc7b730b7816051b9be3feca36ccf5a694b Mon Sep 17 00:00:00 2001 From: Abhiram V <61599526+Anon-Artist@users.noreply.github.com> Date: Tue, 19 Jan 2021 00:16:20 +0530 Subject: [PATCH 006/254] Update plots.py with yaml.SafeLoader (#1968) --- utils/plots.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/plots.py b/utils/plots.py index 47cd70776005..4765069e0377 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -301,7 +301,7 @@ def plot_labels(labels, save_dir=Path(''), loggers=None): def plot_evolution(yaml_file='data/hyp.finetune.yaml'): # from 
utils.plots import *; plot_evolution() # Plot hyperparameter evolution results in evolve.txt with open(yaml_file) as f: - hyp = yaml.load(f, Loader=yaml.FullLoader) + hyp = yaml.load(f, Loader=yaml.SafeLoader) x = np.loadtxt('evolve.txt', ndmin=2) f = fitness(x) # weights = (f - f.min()) ** 2 # for weighted results From 91c30e4effff5c1aa52325b95981ebe3d382326b Mon Sep 17 00:00:00 2001 From: Abhiram V <61599526+Anon-Artist@users.noreply.github.com> Date: Tue, 19 Jan 2021 00:16:46 +0530 Subject: [PATCH 007/254] Update test.py with yaml.SafeLoader (#1969) --- test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test.py b/test.py index 36d18132c782..1c6ea103489b 100644 --- a/test.py +++ b/test.py @@ -68,7 +68,7 @@ def test(data, model.eval() is_coco = data.endswith('coco.yaml') # is COCO dataset with open(data) as f: - data = yaml.load(f, Loader=yaml.FullLoader) # model dict + data = yaml.load(f, Loader=yaml.SafeLoader) # model dict check_dataset(data) # check nc = 1 if single_cls else int(data['nc']) # number of classes iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95 From 45011695474c99398680d510f922fc26da8d357e Mon Sep 17 00:00:00 2001 From: Abhiram V <61599526+Anon-Artist@users.noreply.github.com> Date: Tue, 19 Jan 2021 00:17:00 +0530 Subject: [PATCH 008/254] Update yolo.py with yaml.SafeLoader (#1970) --- models/yolo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/yolo.py b/models/yolo.py index 5dc8b57f4d98..6cf9dde08d1a 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -71,7 +71,7 @@ def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None): # model, input channels, import yaml # for torch hub self.yaml_file = Path(cfg).name with open(cfg) as f: - self.yaml = yaml.load(f, Loader=yaml.FullLoader) # model dict + self.yaml = yaml.load(f, Loader=yaml.SafeLoader) # model dict # Define model ch = self.yaml['ch'] = self.yaml.get('ch', ch) # input channels From 
17751b9891ba1d171380ec9afa59a5047a60b492 Mon Sep 17 00:00:00 2001 From: Abhiram V <61599526+Anon-Artist@users.noreply.github.com> Date: Tue, 19 Jan 2021 00:17:42 +0530 Subject: [PATCH 009/254] Update autoanchor.py with yaml.SafeLoader (#1971) --- utils/autoanchor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/autoanchor.py b/utils/autoanchor.py index c00f0382ff71..5dba9f1ea22f 100644 --- a/utils/autoanchor.py +++ b/utils/autoanchor.py @@ -98,7 +98,7 @@ def print_results(k): if isinstance(path, str): # *.yaml file with open(path) as f: - data_dict = yaml.load(f, Loader=yaml.FullLoader) # model dict + data_dict = yaml.load(f, Loader=yaml.SafeLoader) # model dict from utils.datasets import LoadImagesAndLabels dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True) else: From e9941d50fadce269e33a290f82bf5d9ad2e283b5 Mon Sep 17 00:00:00 2001 From: Abhiram V <61599526+Anon-Artist@users.noreply.github.com> Date: Tue, 19 Jan 2021 00:19:08 +0530 Subject: [PATCH 010/254] Update train.py with yaml.SafeLoader (#1972) --- train.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/train.py b/train.py index f403a3c65b05..83ae7fd92ace 100644 --- a/train.py +++ b/train.py @@ -479,7 +479,7 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist' apriori = opt.global_rank, opt.local_rank with open(Path(ckpt).parent.parent / 'opt.yaml') as f: - opt = argparse.Namespace(**yaml.load(f, Loader=yaml.FullLoader)) # replace + opt = argparse.Namespace(**yaml.load(f, Loader=yaml.SafeLoader)) # replace opt.cfg, opt.weights, opt.resume, opt.batch_size, opt.global_rank, opt.local_rank = '', ckpt, True, opt.total_batch_size, *apriori # reinstate logger.info('Resuming training from %s' % ckpt) else: @@ -503,7 +503,7 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): # Hyperparameters with open(opt.hyp) as f: - hyp = yaml.load(f, 
Loader=yaml.FullLoader) # load hyps + hyp = yaml.load(f, Loader=yaml.SafeLoader) # load hyps # Train logger.info(opt) From b1cf25dd9a7ba6a47703457bc04b8b77e469803b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 18 Jan 2021 10:49:28 -0800 Subject: [PATCH 011/254] check_git_status() asserts (#1977) --- Dockerfile | 4 ++-- utils/general.py | 23 +++++++++++++---------- 2 files changed, 15 insertions(+), 12 deletions(-) diff --git a/Dockerfile b/Dockerfile index 24529d2b9415..4c11b036d6b4 100644 --- a/Dockerfile +++ b/Dockerfile @@ -7,8 +7,8 @@ RUN apt update && apt install -y screen libgl1-mesa-glx # Install python dependencies RUN pip install --upgrade pip COPY requirements.txt . -RUN pip install -r requirements.txt -RUN pip install gsutil +RUN pip install -r requirements.txt gsutil wandb +RUN wandb disabled # Create working directory RUN mkdir -p /usr/src/app diff --git a/utils/general.py b/utils/general.py index 23dbc93ec1c4..4822709d1753 100755 --- a/utils/general.py +++ b/utils/general.py @@ -60,16 +60,19 @@ def check_git_status(): # Recommend 'git pull' if code is out of date print(colorstr('github: '), end='') try: - if Path('.git').exists() and not Path('/workspace').exists() and check_online(): # not exist '/.dockerenv' - url = subprocess.check_output( - 'git fetch && git config --get remote.origin.url', shell=True).decode('utf-8')[:-1] - n = int(subprocess.check_output( - 'git rev-list $(git rev-parse --abbrev-ref HEAD)..origin/master --count', shell=True)) # commits behind - if n > 0: - print(f"⚠️ WARNING: code is out of date by {n} {'commits' if n > 1 else 'commmit'}. 
" - f"Use 'git pull' to update or 'git clone {url}' to download latest.") - else: - print(f'up to date with {url} ✅') + assert Path('.git').exists(), 'skipping check (not a git repository)' + assert not Path('/workspace').exists(), 'skipping check (Docker image)' # not Path('/.dockerenv').exists() + assert check_online(), 'skipping check (offline)' + + cmd = 'git fetch && git config --get remote.origin.url' # github repo url + url = subprocess.check_output(cmd, shell=True).decode()[:-1] + cmd = 'git rev-list $(git rev-parse --abbrev-ref HEAD)..origin/master --count' # commits behind + n = int(subprocess.check_output(cmd, shell=True)) + if n > 0: + print(f"⚠️ WARNING: code is out of date by {n} {'commits' if n > 1 else 'commmit'}. " + f"Use 'git pull' to update or 'git clone {url}' to download latest.") + else: + print(f'up to date with {url} ✅') except Exception as e: print(e) From 1ca2d26b9f3247408b2480330f113ac57f35f9d1 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 19 Jan 2021 11:45:15 -0800 Subject: [PATCH 012/254] Update Dockerfile (#1982) --- Dockerfile | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 4c11b036d6b4..7ff38c449453 100644 --- a/Dockerfile +++ b/Dockerfile @@ -7,8 +7,7 @@ RUN apt update && apt install -y screen libgl1-mesa-glx # Install python dependencies RUN pip install --upgrade pip COPY requirements.txt . 
-RUN pip install -r requirements.txt gsutil wandb -RUN wandb disabled +RUN pip install -r requirements.txt gsutil # Create working directory RUN mkdir -p /usr/src/app From d9212140b355b84e85a473be590720eb8221766c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 19 Jan 2021 13:33:52 -0800 Subject: [PATCH 013/254] Add xywhn2xyxy() (#1983) --- utils/datasets.py | 35 ++++++++++------------------------- utils/general.py | 10 ++++++++++ 2 files changed, 20 insertions(+), 25 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index 6e6e3253771b..a6e3d3f56b9f 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -20,7 +20,7 @@ from torch.utils.data import Dataset from tqdm import tqdm -from utils.general import xyxy2xywh, xywh2xyxy, clean_str +from utils.general import xyxy2xywh, xywh2xyxy, xywhn2xyxy, clean_str from utils.torch_utils import torch_distributed_zero_first # Parameters @@ -515,16 +515,9 @@ def __getitem__(self, index): img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment) shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling - # Load labels - labels = [] - x = self.labels[index] - if x.size > 0: - # Normalized xywh to pixel xyxy format - labels = x.copy() - labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0] # pad width - labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1] # pad height - labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0] - labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1] + labels = self.labels[index].copy() + if labels.size: # normalized xywh to pixel xyxy format + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1]) if self.augment: # Augment imagespace @@ -674,13 +667,9 @@ def load_mosaic(self, index): padh = y1a - y1b # Labels - x = self.labels[index] - labels = x.copy() - if x.size > 0: # Normalized xywh to pixel xyxy format - labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw - labels[:, 2] = h 
* (x[:, 2] - x[:, 4] / 2) + padh - labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw - labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh + labels = self.labels[index].copy() + if labels.size: + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format labels4.append(labels) # Concat/clip labels @@ -737,13 +726,9 @@ def load_mosaic9(self, index): x1, y1, x2, y2 = [max(x, 0) for x in c] # allocate coords # Labels - x = self.labels[index] - labels = x.copy() - if x.size > 0: # Normalized xywh to pixel xyxy format - labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padx - labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + pady - labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padx - labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + pady + labels = self.labels[index].copy() + if labels.size: + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady) # normalized xywh to pixel xyxy format labels9.append(labels) # Image diff --git a/utils/general.py b/utils/general.py index 4822709d1753..37534799c157 100755 --- a/utils/general.py +++ b/utils/general.py @@ -223,6 +223,16 @@ def xywh2xyxy(x): return y +def xywhn2xyxy(x, w=640, h=640, padw=32, padh=32): + # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw # top left x + y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh # top left y + y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw # bottom right x + y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh # bottom right y + return y + + def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None): # Rescale coords (xyxy) from img1_shape to img0_shape if ratio_pad is None: # calculate from img0_shape From 046c37e465d60b9047ff0e91f44594817e5a935c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 20 Jan 2021 18:27:38 -0800 Subject: [PATCH 014/254] verbose on final_epoch (#1997) --- test.py | 3 +-- 
train.py | 1 + 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/test.py b/test.py index 1c6ea103489b..2cbbbba5c9a2 100644 --- a/test.py +++ b/test.py @@ -37,7 +37,6 @@ def test(data, plots=True, log_imgs=0, # number of logged images compute_loss=None): - # Initialize/load model and set device training = model is not None if training: # called by train.py @@ -227,7 +226,7 @@ def test(data, print(pf % ('all', seen, nt.sum(), mp, mr, map50, map)) # Print results per class - if (verbose or (nc <= 20 and not training)) and nc > 1 and len(stats): + if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats): for i, c in enumerate(ap_class): print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i])) diff --git a/train.py b/train.py index 83ae7fd92ace..9e6bd8673726 100644 --- a/train.py +++ b/train.py @@ -344,6 +344,7 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): single_cls=opt.single_cls, dataloader=testloader, save_dir=save_dir, + verbose=nc < 50 and final_epoch, plots=plots and final_epoch, log_imgs=opt.log_imgs if wandb else 0, compute_loss=compute_loss) From 1445ab2b279a87f04d83ac4342142a11c878dc35 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 22 Jan 2021 12:37:12 -0800 Subject: [PATCH 015/254] check_git_status() Windows fix (#2015) * check_git_status() Windows fix * Update general.py * Update general.py * Update general.py * Update general.py * Update general.py * Update general.py --- utils/general.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/utils/general.py b/utils/general.py index 37534799c157..aa137ebde5ff 100755 --- a/utils/general.py +++ b/utils/general.py @@ -4,6 +4,7 @@ import logging import math import os +import platform import random import re import subprocess @@ -65,14 +66,15 @@ def check_git_status(): assert check_online(), 'skipping check (offline)' cmd = 'git fetch && git config --get remote.origin.url' # github repo url - url = subprocess.check_output(cmd, 
shell=True).decode()[:-1] - cmd = 'git rev-list $(git rev-parse --abbrev-ref HEAD)..origin/master --count' # commits behind - n = int(subprocess.check_output(cmd, shell=True)) + url = subprocess.check_output(cmd, shell=True).decode().rstrip() + branch = subprocess.check_output('git branch --show-current', shell=True).decode().rstrip() # current + n = int(subprocess.check_output(f'git rev-list {branch}..origin/master --count', shell=True)) # commits behind if n > 0: - print(f"⚠️ WARNING: code is out of date by {n} {'commits' if n > 1 else 'commmit'}. " - f"Use 'git pull' to update or 'git clone {url}' to download latest.") + s = f"⚠️ WARNING: code is out of date by {n} {'commits' if n > 1 else 'commmit'}. " \ + f"Use 'git pull' to update or 'git clone {url}' to download latest." else: - print(f'up to date with {url} ✅') + s = f'up to date with {url} ✅' + print(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) except Exception as e: print(e) From 8dc68fcbcba6cac9578f5341318de0db3ad2d20b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 22 Jan 2021 14:26:58 -0800 Subject: [PATCH 016/254] Update Dockerfile (#2016) --- Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 7ff38c449453..ce9cdc34f1c3 100644 --- a/Dockerfile +++ b/Dockerfile @@ -42,10 +42,10 @@ COPY . 
/usr/src/app # sudo docker kill $(sudo docker ps -a -q --filter ancestor=ultralytics/yolov5:latest) # Bash into running container -# sudo docker container exec -it ba65811811ab bash +# sudo docker exec -it 5a9b5863d93d bash # Bash into stopped container -# sudo docker commit 092b16b25c5b usr/resume && sudo docker run -it --gpus all --ipc=host -v "$(pwd)"/coco:/usr/src/coco --entrypoint=sh usr/resume +# id=5a9b5863d93d && sudo docker start $id && sudo docker exec -it $id bash # Send weights to GCP # python -c "from utils.general import *; strip_optimizer('runs/train/exp0_*/weights/best.pt', 'tmp.pt')" && gsutil cp tmp.pt gs://*.pt From 77fd83225fbd289ac8231ecc053f7cf5777e12ba Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 22 Jan 2021 15:08:42 -0800 Subject: [PATCH 017/254] Update google_utils.py (#2017) --- utils/google_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/google_utils.py b/utils/google_utils.py index 024dc7802f15..0a7ca3b896d6 100644 --- a/utils/google_utils.py +++ b/utils/google_utils.py @@ -26,8 +26,8 @@ def attempt_download(file, repo='ultralytics/yolov5'): assets = [x['name'] for x in response['assets']] # release assets, i.e. ['yolov5s.pt', 'yolov5m.pt', ...] tag = response['tag_name'] # i.e. 
'v1.0' except: # fallback plan - assets = ['yolov5.pt', 'yolov5.pt', 'yolov5l.pt', 'yolov5x.pt'] - tag = subprocess.check_output('git tag', shell=True).decode('utf-8').split('\n')[-2] + assets = ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt'] + tag = subprocess.check_output('git tag', shell=True).decode().split()[-1] name = file.name if name in assets: From 85b75d6018ad6b5a34e06dffcb0359cdf1790f99 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 22 Jan 2021 15:22:54 -0800 Subject: [PATCH 018/254] Update ci-testing.yml (#2018) --- .github/workflows/ci-testing.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 020658372f3c..5999ee6a9055 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -66,14 +66,14 @@ jobs: di=cpu # inference devices # define device # train - python train.py --img 256 --batch 8 --weights weights/${{ matrix.model }}.pt --cfg models/${{ matrix.model }}.yaml --epochs 1 --device $di + python train.py --img 128 --batch 16 --weights weights/${{ matrix.model }}.pt --cfg models/${{ matrix.model }}.yaml --epochs 1 --device $di # detect python detect.py --weights weights/${{ matrix.model }}.pt --device $di python detect.py --weights runs/train/exp/weights/last.pt --device $di # test - python test.py --img 256 --batch 8 --weights weights/${{ matrix.model }}.pt --device $di - python test.py --img 256 --batch 8 --weights runs/train/exp/weights/last.pt --device $di + python test.py --img 128 --batch 16 --weights weights/${{ matrix.model }}.pt --device $di + python test.py --img 128 --batch 16 --weights runs/train/exp/weights/last.pt --device $di python models/yolo.py --cfg models/${{ matrix.model }}.yaml # inspect - python models/export.py --img 256 --batch 1 --weights weights/${{ matrix.model }}.pt # export + python models/export.py --img 128 --batch 1 --weights weights/${{ matrix.model }}.pt # export shell: 
bash From aac33f87ad7a592b639cc6c98173218369dd2c09 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 22 Jan 2021 15:39:08 -0800 Subject: [PATCH 019/254] Update inference multiple-counting (#2019) * Update inference multiple-counting * update github check --- detect.py | 2 +- models/common.py | 2 +- utils/general.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/detect.py b/detect.py index 80e006251b69..d377f5e9c43e 100644 --- a/detect.py +++ b/detect.py @@ -97,7 +97,7 @@ def detect(save_img=False): # Print results for c in det[:, -1].unique(): n = (det[:, -1] == c).sum() # detections per class - s += f'{n} {names[int(c)]}s, ' # add to string + s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string # Write results for *xyxy, conf, cls in reversed(det): diff --git a/models/common.py b/models/common.py index 3bfdb3c7dc14..fba792e56022 100644 --- a/models/common.py +++ b/models/common.py @@ -248,7 +248,7 @@ def display(self, pprint=False, show=False, save=False, render=False): if pred is not None: for c in pred[:, -1].unique(): n = (pred[:, -1] == c).sum() # detections per class - str += f'{n} {self.names[int(c)]}s, ' # add to string + str += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string if show or save or render: img = Image.fromarray(img.astype(np.uint8)) if isinstance(img, np.ndarray) else img # from np for *box, conf, cls in pred: # xyxy, confidence, class diff --git a/utils/general.py b/utils/general.py index aa137ebde5ff..8421ba0c7ea7 100755 --- a/utils/general.py +++ b/utils/general.py @@ -70,7 +70,7 @@ def check_git_status(): branch = subprocess.check_output('git branch --show-current', shell=True).decode().rstrip() # current n = int(subprocess.check_output(f'git rev-list {branch}..origin/master --count', shell=True)) # commits behind if n > 0: - s = f"⚠️ WARNING: code is out of date by {n} {'commits' if n > 1 else 'commmit'}. " \ + s = f"⚠️ WARNING: code is out of date by {n} commit{'s' * (n > 1)}. 
" \ f"Use 'git pull' to update or 'git clone {url}' to download latest." else: s = f'up to date with {url} ✅' From 3b7feeafdffbf1dd955b565c31901f3055ab48a6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 22 Jan 2021 15:59:01 -0800 Subject: [PATCH 020/254] Update general.py check_git_status() fix (#2020) --- utils/general.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/utils/general.py b/utils/general.py index 8421ba0c7ea7..27cefe58a045 100755 --- a/utils/general.py +++ b/utils/general.py @@ -65,9 +65,9 @@ def check_git_status(): assert not Path('/workspace').exists(), 'skipping check (Docker image)' # not Path('/.dockerenv').exists() assert check_online(), 'skipping check (offline)' - cmd = 'git fetch && git config --get remote.origin.url' # github repo url - url = subprocess.check_output(cmd, shell=True).decode().rstrip() - branch = subprocess.check_output('git branch --show-current', shell=True).decode().rstrip() # current + cmd = 'git fetch && git config --get remote.origin.url' + url = subprocess.check_output(cmd, shell=True).decode().strip().rstrip('.git') # github repo url + branch = subprocess.check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out n = int(subprocess.check_output(f'git rev-list {branch}..origin/master --count', shell=True)) # commits behind if n > 0: s = f"⚠️ WARNING: code is out of date by {n} commit{'s' * (n > 1)}. 
" \ From 2fc4760257ac446df459ff0c8c040df51f6a4c27 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 22 Jan 2021 16:38:06 -0800 Subject: [PATCH 021/254] Update autoshape .print() and .save() (#2022) --- models/common.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/models/common.py b/models/common.py index fba792e56022..e8adb66293d5 100644 --- a/models/common.py +++ b/models/common.py @@ -244,7 +244,7 @@ def __init__(self, imgs, pred, names=None): def display(self, pprint=False, show=False, save=False, render=False): colors = color_list() for i, (img, pred) in enumerate(zip(self.imgs, self.pred)): - str = f'Image {i + 1}/{len(self.pred)}: {img.shape[0]}x{img.shape[1]} ' + str = f'image {i + 1}/{len(self.pred)}: {img.shape[0]}x{img.shape[1]} ' if pred is not None: for c in pred[:, -1].unique(): n = (pred[:, -1] == c).sum() # detections per class @@ -255,13 +255,13 @@ def display(self, pprint=False, show=False, save=False, render=False): # str += '%s %.2f, ' % (names[int(cls)], conf) # label ImageDraw.Draw(img).rectangle(box, width=4, outline=colors[int(cls) % 10]) # plot if pprint: - print(str) + print(str.rstrip(', ')) if show: - img.show(f'Image {i}') # show + img.show(f'image {i}') # show if save: f = f'results{i}.jpg' - str += f"saved to '{f}'" img.save(f) # save + print(f"{'Saving' * (i == 0)} {f},", end='' if i < self.n - 1 else ' done.\n') if render: self.imgs[i] = np.asarray(img) From 9a3da79b4a54217f145ca29ffd8641c98ce1613d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 23 Jan 2021 12:51:04 -0800 Subject: [PATCH 022/254] Update requirements.txt (#2021) * Update requirements.txt * Update ci-testing.yml * Update hubconf.py --- .github/workflows/ci-testing.yml | 1 + hubconf.py | 2 +- requirements.txt | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 5999ee6a9055..df508474a955 100644 --- a/.github/workflows/ci-testing.yml +++ 
b/.github/workflows/ci-testing.yml @@ -74,6 +74,7 @@ jobs: python test.py --img 128 --batch 16 --weights weights/${{ matrix.model }}.pt --device $di python test.py --img 128 --batch 16 --weights runs/train/exp/weights/last.pt --device $di + python hubconf.py # hub python models/yolo.py --cfg models/${{ matrix.model }}.yaml # inspect python models/export.py --img 128 --batch 1 --weights weights/${{ matrix.model }}.pt # export shell: bash diff --git a/hubconf.py b/hubconf.py index c4485a42e335..2a34813310e8 100644 --- a/hubconf.py +++ b/hubconf.py @@ -137,5 +137,5 @@ def custom(path_or_model='path/to/model.pt', autoshape=True): imgs = [Image.open(x) for x in Path('data/images').glob('*.jpg')] results = model(imgs) - results.show() results.print() + results.save() diff --git a/requirements.txt b/requirements.txt index 3c23f2b750a2..2b447aef4d39 100755 --- a/requirements.txt +++ b/requirements.txt @@ -6,7 +6,7 @@ matplotlib>=3.2.2 numpy>=1.18.5 opencv-python>=4.1.2 Pillow -PyYAML>=5.3 +PyYAML>=3.13 scipy>=1.4.1 tensorboard>=2.2 torch>=1.7.0 From c76c607265344276cbf2e9f53e637d76b4dc6e35 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 24 Jan 2021 10:05:40 -0800 Subject: [PATCH 023/254] PyYAML==5.4.1 (#2030) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 2b447aef4d39..759a100a212c 100755 --- a/requirements.txt +++ b/requirements.txt @@ -6,7 +6,7 @@ matplotlib>=3.2.2 numpy>=1.18.5 opencv-python>=4.1.2 Pillow -PyYAML>=3.13 +PyYAML==5.4.1 scipy>=1.4.1 tensorboard>=2.2 torch>=1.7.0 From 3551b072b366989b82b3777c63ea485a99e0bf90 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 24 Jan 2021 11:54:15 -0800 Subject: [PATCH 024/254] Docker pyYAML>=5.3.1 fix (#2031) --- Dockerfile | 2 +- requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index ce9cdc34f1c3..1f301b2d1e2d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -5,7 +5,7 @@ FROM 
nvcr.io/nvidia/pytorch:20.12-py3 RUN apt update && apt install -y screen libgl1-mesa-glx # Install python dependencies -RUN pip install --upgrade pip +RUN python -m pip install --upgrade pip COPY requirements.txt . RUN pip install -r requirements.txt gsutil diff --git a/requirements.txt b/requirements.txt index 759a100a212c..d22b42f5d786 100755 --- a/requirements.txt +++ b/requirements.txt @@ -6,7 +6,7 @@ matplotlib>=3.2.2 numpy>=1.18.5 opencv-python>=4.1.2 Pillow -PyYAML==5.4.1 +PyYAML>=5.3.1 scipy>=1.4.1 tensorboard>=2.2 torch>=1.7.0 From a41d910c5d7e2eb3e5998ff53222936f6cea0fc9 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 24 Jan 2021 18:01:58 -0800 Subject: [PATCH 025/254] data-autodownload background tasks (#2034) --- data/scripts/get_coco.sh | 4 +++- data/scripts/get_voc.sh | 6 ++++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/data/scripts/get_coco.sh b/data/scripts/get_coco.sh index 157a0b04cf86..b0df905c8525 100755 --- a/data/scripts/get_coco.sh +++ b/data/scripts/get_coco.sh @@ -20,5 +20,7 @@ f1='train2017.zip' # 19G, 118k images f2='val2017.zip' # 1G, 5k images f3='test2017.zip' # 7G, 41k images (optional) for f in $f1 $f2; do - echo 'Downloading' $url$f ' ...' && curl -L $url$f -o $f && unzip -q $f -d $d && rm $f # download, unzip, remove + echo 'Downloading' $url$f '...' && curl -L $url$f -o $f # download, (unzip, remove in background) + unzip -q $f -d $d && rm $f & done +wait # finish background tasks diff --git a/data/scripts/get_voc.sh b/data/scripts/get_voc.sh index 6bdaa9bcc071..06414b085095 100644 --- a/data/scripts/get_voc.sh +++ b/data/scripts/get_voc.sh @@ -17,9 +17,11 @@ url=https://github.com/ultralytics/yolov5/releases/download/v1.0/ f1=VOCtrainval_06-Nov-2007.zip # 446MB, 5012 images f2=VOCtest_06-Nov-2007.zip # 438MB, 4953 images f3=VOCtrainval_11-May-2012.zip # 1.95GB, 17126 images -for f in $f1 $f2 $f3; do - echo 'Downloading' $url$f ' ...' 
&& curl -L $url$f -o $f && unzip -q $f -d $d && rm $f # download, unzip, remove +for f in $f3 $f2 $f1; do + echo 'Downloading' $url$f '...' && curl -L $url$f -o $f # download, (unzip, remove in background) + unzip -q $f -d $d && rm $f & done +wait # finish background tasks end=$(date +%s) runtime=$((end - start)) From 8fcbe43423a49165f655dc10dcce2c61a5a2e1e2 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 25 Jan 2021 20:55:35 -0800 Subject: [PATCH 026/254] Check im.format during dataset caching (#2042) * Check im.format during dataset caching * Update datasets.py --- utils/datasets.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/datasets.py b/utils/datasets.py index a6e3d3f56b9f..d87952c9b61d 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -445,7 +445,8 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): im = Image.open(im_file) im.verify() # PIL verify shape = exif_size(im) # image size - assert (shape[0] > 9) & (shape[1] > 9), 'image size <10 pixels' + assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels' + assert im.format.lower() in img_formats, f'invalid image format {im.format}' # verify labels if os.path.isfile(lb_file): From d68afedb32fb5f3b632f67f2cbea2c89a145f0ad Mon Sep 17 00:00:00 2001 From: ramonhollands Date: Tue, 26 Jan 2021 22:39:19 +0100 Subject: [PATCH 027/254] Confusion matrix native image-space fix (#2046) Make sure the labels and predictions are equally scaled on confusion_matrix.process_batch --- test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test.py b/test.py index 2cbbbba5c9a2..891f6bef41c6 100644 --- a/test.py +++ b/test.py @@ -178,7 +178,7 @@ def test(data, tbox = xywh2xyxy(labels[:, 1:5]) scale_coords(img[si].shape[1:], tbox, shapes[si][0], shapes[si][1]) # native-space labels if plots: - confusion_matrix.process_batch(pred, torch.cat((labels[:, 0:1], tbox), 1)) + confusion_matrix.process_batch(predn, torch.cat((labels[:, 0:1], 
tbox), 1)) # Per target class for cls in torch.unique(tcls_tensor): From 08d3119e09d6db5723cf074f4adc47bbb334b495 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 26 Jan 2021 17:30:42 -0800 Subject: [PATCH 028/254] Add histogram equalization fcn (#2049) --- Dockerfile | 4 ++-- models/yolo.py | 2 +- utils/datasets.py | 14 ++++++++++---- 3 files changed, 13 insertions(+), 7 deletions(-) diff --git a/Dockerfile b/Dockerfile index 1f301b2d1e2d..98dfee204770 100644 --- a/Dockerfile +++ b/Dockerfile @@ -39,13 +39,13 @@ COPY . /usr/src/app # sudo docker kill $(sudo docker ps -q) # Kill all image-based -# sudo docker kill $(sudo docker ps -a -q --filter ancestor=ultralytics/yolov5:latest) +# sudo docker kill $(sudo docker ps -qa --filter ancestor=ultralytics/yolov5:latest) # Bash into running container # sudo docker exec -it 5a9b5863d93d bash # Bash into stopped container -# id=5a9b5863d93d && sudo docker start $id && sudo docker exec -it $id bash +# id=$(sudo docker ps -qa) && sudo docker start $id && sudo docker exec -it $id bash # Send weights to GCP # python -c "from utils.general import *; strip_optimizer('runs/train/exp0_*/weights/best.pt', 'tmp.pt')" && gsutil cp tmp.pt gs://*.pt diff --git a/models/yolo.py b/models/yolo.py index 6cf9dde08d1a..db6ad01af541 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -107,7 +107,7 @@ def forward(self, x, augment=False, profile=False): for si, fi in zip(s, f): xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max())) yi = self.forward_once(xi)[0] # forward - # cv2.imwrite('img%g.jpg' % s, 255 * xi[0].numpy().transpose((1, 2, 0))[:, :, ::-1]) # save + # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1]) # save yi[..., :4] /= si # de-scale if fi == 2: yi[..., 1] = img_size[0] - yi[..., 1] # de-flip ud diff --git a/utils/datasets.py b/utils/datasets.py index d87952c9b61d..eac632a46b11 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -631,10 +631,16 @@ def 
augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5): img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype) cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed - # Histogram equalization - # if random.random() < 0.2: - # for i in range(3): - # img[:, :, i] = cv2.equalizeHist(img[:, :, i]) + +def hist_equalize(img, clahe=True, bgr=False): + # Equalize histogram on BGR image 'img' with img.shape(n,m,3) and range 0-255 + yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV) + if clahe: + c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8)) + yuv[:, :, 0] = c.apply(yuv[:, :, 0]) + else: + yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0]) # equalize Y channel histogram + return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB) # convert YUV image to RGB def load_mosaic(self, index): From 59c21c7bcba9dc2e4ec15783b549e46e024b39f8 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 26 Jan 2021 21:16:01 -0800 Subject: [PATCH 029/254] W&B log epoch (#1946) * W&B log epoch * capitalize * W&B log epoch * capitalize * Update train.py New try using https://docs.wandb.ai/library/log#incremental-logging * Update train.py * Update test.py * Update train.py * Update plots.py * Update train.py * Update train.py * label plot step -1 * update * update * update * update * update * update * Update train.py * Update train.py --- test.py | 4 ++-- train.py | 2 +- utils/plots.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/test.py b/test.py index 891f6bef41c6..db344e722043 100644 --- a/test.py +++ b/test.py @@ -239,8 +239,8 @@ def test(data, if plots: confusion_matrix.plot(save_dir=save_dir, names=list(names.values())) if wandb and wandb.run: - wandb.log({"Images": wandb_images}) - wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in sorted(save_dir.glob('test*.jpg'))]}) + val_batches = [wandb.Image(str(f), caption=f.name) for f in 
sorted(save_dir.glob('test*.jpg'))] + wandb.log({"Images": wandb_images, "Validation": val_batches}, commit=False) # Save JSON if save_json and len(jdict): diff --git a/train.py b/train.py index 9e6bd8673726..5eff4bbac172 100644 --- a/train.py +++ b/train.py @@ -321,7 +321,7 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): # tb_writer.add_graph(model, imgs) # add model to tensorboard elif plots and ni == 10 and wandb: wandb.log({"Mosaics": [wandb.Image(str(x), caption=x.name) for x in save_dir.glob('train*.jpg') - if x.exists()]}) + if x.exists()]}, commit=False) # end batch ------------------------------------------------------------------------------------------------ # end epoch ---------------------------------------------------------------------------------------------------- diff --git a/utils/plots.py b/utils/plots.py index 4765069e0377..67f11bfd2011 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -295,7 +295,7 @@ def plot_labels(labels, save_dir=Path(''), loggers=None): # loggers for k, v in loggers.items() or {}: if k == 'wandb' and v: - v.log({"Labels": [v.Image(str(x), caption=x.name) for x in save_dir.glob('*labels*.jpg')]}) + v.log({"Labels": [v.Image(str(x), caption=x.name) for x in save_dir.glob('*labels*.jpg')]}, commit=False) def plot_evolution(yaml_file='data/hyp.finetune.yaml'): # from utils.plots import *; plot_evolution() From f59f80114cbe4a7b3d4ea743b771aa1919fa8c8a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 26 Jan 2021 21:17:36 -0800 Subject: [PATCH 030/254] Add 'exclude' tuple to check_requirements() (#2041) --- utils/general.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/utils/general.py b/utils/general.py index 27cefe58a045..bbc0f32b8425 100755 --- a/utils/general.py +++ b/utils/general.py @@ -79,11 +79,11 @@ def check_git_status(): print(e) -def check_requirements(file='requirements.txt'): +def check_requirements(file='requirements.txt', exclude=()): # Check installed dependencies 
meet requirements import pkg_resources - requirements = pkg_resources.parse_requirements(Path(file).open()) - requirements = [x.name + ''.join(*x.specs) if len(x.specs) else x.name for x in requirements] + requirements = [f'{x.name}{x.specifier}' for x in pkg_resources.parse_requirements(Path(file).open()) + if x.name not in exclude] pkg_resources.require(requirements) # DistributionNotFound or VersionConflict exception if requirements not met From 2a835c79a9ca68fa2525e9444df638cf8dcf7f59 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 27 Jan 2021 16:01:24 -0800 Subject: [PATCH 031/254] Update run-once lines (#2058) --- detect.py | 4 ++-- test.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/detect.py b/detect.py index d377f5e9c43e..f8b959aaadf7 100644 --- a/detect.py +++ b/detect.py @@ -56,9 +56,9 @@ def detect(save_img=False): colors = [[random.randint(0, 255) for _ in range(3)] for _ in names] # Run inference + if device.type != 'cpu': + model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once t0 = time.time() - img = torch.zeros((1, 3, imgsz, imgsz), device=device) # init img - _ = model(img.half() if half else img) if device.type != 'cpu' else None # run once for path, img, im0s, vid_cap in dataset: img = torch.from_numpy(img).to(device) img = img.half() if half else img.float() # uint8 to fp16/32 diff --git a/test.py b/test.py index db344e722043..3b76a507aae4 100644 --- a/test.py +++ b/test.py @@ -82,8 +82,8 @@ def test(data, # Dataloader if not training: - img = torch.zeros((1, 3, imgsz, imgsz), device=device) # init img - _ = model(img.half() if half else img) if device.type != 'cpu' else None # run once + if device.type != 'cpu': + model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once path = data['test'] if opt.task == 'test' else data['val'] # path to val/test images dataloader = create_dataloader(path, imgsz, batch_size, model.stride.max(), opt, 
pad=0.5, rect=True, prefix=colorstr('test: ' if opt.task == 'test' else 'val: '))[0] From f639e14e4d20f8eaadfbe92dbc766fee7577c56b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 27 Jan 2021 17:10:53 -0800 Subject: [PATCH 032/254] Metric-Confidence plots feature addition (#2057) * Metric-Confidence plots feature addition * cleanup * Metric-Confidence plots feature addition * cleanup * Update run-once lines * cleanup * save all 4 curves to wandb --- test.py | 2 +- train.py | 2 +- utils/metrics.py | 53 ++++++++++++++++++++++++++++++++++-------------- 3 files changed, 40 insertions(+), 17 deletions(-) diff --git a/test.py b/test.py index 3b76a507aae4..22d20f0323e3 100644 --- a/test.py +++ b/test.py @@ -215,7 +215,7 @@ def test(data, stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy if len(stats) and stats[0].any(): p, r, ap, f1, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names) - p, r, ap50, ap = p[:, 0], r[:, 0], ap[:, 0], ap.mean(1) # [P, R, AP@0.5, AP@0.5:0.95] + ap50, ap = ap[:, 0], ap.mean(1) # AP@0.5, AP@0.5:0.95 mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean() nt = np.bincount(stats[3].astype(np.int64), minlength=nc) # number of targets per class else: diff --git a/train.py b/train.py index 5eff4bbac172..ddb23e34a619 100644 --- a/train.py +++ b/train.py @@ -403,7 +403,7 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): if plots: plot_results(save_dir=save_dir) # save as results.png if wandb: - files = ['results.png', 'precision_recall_curve.png', 'confusion_matrix.png'] + files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]] wandb.log({"Results": [wandb.Image(str(save_dir / f), caption=f) for f in files if (save_dir / f).exists()]}) if opt.log_artifacts: diff --git a/utils/metrics.py b/utils/metrics.py index 99d5bcfaf2af..ba812ff13a58 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -15,7 +15,7 @@ def fitness(x): return (x[:, :4] * 
w).sum(1) -def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='precision-recall_curve.png', names=[]): +def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=()): """ Compute the average precision, given the recall and precision curves. Source: https://github.com/rafaelpadilla/Object-Detection-Metrics. # Arguments @@ -35,12 +35,11 @@ def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='precision # Find unique classes unique_classes = np.unique(target_cls) + nc = unique_classes.shape[0] # number of classes, number of detections # Create Precision-Recall curve and compute AP for each class px, py = np.linspace(0, 1, 1000), [] # for plotting - pr_score = 0.1 # score to evaluate P and R https://github.com/ultralytics/yolov3/issues/898 - s = [unique_classes.shape[0], tp.shape[1]] # number class, number iou thresholds (i.e. 10 for mAP0.5...0.95) - ap, p, r = np.zeros(s), np.zeros(s), np.zeros(s) + ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000)) for ci, c in enumerate(unique_classes): i = pred_cls == c n_l = (target_cls == c).sum() # number of labels @@ -55,25 +54,28 @@ def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='precision # Recall recall = tpc / (n_l + 1e-16) # recall curve - r[ci] = np.interp(-pr_score, -conf[i], recall[:, 0]) # r at pr_score, negative x, xp because xp decreases + r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0) # negative x, xp because xp decreases # Precision precision = tpc / (tpc + fpc) # precision curve - p[ci] = np.interp(-pr_score, -conf[i], precision[:, 0]) # p at pr_score + p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1) # p at pr_score # AP from recall-precision curve for j in range(tp.shape[1]): ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j]) - if plot and (j == 0): + if plot and j == 0: py.append(np.interp(px, mrec, mpre)) # precision at mAP@0.5 - # Compute F1 score (harmonic mean of 
precision and recall) + # Compute F1 (harmonic mean of precision and recall) f1 = 2 * p * r / (p + r + 1e-16) - if plot: - plot_pr_curve(px, py, ap, save_dir, names) + plot_pr_curve(px, py, ap, Path(save_dir) / 'PR_curve.png', names) + plot_mc_curve(px, f1, Path(save_dir) / 'F1_curve.png', names, ylabel='F1') + plot_mc_curve(px, p, Path(save_dir) / 'P_curve.png', names, ylabel='Precision') + plot_mc_curve(px, r, Path(save_dir) / 'R_curve.png', names, ylabel='Recall') - return p, r, ap, f1, unique_classes.astype('int32') + i = f1.mean(0).argmax() # max F1 index + return p[:, i], r[:, i], ap, f1[:, i], unique_classes.astype('int32') def compute_ap(recall, precision): @@ -181,13 +183,14 @@ def print(self): # Plots ---------------------------------------------------------------------------------------------------------------- -def plot_pr_curve(px, py, ap, save_dir='.', names=()): +def plot_pr_curve(px, py, ap, save_dir='pr_curve.png', names=()): + # Precision-recall curve fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True) py = np.stack(py, axis=1) - if 0 < len(names) < 21: # show mAP in legend if < 10 classes + if 0 < len(names) < 21: # display per-class legend if < 21 classes for i, y in enumerate(py.T): - ax.plot(px, y, linewidth=1, label=f'{names[i]} %.3f' % ap[i, 0]) # plot(recall, precision) + ax.plot(px, y, linewidth=1, label=f'{names[i]} {ap[i, 0]:.3f}') # plot(recall, precision) else: ax.plot(px, py, linewidth=1, color='grey') # plot(recall, precision) @@ -197,4 +200,24 @@ def plot_pr_curve(px, py, ap, save_dir='.', names=()): ax.set_xlim(0, 1) ax.set_ylim(0, 1) plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left") - fig.savefig(Path(save_dir) / 'precision_recall_curve.png', dpi=250) + fig.savefig(Path(save_dir), dpi=250) + + +def plot_mc_curve(px, py, save_dir='mc_curve.png', names=(), xlabel='Confidence', ylabel='Metric'): + # Metric-confidence curve + fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True) + + if 0 < len(names) < 21: # 
display per-class legend if < 21 classes + for i, y in enumerate(py): + ax.plot(px, y, linewidth=1, label=f'{names[i]}') # plot(confidence, metric) + else: + ax.plot(px, py.T, linewidth=1, color='grey') # plot(confidence, metric) + + y = py.mean(0) + ax.plot(px, y, linewidth=3, color='blue', label=f'all classes {y.max():.2f} at {px[y.argmax()]:.3f}') + ax.set_xlabel(xlabel) + ax.set_ylabel(ylabel) + ax.set_xlim(0, 1) + ax.set_ylim(0, 1) + plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left") + fig.savefig(Path(save_dir), dpi=250) From 630ec06af6c01c17f2c2603a7a62b672bfcd8878 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 28 Jan 2021 12:37:21 -0800 Subject: [PATCH 033/254] Update to colors.TABLEAU_COLORS (#2069) --- utils/plots.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/plots.py b/utils/plots.py index 67f11bfd2011..a199963d7d99 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -31,7 +31,7 @@ def color_list(): def hex2rgb(h): return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4)) - return [hex2rgb(h) for h in plt.rcParams['axes.prop_cycle'].by_key()['color']] + return [hex2rgb(h) for h in matplotlib.colors.TABLEAU_COLORS.values()] # or BASE_ (8), CSS4_ (148), XKCD_ (949) def hist2d(x, y, n=100): From 2acbe9699a51a0efd8ae0dbfdd775c20a628f362 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 28 Jan 2021 18:57:24 -0800 Subject: [PATCH 034/254] W&B epoch logging update (#2073) --- train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/train.py b/train.py index ddb23e34a619..4ec97ae71e16 100644 --- a/train.py +++ b/train.py @@ -364,7 +364,7 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): if tb_writer: tb_writer.add_scalar(tag, x, epoch) # tensorboard if wandb: - wandb.log({tag: x}) # W&B + wandb.log({tag: x}, step=epoch, commit=tag == tags[-1]) # W&B # Update best mAP fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95] From 
6bfa9c2422b8b961d01806d86d9a9e7c8e8c1fc6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 29 Jan 2021 11:25:01 -0800 Subject: [PATCH 035/254] GhostConv update (#2082) --- models/experimental.py | 2 +- models/yolo.py | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/models/experimental.py b/models/experimental.py index 72dc877c83cf..5fe56858c54a 100644 --- a/models/experimental.py +++ b/models/experimental.py @@ -58,7 +58,7 @@ def forward(self, x): class GhostBottleneck(nn.Module): # Ghost Bottleneck https://github.com/huawei-noah/ghostnet - def __init__(self, c1, c2, k, s): + def __init__(self, c1, c2, k=3, s=1): # ch_in, ch_out, kernel, stride super(GhostBottleneck, self).__init__() c_ = c2 // 2 self.conv = nn.Sequential(GhostConv(c1, c_, 1, 1), # pw diff --git a/models/yolo.py b/models/yolo.py index db6ad01af541..11e6a65921a4 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -8,7 +8,7 @@ logger = logging.getLogger(__name__) from models.common import * -from models.experimental import MixConv2d, CrossConv +from models.experimental import * from utils.autoanchor import check_anchor_order from utils.general import make_divisible, check_file, set_logging from utils.torch_utils import time_synchronized, fuse_conv_and_bn, model_info, scale_img, initialize_weights, \ @@ -210,7 +210,8 @@ def parse_model(d, ch): # model_dict, input_channels(3) pass n = max(round(n * gd), 1) if n > 1 else n # depth gain - if m in [Conv, Bottleneck, SPP, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP, C3]: + if m in [Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP, + C3]: c1, c2 = ch[f], args[0] # Normal From 6e6f77be47ea95ba3d1e65c01d829c189a532df2 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 29 Jan 2021 11:57:29 -0800 Subject: [PATCH 036/254] Add YOLOv5-P6 models (#2083) --- models/hub/yolov5l6.yaml | 60 ++++++++++++++++++++++++++++++++++++++++ models/hub/yolov5m6.yaml | 60 
++++++++++++++++++++++++++++++++++++++++ models/hub/yolov5s6.yaml | 60 ++++++++++++++++++++++++++++++++++++++++ models/hub/yolov5x6.yaml | 60 ++++++++++++++++++++++++++++++++++++++++ 4 files changed, 240 insertions(+) create mode 100644 models/hub/yolov5l6.yaml create mode 100644 models/hub/yolov5m6.yaml create mode 100644 models/hub/yolov5s6.yaml create mode 100644 models/hub/yolov5x6.yaml diff --git a/models/hub/yolov5l6.yaml b/models/hub/yolov5l6.yaml new file mode 100644 index 000000000000..e0699d413c9f --- /dev/null +++ b/models/hub/yolov5l6.yaml @@ -0,0 +1,60 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: + - [ 19,27, 44,40, 38,94 ] # P3/8 + - [ 96,68, 86,152, 180,137 ] # P4/16 + - [ 140,301, 303,264, 238,542 ] # P5/32 + - [ 436,615, 739,380, 925,792 ] # P6/64 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 + [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 + [ -1, 3, C3, [ 128 ] ], + [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 + [ -1, 9, C3, [ 256 ] ], + [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 + [ -1, 9, C3, [ 512 ] ], + [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32 + [ -1, 3, C3, [ 768 ] ], + [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64 + [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ], + [ -1, 3, C3, [ 1024, False ] ], # 11 + ] + +# YOLOv5 head +head: + [ [ -1, 1, Conv, [ 768, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5 + [ -1, 3, C3, [ 768, False ] ], # 15 + + [ -1, 1, Conv, [ 512, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 + [ -1, 3, C3, [ 512, False ] ], # 19 + + [ -1, 1, Conv, [ 256, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 + [ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small) + + [ -1, 1, Conv, [ 256, 3, 
2 ] ], + [ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4 + [ -1, 3, C3, [ 512, False ] ], # 26 (P4/16-medium) + + [ -1, 1, Conv, [ 512, 3, 2 ] ], + [ [ -1, 16 ], 1, Concat, [ 1 ] ], # cat head P5 + [ -1, 3, C3, [ 768, False ] ], # 29 (P5/32-large) + + [ -1, 1, Conv, [ 768, 3, 2 ] ], + [ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6 + [ -1, 3, C3, [ 1024, False ] ], # 32 (P5/64-xlarge) + + [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6) + ] diff --git a/models/hub/yolov5m6.yaml b/models/hub/yolov5m6.yaml new file mode 100644 index 000000000000..155170b3f938 --- /dev/null +++ b/models/hub/yolov5m6.yaml @@ -0,0 +1,60 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 0.67 # model depth multiple +width_multiple: 0.75 # layer channel multiple + +# anchors +anchors: + - [ 19,27, 44,40, 38,94 ] # P3/8 + - [ 96,68, 86,152, 180,137 ] # P4/16 + - [ 140,301, 303,264, 238,542 ] # P5/32 + - [ 436,615, 739,380, 925,792 ] # P6/64 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 + [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 + [ -1, 3, C3, [ 128 ] ], + [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 + [ -1, 9, C3, [ 256 ] ], + [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 + [ -1, 9, C3, [ 512 ] ], + [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32 + [ -1, 3, C3, [ 768 ] ], + [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64 + [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ], + [ -1, 3, C3, [ 1024, False ] ], # 11 + ] + +# YOLOv5 head +head: + [ [ -1, 1, Conv, [ 768, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5 + [ -1, 3, C3, [ 768, False ] ], # 15 + + [ -1, 1, Conv, [ 512, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 + [ -1, 3, C3, [ 512, False ] ], # 19 + + [ -1, 1, Conv, [ 256, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 
+ [ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small) + + [ -1, 1, Conv, [ 256, 3, 2 ] ], + [ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4 + [ -1, 3, C3, [ 512, False ] ], # 26 (P4/16-medium) + + [ -1, 1, Conv, [ 512, 3, 2 ] ], + [ [ -1, 16 ], 1, Concat, [ 1 ] ], # cat head P5 + [ -1, 3, C3, [ 768, False ] ], # 29 (P5/32-large) + + [ -1, 1, Conv, [ 768, 3, 2 ] ], + [ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6 + [ -1, 3, C3, [ 1024, False ] ], # 32 (P5/64-xlarge) + + [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6) + ] diff --git a/models/hub/yolov5s6.yaml b/models/hub/yolov5s6.yaml new file mode 100644 index 000000000000..9f2302a09f53 --- /dev/null +++ b/models/hub/yolov5s6.yaml @@ -0,0 +1,60 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 0.33 # model depth multiple +width_multiple: 0.50 # layer channel multiple + +# anchors +anchors: + - [ 19,27, 44,40, 38,94 ] # P3/8 + - [ 96,68, 86,152, 180,137 ] # P4/16 + - [ 140,301, 303,264, 238,542 ] # P5/32 + - [ 436,615, 739,380, 925,792 ] # P6/64 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 + [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 + [ -1, 3, C3, [ 128 ] ], + [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 + [ -1, 9, C3, [ 256 ] ], + [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 + [ -1, 9, C3, [ 512 ] ], + [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32 + [ -1, 3, C3, [ 768 ] ], + [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64 + [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ], + [ -1, 3, C3, [ 1024, False ] ], # 11 + ] + +# YOLOv5 head +head: + [ [ -1, 1, Conv, [ 768, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5 + [ -1, 3, C3, [ 768, False ] ], # 15 + + [ -1, 1, Conv, [ 512, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 + [ -1, 3, C3, [ 512, False ] ], # 19 + + [ -1, 1, Conv, [ 256, 1, 1 ] ], + [ -1, 1, 
nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 + [ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small) + + [ -1, 1, Conv, [ 256, 3, 2 ] ], + [ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4 + [ -1, 3, C3, [ 512, False ] ], # 26 (P4/16-medium) + + [ -1, 1, Conv, [ 512, 3, 2 ] ], + [ [ -1, 16 ], 1, Concat, [ 1 ] ], # cat head P5 + [ -1, 3, C3, [ 768, False ] ], # 29 (P5/32-large) + + [ -1, 1, Conv, [ 768, 3, 2 ] ], + [ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6 + [ -1, 3, C3, [ 1024, False ] ], # 32 (P5/64-xlarge) + + [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6) + ] diff --git a/models/hub/yolov5x6.yaml b/models/hub/yolov5x6.yaml new file mode 100644 index 000000000000..eda764406e93 --- /dev/null +++ b/models/hub/yolov5x6.yaml @@ -0,0 +1,60 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.33 # model depth multiple +width_multiple: 1.25 # layer channel multiple + +# anchors +anchors: + - [ 19,27, 44,40, 38,94 ] # P3/8 + - [ 96,68, 86,152, 180,137 ] # P4/16 + - [ 140,301, 303,264, 238,542 ] # P5/32 + - [ 436,615, 739,380, 925,792 ] # P6/64 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 + [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 + [ -1, 3, C3, [ 128 ] ], + [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 + [ -1, 9, C3, [ 256 ] ], + [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 + [ -1, 9, C3, [ 512 ] ], + [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32 + [ -1, 3, C3, [ 768 ] ], + [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64 + [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ], + [ -1, 3, C3, [ 1024, False ] ], # 11 + ] + +# YOLOv5 head +head: + [ [ -1, 1, Conv, [ 768, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5 + [ -1, 3, C3, [ 768, False ] ], # 15 + + [ -1, 1, Conv, [ 512, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 + [ 
-1, 3, C3, [ 512, False ] ], # 19 + + [ -1, 1, Conv, [ 256, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 + [ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small) + + [ -1, 1, Conv, [ 256, 3, 2 ] ], + [ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4 + [ -1, 3, C3, [ 512, False ] ], # 26 (P4/16-medium) + + [ -1, 1, Conv, [ 512, 3, 2 ] ], + [ [ -1, 16 ], 1, Concat, [ 1 ] ], # cat head P5 + [ -1, 3, C3, [ 768, False ] ], # 29 (P5/32-large) + + [ -1, 1, Conv, [ 768, 3, 2 ] ], + [ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6 + [ -1, 3, C3, [ 1024, False ] ], # 32 (P5/64-xlarge) + + [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6) + ] From 64628d6fc5dcce75bf10c6844380e6bd005e24d1 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 29 Jan 2021 13:40:03 -0800 Subject: [PATCH 037/254] Update tutorial.ipynb --- tutorial.ipynb | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 7fc9b37a31dc..8b4b2b6c0a49 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -1123,6 +1123,7 @@ "- **Google Colab Notebook** with free GPU: \"Open\n", "- **Kaggle Notebook** with free GPU: [https://www.kaggle.com/ultralytics/yolov5](https://www.kaggle.com/ultralytics/yolov5)\n", "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart) \n", + "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart) \n", "- **Docker Image** https://hub.docker.com/r/ultralytics/yolov5. 
See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) ![Docker Pulls](https://img.shields.io/docker/pulls/ultralytics/yolov5?logo=docker)\n" ] }, @@ -1225,4 +1226,4 @@ "outputs": [] } ] -} \ No newline at end of file +} From 170d12e506cbb4925e4e69151fed9274d73d7e7d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 29 Jan 2021 13:49:00 -0800 Subject: [PATCH 038/254] Add Amazon Deep Learning AMI environment (#2085) * Update greetings.yml * Update README.md --- .github/workflows/greetings.yml | 3 ++- README.md | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index 2bfa53c14fcd..1f31c5ac897f 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -44,7 +44,8 @@ jobs: - **Google Colab Notebook** with free GPU: Open In Colab - **Kaggle Notebook** with free GPU: [https://www.kaggle.com/ultralytics/yolov5](https://www.kaggle.com/ultralytics/yolov5) - - **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart) + - **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart) + - **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart) - **Docker Image** https://hub.docker.com/r/ultralytics/yolov5. 
See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) ![Docker Pulls](https://img.shields.io/docker/pulls/ultralytics/yolov5?logo=docker) ## Status diff --git a/README.md b/README.md index a3b5b00f4d7e..6f2d0b7999e6 100755 --- a/README.md +++ b/README.md @@ -67,7 +67,8 @@ YOLOv5 may be run in any of the following up-to-date verified environments (with - **Google Colab Notebook** with free GPU: Open In Colab - **Kaggle Notebook** with free GPU: [https://www.kaggle.com/ultralytics/yolov5](https://www.kaggle.com/ultralytics/yolov5) -- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart) +- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart) +- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart) - **Docker Image** https://hub.docker.com/r/ultralytics/yolov5. 
See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) ![Docker Pulls](https://img.shields.io/docker/pulls/ultralytics/yolov5?logo=docker) From eeb2bbf648869de817c5dba5f58aca7f561e2bae Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 30 Jan 2021 11:48:03 -0800 Subject: [PATCH 039/254] Add Kaggle badge (#2090) * Update README.md * Update greetings.yml * Created using Colaboratory --- .github/workflows/greetings.yml | 6 +++--- README.md | 5 ++--- tutorial.ipynb | 36 ++++++++++++++++++++++----------- 3 files changed, 29 insertions(+), 18 deletions(-) diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index 1f31c5ac897f..d62cf5c1600d 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -42,11 +42,11 @@ jobs: YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled): - - **Google Colab Notebook** with free GPU: Open In Colab - - **Kaggle Notebook** with free GPU: [https://www.kaggle.com/ultralytics/yolov5](https://www.kaggle.com/ultralytics/yolov5) + - **Google Colab and Kaggle** notebooks with free GPU: Open In Colab Open In Kaggle - **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart) - **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart) - - **Docker Image** https://hub.docker.com/r/ultralytics/yolov5. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) ![Docker Pulls](https://img.shields.io/docker/pulls/ultralytics/yolov5?logo=docker) + - **Docker Image**. 
See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) Docker Pulls + ## Status diff --git a/README.md b/README.md index 6f2d0b7999e6..3c14071698c5 100755 --- a/README.md +++ b/README.md @@ -65,11 +65,10 @@ $ pip install -r requirements.txt YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled): -- **Google Colab Notebook** with free GPU: Open In Colab -- **Kaggle Notebook** with free GPU: [https://www.kaggle.com/ultralytics/yolov5](https://www.kaggle.com/ultralytics/yolov5) +- **Google Colab and Kaggle** notebooks with free GPU: Open In Colab Open In Kaggle - **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart) - **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart) -- **Docker Image** https://hub.docker.com/r/ultralytics/yolov5. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) ![Docker Pulls](https://img.shields.io/docker/pulls/ultralytics/yolov5?logo=docker) +- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) Docker Pulls ## Inference diff --git a/tutorial.ipynb b/tutorial.ipynb index 8b4b2b6c0a49..c561c25ac180 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -517,7 +517,7 @@ "colab_type": "text" }, "source": [ - "\"Open" + "\"Open" ] }, { @@ -563,7 +563,7 @@ "clear_output()\n", "print('Setup complete. 
Using torch %s %s' % (torch.__version__, torch.cuda.get_device_properties(0) if torch.cuda.is_available() else 'CPU'))" ], - "execution_count": 3, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -689,7 +689,7 @@ "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017val.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../ && rm tmp.zip" ], - "execution_count": 6, + "execution_count": null, "outputs": [ { "output_type": "display_data", @@ -729,7 +729,7 @@ "# Run YOLOv5x on COCO val2017\n", "!python test.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65" ], - "execution_count": 7, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -854,7 +854,7 @@ "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../ && rm tmp.zip" ], - "execution_count": 4, + "execution_count": null, "outputs": [ { "output_type": "display_data", @@ -931,7 +931,7 @@ "# Train YOLOv5s on COCO128 for 3 epochs\n", "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --nosave --cache" ], - "execution_count": 5, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -1120,11 +1120,23 @@ "\n", "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n", "\n", - "- **Google Colab Notebook** with free GPU: \"Open\n", - "- **Kaggle Notebook** with free GPU: [https://www.kaggle.com/ultralytics/yolov5](https://www.kaggle.com/ultralytics/yolov5)\n", - "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart) \n", - "- **Amazon** Deep Learning AMI. 
See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart) \n", - "- **Docker Image** https://hub.docker.com/r/ultralytics/yolov5. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) ![Docker Pulls](https://img.shields.io/docker/pulls/ultralytics/yolov5?logo=docker)\n" + "- **Google Colab and Kaggle** notebooks with free GPU: \"Open \"Open\n", + "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)\n", + "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)\n", + "- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) \"Docker\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "6Qu7Iesl0p54" + }, + "source": [ + "# Status\n", + "\n", + "![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg)\n", + "\n", + "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. 
CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([test.py](https://github.com/ultralytics/yolov5/blob/master/test.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/models/export.py)) on MacOS, Windows, and Ubuntu every 24 hours and on every commit.\n" ] }, { @@ -1226,4 +1238,4 @@ "outputs": [] } ] -} +} \ No newline at end of file From aa02b9482520b7bccf2f9d9060bf9c4b2fbb4013 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 30 Jan 2021 11:51:16 -0800 Subject: [PATCH 040/254] Add Kaggle badge (#2090) --- tutorial.ipynb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index c561c25ac180..3f7133f4f7d7 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -517,7 +517,7 @@ "colab_type": "text" }, "source": [ - "\"Open" + "\"Open" ] }, { @@ -1238,4 +1238,4 @@ "outputs": [] } ] -} \ No newline at end of file +} From a18efc3a734dc29ca83e9789b2b378a52fd42202 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 30 Jan 2021 13:47:23 -0800 Subject: [PATCH 041/254] Add variable-stride inference support (#2091) --- detect.py | 7 ++++--- utils/datasets.py | 23 +++++++++++++---------- 2 files changed, 17 insertions(+), 13 deletions(-) diff --git a/detect.py b/detect.py index f8b959aaadf7..f9085e670916 100644 --- a/detect.py +++ b/detect.py @@ -31,7 +31,8 @@ def detect(save_img=False): # Load model model = attempt_load(weights, map_location=device) # load FP32 model - imgsz = check_img_size(imgsz, s=model.stride.max()) # check img_size + stride = int(model.stride.max()) # model stride + imgsz = check_img_size(imgsz, s=stride) # check img_size if half: model.half() # to FP16 @@ -46,10 +47,10 @@ def detect(save_img=False): if webcam: view_img = True cudnn.benchmark = True # set True to speed up constant image size inference - 
dataset = LoadStreams(source, img_size=imgsz) + dataset = LoadStreams(source, img_size=imgsz, stride=stride) else: save_img = True - dataset = LoadImages(source, img_size=imgsz) + dataset = LoadImages(source, img_size=imgsz, stride=stride) # Get names and colors names = model.module.names if hasattr(model, 'module') else model.names diff --git a/utils/datasets.py b/utils/datasets.py index eac632a46b11..360d24c18874 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -119,7 +119,7 @@ def __iter__(self): class LoadImages: # for inference - def __init__(self, path, img_size=640): + def __init__(self, path, img_size=640, stride=32): p = str(Path(path)) # os-agnostic p = os.path.abspath(p) # absolute path if '*' in p: @@ -136,6 +136,7 @@ def __init__(self, path, img_size=640): ni, nv = len(images), len(videos) self.img_size = img_size + self.stride = stride self.files = images + videos self.nf = ni + nv # number of files self.video_flag = [False] * ni + [True] * nv @@ -181,7 +182,7 @@ def __next__(self): print(f'image {self.count}/{self.nf} {path}: ', end='') # Padded resize - img = letterbox(img0, new_shape=self.img_size)[0] + img = letterbox(img0, self.img_size, stride=self.stride)[0] # Convert img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 @@ -199,8 +200,9 @@ def __len__(self): class LoadWebcam: # for inference - def __init__(self, pipe='0', img_size=640): + def __init__(self, pipe='0', img_size=640, stride=32): self.img_size = img_size + self.stride = stride if pipe.isnumeric(): pipe = eval(pipe) # local camera @@ -243,7 +245,7 @@ def __next__(self): print(f'webcam {self.count}: ', end='') # Padded resize - img = letterbox(img0, new_shape=self.img_size)[0] + img = letterbox(img0, self.img_size, stride=self.stride)[0] # Convert img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 @@ -256,9 +258,10 @@ def __len__(self): class LoadStreams: # multiple IP or RTSP cameras - def __init__(self, sources='streams.txt', img_size=640): 
+ def __init__(self, sources='streams.txt', img_size=640, stride=32): self.mode = 'stream' self.img_size = img_size + self.stride = stride if os.path.isfile(sources): with open(sources, 'r') as f: @@ -284,7 +287,7 @@ def __init__(self, sources='streams.txt', img_size=640): print('') # newline # check for common shapes - s = np.stack([letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0) # inference shapes + s = np.stack([letterbox(x, self.img_size, stride=self.stride)[0].shape for x in self.imgs], 0) # shapes self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal if not self.rect: print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.') @@ -313,7 +316,7 @@ def __next__(self): raise StopIteration # Letterbox - img = [letterbox(x, new_shape=self.img_size, auto=self.rect)[0] for x in img0] + img = [letterbox(x, self.img_size, auto=self.rect, stride=self.stride)[0] for x in img0] # Stack img = np.stack(img, 0) @@ -784,8 +787,8 @@ def replicate(img, labels): return img, labels -def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True): - # Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232 +def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32): + # Resize and pad image while meeting stride-multiple constraints shape = img.shape[:2] # current shape [height, width] if isinstance(new_shape, int): new_shape = (new_shape, new_shape) @@ -800,7 +803,7 @@ def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scale new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding if auto: # minimum rectangle - dw, dh = np.mod(dw, 32), np.mod(dh, 32) # wh padding + dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding elif 
scaleFill: # stretch dw, dh = 0.0, 0.0 new_unpad = (new_shape[1], new_shape[0]) From be9edffded6b690168e8b92dd33cf471d09f8f13 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 31 Jan 2021 13:58:49 -0800 Subject: [PATCH 042/254] Update test.py --task speed and study (#2099) * Add --speed benchmark * test range 256 - 1536 * update * update * update * update --- test.py | 16 ++++++++++------ utils/plots.py | 2 +- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/test.py b/test.py index 22d20f0323e3..738764f15601 100644 --- a/test.py +++ b/test.py @@ -320,16 +320,20 @@ def test(data, save_conf=opt.save_conf, ) + elif opt.task == 'speed': # speed benchmarks + for w in opt.weights: + test(opt.data, w, opt.batch_size, opt.img_size, 0.25, 0.45, save_json=False, plots=False) + elif opt.task == 'study': # run over a range of settings and save/plot - for weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']: - f = 'study_%s_%s.txt' % (Path(opt.data).stem, Path(weights).stem) # filename to save to - x = list(range(320, 800, 64)) # x axis + x = list(range(256, 1536 + 128, 128)) # x axis (image sizes) + for w in opt.weights: + f = f'study_{Path(opt.data).stem}_{Path(w).stem}.txt' # filename to save to y = [] # y axis for i in x: # img-size - print('\nRunning %s point %s...' 
% (f, i)) - r, _, t = test(opt.data, weights, opt.batch_size, i, opt.conf_thres, opt.iou_thres, opt.save_json, + print(f'\nRunning {f} point {i}...') + r, _, t = test(opt.data, w, opt.batch_size, i, opt.conf_thres, opt.iou_thres, opt.save_json, plots=False) y.append(r + t) # results and times np.savetxt(f, y, fmt='%10.4g') # save os.system('zip -r study.zip study_*.txt') - plot_study_txt(f, x) # plot + plot_study_txt(x=x) # plot diff --git a/utils/plots.py b/utils/plots.py index a199963d7d99..7dad3c72e996 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -223,7 +223,7 @@ def plot_targets_txt(): # from utils.plots import *; plot_targets_txt() plt.savefig('targets.jpg', dpi=200) -def plot_study_txt(path='study/', x=None): # from utils.plots import *; plot_study_txt() +def plot_study_txt(path='', x=None): # from utils.plots import *; plot_study_txt() # Plot study.txt generated by test.py fig, ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True) ax = ax.ravel() From 9646ca438a175861e1819e70ae8b9afb6dee76ad Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 1 Feb 2021 13:51:11 -0800 Subject: [PATCH 043/254] Update plot_study() (#2112) --- models/hub/yolov5l6.yaml | 2 +- models/hub/yolov5m6.yaml | 2 +- models/hub/yolov5s6.yaml | 2 +- models/hub/yolov5x6.yaml | 2 +- utils/plots.py | 19 ++++++++++--------- 5 files changed, 14 insertions(+), 13 deletions(-) diff --git a/models/hub/yolov5l6.yaml b/models/hub/yolov5l6.yaml index e0699d413c9f..11298b01f479 100644 --- a/models/hub/yolov5l6.yaml +++ b/models/hub/yolov5l6.yaml @@ -54,7 +54,7 @@ head: [ -1, 1, Conv, [ 768, 3, 2 ] ], [ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6 - [ -1, 3, C3, [ 1024, False ] ], # 32 (P5/64-xlarge) + [ -1, 3, C3, [ 1024, False ] ], # 32 (P6/64-xlarge) [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6) ] diff --git a/models/hub/yolov5m6.yaml b/models/hub/yolov5m6.yaml index 155170b3f938..48afc865593a 100644 --- a/models/hub/yolov5m6.yaml +++ 
b/models/hub/yolov5m6.yaml @@ -54,7 +54,7 @@ head: [ -1, 1, Conv, [ 768, 3, 2 ] ], [ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6 - [ -1, 3, C3, [ 1024, False ] ], # 32 (P5/64-xlarge) + [ -1, 3, C3, [ 1024, False ] ], # 32 (P6/64-xlarge) [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6) ] diff --git a/models/hub/yolov5s6.yaml b/models/hub/yolov5s6.yaml index 9f2302a09f53..1df577a2cc97 100644 --- a/models/hub/yolov5s6.yaml +++ b/models/hub/yolov5s6.yaml @@ -54,7 +54,7 @@ head: [ -1, 1, Conv, [ 768, 3, 2 ] ], [ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6 - [ -1, 3, C3, [ 1024, False ] ], # 32 (P5/64-xlarge) + [ -1, 3, C3, [ 1024, False ] ], # 32 (P6/64-xlarge) [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6) ] diff --git a/models/hub/yolov5x6.yaml b/models/hub/yolov5x6.yaml index eda764406e93..5ebc02124fe7 100644 --- a/models/hub/yolov5x6.yaml +++ b/models/hub/yolov5x6.yaml @@ -54,7 +54,7 @@ head: [ -1, 1, Conv, [ 768, 3, 2 ] ], [ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6 - [ -1, 3, C3, [ 1024, False ] ], # 32 (P5/64-xlarge) + [ -1, 3, C3, [ 1024, False ] ], # 32 (P6/64-xlarge) [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6) ] diff --git a/utils/plots.py b/utils/plots.py index 7dad3c72e996..3ec793528fe5 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -226,16 +226,17 @@ def plot_targets_txt(): # from utils.plots import *; plot_targets_txt() def plot_study_txt(path='', x=None): # from utils.plots import *; plot_study_txt() # Plot study.txt generated by test.py fig, ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True) - ax = ax.ravel() + # ax = ax.ravel() fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True) - for f in [Path(path) / f'study_coco_{x}.txt' for x in ['yolov5s', 'yolov5m', 'yolov5l', 'yolov5x']]: + # for f in [Path(path) / f'study_coco_{x}.txt' for x in ['yolov5s', 'yolov5m', 'yolov5l', 'yolov5x']]: + for f in 
sorted(Path(path).glob('study*.txt')): y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T x = np.arange(y.shape[1]) if x is None else np.array(x) s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_inference (ms/img)', 't_NMS (ms/img)', 't_total (ms/img)'] - for i in range(7): - ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8) - ax[i].set_title(s[i]) + # for i in range(7): + # ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8) + # ax[i].set_title(s[i]) j = y[3].argmax() + 1 ax2.plot(y[6, :j], y[3, :j] * 1E2, '.-', linewidth=2, markersize=8, @@ -244,14 +245,14 @@ def plot_study_txt(path='', x=None): # from utils.plots import *; plot_study_tx ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5], 'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet') - ax2.grid() - ax2.set_yticks(np.arange(30, 60, 5)) + ax2.grid(alpha=0.2) + ax2.set_yticks(np.arange(20, 60, 5)) ax2.set_xlim(0, 30) - ax2.set_ylim(29, 51) + ax2.set_ylim(30, 55) ax2.set_xlabel('GPU Speed (ms/img)') ax2.set_ylabel('COCO AP val') ax2.legend(loc='lower right') - plt.savefig('test_study.png', dpi=300) + plt.savefig(str(Path(path).name) + '.png', dpi=300) def plot_labels(labels, save_dir=Path(''), loggers=None): From 73a066993051339f6adfe5095a7852a2b9184c16 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Tue, 2 Feb 2021 11:08:41 +0530 Subject: [PATCH 044/254] Start setup for improved W&B integration (#1948) * Add helper functions for wandb and artifacts * cleanup * Reorganize files * Update wandb_utils.py * Update log_dataset.py We can remove this code, as the giou hyp has been deprecated for a while now. * Reorganize and update dataloader call * yaml.SafeLoader * PEP8 reformat * remove redundant checks * Add helper functions for wandb and artifacts * cleanup * Reorganize files * Update wandb_utils.py * Update log_dataset.py We can remove this code, as the giou hyp has been deprecated for a while now. 
* Reorganize and update dataloader call * yaml.SafeLoader * PEP8 reformat * remove redundant checks * Update util files * Update wandb_utils.py * Remove word size * Change path of labels.zip * remove unused imports * remove --rect * log_dataset.py cleanup * log_dataset.py cleanup2 * wandb_utils.py cleanup * remove redundant id_count * wandb_utils.py cleanup2 * rename cls * use pathlib for zip * rename dataloader to dataset * Change import order * Remove redundant code * remove unused import * remove unused imports Co-authored-by: Glenn Jocher --- utils/datasets.py | 3 +- utils/wandb_logging/__init__.py | 0 utils/wandb_logging/log_dataset.py | 39 ++++++++ utils/wandb_logging/wandb_utils.py | 145 +++++++++++++++++++++++++++++ 4 files changed, 186 insertions(+), 1 deletion(-) create mode 100644 utils/wandb_logging/__init__.py create mode 100644 utils/wandb_logging/log_dataset.py create mode 100644 utils/wandb_logging/wandb_utils.py diff --git a/utils/datasets.py b/utils/datasets.py index 360d24c18874..1e23934b63cc 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -348,7 +348,8 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training) self.mosaic_border = [-img_size // 2, -img_size // 2] self.stride = stride - + self.path = path + try: f = [] # image files for p in path if isinstance(path, list) else [path]: diff --git a/utils/wandb_logging/__init__.py b/utils/wandb_logging/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/utils/wandb_logging/log_dataset.py b/utils/wandb_logging/log_dataset.py new file mode 100644 index 000000000000..d790a9ce721e --- /dev/null +++ b/utils/wandb_logging/log_dataset.py @@ -0,0 +1,39 @@ +import argparse +from pathlib import Path + +import yaml + +from wandb_utils import WandbLogger +from utils.datasets import LoadImagesAndLabels + +WANDB_ARTIFACT_PREFIX = 
'wandb-artifact://' + + +def create_dataset_artifact(opt): + with open(opt.data) as f: + data = yaml.load(f, Loader=yaml.SafeLoader) # data dict + logger = WandbLogger(opt, '', None, data, job_type='create_dataset') + nc, names = (1, ['item']) if opt.single_cls else (int(data['nc']), data['names']) + names = {k: v for k, v in enumerate(names)} # to index dictionary + logger.log_dataset_artifact(LoadImagesAndLabels(data['train']), names, name='train') # trainset + logger.log_dataset_artifact(LoadImagesAndLabels(data['val']), names, name='val') # valset + + # Update data.yaml with artifact links + data['train'] = WANDB_ARTIFACT_PREFIX + str(Path(opt.project) / 'train') + data['val'] = WANDB_ARTIFACT_PREFIX + str(Path(opt.project) / 'val') + path = opt.data if opt.overwrite_config else opt.data.replace('.', '_wandb.') # updated data.yaml path + data.pop('download', None) # download via artifact instead of predefined field 'download:' + with open(path, 'w') as f: + yaml.dump(data, f) + print("New Config file => ", path) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path') + parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset') + parser.add_argument('--project', type=str, default='YOLOv5', help='name of W&B Project') + parser.add_argument('--overwrite_config', action='store_true', help='overwrite data.yaml') + opt = parser.parse_args() + + create_dataset_artifact(opt) diff --git a/utils/wandb_logging/wandb_utils.py b/utils/wandb_logging/wandb_utils.py new file mode 100644 index 000000000000..264cd4840e3c --- /dev/null +++ b/utils/wandb_logging/wandb_utils.py @@ -0,0 +1,145 @@ +import json +import shutil +import sys +from datetime import datetime +from pathlib import Path + +import torch + +sys.path.append(str(Path(__file__).parent.parent.parent)) # add utils/ to path +from utils.general import colorstr, xywh2xyxy + +try: 
+ import wandb +except ImportError: + wandb = None + print(f"{colorstr('wandb: ')}Install Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)") + +WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' + + +def remove_prefix(from_string, prefix): + return from_string[len(prefix):] + + +class WandbLogger(): + def __init__(self, opt, name, run_id, data_dict, job_type='Training'): + self.wandb = wandb + self.wandb_run = wandb.init(config=opt, resume="allow", + project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem, + name=name, + job_type=job_type, + id=run_id) if self.wandb else None + + if job_type == 'Training': + self.setup_training(opt, data_dict) + if opt.bbox_interval == -1: + opt.bbox_interval = (opt.epochs // 10) if opt.epochs > 10 else opt.epochs + if opt.save_period == -1: + opt.save_period = (opt.epochs // 10) if opt.epochs > 10 else opt.epochs + + def setup_training(self, opt, data_dict): + self.log_dict = {} + self.train_artifact_path, self.trainset_artifact = \ + self.download_dataset_artifact(data_dict['train'], opt.artifact_alias) + self.test_artifact_path, self.testset_artifact = \ + self.download_dataset_artifact(data_dict['val'], opt.artifact_alias) + self.result_artifact, self.result_table, self.weights = None, None, None + if self.train_artifact_path is not None: + train_path = Path(self.train_artifact_path) / 'data/images/' + data_dict['train'] = str(train_path) + if self.test_artifact_path is not None: + test_path = Path(self.test_artifact_path) / 'data/images/' + data_dict['val'] = str(test_path) + self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") + self.result_table = wandb.Table(["epoch", "id", "prediction", "avg_confidence"]) + if opt.resume_from_artifact: + modeldir, _ = self.download_model_artifact(opt.resume_from_artifact) + if modeldir: + self.weights = Path(modeldir) / "best.pt" + opt.weights = self.weights + + def download_dataset_artifact(self, path, alias): + if 
path.startswith(WANDB_ARTIFACT_PREFIX): + dataset_artifact = wandb.use_artifact(remove_prefix(path, WANDB_ARTIFACT_PREFIX) + ":" + alias) + assert dataset_artifact is not None, "'Error: W&B dataset artifact doesn\'t exist'" + datadir = dataset_artifact.download() + labels_zip = Path(datadir) / "data/labels.zip" + shutil.unpack_archive(labels_zip, Path(datadir) / 'data/labels', 'zip') + print("Downloaded dataset to : ", datadir) + return datadir, dataset_artifact + return None, None + + def download_model_artifact(self, name): + model_artifact = wandb.use_artifact(name + ":latest") + assert model_artifact is not None, 'Error: W&B model artifact doesn\'t exist' + modeldir = model_artifact.download() + print("Downloaded model to : ", modeldir) + return modeldir, model_artifact + + def log_model(self, path, opt, epoch): + datetime_suffix = datetime.today().strftime('%Y-%m-%d-%H-%M-%S') + model_artifact = wandb.Artifact('run_' + wandb.run.id + '_model', type='model', metadata={ + 'original_url': str(path), + 'epoch': epoch + 1, + 'save period': opt.save_period, + 'project': opt.project, + 'datetime': datetime_suffix + }) + model_artifact.add_file(str(path / 'last.pt'), name='last.pt') + model_artifact.add_file(str(path / 'best.pt'), name='best.pt') + wandb.log_artifact(model_artifact) + print("Saving model artifact on epoch ", epoch + 1) + + def log_dataset_artifact(self, dataset, class_to_id, name='dataset'): + artifact = wandb.Artifact(name=name, type="dataset") + image_path = dataset.path + artifact.add_dir(image_path, name='data/images') + table = wandb.Table(columns=["id", "train_image", "Classes"]) + class_set = wandb.Classes([{'id': id, 'name': name} for id, name in class_to_id.items()]) + for si, (img, labels, paths, shapes) in enumerate(dataset): + height, width = shapes[0] + labels[:, 2:] = (xywh2xyxy(labels[:, 2:].view(-1, 4))) + labels[:, 2:] *= torch.Tensor([width, height, width, height]) + box_data = [] + img_classes = {} + for cls, *xyxy in labels[:, 
1:].tolist(): + cls = int(cls) + box_data.append({"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, + "class_id": cls, + "box_caption": "%s" % (class_to_id[cls]), + "scores": {"acc": 1}, + "domain": "pixel"}) + img_classes[cls] = class_to_id[cls] + boxes = {"ground_truth": {"box_data": box_data, "class_labels": class_to_id}} # inference-space + table.add_data(si, wandb.Image(paths, classes=class_set, boxes=boxes), json.dumps(img_classes)) + artifact.add(table, name) + labels_path = 'labels'.join(image_path.rsplit('images', 1)) + zip_path = Path(labels_path).parent / (name + '_labels.zip') + if not zip_path.is_file(): # make_archive won't check if file exists + shutil.make_archive(zip_path.with_suffix(''), 'zip', labels_path) + artifact.add_file(str(zip_path), name='data/labels.zip') + wandb.log_artifact(artifact) + print("Saving data to W&B...") + + def log(self, log_dict): + if self.wandb_run: + for key, value in log_dict.items(): + self.log_dict[key] = value + + def end_epoch(self): + if self.wandb_run and self.log_dict: + wandb.log(self.log_dict) + self.log_dict = {} + + def finish_run(self): + if self.wandb_run: + if self.result_artifact: + print("Add Training Progress Artifact") + self.result_artifact.add(self.result_table, 'result') + train_results = wandb.JoinedTable(self.testset_artifact.get("val"), self.result_table, "id") + self.result_artifact.add(train_results, 'joined_result') + wandb.log_artifact(self.result_artifact) + if self.log_dict: + wandb.log(self.log_dict) + wandb.run.finish() From 4bdc5a397e58504f6053dac0bf558c9af9cb0440 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 5 Feb 2021 09:06:23 -0800 Subject: [PATCH 045/254] LoadImages() pathlib update (#2140) --- utils/datasets.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index 1e23934b63cc..9b823eb85b75 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -120,8 +120,7 @@ def 
__iter__(self): class LoadImages: # for inference def __init__(self, path, img_size=640, stride=32): - p = str(Path(path)) # os-agnostic - p = os.path.abspath(p) # absolute path + p = str(Path(path).absolute()) # os-agnostic absolute path if '*' in p: files = sorted(glob.glob(p, recursive=True)) # glob elif os.path.isdir(p): @@ -349,21 +348,24 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r self.mosaic_border = [-img_size // 2, -img_size // 2] self.stride = stride self.path = path - + try: f = [] # image files for p in path if isinstance(path, list) else [path]: p = Path(p) # os-agnostic if p.is_dir(): # dir f += glob.glob(str(p / '**' / '*.*'), recursive=True) + # f = list(p.rglob('**/*.*')) # pathlib elif p.is_file(): # file with open(p, 'r') as t: t = t.read().strip().splitlines() parent = str(p.parent) + os.sep f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path + # f += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib) else: raise Exception(f'{prefix}{p} does not exist') self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in img_formats]) + # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in img_formats]) # pathlib assert self.img_files, f'{prefix}No images found' except Exception as e: raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {help_url}') From e9b3de463ad7734d41c059a4a513472ed618b7a1 Mon Sep 17 00:00:00 2001 From: train255 Date: Sat, 6 Feb 2021 02:12:43 +0700 Subject: [PATCH 046/254] Unique *.cache filenames fix (#2134) * fix #2121 * Update test.py * Update train.py * Update autoanchor.py * Update datasets.py * Update log_dataset.py * Update datasets.py Co-authored-by: Glenn Jocher --- utils/datasets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/datasets.py b/utils/datasets.py index 9b823eb85b75..7a8f073608cb 100755 --- a/utils/datasets.py +++ 
b/utils/datasets.py @@ -372,7 +372,7 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r # Check cache self.label_files = img2label_paths(self.img_files) # labels - cache_path = Path(self.label_files[0]).parent.with_suffix('.cache') # cached labels + cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') # cached labels if cache_path.is_file(): cache = torch.load(cache_path) # load if cache['hash'] != get_hash(self.label_files + self.img_files) or 'results' not in cache: # changed From 86897e366312c01c2e32a004564480e4793febea Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 6 Feb 2021 10:29:32 -0800 Subject: [PATCH 047/254] Update train.py test batch_size (#2148) * Update train.py * Update loss.py --- train.py | 4 ++-- utils/loss.py | 3 +-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/train.py b/train.py index 4ec97ae71e16..4cbd022bd231 100644 --- a/train.py +++ b/train.py @@ -190,7 +190,7 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): # Process 0 if rank in [-1, 0]: ema.updates = start_epoch * nb // accumulate # set EMA updates - testloader = create_dataloader(test_path, imgsz_test, total_batch_size, gs, opt, # testloader + testloader = create_dataloader(test_path, imgsz_test, batch_size * 2, gs, opt, # testloader hyp=hyp, cache=opt.cache_images and not opt.notest, rect=True, rank=-1, world_size=opt.world_size, workers=opt.workers, pad=0.5, prefix=colorstr('val: '))[0] @@ -338,7 +338,7 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): final_epoch = epoch + 1 == epochs if not opt.notest or final_epoch: # Calculate mAP results, maps, times = test.test(opt.data, - batch_size=total_batch_size, + batch_size=batch_size * 2, imgsz=imgsz_test, model=ema.ema, single_cls=opt.single_cls, diff --git a/utils/loss.py b/utils/loss.py index 889ddf7295da..2490d4bb7cfc 100644 --- a/utils/loss.py +++ b/utils/loss.py @@ -105,8 +105,7 @@ def __init__(self, model, 
autobalance=False): BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g) det = model.module.model[-1] if is_parallel(model) else model.model[-1] # Detect() module - self.balance = {3: [3.67, 1.0, 0.43], 4: [3.78, 1.0, 0.39, 0.22], 5: [3.88, 1.0, 0.37, 0.17, 0.10]}[det.nl] - # self.balance = [1.0] * det.nl + self.balance = {3: [3.67, 1.0, 0.43], 4: [4.0, 1.0, 0.25, 0.06], 5: [4.0, 1.0, 0.25, 0.06, .02]}[det.nl] self.ssi = (det.stride == 16).nonzero(as_tuple=False).item() # stride 16 index self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, model.gr, h, autobalance for k in 'na', 'nc', 'nl', 'anchors': From ad839eda388dcd12b0154b6bc3eef1555a7ff8f7 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 6 Feb 2021 11:21:04 -0800 Subject: [PATCH 048/254] Update train.py (#2149) --- train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/train.py b/train.py index 4cbd022bd231..ba48896f1e94 100644 --- a/train.py +++ b/train.py @@ -414,7 +414,7 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): if opt.data.endswith('coco.yaml') and nc == 80: # if COCO for conf, iou, save_json in ([0.25, 0.45, False], [0.001, 0.65, True]): # speed, mAP tests results, _, _ = test.test(opt.data, - batch_size=total_batch_size, + batch_size=batch_size * 2, imgsz=imgsz_test, conf_thres=conf, iou_thres=iou, From 6b634c6b8749335d0c25009f0b6fec4dd619d084 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 6 Feb 2021 11:26:54 -0800 Subject: [PATCH 049/254] Linear LR scheduler option (#2150) * Linear LR scheduler option * Update train.py --- train.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/train.py b/train.py index ba48896f1e94..4065e1f149ef 100644 --- a/train.py +++ b/train.py @@ -120,7 +120,10 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): # Scheduler https://arxiv.org/pdf/1812.01187.pdf # https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR - lf = one_cycle(1, 
hyp['lrf'], epochs) # cosine 1->hyp['lrf'] + if opt.linear_lr: + lf = lambda x: (1 - x / (epochs - 1)) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear + else: + lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf'] scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) # plot_lr_scheduler(optimizer, scheduler, epochs) @@ -464,6 +467,7 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): parser.add_argument('--name', default='exp', help='save to project/name') parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') parser.add_argument('--quad', action='store_true', help='quad dataloader') + parser.add_argument('--linear-lr', action='store_true', help='linear LR') opt = parser.parse_args() # Set DDP variables From a5359f6c3288c0549193681632c8e9b324e81017 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 6 Feb 2021 22:13:39 -0800 Subject: [PATCH 050/254] Update data-autodownload background tasks (#2154) * Update get_coco.sh * Update get_voc.sh --- data/scripts/get_coco.sh | 9 +++++---- data/scripts/get_voc.sh | 4 ++-- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/data/scripts/get_coco.sh b/data/scripts/get_coco.sh index b0df905c8525..02634c000dfe 100755 --- a/data/scripts/get_coco.sh +++ b/data/scripts/get_coco.sh @@ -10,8 +10,9 @@ # Download/unzip labels d='../' # unzip directory url=https://github.com/ultralytics/yolov5/releases/download/v1.0/ -f='coco2017labels.zip' # 68 MB -echo 'Downloading' $url$f ' ...' && curl -L $url$f -o $f && unzip -q $f -d $d && rm $f # download, unzip, remove +f='coco2017labels.zip' # 68 MB +echo 'Downloading' $url$f ' ...' 
+curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & # download, unzip, remove in background # Download/unzip images d='../coco/images' # unzip directory @@ -20,7 +21,7 @@ f1='train2017.zip' # 19G, 118k images f2='val2017.zip' # 1G, 5k images f3='test2017.zip' # 7G, 41k images (optional) for f in $f1 $f2; do - echo 'Downloading' $url$f '...' && curl -L $url$f -o $f # download, (unzip, remove in background) - unzip -q $f -d $d && rm $f & + echo 'Downloading' $url$f '...' + curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & # download, unzip, remove in background done wait # finish background tasks diff --git a/data/scripts/get_voc.sh b/data/scripts/get_voc.sh index 06414b085095..13b83c28d706 100644 --- a/data/scripts/get_voc.sh +++ b/data/scripts/get_voc.sh @@ -18,8 +18,8 @@ f1=VOCtrainval_06-Nov-2007.zip # 446MB, 5012 images f2=VOCtest_06-Nov-2007.zip # 438MB, 4953 images f3=VOCtrainval_11-May-2012.zip # 1.95GB, 17126 images for f in $f3 $f2 $f1; do - echo 'Downloading' $url$f '...' && curl -L $url$f -o $f # download, (unzip, remove in background) - unzip -q $f -d $d && rm $f & + echo 'Downloading' $url$f '...' 
+ curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & # download, unzip, remove in background done wait # finish background tasks From c32b0aff76521ddf1d230921c5069b929c9dc161 Mon Sep 17 00:00:00 2001 From: ab-101 <56578530+ab-101@users.noreply.github.com> Date: Tue, 9 Feb 2021 12:13:40 +0500 Subject: [PATCH 051/254] Update detect.py (#2167) Without this cv2.imshow opens a window but nothing is visible --- detect.py | 1 + 1 file changed, 1 insertion(+) diff --git a/detect.py b/detect.py index f9085e670916..3f1d6c521b67 100644 --- a/detect.py +++ b/detect.py @@ -118,6 +118,7 @@ def detect(save_img=False): # Stream results if view_img: cv2.imshow(str(p), im0) + cv2.waitKey(1) # 1 millisecond # Save results (image with detections) if save_img: From ace3e02e406307ed0cebc25aebe15835767107b2 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 9 Feb 2021 22:03:29 -0800 Subject: [PATCH 052/254] Update requirements.txt (#2173) --- requirements.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index d22b42f5d786..cb50cf8f32e1 100755 --- a/requirements.txt +++ b/requirements.txt @@ -21,8 +21,8 @@ seaborn>=0.11.0 pandas # export -------------------------------------- -# coremltools==4.0 -# onnx>=1.8.0 +# coremltools>=4.1 +# onnx>=1.8.1 # scikit-learn==0.19.2 # for coreml quantization # extras -------------------------------------- From c9bda112aebaa0be846864f9d224191d0e19d419 Mon Sep 17 00:00:00 2001 From: Transigent Date: Wed, 10 Feb 2021 18:16:38 +1000 Subject: [PATCH 053/254] Update utils/datasets.py to support .webp files (#2174) Simply added 'webp' as an image format to the img_formats array so that webp image files can be used as training data. 
--- utils/datasets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/datasets.py b/utils/datasets.py index 7a8f073608cb..05c8fdbf4c4f 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -25,7 +25,7 @@ # Parameters help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' -img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng'] # acceptable image suffixes +img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp'] # acceptable image suffixes vid_formats = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes logger = logging.getLogger(__name__) From a5d5f9262d21556b0f91a6facaef77708ca4cbc4 Mon Sep 17 00:00:00 2001 From: NanoCode012 Date: Thu, 11 Feb 2021 04:01:48 +0700 Subject: [PATCH 054/254] Changed socket port and added timeout (#2176) --- utils/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index bbc0f32b8425..f979a05c6e49 100755 --- a/utils/general.py +++ b/utils/general.py @@ -51,7 +51,7 @@ def check_online(): # Check internet connectivity import socket try: - socket.create_connection(("1.1.1.1", 53)) # check host accesability + socket.create_connection(("1.1.1.1", 443), 5) # check host accesability return True except OSError: return False From 404749a33cc29d119f54b2ce35bf3b33a847a487 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 10 Feb 2021 16:10:43 -0800 Subject: [PATCH 055/254] PyTorch Hub results.save('path/to/dir') (#2179) --- models/common.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/models/common.py b/models/common.py index e8adb66293d5..7cfea01f223e 100644 --- a/models/common.py +++ b/models/common.py @@ -1,6 +1,7 @@ # This file contains modules common to various models import math +from pathlib import Path import numpy as np import requests @@ -241,7 +242,7 @@ def __init__(self, imgs, pred, names=None): self.xywhn = [x / g for x, g in 
zip(self.xywh, gn)] # xywh normalized self.n = len(self.pred) - def display(self, pprint=False, show=False, save=False, render=False): + def display(self, pprint=False, show=False, save=False, render=False, save_dir=''): colors = color_list() for i, (img, pred) in enumerate(zip(self.imgs, self.pred)): str = f'image {i + 1}/{len(self.pred)}: {img.shape[0]}x{img.shape[1]} ' @@ -259,7 +260,7 @@ def display(self, pprint=False, show=False, save=False, render=False): if show: img.show(f'image {i}') # show if save: - f = f'results{i}.jpg' + f = Path(save_dir) / f'results{i}.jpg' img.save(f) # save print(f"{'Saving' * (i == 0)} {f},", end='' if i < self.n - 1 else ' done.\n') if render: @@ -271,8 +272,8 @@ def print(self): def show(self): self.display(show=True) # show results - def save(self): - self.display(save=True) # save results + def save(self, save_dir=''): + self.display(save=True, save_dir=save_dir) # save results def render(self): self.display(render=True) # render results From bdd88e1ed7c3d3c703f477e574a0db376104e0b6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 11 Feb 2021 21:22:45 -0800 Subject: [PATCH 056/254] YOLOv5 Segmentation Dataloader Updates (#2188) * Update C3 module * Update C3 module * Update C3 module * Update C3 module * update * update * update * update * update * update * update * update * update * updates * updates * updates * updates * updates * updates * updates * updates * updates * updates * update * update * update * update * updates * updates * updates * updates * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update datasets * update * update * update * update attempt_downlaod() * merge * merge * update * update * update 
* update * update * update * update * update * update * update * parameterize eps * comments * gs-multiple * update * max_nms implemented * Create one_cycle() function * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * GitHub API rate limit fix * update * ComputeLoss * ComputeLoss * ComputeLoss * ComputeLoss * ComputeLoss * ComputeLoss * ComputeLoss * ComputeLoss * ComputeLoss * ComputeLoss * ComputeLoss * astuple * epochs * update * update * ComputeLoss() * update * update * update * update * update * update * update * update * update * update * update * merge * merge * merge * merge * update * update * update * update * commit=tag == tags[-1] * Update cudnn.benchmark * update * update * update * updates * updates * updates * updates * updates * updates * updates * update * update * update * update * update * mosaic9 * update * update * update * update * update * update * institute cache versioning * only display on existing cache * reverse cache exists booleans --- data/scripts/get_coco.sh | 2 +- utils/datasets.py | 134 ++++++++++++++++++++++----------------- utils/general.py | 36 ++++++++++- utils/loss.py | 2 +- 4 files changed, 113 insertions(+), 61 deletions(-) diff --git a/data/scripts/get_coco.sh b/data/scripts/get_coco.sh index 02634c000dfe..bbb1e9291d5b 100755 --- a/data/scripts/get_coco.sh +++ b/data/scripts/get_coco.sh @@ -10,7 +10,7 @@ # Download/unzip labels d='../' # unzip directory url=https://github.com/ultralytics/yolov5/releases/download/v1.0/ -f='coco2017labels.zip' # 68 MB +f='coco2017labels.zip' # or 'coco2017labels-segments.zip', 68 MB echo 'Downloading' $url$f ' ...' 
curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & # download, unzip, remove in background diff --git a/utils/datasets.py b/utils/datasets.py index 05c8fdbf4c4f..29a8812a20a2 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -20,7 +20,8 @@ from torch.utils.data import Dataset from tqdm import tqdm -from utils.general import xyxy2xywh, xywh2xyxy, xywhn2xyxy, clean_str +from utils.general import xyxy2xywh, xywh2xyxy, xywhn2xyxy, xyn2xy, segment2box, segments2boxes, resample_segments, \ + clean_str from utils.torch_utils import torch_distributed_zero_first # Parameters @@ -374,21 +375,23 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r self.label_files = img2label_paths(self.img_files) # labels cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') # cached labels if cache_path.is_file(): - cache = torch.load(cache_path) # load - if cache['hash'] != get_hash(self.label_files + self.img_files) or 'results' not in cache: # changed - cache = self.cache_labels(cache_path, prefix) # re-cache + cache, exists = torch.load(cache_path), True # load + if cache['hash'] != get_hash(self.label_files + self.img_files) or 'version' not in cache: # changed + cache, exists = self.cache_labels(cache_path, prefix), False # re-cache else: - cache = self.cache_labels(cache_path, prefix) # cache + cache, exists = self.cache_labels(cache_path, prefix), False # cache # Display cache - [nf, nm, ne, nc, n] = cache.pop('results') # found, missing, empty, corrupted, total - desc = f"Scanning '{cache_path}' for images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted" - tqdm(None, desc=prefix + desc, total=n, initial=n) + nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupted, total + if exists: + d = f"Scanning '{cache_path}' for images and labels... 
{nf} found, {nm} missing, {ne} empty, {nc} corrupted" + tqdm(None, desc=prefix + d, total=n, initial=n) # display cache results assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {help_url}' # Read cache cache.pop('hash') # remove hash - labels, shapes = zip(*cache.values()) + cache.pop('version') # remove version + labels, shapes, self.segments = zip(*cache.values()) self.labels = list(labels) self.shapes = np.array(shapes, dtype=np.float64) self.img_files = list(cache.keys()) # update @@ -451,6 +454,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): im = Image.open(im_file) im.verify() # PIL verify shape = exif_size(im) # image size + segments = [] # instance segments assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels' assert im.format.lower() in img_formats, f'invalid image format {im.format}' @@ -458,7 +462,12 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): if os.path.isfile(lb_file): nf += 1 # label found with open(lb_file, 'r') as f: - l = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels + l = [x.split() for x in f.read().strip().splitlines()] + if any([len(x) > 8 for x in l]): # is segment + classes = np.array([x[0] for x in l], dtype=np.float32) + segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in l] # (cls, xy1...) 
+ l = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh) + l = np.array(l, dtype=np.float32) if len(l): assert l.shape[1] == 5, 'labels require 5 columns each' assert (l >= 0).all(), 'negative labels' @@ -470,7 +479,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): else: nm += 1 # label missing l = np.zeros((0, 5), dtype=np.float32) - x[im_file] = [l, shape] + x[im_file] = [l, shape, segments] except Exception as e: nc += 1 print(f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}') @@ -482,7 +491,8 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): print(f'{prefix}WARNING: No labels found in {path}. See {help_url}') x['hash'] = get_hash(self.label_files + self.img_files) - x['results'] = [nf, nm, ne, nc, i + 1] + x['results'] = nf, nm, ne, nc, i + 1 + x['version'] = 0.1 # cache version torch.save(x, path) # save for next time logging.info(f'{prefix}New cache created: {path}') return x @@ -652,7 +662,7 @@ def hist_equalize(img, clahe=True, bgr=False): def load_mosaic(self, index): # loads images in a 4-mosaic - labels4 = [] + labels4, segments4 = [], [] s = self.img_size yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y indices = [index] + [self.indices[random.randint(0, self.n - 1)] for _ in range(3)] # 3 additional image indices @@ -680,19 +690,21 @@ def load_mosaic(self, index): padh = y1a - y1b # Labels - labels = self.labels[index].copy() + labels, segments = self.labels[index].copy(), self.segments[index].copy() if labels.size: labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format + segments = [xyn2xy(x, w, h, padw, padh) for x in segments] labels4.append(labels) + segments4.extend(segments) # Concat/clip labels - if len(labels4): - labels4 = np.concatenate(labels4, 0) - np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:]) # use with random_perspective - # img4, labels4 = 
replicate(img4, labels4) # replicate + labels4 = np.concatenate(labels4, 0) + for x in (labels4[:, 1:], *segments4): + np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() + # img4, labels4 = replicate(img4, labels4) # replicate # Augment - img4, labels4 = random_perspective(img4, labels4, + img4, labels4 = random_perspective(img4, labels4, segments4, degrees=self.hyp['degrees'], translate=self.hyp['translate'], scale=self.hyp['scale'], @@ -706,7 +718,7 @@ def load_mosaic(self, index): def load_mosaic9(self, index): # loads images in a 9-mosaic - labels9 = [] + labels9, segments9 = [], [] s = self.img_size indices = [index] + [self.indices[random.randint(0, self.n - 1)] for _ in range(8)] # 8 additional image indices for i, index in enumerate(indices): @@ -739,30 +751,34 @@ def load_mosaic9(self, index): x1, y1, x2, y2 = [max(x, 0) for x in c] # allocate coords # Labels - labels = self.labels[index].copy() + labels, segments = self.labels[index].copy(), self.segments[index].copy() if labels.size: labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady) # normalized xywh to pixel xyxy format + segments = [xyn2xy(x, w, h, padx, pady) for x in segments] labels9.append(labels) + segments9.extend(segments) # Image img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax] hp, wp = h, w # height, width previous # Offset - yc, xc = [int(random.uniform(0, s)) for x in self.mosaic_border] # mosaic center x, y + yc, xc = [int(random.uniform(0, s)) for _ in self.mosaic_border] # mosaic center x, y img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s] # Concat/clip labels - if len(labels9): - labels9 = np.concatenate(labels9, 0) - labels9[:, [1, 3]] -= xc - labels9[:, [2, 4]] -= yc + labels9 = np.concatenate(labels9, 0) + labels9[:, [1, 3]] -= xc + labels9[:, [2, 4]] -= yc + c = np.array([xc, yc]) # centers + segments9 = [x - c for x in segments9] - np.clip(labels9[:, 1:], 0, 2 * s, out=labels9[:, 1:]) # use with random_perspective - # img9, labels9 = 
replicate(img9, labels9) # replicate + for x in (labels9[:, 1:], *segments9): + np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() + # img9, labels9 = replicate(img9, labels9) # replicate # Augment - img9, labels9 = random_perspective(img9, labels9, + img9, labels9 = random_perspective(img9, labels9, segments9, degrees=self.hyp['degrees'], translate=self.hyp['translate'], scale=self.hyp['scale'], @@ -823,7 +839,8 @@ def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scale return img, ratio, (dw, dh) -def random_perspective(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, border=(0, 0)): +def random_perspective(img, targets=(), segments=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, + border=(0, 0)): # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10)) # targets = [cls, xyxy] @@ -875,37 +892,38 @@ def random_perspective(img, targets=(), degrees=10, translate=.1, scale=.1, shea # Transform label coordinates n = len(targets) if n: - # warp points - xy = np.ones((n * 4, 3)) - xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1 - xy = xy @ M.T # transform - if perspective: - xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 8) # rescale - else: # affine - xy = xy[:, :2].reshape(n, 8) - - # create new boxes - x = xy[:, [0, 2, 4, 6]] - y = xy[:, [1, 3, 5, 7]] - xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T - - # # apply angle-based reduction of bounding boxes - # radians = a * math.pi / 180 - # reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5 - # x = (xy[:, 2] + xy[:, 0]) / 2 - # y = (xy[:, 3] + xy[:, 1]) / 2 - # w = (xy[:, 2] - xy[:, 0]) * reduction - # h = (xy[:, 3] - xy[:, 1]) * reduction - # xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T - - # clip boxes - xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width) - 
xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height) + use_segments = any(x.any() for x in segments) + new = np.zeros((n, 4)) + if use_segments: # warp segments + segments = resample_segments(segments) # upsample + for i, segment in enumerate(segments): + xy = np.ones((len(segment), 3)) + xy[:, :2] = segment + xy = xy @ M.T # transform + xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2] # perspective rescale or affine + + # clip + new[i] = segment2box(xy, width, height) + + else: # warp boxes + xy = np.ones((n * 4, 3)) + xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1 + xy = xy @ M.T # transform + xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine + + # create new boxes + x = xy[:, [0, 2, 4, 6]] + y = xy[:, [1, 3, 5, 7]] + new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T + + # clip + new[:, [0, 2]] = new[:, [0, 2]].clip(0, width) + new[:, [1, 3]] = new[:, [1, 3]].clip(0, height) # filter candidates - i = box_candidates(box1=targets[:, 1:5].T * s, box2=xy.T) + i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10) targets = targets[i] - targets[:, 1:5] = xy[i] + targets[:, 1:5] = new[i] return img, targets diff --git a/utils/general.py b/utils/general.py index f979a05c6e49..24807483f5f4 100755 --- a/utils/general.py +++ b/utils/general.py @@ -225,7 +225,7 @@ def xywh2xyxy(x): return y -def xywhn2xyxy(x, w=640, h=640, padw=32, padh=32): +def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0): # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw # top left x @@ -235,6 +235,40 @@ def xywhn2xyxy(x, w=640, h=640, padw=32, padh=32): return y +def xyn2xy(x, w=640, h=640, padw=0, padh=0): + # Convert normalized segments into pixel segments, shape (n,2) 
+ y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = w * x[:, 0] + padw # top left x + y[:, 1] = h * x[:, 1] + padh # top left y + return y + + +def segment2box(segment, width=640, height=640): + # Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy) + x, y = segment.T # segment xy + inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height) + x, y, = x[inside], y[inside] + return np.array([x.min(), y.min(), x.max(), y.max()]) if any(x) else np.zeros((1, 4)) # cls, xyxy + + +def segments2boxes(segments): + # Convert segment labels to box labels, i.e. (cls, xy1, xy2, ...) to (cls, xywh) + boxes = [] + for s in segments: + x, y = s.T # segment xy + boxes.append([x.min(), y.min(), x.max(), y.max()]) # cls, xyxy + return xyxy2xywh(np.array(boxes)) # cls, xywh + + +def resample_segments(segments, n=1000): + # Up-sample an (n,2) segment + for i, s in enumerate(segments): + x = np.linspace(0, len(s) - 1, n) + xp = np.arange(len(s)) + segments[i] = np.concatenate([np.interp(x, xp, s[:, i]) for i in range(2)]).reshape(2, -1).T # segment xy + return segments + + def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None): # Rescale coords (xyxy) from img1_shape to img0_shape if ratio_pad is None: # calculate from img0_shape diff --git a/utils/loss.py b/utils/loss.py index 2490d4bb7cfc..481d25e207f2 100644 --- a/utils/loss.py +++ b/utils/loss.py @@ -105,7 +105,7 @@ def __init__(self, model, autobalance=False): BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g) det = model.module.model[-1] if is_parallel(model) else model.model[-1] # Detect() module - self.balance = {3: [3.67, 1.0, 0.43], 4: [4.0, 1.0, 0.25, 0.06], 5: [4.0, 1.0, 0.25, 0.06, .02]}[det.nl] + self.balance = {3: [4.0, 1.0, 0.4], 4: [4.0, 1.0, 0.25, 0.06], 5: [4.0, 1.0, 0.25, 0.06, .02]}[det.nl] self.ssi = (det.stride == 16).nonzero(as_tuple=False).item() # stride 16 index self.BCEcls, self.BCEobj, self.gr, self.hyp, 
self.autobalance = BCEcls, BCEobj, model.gr, h, autobalance for k in 'na', 'nc', 'nl', 'anchors': From 17ac94b7968703e708bfeb7274de755c4b2f1f43 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 11 Feb 2021 22:39:37 -0800 Subject: [PATCH 057/254] Created using Colaboratory --- tutorial.ipynb | 182 ++++++++++++++++++++++++------------------------- 1 file changed, 90 insertions(+), 92 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 3f7133f4f7d7..7587d9f536fe 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -16,7 +16,7 @@ "accelerator": "GPU", "widgets": { "application/vnd.jupyter.widget-state+json": { - "811fd52fef65422c8267bafcde8a2c3d": { + "1f8e9b8ebded4175b2eaa9f75c3ceb00": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "state": { @@ -28,15 +28,15 @@ "_view_count": null, "_view_module_version": "1.5.0", "box_style": "", - "layout": "IPY_MODEL_8f41b90117224eef9133a9c3a103dbba", + "layout": "IPY_MODEL_0a1246a73077468ab80e979cc0576cd2", "_model_module": "@jupyter-widgets/controls", "children": [ - "IPY_MODEL_ca2fb37af6ed43d4a74cdc9f2ac5c4a5", - "IPY_MODEL_29419ae5ebb9403ea73f7e5a68037bdd" + "IPY_MODEL_d327cde5a85a4a51bb8b1b3e9cf06c97", + "IPY_MODEL_d5ef1cb2cbed4b87b3c5d292ff2b0da6" ] } }, - "8f41b90117224eef9133a9c3a103dbba": { + "0a1246a73077468ab80e979cc0576cd2": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -87,12 +87,12 @@ "left": null } }, - "ca2fb37af6ed43d4a74cdc9f2ac5c4a5": { + "d327cde5a85a4a51bb8b1b3e9cf06c97": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "state": { "_view_name": "ProgressView", - "style": "IPY_MODEL_6511b4dfb10b48d1bc98bcfb3987bfa0", + "style": "IPY_MODEL_8d5dff8bca14435a88fa1814533acd85", "_dom_classes": [], "description": "100%", "_model_name": "FloatProgressModel", @@ -107,30 +107,30 @@ "min": 0, "description_tooltip": null, "_model_module": "@jupyter-widgets/controls", - "layout": 
"IPY_MODEL_64f0badf1a8f489885aa984dd62d37dc" + "layout": "IPY_MODEL_3d5136c19e7645ca9bc8f51ceffb2be1" } }, - "29419ae5ebb9403ea73f7e5a68037bdd": { + "d5ef1cb2cbed4b87b3c5d292ff2b0da6": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "state": { "_view_name": "HTMLView", - "style": "IPY_MODEL_f569911c5cfc4d81bb1bdfa83447afc8", + "style": "IPY_MODEL_2919396dbd4b4c8e821d12bd28665d8a", "_dom_classes": [], "description": "", "_model_name": "HTMLModel", "placeholder": "​", "_view_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", - "value": " 781M/781M [00:23<00:00, 34.2MB/s]", + "value": " 781M/781M [00:12<00:00, 65.5MB/s]", "_view_count": null, "_view_module_version": "1.5.0", "description_tooltip": null, "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_84943ade566440aaa2dcf3b3b27e7074" + "layout": "IPY_MODEL_6feb16f2b2fa4021b1a271e1dd442d04" } }, - "6511b4dfb10b48d1bc98bcfb3987bfa0": { + "8d5dff8bca14435a88fa1814533acd85": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "state": { @@ -145,7 +145,7 @@ "_model_module": "@jupyter-widgets/controls" } }, - "64f0badf1a8f489885aa984dd62d37dc": { + "3d5136c19e7645ca9bc8f51ceffb2be1": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -196,7 +196,7 @@ "left": null } }, - "f569911c5cfc4d81bb1bdfa83447afc8": { + "2919396dbd4b4c8e821d12bd28665d8a": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "state": { @@ -210,7 +210,7 @@ "_model_module": "@jupyter-widgets/controls" } }, - "84943ade566440aaa2dcf3b3b27e7074": { + "6feb16f2b2fa4021b1a271e1dd442d04": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -261,7 +261,7 @@ "left": null } }, - "8501ed1563e4452eac9df6b7a66e8f8c": { + "e6459e0bcee449b090fc9807672725bc": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "state": { @@ -273,15 +273,15 @@ 
"_view_count": null, "_view_module_version": "1.5.0", "box_style": "", - "layout": "IPY_MODEL_d2bb96801e1f46f4a58e02534f7026ff", + "layout": "IPY_MODEL_c341e1d3bf3b40d1821ce392eb966c68", "_model_module": "@jupyter-widgets/controls", "children": [ - "IPY_MODEL_468a796ef06b4a24bcba6fbd4a0a8db5", - "IPY_MODEL_42ad5c1ea7be4835bffebf90642178f1" + "IPY_MODEL_660afee173694231a6dce3cd94df6cae", + "IPY_MODEL_261218485cef48df961519dde5edfcbe" ] } }, - "d2bb96801e1f46f4a58e02534f7026ff": { + "c341e1d3bf3b40d1821ce392eb966c68": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -332,12 +332,12 @@ "left": null } }, - "468a796ef06b4a24bcba6fbd4a0a8db5": { + "660afee173694231a6dce3cd94df6cae": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "state": { "_view_name": "ProgressView", - "style": "IPY_MODEL_c58b5536d98f4814831934e9c30c4d78", + "style": "IPY_MODEL_32736d503c06497abfae8c0421918255", "_dom_classes": [], "description": "100%", "_model_name": "FloatProgressModel", @@ -352,30 +352,30 @@ "min": 0, "description_tooltip": null, "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_505597101151486ea29e9ab754544d27" + "layout": "IPY_MODEL_e257738711f54d5280c8393d9d3dce1c" } }, - "42ad5c1ea7be4835bffebf90642178f1": { + "261218485cef48df961519dde5edfcbe": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "state": { "_view_name": "HTMLView", - "style": "IPY_MODEL_de6e7b4b4a1c408c9f89d89b07a13bcd", + "style": "IPY_MODEL_beb7a6fe34b840899bb79c062681696f", "_dom_classes": [], "description": "", "_model_name": "HTMLModel", "placeholder": "​", "_view_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", - "value": " 21.1M/21.1M [00:01<00:00, 18.2MB/s]", + "value": " 21.1M/21.1M [00:00<00:00, 33.5MB/s]", "_view_count": null, "_view_module_version": "1.5.0", "description_tooltip": null, "_model_module": "@jupyter-widgets/controls", - "layout": 
"IPY_MODEL_f5cc9c7d4c274b2d81327ba3163c43fd" + "layout": "IPY_MODEL_e639132395d64d70b99d8b72c32f8fbb" } }, - "c58b5536d98f4814831934e9c30c4d78": { + "32736d503c06497abfae8c0421918255": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "state": { @@ -390,7 +390,7 @@ "_model_module": "@jupyter-widgets/controls" } }, - "505597101151486ea29e9ab754544d27": { + "e257738711f54d5280c8393d9d3dce1c": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -441,7 +441,7 @@ "left": null } }, - "de6e7b4b4a1c408c9f89d89b07a13bcd": { + "beb7a6fe34b840899bb79c062681696f": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "state": { @@ -455,7 +455,7 @@ "_model_module": "@jupyter-widgets/controls" } }, - "f5cc9c7d4c274b2d81327ba3163c43fd": { + "e639132395d64d70b99d8b72c32f8fbb": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -550,7 +550,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "c6ad57c2-40b7-4764-b07d-19ee2ceaabaf" + "outputId": "ae8805a9-ce15-4e1c-f6b4-baa1c1033f56" }, "source": [ "!git clone https://github.com/ultralytics/yolov5 # clone repo\n", @@ -563,12 +563,12 @@ "clear_output()\n", "print('Setup complete. Using torch %s %s' % (torch.__version__, torch.cuda.get_device_properties(0) if torch.cuda.is_available() else 'CPU'))" ], - "execution_count": null, + "execution_count": 1, "outputs": [ { "output_type": "stream", "text": [ - "Setup complete. Using torch 1.7.0+cu101 _CudaDeviceProperties(name='Tesla V100-SXM2-16GB', major=7, minor=0, total_memory=16130MB, multi_processor_count=80)\n" + "Setup complete. 
Using torch 1.7.0+cu101 _CudaDeviceProperties(name='Tesla V100-SXM2-16GB', major=7, minor=0, total_memory=16160MB, multi_processor_count=80)\n" ], "name": "stdout" } @@ -672,30 +672,30 @@ "base_uri": "https://localhost:8080/", "height": 65, "referenced_widgets": [ - "811fd52fef65422c8267bafcde8a2c3d", - "8f41b90117224eef9133a9c3a103dbba", - "ca2fb37af6ed43d4a74cdc9f2ac5c4a5", - "29419ae5ebb9403ea73f7e5a68037bdd", - "6511b4dfb10b48d1bc98bcfb3987bfa0", - "64f0badf1a8f489885aa984dd62d37dc", - "f569911c5cfc4d81bb1bdfa83447afc8", - "84943ade566440aaa2dcf3b3b27e7074" + "1f8e9b8ebded4175b2eaa9f75c3ceb00", + "0a1246a73077468ab80e979cc0576cd2", + "d327cde5a85a4a51bb8b1b3e9cf06c97", + "d5ef1cb2cbed4b87b3c5d292ff2b0da6", + "8d5dff8bca14435a88fa1814533acd85", + "3d5136c19e7645ca9bc8f51ceffb2be1", + "2919396dbd4b4c8e821d12bd28665d8a", + "6feb16f2b2fa4021b1a271e1dd442d04" ] }, - "outputId": "59a7a546-8492-492e-861d-70a2c85a6794" + "outputId": "d6ace7c6-1be5-41ff-d607-1c716b88d298" }, "source": [ "# Download COCO val2017\n", "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017val.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../ && rm tmp.zip" ], - "execution_count": null, + "execution_count": 2, "outputs": [ { "output_type": "display_data", "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "811fd52fef65422c8267bafcde8a2c3d", + "model_id": "1f8e9b8ebded4175b2eaa9f75c3ceb00", "version_minor": 0, "version_major": 2 }, @@ -723,46 +723,45 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "427c211e-e283-4e87-f7b3-7b8dfb11a4a5" + "outputId": "cc25f70c-0a11-44f6-cc44-e92c5083488c" }, "source": [ "# Run YOLOv5x on COCO val2017\n", "!python test.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65" ], - "execution_count": null, + "execution_count": 3, "outputs": [ { "output_type": "stream", "text": [ "Namespace(augment=False, batch_size=32, conf_thres=0.001, data='./data/coco.yaml', device='', 
exist_ok=False, img_size=640, iou_thres=0.65, name='exp', project='runs/test', save_conf=False, save_hybrid=False, save_json=True, save_txt=False, single_cls=False, task='val', verbose=False, weights=['yolov5x.pt'])\n", - "YOLOv5 v4.0-21-gb26a2f6 torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16130.5MB)\n", + "YOLOv5 v4.0-75-gbdd88e1 torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", "\n", "Downloading https://github.com/ultralytics/yolov5/releases/download/v4.0/yolov5x.pt to yolov5x.pt...\n", - "100% 168M/168M [00:05<00:00, 31.9MB/s]\n", + "100% 168M/168M [00:04<00:00, 39.7MB/s]\n", "\n", "Fusing layers... \n", "Model Summary: 476 layers, 87730285 parameters, 0 gradients, 218.8 GFLOPS\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco/labels/val2017' for images and labels... 4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:01<00:00, 2791.81it/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: ../coco/labels/val2017.cache\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco/labels/val2017.cache' for images and labels... 4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:00<00:00, 13332180.55it/s]\n", - " Class Images Targets P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:30<00:00, 1.73it/s]\n", - " all 5e+03 3.63e+04 0.419 0.765 0.68 0.486\n", - "Speed: 5.2/2.0/7.2 ms inference/NMS/total per 640x640 image at batch-size 32\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco/val2017' for images and labels... 4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:01<00:00, 2824.78it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: ../coco/val2017.cache\n", + " Class Images Targets P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:33<00:00, 1.68it/s]\n", + " all 5e+03 3.63e+04 0.749 0.619 0.68 0.486\n", + "Speed: 5.2/2.0/7.3 ms inference/NMS/total per 640x640 image at batch-size 32\n", "\n", "Evaluating pycocotools mAP... 
saving runs/test/exp/yolov5x_predictions.json...\n", "loading annotations into memory...\n", - "Done (t=0.41s)\n", + "Done (t=0.44s)\n", "creating index...\n", "index created!\n", "Loading and preparing results...\n", - "DONE (t=5.26s)\n", + "DONE (t=4.47s)\n", "creating index...\n", "index created!\n", "Running per image evaluation...\n", "Evaluate annotation type *bbox*\n", - "DONE (t=93.97s).\n", + "DONE (t=94.87s).\n", "Accumulating evaluation results...\n", - "DONE (t=15.06s).\n", + "DONE (t=15.96s).\n", " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.501\n", " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.687\n", " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.544\n", @@ -837,30 +836,30 @@ "base_uri": "https://localhost:8080/", "height": 65, "referenced_widgets": [ - "8501ed1563e4452eac9df6b7a66e8f8c", - "d2bb96801e1f46f4a58e02534f7026ff", - "468a796ef06b4a24bcba6fbd4a0a8db5", - "42ad5c1ea7be4835bffebf90642178f1", - "c58b5536d98f4814831934e9c30c4d78", - "505597101151486ea29e9ab754544d27", - "de6e7b4b4a1c408c9f89d89b07a13bcd", - "f5cc9c7d4c274b2d81327ba3163c43fd" + "e6459e0bcee449b090fc9807672725bc", + "c341e1d3bf3b40d1821ce392eb966c68", + "660afee173694231a6dce3cd94df6cae", + "261218485cef48df961519dde5edfcbe", + "32736d503c06497abfae8c0421918255", + "e257738711f54d5280c8393d9d3dce1c", + "beb7a6fe34b840899bb79c062681696f", + "e639132395d64d70b99d8b72c32f8fbb" ] }, - "outputId": "c68a3db4-1314-46b4-9e52-83532eb65749" + "outputId": "e8b7d5b3-a71e-4446-eec2-ad13419cf700" }, "source": [ "# Download COCO128\n", "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../ && rm tmp.zip" ], - "execution_count": null, + "execution_count": 4, "outputs": [ { "output_type": "display_data", "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "8501ed1563e4452eac9df6b7a66e8f8c", + "model_id": 
"e6459e0bcee449b090fc9807672725bc", "version_minor": 0, "version_major": 2 }, @@ -925,27 +924,27 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "6af7116a-01ab-4b94-e5d7-b37c17dc95de" + "outputId": "38e51b29-2df4-4f00-cde8-5f6e4a34da9e" }, "source": [ "# Train YOLOv5s on COCO128 for 3 epochs\n", "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --nosave --cache" ], - "execution_count": null, + "execution_count": 5, "outputs": [ { "output_type": "stream", "text": [ "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", - "YOLOv5 v4.0-21-gb26a2f6 torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16130.5MB)\n", + "YOLOv5 v4.0-75-gbdd88e1 torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", "\n", - "Namespace(adam=False, batch_size=16, bucket='', cache_images=True, cfg='', data='./data/coco128.yaml', device='', epochs=3, evolve=False, exist_ok=False, global_rank=-1, hyp='data/hyp.scratch.yaml', image_weights=False, img_size=[640, 640], local_rank=-1, log_artifacts=False, log_imgs=16, multi_scale=False, name='exp', noautoanchor=False, nosave=True, notest=False, project='runs/train', quad=False, rect=False, resume=False, save_dir='runs/train/exp', single_cls=False, sync_bn=False, total_batch_size=16, weights='yolov5s.pt', workers=8, world_size=1)\n", + "Namespace(adam=False, batch_size=16, bucket='', cache_images=True, cfg='', data='./data/coco128.yaml', device='', epochs=3, evolve=False, exist_ok=False, global_rank=-1, hyp='data/hyp.scratch.yaml', image_weights=False, img_size=[640, 640], linear_lr=False, local_rank=-1, log_artifacts=False, log_imgs=16, multi_scale=False, name='exp', noautoanchor=False, nosave=True, notest=False, project='runs/train', quad=False, rect=False, resume=False, save_dir='runs/train/exp', single_cls=False, sync_bn=False, total_batch_size=16, weights='yolov5s.pt', workers=8, world_size=1)\n", "\u001b[34m\u001b[1mwandb: 
\u001b[0mInstall Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)\n", "Start Tensorboard with \"tensorboard --logdir runs/train\", view at http://localhost:6006/\n", - "2021-01-17 19:56:03.945851: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.10.1\n", + "2021-02-12 06:38:28.027271: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.10.1\n", "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.2, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0\n", "Downloading https://github.com/ultralytics/yolov5/releases/download/v4.0/yolov5s.pt to yolov5s.pt...\n", - "100% 14.1M/14.1M [00:00<00:00, 15.8MB/s]\n", + "100% 14.1M/14.1M [00:01<00:00, 13.2MB/s]\n", "\n", "\n", " from n params module arguments \n", @@ -979,12 +978,11 @@ "Transferred 362/362 items from yolov5s.pt\n", "Scaled weight_decay = 0.0005\n", "Optimizer groups: 62 .bias, 62 conv.weight, 59 other\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '../coco128/labels/train2017' for images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 2647.74it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '../coco128/labels/train2017' for images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 2566.00it/s]\n", "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: ../coco128/labels/train2017.cache\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '../coco128/labels/train2017.cache' for images and labels... 
128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 1503840.09it/s]\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 176.03it/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco128/labels/train2017.cache' for images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 24200.82it/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:01<00:00, 123.25it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 175.07it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco128/labels/train2017.cache' for images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 764773.38it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 128.17it/s]\n", "Plotting labels... \n", "\n", "\u001b[34m\u001b[1mautoanchor: \u001b[0mAnalyzing anchors... anchors/target = 4.26, Best Possible Recall (BPR) = 0.9946\n", @@ -994,19 +992,19 @@ "Starting training for 3 epochs...\n", "\n", " Epoch gpu_mem box obj cls total targets img_size\n", - " 0/2 3.27G 0.04357 0.06779 0.01869 0.1301 207 640: 100% 8/8 [00:04<00:00, 1.95it/s]\n", - " Class Images Targets P R mAP@.5 mAP@.5:.95: 100% 8/8 [00:05<00:00, 1.36it/s]\n", - " all 128 929 0.392 0.732 0.657 0.428\n", + " 0/2 3.27G 0.04357 0.06781 0.01869 0.1301 207 640: 100% 8/8 [00:03<00:00, 2.03it/s]\n", + " Class Images Targets P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:04<00:00, 1.14s/it]\n", + " all 128 929 0.646 0.627 0.659 0.431\n", "\n", " Epoch gpu_mem box obj cls total targets img_size\n", - " 1/2 7.47G 0.04308 0.06636 0.02083 0.1303 227 640: 100% 8/8 [00:02<00:00, 3.88it/s]\n", - " Class Images Targets P R mAP@.5 mAP@.5:.95: 100% 8/8 [00:01<00:00, 5.07it/s]\n", - " all 128 929 0.387 0.737 0.657 0.432\n", + " 1/2 7.75G 0.04308 0.06654 0.02083 0.1304 227 640: 100% 8/8 [00:01<00:00, 4.11it/s]\n", + " Class 
Images Targets P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:01<00:00, 2.94it/s]\n", + " all 128 929 0.681 0.607 0.663 0.434\n", "\n", " Epoch gpu_mem box obj cls total targets img_size\n", - " 2/2 7.48G 0.04461 0.06864 0.01866 0.1319 191 640: 100% 8/8 [00:02<00:00, 3.57it/s]\n", - " Class Images Targets P R mAP@.5 mAP@.5:.95: 100% 8/8 [00:02<00:00, 2.82it/s]\n", - " all 128 929 0.385 0.742 0.658 0.431\n", + " 2/2 7.75G 0.04461 0.06896 0.01866 0.1322 191 640: 100% 8/8 [00:02<00:00, 3.94it/s]\n", + " Class Images Targets P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:03<00:00, 1.22it/s]\n", + " all 128 929 0.642 0.632 0.662 0.432\n", "Optimizer stripped from runs/train/exp/weights/last.pt, 14.8MB\n", "3 epochs completed in 0.007 hours.\n", "\n" @@ -1238,4 +1236,4 @@ "outputs": [] } ] -} +} \ No newline at end of file From 3e560e2faeeaba00adbb2c8e72716c0a133dd917 Mon Sep 17 00:00:00 2001 From: Daniel Khromov Date: Sat, 13 Feb 2021 02:37:51 +0300 Subject: [PATCH 058/254] YOLOv5 PyTorch Hub results.save() method retains filenames (#2194) * save results with name * debug * save original imgs names * Update common.py Co-authored-by: Glenn Jocher --- models/common.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/models/common.py b/models/common.py index 7cfea01f223e..4f4f331da583 100644 --- a/models/common.py +++ b/models/common.py @@ -196,10 +196,11 @@ def forward(self, imgs, size=640, augment=False, profile=False): # Pre-process n, imgs = (len(imgs), imgs) if isinstance(imgs, list) else (1, [imgs]) # number of images, list of images - shape0, shape1 = [], [] # image and inference shapes + shape0, shape1, files = [], [], [] # image and inference shapes, filenames for i, im in enumerate(imgs): if isinstance(im, str): # filename or uri im = Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im) # open + files.append(Path(im.filename).with_suffix('.jpg').name if isinstance(im, Image.Image) else f'image{i}.jpg') im = np.array(im) # to 
numpy if im.shape[0] < 5: # image in CHW im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1) @@ -224,18 +225,19 @@ def forward(self, imgs, size=640, augment=False, profile=False): for i in range(n): scale_coords(shape1, y[i][:, :4], shape0[i]) - return Detections(imgs, y, self.names) + return Detections(imgs, y, files, self.names) class Detections: # detections class for YOLOv5 inference results - def __init__(self, imgs, pred, names=None): + def __init__(self, imgs, pred, files, names=None): super(Detections, self).__init__() d = pred[0].device # device gn = [torch.tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.], device=d) for im in imgs] # normalizations self.imgs = imgs # list of images as numpy arrays self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls) self.names = names # class names + self.files = files # image filenames self.xyxy = pred # xyxy pixels self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized @@ -258,9 +260,9 @@ def display(self, pprint=False, show=False, save=False, render=False, save_dir=' if pprint: print(str.rstrip(', ')) if show: - img.show(f'image {i}') # show + img.show(self.files[i]) # show if save: - f = Path(save_dir) / f'results{i}.jpg' + f = Path(save_dir) / self.files[i] img.save(f) # save print(f"{'Saving' * (i == 0)} {f},", end='' if i < self.n - 1 else ' done.\n') if render: @@ -272,7 +274,8 @@ def print(self): def show(self): self.display(show=True) # show results - def save(self, save_dir=''): + def save(self, save_dir='results/'): + Path(save_dir).mkdir(exist_ok=True) self.display(save=True, save_dir=save_dir) # save results def render(self): From 3ff783c18f32ec790bba5d7ca2b8d067ecd2160b Mon Sep 17 00:00:00 2001 From: VdLMV Date: Mon, 15 Feb 2021 19:49:22 +0100 Subject: [PATCH 059/254] TTA augument boxes one pixel shifted in de-flip ud and lr (#2219) * TTA augument boxes one pixel shifted in de-flip ud and lr * PEP8 reformat 
Co-authored-by: Jaap van de Loosdrecht Co-authored-by: Glenn Jocher --- models/yolo.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/models/yolo.py b/models/yolo.py index 11e6a65921a4..704d0e6d260d 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -110,9 +110,9 @@ def forward(self, x, augment=False, profile=False): # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1]) # save yi[..., :4] /= si # de-scale if fi == 2: - yi[..., 1] = img_size[0] - yi[..., 1] # de-flip ud + yi[..., 1] = img_size[0] - 1 - yi[..., 1] # de-flip ud elif fi == 3: - yi[..., 0] = img_size[1] - yi[..., 0] # de-flip lr + yi[..., 0] = img_size[1] - 1 - yi[..., 0] # de-flip lr y.append(yi) return torch.cat(y, 1), None # augmented inference, train else: From 7b833e37bf074758c94d66b3bf439582d0a08dfe Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 15 Feb 2021 11:02:20 -0800 Subject: [PATCH 060/254] LoadStreams() frame loss bug fix (#2222) --- utils/datasets.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/datasets.py b/utils/datasets.py index 29a8812a20a2..4f2939d4bef2 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -300,7 +300,8 @@ def update(self, index, cap): # _, self.imgs[index] = cap.read() cap.grab() if n == 4: # read every 4th frame - _, self.imgs[index] = cap.retrieve() + success, im = cap.retrieve() + self.imgs[index] = im if success else self.imgs[index] * 0 n = 0 time.sleep(0.01) # wait time From f8464b4f66e627ed2778c9a27dbe4a8642482baf Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 15 Feb 2021 21:21:53 -0800 Subject: [PATCH 061/254] Update yolo.py channel array (#2223) --- models/yolo.py | 35 ++++++++++------------------------- 1 file changed, 10 insertions(+), 25 deletions(-) diff --git a/models/yolo.py b/models/yolo.py index 704d0e6d260d..41817098ccbc 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -2,7 +2,6 @@ import logging import sys from copy import deepcopy -from 
pathlib import Path sys.path.append('./') # to run '$ python *.py' files in subdirectories logger = logging.getLogger(__name__) @@ -213,43 +212,27 @@ def parse_model(d, ch): # model_dict, input_channels(3) if m in [Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP, C3]: c1, c2 = ch[f], args[0] - - # Normal - # if i > 0 and args[0] != no: # channel expansion factor - # ex = 1.75 # exponential (default 2.0) - # e = math.log(c2 / ch[1]) / math.log(2) - # c2 = int(ch[1] * ex ** e) - # if m != Focus: - - c2 = make_divisible(c2 * gw, 8) if c2 != no else c2 - - # Experimental - # if i > 0 and args[0] != no: # channel expansion factor - # ex = 1 + gw # exponential (default 2.0) - # ch1 = 32 # ch[1] - # e = math.log(c2 / ch1) / math.log(2) # level 1-n - # c2 = int(ch1 * ex ** e) - # if m != Focus: - # c2 = make_divisible(c2, 8) if c2 != no else c2 + if c2 != no: # if not output + c2 = make_divisible(c2 * gw, 8) args = [c1, c2, *args[1:]] if m in [BottleneckCSP, C3]: - args.insert(2, n) + args.insert(2, n) # number of repeats n = 1 elif m is nn.BatchNorm2d: args = [ch[f]] elif m is Concat: - c2 = sum([ch[x if x < 0 else x + 1] for x in f]) + c2 = sum([ch[x] for x in f]) elif m is Detect: - args.append([ch[x + 1] for x in f]) + args.append([ch[x] for x in f]) if isinstance(args[1], int): # number of anchors args[1] = [list(range(args[1] * 2))] * len(f) elif m is Contract: - c2 = ch[f if f < 0 else f + 1] * args[0] ** 2 + c2 = ch[f] * args[0] ** 2 elif m is Expand: - c2 = ch[f if f < 0 else f + 1] // args[0] ** 2 + c2 = ch[f] // args[0] ** 2 else: - c2 = ch[f if f < 0 else f + 1] + c2 = ch[f] m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args) # module t = str(m)[8:-2].replace('__main__.', '') # module type @@ -258,6 +241,8 @@ def parse_model(d, ch): # model_dict, input_channels(3) logger.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n, np, t, args)) # print save.extend(x % i for x in ([f] if isinstance(f, 
int) else f) if x != -1) # append to savelist layers.append(m_) + if i == 0: + ch = [] ch.append(c2) return nn.Sequential(*layers), sorted(save) From 26c2e54c8f97e66b646f92932eb521901d69f889 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 16 Feb 2021 13:56:47 -0800 Subject: [PATCH 062/254] Add check_imshow() (#2231) * Add check_imshow() * Update general.py * Update general.py --- detect.py | 8 ++++---- utils/general.py | 13 +++++++++++++ 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/detect.py b/detect.py index 3f1d6c521b67..22bf21b4c825 100644 --- a/detect.py +++ b/detect.py @@ -9,8 +9,8 @@ from models.experimental import attempt_load from utils.datasets import LoadStreams, LoadImages -from utils.general import check_img_size, check_requirements, non_max_suppression, apply_classifier, scale_coords, \ - xyxy2xywh, strip_optimizer, set_logging, increment_path +from utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression, apply_classifier, \ + scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path from utils.plots import plot_one_box from utils.torch_utils import select_device, load_classifier, time_synchronized @@ -45,7 +45,7 @@ def detect(save_img=False): # Set Dataloader vid_path, vid_writer = None, None if webcam: - view_img = True + view_img = check_imshow() cudnn.benchmark = True # set True to speed up constant image size inference dataset = LoadStreams(source, img_size=imgsz, stride=stride) else: @@ -118,7 +118,7 @@ def detect(save_img=False): # Stream results if view_img: cv2.imshow(str(p), im0) - cv2.waitKey(1) # 1 millisecond + cv2.waitKey(1) # 1 millisecond # Save results (image with detections) if save_img: diff --git a/utils/general.py b/utils/general.py index 24807483f5f4..2d3e83ede35e 100755 --- a/utils/general.py +++ b/utils/general.py @@ -95,6 +95,19 @@ def check_img_size(img_size, s=32): return new_size +def check_imshow(): + # Check if environment supports image displays + 
try: + cv2.imshow('test', np.zeros((1, 1, 3))) + cv2.waitKey(1) + cv2.destroyAllWindows() + cv2.waitKey(1) + return True + except Exception as e: + print(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image previews\n{e}') + return False + + def check_file(file): # Search for file if not found if os.path.isfile(file) or file == '': From 5a40ce65ce215a79949b96f4ac2e6f4da90256ad Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 16 Feb 2021 15:27:24 -0800 Subject: [PATCH 063/254] Update CI badge (#2230) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 3c14071698c5..233fc17f1c35 100755 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@   -![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg) +CI CPU testing This repository represents Ultralytics open-source research into future object detection methods, and incorporates lessons learned and best practices evolved over thousands of hours of training and evolution on anonymized client datasets. **All code and models are under active development, and are subject to modification or deletion without notice.** Use at your own risk. 
From d2e754b67bc08d3634df05932cc94d8c9314a7b1 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 16 Feb 2021 15:58:07 -0800 Subject: [PATCH 064/254] Add isdocker() (#2232) * Add isdocker() * Update general.py * Update general.py --- utils/general.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/utils/general.py b/utils/general.py index 2d3e83ede35e..64b360fbe7df 100755 --- a/utils/general.py +++ b/utils/general.py @@ -47,6 +47,11 @@ def get_latest_run(search_dir='.'): return max(last_list, key=os.path.getctime) if last_list else '' +def isdocker(): + # Is environment a Docker container + return Path('/workspace').exists() # or Path('/.dockerenv').exists() + + def check_online(): # Check internet connectivity import socket @@ -62,7 +67,7 @@ def check_git_status(): print(colorstr('github: '), end='') try: assert Path('.git').exists(), 'skipping check (not a git repository)' - assert not Path('/workspace').exists(), 'skipping check (Docker image)' # not Path('/.dockerenv').exists() + assert not isdocker(), 'skipping check (Docker image)' assert check_online(), 'skipping check (offline)' cmd = 'git fetch && git config --get remote.origin.url' @@ -98,13 +103,14 @@ def check_img_size(img_size, s=32): def check_imshow(): # Check if environment supports image displays try: + assert not isdocker(), 'cv2.imshow() is disabled in Docker environments' cv2.imshow('test', np.zeros((1, 1, 3))) cv2.waitKey(1) cv2.destroyAllWindows() cv2.waitKey(1) return True except Exception as e: - print(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image previews\n{e}') + print(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image displays\n{e}') return False From 9d873077841434d1c6cbd1c4248ca2252820d3ba Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 19 Feb 2021 11:22:42 -0800 Subject: [PATCH 065/254] YOLOv5 Hub URL inference bug fix (#2250) * Update common.py * Update common.py * Update common.py --- 
models/common.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/models/common.py b/models/common.py index 4f4f331da583..f24ea7885668 100644 --- a/models/common.py +++ b/models/common.py @@ -199,7 +199,8 @@ def forward(self, imgs, size=640, augment=False, profile=False): shape0, shape1, files = [], [], [] # image and inference shapes, filenames for i, im in enumerate(imgs): if isinstance(im, str): # filename or uri - im = Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im) # open + im, f = Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im), im # open + im.filename = f # for uri files.append(Path(im.filename).with_suffix('.jpg').name if isinstance(im, Image.Image) else f'image{i}.jpg') im = np.array(im) # to numpy if im.shape[0] < 5: # image in CHW @@ -253,7 +254,7 @@ def display(self, pprint=False, show=False, save=False, render=False, save_dir=' n = (pred[:, -1] == c).sum() # detections per class str += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string if show or save or render: - img = Image.fromarray(img.astype(np.uint8)) if isinstance(img, np.ndarray) else img # from np + img = Image.fromarray(img) if isinstance(img, np.ndarray) else img # from np for *box, conf, cls in pred: # xyxy, confidence, class # str += '%s %.2f, ' % (names[int(cls)], conf) # label ImageDraw.Draw(img).rectangle(box, width=4, outline=colors[int(cls) % 10]) # plot From db28ce61acbeec9eaeb1577ccd417796ca138ee8 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 19 Feb 2021 12:35:38 -0800 Subject: [PATCH 066/254] Improved hubconf.py CI tests (#2251) --- hubconf.py | 9 +++++++-- models/common.py | 2 +- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/hubconf.py b/hubconf.py index 2a34813310e8..47eee4477725 100644 --- a/hubconf.py +++ b/hubconf.py @@ -133,9 +133,14 @@ def custom(path_or_model='path/to/model.pt', autoshape=True): # model = custom(path_or_model='path/to/model.pt') # custom 
example # Verify inference + import numpy as np from PIL import Image - imgs = [Image.open(x) for x in Path('data/images').glob('*.jpg')] - results = model(imgs) + imgs = [Image.open('data/images/bus.jpg'), # PIL + 'data/images/zidane.jpg', # filename + 'https://github.com/ultralytics/yolov5/raw/master/data/images/bus.jpg', # URI + np.zeros((640, 480, 3))] # numpy + + results = model(imgs) # batched inference results.print() results.save() diff --git a/models/common.py b/models/common.py index f24ea7885668..e8e5ff1eb2c1 100644 --- a/models/common.py +++ b/models/common.py @@ -254,7 +254,7 @@ def display(self, pprint=False, show=False, save=False, render=False, save_dir=' n = (pred[:, -1] == c).sum() # detections per class str += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string if show or save or render: - img = Image.fromarray(img) if isinstance(img, np.ndarray) else img # from np + img = Image.fromarray(img.astype(np.uint8)) if isinstance(img, np.ndarray) else img # from np for *box, conf, cls in pred: # xyxy, confidence, class # str += '%s %.2f, ' % (names[int(cls)], conf) # label ImageDraw.Draw(img).rectangle(box, width=4, outline=colors[int(cls) % 10]) # plot From 5f42643a53125ccc450add998401e3529d9d59d1 Mon Sep 17 00:00:00 2001 From: Yann Defretin Date: Fri, 19 Feb 2021 21:38:05 +0100 Subject: [PATCH 067/254] Unified hub and detect.py box and labels plotting (#2243) --- models/common.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/models/common.py b/models/common.py index e8e5ff1eb2c1..efcc6071af63 100644 --- a/models/common.py +++ b/models/common.py @@ -11,7 +11,7 @@ from utils.datasets import letterbox from utils.general import non_max_suppression, make_divisible, scale_coords, xyxy2xywh -from utils.plots import color_list +from utils.plots import color_list, plot_one_box def autopad(k, p=None): # kernel, padding @@ -254,10 +254,10 @@ def display(self, pprint=False, show=False, save=False, render=False, save_dir=' n = 
(pred[:, -1] == c).sum() # detections per class str += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string if show or save or render: - img = Image.fromarray(img.astype(np.uint8)) if isinstance(img, np.ndarray) else img # from np for *box, conf, cls in pred: # xyxy, confidence, class - # str += '%s %.2f, ' % (names[int(cls)], conf) # label - ImageDraw.Draw(img).rectangle(box, width=4, outline=colors[int(cls) % 10]) # plot + label = f'{self.names[int(cls)]} {conf:.2f}' + plot_one_box(box, img, label=label, color=colors[int(cls) % 10]) + img = Image.fromarray(img.astype(np.uint8)) if isinstance(img, np.ndarray) else img # from np if pprint: print(str.rstrip(', ')) if show: From 47faf95079d004b6114058fc9fa802190cbb95c5 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 19 Feb 2021 15:20:41 -0800 Subject: [PATCH 068/254] reset head --- utils/plots.py | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/utils/plots.py b/utils/plots.py index 3ec793528fe5..94f46a9a4026 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -15,7 +15,7 @@ import seaborn as sns import torch import yaml -from PIL import Image, ImageDraw +from PIL import Image, ImageDraw, ImageFont from scipy.signal import butter, filtfilt from utils.general import xywh2xyxy, xyxy2xywh @@ -68,6 +68,20 @@ def plot_one_box(x, img, color=None, label=None, line_thickness=None): cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA) +def plot_one_box_PIL(box, img, color=None, label=None, line_thickness=None): + img = Image.fromarray(img) + draw = ImageDraw.Draw(img) + line_thickness = line_thickness or max(int(min(img.size) / 200), 2) + draw.rectangle(box, width=line_thickness, outline=tuple(color)) # plot + if label: + fontsize = max(round(max(img.size) / 40), 12) + font = ImageFont.truetype("Arial.ttf", fontsize) + txt_width, txt_height = font.getsize(label) + draw.rectangle([box[0], box[1] - txt_height + 4, box[0] + 
txt_width, box[1]], fill=tuple(color)) + draw.text((box[0], box[1] - txt_height + 1), label, fill=(255, 255, 255), font=font) + return np.asarray(img) + + def plot_wh_methods(): # from utils.plots import *; plot_wh_methods() # Compares the two methods for width-height anchor multiplication # https://github.com/ultralytics/yolov3/issues/168 From c09964c27cc275c8e32630715cca5be77078dae2 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 19 Feb 2021 15:39:09 -0800 Subject: [PATCH 069/254] Update inference default to multi_label=False (#2252) * Update inference default to multi_label=False * bug fix * Update plots.py * Update plots.py --- models/common.py | 2 +- test.py | 8 ++++---- utils/general.py | 9 +++++---- utils/plots.py | 2 +- 4 files changed, 11 insertions(+), 10 deletions(-) diff --git a/models/common.py b/models/common.py index efcc6071af63..ad35f908d865 100644 --- a/models/common.py +++ b/models/common.py @@ -7,7 +7,7 @@ import requests import torch import torch.nn as nn -from PIL import Image, ImageDraw +from PIL import Image from utils.datasets import letterbox from utils.general import non_max_suppression, make_divisible, scale_coords, xyxy2xywh diff --git a/test.py b/test.py index 738764f15601..c30148dfb2f1 100644 --- a/test.py +++ b/test.py @@ -106,7 +106,7 @@ def test(data, with torch.no_grad(): # Run model t = time_synchronized() - inf_out, train_out = model(img, augment=augment) # inference and training outputs + out, train_out = model(img, augment=augment) # inference and training outputs t0 += time_synchronized() - t # Compute loss @@ -117,11 +117,11 @@ def test(data, targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling t = time_synchronized() - output = non_max_suppression(inf_out, conf_thres=conf_thres, iou_thres=iou_thres, labels=lb) + out = non_max_suppression(out, conf_thres=conf_thres, iou_thres=iou_thres, 
labels=lb, multi_label=True) t1 += time_synchronized() - t # Statistics per image - for si, pred in enumerate(output): + for si, pred in enumerate(out): labels = targets[targets[:, 0] == si, 1:] nl = len(labels) tcls = labels[:, 0].tolist() if nl else [] # target class @@ -209,7 +209,7 @@ def test(data, f = save_dir / f'test_batch{batch_i}_labels.jpg' # labels Thread(target=plot_images, args=(img, targets, paths, f, names), daemon=True).start() f = save_dir / f'test_batch{batch_i}_pred.jpg' # predictions - Thread(target=plot_images, args=(img, output_to_target(output), paths, f, names), daemon=True).start() + Thread(target=plot_images, args=(img, output_to_target(out), paths, f, names), daemon=True).start() # Compute statistics stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy diff --git a/utils/general.py b/utils/general.py index 64b360fbe7df..3b5f4629b00a 100755 --- a/utils/general.py +++ b/utils/general.py @@ -390,11 +390,12 @@ def wh_iou(wh1, wh2): return inter / (wh1.prod(2) + wh2.prod(2) - inter) # iou = inter / (area1 + area2 - inter) -def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, labels=()): - """Performs Non-Maximum Suppression (NMS) on inference results +def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False, + labels=()): + """Runs Non-Maximum Suppression (NMS) on inference results Returns: - detections with shape: nx6 (x1, y1, x2, y2, conf, cls) + list of detections, on (n,6) tensor per image [xyxy, conf, cls] """ nc = prediction.shape[2] - 5 # number of classes @@ -406,7 +407,7 @@ def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=Non max_nms = 30000 # maximum number of boxes into torchvision.ops.nms() time_limit = 10.0 # seconds to quit after redundant = True # require redundant detections - multi_label = nc > 1 # multiple labels per box (adds 0.5ms/img) + multi_label &= nc > 1 # multiple labels per 
box (adds 0.5ms/img) merge = False # use merge-NMS t = time.time() diff --git a/utils/plots.py b/utils/plots.py index 94f46a9a4026..aa9a1cab81f0 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -54,7 +54,7 @@ def butter_lowpass(cutoff, fs, order): return filtfilt(b, a, data) # forward-backward filter -def plot_one_box(x, img, color=None, label=None, line_thickness=None): +def plot_one_box(x, img, color=None, label=None, line_thickness=3): # Plots one bounding box on image img tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness color = color or [random.randint(0, 255) for _ in range(3)] From 6f5d6fcdaa8c1c5b24a06fdf9fd4e12c781fb4f7 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 20 Feb 2021 11:19:01 -0800 Subject: [PATCH 070/254] Robust objectness loss balancing (#2256) --- utils/loss.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/loss.py b/utils/loss.py index 481d25e207f2..2302d18de87d 100644 --- a/utils/loss.py +++ b/utils/loss.py @@ -105,8 +105,8 @@ def __init__(self, model, autobalance=False): BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g) det = model.module.model[-1] if is_parallel(model) else model.model[-1] # Detect() module - self.balance = {3: [4.0, 1.0, 0.4], 4: [4.0, 1.0, 0.25, 0.06], 5: [4.0, 1.0, 0.25, 0.06, .02]}[det.nl] - self.ssi = (det.stride == 16).nonzero(as_tuple=False).item() # stride 16 index + self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, .02]) # P3-P7 + self.ssi = list(det.stride).index(16) if autobalance else 0 # stride 16 index self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, model.gr, h, autobalance for k in 'na', 'nc', 'nl', 'anchors': setattr(self, k, getattr(det, k)) From 095d2c11d89892cd9c0c4d034cd1c768a0dba11c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 20 Feb 2021 13:21:58 -0800 Subject: [PATCH 071/254] Created using Colaboratory --- tutorial.ipynb | 23 
++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 7587d9f536fe..7fce40c3824e 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -563,7 +563,7 @@ "clear_output()\n", "print('Setup complete. Using torch %s %s' % (torch.__version__, torch.cuda.get_device_properties(0) if torch.cuda.is_available() else 'CPU'))" ], - "execution_count": 1, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -689,7 +689,7 @@ "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017val.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../ && rm tmp.zip" ], - "execution_count": 2, + "execution_count": null, "outputs": [ { "output_type": "display_data", @@ -729,7 +729,7 @@ "# Run YOLOv5x on COCO val2017\n", "!python test.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65" ], - "execution_count": 3, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -853,7 +853,7 @@ "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../ && rm tmp.zip" ], - "execution_count": 4, + "execution_count": null, "outputs": [ { "output_type": "display_data", @@ -930,7 +930,7 @@ "# Train YOLOv5s on COCO128 for 3 epochs\n", "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --nosave --cache" ], - "execution_count": 5, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -1222,6 +1222,19 @@ "execution_count": null, "outputs": [] }, + { + "cell_type": "code", + "metadata": { + "id": "RVRSOhEvUdb5" + }, + "source": [ + "# Evolve\n", + "!python train.py --img 640 --batch 64 --epochs 100 --data coco128.yaml --weights yolov5s.pt --cache --noautoanchor --evolve\n", + "!d=runs/train/evolve && cp evolve.* $d && zip -r evolve.zip $d && gsutil mv evolve.zip gs://bucket # upload results (optional)" + ], + "execution_count": 
null, + "outputs": [] + }, { "cell_type": "code", "metadata": { From e27ca0d8455ad91ec52e4dfd757825e653508bde Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 21 Feb 2021 21:46:42 -0800 Subject: [PATCH 072/254] Update minimum stride to 32 (#2266) --- test.py | 5 +++-- train.py | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/test.py b/test.py index c30148dfb2f1..ecd45f5f4943 100644 --- a/test.py +++ b/test.py @@ -52,7 +52,8 @@ def test(data, # Load model model = attempt_load(weights, map_location=device) # load FP32 model - imgsz = check_img_size(imgsz, s=model.stride.max()) # check img_size + gs = max(int(model.stride.max()), 32) # grid size (max stride) + imgsz = check_img_size(imgsz, s=gs) # check img_size # Multi-GPU disabled, incompatible with .half() https://github.com/ultralytics/yolov5/issues/99 # if device.type != 'cpu' and torch.cuda.device_count() > 1: @@ -85,7 +86,7 @@ def test(data, if device.type != 'cpu': model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once path = data['test'] if opt.task == 'test' else data['val'] # path to val/test images - dataloader = create_dataloader(path, imgsz, batch_size, model.stride.max(), opt, pad=0.5, rect=True, + dataloader = create_dataloader(path, imgsz, batch_size, gs, opt, pad=0.5, rect=True, prefix=colorstr('test: ' if opt.task == 'test' else 'val: '))[0] seen = 0 diff --git a/train.py b/train.py index 4065e1f149ef..e19cfa81d8da 100644 --- a/train.py +++ b/train.py @@ -161,7 +161,7 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): del ckpt, state_dict # Image sizes - gs = int(model.stride.max()) # grid size (max stride) + gs = max(int(model.stride.max()), 32) # grid size (max stride) nl = model.model[-1].nl # number of detection layers (used for scaling hyp['obj']) imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size] # verify imgsz are gs-multiples From 95aefea49374a1fe867794971c76337526a4d6cb Mon Sep 17 00:00:00 2001 From: 
Aditya Lohia <64709773+aditya-dl@users.noreply.github.com> Date: Mon, 22 Feb 2021 11:20:44 +0530 Subject: [PATCH 073/254] Dynamic ONNX engine generation (#2208) * add: dynamic onnx export * delete: test onnx inference * fix dynamic output axis * Code reduction * fix: dynamic output axes, dynamic input naming * Remove fixed axes Co-authored-by: Shivam Swanrkar Co-authored-by: Glenn Jocher --- models/export.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/models/export.py b/models/export.py index 057658af53dc..cc817871f218 100644 --- a/models/export.py +++ b/models/export.py @@ -22,6 +22,7 @@ parser = argparse.ArgumentParser() parser.add_argument('--weights', type=str, default='./yolov5s.pt', help='weights path') # from yolov5/models/ parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='image size') # height, width + parser.add_argument('--dynamic', action='store_true', help='dynamic ONNX axes') parser.add_argument('--batch-size', type=int, default=1, help='batch size') opt = parser.parse_args() opt.img_size *= 2 if len(opt.img_size) == 1 else 1 # expand @@ -70,7 +71,9 @@ print('\nStarting ONNX export with onnx %s...' 
% onnx.__version__) f = opt.weights.replace('.pt', '.onnx') # filename torch.onnx.export(model, img, f, verbose=False, opset_version=12, input_names=['images'], - output_names=['classes', 'boxes'] if y is None else ['output']) + output_names=['classes', 'boxes'] if y is None else ['output'], + dynamic_axes={'images': {0: 'batch', 2: 'height', 3: 'width'}, # size(1,3,640,640) + 'output': {0: 'batch', 2: 'y', 3: 'x'}} if opt.dynamic else None) # Checks onnx_model = onnx.load(f) # load onnx model From 32dd1614f405d16678fea787137eb9662d7dc1e0 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 22 Feb 2021 18:34:07 -0800 Subject: [PATCH 074/254] Update greetings.yml for auto-rebase on PR (#2272) --- .github/workflows/greetings.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index d62cf5c1600d..ee472297107e 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -11,7 +11,7 @@ jobs: repo-token: ${{ secrets.GITHUB_TOKEN }} pr-message: | 👋 Hello @${{ github.actor }}, thank you for submitting a 🚀 PR! 
To allow your work to be integrated as seamlessly as possible, we advise you to: - - ✅ Verify your PR is **up-to-date with origin/master.** If your PR is behind origin/master update by running the following, replacing 'feature' with the name of your local branch: + - ✅ Verify your PR is **up-to-date with origin/master.** If your PR is behind origin/master an automatic [GitHub actions](https://github.com/ultralytics/yolov5/blob/master/.github/workflows/rebase.yml) rebase may be attempted by including the /rebase command in a comment body, or by running the following code, replacing 'feature' with the name of your local branch: ```bash git remote add upstream https://github.com/ultralytics/yolov5.git git fetch upstream From cc79f3a9ea5d927475e7b896b18aa998c6e70795 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 22 Feb 2021 22:50:00 -0800 Subject: [PATCH 075/254] Update Dockerfile with apt install zip (#2274) --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 98dfee204770..fe64d6da29f9 100644 --- a/Dockerfile +++ b/Dockerfile @@ -2,7 +2,7 @@ FROM nvcr.io/nvidia/pytorch:20.12-py3 # Install linux packages -RUN apt update && apt install -y screen libgl1-mesa-glx +RUN apt update && apt install -y zip screen libgl1-mesa-glx # Install python dependencies RUN python -m pip install --upgrade pip From 83dc1b4484d8c5fe69c6a6ff50912ca90cace35a Mon Sep 17 00:00:00 2001 From: xiaowo1996 <429740343@qq.com> Date: Wed, 24 Feb 2021 01:38:56 +0800 Subject: [PATCH 076/254] FLOPS min stride 32 (#2276) Signed-off-by: xiaowo1996 <429740343@qq.com> --- utils/torch_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 2cb09e71ce71..1b1cc2038c55 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -205,7 +205,7 @@ def model_info(model, verbose=False, img_size=640): try: # FLOPS from thop import profile - stride = int(model.stride.max()) if 
hasattr(model, 'stride') else 32 + stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32 img = torch.zeros((1, model.yaml.get('ch', 3), stride, stride), device=next(model.parameters()).device) # input flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2 # stride GFLOPS img_size = img_size if isinstance(img_size, list) else [img_size, img_size] # expand if int/float From 7a6870b81f31db40b06d2e899801febbeed96696 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 23 Feb 2021 11:27:44 -0800 Subject: [PATCH 077/254] Update README.md --- README.md | 25 +++++++++++-------------- 1 file changed, 11 insertions(+), 14 deletions(-) diff --git a/README.md b/README.md index 233fc17f1c35..b7129e80adfe 100755 --- a/README.md +++ b/README.md @@ -89,17 +89,15 @@ To run inference on example images in `data/images`: ```bash $ python detect.py --source data/images --weights yolov5s.pt --conf 0.25 -Namespace(agnostic_nms=False, augment=False, classes=None, conf_thres=0.25, device='', img_size=640, iou_thres=0.45, save_conf=False, save_dir='runs/detect', save_txt=False, source='data/images/', update=False, view_img=False, weights=['yolov5s.pt']) -Using torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16130MB) - -Downloading https://github.com/ultralytics/yolov5/releases/download/v3.1/yolov5s.pt to yolov5s.pt... 100%|██████████████| 14.5M/14.5M [00:00<00:00, 21.3MB/s] +Namespace(agnostic_nms=False, augment=False, classes=None, conf_thres=0.25, device='', exist_ok=False, img_size=640, iou_thres=0.45, name='exp', project='runs/detect', save_conf=False, save_txt=False, source='data/images/', update=False, view_img=False, weights=['yolov5s.pt']) +YOLOv5 v4.0-96-g83dc1b4 torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB) Fusing layers... -Model Summary: 232 layers, 7459581 parameters, 0 gradients -image 1/2 data/images/bus.jpg: 640x480 4 persons, 1 buss, 1 skateboards, Done. 
(0.012s) -image 2/2 data/images/zidane.jpg: 384x640 2 persons, 2 ties, Done. (0.012s) -Results saved to runs/detect/exp -Done. (0.113s) +Model Summary: 224 layers, 7266973 parameters, 0 gradients, 17.0 GFLOPS +image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, Done. (0.010s) +image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 1 tie, Done. (0.011s) +Results saved to runs/detect/exp2 +Done. (0.103s) ``` @@ -108,18 +106,17 @@ Done. (0.113s) To run **batched inference** with YOLOv5 and [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36): ```python import torch -from PIL import Image # Model model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True) # Images -img1 = Image.open('zidane.jpg') -img2 = Image.open('bus.jpg') -imgs = [img1, img2] # batched list of images +dir = 'https://github.com/ultralytics/yolov5/raw/master/data/images/' +imgs = [dir + f for f in ('zidane.jpg', 'bus.jpg')] # batched list of images # Inference -result = model(imgs) +results = model(imgs) +results.print() # or .show(), .save() ``` From d5d275b6e97766835ebb04d02e5d1e3478d3eeee Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 23 Feb 2021 23:10:14 -0800 Subject: [PATCH 078/254] Amazon AWS EC2 startup and re-startup scripts (#2185) * Amazon AWS EC2 startup and re-startup scripts * Create resume.py * cleanup --- utils/aws/__init__.py | 0 utils/aws/mime.sh | 26 ++++++++++++++++++++++++++ utils/aws/resume.py | 34 ++++++++++++++++++++++++++++++++++ utils/aws/userdata.sh | 26 ++++++++++++++++++++++++++ 4 files changed, 86 insertions(+) create mode 100644 utils/aws/__init__.py create mode 100644 utils/aws/mime.sh create mode 100644 utils/aws/resume.py create mode 100644 utils/aws/userdata.sh diff --git a/utils/aws/__init__.py b/utils/aws/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/utils/aws/mime.sh b/utils/aws/mime.sh new file mode 100644 index 000000000000..c319a83cfbdf --- /dev/null +++ 
b/utils/aws/mime.sh @@ -0,0 +1,26 @@ +# AWS EC2 instance startup 'MIME' script https://aws.amazon.com/premiumsupport/knowledge-center/execute-user-data-ec2/ +# This script will run on every instance restart, not only on first start +# --- DO NOT COPY ABOVE COMMENTS WHEN PASTING INTO USERDATA --- + +Content-Type: multipart/mixed; boundary="//" +MIME-Version: 1.0 + +--// +Content-Type: text/cloud-config; charset="us-ascii" +MIME-Version: 1.0 +Content-Transfer-Encoding: 7bit +Content-Disposition: attachment; filename="cloud-config.txt" + +#cloud-config +cloud_final_modules: +- [scripts-user, always] + +--// +Content-Type: text/x-shellscript; charset="us-ascii" +MIME-Version: 1.0 +Content-Transfer-Encoding: 7bit +Content-Disposition: attachment; filename="userdata.txt" + +#!/bin/bash +# --- paste contents of userdata.sh here --- +--// diff --git a/utils/aws/resume.py b/utils/aws/resume.py new file mode 100644 index 000000000000..338c8b10127b --- /dev/null +++ b/utils/aws/resume.py @@ -0,0 +1,34 @@ +# Resume all interrupted trainings in yolov5/ dir including DPP trainings +# Usage: $ python utils/aws/resume.py + +import os +from pathlib import Path + +import torch +import yaml + +port = 0 # --master_port +path = Path('').resolve() +for last in path.rglob('*/**/last.pt'): + ckpt = torch.load(last) + if ckpt['optimizer'] is None: + continue + + # Load opt.yaml + with open(last.parent.parent / 'opt.yaml') as f: + opt = yaml.load(f, Loader=yaml.SafeLoader) + + # Get device count + d = opt['device'].split(',') # devices + nd = len(d) # number of devices + ddp = nd > 1 or (nd == 0 and torch.cuda.device_count() > 1) # distributed data parallel + + if ddp: # multi-GPU + port += 1 + cmd = f'python -m torch.distributed.launch --nproc_per_node {nd} --master_port {port} train.py --resume {last}' + else: # single-GPU + cmd = f'python train.py --resume {last}' + + cmd += ' > /dev/null 2>&1 &' # redirect output to dev/null and run in daemon thread + print(cmd) + os.system(cmd) diff 
--git a/utils/aws/userdata.sh b/utils/aws/userdata.sh new file mode 100644 index 000000000000..36405d1a1565 --- /dev/null +++ b/utils/aws/userdata.sh @@ -0,0 +1,26 @@ +#!/bin/bash +# AWS EC2 instance startup script https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html +# This script will run only once on first instance start (for a re-start script see mime.sh) +# /home/ubuntu (ubuntu) or /home/ec2-user (amazon-linux) is working dir +# Use >300 GB SSD + +cd home/ubuntu +if [ ! -d yolov5 ]; then + echo "Running first-time script." # install dependencies, download COCO, pull Docker + git clone https://github.com/ultralytics/yolov5 && sudo chmod -R 777 yolov5 + cd yolov5 + bash data/scripts/get_coco.sh && echo "Data done." & + sudo docker pull ultralytics/yolov5:latest && echo "Docker done." & + # python -m pip install --upgrade pip && pip install -r requirements.txt && python detect.py && echo "Requirements done." & +else + echo "Running re-start script." # resume interrupted runs + i=0 + list=$(docker ps -qa) # container list i.e. 
$'one\ntwo\nthree\nfour' + while IFS= read -r id; do + ((i++)) + echo "restarting container $i: $id" + docker start $id + # docker exec -it $id python train.py --resume # single-GPU + docker exec -d $id python utils/aws/resume.py + done <<<"$list" +fi From 0070995bd58629d4628d11b1c8de9788aa55379b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 24 Feb 2021 01:43:59 -0800 Subject: [PATCH 079/254] Amazon AWS EC2 startup and re-startup scripts (#2282) --- utils/aws/resume.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/utils/aws/resume.py b/utils/aws/resume.py index 338c8b10127b..563f22be20dc 100644 --- a/utils/aws/resume.py +++ b/utils/aws/resume.py @@ -2,11 +2,14 @@ # Usage: $ python utils/aws/resume.py import os +import sys from pathlib import Path import torch import yaml +sys.path.append('./') # to run '$ python *.py' files in subdirectories + port = 0 # --master_port path = Path('').resolve() for last in path.rglob('*/**/last.pt'): From ca5b10b759d2e41221e7ffddcefe1f8087791dec Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 24 Feb 2021 13:31:20 -0800 Subject: [PATCH 080/254] Update train.py (#2290) * Update train.py * Update train.py * Update train.py * Update train.py * Create train.py --- train.py | 35 ++++++++++++++++------------------- 1 file changed, 16 insertions(+), 19 deletions(-) diff --git a/train.py b/train.py index e19cfa81d8da..8533667fe57f 100644 --- a/train.py +++ b/train.py @@ -146,8 +146,7 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): # Results if ckpt.get('training_results') is not None: - with open(results_file, 'w') as file: - file.write(ckpt['training_results']) # write results.txt + results_file.write_text(ckpt['training_results']) # write results.txt # Epochs start_epoch = ckpt['epoch'] + 1 @@ -354,7 +353,7 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): # Write with open(results_file, 'a') as f: - f.write(s + '%10.4g' * 7 % results + '\n') # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls) + 
f.write(s + '%10.4g' * 7 % results + '\n') # append metrics, val_loss if len(opt.name) and opt.bucket: os.system('gsutil cp %s gs://%s/results/results%s.txt' % (results_file, opt.bucket, opt.name)) @@ -375,15 +374,13 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): best_fitness = fi # Save model - save = (not opt.nosave) or (final_epoch and not opt.evolve) - if save: - with open(results_file, 'r') as f: # create checkpoint - ckpt = {'epoch': epoch, - 'best_fitness': best_fitness, - 'training_results': f.read(), - 'model': ema.ema, - 'optimizer': None if final_epoch else optimizer.state_dict(), - 'wandb_id': wandb_run.id if wandb else None} + if (not opt.nosave) or (final_epoch and not opt.evolve): # if save + ckpt = {'epoch': epoch, + 'best_fitness': best_fitness, + 'training_results': results_file.read_text(), + 'model': ema.ema, + 'optimizer': None if final_epoch else optimizer.state_dict(), + 'wandb_id': wandb_run.id if wandb else None} # Save last, best and delete torch.save(ckpt, last) @@ -396,9 +393,9 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): if rank in [-1, 0]: # Strip optimizers final = best if best.exists() else last # final model - for f in [last, best]: + for f in last, best: if f.exists(): - strip_optimizer(f) # strip optimizers + strip_optimizer(f) if opt.bucket: os.system(f'gsutil cp {final} gs://{opt.bucket}/weights') # upload @@ -415,17 +412,17 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): # Test best.pt logger.info('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600)) if opt.data.endswith('coco.yaml') and nc == 80: # if COCO - for conf, iou, save_json in ([0.25, 0.45, False], [0.001, 0.65, True]): # speed, mAP tests + for m in (last, best) if best.exists() else (last): # speed, mAP tests results, _, _ = test.test(opt.data, batch_size=batch_size * 2, imgsz=imgsz_test, - conf_thres=conf, - iou_thres=iou, - model=attempt_load(final, device).half(), + conf_thres=0.001, 
+ iou_thres=0.7, + model=attempt_load(m, device).half(), single_cls=opt.single_cls, dataloader=testloader, save_dir=save_dir, - save_json=save_json, + save_json=True, plots=False) else: From ec1d8496baa6bff7cb3ea223fd23f2d0cf0804ec Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 24 Feb 2021 18:26:46 -0800 Subject: [PATCH 081/254] Improved model+EMA checkpointing (#2292) * Enhanced model+EMA checkpointing * update * bug fix * bug fix 2 * always save optimizer * ema half * remove model.float() * model half * carry ema/model in fp32 * rm model.float() * both to float always * cleanup * cleanup --- test.py | 1 - train.py | 25 ++++++++++++++++--------- utils/general.py | 4 ++-- 3 files changed, 18 insertions(+), 12 deletions(-) diff --git a/test.py b/test.py index ecd45f5f4943..9f484c809052 100644 --- a/test.py +++ b/test.py @@ -272,7 +272,6 @@ def test(data, if not training: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' print(f"Results saved to {save_dir}{s}") - model.float() # for training maps = np.zeros(nc) + map for i, c in enumerate(ap_class): maps[c] = ap[i] diff --git a/train.py b/train.py index 8533667fe57f..7aa57fa99e24 100644 --- a/train.py +++ b/train.py @@ -31,7 +31,7 @@ from utils.google_utils import attempt_download from utils.loss import ComputeLoss from utils.plots import plot_images, plot_labels, plot_results, plot_evolution -from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first +from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, is_parallel logger = logging.getLogger(__name__) @@ -136,6 +136,9 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): id=ckpt.get('wandb_id') if 'ckpt' in locals() else None) loggers = {'wandb': wandb} # loggers dict + # EMA + ema = ModelEMA(model) if rank in [-1, 0] else None + # Resume start_epoch, best_fitness = 0, 0.0 if pretrained: @@ -144,6 
+147,11 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): optimizer.load_state_dict(ckpt['optimizer']) best_fitness = ckpt['best_fitness'] + # EMA + if ema and ckpt.get('ema'): + ema.ema.load_state_dict(ckpt['ema'][0].float().state_dict()) + ema.updates = ckpt['ema'][1] + # Results if ckpt.get('training_results') is not None: results_file.write_text(ckpt['training_results']) # write results.txt @@ -173,9 +181,6 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device) logger.info('Using SyncBatchNorm()') - # EMA - ema = ModelEMA(model) if rank in [-1, 0] else None - # DDP mode if cuda and rank != -1: model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank) @@ -191,7 +196,6 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): # Process 0 if rank in [-1, 0]: - ema.updates = start_epoch * nb // accumulate # set EMA updates testloader = create_dataloader(test_path, imgsz_test, batch_size * 2, gs, opt, # testloader hyp=hyp, cache=opt.cache_images and not opt.notest, rect=True, rank=-1, world_size=opt.world_size, workers=opt.workers, @@ -335,8 +339,7 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): # DDP process 0 or single-GPU if rank in [-1, 0]: # mAP - if ema: - ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride', 'class_weights']) + ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride', 'class_weights']) final_epoch = epoch + 1 == epochs if not opt.notest or final_epoch: # Calculate mAP results, maps, times = test.test(opt.data, @@ -378,8 +381,9 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): ckpt = {'epoch': epoch, 'best_fitness': best_fitness, 'training_results': results_file.read_text(), - 'model': ema.ema, - 'optimizer': None if final_epoch else optimizer.state_dict(), + 'model': (model.module if is_parallel(model) else model).half(), + 'ema': (ema.ema.half(), ema.updates), + 
'optimizer': optimizer.state_dict(), 'wandb_id': wandb_run.id if wandb else None} # Save last, best and delete @@ -387,6 +391,9 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): if best_fitness == fi: torch.save(ckpt, best) del ckpt + + model.float(), ema.ema.float() + # end epoch ---------------------------------------------------------------------------------------------------- # end training diff --git a/utils/general.py b/utils/general.py index 3b5f4629b00a..e5bbc50c6177 100755 --- a/utils/general.py +++ b/utils/general.py @@ -484,8 +484,8 @@ def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=Non def strip_optimizer(f='weights/best.pt', s=''): # from utils.general import *; strip_optimizer() # Strip optimizer from 'f' to finalize training, optionally save as 's' x = torch.load(f, map_location=torch.device('cpu')) - for key in 'optimizer', 'training_results', 'wandb_id': - x[key] = None + for k in 'optimizer', 'training_results', 'wandb_id', 'ema': # keys + x[k] = None x['epoch'] = -1 x['model'].half() # to FP16 for p in x['model'].parameters(): From 71dd2768f28ed24e83087203a2dea565c99a1120 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 24 Feb 2021 21:03:21 -0800 Subject: [PATCH 082/254] Improved model+EMA checkpointing 2 (#2295) --- test.py | 1 + train.py | 7 +++---- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/test.py b/test.py index 9f484c809052..91176eca01db 100644 --- a/test.py +++ b/test.py @@ -269,6 +269,7 @@ def test(data, print(f'pycocotools unable to run: {e}') # Return results + model.float() # for training if not training: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' print(f"Results saved to {save_dir}{s}") diff --git a/train.py b/train.py index 7aa57fa99e24..e37cf816bcb1 100644 --- a/train.py +++ b/train.py @@ -4,6 +4,7 @@ import os import random import time +from copy import deepcopy from pathlib import Path from threading 
import Thread @@ -381,8 +382,8 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): ckpt = {'epoch': epoch, 'best_fitness': best_fitness, 'training_results': results_file.read_text(), - 'model': (model.module if is_parallel(model) else model).half(), - 'ema': (ema.ema.half(), ema.updates), + 'model': deepcopy(model.module if is_parallel(model) else model).half(), + 'ema': (deepcopy(ema.ema).half(), ema.updates), 'optimizer': optimizer.state_dict(), 'wandb_id': wandb_run.id if wandb else None} @@ -392,8 +393,6 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): torch.save(ckpt, best) del ckpt - model.float(), ema.ema.float() - # end epoch ---------------------------------------------------------------------------------------------------- # end training From a82dce7faa5d13d6f9c342f04aaaa3b5de80d749 Mon Sep 17 00:00:00 2001 From: Iden Craven Date: Thu, 25 Feb 2021 19:05:38 -0700 Subject: [PATCH 083/254] Fix labels being missed when image extension appears twice in filename (#2300) --- utils/datasets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/datasets.py b/utils/datasets.py index 4f2939d4bef2..d6ab16518034 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -335,7 +335,7 @@ def __len__(self): def img2label_paths(img_paths): # Define label paths as a function of image paths sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings - return [x.replace(sa, sb, 1).replace('.' 
+ x.split('.')[-1], '.txt') for x in img_paths] + return ['txt'.join(x.replace(sa, sb, 1).rsplit(x.split('.')[-1], 1)) for x in img_paths] class LoadImagesAndLabels(Dataset): # for training/testing From efa4946d158f4042890b243cf9314aa62dac83e4 Mon Sep 17 00:00:00 2001 From: Jan Hajek Date: Fri, 26 Feb 2021 04:18:19 +0100 Subject: [PATCH 084/254] W&B entity support (#2298) * W&B entity support * shorten wandb_entity to entity Co-authored-by: Jan Hajek Co-authored-by: Glenn Jocher --- train.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/train.py b/train.py index e37cf816bcb1..bbf879f3af5f 100644 --- a/train.py +++ b/train.py @@ -134,6 +134,7 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): wandb_run = wandb.init(config=opt, resume="allow", project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem, name=save_dir.stem, + entity=opt.entity, id=ckpt.get('wandb_id') if 'ckpt' in locals() else None) loggers = {'wandb': wandb} # loggers dict @@ -467,6 +468,7 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): parser.add_argument('--log-artifacts', action='store_true', help='log artifacts, i.e. 
final trained model') parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers') parser.add_argument('--project', default='runs/train', help='save to project/name') + parser.add_argument('--entity', default=None, help='W&B entity') parser.add_argument('--name', default='exp', help='save to project/name') parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') parser.add_argument('--quad', action='store_true', help='quad dataloader') From cbd55da5d24becbe3b94afaaa4cdd1187a512c3f Mon Sep 17 00:00:00 2001 From: oleg Date: Fri, 26 Feb 2021 15:07:40 -0800 Subject: [PATCH 085/254] Update yolo.py (#2120) * Avoid mutable state in Detect * LoadImages() pathlib update (#2140) * Unique *.cache filenames fix (#2134) * fix #2121 * Update test.py * Update train.py * Update autoanchor.py * Update datasets.py * Update log_dataset.py * Update datasets.py Co-authored-by: Glenn Jocher * Update train.py test batch_size (#2148) * Update train.py * Update loss.py * Update train.py (#2149) * Linear LR scheduler option (#2150) * Linear LR scheduler option * Update train.py * Update data-autodownload background tasks (#2154) * Update get_coco.sh * Update get_voc.sh * Update detect.py (#2167) Without this cv2.imshow opens a window but nothing is visible * Update requirements.txt (#2173) * Update utils/datasets.py to support .webp files (#2174) Simply added 'webp' as an image format to the img_formats array so that webp image files can be used as training data. 
* Changed socket port and added timeout (#2176) * PyTorch Hub results.save('path/to/dir') (#2179) * YOLOv5 Segmentation Dataloader Updates (#2188) * Update C3 module * Update C3 module * Update C3 module * Update C3 module * update * update * update * update * update * update * update * update * update * updates * updates * updates * updates * updates * updates * updates * updates * updates * updates * update * update * update * update * updates * updates * updates * updates * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update datasets * update * update * update * update attempt_downlaod() * merge * merge * update * update * update * update * update * update * update * update * update * update * parameterize eps * comments * gs-multiple * update * max_nms implemented * Create one_cycle() function * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * update * GitHub API rate limit fix * update * ComputeLoss * ComputeLoss * ComputeLoss * ComputeLoss * ComputeLoss * ComputeLoss * ComputeLoss * ComputeLoss * ComputeLoss * ComputeLoss * ComputeLoss * astuple * epochs * update * update * ComputeLoss() * update * update * update * update * update * update * update * update * update * update * update * merge * merge * merge * merge * update * update * update * update * commit=tag == tags[-1] * Update cudnn.benchmark * update * update * update * updates * updates * updates * updates * updates * updates * updates * update * update * update * update * update * mosaic9 * update * update * update * update * update * update * institute cache versioning * only display on 
existing cache * reverse cache exists booleans * Created using Colaboratory * YOLOv5 PyTorch Hub results.save() method retains filenames (#2194) * save results with name * debug * save original imgs names * Update common.py Co-authored-by: Glenn Jocher * TTA augument boxes one pixel shifted in de-flip ud and lr (#2219) * TTA augument boxes one pixel shifted in de-flip ud and lr * PEP8 reformat Co-authored-by: Jaap van de Loosdrecht Co-authored-by: Glenn Jocher * LoadStreams() frame loss bug fix (#2222) * Update yolo.py channel array (#2223) * Add check_imshow() (#2231) * Add check_imshow() * Update general.py * Update general.py * Update CI badge (#2230) * Add isdocker() (#2232) * Add isdocker() * Update general.py * Update general.py * YOLOv5 Hub URL inference bug fix (#2250) * Update common.py * Update common.py * Update common.py * Improved hubconf.py CI tests (#2251) * Unified hub and detect.py box and labels plotting (#2243) * reset head * Update inference default to multi_label=False (#2252) * Update inference default to multi_label=False * bug fix * Update plots.py * Update plots.py * Robust objectness loss balancing (#2256) * Created using Colaboratory * Update minimum stride to 32 (#2266) * Dynamic ONNX engine generation (#2208) * add: dynamic onnx export * delete: test onnx inference * fix dynamic output axis * Code reduction * fix: dynamic output axes, dynamic input naming * Remove fixed axes Co-authored-by: Shivam Swanrkar Co-authored-by: Glenn Jocher * Update greetings.yml for auto-rebase on PR (#2272) * Update Dockerfile with apt install zip (#2274) * FLOPS min stride 32 (#2276) Signed-off-by: xiaowo1996 <429740343@qq.com> * Update README.md * Amazon AWS EC2 startup and re-startup scripts (#2185) * Amazon AWS EC2 startup and re-startup scripts * Create resume.py * cleanup * Amazon AWS EC2 startup and re-startup scripts (#2282) * Update train.py (#2290) * Update train.py * Update train.py * Update train.py * Update train.py * Create train.py * Improved 
model+EMA checkpointing (#2292) * Enhanced model+EMA checkpointing * update * bug fix * bug fix 2 * always save optimizer * ema half * remove model.float() * model half * carry ema/model in fp32 * rm model.float() * both to float always * cleanup * cleanup * Improved model+EMA checkpointing 2 (#2295) * Fix labels being missed when image extension appears twice in filename (#2300) * W&B entity support (#2298) * W&B entity support * shorten wandb_entity to entity Co-authored-by: Jan Hajek Co-authored-by: Glenn Jocher * Avoid mutable state in Detect * Update yolo and remove .to(device) Co-authored-by: Oleg Boiko Co-authored-by: Glenn Jocher Co-authored-by: train255 Co-authored-by: ab-101 <56578530+ab-101@users.noreply.github.com> Co-authored-by: Transigent Co-authored-by: NanoCode012 Co-authored-by: Daniel Khromov Co-authored-by: VdLMV Co-authored-by: Jaap van de Loosdrecht Co-authored-by: Yann Defretin Co-authored-by: Aditya Lohia <64709773+aditya-dl@users.noreply.github.com> Co-authored-by: Shivam Swanrkar Co-authored-by: xiaowo1996 <429740343@qq.com> Co-authored-by: Iden Craven Co-authored-by: Jan Hajek Co-authored-by: Jan Hajek --- models/yolo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/yolo.py b/models/yolo.py index 41817098ccbc..85043f2b0205 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -49,7 +49,7 @@ def forward(self, x): self.grid[i] = self._make_grid(nx, ny).to(x[i].device) y = x[i].sigmoid() - y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i].to(x[i].device)) * self.stride[i] # xy + y[..., 0:2] = (y[..., 0:2] * 2. 
- 0.5 + self.grid[i]) * self.stride[i] # xy y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh z.append(y.view(bs, -1, self.no)) From dfeec198cbb0d19bf06a26e3712b7825f993fc47 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 27 Feb 2021 12:51:33 -0800 Subject: [PATCH 086/254] final_epoch EMA bug fix (#2317) --- train.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/train.py b/train.py index bbf879f3af5f..5c203f12651d 100644 --- a/train.py +++ b/train.py @@ -383,7 +383,8 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): ckpt = {'epoch': epoch, 'best_fitness': best_fitness, 'training_results': results_file.read_text(), - 'model': deepcopy(model.module if is_parallel(model) else model).half(), + 'model': ema.ema if final_epoch else deepcopy( + model.module if is_parallel(model) else model).half(), 'ema': (deepcopy(ema.ema).half(), ema.updates), 'optimizer': optimizer.state_dict(), 'wandb_id': wandb_run.id if wandb else None} From cd30d838eb098b1c96219a83521e71bdd9360f60 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 27 Feb 2021 15:28:23 -0800 Subject: [PATCH 087/254] Update test.py (#2319) --- test.py | 1 + 1 file changed, 1 insertion(+) diff --git a/test.py b/test.py index 91176eca01db..fd4d339ffea6 100644 --- a/test.py +++ b/test.py @@ -326,6 +326,7 @@ def test(data, test(opt.data, w, opt.batch_size, opt.img_size, 0.25, 0.45, save_json=False, plots=False) elif opt.task == 'study': # run over a range of settings and save/plot + # python test.py --task study --data coco.yaml --iou 0.7 --weights yolov5s.pt yolov5m.pt yolov5l.pt yolov5x.pt x = list(range(256, 1536 + 128, 128)) # x axis (image sizes) for w in opt.weights: f = f'study_{Path(opt.data).stem}_{Path(w).stem}.txt' # filename to save to From c2026a5f35fd632c71b10fdbaf9194e714906f02 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 27 Feb 2021 15:55:31 -0800 Subject: [PATCH 088/254] Update Dockerfile install htop (#2320) --- Dockerfile | 2 +- 1 file 
changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index fe64d6da29f9..a768774fa9c7 100644 --- a/Dockerfile +++ b/Dockerfile @@ -2,7 +2,7 @@ FROM nvcr.io/nvidia/pytorch:20.12-py3 # Install linux packages -RUN apt update && apt install -y zip screen libgl1-mesa-glx +RUN apt update && apt install -y zip htop screen libgl1-mesa-glx # Install python dependencies RUN python -m pip install --upgrade pip From fd96810518adcbb07ca0c5e1373c57e9025966c4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 28 Feb 2021 21:14:08 -0800 Subject: [PATCH 089/254] remove TTA 1 pixel offset (#2325) --- models/yolo.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/models/yolo.py b/models/yolo.py index 85043f2b0205..a9e1da43d913 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -109,9 +109,9 @@ def forward(self, x, augment=False, profile=False): # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1]) # save yi[..., :4] /= si # de-scale if fi == 2: - yi[..., 1] = img_size[0] - 1 - yi[..., 1] # de-flip ud + yi[..., 1] = img_size[0] - yi[..., 1] # de-flip ud elif fi == 3: - yi[..., 0] = img_size[1] - 1 - yi[..., 0] # de-flip lr + yi[..., 0] = img_size[1] - yi[..., 0] # de-flip lr y.append(yi) return torch.cat(y, 1), None # augmented inference, train else: From fab5085674f7748dc16d7ca25afb225fa441bc9d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 1 Mar 2021 17:13:34 -0800 Subject: [PATCH 090/254] EMA bug fix 2 (#2330) * EMA bug fix 2 * update --- hubconf.py | 2 +- models/experimental.py | 3 ++- train.py | 10 +++++----- utils/general.py | 8 +++++--- 4 files changed, 13 insertions(+), 10 deletions(-) diff --git a/hubconf.py b/hubconf.py index 47eee4477725..a8eb51681794 100644 --- a/hubconf.py +++ b/hubconf.py @@ -120,7 +120,7 @@ def custom(path_or_model='path/to/model.pt', autoshape=True): """ model = torch.load(path_or_model) if isinstance(path_or_model, str) else path_or_model # load checkpoint 
if isinstance(model, dict): - model = model['model'] # load model + model = model['ema' if model.get('ema') else 'model'] # load model hub_model = Model(model.yaml).to(next(model.parameters()).device) # create hub_model.load_state_dict(model.float().state_dict()) # load state_dict diff --git a/models/experimental.py b/models/experimental.py index 5fe56858c54a..d79052314f9b 100644 --- a/models/experimental.py +++ b/models/experimental.py @@ -115,7 +115,8 @@ def attempt_load(weights, map_location=None): model = Ensemble() for w in weights if isinstance(weights, list) else [weights]: attempt_download(w) - model.append(torch.load(w, map_location=map_location)['model'].float().fuse().eval()) # load FP32 model + ckpt = torch.load(w, map_location=map_location) # load + model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval()) # FP32 model # Compatibility updates for m in model.modules(): diff --git a/train.py b/train.py index 5c203f12651d..e2c82339f7fe 100644 --- a/train.py +++ b/train.py @@ -151,8 +151,8 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): # EMA if ema and ckpt.get('ema'): - ema.ema.load_state_dict(ckpt['ema'][0].float().state_dict()) - ema.updates = ckpt['ema'][1] + ema.ema.load_state_dict(ckpt['ema'].float().state_dict()) + ema.updates = ckpt['updates'] # Results if ckpt.get('training_results') is not None: @@ -383,9 +383,9 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): ckpt = {'epoch': epoch, 'best_fitness': best_fitness, 'training_results': results_file.read_text(), - 'model': ema.ema if final_epoch else deepcopy( - model.module if is_parallel(model) else model).half(), - 'ema': (deepcopy(ema.ema).half(), ema.updates), + 'model': deepcopy(model.module if is_parallel(model) else model).half(), + 'ema': deepcopy(ema.ema).half(), + 'updates': ema.updates, 'optimizer': optimizer.state_dict(), 'wandb_id': wandb_run.id if wandb else None} diff --git a/utils/general.py b/utils/general.py index 
e5bbc50c6177..df8cf7bab60d 100755 --- a/utils/general.py +++ b/utils/general.py @@ -481,10 +481,12 @@ def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=Non return output -def strip_optimizer(f='weights/best.pt', s=''): # from utils.general import *; strip_optimizer() +def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer() # Strip optimizer from 'f' to finalize training, optionally save as 's' x = torch.load(f, map_location=torch.device('cpu')) - for k in 'optimizer', 'training_results', 'wandb_id', 'ema': # keys + if x.get('ema'): + x['model'] = x['ema'] # replace model with ema + for k in 'optimizer', 'training_results', 'wandb_id', 'ema', 'updates': # keys x[k] = None x['epoch'] = -1 x['model'].half() # to FP16 @@ -492,7 +494,7 @@ def strip_optimizer(f='weights/best.pt', s=''): # from utils.general import *; p.requires_grad = False torch.save(x, s or f) mb = os.path.getsize(s or f) / 1E6 # filesize - print('Optimizer stripped from %s,%s %.1fMB' % (f, (' saved as %s,' % s) if s else '', mb)) + print(f"Optimizer stripped from {f},{(' saved as %s,' % s) if s else ''} {mb:.1f}MB") def print_mutation(hyp, results, yaml_file='hyp_evolved.yaml', bucket=''): From ab86cec85443f979ee7f99bdb60223ad36b07198 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 2 Mar 2021 13:01:59 -0800 Subject: [PATCH 091/254] FROM nvcr.io/nvidia/pytorch:21.02-py3 (#2341) --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index a768774fa9c7..d42af2f78954 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # Start FROM Nvidia PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch -FROM nvcr.io/nvidia/pytorch:20.12-py3 +FROM nvcr.io/nvidia/pytorch:21.02-py3 # Install linux packages RUN apt update && apt install -y zip htop screen libgl1-mesa-glx From 2c56ad5436cf0b84612c0c83842067d34df5c94b Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Tue, 2 Mar 2021 
16:09:52 -0800 Subject: [PATCH 092/254] Confusion matrix background axis swap (#2114) --- utils/metrics.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/utils/metrics.py b/utils/metrics.py index ba812ff13a58..666b8c7ec1c0 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -147,12 +147,12 @@ def process_batch(self, detections, labels): if n and sum(j) == 1: self.matrix[gc, detection_classes[m1[j]]] += 1 # correct else: - self.matrix[gc, self.nc] += 1 # background FP + self.matrix[self.nc, gc] += 1 # background FP if n: for i, dc in enumerate(detection_classes): if not any(m1 == i): - self.matrix[self.nc, dc] += 1 # background FN + self.matrix[dc, self.nc] += 1 # background FN def matrix(self): return self.matrix @@ -168,8 +168,8 @@ def plot(self, save_dir='', names=()): sn.set(font_scale=1.0 if self.nc < 50 else 0.8) # for label size labels = (0 < len(names) < 99) and len(names) == self.nc # apply names to ticklabels sn.heatmap(array, annot=self.nc < 30, annot_kws={"size": 8}, cmap='Blues', fmt='.2f', square=True, - xticklabels=names + ['background FN'] if labels else "auto", - yticklabels=names + ['background FP'] if labels else "auto").set_facecolor((1, 1, 1)) + xticklabels=names + ['background FP'] if labels else "auto", + yticklabels=names + ['background FN'] if labels else "auto").set_facecolor((1, 1, 1)) fig.axes[0].set_xlabel('True') fig.axes[0].set_ylabel('Predicted') fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250) From fe6ebb96bbe630cc45ed02ec0ea3fa0a3aa8c506 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 2 Mar 2021 19:20:51 -0800 Subject: [PATCH 093/254] Created using Colaboratory --- tutorial.ipynb | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 7fce40c3824e..f2b03dc57f0a 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -1169,11 +1169,9 @@ }, "source": [ "# Reproduce\n", - "%%shell\n", - "for x in yolov5s yolov5m yolov5l yolov5x; 
do\n", - " python test.py --weights $x.pt --data coco.yaml --img 640 --conf 0.25 --iou 0.45 # speed\n", - " python test.py --weights $x.pt --data coco.yaml --img 640 --conf 0.001 --iou 0.65 # mAP\n", - "done" + "for x in 'yolov5s', 'yolov5m', 'yolov5l', 'yolov5x':\n", + " !python test.py --weights {x}.pt --data coco.yaml --img 640 --conf 0.25 --iou 0.45 # speed\n", + " !python test.py --weights {x}.pt --data coco.yaml --img 640 --conf 0.001 --iou 0.65 # mAP" ], "execution_count": null, "outputs": [] From a3ecf0fd640465f9a7c009e81bcc5ecabf381004 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 2 Mar 2021 23:08:21 -0800 Subject: [PATCH 094/254] Anchor override (#2350) --- models/yolo.py | 7 +++++-- train.py | 2 +- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/models/yolo.py b/models/yolo.py index a9e1da43d913..a047fef397ee 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -62,7 +62,7 @@ def _make_grid(nx=20, ny=20): class Model(nn.Module): - def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None): # model, input channels, number of classes + def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, input channels, number of classes super(Model, self).__init__() if isinstance(cfg, dict): self.yaml = cfg # model dict @@ -75,8 +75,11 @@ def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None): # model, input channels, # Define model ch = self.yaml['ch'] = self.yaml.get('ch', ch) # input channels if nc and nc != self.yaml['nc']: - logger.info('Overriding model.yaml nc=%g with nc=%g' % (self.yaml['nc'], nc)) + logger.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}") self.yaml['nc'] = nc # override yaml value + if anchors: + logger.info(f'Overriding model.yaml anchors with anchors={anchors}') + self.yaml['anchors'] = round(anchors) # override yaml value self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]) # model, savelist self.names = [str(i) for i in range(self.yaml['nc'])] # default names # 
print([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))]) diff --git a/train.py b/train.py index e2c82339f7fe..1b8b315ce927 100644 --- a/train.py +++ b/train.py @@ -84,7 +84,7 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): model.load_state_dict(state_dict, strict=False) # load logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights)) # report else: - model = Model(opt.cfg, ch=3, nc=nc).to(device) # create + model = Model(opt.cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create # Freeze freeze = [] # parameter names to freeze (full or partial) From e931b9da33f45551928059b8d61bddd50e401e48 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 3 Mar 2021 21:06:36 -0800 Subject: [PATCH 095/254] Resume with custom anchors fix (#2361) * Resume with custom anchors fix * Update train.py --- train.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/train.py b/train.py index 1b8b315ce927..ecac59857ccc 100644 --- a/train.py +++ b/train.py @@ -75,10 +75,8 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): with torch_distributed_zero_first(rank): attempt_download(weights) # download if not found locally ckpt = torch.load(weights, map_location=device) # load checkpoint - if hyp.get('anchors'): - ckpt['model'].yaml['anchors'] = round(hyp['anchors']) # force autoanchor - model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc).to(device) # create - exclude = ['anchor'] if opt.cfg or hyp.get('anchors') else [] # exclude keys + model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create + exclude = ['anchor'] if (opt.cfg or hyp.get('anchors')) and not opt.resume else [] # exclude keys state_dict = ckpt['model'].float().state_dict() # to FP32 state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude) # intersect model.load_state_dict(state_dict, strict=False) # load @@ -216,6 +214,7 @@ def train(hyp, opt, 
device, tb_writer=None, wandb=None): # Anchors if not opt.noautoanchor: check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz) + model.half().float() # pre-reduce anchor precision # Model parameters hyp['box'] *= 3. / nl # scale to layers From 300d518f73796cebb26f0a3233e180ef1665d6ee Mon Sep 17 00:00:00 2001 From: Yonghye Kwon Date: Sat, 6 Mar 2021 06:06:18 +0900 Subject: [PATCH 096/254] Faster random index generator for mosaic augmentation (#2345) * faster random index generator for mosaic augementation We don't need to access list to generate random index It makes augmentation slower. * Update datasets.py Co-authored-by: Glenn Jocher From 692e1f31dc1fecdd57bfada86380933953b6e899 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 5 Mar 2021 15:26:27 -0800 Subject: [PATCH 097/254] --no-cache notebook (#2381) --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index d42af2f78954..1a8fe2e72885 100644 --- a/Dockerfile +++ b/Dockerfile @@ -7,7 +7,7 @@ RUN apt update && apt install -y zip htop screen libgl1-mesa-glx # Install python dependencies RUN python -m pip install --upgrade pip COPY requirements.txt . -RUN pip install -r requirements.txt gsutil +RUN pip install --no-cache -r requirements.txt gsutil notebook # Create working directory RUN mkdir -p /usr/src/app From c64fe219b4333b98c88a2a706101597f4059bb71 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 5 Mar 2021 15:53:57 -0800 Subject: [PATCH 098/254] ENV HOME=/usr/src/app (#2382) Set HOME environment variable per Binder requirements. 
https://github.com/binder-examples/minimal-dockerfile --- Dockerfile | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/Dockerfile b/Dockerfile index 1a8fe2e72885..e1b40c2d15c6 100644 --- a/Dockerfile +++ b/Dockerfile @@ -5,8 +5,8 @@ FROM nvcr.io/nvidia/pytorch:21.02-py3 RUN apt update && apt install -y zip htop screen libgl1-mesa-glx # Install python dependencies -RUN python -m pip install --upgrade pip COPY requirements.txt . +RUN python -m pip install --upgrade pip RUN pip install --no-cache -r requirements.txt gsutil notebook # Create working directory @@ -16,11 +16,8 @@ WORKDIR /usr/src/app # Copy contents COPY . /usr/src/app -# Copy weights -#RUN python3 -c "from models import *; \ -#attempt_download('weights/yolov5s.pt'); \ -#attempt_download('weights/yolov5m.pt'); \ -#attempt_download('weights/yolov5l.pt')" +# Set environment variables +ENV HOME=/usr/src/app # --------------------------------------------------- Extras Below --------------------------------------------------- From cd8ed3521d98ea120d07f57ea5372c4b375241ca Mon Sep 17 00:00:00 2001 From: Yonghye Kwon Date: Sat, 6 Mar 2021 15:58:26 +0900 Subject: [PATCH 099/254] image weights compatible faster random index generator v2 for mosaic augmentation (#2383) image weights compatible faster random index generator v2 for mosaic augmentation --- utils/datasets.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index d6ab16518034..ed18f449ddd3 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -666,7 +666,7 @@ def load_mosaic(self, index): labels4, segments4 = [], [] s = self.img_size yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y - indices = [index] + [self.indices[random.randint(0, self.n - 1)] for _ in range(3)] # 3 additional image indices + indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices for i, index in enumerate(indices): # Load 
image img, _, (h, w) = load_image(self, index) @@ -721,7 +721,7 @@ def load_mosaic9(self, index): labels9, segments9 = [], [] s = self.img_size - indices = [index] + [self.indices[random.randint(0, self.n - 1)] for _ in range(8)] # 8 additional image indices + indices = [index] + random.choices(self.indices, k=8) # 8 additional image indices for i, index in enumerate(indices): # Load image img, _, (h, w) = load_image(self, index) From 7a0a81fd1d770bbfbf94ced5e38cc0f0573b765e Mon Sep 17 00:00:00 2001 From: Jan Hajek Date: Sat, 6 Mar 2021 21:02:10 +0100 Subject: [PATCH 100/254] GPU export options (#2297) * option for skip last layer and cuda export support * added parameter device * fix import * cleanup 1 * cleanup 2 * opt-in grid --grid will export with grid computation, default export will skip grid (same as current) * default --device cpu GPU export causes ONNX and CoreML errors. Co-authored-by: Jan Hajek Co-authored-by: Glenn Jocher --- models/export.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/models/export.py b/models/export.py index cc817871f218..11e60c7a583d 100644 --- a/models/export.py +++ b/models/export.py @@ -17,13 +17,16 @@ from models.experimental import attempt_load from utils.activations import Hardswish, SiLU from utils.general import set_logging, check_img_size +from utils.torch_utils import select_device if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--weights', type=str, default='./yolov5s.pt', help='weights path') # from yolov5/models/ parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='image size') # height, width - parser.add_argument('--dynamic', action='store_true', help='dynamic ONNX axes') parser.add_argument('--batch-size', type=int, default=1, help='batch size') + parser.add_argument('--dynamic', action='store_true', help='dynamic ONNX axes') + parser.add_argument('--grid', action='store_true', help='export Detect() layer grid') + 
parser.add_argument('--device', default='cpu', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') opt = parser.parse_args() opt.img_size *= 2 if len(opt.img_size) == 1 else 1 # expand print(opt) @@ -31,7 +34,8 @@ t = time.time() # Load PyTorch model - model = attempt_load(opt.weights, map_location=torch.device('cpu')) # load FP32 model + device = select_device(opt.device) + model = attempt_load(opt.weights, map_location=device) # load FP32 model labels = model.names # Checks @@ -39,7 +43,7 @@ opt.img_size = [check_img_size(x, gs) for x in opt.img_size] # verify img_size are gs-multiples # Input - img = torch.zeros(opt.batch_size, 3, *opt.img_size) # image size(1,3,320,192) iDetection + img = torch.zeros(opt.batch_size, 3, *opt.img_size).to(device) # image size(1,3,320,192) iDetection # Update model for k, m in model.named_modules(): @@ -51,7 +55,7 @@ m.act = SiLU() # elif isinstance(m, models.yolo.Detect): # m.forward = m.forward_export # assign forward (optional) - model.model[-1].export = True # set Detect() layer export=True + model.model[-1].export = not opt.grid # set Detect() layer grid export y = model(img) # dry run # TorchScript export From ba18528b4737a4b08b55653c54f3d3e830f8e151 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 6 Mar 2021 13:07:34 -0800 Subject: [PATCH 101/254] bbox_iou() stability and speed improvements (#2385) --- utils/general.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/general.py b/utils/general.py index df8cf7bab60d..e1c14bdaa4b3 100755 --- a/utils/general.py +++ b/utils/general.py @@ -312,7 +312,7 @@ def clip_coords(boxes, img_shape): boxes[:, 3].clamp_(0, img_shape[0]) # y2 -def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-9): +def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7): # Returns the IoU of box1 to box2. 
box1 is 4, box2 is nx4 box2 = box2.T @@ -348,7 +348,7 @@ def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps= elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2) with torch.no_grad(): - alpha = v / ((1 + eps) - iou + v) + alpha = v / (v - iou + (1 + eps)) return iou - (rho2 / c2 + v * alpha) # CIoU else: # GIoU https://arxiv.org/pdf/1902.09630.pdf c_area = cw * ch + eps # convex area From 7c2c95732c3eaa10465080b693e14a9e12e08e8d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 7 Mar 2021 20:18:30 -0800 Subject: [PATCH 102/254] AWS wait && echo "All tasks done." (#2391) --- utils/aws/userdata.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/aws/userdata.sh b/utils/aws/userdata.sh index 36405d1a1565..a6d6e7976cf3 100644 --- a/utils/aws/userdata.sh +++ b/utils/aws/userdata.sh @@ -11,7 +11,8 @@ if [ ! -d yolov5 ]; then cd yolov5 bash data/scripts/get_coco.sh && echo "Data done." & sudo docker pull ultralytics/yolov5:latest && echo "Docker done." & - # python -m pip install --upgrade pip && pip install -r requirements.txt && python detect.py && echo "Requirements done." & + python -m pip install --upgrade pip && pip install -r requirements.txt && python detect.py && echo "Requirements done." & + wait && echo "All tasks done." # finish background tasks else echo "Running re-start script." 
# resume interrupted runs i=0 From e8a2b83268950e346899a84e8d29e84d178553b1 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 7 Mar 2021 20:21:49 -0800 Subject: [PATCH 103/254] GCP sudo docker userdata.sh (#2393) * GCP sudo docker * cleanup --- utils/aws/userdata.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/utils/aws/userdata.sh b/utils/aws/userdata.sh index a6d6e7976cf3..890606b76a06 100644 --- a/utils/aws/userdata.sh +++ b/utils/aws/userdata.sh @@ -16,12 +16,12 @@ if [ ! -d yolov5 ]; then else echo "Running re-start script." # resume interrupted runs i=0 - list=$(docker ps -qa) # container list i.e. $'one\ntwo\nthree\nfour' + list=$(sudo docker ps -qa) # container list i.e. $'one\ntwo\nthree\nfour' while IFS= read -r id; do ((i++)) echo "restarting container $i: $id" - docker start $id - # docker exec -it $id python train.py --resume # single-GPU - docker exec -d $id python utils/aws/resume.py + sudo docker start $id + # sudo docker exec -it $id python train.py --resume # single-GPU + sudo docker exec -d $id python utils/aws/resume.py # multi-scenario done <<<"$list" fi From c51dfec8ea554db6811579f6d618dac45766e647 Mon Sep 17 00:00:00 2001 From: Kartikeya Sharma Date: Tue, 9 Mar 2021 21:26:49 -0600 Subject: [PATCH 104/254] CVPR 2021 Argoverse-HD dataset autodownload support (#2400) * added argoverse-download ability * bugfix * add support for Argoverse dataset * Refactored code * renamed to argoverse-HD * unzip -q and YOLOv5 small cleanup items * add image counts Co-authored-by: Kartikeya Sharma Co-authored-by: Kartikeya Sharma Co-authored-by: Glenn Jocher --- data/argoverse_hd.yaml | 21 +++++++++++ data/scripts/get_argoverse_hd.sh | 65 ++++++++++++++++++++++++++++++++ 2 files changed, 86 insertions(+) create mode 100644 data/argoverse_hd.yaml create mode 100644 data/scripts/get_argoverse_hd.sh diff --git a/data/argoverse_hd.yaml b/data/argoverse_hd.yaml new file mode 100644 index 000000000000..df7a9361e769 --- /dev/null +++ 
b/data/argoverse_hd.yaml @@ -0,0 +1,21 @@ +# Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ +# Train command: python train.py --data argoverse_hd.yaml +# Default dataset location is next to /yolov5: +# /parent_folder +# /argoverse +# /yolov5 + + +# download command/URL (optional) +download: bash data/scripts/get_argoverse_hd.sh + +# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/] +train: ../argoverse/Argoverse-1.1/images/train/ # 39384 images +val: ../argoverse/Argoverse-1.1/images/val/ # 15062 iamges +test: ../argoverse/Argoverse-1.1/images/test/ # Submit to: https://eval.ai/web/challenges/challenge-page/800/overview + +# number of classes +nc: 8 + +# class names +names: [ 'person', 'bicycle', 'car', 'motorcycle', 'bus', 'truck', 'traffic_light', 'stop_sign' ] diff --git a/data/scripts/get_argoverse_hd.sh b/data/scripts/get_argoverse_hd.sh new file mode 100644 index 000000000000..884862db03f5 --- /dev/null +++ b/data/scripts/get_argoverse_hd.sh @@ -0,0 +1,65 @@ +#!/bin/bash +# Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ +# Download command: bash data/scripts/get_argoverse_hd.sh +# Train command: python train.py --data argoverse_hd.yaml +# Default dataset location is next to /yolov5: +# /parent_folder +# /argoverse +# /yolov5 + +# Download/unzip images +d='../argoverse/' # unzip directory +mkdir $d +url=https://argoverse-hd.s3.us-east-2.amazonaws.com/ +f=Argoverse-HD-Full.zip +wget $url$f -O $f && unzip -q $f -d $d && rm $f & # download, unzip, remove in background +wait # finish background tasks + +cd ../argoverse/Argoverse-1.1/ +ln -s tracking images + +cd ../Argoverse-HD/annotations/ + +python3 - "$@" < Date: Tue, 9 Mar 2021 21:07:27 -0800 Subject: [PATCH 105/254] CVPR 2021 Argoverse-HD autodownload fix (#2418) --- data/scripts/get_argoverse_hd.sh | 19 ++++++++----------- 1 file changed, 8 
insertions(+), 11 deletions(-) diff --git a/data/scripts/get_argoverse_hd.sh b/data/scripts/get_argoverse_hd.sh index 884862db03f5..9e0db9fad91b 100644 --- a/data/scripts/get_argoverse_hd.sh +++ b/data/scripts/get_argoverse_hd.sh @@ -12,8 +12,8 @@ d='../argoverse/' # unzip directory mkdir $d url=https://argoverse-hd.s3.us-east-2.amazonaws.com/ f=Argoverse-HD-Full.zip -wget $url$f -O $f && unzip -q $f -d $d && rm $f & # download, unzip, remove in background -wait # finish background tasks +wget $url$f -O $f && unzip -q $f -d $d && rm $f &# download, unzip, remove in background +wait # finish background tasks cd ../argoverse/Argoverse-1.1/ ln -s tracking images @@ -23,6 +23,7 @@ cd ../Argoverse-HD/annotations/ python3 - "$@" < Date: Tue, 9 Mar 2021 23:43:46 -0800 Subject: [PATCH 106/254] DDP after autoanchor reorder (#2421) --- train.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/train.py b/train.py index ecac59857ccc..6bd65f063391 100644 --- a/train.py +++ b/train.py @@ -181,10 +181,6 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device) logger.info('Using SyncBatchNorm()') - # DDP mode - if cuda and rank != -1: - model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank) - # Trainloader dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt, hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=rank, @@ -214,7 +210,11 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): # Anchors if not opt.noautoanchor: check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz) - model.half().float() # pre-reduce anchor precision + model.half().float() # pre-reduce anchor precision + + # DDP mode + if cuda and rank != -1: + model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank) # Model parameters hyp['box'] *= 3. 
/ nl # scale to layers From f01f3223d564e40e7dfa99997c3c520ab128c925 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 12 Mar 2021 13:35:44 -0800 Subject: [PATCH 107/254] Integer printout (#2450) * Integer printout * test.py 'Labels' * Update train.py --- test.py | 4 ++-- train.py | 2 +- utils/torch_utils.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/test.py b/test.py index fd4d339ffea6..46288019a8bd 100644 --- a/test.py +++ b/test.py @@ -93,7 +93,7 @@ def test(data, confusion_matrix = ConfusionMatrix(nc=nc) names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)} coco91class = coco80_to_coco91_class() - s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', 'mAP@.5', 'mAP@.5:.95') + s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95') p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0. loss = torch.zeros(3, device=device) jdict, stats, ap, ap_class, wandb_images = [], [], [], [], [] @@ -223,7 +223,7 @@ def test(data, nt = torch.zeros(1) # Print results - pf = '%20s' + '%12.3g' * 6 # print format + pf = '%20s' + '%12i' * 2 + '%12.3g' * 4 # print format print(pf % ('all', seen, nt.sum(), mp, mr, map50, map)) # Print results per class diff --git a/train.py b/train.py index 6bd65f063391..dcb89a3c199b 100644 --- a/train.py +++ b/train.py @@ -264,7 +264,7 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): if rank != -1: dataloader.sampler.set_epoch(epoch) pbar = enumerate(dataloader) - logger.info(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'targets', 'img_size')) + logger.info(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'labels', 'img_size')) if rank in [-1, 0]: pbar = tqdm(pbar, total=nb) # progress bar optimizer.zero_grad() diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 1b1cc2038c55..806d29470e55 100644 --- a/utils/torch_utils.py +++ 
b/utils/torch_utils.py @@ -120,7 +120,7 @@ def profile(x, ops, n=100, device=None): s_in = tuple(x.shape) if isinstance(x, torch.Tensor) else 'list' s_out = tuple(y.shape) if isinstance(y, torch.Tensor) else 'list' p = sum(list(x.numel() for x in m.parameters())) if isinstance(m, nn.Module) else 0 # parameters - print(f'{p:12.4g}{flops:12.4g}{dtf:16.4g}{dtb:16.4g}{str(s_in):>24s}{str(s_out):>24s}') + print(f'{p:12}{flops:12.4g}{dtf:16.4g}{dtb:16.4g}{str(s_in):>24s}{str(s_out):>24s}') def is_parallel(model): From f4197214aa3776ea2dfab0f5fdf1f36537b0b125 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 12 Mar 2021 22:08:42 -0800 Subject: [PATCH 108/254] Update test.py --task train val study (#2453) * Update test.py --task train val study * update argparser --task --- test.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/test.py b/test.py index 46288019a8bd..39e0992264ec 100644 --- a/test.py +++ b/test.py @@ -85,9 +85,9 @@ def test(data, if not training: if device.type != 'cpu': model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once - path = data['test'] if opt.task == 'test' else data['val'] # path to val/test images - dataloader = create_dataloader(path, imgsz, batch_size, gs, opt, pad=0.5, rect=True, - prefix=colorstr('test: ' if opt.task == 'test' else 'val: '))[0] + task = opt.task if opt.task in ('train', 'val', 'test') else 'val' # path to train/val/test images + dataloader = create_dataloader(data[task], imgsz, batch_size, gs, opt, pad=0.5, rect=True, + prefix=colorstr(f'{task}: '))[0] seen = 0 confusion_matrix = ConfusionMatrix(nc=nc) @@ -287,7 +287,7 @@ def test(data, parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)') parser.add_argument('--conf-thres', type=float, default=0.001, help='object confidence threshold') parser.add_argument('--iou-thres', type=float, default=0.6, help='IOU threshold for NMS') - parser.add_argument('--task', 
default='val', help="'val', 'test', 'study'") + parser.add_argument('--task', default='val', help='train, val, test, speed or study') parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset') parser.add_argument('--augment', action='store_true', help='augmented inference') @@ -305,7 +305,7 @@ def test(data, print(opt) check_requirements() - if opt.task in ['val', 'test']: # run normally + if opt.task in ('train', 'val', 'test'): # run normally test(opt.data, opt.weights, opt.batch_size, From 08d4918d7f49055158b1cceb27ea0d1990251afc Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 12 Mar 2021 22:15:41 -0800 Subject: [PATCH 109/254] labels.jpg class names (#2454) * labels.png class names * fontsize=10 --- train.py | 2 +- utils/plots.py | 9 +++++++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/train.py b/train.py index dcb89a3c199b..005fdf60c021 100644 --- a/train.py +++ b/train.py @@ -203,7 +203,7 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): # cf = torch.bincount(c.long(), minlength=nc) + 1. # frequency # model._initialize_biases(cf.to(device)) if plots: - plot_labels(labels, save_dir, loggers) + plot_labels(labels, names, save_dir, loggers) if tb_writer: tb_writer.add_histogram('classes', c, 0) diff --git a/utils/plots.py b/utils/plots.py index aa9a1cab81f0..47e7b7b74f1c 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -269,7 +269,7 @@ def plot_study_txt(path='', x=None): # from utils.plots import *; plot_study_tx plt.savefig(str(Path(path).name) + '.png', dpi=300) -def plot_labels(labels, save_dir=Path(''), loggers=None): +def plot_labels(labels, names=(), save_dir=Path(''), loggers=None): # plot dataset labels print('Plotting labels... 
') c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes @@ -286,7 +286,12 @@ def plot_labels(labels, save_dir=Path(''), loggers=None): matplotlib.use('svg') # faster ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel() ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8) - ax[0].set_xlabel('classes') + ax[0].set_ylabel('instances') + if 0 < len(names) < 30: + ax[0].set_xticks(range(len(names))) + ax[0].set_xticklabels(names, rotation=90, fontsize=10) + else: + ax[0].set_xlabel('classes') sns.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9) sns.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9) From 747c2653eecfb870b1ed40b1e00e0ef209b036e9 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 12 Mar 2021 22:27:53 -0800 Subject: [PATCH 110/254] CVPR 2021 Argoverse-HD autodownload curl (#2455) curl preferred over wget for slightly better cross platform compatibility (i.e. out of the box macos compatible). --- data/scripts/get_argoverse_hd.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/scripts/get_argoverse_hd.sh b/data/scripts/get_argoverse_hd.sh index 9e0db9fad91b..caec61efed78 100644 --- a/data/scripts/get_argoverse_hd.sh +++ b/data/scripts/get_argoverse_hd.sh @@ -12,7 +12,7 @@ d='../argoverse/' # unzip directory mkdir $d url=https://argoverse-hd.s3.us-east-2.amazonaws.com/ f=Argoverse-HD-Full.zip -wget $url$f -O $f && unzip -q $f -d $d && rm $f &# download, unzip, remove in background +curl -L $url$f -o $f && unzip -q $f -d $d && rm $f &# download, unzip, remove in background wait # finish background tasks cd ../argoverse/Argoverse-1.1/ From 569757ecc09d115e275a6ec3662514d72dfe18c2 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 13 Mar 2021 19:50:34 -0800 Subject: [PATCH 111/254] Add autoShape() speed profiling (#2459) * Add autoShape() speed profiling * Update common.py * Create README.md * Update hubconf.py * cleanuip --- README.md | 4 ++-- hubconf.py | 8 ++++---- 
models/common.py | 14 +++++++++++--- 3 files changed, 17 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index b7129e80adfe..097b2750bf49 100755 --- a/README.md +++ b/README.md @@ -108,11 +108,11 @@ To run **batched inference** with YOLOv5 and [PyTorch Hub](https://github.com/ul import torch # Model -model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True) +model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # Images dir = 'https://github.com/ultralytics/yolov5/raw/master/data/images/' -imgs = [dir + f for f in ('zidane.jpg', 'bus.jpg')] # batched list of images +imgs = [dir + f for f in ('zidane.jpg', 'bus.jpg')] # batch of images # Inference results = model(imgs) diff --git a/hubconf.py b/hubconf.py index a8eb51681794..e51ac90da36c 100644 --- a/hubconf.py +++ b/hubconf.py @@ -51,7 +51,7 @@ def create(name, pretrained, channels, classes, autoshape): raise Exception(s) from e -def yolov5s(pretrained=False, channels=3, classes=80, autoshape=True): +def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True): """YOLOv5-small model from https://github.com/ultralytics/yolov5 Arguments: @@ -65,7 +65,7 @@ def yolov5s(pretrained=False, channels=3, classes=80, autoshape=True): return create('yolov5s', pretrained, channels, classes, autoshape) -def yolov5m(pretrained=False, channels=3, classes=80, autoshape=True): +def yolov5m(pretrained=True, channels=3, classes=80, autoshape=True): """YOLOv5-medium model from https://github.com/ultralytics/yolov5 Arguments: @@ -79,7 +79,7 @@ def yolov5m(pretrained=False, channels=3, classes=80, autoshape=True): return create('yolov5m', pretrained, channels, classes, autoshape) -def yolov5l(pretrained=False, channels=3, classes=80, autoshape=True): +def yolov5l(pretrained=True, channels=3, classes=80, autoshape=True): """YOLOv5-large model from https://github.com/ultralytics/yolov5 Arguments: @@ -93,7 +93,7 @@ def yolov5l(pretrained=False, channels=3, classes=80, autoshape=True): return 
create('yolov5l', pretrained, channels, classes, autoshape) -def yolov5x(pretrained=False, channels=3, classes=80, autoshape=True): +def yolov5x(pretrained=True, channels=3, classes=80, autoshape=True): """YOLOv5-xlarge model from https://github.com/ultralytics/yolov5 Arguments: diff --git a/models/common.py b/models/common.py index ad35f908d865..7ef5762efbf3 100644 --- a/models/common.py +++ b/models/common.py @@ -12,6 +12,7 @@ from utils.datasets import letterbox from utils.general import non_max_suppression, make_divisible, scale_coords, xyxy2xywh from utils.plots import color_list, plot_one_box +from utils.torch_utils import time_synchronized def autopad(k, p=None): # kernel, padding @@ -190,6 +191,7 @@ def forward(self, imgs, size=640, augment=False, profile=False): # torch: = torch.zeros(16,3,720,1280) # BCHW # multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] # list of images + t = [time_synchronized()] p = next(self.model.parameters()) # for device and type if isinstance(imgs, torch.Tensor): # torch return self.model(imgs.to(p.device).type_as(p), augment, profile) # inference @@ -216,22 +218,25 @@ def forward(self, imgs, size=640, augment=False, profile=False): x = np.stack(x, 0) if n > 1 else x[0][None] # stack x = np.ascontiguousarray(x.transpose((0, 3, 1, 2))) # BHWC to BCHW x = torch.from_numpy(x).to(p.device).type_as(p) / 255. 
# uint8 to fp16/32 + t.append(time_synchronized()) # Inference with torch.no_grad(): y = self.model(x, augment, profile)[0] # forward - y = non_max_suppression(y, conf_thres=self.conf, iou_thres=self.iou, classes=self.classes) # NMS + t.append(time_synchronized()) # Post-process + y = non_max_suppression(y, conf_thres=self.conf, iou_thres=self.iou, classes=self.classes) # NMS for i in range(n): scale_coords(shape1, y[i][:, :4], shape0[i]) + t.append(time_synchronized()) - return Detections(imgs, y, files, self.names) + return Detections(imgs, y, files, t, self.names, x.shape) class Detections: # detections class for YOLOv5 inference results - def __init__(self, imgs, pred, files, names=None): + def __init__(self, imgs, pred, files, times, names=None, shape=None): super(Detections, self).__init__() d = pred[0].device # device gn = [torch.tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.], device=d) for im in imgs] # normalizations @@ -244,6 +249,8 @@ def __init__(self, imgs, pred, files, names=None): self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized self.n = len(self.pred) + self.t = ((times[i + 1] - times[i]) * 1000 / self.n for i in range(3)) # timestamps (ms) + self.s = shape # inference BCHW shape def display(self, pprint=False, show=False, save=False, render=False, save_dir=''): colors = color_list() @@ -271,6 +278,7 @@ def display(self, pprint=False, show=False, save=False, render=False, save_dir=' def print(self): self.display(pprint=True) # print results + print(f'Speed: %.1f/%.1f/%.1f ms pre-process/inference/NMS per image at shape {tuple(self.s)}' % tuple(self.t)) def show(self): self.display(show=True) # show results From f813f6dcc875901c6ba7a509c14227c2292efed4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 13 Mar 2021 20:00:03 -0800 Subject: [PATCH 112/254] autoShape() speed profiling update (#2460) --- models/common.py | 4 ++-- 1 file changed, 2 
insertions(+), 2 deletions(-) diff --git a/models/common.py b/models/common.py index 7ef5762efbf3..464d639a1f0b 100644 --- a/models/common.py +++ b/models/common.py @@ -168,7 +168,6 @@ def forward(self, x): class autoShape(nn.Module): # input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS - img_size = 640 # inference size (pixels) conf = 0.25 # NMS confidence threshold iou = 0.45 # NMS IoU threshold classes = None # (optional list) filter by class @@ -278,7 +277,8 @@ def display(self, pprint=False, show=False, save=False, render=False, save_dir=' def print(self): self.display(pprint=True) # print results - print(f'Speed: %.1f/%.1f/%.1f ms pre-process/inference/NMS per image at shape {tuple(self.s)}' % tuple(self.t)) + print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' % + tuple(self.t)) def show(self): self.display(show=True) # show results From 20d879db36c4b5f72f4002127a9ebbdf30da11de Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 13 Mar 2021 20:05:21 -0800 Subject: [PATCH 113/254] Update tutorial.ipynb --- tutorial.ipynb | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index f2b03dc57f0a..5eeb78d12faa 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -605,14 +605,14 @@ "output_type": "stream", "text": [ "Namespace(agnostic_nms=False, augment=False, classes=None, conf_thres=0.25, device='', exist_ok=False, img_size=640, iou_thres=0.45, name='exp', project='runs/detect', save_conf=False, save_txt=False, source='data/images/', update=False, view_img=False, weights=['yolov5s.pt'])\n", - "YOLOv5 v4.0-21-gb26a2f6 torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16130.5MB)\n", + "YOLOv5 v4.0-132-gf813f6d torch 1.8.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", "\n", "Fusing layers... 
\n", "Model Summary: 224 layers, 7266973 parameters, 0 gradients, 17.0 GFLOPS\n", - "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 buss, 1 skateboards, Done. (0.011s)\n", - "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, Done. (0.011s)\n", + "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, Done. (0.008s)\n", + "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 1 tie, Done. (0.008s)\n", "Results saved to runs/detect/exp\n", - "Done. (0.110s)\n" + "Done. (0.087)\n" ], "name": "stdout" }, @@ -1247,4 +1247,4 @@ "outputs": [] } ] -} \ No newline at end of file +} From 6f718cee740e7cd423edd1136db78c5be49fa7c0 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 13 Mar 2021 20:20:09 -0800 Subject: [PATCH 114/254] Created using Colaboratory --- tutorial.ipynb | 185 +++++++++++++++++++++++++------------------------ 1 file changed, 93 insertions(+), 92 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 5eeb78d12faa..b678e4bec9c2 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -16,7 +16,7 @@ "accelerator": "GPU", "widgets": { "application/vnd.jupyter.widget-state+json": { - "1f8e9b8ebded4175b2eaa9f75c3ceb00": { + "b54ab52f1d4f4903897ab6cd49a3b9b2": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "state": { @@ -28,15 +28,15 @@ "_view_count": null, "_view_module_version": "1.5.0", "box_style": "", - "layout": "IPY_MODEL_0a1246a73077468ab80e979cc0576cd2", + "layout": "IPY_MODEL_1852f93fc2714d40adccb8aa161c42ff", "_model_module": "@jupyter-widgets/controls", "children": [ - "IPY_MODEL_d327cde5a85a4a51bb8b1b3e9cf06c97", - "IPY_MODEL_d5ef1cb2cbed4b87b3c5d292ff2b0da6" + "IPY_MODEL_3293cfe869bd4a1bbbe18b49b6815de1", + "IPY_MODEL_8d5ee8b8ab6d46b98818bd2c562ddd1c" ] } }, - "0a1246a73077468ab80e979cc0576cd2": { + "1852f93fc2714d40adccb8aa161c42ff": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -87,12 +87,12 
@@ "left": null } }, - "d327cde5a85a4a51bb8b1b3e9cf06c97": { + "3293cfe869bd4a1bbbe18b49b6815de1": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "state": { "_view_name": "ProgressView", - "style": "IPY_MODEL_8d5dff8bca14435a88fa1814533acd85", + "style": "IPY_MODEL_49fcb2adb0354430b76f491af98abfe9", "_dom_classes": [], "description": "100%", "_model_name": "FloatProgressModel", @@ -107,30 +107,30 @@ "min": 0, "description_tooltip": null, "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_3d5136c19e7645ca9bc8f51ceffb2be1" + "layout": "IPY_MODEL_c7d76e0c53064363add56b8d05e561f5" } }, - "d5ef1cb2cbed4b87b3c5d292ff2b0da6": { + "8d5ee8b8ab6d46b98818bd2c562ddd1c": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "state": { "_view_name": "HTMLView", - "style": "IPY_MODEL_2919396dbd4b4c8e821d12bd28665d8a", + "style": "IPY_MODEL_48f321f789634aa584f8a29a3b925dd5", "_dom_classes": [], "description": "", "_model_name": "HTMLModel", "placeholder": "​", "_view_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", - "value": " 781M/781M [00:12<00:00, 65.5MB/s]", + "value": " 781M/781M [00:13<00:00, 62.6MB/s]", "_view_count": null, "_view_module_version": "1.5.0", "description_tooltip": null, "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_6feb16f2b2fa4021b1a271e1dd442d04" + "layout": "IPY_MODEL_6610d6275f3e49d9937d50ed0a105947" } }, - "8d5dff8bca14435a88fa1814533acd85": { + "49fcb2adb0354430b76f491af98abfe9": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "state": { @@ -145,7 +145,7 @@ "_model_module": "@jupyter-widgets/controls" } }, - "3d5136c19e7645ca9bc8f51ceffb2be1": { + "c7d76e0c53064363add56b8d05e561f5": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -196,7 +196,7 @@ "left": null } }, - "2919396dbd4b4c8e821d12bd28665d8a": { + "48f321f789634aa584f8a29a3b925dd5": { "model_module": 
"@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "state": { @@ -210,7 +210,7 @@ "_model_module": "@jupyter-widgets/controls" } }, - "6feb16f2b2fa4021b1a271e1dd442d04": { + "6610d6275f3e49d9937d50ed0a105947": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -261,7 +261,7 @@ "left": null } }, - "e6459e0bcee449b090fc9807672725bc": { + "0fffa335322b41658508e06aed0acbf0": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "state": { @@ -273,15 +273,15 @@ "_view_count": null, "_view_module_version": "1.5.0", "box_style": "", - "layout": "IPY_MODEL_c341e1d3bf3b40d1821ce392eb966c68", + "layout": "IPY_MODEL_a354c6f80ce347e5a3ef64af87c0eccb", "_model_module": "@jupyter-widgets/controls", "children": [ - "IPY_MODEL_660afee173694231a6dce3cd94df6cae", - "IPY_MODEL_261218485cef48df961519dde5edfcbe" + "IPY_MODEL_85823e71fea54c39bd11e2e972348836", + "IPY_MODEL_fb11acd663fa4e71b041d67310d045fd" ] } }, - "c341e1d3bf3b40d1821ce392eb966c68": { + "a354c6f80ce347e5a3ef64af87c0eccb": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -332,12 +332,12 @@ "left": null } }, - "660afee173694231a6dce3cd94df6cae": { + "85823e71fea54c39bd11e2e972348836": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "state": { "_view_name": "ProgressView", - "style": "IPY_MODEL_32736d503c06497abfae8c0421918255", + "style": "IPY_MODEL_8a919053b780449aae5523658ad611fa", "_dom_classes": [], "description": "100%", "_model_name": "FloatProgressModel", @@ -352,30 +352,30 @@ "min": 0, "description_tooltip": null, "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_e257738711f54d5280c8393d9d3dce1c" + "layout": "IPY_MODEL_5bae9393a58b44f7b69fb04816f94f6f" } }, - "261218485cef48df961519dde5edfcbe": { + "fb11acd663fa4e71b041d67310d045fd": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "state": { "_view_name": "HTMLView", - "style": 
"IPY_MODEL_beb7a6fe34b840899bb79c062681696f", + "style": "IPY_MODEL_d26c6d16c7f24030ab2da5285bf198ee", "_dom_classes": [], "description": "", "_model_name": "HTMLModel", "placeholder": "​", "_view_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", - "value": " 21.1M/21.1M [00:00<00:00, 33.5MB/s]", + "value": " 21.1M/21.1M [00:02<00:00, 9.36MB/s]", "_view_count": null, "_view_module_version": "1.5.0", "description_tooltip": null, "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_e639132395d64d70b99d8b72c32f8fbb" + "layout": "IPY_MODEL_f7767886b2364c8d9efdc79e175ad8eb" } }, - "32736d503c06497abfae8c0421918255": { + "8a919053b780449aae5523658ad611fa": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "state": { @@ -390,7 +390,7 @@ "_model_module": "@jupyter-widgets/controls" } }, - "e257738711f54d5280c8393d9d3dce1c": { + "5bae9393a58b44f7b69fb04816f94f6f": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -441,7 +441,7 @@ "left": null } }, - "beb7a6fe34b840899bb79c062681696f": { + "d26c6d16c7f24030ab2da5285bf198ee": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "state": { @@ -455,7 +455,7 @@ "_model_module": "@jupyter-widgets/controls" } }, - "e639132395d64d70b99d8b72c32f8fbb": { + "f7767886b2364c8d9efdc79e175ad8eb": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -550,7 +550,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "ae8805a9-ce15-4e1c-f6b4-baa1c1033f56" + "outputId": "20027455-bf84-41fd-c902-b7282d53c91d" }, "source": [ "!git clone https://github.com/ultralytics/yolov5 # clone repo\n", @@ -563,12 +563,12 @@ "clear_output()\n", "print('Setup complete. 
Using torch %s %s' % (torch.__version__, torch.cuda.get_device_properties(0) if torch.cuda.is_available() else 'CPU'))" ], - "execution_count": null, + "execution_count": 1, "outputs": [ { "output_type": "stream", "text": [ - "Setup complete. Using torch 1.7.0+cu101 _CudaDeviceProperties(name='Tesla V100-SXM2-16GB', major=7, minor=0, total_memory=16160MB, multi_processor_count=80)\n" + "Setup complete. Using torch 1.8.0+cu101 _CudaDeviceProperties(name='Tesla V100-SXM2-16GB', major=7, minor=0, total_memory=16160MB, multi_processor_count=80)\n" ], "name": "stdout" } @@ -672,30 +672,30 @@ "base_uri": "https://localhost:8080/", "height": 65, "referenced_widgets": [ - "1f8e9b8ebded4175b2eaa9f75c3ceb00", - "0a1246a73077468ab80e979cc0576cd2", - "d327cde5a85a4a51bb8b1b3e9cf06c97", - "d5ef1cb2cbed4b87b3c5d292ff2b0da6", - "8d5dff8bca14435a88fa1814533acd85", - "3d5136c19e7645ca9bc8f51ceffb2be1", - "2919396dbd4b4c8e821d12bd28665d8a", - "6feb16f2b2fa4021b1a271e1dd442d04" + "b54ab52f1d4f4903897ab6cd49a3b9b2", + "1852f93fc2714d40adccb8aa161c42ff", + "3293cfe869bd4a1bbbe18b49b6815de1", + "8d5ee8b8ab6d46b98818bd2c562ddd1c", + "49fcb2adb0354430b76f491af98abfe9", + "c7d76e0c53064363add56b8d05e561f5", + "48f321f789634aa584f8a29a3b925dd5", + "6610d6275f3e49d9937d50ed0a105947" ] }, - "outputId": "d6ace7c6-1be5-41ff-d607-1c716b88d298" + "outputId": "f0884441-78d9-443c-afa6-d00ec387908d" }, "source": [ "# Download COCO val2017\n", "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017val.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../ && rm tmp.zip" ], - "execution_count": null, + "execution_count": 2, "outputs": [ { "output_type": "display_data", "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "1f8e9b8ebded4175b2eaa9f75c3ceb00", + "model_id": "b54ab52f1d4f4903897ab6cd49a3b9b2", "version_minor": 0, "version_major": 2 }, @@ -723,45 +723,45 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": 
"cc25f70c-0a11-44f6-cc44-e92c5083488c" + "outputId": "5b54c11e-9f4b-4d9a-8e6e-6a2a4f0cc60d" }, "source": [ "# Run YOLOv5x on COCO val2017\n", "!python test.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65" ], - "execution_count": null, + "execution_count": 3, "outputs": [ { "output_type": "stream", "text": [ "Namespace(augment=False, batch_size=32, conf_thres=0.001, data='./data/coco.yaml', device='', exist_ok=False, img_size=640, iou_thres=0.65, name='exp', project='runs/test', save_conf=False, save_hybrid=False, save_json=True, save_txt=False, single_cls=False, task='val', verbose=False, weights=['yolov5x.pt'])\n", - "YOLOv5 v4.0-75-gbdd88e1 torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", + "YOLOv5 v4.0-133-g20d879d torch 1.8.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", "\n", "Downloading https://github.com/ultralytics/yolov5/releases/download/v4.0/yolov5x.pt to yolov5x.pt...\n", - "100% 168M/168M [00:04<00:00, 39.7MB/s]\n", + "100% 168M/168M [00:02<00:00, 59.1MB/s]\n", "\n", "Fusing layers... \n", "Model Summary: 476 layers, 87730285 parameters, 0 gradients, 218.8 GFLOPS\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco/val2017' for images and labels... 4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:01<00:00, 2824.78it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco/val2017' for images and labels... 
4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:01<00:00, 3236.68it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: ../coco/val2017.cache\n", - " Class Images Targets P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:33<00:00, 1.68it/s]\n", - " all 5e+03 3.63e+04 0.749 0.619 0.68 0.486\n", - "Speed: 5.2/2.0/7.3 ms inference/NMS/total per 640x640 image at batch-size 32\n", + " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:20<00:00, 1.95it/s]\n", + " all 5000 36335 0.749 0.619 0.68 0.486\n", + "Speed: 5.3/1.7/6.9 ms inference/NMS/total per 640x640 image at batch-size 32\n", "\n", "Evaluating pycocotools mAP... saving runs/test/exp/yolov5x_predictions.json...\n", "loading annotations into memory...\n", - "Done (t=0.44s)\n", + "Done (t=0.43s)\n", "creating index...\n", "index created!\n", "Loading and preparing results...\n", - "DONE (t=4.47s)\n", + "DONE (t=5.10s)\n", "creating index...\n", "index created!\n", "Running per image evaluation...\n", "Evaluate annotation type *bbox*\n", - "DONE (t=94.87s).\n", + "DONE (t=88.52s).\n", "Accumulating evaluation results...\n", - "DONE (t=15.96s).\n", + "DONE (t=17.17s).\n", " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.501\n", " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.687\n", " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.544\n", @@ -836,30 +836,30 @@ "base_uri": "https://localhost:8080/", "height": 65, "referenced_widgets": [ - "e6459e0bcee449b090fc9807672725bc", - "c341e1d3bf3b40d1821ce392eb966c68", - "660afee173694231a6dce3cd94df6cae", - "261218485cef48df961519dde5edfcbe", - "32736d503c06497abfae8c0421918255", - "e257738711f54d5280c8393d9d3dce1c", - "beb7a6fe34b840899bb79c062681696f", - "e639132395d64d70b99d8b72c32f8fbb" + "0fffa335322b41658508e06aed0acbf0", + "a354c6f80ce347e5a3ef64af87c0eccb", + "85823e71fea54c39bd11e2e972348836", + "fb11acd663fa4e71b041d67310d045fd", + "8a919053b780449aae5523658ad611fa", + 
"5bae9393a58b44f7b69fb04816f94f6f", + "d26c6d16c7f24030ab2da5285bf198ee", + "f7767886b2364c8d9efdc79e175ad8eb" ] }, - "outputId": "e8b7d5b3-a71e-4446-eec2-ad13419cf700" + "outputId": "b41ac253-9e1b-4c26-d78b-700ea0154f43" }, "source": [ "# Download COCO128\n", "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../ && rm tmp.zip" ], - "execution_count": null, + "execution_count": 4, "outputs": [ { "output_type": "display_data", "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "e6459e0bcee449b090fc9807672725bc", + "model_id": "0fffa335322b41658508e06aed0acbf0", "version_minor": 0, "version_major": 2 }, @@ -924,27 +924,27 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "38e51b29-2df4-4f00-cde8-5f6e4a34da9e" + "outputId": "cf494627-09b9-4399-ff0c-fdb62b32340a" }, "source": [ "# Train YOLOv5s on COCO128 for 3 epochs\n", "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --nosave --cache" ], - "execution_count": null, + "execution_count": 5, "outputs": [ { "output_type": "stream", "text": [ "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", - "YOLOv5 v4.0-75-gbdd88e1 torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", + "YOLOv5 v4.0-133-g20d879d torch 1.8.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", "\n", - "Namespace(adam=False, batch_size=16, bucket='', cache_images=True, cfg='', data='./data/coco128.yaml', device='', epochs=3, evolve=False, exist_ok=False, global_rank=-1, hyp='data/hyp.scratch.yaml', image_weights=False, img_size=[640, 640], linear_lr=False, local_rank=-1, log_artifacts=False, log_imgs=16, multi_scale=False, name='exp', noautoanchor=False, nosave=True, notest=False, project='runs/train', quad=False, rect=False, resume=False, save_dir='runs/train/exp', single_cls=False, sync_bn=False, total_batch_size=16, 
weights='yolov5s.pt', workers=8, world_size=1)\n", + "Namespace(adam=False, batch_size=16, bucket='', cache_images=True, cfg='', data='./data/coco128.yaml', device='', entity=None, epochs=3, evolve=False, exist_ok=False, global_rank=-1, hyp='data/hyp.scratch.yaml', image_weights=False, img_size=[640, 640], linear_lr=False, local_rank=-1, log_artifacts=False, log_imgs=16, multi_scale=False, name='exp', noautoanchor=False, nosave=True, notest=False, project='runs/train', quad=False, rect=False, resume=False, save_dir='runs/train/exp', single_cls=False, sync_bn=False, total_batch_size=16, weights='yolov5s.pt', workers=8, world_size=1)\n", "\u001b[34m\u001b[1mwandb: \u001b[0mInstall Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)\n", "Start Tensorboard with \"tensorboard --logdir runs/train\", view at http://localhost:6006/\n", - "2021-02-12 06:38:28.027271: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.10.1\n", + "2021-03-14 04:18:58.124672: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\n", "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.2, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0\n", "Downloading https://github.com/ultralytics/yolov5/releases/download/v4.0/yolov5s.pt to yolov5s.pt...\n", - "100% 14.1M/14.1M [00:01<00:00, 13.2MB/s]\n", + "100% 14.1M/14.1M [00:00<00:00, 63.1MB/s]\n", "\n", "\n", " from n params module arguments \n", @@ -978,11 +978,11 @@ "Transferred 362/362 items from yolov5s.pt\n", "Scaled weight_decay = 0.0005\n", "Optimizer groups: 62 .bias, 62 conv.weight, 59 other\n", - 
"\u001b[34m\u001b[1mtrain: \u001b[0mScanning '../coco128/labels/train2017' for images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 2566.00it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '../coco128/labels/train2017' for images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 2956.76it/s]\n", "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: ../coco128/labels/train2017.cache\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 175.07it/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco128/labels/train2017.cache' for images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 764773.38it/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 128.17it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 205.30it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco128/labels/train2017.cache' for images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 604584.36it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 144.17it/s]\n", "Plotting labels... \n", "\n", "\u001b[34m\u001b[1mautoanchor: \u001b[0mAnalyzing anchors... 
anchors/target = 4.26, Best Possible Recall (BPR) = 0.9946\n", @@ -991,21 +991,22 @@ "Logging results to runs/train/exp\n", "Starting training for 3 epochs...\n", "\n", - " Epoch gpu_mem box obj cls total targets img_size\n", - " 0/2 3.27G 0.04357 0.06781 0.01869 0.1301 207 640: 100% 8/8 [00:03<00:00, 2.03it/s]\n", - " Class Images Targets P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:04<00:00, 1.14s/it]\n", - " all 128 929 0.646 0.627 0.659 0.431\n", + " Epoch gpu_mem box obj cls total labels img_size\n", + " 0/2 3.29G 0.04237 0.06417 0.02121 0.1277 183 640: 100% 8/8 [00:03<00:00, 2.41it/s]\n", + " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:04<00:00, 1.04s/it]\n", + " all 128 929 0.642 0.637 0.661 0.432\n", "\n", - " Epoch gpu_mem box obj cls total targets img_size\n", - " 1/2 7.75G 0.04308 0.06654 0.02083 0.1304 227 640: 100% 8/8 [00:01<00:00, 4.11it/s]\n", - " Class Images Targets P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:01<00:00, 2.94it/s]\n", - " all 128 929 0.681 0.607 0.663 0.434\n", + " Epoch gpu_mem box obj cls total labels img_size\n", + " 1/2 6.65G 0.04431 0.06403 0.019 0.1273 166 640: 100% 8/8 [00:01<00:00, 5.73it/s]\n", + " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:01<00:00, 3.21it/s]\n", + " all 128 929 0.662 0.626 0.658 0.433\n", "\n", - " Epoch gpu_mem box obj cls total targets img_size\n", - " 2/2 7.75G 0.04461 0.06896 0.01866 0.1322 191 640: 100% 8/8 [00:02<00:00, 3.94it/s]\n", - " Class Images Targets P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:03<00:00, 1.22it/s]\n", - " all 128 929 0.642 0.632 0.662 0.432\n", + " Epoch gpu_mem box obj cls total labels img_size\n", + " 2/2 6.65G 0.04506 0.06836 0.01913 0.1325 182 640: 100% 8/8 [00:01<00:00, 5.51it/s]\n", + " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:02<00:00, 1.35it/s]\n", + " all 128 929 0.658 0.625 0.661 0.433\n", "Optimizer stripped from runs/train/exp/weights/last.pt, 14.8MB\n", + "Optimizer stripped from runs/train/exp/weights/best.pt, 14.8MB\n", "3 epochs completed in 
0.007 hours.\n", "\n" ], @@ -1247,4 +1248,4 @@ "outputs": [] } ] -} +} \ No newline at end of file From 38ff499b26b9f8bf183cd1c08746dd33d000eb59 Mon Sep 17 00:00:00 2001 From: Yann Defretin Date: Mon, 15 Mar 2021 01:11:27 +0100 Subject: [PATCH 115/254] Update autosplit() with annotated_only option (#2466) * Be able to create dataset from annotated images only Add the ability to create a dataset/splits only with images that have an annotation file, i.e a .txt file, associated to it. As we talked about this, the absence of a txt file could mean two things: * either the image wasn't yet labelled by someone, * either there is no object to detect. When it's easy to create small datasets, when you have to create datasets with thousands of images (and more coming), it's hard to track where you at and you don't want to wait to have all of them annotated before starting to train. Which means some images would lack txt files and annotations, resulting in label inconsistency as you say in #2313. By adding the annotated_only argument to the function, people could create, if they want to, datasets/splits only with images that were labelled, for sure. 
* Cleanup and update print() Co-authored-by: Glenn Jocher --- utils/datasets.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index ed18f449ddd3..9a4b3f9fcc9f 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -1032,20 +1032,24 @@ def extract_boxes(path='../coco128/'): # from utils.datasets import *; extract_ b[[1, 3]] = np.clip(b[[1, 3]], 0, h) assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}' - -def autosplit(path='../coco128', weights=(0.9, 0.1, 0.0)): # from utils.datasets import *; autosplit('../coco128') +def autosplit(path='../coco128', weights=(0.9, 0.1, 0.0), annotated_only=False): """ Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files - # Arguments - path: Path to images directory - weights: Train, val, test weights (list) + Usage: from utils.datasets import *; autosplit('../coco128') + Arguments + path: Path to images directory + weights: Train, val, test weights (list) + annotated_only: Only use images with an annotated txt file """ path = Path(path) # images dir - files = list(path.rglob('*.*')) + files = sum([list(path.rglob(f"*.{img_ext}")) for img_ext in img_formats], []) # image files only n = len(files) # number of files indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split + txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files [(path / x).unlink() for x in txt if (path / x).exists()] # remove existing + + print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only) for i, img in tqdm(zip(indices, files), total=n): - if img.suffix[1:] in img_formats: + if not annotated_only or Path(img2label_paths([str(img)])[0]).exists(): # check label with open(path / txt[i], 'a') as f: f.write(str(img) + '\n') # add image to txt file From 2d41e70e828c215a3c8486bb24ac2169084079f1 Mon Sep 17 00:00:00 2001 From: Glenn 
Jocher Date: Sun, 14 Mar 2021 21:58:12 -0700 Subject: [PATCH 116/254] Scipy kmeans-robust autoanchor update (#2470) Fix for https://github.com/ultralytics/yolov5/issues/2394 --- utils/autoanchor.py | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/utils/autoanchor.py b/utils/autoanchor.py index 5dba9f1ea22f..57777462e89f 100644 --- a/utils/autoanchor.py +++ b/utils/autoanchor.py @@ -37,17 +37,21 @@ def metric(k): # compute metric bpr = (best > 1. / thr).float().mean() # best possible recall return bpr, aat - bpr, aat = metric(m.anchor_grid.clone().cpu().view(-1, 2)) + anchors = m.anchor_grid.clone().cpu().view(-1, 2) # current anchors + bpr, aat = metric(anchors) print(f'anchors/target = {aat:.2f}, Best Possible Recall (BPR) = {bpr:.4f}', end='') if bpr < 0.98: # threshold to recompute print('. Attempting to improve anchors, please wait...') na = m.anchor_grid.numel() // 2 # number of anchors - new_anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False) - new_bpr = metric(new_anchors.reshape(-1, 2))[0] + try: + anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False) + except Exception as e: + print(f'{prefix}ERROR: {e}') + new_bpr = metric(anchors)[0] if new_bpr > bpr: # replace anchors - new_anchors = torch.tensor(new_anchors, device=m.anchors.device).type_as(m.anchors) - m.anchor_grid[:] = new_anchors.clone().view_as(m.anchor_grid) # for inference - m.anchors[:] = new_anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1) # loss + anchors = torch.tensor(anchors, device=m.anchors.device).type_as(m.anchors) + m.anchor_grid[:] = anchors.clone().view_as(m.anchor_grid) # for inference + m.anchors[:] = anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1) # loss check_anchor_order(m) print(f'{prefix}New anchors saved to model. 
Update model *.yaml to use these anchors in the future.') else: @@ -119,6 +123,7 @@ def print_results(k): print(f'{prefix}Running kmeans for {n} anchors on {len(wh)} points...') s = wh.std(0) # sigmas for whitening k, dist = kmeans(wh / s, n, iter=30) # points, mean distance + assert len(k) == n, print(f'{prefix}ERROR: scipy.cluster.vq.kmeans requested {n} points but returned only {len(k)}') k *= s wh = torch.tensor(wh, dtype=torch.float32) # filtered wh0 = torch.tensor(wh0, dtype=torch.float32) # unfiltered From 9b11f0c58b7c1f775ee32acb7dcc6a36407a779b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 14 Mar 2021 23:16:17 -0700 Subject: [PATCH 117/254] PyTorch Hub models default to CUDA:0 if available (#2472) * PyTorch Hub models default to CUDA:0 if available * device as string bug fix --- hubconf.py | 4 +++- utils/datasets.py | 4 ++-- utils/general.py | 2 +- utils/torch_utils.py | 6 +++--- 4 files changed, 9 insertions(+), 7 deletions(-) diff --git a/hubconf.py b/hubconf.py index e51ac90da36c..b7b740d39c06 100644 --- a/hubconf.py +++ b/hubconf.py @@ -12,6 +12,7 @@ from models.yolo import Model from utils.general import set_logging from utils.google_utils import attempt_download +from utils.torch_utils import select_device dependencies = ['torch', 'yaml'] set_logging() @@ -43,7 +44,8 @@ def create(name, pretrained, channels, classes, autoshape): model.names = ckpt['model'].names # set class names attribute if autoshape: model = model.autoshape() # for file/URI/PIL/cv2/np inputs and NMS - return model + device = select_device('0' if torch.cuda.is_available() else 'cpu') # default to GPU if available + return model.to(device) except Exception as e: help_url = 'https://github.com/ultralytics/yolov5/issues/36' diff --git a/utils/datasets.py b/utils/datasets.py index 9a4b3f9fcc9f..86d7be39bec0 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -385,7 +385,7 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r # Display cache 
nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupted, total if exists: - d = f"Scanning '{cache_path}' for images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted" + d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted" tqdm(None, desc=prefix + d, total=n, initial=n) # display cache results assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {help_url}' @@ -485,7 +485,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): nc += 1 print(f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}') - pbar.desc = f"{prefix}Scanning '{path.parent / path.stem}' for images and labels... " \ + pbar.desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels... " \ f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted" if nf == 0: diff --git a/utils/general.py b/utils/general.py index e1c14bdaa4b3..621df64c6cf1 100755 --- a/utils/general.py +++ b/utils/general.py @@ -79,7 +79,7 @@ def check_git_status(): f"Use 'git pull' to update or 'git clone {url}' to download latest." 
else: s = f'up to date with {url} ✅' - print(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) + print(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe except Exception as e: print(e) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 806d29470e55..8f3538ab152a 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -1,8 +1,8 @@ # PyTorch utils - import logging import math import os +import platform import subprocess import time from contextlib import contextmanager @@ -53,7 +53,7 @@ def git_describe(): def select_device(device='', batch_size=None): # device = 'cpu' or '0' or '0,1,2,3' - s = f'YOLOv5 {git_describe()} torch {torch.__version__} ' # string + s = f'YOLOv5 🚀 {git_describe()} torch {torch.__version__} ' # string cpu = device.lower() == 'cpu' if cpu: os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False @@ -73,7 +73,7 @@ def select_device(device='', batch_size=None): else: s += 'CPU\n' - logger.info(s) # skip a line + logger.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe return torch.device('cuda:0' if cuda else 'cpu') From ed2c74218d6d46605cc5fa68ce9bd6ece213abe4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 14 Mar 2021 23:32:39 -0700 Subject: [PATCH 118/254] Created using Colaboratory --- tutorial.ipynb | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index b678e4bec9c2..c710685b7e75 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -605,7 +605,7 @@ "output_type": "stream", "text": [ "Namespace(agnostic_nms=False, augment=False, classes=None, conf_thres=0.25, device='', exist_ok=False, img_size=640, iou_thres=0.45, name='exp', project='runs/detect', save_conf=False, save_txt=False, source='data/images/', update=False, view_img=False, weights=['yolov5s.pt'])\n", - "YOLOv5 v4.0-132-gf813f6d torch 1.8.0+cu101 CUDA:0 (Tesla 
V100-SXM2-16GB, 16160.5MB)\n", + "YOLOv5 🚀 v4.0-137-g9b11f0c torch 1.8.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", "\n", "Fusing layers... \n", "Model Summary: 224 layers, 7266973 parameters, 0 gradients, 17.0 GFLOPS\n", @@ -735,7 +735,7 @@ "output_type": "stream", "text": [ "Namespace(augment=False, batch_size=32, conf_thres=0.001, data='./data/coco.yaml', device='', exist_ok=False, img_size=640, iou_thres=0.65, name='exp', project='runs/test', save_conf=False, save_hybrid=False, save_json=True, save_txt=False, single_cls=False, task='val', verbose=False, weights=['yolov5x.pt'])\n", - "YOLOv5 v4.0-133-g20d879d torch 1.8.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", + "YOLOv5 🚀 v4.0-137-g9b11f0c torch 1.8.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", "\n", "Downloading https://github.com/ultralytics/yolov5/releases/download/v4.0/yolov5x.pt to yolov5x.pt...\n", "100% 168M/168M [00:02<00:00, 59.1MB/s]\n", @@ -936,7 +936,7 @@ "output_type": "stream", "text": [ "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", - "YOLOv5 v4.0-133-g20d879d torch 1.8.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", + "YOLOv5 🚀 v4.0-137-g9b11f0c torch 1.8.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", "\n", "Namespace(adam=False, batch_size=16, bucket='', cache_images=True, cfg='', data='./data/coco128.yaml', device='', entity=None, epochs=3, evolve=False, exist_ok=False, global_rank=-1, hyp='data/hyp.scratch.yaml', image_weights=False, img_size=[640, 640], linear_lr=False, local_rank=-1, log_artifacts=False, log_imgs=16, multi_scale=False, name='exp', noautoanchor=False, nosave=True, notest=False, project='runs/train', quad=False, rect=False, resume=False, save_dir='runs/train/exp', single_cls=False, sync_bn=False, total_batch_size=16, weights='yolov5s.pt', workers=8, world_size=1)\n", "\u001b[34m\u001b[1mwandb: \u001b[0mInstall Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)\n", 
From e8fc97aa3891f05812d7dfff90ca66d3481bda2c Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Tue, 23 Mar 2021 05:14:50 +0530 Subject: [PATCH 119/254] Improved W&B integration (#2125) * Init Commit * new wandb integration * Update * Use data_dict in test * Updates * Update: scope of log_img * Update: scope of log_img * Update * Update: Fix logging conditions * Add tqdm bar, support for .txt dataset format * Improve Result table Logger * Init Commit * new wandb integration * Update * Use data_dict in test * Updates * Update: scope of log_img * Update: scope of log_img * Update * Update: Fix logging conditions * Add tqdm bar, support for .txt dataset format * Improve Result table Logger * Add dataset creation in training script * Change scope: self.wandb_run * Add wandb-artifact:// natively you can now use --resume with wandb run links * Add suuport for logging dataset while training * Cleanup * Fix: Merge conflict * Fix: CI tests * Automatically use wandb config * Fix: Resume * Fix: CI * Enhance: Using val_table * More resume enhancement * FIX : CI * Add alias * Get useful opt config data * train.py cleanup * Cleanup train.py * more cleanup * Cleanup| CI fix * Reformat using PEP8 * FIX:CI * rebase * remove uneccesary changes * remove uneccesary changes * remove uneccesary changes * remove unecessary chage from test.py * FIX: resume from local checkpoint * FIX:resume * FIX:resume * Reformat * Performance improvement * Fix local resume * Fix local resume * FIX:CI * Fix: CI * Imporve image logging * (:(:Redo CI tests:):) * Remember epochs when resuming * Remember epochs when resuming * Update DDP location Potential fix for #2405 * PEP8 reformat * 0.25 confidence threshold * reset train.py plots syntax to previous * reset epochs completed syntax to previous * reset space to previous * remove brackets * reset comment to previous * Update: is_coco check, remove unused code * Remove redundant print statement * Remove wandb imports * remove dsviz logger from test.py * 
Remove redundant change from test.py * remove redundant changes from train.py * reformat and improvements * Fix typo * Add tqdm tqdm progress when scanning files, naming improvements Co-authored-by: Glenn Jocher --- models/common.py | 2 +- test.py | 49 +++--- train.py | 116 +++++++------ utils/wandb_logging/log_dataset.py | 16 +- utils/wandb_logging/wandb_utils.py | 267 +++++++++++++++++++++-------- 5 files changed, 282 insertions(+), 168 deletions(-) diff --git a/models/common.py b/models/common.py index 464d639a1f0b..83cc8b5ce27b 100644 --- a/models/common.py +++ b/models/common.py @@ -278,7 +278,7 @@ def display(self, pprint=False, show=False, save=False, render=False, save_dir=' def print(self): self.display(pprint=True) # print results print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' % - tuple(self.t)) + tuple(self.t)) def show(self): self.display(show=True) # show results diff --git a/test.py b/test.py index 39e0992264ec..61d6965f7414 100644 --- a/test.py +++ b/test.py @@ -35,8 +35,9 @@ def test(data, save_hybrid=False, # for hybrid auto-labelling save_conf=False, # save auto-label confidences plots=True, - log_imgs=0, # number of logged images - compute_loss=None): + wandb_logger=None, + compute_loss=None, + is_coco=False): # Initialize/load model and set device training = model is not None if training: # called by train.py @@ -66,21 +67,19 @@ def test(data, # Configure model.eval() - is_coco = data.endswith('coco.yaml') # is COCO dataset - with open(data) as f: - data = yaml.load(f, Loader=yaml.SafeLoader) # model dict + if isinstance(data, str): + is_coco = data.endswith('coco.yaml') + with open(data) as f: + data = yaml.load(f, Loader=yaml.SafeLoader) check_dataset(data) # check nc = 1 if single_cls else int(data['nc']) # number of classes iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95 niou = iouv.numel() # Logging - log_imgs, wandb = min(log_imgs, 100), None # ceil - try: - 
import wandb # Weights & Biases - except ImportError: - log_imgs = 0 - + log_imgs = 0 + if wandb_logger and wandb_logger.wandb: + log_imgs = min(wandb_logger.log_imgs, 100) # Dataloader if not training: if device.type != 'cpu': @@ -147,15 +146,17 @@ def test(data, with open(save_dir / 'labels' / (path.stem + '.txt'), 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') - # W&B logging - if plots and len(wandb_images) < log_imgs: - box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, - "class_id": int(cls), - "box_caption": "%s %.3f" % (names[cls], conf), - "scores": {"class_score": conf}, - "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()] - boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space - wandb_images.append(wandb.Image(img[si], boxes=boxes, caption=path.name)) + # W&B logging - Media Panel Plots + if len(wandb_images) < log_imgs and wandb_logger.current_epoch > 0: # Check for test operation + if wandb_logger.current_epoch % wandb_logger.bbox_interval == 0: + box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, + "class_id": int(cls), + "box_caption": "%s %.3f" % (names[cls], conf), + "scores": {"class_score": conf}, + "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()] + boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space + wandb_images.append(wandb_logger.wandb.Image(img[si], boxes=boxes, caption=path.name)) + wandb_logger.log_training_progress(predn, path, names) # logs dsviz tables # Append to pycocotools JSON dictionary if save_json: @@ -239,9 +240,11 @@ def test(data, # Plots if plots: confusion_matrix.plot(save_dir=save_dir, names=list(names.values())) - if wandb and wandb.run: - val_batches = [wandb.Image(str(f), caption=f.name) for f in sorted(save_dir.glob('test*.jpg'))] - wandb.log({"Images": wandb_images, "Validation": val_batches}, commit=False) + if wandb_logger and 
wandb_logger.wandb: + val_batches = [wandb_logger.wandb.Image(str(f), caption=f.name) for f in sorted(save_dir.glob('test*.jpg'))] + wandb_logger.log({"Validation": val_batches}) + if wandb_images: + wandb_logger.log({"Bounding Box Debugger/Images": wandb_images}) # Save JSON if save_json and len(jdict): diff --git a/train.py b/train.py index 005fdf60c021..62a72375c7a3 100644 --- a/train.py +++ b/train.py @@ -1,3 +1,4 @@ + import argparse import logging import math @@ -33,11 +34,12 @@ from utils.loss import ComputeLoss from utils.plots import plot_images, plot_labels, plot_results, plot_evolution from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, is_parallel +from utils.wandb_logging.wandb_utils import WandbLogger, resume_and_get_id, check_wandb_config_file logger = logging.getLogger(__name__) -def train(hyp, opt, device, tb_writer=None, wandb=None): +def train(hyp, opt, device, tb_writer=None): logger.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items())) save_dir, epochs, batch_size, total_batch_size, weights, rank = \ Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank @@ -61,10 +63,17 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): init_seeds(2 + rank) with open(opt.data) as f: data_dict = yaml.load(f, Loader=yaml.SafeLoader) # data dict - with torch_distributed_zero_first(rank): - check_dataset(data_dict) # check - train_path = data_dict['train'] - test_path = data_dict['val'] + is_coco = opt.data.endswith('coco.yaml') + + # Logging- Doing this before checking the dataset. 
Might update data_dict + if rank in [-1, 0]: + opt.hyp = hyp # add hyperparameters + run_id = torch.load(weights).get('wandb_id') if weights.endswith('.pt') and os.path.isfile(weights) else None + wandb_logger = WandbLogger(opt, Path(opt.save_dir).stem, run_id, data_dict) + data_dict = wandb_logger.data_dict + if wandb_logger.wandb: + weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp # WandbLogger might update weights, epochs if resuming + loggers = {'wandb': wandb_logger.wandb} # loggers dict nc = 1 if opt.single_cls else int(data_dict['nc']) # number of classes names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data) # check @@ -83,6 +92,10 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights)) # report else: model = Model(opt.cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create + with torch_distributed_zero_first(rank): + check_dataset(data_dict) # check + train_path = data_dict['train'] + test_path = data_dict['val'] # Freeze freeze = [] # parameter names to freeze (full or partial) @@ -126,16 +139,6 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) # plot_lr_scheduler(optimizer, scheduler, epochs) - # Logging - if rank in [-1, 0] and wandb and wandb.run is None: - opt.hyp = hyp # add hyperparameters - wandb_run = wandb.init(config=opt, resume="allow", - project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem, - name=save_dir.stem, - entity=opt.entity, - id=ckpt.get('wandb_id') if 'ckpt' in locals() else None) - loggers = {'wandb': wandb} # loggers dict - # EMA ema = ModelEMA(model) if rank in [-1, 0] else None @@ -326,9 +329,9 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): # if tb_writer: # 
tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch) # tb_writer.add_graph(model, imgs) # add model to tensorboard - elif plots and ni == 10 and wandb: - wandb.log({"Mosaics": [wandb.Image(str(x), caption=x.name) for x in save_dir.glob('train*.jpg') - if x.exists()]}, commit=False) + elif plots and ni == 10 and wandb_logger.wandb: + wandb_logger.log({"Mosaics": [wandb_logger.wandb.Image(str(x), caption=x.name) for x in + save_dir.glob('train*.jpg') if x.exists()]}) # end batch ------------------------------------------------------------------------------------------------ # end epoch ---------------------------------------------------------------------------------------------------- @@ -343,8 +346,9 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride', 'class_weights']) final_epoch = epoch + 1 == epochs if not opt.notest or final_epoch: # Calculate mAP - results, maps, times = test.test(opt.data, - batch_size=batch_size * 2, + wandb_logger.current_epoch = epoch + 1 + results, maps, times = test.test(data_dict, + batch_size=total_batch_size, imgsz=imgsz_test, model=ema.ema, single_cls=opt.single_cls, @@ -352,8 +356,9 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): save_dir=save_dir, verbose=nc < 50 and final_epoch, plots=plots and final_epoch, - log_imgs=opt.log_imgs if wandb else 0, - compute_loss=compute_loss) + wandb_logger=wandb_logger, + compute_loss=compute_loss, + is_coco=is_coco) # Write with open(results_file, 'a') as f: @@ -369,8 +374,8 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags): if tb_writer: tb_writer.add_scalar(tag, x, epoch) # tensorboard - if wandb: - wandb.log({tag: x}, step=epoch, commit=tag == tags[-1]) # W&B + if wandb_logger.wandb: + wandb_logger.log({tag: x}) # W&B # Update best mAP fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of 
[P, R, mAP@.5, mAP@.5-.95] @@ -386,36 +391,29 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): 'ema': deepcopy(ema.ema).half(), 'updates': ema.updates, 'optimizer': optimizer.state_dict(), - 'wandb_id': wandb_run.id if wandb else None} + 'wandb_id': wandb_logger.wandb_run.id if wandb_logger.wandb else None} # Save last, best and delete torch.save(ckpt, last) if best_fitness == fi: torch.save(ckpt, best) + if wandb_logger.wandb: + if ((epoch + 1) % opt.save_period == 0 and not final_epoch) and opt.save_period != -1: + wandb_logger.log_model( + last.parent, opt, epoch, fi, best_model=best_fitness == fi) del ckpt - + wandb_logger.end_epoch(best_result=best_fitness == fi) + # end epoch ---------------------------------------------------------------------------------------------------- # end training - if rank in [-1, 0]: - # Strip optimizers - final = best if best.exists() else last # final model - for f in last, best: - if f.exists(): - strip_optimizer(f) - if opt.bucket: - os.system(f'gsutil cp {final} gs://{opt.bucket}/weights') # upload - # Plots if plots: plot_results(save_dir=save_dir) # save as results.png - if wandb: + if wandb_logger.wandb: files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]] - wandb.log({"Results": [wandb.Image(str(save_dir / f), caption=f) for f in files - if (save_dir / f).exists()]}) - if opt.log_artifacts: - wandb.log_artifact(artifact_or_path=str(final), type='model', name=save_dir.stem) - + wandb_logger.log({"Results": [wandb_logger.wandb.Image(str(save_dir / f), caption=f) for f in files + if (save_dir / f).exists()]}) # Test best.pt logger.info('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600)) if opt.data.endswith('coco.yaml') and nc == 80: # if COCO @@ -430,13 +428,24 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): dataloader=testloader, save_dir=save_dir, save_json=True, - plots=False) + plots=False, + is_coco=is_coco) 
+ # Strip optimizers + final = best if best.exists() else last # final model + for f in last, best: + if f.exists(): + strip_optimizer(f) # strip optimizers + if opt.bucket: + os.system(f'gsutil cp {final} gs://{opt.bucket}/weights') # upload + if wandb_logger.wandb: # Log the stripped model + wandb_logger.wandb.log_artifact(str(final), type='model', + name='run_' + wandb_logger.wandb_run.id + '_model', + aliases=['last', 'best', 'stripped']) else: dist.destroy_process_group() - - wandb.run.finish() if wandb and wandb.run else None torch.cuda.empty_cache() + wandb_logger.finish_run() return results @@ -464,8 +473,6 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer') parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode') parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify') - parser.add_argument('--log-imgs', type=int, default=16, help='number of images for W&B logging, max 100') - parser.add_argument('--log-artifacts', action='store_true', help='log artifacts, i.e. 
final trained model') parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers') parser.add_argument('--project', default='runs/train', help='save to project/name') parser.add_argument('--entity', default=None, help='W&B entity') @@ -473,6 +480,10 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') parser.add_argument('--quad', action='store_true', help='quad dataloader') parser.add_argument('--linear-lr', action='store_true', help='linear LR') + parser.add_argument('--upload_dataset', action='store_true', help='Upload dataset as W&B artifact table') + parser.add_argument('--bbox_interval', type=int, default=-1, help='Set bounding-box image logging interval for W&B') + parser.add_argument('--save_period', type=int, default=-1, help='Log model after every "save_period" epoch') + parser.add_argument('--artifact_alias', type=str, default="latest", help='version of dataset artifact to be used') opt = parser.parse_args() # Set DDP variables @@ -484,7 +495,8 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): check_requirements() # Resume - if opt.resume: # resume an interrupted run + wandb_run = resume_and_get_id(opt) + if opt.resume and not wandb_run: # resume an interrupted run ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run() # specified or most recent path assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist' apriori = opt.global_rank, opt.local_rank @@ -517,18 +529,12 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): # Train logger.info(opt) - try: - import wandb - except ImportError: - wandb = None - prefix = colorstr('wandb: ') - logger.info(f"{prefix}Install Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)") if not opt.evolve: tb_writer = None # init loggers if opt.global_rank in [-1, 0]: logger.info(f'Start Tensorboard with 
"tensorboard --logdir {opt.project}", view at http://localhost:6006/') tb_writer = SummaryWriter(opt.save_dir) # Tensorboard - train(hyp, opt, device, tb_writer, wandb) + train(hyp, opt, device, tb_writer) # Evolve hyperparameters (optional) else: @@ -602,7 +608,7 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): hyp[k] = round(hyp[k], 5) # significant digits # Train mutation - results = train(hyp.copy(), opt, device, wandb=wandb) + results = train(hyp.copy(), opt, device) # Write mutation results print_mutation(hyp.copy(), results, yaml_file, opt.bucket) diff --git a/utils/wandb_logging/log_dataset.py b/utils/wandb_logging/log_dataset.py index d790a9ce721e..97e68425cddd 100644 --- a/utils/wandb_logging/log_dataset.py +++ b/utils/wandb_logging/log_dataset.py @@ -12,20 +12,7 @@ def create_dataset_artifact(opt): with open(opt.data) as f: data = yaml.load(f, Loader=yaml.SafeLoader) # data dict - logger = WandbLogger(opt, '', None, data, job_type='create_dataset') - nc, names = (1, ['item']) if opt.single_cls else (int(data['nc']), data['names']) - names = {k: v for k, v in enumerate(names)} # to index dictionary - logger.log_dataset_artifact(LoadImagesAndLabels(data['train']), names, name='train') # trainset - logger.log_dataset_artifact(LoadImagesAndLabels(data['val']), names, name='val') # valset - - # Update data.yaml with artifact links - data['train'] = WANDB_ARTIFACT_PREFIX + str(Path(opt.project) / 'train') - data['val'] = WANDB_ARTIFACT_PREFIX + str(Path(opt.project) / 'val') - path = opt.data if opt.overwrite_config else opt.data.replace('.', '_wandb.') # updated data.yaml path - data.pop('download', None) # download via artifact instead of predefined field 'download:' - with open(path, 'w') as f: - yaml.dump(data, f) - print("New Config file => ", path) + logger = WandbLogger(opt, '', None, data, job_type='Dataset Creation') if __name__ == '__main__': @@ -33,7 +20,6 @@ def create_dataset_artifact(opt): parser.add_argument('--data', type=str, 
default='data/coco128.yaml', help='data.yaml path') parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset') parser.add_argument('--project', type=str, default='YOLOv5', help='name of W&B Project') - parser.add_argument('--overwrite_config', action='store_true', help='overwrite data.yaml') opt = parser.parse_args() create_dataset_artifact(opt) diff --git a/utils/wandb_logging/wandb_utils.py b/utils/wandb_logging/wandb_utils.py index 264cd4840e3c..c9a32f5b6026 100644 --- a/utils/wandb_logging/wandb_utils.py +++ b/utils/wandb_logging/wandb_utils.py @@ -1,13 +1,18 @@ +import argparse import json +import os import shutil import sys +import torch +import yaml from datetime import datetime from pathlib import Path - -import torch +from tqdm import tqdm sys.path.append(str(Path(__file__).parent.parent.parent)) # add utils/ to path -from utils.general import colorstr, xywh2xyxy +from utils.datasets import LoadImagesAndLabels +from utils.datasets import img2label_paths +from utils.general import colorstr, xywh2xyxy, check_dataset try: import wandb @@ -22,87 +27,183 @@ def remove_prefix(from_string, prefix): return from_string[len(prefix):] +def check_wandb_config_file(data_config_file): + wandb_config = '_wandb.'.join(data_config_file.rsplit('.', 1)) # updated data.yaml path + if Path(wandb_config).is_file(): + return wandb_config + return data_config_file + + +def resume_and_get_id(opt): + # It's more elegant to stick to 1 wandb.init call, but as useful config data is overwritten in the WandbLogger's wandb.init call + if isinstance(opt.resume, str): + if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): + run_path = Path(remove_prefix(opt.resume, WANDB_ARTIFACT_PREFIX)) + run_id = run_path.stem + project = run_path.parent.stem + model_artifact_name = WANDB_ARTIFACT_PREFIX + 'run_' + run_id + '_model' + assert wandb, 'install wandb to resume wandb runs' + # Resume wandb-artifact:// runs here| workaround for not overwriting wandb.config + 
run = wandb.init(id=run_id, project=project, resume='allow') + opt.resume = model_artifact_name + return run + return None + + class WandbLogger(): def __init__(self, opt, name, run_id, data_dict, job_type='Training'): - self.wandb = wandb - self.wandb_run = wandb.init(config=opt, resume="allow", - project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem, - name=name, - job_type=job_type, - id=run_id) if self.wandb else None - - if job_type == 'Training': - self.setup_training(opt, data_dict) - if opt.bbox_interval == -1: - opt.bbox_interval = (opt.epochs // 10) if opt.epochs > 10 else opt.epochs - if opt.save_period == -1: - opt.save_period = (opt.epochs // 10) if opt.epochs > 10 else opt.epochs + # Pre-training routine -- + self.job_type = job_type + self.wandb, self.wandb_run, self.data_dict = wandb, None if not wandb else wandb.run, data_dict + if self.wandb: + self.wandb_run = wandb.init(config=opt, + resume="allow", + project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem, + name=name, + job_type=job_type, + id=run_id) if not wandb.run else wandb.run + if self.job_type == 'Training': + if not opt.resume: + wandb_data_dict = self.check_and_upload_dataset(opt) if opt.upload_dataset else data_dict + # Info useful for resuming from artifacts + self.wandb_run.config.opt = vars(opt) + self.wandb_run.config.data_dict = wandb_data_dict + self.data_dict = self.setup_training(opt, data_dict) + if self.job_type == 'Dataset Creation': + self.data_dict = self.check_and_upload_dataset(opt) + + def check_and_upload_dataset(self, opt): + assert wandb, 'Install wandb to upload dataset' + check_dataset(self.data_dict) + config_path = self.log_dataset_artifact(opt.data, + opt.single_cls, + 'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem) + print("Created dataset config file ", config_path) + with open(config_path) as f: + wandb_data_dict = yaml.load(f, Loader=yaml.SafeLoader) + return wandb_data_dict def 
setup_training(self, opt, data_dict): - self.log_dict = {} - self.train_artifact_path, self.trainset_artifact = \ - self.download_dataset_artifact(data_dict['train'], opt.artifact_alias) - self.test_artifact_path, self.testset_artifact = \ - self.download_dataset_artifact(data_dict['val'], opt.artifact_alias) - self.result_artifact, self.result_table, self.weights = None, None, None - if self.train_artifact_path is not None: - train_path = Path(self.train_artifact_path) / 'data/images/' - data_dict['train'] = str(train_path) - if self.test_artifact_path is not None: - test_path = Path(self.test_artifact_path) / 'data/images/' - data_dict['val'] = str(test_path) + self.log_dict, self.current_epoch, self.log_imgs = {}, 0, 16 # Logging Constants + self.bbox_interval = opt.bbox_interval + if isinstance(opt.resume, str): + modeldir, _ = self.download_model_artifact(opt) + if modeldir: + self.weights = Path(modeldir) / "last.pt" + config = self.wandb_run.config + opt.weights, opt.save_period, opt.batch_size, opt.bbox_interval, opt.epochs, opt.hyp = str( + self.weights), config.save_period, config.total_batch_size, config.bbox_interval, config.epochs, \ + config.opt['hyp'] + data_dict = dict(self.wandb_run.config.data_dict) # eliminates the need for config file to resume + if 'val_artifact' not in self.__dict__: # If --upload_dataset is set, use the existing artifact, don't download + self.train_artifact_path, self.train_artifact = self.download_dataset_artifact(data_dict.get('train'), + opt.artifact_alias) + self.val_artifact_path, self.val_artifact = self.download_dataset_artifact(data_dict.get('val'), + opt.artifact_alias) + self.result_artifact, self.result_table, self.val_table, self.weights = None, None, None, None + if self.train_artifact_path is not None: + train_path = Path(self.train_artifact_path) / 'data/images/' + data_dict['train'] = str(train_path) + if self.val_artifact_path is not None: + val_path = Path(self.val_artifact_path) / 'data/images/' + 
data_dict['val'] = str(val_path) + self.val_table = self.val_artifact.get("val") + self.map_val_table_path() + if self.val_artifact is not None: self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") self.result_table = wandb.Table(["epoch", "id", "prediction", "avg_confidence"]) - if opt.resume_from_artifact: - modeldir, _ = self.download_model_artifact(opt.resume_from_artifact) - if modeldir: - self.weights = Path(modeldir) / "best.pt" - opt.weights = self.weights + if opt.bbox_interval == -1: + self.bbox_interval = opt.bbox_interval = (opt.epochs // 10) if opt.epochs > 10 else 1 + return data_dict def download_dataset_artifact(self, path, alias): if path.startswith(WANDB_ARTIFACT_PREFIX): dataset_artifact = wandb.use_artifact(remove_prefix(path, WANDB_ARTIFACT_PREFIX) + ":" + alias) assert dataset_artifact is not None, "'Error: W&B dataset artifact doesn\'t exist'" datadir = dataset_artifact.download() - labels_zip = Path(datadir) / "data/labels.zip" - shutil.unpack_archive(labels_zip, Path(datadir) / 'data/labels', 'zip') - print("Downloaded dataset to : ", datadir) return datadir, dataset_artifact return None, None - def download_model_artifact(self, name): - model_artifact = wandb.use_artifact(name + ":latest") - assert model_artifact is not None, 'Error: W&B model artifact doesn\'t exist' - modeldir = model_artifact.download() - print("Downloaded model to : ", modeldir) - return modeldir, model_artifact + def download_model_artifact(self, opt): + if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): + model_artifact = wandb.use_artifact(remove_prefix(opt.resume, WANDB_ARTIFACT_PREFIX) + ":latest") + assert model_artifact is not None, 'Error: W&B model artifact doesn\'t exist' + modeldir = model_artifact.download() + epochs_trained = model_artifact.metadata.get('epochs_trained') + total_epochs = model_artifact.metadata.get('total_epochs') + assert epochs_trained < total_epochs, 'training to %g epochs is finished, nothing to 
resume.' % ( + total_epochs) + return modeldir, model_artifact + return None, None - def log_model(self, path, opt, epoch): - datetime_suffix = datetime.today().strftime('%Y-%m-%d-%H-%M-%S') + def log_model(self, path, opt, epoch, fitness_score, best_model=False): model_artifact = wandb.Artifact('run_' + wandb.run.id + '_model', type='model', metadata={ 'original_url': str(path), - 'epoch': epoch + 1, + 'epochs_trained': epoch + 1, 'save period': opt.save_period, 'project': opt.project, - 'datetime': datetime_suffix + 'total_epochs': opt.epochs, + 'fitness_score': fitness_score }) model_artifact.add_file(str(path / 'last.pt'), name='last.pt') - model_artifact.add_file(str(path / 'best.pt'), name='best.pt') - wandb.log_artifact(model_artifact) + wandb.log_artifact(model_artifact, + aliases=['latest', 'epoch ' + str(self.current_epoch), 'best' if best_model else '']) print("Saving model artifact on epoch ", epoch + 1) - def log_dataset_artifact(self, dataset, class_to_id, name='dataset'): + def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=False): + with open(data_file) as f: + data = yaml.load(f, Loader=yaml.SafeLoader) # data dict + nc, names = (1, ['item']) if single_cls else (int(data['nc']), data['names']) + names = {k: v for k, v in enumerate(names)} # to index dictionary + self.train_artifact = self.create_dataset_table(LoadImagesAndLabels( + data['train']), names, name='train') if data.get('train') else None + self.val_artifact = self.create_dataset_table(LoadImagesAndLabels( + data['val']), names, name='val') if data.get('val') else None + if data.get('train'): + data['train'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'train') + if data.get('val'): + data['val'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'val') + path = data_file if overwrite_config else '_wandb.'.join(data_file.rsplit('.', 1)) # updated data.yaml path + data.pop('download', None) + with open(path, 'w') as f: + yaml.dump(data, f) + + if self.job_type == 
'Training': # builds correct artifact pipeline graph + self.wandb_run.use_artifact(self.val_artifact) + self.wandb_run.use_artifact(self.train_artifact) + self.val_artifact.wait() + self.val_table = self.val_artifact.get('val') + self.map_val_table_path() + else: + self.wandb_run.log_artifact(self.train_artifact) + self.wandb_run.log_artifact(self.val_artifact) + return path + + def map_val_table_path(self): + self.val_table_map = {} + print("Mapping dataset") + for i, data in enumerate(tqdm(self.val_table.data)): + self.val_table_map[data[3]] = data[0] + + def create_dataset_table(self, dataset, class_to_id, name='dataset'): + # TODO: Explore multiprocessing to slpit this loop parallely| This is essential for speeding up the the logging artifact = wandb.Artifact(name=name, type="dataset") - image_path = dataset.path - artifact.add_dir(image_path, name='data/images') - table = wandb.Table(columns=["id", "train_image", "Classes"]) + for img_file in tqdm([dataset.path]) if Path(dataset.path).is_dir() else tqdm(dataset.img_files): + if Path(img_file).is_dir(): + artifact.add_dir(img_file, name='data/images') + labels_path = 'labels'.join(dataset.path.rsplit('images', 1)) + artifact.add_dir(labels_path, name='data/labels') + else: + artifact.add_file(img_file, name='data/images/' + Path(img_file).name) + label_file = Path(img2label_paths([img_file])[0]) + artifact.add_file(str(label_file), + name='data/labels/' + label_file.name) if label_file.exists() else None + table = wandb.Table(columns=["id", "train_image", "Classes", "name"]) class_set = wandb.Classes([{'id': id, 'name': name} for id, name in class_to_id.items()]) - for si, (img, labels, paths, shapes) in enumerate(dataset): + for si, (img, labels, paths, shapes) in enumerate(tqdm(dataset)): height, width = shapes[0] - labels[:, 2:] = (xywh2xyxy(labels[:, 2:].view(-1, 4))) - labels[:, 2:] *= torch.Tensor([width, height, width, height]) - box_data = [] - img_classes = {} + labels[:, 2:] = (xywh2xyxy(labels[:, 
2:].view(-1, 4))) * torch.Tensor([width, height, width, height]) + box_data, img_classes = [], {} for cls, *xyxy in labels[:, 1:].tolist(): cls = int(cls) box_data.append({"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, @@ -112,34 +213,52 @@ def log_dataset_artifact(self, dataset, class_to_id, name='dataset'): "domain": "pixel"}) img_classes[cls] = class_to_id[cls] boxes = {"ground_truth": {"box_data": box_data, "class_labels": class_to_id}} # inference-space - table.add_data(si, wandb.Image(paths, classes=class_set, boxes=boxes), json.dumps(img_classes)) + table.add_data(si, wandb.Image(paths, classes=class_set, boxes=boxes), json.dumps(img_classes), + Path(paths).name) artifact.add(table, name) - labels_path = 'labels'.join(image_path.rsplit('images', 1)) - zip_path = Path(labels_path).parent / (name + '_labels.zip') - if not zip_path.is_file(): # make_archive won't check if file exists - shutil.make_archive(zip_path.with_suffix(''), 'zip', labels_path) - artifact.add_file(str(zip_path), name='data/labels.zip') - wandb.log_artifact(artifact) - print("Saving data to W&B...") + return artifact + + def log_training_progress(self, predn, path, names): + if self.val_table and self.result_table: + class_set = wandb.Classes([{'id': id, 'name': name} for id, name in names.items()]) + box_data = [] + total_conf = 0 + for *xyxy, conf, cls in predn.tolist(): + if conf >= 0.25: + box_data.append( + {"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, + "class_id": int(cls), + "box_caption": "%s %.3f" % (names[cls], conf), + "scores": {"class_score": conf}, + "domain": "pixel"}) + total_conf = total_conf + conf + boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space + id = self.val_table_map[Path(path).name] + self.result_table.add_data(self.current_epoch, + id, + wandb.Image(self.val_table.data[id][1], boxes=boxes, classes=class_set), + total_conf / max(1, len(box_data)) + ) 
def log(self, log_dict): if self.wandb_run: for key, value in log_dict.items(): self.log_dict[key] = value - def end_epoch(self): - if self.wandb_run and self.log_dict: + def end_epoch(self, best_result=False): + if self.wandb_run: wandb.log(self.log_dict) - self.log_dict = {} + self.log_dict = {} + if self.result_artifact: + train_results = wandb.JoinedTable(self.val_table, self.result_table, "id") + self.result_artifact.add(train_results, 'result') + wandb.log_artifact(self.result_artifact, aliases=['latest', 'epoch ' + str(self.current_epoch), + ('best' if best_result else '')]) + self.result_table = wandb.Table(["epoch", "id", "prediction", "avg_confidence"]) + self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") def finish_run(self): if self.wandb_run: - if self.result_artifact: - print("Add Training Progress Artifact") - self.result_artifact.add(self.result_table, 'result') - train_results = wandb.JoinedTable(self.testset_artifact.get("val"), self.result_table, "id") - self.result_artifact.add(train_results, 'joined_result') - wandb.log_artifact(self.result_artifact) if self.log_dict: wandb.log(self.log_dict) wandb.run.finish() From 1c132a1f9426d91c18ec7eff6ab95a727344c690 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 23 Mar 2021 14:10:47 +0100 Subject: [PATCH 120/254] Update Detections() times=None (#2570) Fix for results.tolist() method breaking after YOLOv5 Hub profiling PRshttps://github.com/ultralytics/yolov5/pull/2460 https://github.com/ultralytics/yolov5/pull/2459 and --- models/common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/common.py b/models/common.py index 83cc8b5ce27b..721171393e04 100644 --- a/models/common.py +++ b/models/common.py @@ -235,7 +235,7 @@ def forward(self, imgs, size=640, augment=False, profile=False): class Detections: # detections class for YOLOv5 inference results - def __init__(self, imgs, pred, files, times, names=None, shape=None): + def 
__init__(self, imgs, pred, files, times=None, names=None, shape=None): super(Detections, self).__init__() d = pred[0].device # device gn = [torch.tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.], device=d) for im in imgs] # normalizations From 0d891c601e8178e4b9665da46d630456668b1996 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 23 Mar 2021 14:25:55 +0100 Subject: [PATCH 121/254] check_requirements() exclude pycocotools, thop (#2571) Exclude non-critical packages from dependency checks in detect.py. pycocotools and thop in particular are not required for inference. Issue first raised in https://github.com/ultralytics/yolov5/issues/1944 and also raised in https://github.com/ultralytics/yolov5/discussions/2556 --- detect.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/detect.py b/detect.py index 22bf21b4c825..c843447260ba 100644 --- a/detect.py +++ b/detect.py @@ -164,7 +164,7 @@ def detect(save_img=False): parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') opt = parser.parse_args() print(opt) - check_requirements() + check_requirements(exclude=('pycocotools', 'thop')) with torch.no_grad(): if opt.update: # update all models (to fix SourceChangeWarning) From 1bf936528018c36bcbd22e9b9f76a8c61e97d2a6 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Tue, 23 Mar 2021 21:24:34 +0530 Subject: [PATCH 122/254] W&B DDP fix (#2574) --- train.py | 8 +++++--- utils/wandb_logging/wandb_utils.py | 5 ++++- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/train.py b/train.py index 62a72375c7a3..fd2d6745ab46 100644 --- a/train.py +++ b/train.py @@ -66,14 +66,16 @@ def train(hyp, opt, device, tb_writer=None): is_coco = opt.data.endswith('coco.yaml') # Logging- Doing this before checking the dataset. 
Might update data_dict + loggers = {'wandb': None} # loggers dict if rank in [-1, 0]: opt.hyp = hyp # add hyperparameters run_id = torch.load(weights).get('wandb_id') if weights.endswith('.pt') and os.path.isfile(weights) else None wandb_logger = WandbLogger(opt, Path(opt.save_dir).stem, run_id, data_dict) + loggers['wandb'] = wandb_logger.wandb data_dict = wandb_logger.data_dict if wandb_logger.wandb: weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp # WandbLogger might update weights, epochs if resuming - loggers = {'wandb': wandb_logger.wandb} # loggers dict + nc = 1 if opt.single_cls else int(data_dict['nc']) # number of classes names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data) # check @@ -381,6 +383,7 @@ def train(hyp, opt, device, tb_writer=None): fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95] if fi > best_fitness: best_fitness = fi + wandb_logger.end_epoch(best_result=best_fitness == fi) # Save model if (not opt.nosave) or (final_epoch and not opt.evolve): # if save @@ -402,7 +405,6 @@ def train(hyp, opt, device, tb_writer=None): wandb_logger.log_model( last.parent, opt, epoch, fi, best_model=best_fitness == fi) del ckpt - wandb_logger.end_epoch(best_result=best_fitness == fi) # end epoch ---------------------------------------------------------------------------------------------------- # end training @@ -442,10 +444,10 @@ def train(hyp, opt, device, tb_writer=None): wandb_logger.wandb.log_artifact(str(final), type='model', name='run_' + wandb_logger.wandb_run.id + '_model', aliases=['last', 'best', 'stripped']) + wandb_logger.finish_run() else: dist.destroy_process_group() torch.cuda.empty_cache() - wandb_logger.finish_run() return results diff --git a/utils/wandb_logging/wandb_utils.py b/utils/wandb_logging/wandb_utils.py index 
c9a32f5b6026..d6dd256366e0 100644 --- a/utils/wandb_logging/wandb_utils.py +++ b/utils/wandb_logging/wandb_utils.py @@ -16,9 +16,9 @@ try: import wandb + from wandb import init, finish except ImportError: wandb = None - print(f"{colorstr('wandb: ')}Install Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)") WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' @@ -71,6 +71,9 @@ def __init__(self, opt, name, run_id, data_dict, job_type='Training'): self.data_dict = self.setup_training(opt, data_dict) if self.job_type == 'Dataset Creation': self.data_dict = self.check_and_upload_dataset(opt) + else: + print(f"{colorstr('wandb: ')}Install Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)") + def check_and_upload_dataset(self, opt): assert wandb, 'Install wandb to upload dataset' From 2b329b0945a69431fd8bf36668307069e6e999b1 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 24 Mar 2021 01:05:59 +0100 Subject: [PATCH 123/254] Enhanced check_requirements() with auto-install (#2575) * Update check_requirements() with auto-install This PR builds on an idea I had to automatically install missing dependencies rather than simply report an error message. YOLOv5 should now 1) display all dependency issues and not simply display the first missing dependency, and 2) attempt to install/update each missing/VersionConflict package. 
* cleanup * cleanup 2 * Check requirements.txt file exists * cleanup 3 --- utils/general.py | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/utils/general.py b/utils/general.py index 621df64c6cf1..ef89ea3a0f03 100755 --- a/utils/general.py +++ b/utils/general.py @@ -1,4 +1,4 @@ -# General utils +# YOLOv5 general utils import glob import logging @@ -86,10 +86,20 @@ def check_git_status(): def check_requirements(file='requirements.txt', exclude=()): # Check installed dependencies meet requirements - import pkg_resources - requirements = [f'{x.name}{x.specifier}' for x in pkg_resources.parse_requirements(Path(file).open()) - if x.name not in exclude] - pkg_resources.require(requirements) # DistributionNotFound or VersionConflict exception if requirements not met + import pkg_resources as pkg + prefix = colorstr('red', 'bold', 'requirements:') + file = Path(file) + if not file.exists(): + print(f"{prefix} {file.resolve()} not found, check failed.") + return + + requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(file.open()) if x.name not in exclude] + for r in requirements: + try: + pkg.require(r) + except Exception as e: # DistributionNotFound or VersionConflict if requirements not met + print(f"{prefix} {e.req} not found and is required by YOLOv5, attempting auto-install...") + print(subprocess.check_output(f"pip install '{e.req}'", shell=True).decode()) def check_img_size(img_size, s=32): From e5b0200cd250759c782207160761ca9756300065 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 24 Mar 2021 01:29:00 +0100 Subject: [PATCH 124/254] Update tensorboard>=2.4.1 (#2576) * Update tensorboard>=2.4.1 Update tensorboard version to attempt to address https://github.com/ultralytics/yolov5/issues/2573 (tensorboard logging fail in Docker image). 
* cleanup --- requirements.txt | 2 +- train.py | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/requirements.txt b/requirements.txt index cb50cf8f32e1..51de7735d301 100755 --- a/requirements.txt +++ b/requirements.txt @@ -8,12 +8,12 @@ opencv-python>=4.1.2 Pillow PyYAML>=5.3.1 scipy>=1.4.1 -tensorboard>=2.2 torch>=1.7.0 torchvision>=0.8.1 tqdm>=4.41.0 # logging ------------------------------------- +tensorboard>=2.4.1 # wandb # plotting ------------------------------------ diff --git a/train.py b/train.py index fd2d6745ab46..b9e4eea613dc 100644 --- a/train.py +++ b/train.py @@ -1,4 +1,3 @@ - import argparse import logging import math @@ -34,7 +33,7 @@ from utils.loss import ComputeLoss from utils.plots import plot_images, plot_labels, plot_results, plot_evolution from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, is_parallel -from utils.wandb_logging.wandb_utils import WandbLogger, resume_and_get_id, check_wandb_config_file +from utils.wandb_logging.wandb_utils import WandbLogger, resume_and_get_id logger = logging.getLogger(__name__) @@ -75,7 +74,7 @@ def train(hyp, opt, device, tb_writer=None): data_dict = wandb_logger.data_dict if wandb_logger.wandb: weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp # WandbLogger might update weights, epochs if resuming - + nc = 1 if opt.single_cls else int(data_dict['nc']) # number of classes names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data) # check @@ -405,7 +404,7 @@ def train(hyp, opt, device, tb_writer=None): wandb_logger.log_model( last.parent, opt, epoch, fi, best_model=best_fitness == fi) del ckpt - + # end epoch ---------------------------------------------------------------------------------------------------- # end training if rank in [-1, 0]: @@ -534,7 +533,8 @@ def train(hyp, opt, 
device, tb_writer=None): if not opt.evolve: tb_writer = None # init loggers if opt.global_rank in [-1, 0]: - logger.info(f'Start Tensorboard with "tensorboard --logdir {opt.project}", view at http://localhost:6006/') + prefix = colorstr('tensorboard: ') + logger.info(f"{prefix}Start with 'tensorboard --logdir {opt.project}', view at http://localhost:6006/") tb_writer = SummaryWriter(opt.save_dir) # Tensorboard train(hyp, opt, device, tb_writer) From 2bcc89d76225a704bb9a21c926bd28ef7847d81d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 24 Mar 2021 15:42:00 +0100 Subject: [PATCH 125/254] YOLOv5 PyTorch Hub models >> check_requirements() (#2577) * Update hubconf.py with check_requirements() Dependency checks have been missing from YOLOv5 PyTorch Hub model loading, causing errors in some cases when users are attempting to import hub models in unsupported environments. This should examine the YOLOv5 requirements.txt file and pip install any missing or version-conflict packages encountered. This is highly experimental (!), please let us know if this creates problems in your custom workflows. 
* Update hubconf.py --- hubconf.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/hubconf.py b/hubconf.py index b7b740d39c06..4b4ae04cf332 100644 --- a/hubconf.py +++ b/hubconf.py @@ -1,8 +1,8 @@ -"""File for accessing YOLOv5 via PyTorch Hub https://pytorch.org/hub/ +"""File for accessing YOLOv5 models via PyTorch Hub https://pytorch.org/hub/ultralytics_yolov5/ Usage: import torch - model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True, channels=3, classes=80) + model = torch.hub.load('ultralytics/yolov5', 'yolov5s') """ from pathlib import Path @@ -10,11 +10,12 @@ import torch from models.yolo import Model -from utils.general import set_logging +from utils.general import check_requirements, set_logging from utils.google_utils import attempt_download from utils.torch_utils import select_device dependencies = ['torch', 'yaml'] +check_requirements(exclude=('pycocotools', 'thop')) set_logging() From 9f98201dd98651a768acefdd87856c86a031ff89 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 24 Mar 2021 15:43:32 +0100 Subject: [PATCH 126/254] W&B DDP fix 2 (#2587) Revert unintentional change to test batch sizes caused by PR https://github.com/ultralytics/yolov5/pull/2125 --- train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/train.py b/train.py index b9e4eea613dc..25a9accd3be0 100644 --- a/train.py +++ b/train.py @@ -349,7 +349,7 @@ def train(hyp, opt, device, tb_writer=None): if not opt.notest or final_epoch: # Calculate mAP wandb_logger.current_epoch = epoch + 1 results, maps, times = test.test(data_dict, - batch_size=total_batch_size, + batch_size=batch_size * 2, imgsz=imgsz_test, model=ema.ema, single_cls=opt.single_cls, From 8ace1b1b992433f31721c0553287cd664f2efe6b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 24 Mar 2021 16:23:54 +0100 Subject: [PATCH 127/254] YOLOv5 PyTorch Hub models >> check_requirements() (#2588) * YOLOv5 PyTorch Hub models >> check_requirements() Update YOLOv5 
PyTorch Hub requirements.txt path to cache path. * Update hubconf.py --- hubconf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hubconf.py b/hubconf.py index 4b4ae04cf332..710882cf158f 100644 --- a/hubconf.py +++ b/hubconf.py @@ -15,7 +15,7 @@ from utils.torch_utils import select_device dependencies = ['torch', 'yaml'] -check_requirements(exclude=('pycocotools', 'thop')) +check_requirements(Path(__file__).parent / 'requirements.txt', exclude=('pycocotools', 'thop')) set_logging() From 75feeb797c4a9553f4274860c6c278f1fc628f60 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 24 Mar 2021 16:42:54 +0100 Subject: [PATCH 128/254] YOLOv5 PyTorch Hub models >> check_requirements() (#2591) Prints 'Please restart runtime or rerun command for update to take effect.' following package auto-install to inform users to restart/rerun. --- utils/general.py | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/general.py b/utils/general.py index ef89ea3a0f03..50d60c519b04 100755 --- a/utils/general.py +++ b/utils/general.py @@ -100,6 +100,7 @@ def check_requirements(file='requirements.txt', exclude=()): except Exception as e: # DistributionNotFound or VersionConflict if requirements not met print(f"{prefix} {e.req} not found and is required by YOLOv5, attempting auto-install...") print(subprocess.check_output(f"pip install '{e.req}'", shell=True).decode()) + print(f'Please restart runtime or rerun command for update to take effect.') def check_img_size(img_size, s=32): From 333ccc5b0f66c7d0aba096e3e2d9d1912db1e610 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 24 Mar 2021 17:51:39 +0100 Subject: [PATCH 129/254] YOLOv5 PyTorch Hub models >> check_requirements() (#2592) Improved user-feedback following requirements auto-update. 
--- utils/general.py | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/utils/general.py b/utils/general.py index 50d60c519b04..284146c87e10 100755 --- a/utils/general.py +++ b/utils/general.py @@ -52,6 +52,11 @@ def isdocker(): return Path('/workspace').exists() # or Path('/.dockerenv').exists() +def emojis(str=''): + # Return platform-dependent emoji-safe version of string + return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str + + def check_online(): # Check internet connectivity import socket @@ -79,7 +84,7 @@ def check_git_status(): f"Use 'git pull' to update or 'git clone {url}' to download latest." else: s = f'up to date with {url} ✅' - print(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe + print(emojis(s)) # emoji-safe except Exception as e: print(e) @@ -93,14 +98,20 @@ def check_requirements(file='requirements.txt', exclude=()): print(f"{prefix} {file.resolve()} not found, check failed.") return + n = 0 # number of packages updates requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(file.open()) if x.name not in exclude] for r in requirements: try: pkg.require(r) except Exception as e: # DistributionNotFound or VersionConflict if requirements not met - print(f"{prefix} {e.req} not found and is required by YOLOv5, attempting auto-install...") + n += 1 + print(f"{prefix} {e.req} not found and is required by YOLOv5, attempting auto-update...") print(subprocess.check_output(f"pip install '{e.req}'", shell=True).decode()) - print(f'Please restart runtime or rerun command for update to take effect.') + + if n: # if packages updated + s = f"{prefix} {n} package{'s' * (n > 1)} updated per {file.resolve()}\n" \ + f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n" + print(emojis(s)) # emoji-safe def check_img_size(img_size, s=32): From 16206692f245e713a5beb380de6dc4bed944986c Mon Sep 17 
00:00:00 2001 From: Max Kolomeychenko Date: Thu, 25 Mar 2021 02:57:34 +0300 Subject: [PATCH 130/254] Supervisely Ecosystem (#2519) guide describes YOLOv5 apps collection in Supervisely Ecosystem --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 097b2750bf49..1240f83be2a5 100755 --- a/README.md +++ b/README.md @@ -50,6 +50,7 @@ $ pip install -r requirements.txt * [Train Custom Data](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data)  🚀 RECOMMENDED * [Weights & Biases Logging](https://github.com/ultralytics/yolov5/issues/1289)  🌟 NEW +* [Supervisely Ecosystem](https://github.com/ultralytics/yolov5/issues/2518)  🌟 NEW * [Multi-GPU Training](https://github.com/ultralytics/yolov5/issues/475) * [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36)  ⭐ NEW * [ONNX and TorchScript Export](https://github.com/ultralytics/yolov5/issues/251) From ad05e37d99bd1b86f7223540ad93381b8269d75c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 25 Mar 2021 14:09:49 +0100 Subject: [PATCH 131/254] Save webcam results, add --nosave option (#2598) This updates the default detect.py behavior to automatically save all inference images/videos/webcams unless the new argument --nosave is used (python detect.py --nosave) or unless a list of streaming sources is passed (python detect.py --source streams.txt) --- detect.py | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/detect.py b/detect.py index c843447260ba..2a4d6f4550c8 100644 --- a/detect.py +++ b/detect.py @@ -17,6 +17,7 @@ def detect(save_img=False): source, weights, view_img, save_txt, imgsz = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size + save_img = not opt.nosave and not source.endswith('.txt') # save inference images webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith( ('rtsp://', 'rtmp://', 'http://')) @@ -49,7 +50,6 @@ def detect(save_img=False): cudnn.benchmark = True # set True 
to speed up constant image size inference dataset = LoadStreams(source, img_size=imgsz, stride=stride) else: - save_img = True dataset = LoadImages(source, img_size=imgsz, stride=stride) # Get names and colors @@ -124,17 +124,19 @@ def detect(save_img=False): if save_img: if dataset.mode == 'image': cv2.imwrite(save_path, im0) - else: # 'video' + else: # 'video' or 'stream' if vid_path != save_path: # new video vid_path = save_path if isinstance(vid_writer, cv2.VideoWriter): vid_writer.release() # release previous video writer - - fourcc = 'mp4v' # output video codec - fps = vid_cap.get(cv2.CAP_PROP_FPS) - w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) - h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) - vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*fourcc), fps, (w, h)) + if vid_cap: # video + fps = vid_cap.get(cv2.CAP_PROP_FPS) + w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + else: # stream + fps, w, h = 30, im0.shape[1], im0.shape[0] + save_path += '.mp4' + vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) vid_writer.write(im0) if save_txt or save_img: @@ -155,6 +157,7 @@ def detect(save_img=False): parser.add_argument('--view-img', action='store_true', help='display results') parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') + parser.add_argument('--nosave', action='store_true', help='do not save images/videos') parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3') parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') parser.add_argument('--augment', action='store_true', help='augmented inference') From d4456e43b23be03dfd5098d2a1992cd338581801 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 25 Mar 2021 15:12:49 +0100 Subject: [PATCH 
132/254] Update segment2box() comment (#2600) --- utils/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index 284146c87e10..9822582cdb86 100755 --- a/utils/general.py +++ b/utils/general.py @@ -289,7 +289,7 @@ def segment2box(segment, width=640, height=640): x, y = segment.T # segment xy inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height) x, y, = x[inside], y[inside] - return np.array([x.min(), y.min(), x.max(), y.max()]) if any(x) else np.zeros((1, 4)) # cls, xyxy + return np.array([x.min(), y.min(), x.max(), y.max()]) if any(x) else np.zeros((1, 4)) # xyxy def segments2boxes(segments): From 3bb414890a253bb1a269fb81cc275d11c8fffa72 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 25 Mar 2021 20:55:20 +0100 Subject: [PATCH 133/254] resume.py typo (#2603) --- utils/aws/resume.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/aws/resume.py b/utils/aws/resume.py index 563f22be20dc..faad8d247411 100644 --- a/utils/aws/resume.py +++ b/utils/aws/resume.py @@ -1,4 +1,4 @@ -# Resume all interrupted trainings in yolov5/ dir including DPP trainings +# Resume all interrupted trainings in yolov5/ dir including DDP trainings # Usage: $ python utils/aws/resume.py import os From fca16dc4b3b877391a9e2710b52ab78b3ee59130 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 25 Mar 2021 21:48:25 +0100 Subject: [PATCH 134/254] Remove Cython from requirements.txt (#2604) Cython should be a dependency of the remaining packages in requirements.txt, so should be installed anyway even if not a direct requirement. 
--- requirements.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 51de7735d301..fd187eb56cfe 100755 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,6 @@ # pip install -r requirements.txt # base ---------------------------------------- -Cython matplotlib>=3.2.2 numpy>=1.18.5 opencv-python>=4.1.2 From 77415a42e5975ea356393c9f1d5cff0ae8acae2c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 26 Mar 2021 01:44:00 +0100 Subject: [PATCH 135/254] Update git_describe() for remote dir usage (#2606) --- utils/torch_utils.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 8f3538ab152a..78c42b6d0c05 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -45,9 +45,10 @@ def init_torch_seeds(seed=0): def git_describe(): # return human-readable git description, i.e. v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe - if Path('.git').exists(): - return subprocess.check_output('git describe --tags --long --always', shell=True).decode('utf-8')[:-1] - else: + s = f'git -C {Path(__file__).resolve().parent} describe --tags --long --always' + try: + return subprocess.check_output(s, shell=True).decode()[:-1] + except subprocess.CalledProcessError as e: return '' From 196bf10603a1c5257852106e8f6b44011ad0256b Mon Sep 17 00:00:00 2001 From: maxupp Date: Fri, 26 Mar 2021 12:45:22 +0100 Subject: [PATCH 136/254] Add '*.mpo' to supported image formats (#2615) Co-authored-by: Max Uppenkamp --- utils/datasets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/datasets.py b/utils/datasets.py index 86d7be39bec0..dfe1dcc52971 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -26,7 +26,7 @@ # Parameters help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' -img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp'] # acceptable image suffixes +img_formats = ['bmp', 'jpg', 'jpeg', 'png', 
'tif', 'tiff', 'dng', 'webp', 'mpo'] # acceptable image suffixes vid_formats = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes logger = logging.getLogger(__name__) From 0ff5aeca6152f25b7239ff3ca72b50a56a86390b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 26 Mar 2021 12:52:47 +0100 Subject: [PATCH 137/254] Create date_modified() (#2616) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Updated device selection string with fallback for non-git directories. ```python def select_device(device='', batch_size=None): # device = 'cpu' or '0' or '0,1,2,3' s = f'YOLOv5 🚀 {git_describe() or date_modified()} torch {torch.__version__} ' # string ... ``` --- utils/torch_utils.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 78c42b6d0c05..0499da49782e 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -1,4 +1,6 @@ -# PyTorch utils +# YOLOv5 PyTorch utils + +import datetime import logging import math import os @@ -43,9 +45,15 @@ def init_torch_seeds(seed=0): cudnn.benchmark, cudnn.deterministic = True, False -def git_describe(): +def date_modified(path=__file__): + # return human-readable file modification date, i.e. '2021-3-26' + t = datetime.datetime.fromtimestamp(Path(path).stat().st_mtime) + return f'{t.year}-{t.month}-{t.day}' + + +def git_describe(path=Path(__file__).parent): # path must be a directory # return human-readable git description, i.e. 
v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe - s = f'git -C {Path(__file__).resolve().parent} describe --tags --long --always' + s = f'git -C {path} describe --tags --long --always' try: return subprocess.check_output(s, shell=True).decode()[:-1] except subprocess.CalledProcessError as e: @@ -54,7 +62,7 @@ def git_describe(): def select_device(device='', batch_size=None): # device = 'cpu' or '0' or '0,1,2,3' - s = f'YOLOv5 🚀 {git_describe()} torch {torch.__version__} ' # string + s = f'YOLOv5 🚀 {git_describe() or date_modified()} torch {torch.__version__} ' # string cpu = device.lower() == 'cpu' if cpu: os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False From a57f23d18b8e76658dd4d4f1445ac4c05a52fae7 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 26 Mar 2021 13:23:11 +0100 Subject: [PATCH 138/254] Update detections() self.t = tuple() (#2617) * Update detections() self.t = tuple() Fix multiple results.print() bug. * Update experimental.py * Update yolo.py --- models/common.py | 7 +++---- models/experimental.py | 2 +- models/yolo.py | 2 ++ 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/models/common.py b/models/common.py index 721171393e04..c6b9cda32e29 100644 --- a/models/common.py +++ b/models/common.py @@ -1,4 +1,4 @@ -# This file contains modules common to various models +# YOLOv5 common modules import math from pathlib import Path @@ -248,7 +248,7 @@ def __init__(self, imgs, pred, files, times=None, names=None, shape=None): self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized self.n = len(self.pred) - self.t = ((times[i + 1] - times[i]) * 1000 / self.n for i in range(3)) # timestamps (ms) + self.t = tuple((times[i + 1] - times[i]) * 1000 / self.n for i in range(3)) # timestamps (ms) self.s = shape # inference BCHW shape def display(self, pprint=False, show=False, save=False, render=False, save_dir=''): @@ -277,8 
+277,7 @@ def display(self, pprint=False, show=False, save=False, render=False, save_dir=' def print(self): self.display(pprint=True) # print results - print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' % - tuple(self.t)) + print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' % self.t) def show(self): self.display(show=True) # show results diff --git a/models/experimental.py b/models/experimental.py index d79052314f9b..548353c93be0 100644 --- a/models/experimental.py +++ b/models/experimental.py @@ -1,4 +1,4 @@ -# This file contains experimental modules +# YOLOv5 experimental modules import numpy as np import torch diff --git a/models/yolo.py b/models/yolo.py index a047fef397ee..e5c676dae558 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -1,3 +1,5 @@ +# YOLOv5 YOLO-specific modules + import argparse import logging import sys From 8f6e447729e34a46fdbe9552fcfac705b82deac5 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Sat, 27 Mar 2021 01:17:11 +0530 Subject: [PATCH 139/254] Fix Indentation in test.py (#2614) * Fix Indentation in test.py * CI fix * Comply with PEP8: 80 characters per line --- test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test.py b/test.py index 61d6965f7414..c0af91120e60 100644 --- a/test.py +++ b/test.py @@ -156,7 +156,7 @@ def test(data, "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()] boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space wandb_images.append(wandb_logger.wandb.Image(img[si], boxes=boxes, caption=path.name)) - wandb_logger.log_training_progress(predn, path, names) # logs dsviz tables + wandb_logger.log_training_progress(predn, path, names) if wandb_logger and wandb_logger.wandb_run else None # Append to pycocotools JSON dictionary if save_json: From 005d7a8c54a39d89bf2b9dc03fba82a489cd0628 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 26 Mar 2021 
21:19:15 +0100 Subject: [PATCH 140/254] Update Detections() self.n comment (#2620) ```python self.n = len(self.pred) # number of images (batch size) ``` --- models/common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/common.py b/models/common.py index c6b9cda32e29..21a2ed5a2ca7 100644 --- a/models/common.py +++ b/models/common.py @@ -247,7 +247,7 @@ def __init__(self, imgs, pred, files, times=None, names=None, shape=None): self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized - self.n = len(self.pred) + self.n = len(self.pred) # number of images (batch size) self.t = tuple((times[i + 1] - times[i]) * 1000 / self.n for i in range(3)) # timestamps (ms) self.s = shape # inference BCHW shape From 2dfe32030ad5f73d08275b93c0baa089bd513cf3 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 27 Mar 2021 18:31:53 +0100 Subject: [PATCH 141/254] Remove conflicting nvidia-tensorboard package (#2622) Attempt to resolve tensorboard Docker error in https://github.com/ultralytics/yolov5/issues/2573 --- Dockerfile | 1 + 1 file changed, 1 insertion(+) diff --git a/Dockerfile b/Dockerfile index e1b40c2d15c6..a3d870cafba3 100644 --- a/Dockerfile +++ b/Dockerfile @@ -7,6 +7,7 @@ RUN apt update && apt install -y zip htop screen libgl1-mesa-glx # Install python dependencies COPY requirements.txt . 
RUN python -m pip install --upgrade pip +RUN pip uninstall -y nvidia-tensorboard nvidia-tensorboard-plugin-dlprof RUN pip install --no-cache -r requirements.txt gsutil notebook # Create working directory From 9b92d3ee769e8f26f2d535879dc69708998c47a3 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 27 Mar 2021 18:35:53 +0100 Subject: [PATCH 142/254] FROM nvcr.io/nvidia/pytorch:21.03-py3 (#2623) Update Docker FROM nvcr.io/nvidia/pytorch:21.03-py3 --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index a3d870cafba3..c0484e5b9c1c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # Start FROM Nvidia PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch -FROM nvcr.io/nvidia/pytorch:21.02-py3 +FROM nvcr.io/nvidia/pytorch:21.03-py3 # Install linux packages RUN apt update && apt install -y zip htop screen libgl1-mesa-glx From 6e8c5b767866ecebe08dc1b673537348394680f3 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 28 Mar 2021 15:39:31 +0200 Subject: [PATCH 143/254] Improve git_describe() (#2633) Catch 'fatal: not a git repository' returns and return '' instead (observed in GCP Hub checks). --- utils/torch_utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 0499da49782e..dfab83d5374a 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -55,7 +55,8 @@ def git_describe(path=Path(__file__).parent): # path must be a directory # return human-readable git description, i.e. 
v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe s = f'git -C {path} describe --tags --long --always' try: - return subprocess.check_output(s, shell=True).decode()[:-1] + r = subprocess.check_output(s, shell=True).decode()[:-1] + return '' if r.startswith('fatal: not a git repository') else r except subprocess.CalledProcessError as e: return '' From dc51e80b005c0e63c794ae20c712e5db7eb0ba90 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Sun, 28 Mar 2021 19:39:35 +0530 Subject: [PATCH 144/254] Fix: evolve with wandb (#2634) --- train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/train.py b/train.py index 25a9accd3be0..211cc04fb63b 100644 --- a/train.py +++ b/train.py @@ -439,7 +439,7 @@ def train(hyp, opt, device, tb_writer=None): strip_optimizer(f) # strip optimizers if opt.bucket: os.system(f'gsutil cp {final} gs://{opt.bucket}/weights') # upload - if wandb_logger.wandb: # Log the stripped model + if wandb_logger.wandb and not opt.evolve: # Log the stripped model wandb_logger.wandb.log_artifact(str(final), type='model', name='run_' + wandb_logger.wandb_run.id + '_model', aliases=['last', 'best', 'stripped']) From 518c09578e90d71c798cd1f0bb3274959376539c Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Sun, 28 Mar 2021 19:41:36 +0530 Subject: [PATCH 145/254] W&B resume ddp from run link fix (#2579) * W&B resume ddp from run link fix * Native DDP W&B support for training, resuming --- train.py | 4 +- utils/wandb_logging/wandb_utils.py | 66 +++++++++++++++++++++++------- 2 files changed, 54 insertions(+), 16 deletions(-) diff --git a/train.py b/train.py index 211cc04fb63b..d5b2d1b75c52 100644 --- a/train.py +++ b/train.py @@ -33,7 +33,7 @@ from utils.loss import ComputeLoss from utils.plots import plot_images, plot_labels, plot_results, plot_evolution from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, is_parallel -from utils.wandb_logging.wandb_utils import WandbLogger, 
resume_and_get_id +from utils.wandb_logging.wandb_utils import WandbLogger, check_wandb_resume logger = logging.getLogger(__name__) @@ -496,7 +496,7 @@ def train(hyp, opt, device, tb_writer=None): check_requirements() # Resume - wandb_run = resume_and_get_id(opt) + wandb_run = check_wandb_resume(opt) if opt.resume and not wandb_run: # resume an interrupted run ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run() # specified or most recent path assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist' diff --git a/utils/wandb_logging/wandb_utils.py b/utils/wandb_logging/wandb_utils.py index d6dd256366e0..17132874e0d0 100644 --- a/utils/wandb_logging/wandb_utils.py +++ b/utils/wandb_logging/wandb_utils.py @@ -23,7 +23,7 @@ WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' -def remove_prefix(from_string, prefix): +def remove_prefix(from_string, prefix=WANDB_ARTIFACT_PREFIX): return from_string[len(prefix):] @@ -33,35 +33,73 @@ def check_wandb_config_file(data_config_file): return wandb_config return data_config_file +def get_run_info(run_path): + run_path = Path(remove_prefix(run_path, WANDB_ARTIFACT_PREFIX)) + run_id = run_path.stem + project = run_path.parent.stem + model_artifact_name = 'run_' + run_id + '_model' + return run_id, project, model_artifact_name -def resume_and_get_id(opt): - # It's more elegant to stick to 1 wandb.init call, but as useful config data is overwritten in the WandbLogger's wandb.init call +def check_wandb_resume(opt): + process_wandb_config_ddp_mode(opt) if opt.global_rank not in [-1, 0] else None if isinstance(opt.resume, str): if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): - run_path = Path(remove_prefix(opt.resume, WANDB_ARTIFACT_PREFIX)) - run_id = run_path.stem - project = run_path.parent.stem - model_artifact_name = WANDB_ARTIFACT_PREFIX + 'run_' + run_id + '_model' - assert wandb, 'install wandb to resume wandb runs' - # Resume wandb-artifact:// runs here| workaround for not overwriting wandb.config - 
run = wandb.init(id=run_id, project=project, resume='allow') - opt.resume = model_artifact_name - return run + if opt.global_rank not in [-1, 0]: # For resuming DDP runs + run_id, project, model_artifact_name = get_run_info(opt.resume) + api = wandb.Api() + artifact = api.artifact(project + '/' + model_artifact_name + ':latest') + modeldir = artifact.download() + opt.weights = str(Path(modeldir) / "last.pt") + return True return None +def process_wandb_config_ddp_mode(opt): + with open(opt.data) as f: + data_dict = yaml.load(f, Loader=yaml.SafeLoader) # data dict + train_dir, val_dir = None, None + if data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX): + api = wandb.Api() + train_artifact = api.artifact(remove_prefix(data_dict['train']) + ':' + opt.artifact_alias) + train_dir = train_artifact.download() + train_path = Path(train_dir) / 'data/images/' + data_dict['train'] = str(train_path) + + if data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX): + api = wandb.Api() + val_artifact = api.artifact(remove_prefix(data_dict['val']) + ':' + opt.artifact_alias) + val_dir = val_artifact.download() + val_path = Path(val_dir) / 'data/images/' + data_dict['val'] = str(val_path) + if train_dir or val_dir: + ddp_data_path = str(Path(val_dir) / 'wandb_local_data.yaml') + with open(ddp_data_path, 'w') as f: + yaml.dump(data_dict, f) + opt.data = ddp_data_path + + class WandbLogger(): def __init__(self, opt, name, run_id, data_dict, job_type='Training'): # Pre-training routine -- self.job_type = job_type self.wandb, self.wandb_run, self.data_dict = wandb, None if not wandb else wandb.run, data_dict - if self.wandb: + # It's more elegant to stick to 1 wandb.init call, but useful config data is overwritten in the WandbLogger's wandb.init call + if isinstance(opt.resume, str): # checks resume from artifact + if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): + run_id, project, model_artifact_name = get_run_info(opt.resume) + model_artifact_name = WANDB_ARTIFACT_PREFIX + 
model_artifact_name + assert wandb, 'install wandb to resume wandb runs' + # Resume wandb-artifact:// runs here| workaround for not overwriting wandb.config + self.wandb_run = wandb.init(id=run_id, project=project, resume='allow') + opt.resume = model_artifact_name + elif self.wandb: self.wandb_run = wandb.init(config=opt, resume="allow", project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem, name=name, job_type=job_type, - id=run_id) if not wandb.run else wandb.run + id=run_id) if not wandb.run else wandb.run + if self.wandb_run: if self.job_type == 'Training': if not opt.resume: wandb_data_dict = self.check_and_upload_dataset(opt) if opt.upload_dataset else data_dict From 2e95cf3d794fe8b04dadea63d8cab523b959d853 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 28 Mar 2021 17:09:06 +0200 Subject: [PATCH 146/254] Improve git_describe() fix 1 (#2635) Add stderr=subprocess.STDOUT to catch error messages. --- utils/torch_utils.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index dfab83d5374a..d6da0cae8945 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -55,10 +55,9 @@ def git_describe(path=Path(__file__).parent): # path must be a directory # return human-readable git description, i.e. 
v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe s = f'git -C {path} describe --tags --long --always' try: - r = subprocess.check_output(s, shell=True).decode()[:-1] - return '' if r.startswith('fatal: not a git repository') else r + return subprocess.check_output(s, shell=True, stderr=subprocess.STDOUT).decode()[:-1] except subprocess.CalledProcessError as e: - return '' + return '' # not a git repository def select_device(device='', batch_size=None): From ee169834bd0edf4e03b555688053da7bdd05a71e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 28 Mar 2021 17:22:00 +0200 Subject: [PATCH 147/254] PyTorch Hub custom model to CUDA device fix (#2636) Fix for #2630 raised by @Pro100rus32 --- hubconf.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/hubconf.py b/hubconf.py index 710882cf158f..0eaf70787e64 100644 --- a/hubconf.py +++ b/hubconf.py @@ -128,7 +128,10 @@ def custom(path_or_model='path/to/model.pt', autoshape=True): hub_model = Model(model.yaml).to(next(model.parameters()).device) # create hub_model.load_state_dict(model.float().state_dict()) # load state_dict hub_model.names = model.names # class names - return hub_model.autoshape() if autoshape else hub_model + if autoshape: + hub_model = hub_model.autoshape() # for file/URI/PIL/cv2/np inputs and NMS + device = select_device('0' if torch.cuda.is_available() else 'cpu') # default to GPU if available + return hub_model.to(device) if __name__ == '__main__': From 2bf34f50fda2d5997f301364f9a0b196fa57117b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 28 Mar 2021 20:23:40 +0200 Subject: [PATCH 148/254] PyTorch Hub amp.autocast() inference (#2641) I think this should help speed up CUDA inference, as currently models may be running in FP32 inference mode on CUDA devices unnecesarily. 
--- models/common.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/models/common.py b/models/common.py index 21a2ed5a2ca7..5c0e571b752f 100644 --- a/models/common.py +++ b/models/common.py @@ -8,6 +8,7 @@ import torch import torch.nn as nn from PIL import Image +from torch.cuda import amp from utils.datasets import letterbox from utils.general import non_max_suppression, make_divisible, scale_coords, xyxy2xywh @@ -219,17 +220,17 @@ def forward(self, imgs, size=640, augment=False, profile=False): x = torch.from_numpy(x).to(p.device).type_as(p) / 255. # uint8 to fp16/32 t.append(time_synchronized()) - # Inference - with torch.no_grad(): + with torch.no_grad(), amp.autocast(enabled=p.device.type != 'cpu'): + # Inference y = self.model(x, augment, profile)[0] # forward - t.append(time_synchronized()) + t.append(time_synchronized()) - # Post-process - y = non_max_suppression(y, conf_thres=self.conf, iou_thres=self.iou, classes=self.classes) # NMS - for i in range(n): - scale_coords(shape1, y[i][:, :4], shape0[i]) - t.append(time_synchronized()) + # Post-process + y = non_max_suppression(y, conf_thres=self.conf, iou_thres=self.iou, classes=self.classes) # NMS + for i in range(n): + scale_coords(shape1, y[i][:, :4], shape0[i]) + t.append(time_synchronized()) return Detections(imgs, y, files, t, self.names, x.shape) From 1e8ab3f5f2048b91f8f5e8ec0b15fe855853eebc Mon Sep 17 00:00:00 2001 From: zzttqu <80448114+zzttqu@users.noreply.github.com> Date: Mon, 29 Mar 2021 05:21:25 -0500 Subject: [PATCH 149/254] Add tqdm pbar.close() (#2644) When using tqdm, sometimes it can't print in one line and roll to next line. 
--- utils/datasets.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index dfe1dcc52971..5ef89ab6ea83 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -443,7 +443,8 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # img, hw_original, hw_resized = load_image(self, i) gb += self.imgs[i].nbytes pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB)' - + pbar.close() + def cache_labels(self, path=Path('./labels.cache'), prefix=''): # Cache dataset labels, check images and read shapes x = {} # dict @@ -487,7 +488,8 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): pbar.desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels... " \ f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted" - + pbar.close() + if nf == 0: print(f'{prefix}WARNING: No labels found in {path}. See {help_url}') From 866bc7d640b04913943820c45636e7c2da6d8245 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 29 Mar 2021 15:19:07 +0200 Subject: [PATCH 150/254] Speed profiling improvements (#2648) * Speed profiling improvements * Update torch_utils.py deepcopy() required to avoid adding elements to model. 
* Update torch_utils.py --- hubconf.py | 7 ++++--- utils/torch_utils.py | 2 +- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/hubconf.py b/hubconf.py index 0eaf70787e64..1e6b9c78ac6a 100644 --- a/hubconf.py +++ b/hubconf.py @@ -38,9 +38,10 @@ def create(name, pretrained, channels, classes, autoshape): fname = f'{name}.pt' # checkpoint filename attempt_download(fname) # download if not found locally ckpt = torch.load(fname, map_location=torch.device('cpu')) # load - state_dict = ckpt['model'].float().state_dict() # to FP32 - state_dict = {k: v for k, v in state_dict.items() if model.state_dict()[k].shape == v.shape} # filter - model.load_state_dict(state_dict, strict=False) # load + msd = model.state_dict() # model state_dict + csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 + csd = {k: v for k, v in csd.items() if msd[k].shape == v.shape} # filter + model.load_state_dict(csd, strict=False) # load if len(ckpt['model'].names) == classes: model.names = ckpt['model'].names # set class names attribute if autoshape: diff --git a/utils/torch_utils.py b/utils/torch_utils.py index d6da0cae8945..9991e5ec87d8 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -191,7 +191,7 @@ def fuse_conv_and_bn(conv, bn): # prepare filters w_conv = conv.weight.clone().view(conv.out_channels, -1) w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var))) - fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.size())) + fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape)) # prepare spatial bias b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias From 1b100cd53e3344cf9d95d29e3de1e5a6a9c0f1a3 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 29 Mar 2021 16:43:37 +0200 Subject: [PATCH 151/254] Created using Colaboratory (#2649) --- tutorial.ipynb | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git 
a/tutorial.ipynb b/tutorial.ipynb index c710685b7e75..9d8f08d5fcf5 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -563,7 +563,7 @@ "clear_output()\n", "print('Setup complete. Using torch %s %s' % (torch.__version__, torch.cuda.get_device_properties(0) if torch.cuda.is_available() else 'CPU'))" ], - "execution_count": 1, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -689,7 +689,7 @@ "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017val.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../ && rm tmp.zip" ], - "execution_count": 2, + "execution_count": null, "outputs": [ { "output_type": "display_data", @@ -729,7 +729,7 @@ "# Run YOLOv5x on COCO val2017\n", "!python test.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65" ], - "execution_count": 3, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -798,9 +798,9 @@ "source": [ "# Download COCO test-dev2017\n", "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017labels.zip', 'tmp.zip')\n", - "!unzip -q tmp.zip -d ../ && rm tmp.zip # unzip labels\n", + "!unzip -q tmp.zip -d ../ && rm tmp.zip # unzip labels\n", "!f=\"test2017.zip\" && curl http://images.cocodataset.org/zips/$f -o $f && unzip -q $f && rm $f # 7GB, 41k images\n", - "%mv ./test2017 ./coco/images && mv ./coco ../ # move images to /coco and move /coco next to /yolov5" + "%mv ./test2017 ../coco/images # move to /coco" ], "execution_count": null, "outputs": [] @@ -853,7 +853,7 @@ "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../ && rm tmp.zip" ], - "execution_count": 4, + "execution_count": null, "outputs": [ { "output_type": "display_data", @@ -930,7 +930,7 @@ "# Train YOLOv5s on COCO128 for 3 epochs\n", "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --nosave --cache" 
], - "execution_count": 5, + "execution_count": null, "outputs": [ { "output_type": "stream", From 7cdc5165a1f8f67d46458c0229ef42379140d7fe Mon Sep 17 00:00:00 2001 From: Youngjin Shin Date: Tue, 30 Mar 2021 00:05:52 +0900 Subject: [PATCH 152/254] Update requirements.txt (#2564) * Add opencv-contrib-python to requirements.txt * Update requirements.txt Co-authored-by: Glenn Jocher From fd1679975bf55325f606631b28d5d3feb47fbda5 Mon Sep 17 00:00:00 2001 From: Benjamin Fineran Date: Mon, 29 Mar 2021 11:15:26 -0400 Subject: [PATCH 153/254] add option to disable half precision in test.py (#2507) Co-authored-by: Glenn Jocher --- test.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test.py b/test.py index c0af91120e60..d099699bcad8 100644 --- a/test.py +++ b/test.py @@ -37,6 +37,7 @@ def test(data, plots=True, wandb_logger=None, compute_loss=None, + half_precision=True, is_coco=False): # Initialize/load model and set device training = model is not None @@ -61,7 +62,7 @@ def test(data, # model = nn.DataParallel(model) # Half - half = device.type != 'cpu' # half precision only supported on CUDA + half = device.type != 'cpu' and half_precision # half precision only supported on CUDA if half: model.half() From 9c803f2f7e4f3759e8121c9c02fc0d0b4a7b04b1 Mon Sep 17 00:00:00 2001 From: Phat Tran <36766404+ptran1203@users.noreply.github.com> Date: Mon, 29 Mar 2021 23:45:46 +0700 Subject: [PATCH 154/254] Add --label-smoothing eps argument to train.py (default 0.0) (#2344) * Add label smoothing option * Correct data type * add_log * Remove log * Add log * Update loss.py remove comment (too versbose) Co-authored-by: phattran Co-authored-by: Glenn Jocher --- train.py | 2 ++ utils/loss.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/train.py b/train.py index d5b2d1b75c52..d55833bf45a3 100644 --- a/train.py +++ b/train.py @@ -224,6 +224,7 @@ def train(hyp, opt, device, tb_writer=None): hyp['box'] *= 3. 
/ nl # scale to layers hyp['cls'] *= nc / 80. * 3. / nl # scale to classes and layers hyp['obj'] *= (imgsz / 640) ** 2 * 3. / nl # scale to image size and layers + hyp['label_smoothing'] = opt.label_smoothing model.nc = nc # attach number of classes to model model.hyp = hyp # attach hyperparameters to model model.gr = 1.0 # iou loss ratio (obj_loss = 1.0 or iou) @@ -481,6 +482,7 @@ def train(hyp, opt, device, tb_writer=None): parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') parser.add_argument('--quad', action='store_true', help='quad dataloader') parser.add_argument('--linear-lr', action='store_true', help='linear LR') + parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon') parser.add_argument('--upload_dataset', action='store_true', help='Upload dataset as W&B artifact table') parser.add_argument('--bbox_interval', type=int, default=-1, help='Set bounding-box image logging interval for W&B') parser.add_argument('--save_period', type=int, default=-1, help='Log model after every "save_period" epoch') diff --git a/utils/loss.py b/utils/loss.py index 2302d18de87d..9e78df17fdf3 100644 --- a/utils/loss.py +++ b/utils/loss.py @@ -97,7 +97,7 @@ def __init__(self, model, autobalance=False): BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device)) # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 - self.cp, self.cn = smooth_BCE(eps=0.0) + self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets # Focal loss g = h['fl_gamma'] # focal loss gamma From 1b475c1797fdf116e363bc54593a8f1289aeae22 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 30 Mar 2021 20:07:18 +0200 Subject: [PATCH 155/254] Created using Colaboratory --- tutorial.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 9d8f08d5fcf5..d11f6822d94c 100644 --- 
a/tutorial.ipynb +++ b/tutorial.ipynb @@ -787,7 +787,7 @@ }, "source": [ "## COCO test-dev2017\n", - "Download [COCO test2017](https://github.com/ultralytics/yolov5/blob/74b34872fdf41941cddcf243951cdb090fbac17b/data/coco.yaml#L15) dataset (7GB - 40,000 images), to test model accuracy on test-dev set (20,000 images). Results are saved to a `*.json` file which can be submitted to the evaluation server at https://competitions.codalab.org/competitions/20794." + "Download [COCO test2017](https://github.com/ultralytics/yolov5/blob/74b34872fdf41941cddcf243951cdb090fbac17b/data/coco.yaml#L15) dataset (7GB - 40,000 images), to test model accuracy on test-dev set (**20,000 images, no labels**). Results are saved to a `*.json` file which should be **zipped** and submitted to the evaluation server at https://competitions.codalab.org/competitions/20794." ] }, { From 2a28ef374be61653ce3f68fd414efe03292356d7 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Wed, 31 Mar 2021 17:17:54 +0530 Subject: [PATCH 156/254] Set resume flag to false (#2657) --- utils/wandb_logging/log_dataset.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/wandb_logging/log_dataset.py b/utils/wandb_logging/log_dataset.py index 97e68425cddd..0ccb8735bd42 100644 --- a/utils/wandb_logging/log_dataset.py +++ b/utils/wandb_logging/log_dataset.py @@ -21,5 +21,6 @@ def create_dataset_artifact(opt): parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset') parser.add_argument('--project', type=str, default='YOLOv5', help='name of W&B Project') opt = parser.parse_args() - + opt.resume = False # Explicitly disallow resume check for dataset upload Job + create_dataset_artifact(opt) From 51cc0962b5688e0769592f2ba646d35eda957da8 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 1 Apr 2021 11:16:56 +0200 Subject: [PATCH 157/254] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md 
index 1240f83be2a5..c708f058fc93 100755 --- a/README.md +++ b/README.md @@ -8,7 +8,7 @@ This repository represents Ultralytics open-source research into future object d ** GPU Speed measures end-to-end time per image averaged over 5000 COCO val2017 images using a V100 GPU with batch size 32, and includes image preprocessing, PyTorch FP16 inference, postprocessing and NMS. EfficientDet data from [google/automl](https://github.com/google/automl) at batch size 8. -- **January 5, 2021**: [v4.0 release](https://github.com/ultralytics/yolov5/releases/tag/v4.0): nn.SiLU() activations, [Weights & Biases](https://wandb.ai/) logging, [PyTorch Hub](https://pytorch.org/hub/ultralytics_yolov5/) integration. +- **January 5, 2021**: [v4.0 release](https://github.com/ultralytics/yolov5/releases/tag/v4.0): nn.SiLU() activations, [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme) logging, [PyTorch Hub](https://pytorch.org/hub/ultralytics_yolov5/) integration. - **August 13, 2020**: [v3.0 release](https://github.com/ultralytics/yolov5/releases/tag/v3.0): nn.Hardswish() activations, data autodownload, native AMP. - **July 23, 2020**: [v2.0 release](https://github.com/ultralytics/yolov5/releases/tag/v2.0): improved model definition, training and mAP. - **June 22, 2020**: [PANet](https://arxiv.org/abs/1803.01534) updates: new heads, reduced parameters, improved speed and mAP [364fcfd](https://github.com/ultralytics/yolov5/commit/364fcfd7dba53f46edd4f04c037a039c0a287972). 
From 877b826e3af3a0c7fc9da49cff47d57b5993064d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 1 Apr 2021 11:31:27 +0200 Subject: [PATCH 158/254] Created using Colaboratory --- tutorial.ipynb | 27 +++++++++++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index d11f6822d94c..8a191609b24d 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -1031,9 +1031,9 @@ "source": [ "## Weights & Biases Logging 🌟 NEW\n", "\n", - "[Weights & Biases](https://www.wandb.com/) (W&B) is now integrated with YOLOv5 for real-time visualization and cloud logging of training runs. This allows for better run comparison and introspection, as well improved visibility and collaboration for teams. To enable W&B `pip install wandb`, and then train normally (you will be guided through setup on first use). \n", + "[Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_notebook) (W&B) is now integrated with YOLOv5 for real-time visualization and cloud logging of training runs. This allows for better run comparison and introspection, as well improved visibility and collaboration for teams. To enable W&B `pip install wandb`, and then train normally (you will be guided through setup on first use). \n", "\n", - "During training you will see live updates at [https://wandb.ai/home](https://wandb.ai/home), and you can create and share detailed [Reports](https://wandb.ai/glenn-jocher/yolov5_tutorial/reports/YOLOv5-COCO128-Tutorial-Results--VmlldzozMDI5OTY) of your results. For more information see the [YOLOv5 Weights & Biases Tutorial](https://github.com/ultralytics/yolov5/issues/1289). \n", + "During training you will see live updates at [https://wandb.ai/home](https://wandb.ai/home?utm_campaign=repo_yolo_notebook), and you can create and share detailed [Reports](https://wandb.ai/glenn-jocher/yolov5_tutorial/reports/YOLOv5-COCO128-Tutorial-Results--VmlldzozMDI5OTY) of your results. 
For more information see the [YOLOv5 Weights & Biases Tutorial](https://github.com/ultralytics/yolov5/issues/1289). \n", "\n", "" ] @@ -1177,6 +1177,29 @@ "execution_count": null, "outputs": [] }, + { + "cell_type": "code", + "metadata": { + "id": "GMusP4OAxFu6" + }, + "source": [ + "# PyTorch Hub\n", + "import torch\n", + "\n", + "# Model\n", + "model = torch.hub.load('ultralytics/yolov5', 'yolov5s')\n", + "\n", + "# Images\n", + "dir = 'https://github.com/ultralytics/yolov5/raw/master/data/images/'\n", + "imgs = [dir + f for f in ('zidane.jpg', 'bus.jpg')] # batch of images\n", + "\n", + "# Inference\n", + "results = model(imgs)\n", + "results.print() # or .show(), .save()" + ], + "execution_count": null, + "outputs": [] + }, { "cell_type": "code", "metadata": { From b8b862993d8e0a267f7d96eb94307f3f0f7dce51 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 1 Apr 2021 15:01:00 +0200 Subject: [PATCH 159/254] Update README with Tips for Best Results tutorial (#2682) * Update README with Tips for Best Results tutorial * Update README.md --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index c708f058fc93..6e3f38761543 100755 --- a/README.md +++ b/README.md @@ -49,6 +49,7 @@ $ pip install -r requirements.txt ## Tutorials * [Train Custom Data](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data)  🚀 RECOMMENDED +* [Tips for Best Training Results](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results)  ☘️ RECOMMENDED * [Weights & Biases Logging](https://github.com/ultralytics/yolov5/issues/1289)  🌟 NEW * [Supervisely Ecosystem](https://github.com/ultralytics/yolov5/issues/2518)  🌟 NEW * [Multi-GPU Training](https://github.com/ultralytics/yolov5/issues/475) From 1148e2ea63f498645df5c742fb8078ef6317b46f Mon Sep 17 00:00:00 2001 From: Ding Yiwei <16083536+dingyiwei@users.noreply.github.com> Date: Thu, 1 Apr 2021 23:26:53 +0800 Subject: [PATCH 160/254] Add TransformerLayer, TransformerBlock, C3TR modules 
(#2333) * yolotr * transformer block * Remove bias in Transformer * Remove C3T * Remove a deprecated class * put the 2nd LayerNorm into the 2nd residual block * move example model to models/hub, rename to -transformer * Add module comments and TODOs * Remove LN in Transformer * Add comments for Transformer * Solve the problem of MA with DDP * cleanup * cleanup find_unused_parameters * PEP8 reformat Co-authored-by: DingYiwei <846414640@qq.com> Co-authored-by: Glenn Jocher --- models/common.py | 54 +++++++++++++++++++++++++++++ models/hub/yolov5s-transformer.yaml | 48 +++++++++++++++++++++++++ models/yolo.py | 4 +-- train.py | 4 ++- 4 files changed, 107 insertions(+), 3 deletions(-) create mode 100644 models/hub/yolov5s-transformer.yaml diff --git a/models/common.py b/models/common.py index 5c0e571b752f..a25172dcfcac 100644 --- a/models/common.py +++ b/models/common.py @@ -43,6 +43,52 @@ def fuseforward(self, x): return self.act(self.conv(x)) +class TransformerLayer(nn.Module): + # Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance) + def __init__(self, c, num_heads): + super().__init__() + self.q = nn.Linear(c, c, bias=False) + self.k = nn.Linear(c, c, bias=False) + self.v = nn.Linear(c, c, bias=False) + self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads) + self.fc1 = nn.Linear(c, c, bias=False) + self.fc2 = nn.Linear(c, c, bias=False) + + def forward(self, x): + x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x + x = self.fc2(self.fc1(x)) + x + return x + + +class TransformerBlock(nn.Module): + # Vision Transformer https://arxiv.org/abs/2010.11929 + def __init__(self, c1, c2, num_heads, num_layers): + super().__init__() + self.conv = None + if c1 != c2: + self.conv = Conv(c1, c2) + self.linear = nn.Linear(c2, c2) # learnable position embedding + self.tr = nn.Sequential(*[TransformerLayer(c2, num_heads) for _ in range(num_layers)]) + self.c2 = c2 + + def forward(self, x): + if self.conv is not None: + 
x = self.conv(x) + b, _, w, h = x.shape + p = x.flatten(2) + p = p.unsqueeze(0) + p = p.transpose(0, 3) + p = p.squeeze(3) + e = self.linear(p) + x = p + e + + x = self.tr(x) + x = x.unsqueeze(3) + x = x.transpose(0, 3) + x = x.reshape(b, self.c2, w, h) + return x + + class Bottleneck(nn.Module): # Standard bottleneck def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion @@ -90,6 +136,14 @@ def forward(self, x): return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), dim=1)) +class C3TR(C3): + # C3 module with TransformerBlock() + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2 * e) + self.m = TransformerBlock(c_, c_, 4, n) + + class SPP(nn.Module): # Spatial pyramid pooling layer used in YOLOv3-SPP def __init__(self, c1, c2, k=(5, 9, 13)): diff --git a/models/hub/yolov5s-transformer.yaml b/models/hub/yolov5s-transformer.yaml new file mode 100644 index 000000000000..f2d666722b30 --- /dev/null +++ b/models/hub/yolov5s-transformer.yaml @@ -0,0 +1,48 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 0.33 # model depth multiple +width_multiple: 0.50 # layer channel multiple + +# anchors +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 1, SPP, [1024, [5, 9, 13]]], + [-1, 3, C3TR, [1024, False]], # 9 <-------- C3TR() Transformer module + ] + +# YOLOv5 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 
1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/models/yolo.py b/models/yolo.py index e5c676dae558..f730a1efa3b3 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -215,13 +215,13 @@ def parse_model(d, ch): # model_dict, input_channels(3) n = max(round(n * gd), 1) if n > 1 else n # depth gain if m in [Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP, - C3]: + C3, C3TR]: c1, c2 = ch[f], args[0] if c2 != no: # if not output c2 = make_divisible(c2 * gw, 8) args = [c1, c2, *args[1:]] - if m in [BottleneckCSP, C3]: + if m in [BottleneckCSP, C3, C3TR]: args.insert(2, n) # number of repeats n = 1 elif m is nn.BatchNorm2d: diff --git a/train.py b/train.py index d55833bf45a3..1f2b467e732b 100644 --- a/train.py +++ b/train.py @@ -218,7 +218,9 @@ def train(hyp, opt, device, tb_writer=None): # DDP mode if cuda and rank != -1: - model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank) + model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank, + # nn.MultiheadAttention incompatibility with DDP https://github.com/pytorch/pytorch/issues/26698 + find_unused_parameters=any(isinstance(layer, nn.MultiheadAttention) for layer in model.modules())) # Model parameters hyp['box'] *= 3. 
/ nl # scale to layers From 514ebcdf3395b1977f2663f206d6d3c93afac235 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Fri, 2 Apr 2021 15:24:50 +0530 Subject: [PATCH 161/254] Fix: #2674 (#2683) * Set resume flag to false * Check existance of val dataset --- utils/wandb_logging/wandb_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/wandb_logging/wandb_utils.py b/utils/wandb_logging/wandb_utils.py index 17132874e0d0..86038e199dc8 100644 --- a/utils/wandb_logging/wandb_utils.py +++ b/utils/wandb_logging/wandb_utils.py @@ -158,7 +158,7 @@ def setup_training(self, opt, data_dict): return data_dict def download_dataset_artifact(self, path, alias): - if path.startswith(WANDB_ARTIFACT_PREFIX): + if path and path.startswith(WANDB_ARTIFACT_PREFIX): dataset_artifact = wandb.use_artifact(remove_prefix(path, WANDB_ARTIFACT_PREFIX) + ":" + alias) assert dataset_artifact is not None, "'Error: W&B dataset artifact doesn\'t exist'" datadir = dataset_artifact.download() From 2af059c0d85f89813254a644443fb074033e3629 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 2 Apr 2021 11:55:10 +0200 Subject: [PATCH 162/254] PyTorch Hub model.save() increment as runs/hub/exp (#2684) * PyTorch Hub model.save() increment as runs/hub/exp This chane will align PyTorch Hub results saving with the existing unified results saving directory structure of runs/ /train /detect /test /hub /exp /exp2 ... 
* cleanup --- models/common.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/models/common.py b/models/common.py index a25172dcfcac..9970fbc8e2d9 100644 --- a/models/common.py +++ b/models/common.py @@ -11,7 +11,7 @@ from torch.cuda import amp from utils.datasets import letterbox -from utils.general import non_max_suppression, make_divisible, scale_coords, xyxy2xywh +from utils.general import non_max_suppression, make_divisible, scale_coords, increment_path, xyxy2xywh from utils.plots import color_list, plot_one_box from utils.torch_utils import time_synchronized @@ -324,9 +324,9 @@ def display(self, pprint=False, show=False, save=False, render=False, save_dir=' if show: img.show(self.files[i]) # show if save: - f = Path(save_dir) / self.files[i] - img.save(f) # save - print(f"{'Saving' * (i == 0)} {f},", end='' if i < self.n - 1 else ' done.\n') + f = self.files[i] + img.save(Path(save_dir) / f) # save + print(f"{'Saved' * (i == 0)} {f}", end=',' if i < self.n - 1 else f' to {save_dir}\n') if render: self.imgs[i] = np.asarray(img) @@ -337,8 +337,9 @@ def print(self): def show(self): self.display(show=True) # show results - def save(self, save_dir='results/'): - Path(save_dir).mkdir(exist_ok=True) + def save(self, save_dir='runs/hub/exp'): + save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/hub/exp') # increment save_dir + Path(save_dir).mkdir(parents=True, exist_ok=True) self.display(save=True, save_dir=save_dir) # save results def render(self): From 17300a4c7b6f2dfe3e30eb9a4feb0bd21f697856 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 2 Apr 2021 12:36:38 +0200 Subject: [PATCH 163/254] autoShape forward im = np.asarray(im) # to numpy (#2689) Slight speedup. 
--- models/common.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/models/common.py b/models/common.py index 9970fbc8e2d9..713297c14433 100644 --- a/models/common.py +++ b/models/common.py @@ -258,7 +258,8 @@ def forward(self, imgs, size=640, augment=False, profile=False): im, f = Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im), im # open im.filename = f # for uri files.append(Path(im.filename).with_suffix('.jpg').name if isinstance(im, Image.Image) else f'image{i}.jpg') - im = np.array(im) # to numpy + if not isinstance(im, np.ndarray): + im = np.asarray(im) # to numpy if im.shape[0] < 5: # image in CHW im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1) im = im[:, :, :3] if im.ndim == 3 else np.tile(im[:, :, None], 3) # enforce 3ch input From 9ccfa85249a2409d311bdf2e817f99377e135091 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 2 Apr 2021 13:00:46 +0200 Subject: [PATCH 164/254] pip install coremltools onnx (#2690) Requested in https://github.com/ultralytics/yolov5/issues/2686 --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index c0484e5b9c1c..b47e5bbff194 100644 --- a/Dockerfile +++ b/Dockerfile @@ -8,7 +8,7 @@ RUN apt update && apt install -y zip htop screen libgl1-mesa-glx COPY requirements.txt . 
RUN python -m pip install --upgrade pip RUN pip uninstall -y nvidia-tensorboard nvidia-tensorboard-plugin-dlprof -RUN pip install --no-cache -r requirements.txt gsutil notebook +RUN pip install --no-cache -r requirements.txt coremltools onnx gsutil notebook # Create working directory RUN mkdir -p /usr/src/app From 74276d51894497ea6193fd4e09435453ed2df6ca Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 5 Apr 2021 22:20:09 +0200 Subject: [PATCH 165/254] Updated filename attributes for YOLOv5 Hub results (#2708) Proposed fix for 'Model predict with forward will fail if PIL image does not have filename attribute' #2702 --- models/common.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/models/common.py b/models/common.py index 713297c14433..f6da7ad3113b 100644 --- a/models/common.py +++ b/models/common.py @@ -254,12 +254,12 @@ def forward(self, imgs, size=640, augment=False, profile=False): n, imgs = (len(imgs), imgs) if isinstance(imgs, list) else (1, [imgs]) # number of images, list of images shape0, shape1, files = [], [], [] # image and inference shapes, filenames for i, im in enumerate(imgs): + f = f'image{i}' # filename if isinstance(im, str): # filename or uri - im, f = Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im), im # open - im.filename = f # for uri - files.append(Path(im.filename).with_suffix('.jpg').name if isinstance(im, Image.Image) else f'image{i}.jpg') - if not isinstance(im, np.ndarray): - im = np.asarray(im) # to numpy + im, f = np.asarray(Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im)), im + elif isinstance(im, Image.Image): # PIL Image + im, f = np.asarray(im), getattr(im, 'filename', f) + files.append(Path(f).with_suffix('.jpg').name) if im.shape[0] < 5: # image in CHW im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1) im = im[:, :, :3] if im.ndim == 3 else np.tile(im[:, :, None], 3) # enforce 3ch input From 
ec8979f1d2f99b6873c2eafe05ec5bc2febad468 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 6 Apr 2021 13:18:56 +0200 Subject: [PATCH 166/254] Updated filename attributes for YOLOv5 Hub BytesIO (#2718) Fix 2 for 'Model predict with forward will fail if PIL image does not have filename attribute' #2702 --- models/common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/common.py b/models/common.py index f6da7ad3113b..4fd1a8159c64 100644 --- a/models/common.py +++ b/models/common.py @@ -258,7 +258,7 @@ def forward(self, imgs, size=640, augment=False, profile=False): if isinstance(im, str): # filename or uri im, f = np.asarray(Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im)), im elif isinstance(im, Image.Image): # PIL Image - im, f = np.asarray(im), getattr(im, 'filename', f) + im, f = np.asarray(im), getattr(im, 'filename', f) or f files.append(Path(f).with_suffix('.jpg').name) if im.shape[0] < 5: # image in CHW im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1) From 3067429307873bb85361076a810a8eb1b9405fda Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Tue, 6 Apr 2021 20:27:13 +0530 Subject: [PATCH 167/254] Add support for list-of-directory data format for wandb (#2719) --- utils/wandb_logging/wandb_utils.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/utils/wandb_logging/wandb_utils.py b/utils/wandb_logging/wandb_utils.py index 86038e199dc8..d407e6cd54fb 100644 --- a/utils/wandb_logging/wandb_utils.py +++ b/utils/wandb_logging/wandb_utils.py @@ -57,14 +57,14 @@ def process_wandb_config_ddp_mode(opt): with open(opt.data) as f: data_dict = yaml.load(f, Loader=yaml.SafeLoader) # data dict train_dir, val_dir = None, None - if data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX): + if isinstance(data_dict['train'], str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX): api = wandb.Api() train_artifact = 
api.artifact(remove_prefix(data_dict['train']) + ':' + opt.artifact_alias) train_dir = train_artifact.download() train_path = Path(train_dir) / 'data/images/' data_dict['train'] = str(train_path) - if data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX): + if isinstance(data_dict['val'], str) and data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX): api = wandb.Api() val_artifact = api.artifact(remove_prefix(data_dict['val']) + ':' + opt.artifact_alias) val_dir = val_artifact.download() @@ -158,7 +158,7 @@ def setup_training(self, opt, data_dict): return data_dict def download_dataset_artifact(self, path, alias): - if path and path.startswith(WANDB_ARTIFACT_PREFIX): + if isinstance(path, str) and path.startswith(WANDB_ARTIFACT_PREFIX): dataset_artifact = wandb.use_artifact(remove_prefix(path, WANDB_ARTIFACT_PREFIX) + ":" + alias) assert dataset_artifact is not None, "'Error: W&B dataset artifact doesn\'t exist'" datadir = dataset_artifact.download() @@ -229,7 +229,9 @@ def map_val_table_path(self): def create_dataset_table(self, dataset, class_to_id, name='dataset'): # TODO: Explore multiprocessing to slpit this loop parallely| This is essential for speeding up the the logging artifact = wandb.Artifact(name=name, type="dataset") - for img_file in tqdm([dataset.path]) if Path(dataset.path).is_dir() else tqdm(dataset.img_files): + img_files = tqdm([dataset.path]) if isinstance(dataset.path, str) and Path(dataset.path).is_dir() else None + img_files = tqdm(dataset.img_files) if not img_files else img_files + for img_file in img_files: if Path(img_file).is_dir(): artifact.add_dir(img_file, name='data/images') labels_path = 'labels'.join(dataset.path.rsplit('images', 1)) From c8c8da60792e37e2941fc27ee4d0594fcdcee34a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 6 Apr 2021 17:54:47 +0200 Subject: [PATCH 168/254] Update README with collapsable notes (#2721) * Update README with collapsable notes. 
* cleanup * center table --- README.md | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 6e3f38761543..f51ccd97712f 100755 --- a/README.md +++ b/README.md @@ -6,7 +6,13 @@ This repository represents Ultralytics open-source research into future object detection methods, and incorporates lessons learned and best practices evolved over thousands of hours of training and evolution on anonymized client datasets. **All code and models are under active development, and are subject to modification or deletion without notice.** Use at your own risk. -** GPU Speed measures end-to-end time per image averaged over 5000 COCO val2017 images using a V100 GPU with batch size 32, and includes image preprocessing, PyTorch FP16 inference, postprocessing and NMS. EfficientDet data from [google/automl](https://github.com/google/automl) at batch size 8. +

+
+ Figure Notes (click to expand) + + * GPU Speed measures end-to-end time per image averaged over 5000 COCO val2017 images using a V100 GPU with batch size 32, and includes image preprocessing, PyTorch FP16 inference, postprocessing and NMS. + * EfficientDet data from [google/automl](https://github.com/google/automl) at batch size 8. +
- **January 5, 2021**: [v4.0 release](https://github.com/ultralytics/yolov5/releases/tag/v4.0): nn.SiLU() activations, [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme) logging, [PyTorch Hub](https://pytorch.org/hub/ultralytics_yolov5/) integration. - **August 13, 2020**: [v3.0 release](https://github.com/ultralytics/yolov5/releases/tag/v3.0): nn.Hardswish() activations, data autodownload, native AMP. @@ -31,11 +37,15 @@ This repository represents Ultralytics open-source research into future object d | [YOLOv5l6](https://github.com/ultralytics/yolov5/releases) |1280 |53.0 |53.0 |70.8 |12.3ms |81 ||77.2M |117.7 ---> -** APtest denotes COCO [test-dev2017](http://cocodataset.org/#upload) server results, all other AP results denote val2017 accuracy. -** All AP numbers are for single-model single-scale without ensemble or TTA. **Reproduce mAP** by `python test.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` -** SpeedGPU averaged over 5000 COCO val2017 images using a GCP [n1-standard-16](https://cloud.google.com/compute/docs/machine-types#n1_standard_machine_types) V100 instance, and includes image preprocessing, FP16 inference, postprocessing and NMS. NMS is 1-2ms/img. **Reproduce speed** by `python test.py --data coco.yaml --img 640 --conf 0.25 --iou 0.45` -** All checkpoints are trained to 300 epochs with default settings and hyperparameters (no autoaugmentation). -** Test Time Augmentation ([TTA](https://github.com/ultralytics/yolov5/issues/303)) runs at 3 image sizes. **Reproduce TTA** by `python test.py --data coco.yaml --img 832 --iou 0.65 --augment` +
+ Table Notes (click to expand) + + * APtest denotes COCO [test-dev2017](http://cocodataset.org/#upload) server results, all other AP results denote val2017 accuracy. + * AP values are for single-model single-scale unless otherwise noted. **Reproduce mAP** by `python test.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` + * SpeedGPU averaged over 5000 COCO val2017 images using a GCP [n1-standard-16](https://cloud.google.com/compute/docs/machine-types#n1_standard_machine_types) V100 instance, and includes FP16 inference, postprocessing and NMS. **Reproduce speed** by `python test.py --data coco.yaml --img 640 --conf 0.25 --iou 0.45` + * All checkpoints are trained to 300 epochs with default settings and hyperparameters (no autoaugmentation). + * Test Time Augmentation ([TTA](https://github.com/ultralytics/yolov5/issues/303)) includes reflection and scale augmentation. **Reproduce TTA** by `python test.py --data coco.yaml --img 832 --iou 0.65 --augment` +
## Requirements From c03d590320ea875a9ce5288c077a9ce5c7a1c160 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 7 Apr 2021 16:28:07 +0200 Subject: [PATCH 169/254] Add Hub results.pandas() method (#2725) * Add Hub results.pandas() method New method converts results from torch tensors to pandas DataFrames with column names. This PR may partially resolve issue https://github.com/ultralytics/yolov5/issues/2703 ```python results = model(imgs) print(results.pandas().xyxy[0]) xmin ymin xmax ymax confidence class name 0 57.068970 391.770599 241.383545 905.797852 0.868964 0 person 1 667.661255 399.303589 810.000000 881.396667 0.851888 0 person 2 222.878387 414.774231 343.804474 857.825073 0.838376 0 person 3 4.205386 234.447678 803.739136 750.023376 0.658006 5 bus 4 0.000000 550.596008 76.681190 878.669922 0.450596 0 person ``` * Update comments torch example input now shown resized to size=640 and also now a multiple of P6 stride 64 (see https://github.com/ultralytics/yolov5/issues/2722#issuecomment-814785930) * apply decorators * PEP8 * Update common.py * pd.options.display.max_columns = 10 * Update common.py --- hubconf.py | 2 +- models/common.py | 46 +++++++++++++++++++++++++++++----------------- utils/general.py | 2 ++ 3 files changed, 32 insertions(+), 18 deletions(-) diff --git a/hubconf.py b/hubconf.py index 1e6b9c78ac6a..0f9aa150a34e 100644 --- a/hubconf.py +++ b/hubconf.py @@ -38,7 +38,7 @@ def create(name, pretrained, channels, classes, autoshape): fname = f'{name}.pt' # checkpoint filename attempt_download(fname) # download if not found locally ckpt = torch.load(fname, map_location=torch.device('cpu')) # load - msd = model.state_dict() # model state_dict + msd = model.state_dict() # model state_dict csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 csd = {k: v for k, v in csd.items() if msd[k].shape == v.shape} # filter model.load_state_dict(csd, strict=False) # load diff --git a/models/common.py b/models/common.py index 
4fd1a8159c64..412e9bf1e411 100644 --- a/models/common.py +++ b/models/common.py @@ -1,14 +1,15 @@ # YOLOv5 common modules import math +from copy import copy from pathlib import Path import numpy as np +import pandas as pd import requests import torch import torch.nn as nn from PIL import Image -from torch.cuda import amp from utils.datasets import letterbox from utils.general import non_max_suppression, make_divisible, scale_coords, increment_path, xyxy2xywh @@ -235,14 +236,16 @@ def autoshape(self): print('autoShape already enabled, skipping... ') # model already converted to model.autoshape() return self + @torch.no_grad() + @torch.cuda.amp.autocast() def forward(self, imgs, size=640, augment=False, profile=False): - # Inference from various sources. For height=720, width=1280, RGB images example inputs are: + # Inference from various sources. For height=640, width=1280, RGB images example inputs are: # filename: imgs = 'data/samples/zidane.jpg' # URI: = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/zidane.jpg' - # OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(720,1280,3) - # PIL: = Image.open('image.jpg') # HWC x(720,1280,3) - # numpy: = np.zeros((720,1280,3)) # HWC - # torch: = torch.zeros(16,3,720,1280) # BCHW + # OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(640,1280,3) + # PIL: = Image.open('image.jpg') # HWC x(640,1280,3) + # numpy: = np.zeros((640,1280,3)) # HWC + # torch: = torch.zeros(16,3,320,640) # BCHW (scaled to size=640, 0-1 values) # multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] # list of images t = [time_synchronized()] @@ -275,15 +278,14 @@ def forward(self, imgs, size=640, augment=False, profile=False): x = torch.from_numpy(x).to(p.device).type_as(p) / 255. 
# uint8 to fp16/32 t.append(time_synchronized()) - with torch.no_grad(), amp.autocast(enabled=p.device.type != 'cpu'): - # Inference - y = self.model(x, augment, profile)[0] # forward - t.append(time_synchronized()) + # Inference + y = self.model(x, augment, profile)[0] # forward + t.append(time_synchronized()) - # Post-process - y = non_max_suppression(y, conf_thres=self.conf, iou_thres=self.iou, classes=self.classes) # NMS - for i in range(n): - scale_coords(shape1, y[i][:, :4], shape0[i]) + # Post-process + y = non_max_suppression(y, conf_thres=self.conf, iou_thres=self.iou, classes=self.classes) # NMS + for i in range(n): + scale_coords(shape1, y[i][:, :4], shape0[i]) t.append(time_synchronized()) return Detections(imgs, y, files, t, self.names, x.shape) @@ -347,17 +349,27 @@ def render(self): self.display(render=True) # render results return self.imgs - def __len__(self): - return self.n + def pandas(self): + # return detections as pandas DataFrames, i.e. print(results.pandas().xyxy[0]) + new = copy(self) # return copy + ca = 'xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class', 'name' # xyxy columns + cb = 'xcenter', 'ycenter', 'width', 'height', 'confidence', 'class', 'name' # xywh columns + for k, c in zip(['xyxy', 'xyxyn', 'xywh', 'xywhn'], [ca, ca, cb, cb]): + a = [[x[:5] + [int(x[5]), self.names[int(x[5])]] for x in x.tolist()] for x in getattr(self, k)] # update + setattr(new, k, [pd.DataFrame(x, columns=c) for x in a]) + return new def tolist(self): # return a list of Detections objects, i.e. 'for result in results.tolist():' - x = [Detections([self.imgs[i]], [self.pred[i]], self.names) for i in range(self.n)] + x = [Detections([self.imgs[i]], [self.pred[i]], self.names, self.s) for i in range(self.n)] for d in x: for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']: setattr(d, k, getattr(d, k)[0]) # pop out of list return x + def __len__(self): + return self.n + class Classify(nn.Module): # Classification head, i.e. 
x(b,c1,20,20) to x(b,c2) diff --git a/utils/general.py b/utils/general.py index 9822582cdb86..a8aad16a8ab9 100755 --- a/utils/general.py +++ b/utils/general.py @@ -13,6 +13,7 @@ import cv2 import numpy as np +import pandas as pd import torch import torchvision import yaml @@ -24,6 +25,7 @@ # Settings torch.set_printoptions(linewidth=320, precision=5, profile='long') np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5 +pd.options.display.max_columns = 10 cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader) os.environ['NUMEXPR_MAX_THREADS'] = str(min(os.cpu_count(), 8)) # NumExpr max threads From fca5e2a48fb526b57bda0c66be6b7ac1aaa8d83d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 9 Apr 2021 13:34:49 +0200 Subject: [PATCH 170/254] autocast enable=torch.cuda.is_available() (#2748) --- models/common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/common.py b/models/common.py index 412e9bf1e411..c77ecbeceace 100644 --- a/models/common.py +++ b/models/common.py @@ -237,7 +237,7 @@ def autoshape(self): return self @torch.no_grad() - @torch.cuda.amp.autocast() + @torch.cuda.amp.autocast(torch.cuda.is_available()) def forward(self, imgs, size=640, augment=False, profile=False): # Inference from various sources. For height=640, width=1280, RGB images example inputs are: # filename: imgs = 'data/samples/zidane.jpg' From b5de52c4cdfefb3c7acfbff7d7f450a46b4aaada Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 9 Apr 2021 18:19:49 +0200 Subject: [PATCH 171/254] torch.cuda.amp bug fix (#2750) PR https://github.com/ultralytics/yolov5/pull/2725 introduced a very specific bug that only affects multi-GPU trainings. Apparently the cause was using the torch.cuda.amp decorator in the autoShape forward method. I've implemented amp more traditionally in this PR, and the bug is resolved. 
--- models/common.py | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/models/common.py b/models/common.py index c77ecbeceace..1130471e904b 100644 --- a/models/common.py +++ b/models/common.py @@ -10,6 +10,7 @@ import torch import torch.nn as nn from PIL import Image +from torch.cuda import amp from utils.datasets import letterbox from utils.general import non_max_suppression, make_divisible, scale_coords, increment_path, xyxy2xywh @@ -237,7 +238,6 @@ def autoshape(self): return self @torch.no_grad() - @torch.cuda.amp.autocast(torch.cuda.is_available()) def forward(self, imgs, size=640, augment=False, profile=False): # Inference from various sources. For height=640, width=1280, RGB images example inputs are: # filename: imgs = 'data/samples/zidane.jpg' @@ -251,7 +251,8 @@ def forward(self, imgs, size=640, augment=False, profile=False): t = [time_synchronized()] p = next(self.model.parameters()) # for device and type if isinstance(imgs, torch.Tensor): # torch - return self.model(imgs.to(p.device).type_as(p), augment, profile) # inference + with amp.autocast(enabled=p.device.type != 'cpu'): + return self.model(imgs.to(p.device).type_as(p), augment, profile) # inference # Pre-process n, imgs = (len(imgs), imgs) if isinstance(imgs, list) else (1, [imgs]) # number of images, list of images @@ -278,17 +279,18 @@ def forward(self, imgs, size=640, augment=False, profile=False): x = torch.from_numpy(x).to(p.device).type_as(p) / 255. 
# uint8 to fp16/32 t.append(time_synchronized()) - # Inference - y = self.model(x, augment, profile)[0] # forward - t.append(time_synchronized()) + with amp.autocast(enabled=p.device.type != 'cpu'): + # Inference + y = self.model(x, augment, profile)[0] # forward + t.append(time_synchronized()) - # Post-process - y = non_max_suppression(y, conf_thres=self.conf, iou_thres=self.iou, classes=self.classes) # NMS - for i in range(n): - scale_coords(shape1, y[i][:, :4], shape0[i]) + # Post-process + y = non_max_suppression(y, conf_thres=self.conf, iou_thres=self.iou, classes=self.classes) # NMS + for i in range(n): + scale_coords(shape1, y[i][:, :4], shape0[i]) - t.append(time_synchronized()) - return Detections(imgs, y, files, t, self.names, x.shape) + t.append(time_synchronized()) + return Detections(imgs, y, files, t, self.names, x.shape) class Detections: From 0cae7576a9241110157cd154fc2237e703c2719e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 10 Apr 2021 21:09:23 +0200 Subject: [PATCH 172/254] utils/wandb_logging PEP8 reformat (#2755) * wandb_logging PEP8 reformat * Update wandb_utils.py --- utils/wandb_logging/log_dataset.py | 6 ++---- utils/wandb_logging/wandb_utils.py | 31 +++++++++++++++--------------- 2 files changed, 17 insertions(+), 20 deletions(-) diff --git a/utils/wandb_logging/log_dataset.py b/utils/wandb_logging/log_dataset.py index 0ccb8735bd42..d7a521f1414b 100644 --- a/utils/wandb_logging/log_dataset.py +++ b/utils/wandb_logging/log_dataset.py @@ -1,10 +1,8 @@ import argparse -from pathlib import Path import yaml from wandb_utils import WandbLogger -from utils.datasets import LoadImagesAndLabels WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' @@ -21,6 +19,6 @@ def create_dataset_artifact(opt): parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset') parser.add_argument('--project', type=str, default='YOLOv5', help='name of W&B Project') opt = parser.parse_args() - opt.resume = False # Explicitly disallow 
resume check for dataset upload Job - + opt.resume = False # Explicitly disallow resume check for dataset upload job + create_dataset_artifact(opt) diff --git a/utils/wandb_logging/wandb_utils.py b/utils/wandb_logging/wandb_utils.py index d407e6cd54fb..d8f50ae8a80e 100644 --- a/utils/wandb_logging/wandb_utils.py +++ b/utils/wandb_logging/wandb_utils.py @@ -1,12 +1,9 @@ -import argparse import json -import os -import shutil import sys +from pathlib import Path + import torch import yaml -from datetime import datetime -from pathlib import Path from tqdm import tqdm sys.path.append(str(Path(__file__).parent.parent.parent)) # add utils/ to path @@ -33,6 +30,7 @@ def check_wandb_config_file(data_config_file): return wandb_config return data_config_file + def get_run_info(run_path): run_path = Path(remove_prefix(run_path, WANDB_ARTIFACT_PREFIX)) run_id = run_path.stem @@ -40,11 +38,12 @@ def get_run_info(run_path): model_artifact_name = 'run_' + run_id + '_model' return run_id, project, model_artifact_name + def check_wandb_resume(opt): process_wandb_config_ddp_mode(opt) if opt.global_rank not in [-1, 0] else None if isinstance(opt.resume, str): if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): - if opt.global_rank not in [-1, 0]: # For resuming DDP runs + if opt.global_rank not in [-1, 0]: # For resuming DDP runs run_id, project, model_artifact_name = get_run_info(opt.resume) api = wandb.Api() artifact = api.artifact(project + '/' + model_artifact_name + ':latest') @@ -53,6 +52,7 @@ def check_wandb_resume(opt): return True return None + def process_wandb_config_ddp_mode(opt): with open(opt.data) as f: data_dict = yaml.load(f, Loader=yaml.SafeLoader) # data dict @@ -63,7 +63,7 @@ def process_wandb_config_ddp_mode(opt): train_dir = train_artifact.download() train_path = Path(train_dir) / 'data/images/' data_dict['train'] = str(train_path) - + if isinstance(data_dict['val'], str) and data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX): api = wandb.Api() val_artifact = 
api.artifact(remove_prefix(data_dict['val']) + ':' + opt.artifact_alias) @@ -71,12 +71,11 @@ def process_wandb_config_ddp_mode(opt): val_path = Path(val_dir) / 'data/images/' data_dict['val'] = str(val_path) if train_dir or val_dir: - ddp_data_path = str(Path(val_dir) / 'wandb_local_data.yaml') + ddp_data_path = str(Path(val_dir) / 'wandb_local_data.yaml') with open(ddp_data_path, 'w') as f: yaml.dump(data_dict, f) opt.data = ddp_data_path - - + class WandbLogger(): def __init__(self, opt, name, run_id, data_dict, job_type='Training'): @@ -84,7 +83,7 @@ def __init__(self, opt, name, run_id, data_dict, job_type='Training'): self.job_type = job_type self.wandb, self.wandb_run, self.data_dict = wandb, None if not wandb else wandb.run, data_dict # It's more elegant to stick to 1 wandb.init call, but useful config data is overwritten in the WandbLogger's wandb.init call - if isinstance(opt.resume, str): # checks resume from artifact + if isinstance(opt.resume, str): # checks resume from artifact if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): run_id, project, model_artifact_name = get_run_info(opt.resume) model_artifact_name = WANDB_ARTIFACT_PREFIX + model_artifact_name @@ -98,7 +97,7 @@ def __init__(self, opt, name, run_id, data_dict, job_type='Training'): project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem, name=name, job_type=job_type, - id=run_id) if not wandb.run else wandb.run + id=run_id) if not wandb.run else wandb.run if self.wandb_run: if self.job_type == 'Training': if not opt.resume: @@ -110,15 +109,15 @@ def __init__(self, opt, name, run_id, data_dict, job_type='Training'): if self.job_type == 'Dataset Creation': self.data_dict = self.check_and_upload_dataset(opt) else: - print(f"{colorstr('wandb: ')}Install Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)") - + prefix = colorstr('wandb: ') + print(f"{prefix}Install Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)") def 
check_and_upload_dataset(self, opt): assert wandb, 'Install wandb to upload dataset' check_dataset(self.data_dict) config_path = self.log_dataset_artifact(opt.data, - opt.single_cls, - 'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem) + opt.single_cls, + 'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem) print("Created dataset config file ", config_path) with open(config_path) as f: wandb_data_dict = yaml.load(f, Loader=yaml.SafeLoader) From 6dd1083bbbc5d29643aafef3373853f03a317a92 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 11 Apr 2021 01:33:55 +0200 Subject: [PATCH 173/254] Tensorboard model visualization bug fix (#2758) This fix should allow for visualizing YOLOv5 model graphs correctly in Tensorboard by uncommenting line 335 in train.py: ```python if tb_writer: tb_writer.add_graph(torch.jit.trace(model, imgs, strict=False), []) # add model graph ``` The problem was that the detect() layer checks the input size to adapt the grid if required, and tracing does not seem to like this shape check (even if the shape is fine and no grid recomputation is required). The following will warn: https://github.com/ultralytics/yolov5/blob/0cae7576a9241110157cd154fc2237e703c2719e/train.py#L335 Solution is below. This is a YOLOv5s model displayed in TensorBoard. You can see the Detect() layer merging the 3 layers into a single output for example, and everything appears to work and visualize correctly. 
```python tb_writer.add_graph(torch.jit.trace(model, imgs, strict=False), []) ``` Screenshot 2021-04-11 at 01 10 09 --- train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/train.py b/train.py index 1f2b467e732b..82043b7fff34 100644 --- a/train.py +++ b/train.py @@ -332,7 +332,7 @@ def train(hyp, opt, device, tb_writer=None): Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start() # if tb_writer: # tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch) - # tb_writer.add_graph(model, imgs) # add model to tensorboard + # tb_writer.add_graph(torch.jit.trace(model, imgs, strict=False), []) # add model graph elif plots and ni == 10 and wandb_logger.wandb: wandb_logger.log({"Mosaics": [wandb_logger.wandb.Image(str(x), caption=x.name) for x in save_dir.glob('train*.jpg') if x.exists()]}) From 9029759cb3b39de724f148f0c9eee8c70e0ffdc4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 11 Apr 2021 16:28:32 +0200 Subject: [PATCH 174/254] Created using Colaboratory --- tutorial.ipynb | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 8a191609b24d..f334f5a15ef0 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -582,7 +582,9 @@ "source": [ "# 1. Inference\n", "\n", - "`detect.py` runs inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)." + "`detect.py` runs YOLOv5 inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/detect`. Example inference sources are:\n", + "\n", + " " ] }, { @@ -634,16 +636,6 @@ } ] }, - { - "cell_type": "markdown", - "metadata": { - "id": "4qbaa3iEcrcE" - }, - "source": [ - "Results are saved to `runs/detect`. 
A full list of available inference sources:\n", - " " - ] - }, { "cell_type": "markdown", "metadata": { From e2b7bc0b32ecf306fc179bb87bad82216a470b37 Mon Sep 17 00:00:00 2001 From: Ben Milanko Date: Mon, 12 Apr 2021 02:53:40 +1000 Subject: [PATCH 175/254] YouTube Livestream Detection (#2752) * Youtube livestream detection * dependancy update to auto install pafy * Remove print * include youtube_dl in deps * PEP8 reformat * youtube url check fix * reduce lines * add comment * update check_requirements * stream framerate fix * Update README.md * cleanup * PEP8 * remove cap.retrieve() failure code Co-authored-by: Glenn Jocher --- README.md | 5 ++--- detect.py | 2 +- utils/datasets.py | 23 +++++++++++++++-------- utils/general.py | 20 ++++++++++++-------- 4 files changed, 30 insertions(+), 20 deletions(-) diff --git a/README.md b/README.md index f51ccd97712f..d409b3fdeadf 100755 --- a/README.md +++ b/README.md @@ -92,9 +92,8 @@ $ python detect.py --source 0 # webcam file.mp4 # video path/ # directory path/*.jpg # glob - rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa # rtsp stream - rtmp://192.168.1.105/live/test # rtmp stream - http://112.50.243.8/PLTV/88888888/224/3221225900/1.m3u8 # http stream + 'https://youtu.be/NUsoVlDFqZg' # YouTube video + 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream ``` To run inference on example images in `data/images`: diff --git a/detect.py b/detect.py index 2a4d6f4550c8..c0707da69e6a 100644 --- a/detect.py +++ b/detect.py @@ -19,7 +19,7 @@ def detect(save_img=False): source, weights, view_img, save_txt, imgsz = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size save_img = not opt.nosave and not source.endswith('.txt') # save inference images webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith( - ('rtsp://', 'rtmp://', 'http://')) + ('rtsp://', 'rtmp://', 'http://', 'https://')) # Directories save_dir = Path(increment_path(Path(opt.project) / opt.name, 
exist_ok=opt.exist_ok)) # increment run diff --git a/utils/datasets.py b/utils/datasets.py index 5ef89ab6ea83..ec597b628106 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -20,8 +20,8 @@ from torch.utils.data import Dataset from tqdm import tqdm -from utils.general import xyxy2xywh, xywh2xyxy, xywhn2xyxy, xyn2xy, segment2box, segments2boxes, resample_segments, \ - clean_str +from utils.general import check_requirements, xyxy2xywh, xywh2xyxy, xywhn2xyxy, xyn2xy, segment2box, segments2boxes, \ + resample_segments, clean_str from utils.torch_utils import torch_distributed_zero_first # Parameters @@ -275,14 +275,20 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32): for i, s in enumerate(sources): # Start the thread to read frames from the video stream print(f'{i + 1}/{n}: {s}... ', end='') - cap = cv2.VideoCapture(eval(s) if s.isnumeric() else s) + url = eval(s) if s.isnumeric() else s + if 'youtube.com/' in url or 'youtu.be/' in url: # if source is YouTube video + check_requirements(('pafy', 'youtube_dl')) + import pafy + url = pafy.new(url).getbest(preftype="mp4").url + cap = cv2.VideoCapture(url) assert cap.isOpened(), f'Failed to open {s}' w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) - fps = cap.get(cv2.CAP_PROP_FPS) % 100 + self.fps = cap.get(cv2.CAP_PROP_FPS) % 100 + _, self.imgs[i] = cap.read() # guarantee first frame thread = Thread(target=self.update, args=([i, cap]), daemon=True) - print(f' success ({w}x{h} at {fps:.2f} FPS).') + print(f' success ({w}x{h} at {self.fps:.2f} FPS).') thread.start() print('') # newline @@ -303,7 +309,7 @@ def update(self, index, cap): success, im = cap.retrieve() self.imgs[index] = im if success else self.imgs[index] * 0 n = 0 - time.sleep(0.01) # wait time + time.sleep(1 / self.fps) # wait time def __iter__(self): self.count = -1 @@ -444,7 +450,7 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r gb += self.imgs[i].nbytes 
pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB)' pbar.close() - + def cache_labels(self, path=Path('./labels.cache'), prefix=''): # Cache dataset labels, check images and read shapes x = {} # dict @@ -489,7 +495,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): pbar.desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels... " \ f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted" pbar.close() - + if nf == 0: print(f'{prefix}WARNING: No labels found in {path}. See {help_url}') @@ -1034,6 +1040,7 @@ def extract_boxes(path='../coco128/'): # from utils.datasets import *; extract_ b[[1, 3]] = np.clip(b[[1, 3]], 0, h) assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}' + def autosplit(path='../coco128', weights=(0.9, 0.1, 0.0), annotated_only=False): """ Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files Usage: from utils.datasets import *; autosplit('../coco128') diff --git a/utils/general.py b/utils/general.py index a8aad16a8ab9..5482629ac8c0 100755 --- a/utils/general.py +++ b/utils/general.py @@ -91,17 +91,20 @@ def check_git_status(): print(e) -def check_requirements(file='requirements.txt', exclude=()): - # Check installed dependencies meet requirements +def check_requirements(requirements='requirements.txt', exclude=()): + # Check installed dependencies meet requirements (pass *.txt file or list of packages) import pkg_resources as pkg prefix = colorstr('red', 'bold', 'requirements:') - file = Path(file) - if not file.exists(): - print(f"{prefix} {file.resolve()} not found, check failed.") - return + if isinstance(requirements, (str, Path)): # requirements.txt file + file = Path(requirements) + if not file.exists(): + print(f"{prefix} {file.resolve()} not found, check failed.") + return + requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(file.open()) if x.name not in exclude] + else: # list or tuple of packages + requirements = [x for x in 
requirements if x not in exclude] n = 0 # number of packages updates - requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(file.open()) if x.name not in exclude] for r in requirements: try: pkg.require(r) @@ -111,7 +114,8 @@ def check_requirements(file='requirements.txt', exclude=()): print(subprocess.check_output(f"pip install '{e.req}'", shell=True).decode()) if n: # if packages updated - s = f"{prefix} {n} package{'s' * (n > 1)} updated per {file.resolve()}\n" \ + source = file.resolve() if 'file' in locals() else requirements + s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \ f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n" print(emojis(s)) # emoji-safe From f5b8f7d54c9fa69210da0177fec7ac2d9e4a627c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 11 Apr 2021 19:23:47 +0200 Subject: [PATCH 176/254] YOLOv5 v5.0 Release (#2762) --- README.md | 43 +++++++++++++---------- hubconf.py | 92 +++++++++++++++++++++----------------------------- utils/plots.py | 6 ++-- 3 files changed, 66 insertions(+), 75 deletions(-) diff --git a/README.md b/README.md index d409b3fdeadf..02908db0fd18 100755 --- a/README.md +++ b/README.md @@ -6,36 +6,43 @@ This repository represents Ultralytics open-source research into future object detection methods, and incorporates lessons learned and best practices evolved over thousands of hours of training and evolution on anonymized client datasets. **All code and models are under active development, and are subject to modification or deletion without notice.** Use at your own risk. -

+

+
+ YOLOv5-P5 640 Figure (click to expand) + +

+
Figure Notes (click to expand) * GPU Speed measures end-to-end time per image averaged over 5000 COCO val2017 images using a V100 GPU with batch size 32, and includes image preprocessing, PyTorch FP16 inference, postprocessing and NMS. * EfficientDet data from [google/automl](https://github.com/google/automl) at batch size 8. + * **Reproduce** by `python test.py --task study --data coco.yaml --iou 0.7 --weights yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt`
+- **April 11, 2021**: [v5.0 release](https://github.com/ultralytics/yolov5/releases/tag/v5.0): YOLOv5-P6 1280 models, [AWS](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart), [Supervise.ly](https://github.com/ultralytics/yolov5/issues/2518) and [YouTube](https://github.com/ultralytics/yolov5/pull/2752) integrations. - **January 5, 2021**: [v4.0 release](https://github.com/ultralytics/yolov5/releases/tag/v4.0): nn.SiLU() activations, [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme) logging, [PyTorch Hub](https://pytorch.org/hub/ultralytics_yolov5/) integration. - **August 13, 2020**: [v3.0 release](https://github.com/ultralytics/yolov5/releases/tag/v3.0): nn.Hardswish() activations, data autodownload, native AMP. - **July 23, 2020**: [v2.0 release](https://github.com/ultralytics/yolov5/releases/tag/v2.0): improved model definition, training and mAP. -- **June 22, 2020**: [PANet](https://arxiv.org/abs/1803.01534) updates: new heads, reduced parameters, improved speed and mAP [364fcfd](https://github.com/ultralytics/yolov5/commit/364fcfd7dba53f46edd4f04c037a039c0a287972). -- **June 19, 2020**: [FP16](https://pytorch.org/docs/stable/nn.html#torch.nn.Module.half) as new default for smaller checkpoints and faster inference [d4c6674](https://github.com/ultralytics/yolov5/commit/d4c6674c98e19df4c40e33a777610a18d1961145). 
## Pretrained Checkpoints -| Model | size | APval | APtest | AP50 | SpeedV100 | FPSV100 || params | GFLOPS | -|---------- |------ |------ |------ |------ | -------- | ------| ------ |------ | :------: | -| [YOLOv5s](https://github.com/ultralytics/yolov5/releases) |640 |36.8 |36.8 |55.6 |**2.2ms** |**455** ||7.3M |17.0 -| [YOLOv5m](https://github.com/ultralytics/yolov5/releases) |640 |44.5 |44.5 |63.1 |2.9ms |345 ||21.4M |51.3 -| [YOLOv5l](https://github.com/ultralytics/yolov5/releases) |640 |48.1 |48.1 |66.4 |3.8ms |264 ||47.0M |115.4 -| [YOLOv5x](https://github.com/ultralytics/yolov5/releases) |640 |**50.1** |**50.1** |**68.7** |6.0ms |167 ||87.7M |218.8 -| | | | | | | || | -| [YOLOv5x](https://github.com/ultralytics/yolov5/releases) + TTA |832 |**51.9** |**51.9** |**69.6** |24.9ms |40 ||87.7M |1005.3 - - +[assets]: https://github.com/ultralytics/yolov5/releases + +Model |size
(pixels) |mAPval
0.5:0.95 |mAPtest
0.5:0.95 |mAPval
0.5 |Speed
V100 (ms) | |params
(M) |FLOPS
640 (B) +--- |--- |--- |--- |--- |--- |---|--- |--- +[YOLOv5s][assets] |640 |36.7 |36.7 |55.4 |**2.0** | |7.3 |17.0 +[YOLOv5m][assets] |640 |44.5 |44.5 |63.3 |2.7 | |21.4 |51.3 +[YOLOv5l][assets] |640 |48.2 |48.2 |66.9 |3.8 | |47.0 |115.4 +[YOLOv5x][assets] |640 |**50.4** |**50.4** |**68.8** |6.1 | |87.7 |218.8 +| | | | | | || | +[YOLOv5s6][assets] |1280 |43.3 |43.3 |61.9 |**4.3** | |12.7 |17.4 +[YOLOv5m6][assets] |1280 |50.5 |50.5 |68.7 |8.4 | |35.9 |52.4 +[YOLOv5l6][assets] |1280 |53.4 |53.4 |71.1 |12.3 | |77.2 |117.7 +[YOLOv5x6][assets] |1280 |**54.4** |**54.4** |**72.0** |22.4 | |141.8 |222.9 +| | | | | | || | +[YOLOv5x6][assets] TTA |1280 |**55.0** |**55.0** |**72.0** |70.8 | |- |-
Table Notes (click to expand) @@ -44,7 +51,7 @@ This repository represents Ultralytics open-source research into future object d * AP values are for single-model single-scale unless otherwise noted. **Reproduce mAP** by `python test.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` * SpeedGPU averaged over 5000 COCO val2017 images using a GCP [n1-standard-16](https://cloud.google.com/compute/docs/machine-types#n1_standard_machine_types) V100 instance, and includes FP16 inference, postprocessing and NMS. **Reproduce speed** by `python test.py --data coco.yaml --img 640 --conf 0.25 --iou 0.45` * All checkpoints are trained to 300 epochs with default settings and hyperparameters (no autoaugmentation). - * Test Time Augmentation ([TTA](https://github.com/ultralytics/yolov5/issues/303)) includes reflection and scale augmentation. **Reproduce TTA** by `python test.py --data coco.yaml --img 832 --iou 0.65 --augment` + * Test Time Augmentation ([TTA](https://github.com/ultralytics/yolov5/issues/303)) includes reflection and scale augmentation. **Reproduce TTA** by `python test.py --data coco.yaml --img 1536 --iou 0.7 --augment`
@@ -85,7 +92,7 @@ YOLOv5 may be run in any of the following up-to-date verified environments (with ## Inference -detect.py runs inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases) and saving results to `runs/detect`. +`detect.py` runs inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases) and saving results to `runs/detect`. ```bash $ python detect.py --source 0 # webcam file.jpg # image diff --git a/hubconf.py b/hubconf.py index 0f9aa150a34e..d26db45695de 100644 --- a/hubconf.py +++ b/hubconf.py @@ -55,84 +55,68 @@ def create(name, pretrained, channels, classes, autoshape): raise Exception(s) from e -def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True): - """YOLOv5-small model from https://github.com/ultralytics/yolov5 +def custom(path_or_model='path/to/model.pt', autoshape=True): + """YOLOv5-custom model https://github.com/ultralytics/yolov5 - Arguments: - pretrained (bool): load pretrained weights into the model, default=False - channels (int): number of input channels, default=3 - classes (int): number of model classes, default=80 + Arguments (3 options): + path_or_model (str): 'path/to/model.pt' + path_or_model (dict): torch.load('path/to/model.pt') + path_or_model (nn.Module): torch.load('path/to/model.pt')['model'] Returns: pytorch model """ - return create('yolov5s', pretrained, channels, classes, autoshape) + model = torch.load(path_or_model) if isinstance(path_or_model, str) else path_or_model # load checkpoint + if isinstance(model, dict): + model = model['ema' if model.get('ema') else 'model'] # load model + hub_model = Model(model.yaml).to(next(model.parameters()).device) # create + hub_model.load_state_dict(model.float().state_dict()) # load state_dict + hub_model.names = model.names # class names + if autoshape: + hub_model = hub_model.autoshape() 
# for file/URI/PIL/cv2/np inputs and NMS + device = select_device('0' if torch.cuda.is_available() else 'cpu') # default to GPU if available + return hub_model.to(device) -def yolov5m(pretrained=True, channels=3, classes=80, autoshape=True): - """YOLOv5-medium model from https://github.com/ultralytics/yolov5 - Arguments: - pretrained (bool): load pretrained weights into the model, default=False - channels (int): number of input channels, default=3 - classes (int): number of model classes, default=80 +def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True): + # YOLOv5-small model https://github.com/ultralytics/yolov5 + return create('yolov5s', pretrained, channels, classes, autoshape) - Returns: - pytorch model - """ + +def yolov5m(pretrained=True, channels=3, classes=80, autoshape=True): + # YOLOv5-medium model https://github.com/ultralytics/yolov5 return create('yolov5m', pretrained, channels, classes, autoshape) def yolov5l(pretrained=True, channels=3, classes=80, autoshape=True): - """YOLOv5-large model from https://github.com/ultralytics/yolov5 - - Arguments: - pretrained (bool): load pretrained weights into the model, default=False - channels (int): number of input channels, default=3 - classes (int): number of model classes, default=80 - - Returns: - pytorch model - """ + # YOLOv5-large model https://github.com/ultralytics/yolov5 return create('yolov5l', pretrained, channels, classes, autoshape) def yolov5x(pretrained=True, channels=3, classes=80, autoshape=True): - """YOLOv5-xlarge model from https://github.com/ultralytics/yolov5 + # YOLOv5-xlarge model https://github.com/ultralytics/yolov5 + return create('yolov5x', pretrained, channels, classes, autoshape) - Arguments: - pretrained (bool): load pretrained weights into the model, default=False - channels (int): number of input channels, default=3 - classes (int): number of model classes, default=80 - Returns: - pytorch model - """ - return create('yolov5x', pretrained, channels, classes, 
autoshape) +def yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True): + # YOLOv5-small model https://github.com/ultralytics/yolov5 + return create('yolov5s6', pretrained, channels, classes, autoshape) -def custom(path_or_model='path/to/model.pt', autoshape=True): - """YOLOv5-custom model from https://github.com/ultralytics/yolov5 +def yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True): + # YOLOv5-medium model https://github.com/ultralytics/yolov5 + return create('yolov5m6', pretrained, channels, classes, autoshape) - Arguments (3 options): - path_or_model (str): 'path/to/model.pt' - path_or_model (dict): torch.load('path/to/model.pt') - path_or_model (nn.Module): torch.load('path/to/model.pt')['model'] - Returns: - pytorch model - """ - model = torch.load(path_or_model) if isinstance(path_or_model, str) else path_or_model # load checkpoint - if isinstance(model, dict): - model = model['ema' if model.get('ema') else 'model'] # load model +def yolov5l6(pretrained=True, channels=3, classes=80, autoshape=True): + # YOLOv5-large model https://github.com/ultralytics/yolov5 + return create('yolov5l6', pretrained, channels, classes, autoshape) - hub_model = Model(model.yaml).to(next(model.parameters()).device) # create - hub_model.load_state_dict(model.float().state_dict()) # load state_dict - hub_model.names = model.names # class names - if autoshape: - hub_model = hub_model.autoshape() # for file/URI/PIL/cv2/np inputs and NMS - device = select_device('0' if torch.cuda.is_available() else 'cpu') # default to GPU if available - return hub_model.to(device) + +def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True): + # YOLOv5-xlarge model https://github.com/ultralytics/yolov5 + return create('yolov5x6', pretrained, channels, classes, autoshape) if __name__ == '__main__': diff --git a/utils/plots.py b/utils/plots.py index 47e7b7b74f1c..5b23a34f5141 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -243,7 +243,7 @@ def 
plot_study_txt(path='', x=None): # from utils.plots import *; plot_study_tx # ax = ax.ravel() fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True) - # for f in [Path(path) / f'study_coco_{x}.txt' for x in ['yolov5s', 'yolov5m', 'yolov5l', 'yolov5x']]: + # for f in [Path(path) / f'study_coco_{x}.txt' for x in ['yolov5s6', 'yolov5m6', 'yolov5l6', 'yolov5x6']]: for f in sorted(Path(path).glob('study*.txt')): y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T x = np.arange(y.shape[1]) if x is None else np.array(x) @@ -253,7 +253,7 @@ def plot_study_txt(path='', x=None): # from utils.plots import *; plot_study_tx # ax[i].set_title(s[i]) j = y[3].argmax() + 1 - ax2.plot(y[6, :j], y[3, :j] * 1E2, '.-', linewidth=2, markersize=8, + ax2.plot(y[6, 1:j], y[3, 1:j] * 1E2, '.-', linewidth=2, markersize=8, label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO')) ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5], @@ -261,7 +261,7 @@ def plot_study_txt(path='', x=None): # from utils.plots import *; plot_study_tx ax2.grid(alpha=0.2) ax2.set_yticks(np.arange(20, 60, 5)) - ax2.set_xlim(0, 30) + ax2.set_xlim(0, 57) ax2.set_ylim(30, 55) ax2.set_xlabel('GPU Speed (ms/img)') ax2.set_ylabel('COCO AP val') From 0f395b3e3bccbc019ab3d1cbd41303a5b50dc0f0 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 11 Apr 2021 23:11:43 +0200 Subject: [PATCH 177/254] YOLOv5 v5.0 Release patch 1 (#2764) * torch.jit.trace(model, img, strict=False) * Update check_file() * Update hubconf.py * Update README.md --- README.md | 2 +- hubconf.py | 6 +++--- models/export.py | 2 +- utils/general.py | 6 +++--- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 02908db0fd18..577c908de304 100755 --- a/README.md +++ b/README.md @@ -33,7 +33,7 @@ This repository represents Ultralytics open-source research into future object d Model |size
(pixels) |mAPval
0.5:0.95 |mAPtest
0.5:0.95 |mAPval
0.5 |Speed
V100 (ms) | |params
(M) |FLOPS
640 (B) --- |--- |--- |--- |--- |--- |---|--- |--- [YOLOv5s][assets] |640 |36.7 |36.7 |55.4 |**2.0** | |7.3 |17.0 -[YOLOv5m][assets] |640 |44.5 |44.5 |63.3 |2.7 | |21.4 |51.3 +[YOLOv5m][assets] |640 |44.5 |44.5 |63.1 |2.7 | |21.4 |51.3 [YOLOv5l][assets] |640 |48.2 |48.2 |66.9 |3.8 | |47.0 |115.4 [YOLOv5x][assets] |640 |**50.4** |**50.4** |**68.8** |6.1 | |87.7 |218.8 | | | | | | || | diff --git a/hubconf.py b/hubconf.py index d26db45695de..a2a43a7a10cb 100644 --- a/hubconf.py +++ b/hubconf.py @@ -1,4 +1,4 @@ -"""File for accessing YOLOv5 models via PyTorch Hub https://pytorch.org/hub/ultralytics_yolov5/ +"""YOLOv5 PyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5/ Usage: import torch @@ -31,9 +31,9 @@ def create(name, pretrained, channels, classes, autoshape): Returns: pytorch model """ - config = Path(__file__).parent / 'models' / f'{name}.yaml' # model.yaml path try: - model = Model(config, channels, classes) + cfg = list((Path(__file__).parent / 'models').rglob(f'{name}.yaml'))[0] # model.yaml path + model = Model(cfg, channels, classes) if pretrained: fname = f'{name}.pt' # checkpoint filename attempt_download(fname) # download if not found locally diff --git a/models/export.py b/models/export.py index 11e60c7a583d..0bb5398e4841 100644 --- a/models/export.py +++ b/models/export.py @@ -62,7 +62,7 @@ try: print('\nStarting TorchScript export with torch %s...' 
% torch.__version__) f = opt.weights.replace('.pt', '.torchscript.pt') # filename - ts = torch.jit.trace(model, img) + ts = torch.jit.trace(model, img, strict=False) ts.save(f) print('TorchScript export success, saved as %s' % f) except Exception as e: diff --git a/utils/general.py b/utils/general.py index 5482629ac8c0..413eb5b8fa97 100755 --- a/utils/general.py +++ b/utils/general.py @@ -144,12 +144,12 @@ def check_imshow(): def check_file(file): # Search for file if not found - if os.path.isfile(file) or file == '': + if Path(file).is_file() or file == '': return file else: files = glob.glob('./**/' + file, recursive=True) # find file - assert len(files), 'File Not Found: %s' % file # assert file was found - assert len(files) == 1, "Multiple files match '%s', specify exact path: %s" % (file, files) # assert unique + assert len(files), f'File Not Found: {file}' # assert file was found + assert len(files) == 1, f"Multiple files match '{file}', specify exact path: {files}" # assert unique return files[0] # return file From 54d65160b799ec75c2a8c01de6cb069bf417eabe Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 12 Apr 2021 12:26:28 +0200 Subject: [PATCH 178/254] Update tutorial.ipynb --- tutorial.ipynb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index f334f5a15ef0..881632daa375 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -607,7 +607,7 @@ "output_type": "stream", "text": [ "Namespace(agnostic_nms=False, augment=False, classes=None, conf_thres=0.25, device='', exist_ok=False, img_size=640, iou_thres=0.45, name='exp', project='runs/detect', save_conf=False, save_txt=False, source='data/images/', update=False, view_img=False, weights=['yolov5s.pt'])\n", - "YOLOv5 🚀 v4.0-137-g9b11f0c torch 1.8.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", + "YOLOv5 🚀 v5.0-1-g0f395b3 torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", "\n", "Fusing layers... 
\n", "Model Summary: 224 layers, 7266973 parameters, 0 gradients, 17.0 GFLOPS\n", @@ -1263,4 +1263,4 @@ "outputs": [] } ] -} \ No newline at end of file +} From 6b718e91275fde8367ec8d3fc4cda5d7ba6a5ca0 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 12 Apr 2021 12:31:28 +0200 Subject: [PATCH 179/254] Created using Colaboratory --- tutorial.ipynb | 164 ++++++++++++++++++++++++------------------------- 1 file changed, 80 insertions(+), 84 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 881632daa375..e4344d3ddcec 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -16,7 +16,7 @@ "accelerator": "GPU", "widgets": { "application/vnd.jupyter.widget-state+json": { - "b54ab52f1d4f4903897ab6cd49a3b9b2": { + "8815626359d84416a2f44a95500580a4": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "state": { @@ -28,15 +28,15 @@ "_view_count": null, "_view_module_version": "1.5.0", "box_style": "", - "layout": "IPY_MODEL_1852f93fc2714d40adccb8aa161c42ff", + "layout": "IPY_MODEL_3b85609c4ce94a74823f2cfe141ce68e", "_model_module": "@jupyter-widgets/controls", "children": [ - "IPY_MODEL_3293cfe869bd4a1bbbe18b49b6815de1", - "IPY_MODEL_8d5ee8b8ab6d46b98818bd2c562ddd1c" + "IPY_MODEL_876609753c2946248890344722963d44", + "IPY_MODEL_8abfdd8778e44b7ca0d29881cb1ada05" ] } }, - "1852f93fc2714d40adccb8aa161c42ff": { + "3b85609c4ce94a74823f2cfe141ce68e": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -87,12 +87,12 @@ "left": null } }, - "3293cfe869bd4a1bbbe18b49b6815de1": { + "876609753c2946248890344722963d44": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "state": { "_view_name": "ProgressView", - "style": "IPY_MODEL_49fcb2adb0354430b76f491af98abfe9", + "style": "IPY_MODEL_78c6c3d97c484916b8ee167c63556800", "_dom_classes": [], "description": "100%", "_model_name": "FloatProgressModel", @@ -107,30 +107,30 @@ "min": 0, "description_tooltip": null, "_model_module": 
"@jupyter-widgets/controls", - "layout": "IPY_MODEL_c7d76e0c53064363add56b8d05e561f5" + "layout": "IPY_MODEL_9dd0f182db5d45378ceafb855e486eb8" } }, - "8d5ee8b8ab6d46b98818bd2c562ddd1c": { + "8abfdd8778e44b7ca0d29881cb1ada05": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "state": { "_view_name": "HTMLView", - "style": "IPY_MODEL_48f321f789634aa584f8a29a3b925dd5", + "style": "IPY_MODEL_a3dab28b45c247089a3d1b8b09f327de", "_dom_classes": [], "description": "", "_model_name": "HTMLModel", "placeholder": "​", "_view_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", - "value": " 781M/781M [00:13<00:00, 62.6MB/s]", + "value": " 781M/781M [08:43<00:00, 1.56MB/s]", "_view_count": null, "_view_module_version": "1.5.0", "description_tooltip": null, "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_6610d6275f3e49d9937d50ed0a105947" + "layout": "IPY_MODEL_32451332b7a94ba9aacddeaa6ac94d50" } }, - "49fcb2adb0354430b76f491af98abfe9": { + "78c6c3d97c484916b8ee167c63556800": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "state": { @@ -145,7 +145,7 @@ "_model_module": "@jupyter-widgets/controls" } }, - "c7d76e0c53064363add56b8d05e561f5": { + "9dd0f182db5d45378ceafb855e486eb8": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -196,7 +196,7 @@ "left": null } }, - "48f321f789634aa584f8a29a3b925dd5": { + "a3dab28b45c247089a3d1b8b09f327de": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "state": { @@ -210,7 +210,7 @@ "_model_module": "@jupyter-widgets/controls" } }, - "6610d6275f3e49d9937d50ed0a105947": { + "32451332b7a94ba9aacddeaa6ac94d50": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -550,7 +550,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "20027455-bf84-41fd-c902-b7282d53c91d" + "outputId": "4576b05f-d6d1-404a-fc99-5663c71e3dc4" }, 
"source": [ "!git clone https://github.com/ultralytics/yolov5 # clone repo\n", @@ -563,12 +563,12 @@ "clear_output()\n", "print('Setup complete. Using torch %s %s' % (torch.__version__, torch.cuda.get_device_properties(0) if torch.cuda.is_available() else 'CPU'))" ], - "execution_count": null, + "execution_count": 1, "outputs": [ { "output_type": "stream", "text": [ - "Setup complete. Using torch 1.8.0+cu101 _CudaDeviceProperties(name='Tesla V100-SXM2-16GB', major=7, minor=0, total_memory=16160MB, multi_processor_count=80)\n" + "Setup complete. Using torch 1.8.1+cu101 _CudaDeviceProperties(name='Tesla V100-SXM2-16GB', major=7, minor=0, total_memory=16160MB, multi_processor_count=80)\n" ], "name": "stdout" } @@ -607,7 +607,7 @@ "output_type": "stream", "text": [ "Namespace(agnostic_nms=False, augment=False, classes=None, conf_thres=0.25, device='', exist_ok=False, img_size=640, iou_thres=0.45, name='exp', project='runs/detect', save_conf=False, save_txt=False, source='data/images/', update=False, view_img=False, weights=['yolov5s.pt'])\n", - "YOLOv5 🚀 v5.0-1-g0f395b3 torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", + "YOLOv5 🚀 v4.0-137-g9b11f0c torch 1.8.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", "\n", "Fusing layers... 
\n", "Model Summary: 224 layers, 7266973 parameters, 0 gradients, 17.0 GFLOPS\n", @@ -664,30 +664,30 @@ "base_uri": "https://localhost:8080/", "height": 65, "referenced_widgets": [ - "b54ab52f1d4f4903897ab6cd49a3b9b2", - "1852f93fc2714d40adccb8aa161c42ff", - "3293cfe869bd4a1bbbe18b49b6815de1", - "8d5ee8b8ab6d46b98818bd2c562ddd1c", - "49fcb2adb0354430b76f491af98abfe9", - "c7d76e0c53064363add56b8d05e561f5", - "48f321f789634aa584f8a29a3b925dd5", - "6610d6275f3e49d9937d50ed0a105947" + "8815626359d84416a2f44a95500580a4", + "3b85609c4ce94a74823f2cfe141ce68e", + "876609753c2946248890344722963d44", + "8abfdd8778e44b7ca0d29881cb1ada05", + "78c6c3d97c484916b8ee167c63556800", + "9dd0f182db5d45378ceafb855e486eb8", + "a3dab28b45c247089a3d1b8b09f327de", + "32451332b7a94ba9aacddeaa6ac94d50" ] }, - "outputId": "f0884441-78d9-443c-afa6-d00ec387908d" + "outputId": "81521192-cf67-4a47-a4cc-434cb0ebc363" }, "source": [ "# Download COCO val2017\n", "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017val.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../ && rm tmp.zip" ], - "execution_count": null, + "execution_count": 2, "outputs": [ { "output_type": "display_data", "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "b54ab52f1d4f4903897ab6cd49a3b9b2", + "model_id": "8815626359d84416a2f44a95500580a4", "version_minor": 0, "version_major": 2 }, @@ -715,57 +715,57 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "5b54c11e-9f4b-4d9a-8e6e-6a2a4f0cc60d" + "outputId": "2340b131-9943-4cd6-fd3a-8272aeb0774f" }, "source": [ "# Run YOLOv5x on COCO val2017\n", "!python test.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65" ], - "execution_count": null, + "execution_count": 6, "outputs": [ { "output_type": "stream", "text": [ "Namespace(augment=False, batch_size=32, conf_thres=0.001, data='./data/coco.yaml', device='', exist_ok=False, img_size=640, iou_thres=0.65, name='exp', project='runs/test', 
save_conf=False, save_hybrid=False, save_json=True, save_txt=False, single_cls=False, task='val', verbose=False, weights=['yolov5x.pt'])\n", - "YOLOv5 🚀 v4.0-137-g9b11f0c torch 1.8.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", + "YOLOv5 🚀 v5.0-1-g0f395b3 torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", "\n", - "Downloading https://github.com/ultralytics/yolov5/releases/download/v4.0/yolov5x.pt to yolov5x.pt...\n", - "100% 168M/168M [00:02<00:00, 59.1MB/s]\n", + "Downloading https://github.com/ultralytics/yolov5/releases/download/v5.0/yolov5x.pt to yolov5x.pt...\n", + "100% 168M/168M [00:05<00:00, 32.3MB/s]\n", "\n", "Fusing layers... \n", "Model Summary: 476 layers, 87730285 parameters, 0 gradients, 218.8 GFLOPS\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco/val2017' for images and labels... 4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:01<00:00, 3236.68it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco/val2017' images and labels... 4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:01<00:00, 3102.29it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: ../coco/val2017.cache\n", - " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:20<00:00, 1.95it/s]\n", - " all 5000 36335 0.749 0.619 0.68 0.486\n", - "Speed: 5.3/1.7/6.9 ms inference/NMS/total per 640x640 image at batch-size 32\n", + " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:23<00:00, 1.87it/s]\n", + " all 5000 36335 0.745 0.627 0.68 0.49\n", + "Speed: 5.3/1.6/6.9 ms inference/NMS/total per 640x640 image at batch-size 32\n", "\n", "Evaluating pycocotools mAP... 
saving runs/test/exp/yolov5x_predictions.json...\n", "loading annotations into memory...\n", - "Done (t=0.43s)\n", + "Done (t=0.48s)\n", "creating index...\n", "index created!\n", "Loading and preparing results...\n", - "DONE (t=5.10s)\n", + "DONE (t=5.08s)\n", "creating index...\n", "index created!\n", "Running per image evaluation...\n", "Evaluate annotation type *bbox*\n", - "DONE (t=88.52s).\n", + "DONE (t=90.51s).\n", "Accumulating evaluation results...\n", - "DONE (t=17.17s).\n", - " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.501\n", - " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.687\n", - " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.544\n", - " Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.338\n", - " Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.548\n", - " Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.637\n", - " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.378\n", - " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.628\n", - " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.680\n", - " Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.520\n", - " Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.729\n", - " Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.826\n", + "DONE (t=15.16s).\n", + " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.504\n", + " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.688\n", + " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.546\n", + " Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.351\n", + " Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.551\n", + " Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 
0.644\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.382\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.629\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.681\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.524\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.735\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.827\n", "Results saved to runs/test/exp\n" ], "name": "stdout" @@ -916,28 +916,25 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "cf494627-09b9-4399-ff0c-fdb62b32340a" + "outputId": "e715d09c-5d93-4912-a0df-9da0893f2014" }, "source": [ "# Train YOLOv5s on COCO128 for 3 epochs\n", "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --nosave --cache" ], - "execution_count": null, + "execution_count": 12, "outputs": [ { "output_type": "stream", "text": [ "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", - "YOLOv5 🚀 v4.0-137-g9b11f0c torch 1.8.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", + "YOLOv5 🚀 v5.0-2-g54d6516 torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", "\n", - "Namespace(adam=False, batch_size=16, bucket='', cache_images=True, cfg='', data='./data/coco128.yaml', device='', entity=None, epochs=3, evolve=False, exist_ok=False, global_rank=-1, hyp='data/hyp.scratch.yaml', image_weights=False, img_size=[640, 640], linear_lr=False, local_rank=-1, log_artifacts=False, log_imgs=16, multi_scale=False, name='exp', noautoanchor=False, nosave=True, notest=False, project='runs/train', quad=False, rect=False, resume=False, save_dir='runs/train/exp', single_cls=False, sync_bn=False, total_batch_size=16, weights='yolov5s.pt', workers=8, world_size=1)\n", - "\u001b[34m\u001b[1mwandb: \u001b[0mInstall Weights & Biases for YOLOv5 logging with 'pip install wandb' 
(recommended)\n", - "Start Tensorboard with \"tensorboard --logdir runs/train\", view at http://localhost:6006/\n", - "2021-03-14 04:18:58.124672: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\n", + "Namespace(adam=False, artifact_alias='latest', batch_size=16, bbox_interval=-1, bucket='', cache_images=True, cfg='', data='./data/coco128.yaml', device='', entity=None, epochs=3, evolve=False, exist_ok=False, global_rank=-1, hyp='data/hyp.scratch.yaml', image_weights=False, img_size=[640, 640], label_smoothing=0.0, linear_lr=False, local_rank=-1, multi_scale=False, name='exp', noautoanchor=False, nosave=True, notest=False, project='runs/train', quad=False, rect=False, resume=False, save_dir='runs/train/exp', save_period=-1, single_cls=False, sync_bn=False, total_batch_size=16, upload_dataset=False, weights='yolov5s.pt', workers=8, world_size=1)\n", + "\u001b[34m\u001b[1mtensorboard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n", + "2021-04-12 10:29:58.539457: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\n", "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.2, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0\n", - "Downloading https://github.com/ultralytics/yolov5/releases/download/v4.0/yolov5s.pt to yolov5s.pt...\n", - "100% 14.1M/14.1M [00:00<00:00, 63.1MB/s]\n", - "\n", + "\u001b[34m\u001b[1mwandb: \u001b[0mInstall Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)\n", "\n", " from n params module arguments \n", " 0 -1 1 3520 models.common.Focus [3, 32, 3] \n", @@ 
-970,11 +967,10 @@ "Transferred 362/362 items from yolov5s.pt\n", "Scaled weight_decay = 0.0005\n", "Optimizer groups: 62 .bias, 62 conv.weight, 59 other\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '../coco128/labels/train2017' for images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 2956.76it/s]\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: ../coco128/labels/train2017.cache\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 205.30it/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco128/labels/train2017.cache' for images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 604584.36it/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 144.17it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '../coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 796544.38it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 176.73it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 500812.42it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 134.10it/s]\n", "Plotting labels... \n", "\n", "\u001b[34m\u001b[1mautoanchor: \u001b[0mAnalyzing anchors... 
anchors/target = 4.26, Best Possible Recall (BPR) = 0.9946\n", @@ -984,23 +980,23 @@ "Starting training for 3 epochs...\n", "\n", " Epoch gpu_mem box obj cls total labels img_size\n", - " 0/2 3.29G 0.04237 0.06417 0.02121 0.1277 183 640: 100% 8/8 [00:03<00:00, 2.41it/s]\n", - " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:04<00:00, 1.04s/it]\n", - " all 128 929 0.642 0.637 0.661 0.432\n", + " 0/2 3.29G 0.04368 0.065 0.02127 0.1299 183 640: 100% 8/8 [00:03<00:00, 2.21it/s]\n", + " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:04<00:00, 1.09s/it]\n", + " all 128 929 0.605 0.657 0.666 0.434\n", "\n", " Epoch gpu_mem box obj cls total labels img_size\n", - " 1/2 6.65G 0.04431 0.06403 0.019 0.1273 166 640: 100% 8/8 [00:01<00:00, 5.73it/s]\n", - " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:01<00:00, 3.21it/s]\n", - " all 128 929 0.662 0.626 0.658 0.433\n", + " 1/2 6.65G 0.04556 0.0651 0.01987 0.1305 166 640: 100% 8/8 [00:01<00:00, 5.18it/s]\n", + " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:01<00:00, 2.72it/s]\n", + " all 128 929 0.61 0.66 0.669 0.438\n", "\n", " Epoch gpu_mem box obj cls total labels img_size\n", - " 2/2 6.65G 0.04506 0.06836 0.01913 0.1325 182 640: 100% 8/8 [00:01<00:00, 5.51it/s]\n", - " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:02<00:00, 1.35it/s]\n", - " all 128 929 0.658 0.625 0.661 0.433\n", - "Optimizer stripped from runs/train/exp/weights/last.pt, 14.8MB\n", - "Optimizer stripped from runs/train/exp/weights/best.pt, 14.8MB\n", + " 2/2 6.65G 0.04624 0.06923 0.0196 0.1351 182 640: 100% 8/8 [00:01<00:00, 5.19it/s]\n", + " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:03<00:00, 1.27it/s]\n", + " all 128 929 0.618 0.659 0.671 0.438\n", "3 epochs completed in 0.007 hours.\n", - "\n" + "\n", + "Optimizer stripped from runs/train/exp/weights/last.pt, 14.8MB\n", + "Optimizer stripped from runs/train/exp/weights/best.pt, 14.8MB\n" ], "name": "stdout" } @@ -1263,4 +1259,4 @@ "outputs": 
[] } ] -} +} \ No newline at end of file From 2eab46e2cfbd4e99b0f5d3d17a5f8c2acfb3285b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 12 Apr 2021 12:33:04 +0200 Subject: [PATCH 180/254] Update tutorial.ipynb --- tutorial.ipynb | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index e4344d3ddcec..a8d41d3e1be9 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -607,12 +607,12 @@ "output_type": "stream", "text": [ "Namespace(agnostic_nms=False, augment=False, classes=None, conf_thres=0.25, device='', exist_ok=False, img_size=640, iou_thres=0.45, name='exp', project='runs/detect', save_conf=False, save_txt=False, source='data/images/', update=False, view_img=False, weights=['yolov5s.pt'])\n", - "YOLOv5 🚀 v4.0-137-g9b11f0c torch 1.8.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", + "YOLOv5 🚀 v5.0-1-g0f395b3 torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", "\n", "Fusing layers... \n", "Model Summary: 224 layers, 7266973 parameters, 0 gradients, 17.0 GFLOPS\n", "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, Done. (0.008s)\n", - "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 1 tie, Done. (0.008s)\n", + "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, Done. (0.008s)\n", "Results saved to runs/detect/exp\n", "Done. 
(0.087)\n" ], @@ -1259,4 +1259,4 @@ "outputs": [] } ] -} \ No newline at end of file +} From cac8a765c896bdd5a7912b51a476da5abf974a1a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 12 Apr 2021 13:02:40 +0200 Subject: [PATCH 181/254] Created using Colaboratory --- tutorial.ipynb | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index a8d41d3e1be9..e36046731afd 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -550,7 +550,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "4576b05f-d6d1-404a-fc99-5663c71e3dc4" + "outputId": "9b022435-4197-41fc-abea-81f86ce857d0" }, "source": [ "!git clone https://github.com/ultralytics/yolov5 # clone repo\n", @@ -561,14 +561,14 @@ "from IPython.display import Image, clear_output # to display images\n", "\n", "clear_output()\n", - "print('Setup complete. Using torch %s %s' % (torch.__version__, torch.cuda.get_device_properties(0) if torch.cuda.is_available() else 'CPU'))" + "print(f\"Setup complete. Using torch {torch.__version__} ({torch.cuda.get_device_properties(0).name if torch.cuda.is_available() else 'CPU'})\")" ], - "execution_count": 1, + "execution_count": 31, "outputs": [ { "output_type": "stream", "text": [ - "Setup complete. Using torch 1.8.1+cu101 _CudaDeviceProperties(name='Tesla V100-SXM2-16GB', major=7, minor=0, total_memory=16160MB, multi_processor_count=80)\n" + "Setup complete. 
Using torch 1.8.1+cu101 (Tesla V100-SXM2-16GB)\n" ], "name": "stdout" } @@ -681,7 +681,7 @@ "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017val.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../ && rm tmp.zip" ], - "execution_count": 2, + "execution_count": null, "outputs": [ { "output_type": "display_data", @@ -721,7 +721,7 @@ "# Run YOLOv5x on COCO val2017\n", "!python test.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65" ], - "execution_count": 6, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -922,7 +922,7 @@ "# Train YOLOv5s on COCO128 for 3 epochs\n", "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --nosave --cache" ], - "execution_count": 12, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -1259,4 +1259,4 @@ "outputs": [] } ] -} +} \ No newline at end of file From 860ca98832fd59e8495915af829b7caa5e7ec3d3 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 12 Apr 2021 13:10:08 +0200 Subject: [PATCH 182/254] Created using Colaboratory --- tutorial.ipynb | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index e36046731afd..245b46aa7d9f 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -528,8 +528,8 @@ "source": [ "\n", "\n", - "This notebook was written by Ultralytics LLC, and is freely available for redistribution under the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/). \n", - "For more information please visit https://github.com/ultralytics/yolov5 and https://www.ultralytics.com." + "This is the **official YOLOv5 🚀 notebook** authored by **Ultralytics**, and is freely available for redistribution under the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/). \n", + "For more information please visit https://github.com/ultralytics/yolov5 and https://www.ultralytics.com. Thank you!" 
] }, { @@ -643,7 +643,7 @@ }, "source": [ "# 2. Test\n", - "Test a model on [COCO](https://cocodataset.org/#home) val or test-dev dataset to evaluate trained accuracy. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag. Note that `pycocotools` metrics may be 1-2% better than the equivalent repo metrics, as is visible below, due to slight differences in mAP computation." + "Test a model's accuracy on [COCO](https://cocodataset.org/#home) val or test-dev datasets. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag. Note that `pycocotools` metrics may be ~1% better than the equivalent repo metrics, as is visible below, due to slight differences in mAP computation." ] }, { From 1487bc84ff3babfb502dffb5ffbdc7e02fcb1879 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 12 Apr 2021 13:27:40 +0200 Subject: [PATCH 183/254] Update README.md --- README.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 577c908de304..3de9271232c1 100755 --- a/README.md +++ b/README.md @@ -1,16 +1,16 @@ - - + +   CI CPU testing This repository represents Ultralytics open-source research into future object detection methods, and incorporates lessons learned and best practices evolved over thousands of hours of training and evolution on anonymized client datasets. **All code and models are under active development, and are subject to modification or deletion without notice.** Use at your own risk. -

+

YOLOv5-P5 640 Figure (click to expand) -

+

Figure Notes (click to expand) @@ -117,7 +117,7 @@ image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 1 tie, Done Results saved to runs/detect/exp2 Done. (0.103s) ``` - + ### PyTorch Hub @@ -147,7 +147,7 @@ $ python train.py --data coco.yaml --cfg yolov5s.yaml --weights '' --batch-size yolov5l 24 yolov5x 16 ``` - + ## Citation From 14797370646d25e226f0093a5982d5cd54ba729a Mon Sep 17 00:00:00 2001 From: Robin Date: Thu, 15 Apr 2021 12:26:08 +0100 Subject: [PATCH 184/254] Flask REST API Example (#2732) * add files * Update README.md * Update README.md * Update restapi.py pretrained=True and model.eval() are used by default when loading a model now, so no need to call them manually. * PEP8 reformat * PEP8 reformat Co-authored-by: Glenn Jocher --- utils/flask_rest_api/README.md | 51 +++++++++++++++++++++++++ utils/flask_rest_api/example_request.py | 13 +++++++ utils/flask_rest_api/restapi.py | 38 ++++++++++++++++++ 3 files changed, 102 insertions(+) create mode 100644 utils/flask_rest_api/README.md create mode 100644 utils/flask_rest_api/example_request.py create mode 100644 utils/flask_rest_api/restapi.py diff --git a/utils/flask_rest_api/README.md b/utils/flask_rest_api/README.md new file mode 100644 index 000000000000..0cdc51be692d --- /dev/null +++ b/utils/flask_rest_api/README.md @@ -0,0 +1,51 @@ +# Flask REST API +[REST](https://en.wikipedia.org/wiki/Representational_state_transfer) [API](https://en.wikipedia.org/wiki/API)s are commonly used to expose Machine Learning (ML) models to other services. This folder contains an example REST API created using Flask to expose the `yolov5s` model from [PyTorch Hub](https://pytorch.org/hub/ultralytics_yolov5/). + +## Requirements + +[Flask](https://palletsprojects.com/p/flask/) is required. 
Install with: +```shell +$ pip install Flask +``` + +## Run + +After Flask installation run: + +```shell +$ python3 restapi.py --port 5000 +``` + +Then use [curl](https://curl.se/) to perform a request: + +```shell +$ curl -X POST -F image=@zidane.jpg 'http://localhost:5000/v1/object-detection/yolov5s'` +``` + +The model inference results are returned: + +```shell +[{'class': 0, + 'confidence': 0.8197850585, + 'name': 'person', + 'xmax': 1159.1403808594, + 'xmin': 750.912902832, + 'ymax': 711.2583007812, + 'ymin': 44.0350036621}, + {'class': 0, + 'confidence': 0.5667674541, + 'name': 'person', + 'xmax': 1065.5523681641, + 'xmin': 116.0448303223, + 'ymax': 713.8904418945, + 'ymin': 198.4603881836}, + {'class': 27, + 'confidence': 0.5661227107, + 'name': 'tie', + 'xmax': 516.7975463867, + 'xmin': 416.6880187988, + 'ymax': 717.0524902344, + 'ymin': 429.2020568848}] +``` + +An example python script to perform inference using [requests](https://docs.python-requests.org/en/master/) is given in `example_request.py` diff --git a/utils/flask_rest_api/example_request.py b/utils/flask_rest_api/example_request.py new file mode 100644 index 000000000000..ff21f30f93ca --- /dev/null +++ b/utils/flask_rest_api/example_request.py @@ -0,0 +1,13 @@ +"""Perform test request""" +import pprint + +import requests + +DETECTION_URL = "http://localhost:5000/v1/object-detection/yolov5s" +TEST_IMAGE = "zidane.jpg" + +image_data = open(TEST_IMAGE, "rb").read() + +response = requests.post(DETECTION_URL, files={"image": image_data}).json() + +pprint.pprint(response) diff --git a/utils/flask_rest_api/restapi.py b/utils/flask_rest_api/restapi.py new file mode 100644 index 000000000000..9d88f618905d --- /dev/null +++ b/utils/flask_rest_api/restapi.py @@ -0,0 +1,38 @@ +""" +Run a rest API exposing the yolov5s object detection model +""" +import argparse +import io + +import torch +from PIL import Image +from flask import Flask, request + +app = Flask(__name__) + +DETECTION_URL = 
"/v1/object-detection/yolov5s" + + +@app.route(DETECTION_URL, methods=["POST"]) +def predict(): + if not request.method == "POST": + return + + if request.files.get("image"): + image_file = request.files["image"] + image_bytes = image_file.read() + + img = Image.open(io.BytesIO(image_bytes)) + + results = model(img, size=640) + data = results.pandas().xyxy[0].to_json(orient="records") + return data + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Flask api exposing yolov5 model") + parser.add_argument("--port", default=5000, type=int, help="port number") + args = parser.parse_args() + + model = torch.hub.load("ultralytics/yolov5", "yolov5s", force_reload=True).autoshape() # force_reload to recache + app.run(host="0.0.0.0", port=args.port) # debug=True causes Restarting with stat From e5d71223b83b9de2911a3d53685de6a20a2dc0f1 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 15 Apr 2021 16:45:50 +0200 Subject: [PATCH 185/254] Update README.md --- README.md | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 3de9271232c1..27ea18da1932 100755 --- a/README.md +++ b/README.md @@ -121,19 +121,18 @@ Done. 
(0.103s) ### PyTorch Hub -To run **batched inference** with YOLOv5 and [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36): +Inference with YOLOv5 and [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36): ```python import torch # Model model = torch.hub.load('ultralytics/yolov5', 'yolov5s') -# Images -dir = 'https://github.com/ultralytics/yolov5/raw/master/data/images/' -imgs = [dir + f for f in ('zidane.jpg', 'bus.jpg')] # batch of images +# Image +img = 'https://github.com/ultralytics/yolov5/raw/master/data/images/zidane.jpg' # Inference -results = model(imgs) +results = model(img) results.print() # or .show(), .save() ``` From 1f3e482bce89a348bcdace91dfc89c5e47862066 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 16 Apr 2021 14:03:27 +0200 Subject: [PATCH 186/254] ONNX Simplifier (#2815) * ONNX Simplifier Add ONNX Simplifier to ONNX export pipeline in export.py. Will auto-install onnx-simplifier if onnx is installed but onnx-simplifier is not. * Update general.py --- models/export.py | 45 ++++++++++++++++++++++++++++++--------------- utils/general.py | 2 +- 2 files changed, 31 insertions(+), 16 deletions(-) diff --git a/models/export.py b/models/export.py index 0bb5398e4841..bec9194319c1 100644 --- a/models/export.py +++ b/models/export.py @@ -1,7 +1,7 @@ """Exports a YOLOv5 *.pt model to ONNX and TorchScript formats Usage: - $ export PYTHONPATH="$PWD" && python models/export.py --weights ./weights/yolov5s.pt --img 640 --batch 1 + $ export PYTHONPATH="$PWD" && python models/export.py --weights yolov5s.pt --img 640 --batch 1 """ import argparse @@ -16,7 +16,7 @@ import models from models.experimental import attempt_load from utils.activations import Hardswish, SiLU -from utils.general import set_logging, check_img_size +from utils.general import colorstr, check_img_size, check_requirements, set_logging from utils.torch_utils import select_device if __name__ == '__main__': @@ -59,20 +59,22 @@ y = model(img) # dry run # TorchScript 
export + prefix = colorstr('TorchScript:') try: - print('\nStarting TorchScript export with torch %s...' % torch.__version__) + print(f'\n{prefix} starting export with torch {torch.__version__}...') f = opt.weights.replace('.pt', '.torchscript.pt') # filename ts = torch.jit.trace(model, img, strict=False) ts.save(f) - print('TorchScript export success, saved as %s' % f) + print(f'{prefix} export success, saved as {f}') except Exception as e: - print('TorchScript export failure: %s' % e) + print(f'{prefix} export failure: {e}') # ONNX export + prefix = colorstr('ONNX:') try: import onnx - print('\nStarting ONNX export with onnx %s...' % onnx.__version__) + print(f'{prefix} starting export with onnx {onnx.__version__}...') f = opt.weights.replace('.pt', '.onnx') # filename torch.onnx.export(model, img, f, verbose=False, opset_version=12, input_names=['images'], output_names=['classes', 'boxes'] if y is None else ['output'], @@ -80,25 +82,38 @@ 'output': {0: 'batch', 2: 'y', 3: 'x'}} if opt.dynamic else None) # Checks - onnx_model = onnx.load(f) # load onnx model - onnx.checker.check_model(onnx_model) # check onnx model - # print(onnx.helper.printable_graph(onnx_model.graph)) # print a human readable model - print('ONNX export success, saved as %s' % f) + model_onnx = onnx.load(f) # load onnx model + onnx.checker.check_model(model_onnx) # check onnx model + # print(onnx.helper.printable_graph(model_onnx.graph)) # print + + # Simplify + try: + check_requirements(['onnx-simplifier']) + import onnxsim + + print(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...') + model_onnx, check = onnxsim.simplify(model_onnx) + assert check, 'assert check failed' + onnx.save(model_onnx, f) + except Exception as e: + print(f'{prefix} simplifier failure: {e}') + print(f'{prefix} export success, saved as {f}') except Exception as e: - print('ONNX export failure: %s' % e) + print(f'{prefix} export failure: {e}') # CoreML export + prefix = colorstr('CoreML:') try: import 
coremltools as ct - print('\nStarting CoreML export with coremltools %s...' % ct.__version__) + print(f'{prefix} starting export with coremltools {onnx.__version__}...') # convert model from torchscript and apply pixel scaling as per detect.py model = ct.convert(ts, inputs=[ct.ImageType(name='image', shape=img.shape, scale=1 / 255.0, bias=[0, 0, 0])]) f = opt.weights.replace('.pt', '.mlmodel') # filename model.save(f) - print('CoreML export success, saved as %s' % f) + print(f'{prefix} export success, saved as {f}') except Exception as e: - print('CoreML export failure: %s' % e) + print(f'{prefix} export failure: {e}') # Finish - print('\nExport complete (%.2fs). Visualize with https://github.com/lutzroeder/netron.' % (time.time() - t)) + print(f'\nExport complete ({time.time() - t:.2f}s). Visualize with https://github.com/lutzroeder/netron.') diff --git a/utils/general.py b/utils/general.py index 413eb5b8fa97..ac3a6981b3d4 100755 --- a/utils/general.py +++ b/utils/general.py @@ -111,7 +111,7 @@ def check_requirements(requirements='requirements.txt', exclude=()): except Exception as e: # DistributionNotFound or VersionConflict if requirements not met n += 1 print(f"{prefix} {e.req} not found and is required by YOLOv5, attempting auto-update...") - print(subprocess.check_output(f"pip install '{e.req}'", shell=True).decode()) + print(subprocess.check_output(f"pip install {e.req}", shell=True).decode()) if n: # if packages updated source = file.resolve() if 'file' in locals() else requirements From aff03be35a8f5c7fb7da8bfd2f26a93cde416fbc Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 16 Apr 2021 17:58:28 +0200 Subject: [PATCH 187/254] YouTube Bug Fix (#2818) Fix for #2810 ```shell python detect.py --source 0 ``` introduced by YouTube Livestream Detection PR #2752 --- utils/datasets.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index ec597b628106..b81c634dcb7a 100755 --- 
a/utils/datasets.py +++ b/utils/datasets.py @@ -272,15 +272,15 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32): n = len(sources) self.imgs = [None] * n self.sources = [clean_str(x) for x in sources] # clean source names for later - for i, s in enumerate(sources): - # Start the thread to read frames from the video stream + for i, s in enumerate(sources): # index, source + # Start thread to read frames from video stream print(f'{i + 1}/{n}: {s}... ', end='') - url = eval(s) if s.isnumeric() else s - if 'youtube.com/' in url or 'youtu.be/' in url: # if source is YouTube video + if 'youtube.com/' in s or 'youtu.be/' in s: # if source is YouTube video check_requirements(('pafy', 'youtube_dl')) import pafy - url = pafy.new(url).getbest(preftype="mp4").url - cap = cv2.VideoCapture(url) + s = pafy.new(s).getbest(preftype="mp4").url # YouTube URL + s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam + cap = cv2.VideoCapture(s) assert cap.isOpened(), f'Failed to open {s}' w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) From c15e25c40fa03e91a10708f9af27e23184d8faa2 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 18 Apr 2021 13:47:40 +0200 Subject: [PATCH 188/254] PyTorch Hub cv2 .save() .show() bug fix (#2831) * PyTorch Hub cv2 .save() .show() bug fix cv2.rectangle() was failing on non-contiguous np array inputs. This checks for contiguous arrays and applies is necessary: ```python imgs[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update ``` * Update plots.py ```python assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to plot_on_box() input image.' ``` * Update hubconf.py Expand CI tests to OpenCV image. 
--- hubconf.py | 10 ++++++---- models/common.py | 4 ++-- utils/plots.py | 26 ++++++++++++++------------ 3 files changed, 22 insertions(+), 18 deletions(-) diff --git a/hubconf.py b/hubconf.py index a2a43a7a10cb..d89502f4ee76 100644 --- a/hubconf.py +++ b/hubconf.py @@ -124,13 +124,15 @@ def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True): # model = custom(path_or_model='path/to/model.pt') # custom example # Verify inference + import cv2 import numpy as np from PIL import Image - imgs = [Image.open('data/images/bus.jpg'), # PIL - 'data/images/zidane.jpg', # filename - 'https://github.com/ultralytics/yolov5/raw/master/data/images/bus.jpg', # URI - np.zeros((640, 480, 3))] # numpy + imgs = ['data/images/zidane.jpg', # filename + 'https://github.com/ultralytics/yolov5/releases/download/v1.0/zidane.jpg', # URI + cv2.imread('data/images/bus.jpg')[:, :, ::-1], # OpenCV + Image.open('data/images/bus.jpg'), # PIL + np.zeros((320, 640, 3))] # numpy results = model(imgs) # batched inference results.print() diff --git a/models/common.py b/models/common.py index 1130471e904b..2fdc0e0b70ca 100644 --- a/models/common.py +++ b/models/common.py @@ -240,7 +240,7 @@ def autoshape(self): @torch.no_grad() def forward(self, imgs, size=640, augment=False, profile=False): # Inference from various sources. 
For height=640, width=1280, RGB images example inputs are: - # filename: imgs = 'data/samples/zidane.jpg' + # filename: imgs = 'data/images/zidane.jpg' # URI: = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/zidane.jpg' # OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(640,1280,3) # PIL: = Image.open('image.jpg') # HWC x(640,1280,3) @@ -271,7 +271,7 @@ def forward(self, imgs, size=640, augment=False, profile=False): shape0.append(s) # image shape g = (size / max(s)) # gain shape1.append([y * g for y in s]) - imgs[i] = im # update + imgs[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update shape1 = [make_divisible(x, int(self.stride.max())) for x in np.stack(shape1, 0).max(0)] # inference shape x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs] # pad x = np.stack(x, 0) if n > 1 else x[0][None] # stack diff --git a/utils/plots.py b/utils/plots.py index 5b23a34f5141..09b6bcd15a9f 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -54,32 +54,34 @@ def butter_lowpass(cutoff, fs, order): return filtfilt(b, a, data) # forward-backward filter -def plot_one_box(x, img, color=None, label=None, line_thickness=3): - # Plots one bounding box on image img - tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness +def plot_one_box(x, im, color=None, label=None, line_thickness=3): + # Plots one bounding box on image 'im' using OpenCV + assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to plot_on_box() input image.' 
+ tl = line_thickness or round(0.002 * (im.shape[0] + im.shape[1]) / 2) + 1 # line/font thickness color = color or [random.randint(0, 255) for _ in range(3)] c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3])) - cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA) + cv2.rectangle(im, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA) if label: tf = max(tl - 1, 1) # font thickness t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0] c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3 - cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA) # filled - cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA) + cv2.rectangle(im, c1, c2, color, -1, cv2.LINE_AA) # filled + cv2.putText(im, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA) -def plot_one_box_PIL(box, img, color=None, label=None, line_thickness=None): - img = Image.fromarray(img) - draw = ImageDraw.Draw(img) - line_thickness = line_thickness or max(int(min(img.size) / 200), 2) +def plot_one_box_PIL(box, im, color=None, label=None, line_thickness=None): + # Plots one bounding box on image 'im' using PIL + im = Image.fromarray(im) + draw = ImageDraw.Draw(im) + line_thickness = line_thickness or max(int(min(im.size) / 200), 2) draw.rectangle(box, width=line_thickness, outline=tuple(color)) # plot if label: - fontsize = max(round(max(img.size) / 40), 12) + fontsize = max(round(max(im.size) / 40), 12) font = ImageFont.truetype("Arial.ttf", fontsize) txt_width, txt_height = font.getsize(label) draw.rectangle([box[0], box[1] - txt_height + 4, box[0] + txt_width, box[1]], fill=tuple(color)) draw.text((box[0], box[1] - txt_height + 1), label, fill=(255, 255, 255), font=font) - return np.asarray(img) + return np.asarray(im) def plot_wh_methods(): # from utils.plots import *; plot_wh_methods() From 803f51bceedb502e8f112b05911b805bf9ddac6b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 18 Apr 
2021 14:28:27 +0200 Subject: [PATCH 189/254] Create FUNDING.yml (#2832) --- FUNDING.yml | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 FUNDING.yml diff --git a/FUNDING.yml b/FUNDING.yml new file mode 100644 index 000000000000..56798bae1769 --- /dev/null +++ b/FUNDING.yml @@ -0,0 +1,5 @@ +# These are supported funding model platforms + +github: [glenn-jocher] +patreon: ultralytics +open_collective: ultralytics From 238583b7d5c19029920d56c417c406c829569c75 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 18 Apr 2021 14:40:58 +0200 Subject: [PATCH 190/254] Update FUNDING.yml (#2833) * Update FUNDING.yml * move FUNDING.yml to ./github --- FUNDING.yml => .github/FUNDING.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) rename FUNDING.yml => .github/FUNDING.yml (80%) diff --git a/FUNDING.yml b/.github/FUNDING.yml similarity index 80% rename from FUNDING.yml rename to .github/FUNDING.yml index 56798bae1769..3da386f7e724 100644 --- a/FUNDING.yml +++ b/.github/FUNDING.yml @@ -1,5 +1,5 @@ # These are supported funding model platforms -github: [glenn-jocher] +github: glenn-jocher patreon: ultralytics open_collective: ultralytics From 1df8c6c963d31ce84895101a70e45e0afdcb0bc2 Mon Sep 17 00:00:00 2001 From: Tim Stokman <41363+timstokman@users.noreply.github.com> Date: Tue, 20 Apr 2021 13:54:03 +0200 Subject: [PATCH 191/254] Fix ONNX dynamic axes export support with onnx simplifier, make onnx simplifier optional (#2856) * Ensure dynamic export works succesfully, onnx simplifier optional * Update export.py * add dashes Co-authored-by: Tim Co-authored-by: Glenn Jocher --- models/export.py | 34 +++++++++++++++++++--------------- 1 file changed, 19 insertions(+), 15 deletions(-) diff --git a/models/export.py b/models/export.py index bec9194319c1..c527a47951cb 100644 --- a/models/export.py +++ b/models/export.py @@ -21,12 +21,13 @@ if __name__ == '__main__': parser = argparse.ArgumentParser() - parser.add_argument('--weights', type=str, 
default='./yolov5s.pt', help='weights path') # from yolov5/models/ + parser.add_argument('--weights', type=str, default='./yolov5s.pt', help='weights path') parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='image size') # height, width parser.add_argument('--batch-size', type=int, default=1, help='batch size') - parser.add_argument('--dynamic', action='store_true', help='dynamic ONNX axes') parser.add_argument('--grid', action='store_true', help='export Detect() layer grid') parser.add_argument('--device', default='cpu', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') + parser.add_argument('--dynamic', action='store_true', help='dynamic ONNX axes') # ONNX-only + parser.add_argument('--simplify', action='store_true', help='simplify ONNX model') # ONNX-only opt = parser.parse_args() opt.img_size *= 2 if len(opt.img_size) == 1 else 1 # expand print(opt) @@ -58,7 +59,7 @@ model.model[-1].export = not opt.grid # set Detect() layer grid export y = model(img) # dry run - # TorchScript export + # TorchScript export ----------------------------------------------------------------------------------------------- prefix = colorstr('TorchScript:') try: print(f'\n{prefix} starting export with torch {torch.__version__}...') @@ -69,7 +70,7 @@ except Exception as e: print(f'{prefix} export failure: {e}') - # ONNX export + # ONNX export ------------------------------------------------------------------------------------------------------ prefix = colorstr('ONNX:') try: import onnx @@ -87,21 +88,24 @@ # print(onnx.helper.printable_graph(model_onnx.graph)) # print # Simplify - try: - check_requirements(['onnx-simplifier']) - import onnxsim - - print(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...') - model_onnx, check = onnxsim.simplify(model_onnx) - assert check, 'assert check failed' - onnx.save(model_onnx, f) - except Exception as e: - print(f'{prefix} simplifier failure: {e}') + if opt.simplify: + try: + 
check_requirements(['onnx-simplifier']) + import onnxsim + + print(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...') + model_onnx, check = onnxsim.simplify(model_onnx, + dynamic_input_shape=opt.dynamic, + input_shapes={'images': list(img.shape)} if opt.dynamic else None) + assert check, 'assert check failed' + onnx.save(model_onnx, f) + except Exception as e: + print(f'{prefix} simplifier failure: {e}') print(f'{prefix} export success, saved as {f}') except Exception as e: print(f'{prefix} export failure: {e}') - # CoreML export + # CoreML export ---------------------------------------------------------------------------------------------------- prefix = colorstr('CoreML:') try: import coremltools as ct From c5c647e2816f70f17843755ecfc913a11e1d6492 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 20 Apr 2021 19:47:07 +0200 Subject: [PATCH 192/254] Update increment_path() to handle file paths (#2867) --- utils/general.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/utils/general.py b/utils/general.py index ac3a6981b3d4..c7d084e09326 100755 --- a/utils/general.py +++ b/utils/general.py @@ -591,14 +591,16 @@ def apply_classifier(x, model, img, im0): return x -def increment_path(path, exist_ok=True, sep=''): - # Increment path, i.e. runs/exp --> runs/exp{sep}0, runs/exp{sep}1 etc. +def increment_path(path, exist_ok=False, sep=''): + # Increment file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc. 
path = Path(path) # os-agnostic - if (path.exists() and exist_ok) or (not path.exists()): + if not path.exists() or exist_ok: return str(path) else: + suffix = path.suffix + path = path.with_suffix('') dirs = glob.glob(f"{path}{sep}*") # similar paths matches = [re.search(rf"%s{sep}(\d+)" % path.stem, d) for d in dirs] i = [int(m.groups()[0]) for m in matches if m] # indices n = max(i) + 1 if i else 2 # increment number - return f"{path}{sep}{n}" # update path + return f"{path}{sep}{n}{suffix}" # update path From c949fc86d1914bdbf0a61d193855c1b4e1536da5 Mon Sep 17 00:00:00 2001 From: Burhan Date: Wed, 21 Apr 2021 05:51:08 +0800 Subject: [PATCH 193/254] Detection cropping+saving feature addition for detect.py and PyTorch Hub (#2827) * Update detect.py * Update detect.py * Update greetings.yml * Update cropping * cleanup * Update increment_path() * Update common.py * Update detect.py * Update detect.py * Update detect.py * Update common.py * cleanup * Update detect.py Co-authored-by: Glenn Jocher --- detect.py | 18 +++++++++++------- models/common.py | 32 ++++++++++++++++++++------------ test.py | 2 +- train.py | 6 +++--- utils/general.py | 27 +++++++++++++++++++++------ 5 files changed, 56 insertions(+), 29 deletions(-) diff --git a/detect.py b/detect.py index c0707da69e6a..081ae3d89e2e 100644 --- a/detect.py +++ b/detect.py @@ -10,19 +10,19 @@ from models.experimental import attempt_load from utils.datasets import LoadStreams, LoadImages from utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression, apply_classifier, \ - scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path + scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path, save_one_box from utils.plots import plot_one_box from utils.torch_utils import select_device, load_classifier, time_synchronized -def detect(save_img=False): +def detect(): source, weights, view_img, save_txt, imgsz = opt.source, opt.weights, opt.view_img, opt.save_txt, 
opt.img_size save_img = not opt.nosave and not source.endswith('.txt') # save inference images webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith( ('rtsp://', 'rtmp://', 'http://', 'https://')) # Directories - save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run + save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Initialize @@ -84,7 +84,7 @@ def detect(save_img=False): if webcam: # batch_size >= 1 p, s, im0, frame = path[i], '%g: ' % i, im0s[i].copy(), dataset.count else: - p, s, im0, frame = path, '', im0s, getattr(dataset, 'frame', 0) + p, s, im0, frame = path, '', im0s.copy(), getattr(dataset, 'frame', 0) p = Path(p) # to Path save_path = str(save_dir / p.name) # img.jpg @@ -108,9 +108,12 @@ def detect(save_img=False): with open(txt_path + '.txt', 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') - if save_img or view_img: # Add bbox to image - label = f'{names[int(cls)]} {conf:.2f}' - plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=3) + if save_img or opt.save_crop or view_img: # Add bbox to image + c = int(cls) # integer class + label = f'{names[c]} {conf:.2f}' + plot_one_box(xyxy, im0, label=label, color=colors[c], line_thickness=3) + if opt.save_crop: + save_one_box(xyxy, im0s, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) # Print time (inference + NMS) print(f'{s}Done. 
({t2 - t1:.3f}s)') @@ -157,6 +160,7 @@ def detect(save_img=False): parser.add_argument('--view-img', action='store_true', help='display results') parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') + parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes') parser.add_argument('--nosave', action='store_true', help='do not save images/videos') parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3') parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') diff --git a/models/common.py b/models/common.py index 2fdc0e0b70ca..a28621904b0e 100644 --- a/models/common.py +++ b/models/common.py @@ -13,7 +13,7 @@ from torch.cuda import amp from utils.datasets import letterbox -from utils.general import non_max_suppression, make_divisible, scale_coords, increment_path, xyxy2xywh +from utils.general import non_max_suppression, make_divisible, scale_coords, increment_path, xyxy2xywh, save_one_box from utils.plots import color_list, plot_one_box from utils.torch_utils import time_synchronized @@ -311,29 +311,33 @@ def __init__(self, imgs, pred, files, times=None, names=None, shape=None): self.t = tuple((times[i + 1] - times[i]) * 1000 / self.n for i in range(3)) # timestamps (ms) self.s = shape # inference BCHW shape - def display(self, pprint=False, show=False, save=False, render=False, save_dir=''): + def display(self, pprint=False, show=False, save=False, crop=False, render=False, save_dir=Path('')): colors = color_list() - for i, (img, pred) in enumerate(zip(self.imgs, self.pred)): - str = f'image {i + 1}/{len(self.pred)}: {img.shape[0]}x{img.shape[1]} ' + for i, (im, pred) in enumerate(zip(self.imgs, self.pred)): + str = f'image {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} ' if pred is not None: for c in pred[:, 
-1].unique(): n = (pred[:, -1] == c).sum() # detections per class str += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string - if show or save or render: + if show or save or render or crop: for *box, conf, cls in pred: # xyxy, confidence, class label = f'{self.names[int(cls)]} {conf:.2f}' - plot_one_box(box, img, label=label, color=colors[int(cls) % 10]) - img = Image.fromarray(img.astype(np.uint8)) if isinstance(img, np.ndarray) else img # from np + if crop: + save_one_box(box, im, file=save_dir / 'crops' / self.names[int(cls)] / self.files[i]) + else: # all others + plot_one_box(box, im, label=label, color=colors[int(cls) % 10]) + + im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np if pprint: print(str.rstrip(', ')) if show: - img.show(self.files[i]) # show + im.show(self.files[i]) # show if save: f = self.files[i] - img.save(Path(save_dir) / f) # save + im.save(save_dir / f) # save print(f"{'Saved' * (i == 0)} {f}", end=',' if i < self.n - 1 else f' to {save_dir}\n') if render: - self.imgs[i] = np.asarray(img) + self.imgs[i] = np.asarray(im) def print(self): self.display(pprint=True) # print results @@ -343,10 +347,14 @@ def show(self): self.display(show=True) # show results def save(self, save_dir='runs/hub/exp'): - save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/hub/exp') # increment save_dir - Path(save_dir).mkdir(parents=True, exist_ok=True) + save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/hub/exp', mkdir=True) # increment save_dir self.display(save=True, save_dir=save_dir) # save results + def crop(self, save_dir='runs/hub/exp'): + save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/hub/exp', mkdir=True) # increment save_dir + self.display(crop=True, save_dir=save_dir) # crop results + print(f'Saved results to {save_dir}\n') + def render(self): self.display(render=True) # render results return self.imgs diff --git a/test.py b/test.py index d099699bcad8..db1651d07f65 
100644 --- a/test.py +++ b/test.py @@ -49,7 +49,7 @@ def test(data, device = select_device(opt.device, batch_size=batch_size) # Directories - save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run + save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model diff --git a/train.py b/train.py index 82043b7fff34..17b5ac5dda50 100644 --- a/train.py +++ b/train.py @@ -41,7 +41,7 @@ def train(hyp, opt, device, tb_writer=None): logger.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items())) save_dir, epochs, batch_size, total_batch_size, weights, rank = \ - Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank + opt.save_dir, opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank # Directories wdir = save_dir / 'weights' @@ -69,7 +69,7 @@ def train(hyp, opt, device, tb_writer=None): if rank in [-1, 0]: opt.hyp = hyp # add hyperparameters run_id = torch.load(weights).get('wandb_id') if weights.endswith('.pt') and os.path.isfile(weights) else None - wandb_logger = WandbLogger(opt, Path(opt.save_dir).stem, run_id, data_dict) + wandb_logger = WandbLogger(opt, save_dir.stem, run_id, data_dict) loggers['wandb'] = wandb_logger.wandb data_dict = wandb_logger.data_dict if wandb_logger.wandb: @@ -577,7 +577,7 @@ def train(hyp, opt, device, tb_writer=None): assert opt.local_rank == -1, 'DDP mode not implemented for --evolve' opt.notest, opt.nosave = True, True # only test/save final epoch # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices - yaml_file = Path(opt.save_dir) / 'hyp_evolved.yaml' # save best result here + yaml_file = opt.save_dir / 'hyp_evolved.yaml' # save best result here if opt.bucket: os.system('gsutil cp gs://%s/evolve.txt .' 
% opt.bucket) # download evolve.txt if exists diff --git a/utils/general.py b/utils/general.py index c7d084e09326..817023f33dd3 100755 --- a/utils/general.py +++ b/utils/general.py @@ -557,7 +557,7 @@ def print_mutation(hyp, results, yaml_file='hyp_evolved.yaml', bucket=''): def apply_classifier(x, model, img, im0): - # applies a second stage classifier to yolo outputs + # Apply a second stage classifier to yolo outputs im0 = [im0] if isinstance(im0, np.ndarray) else im0 for i, d in enumerate(x): # per image if d is not None and len(d): @@ -591,16 +591,31 @@ def apply_classifier(x, model, img, im0): return x -def increment_path(path, exist_ok=False, sep=''): +def save_one_box(xyxy, im, file='image.jpg', gain=1.02, pad=10, square=False, BGR=False): + # Save an image crop as {file} with crop size multiplied by {gain} and padded by {pad} pixels + xyxy = torch.tensor(xyxy).view(-1, 4) + b = xyxy2xywh(xyxy) # boxes + if square: + b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # attempt rectangle to square + b[:, 2:] = b[:, 2:] * gain + pad # box wh * gain + pad + xyxy = xywh2xyxy(b).long() + clip_coords(xyxy, im.shape) + crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2])] + cv2.imwrite(str(increment_path(file, mkdir=True).with_suffix('.jpg')), crop if BGR else crop[..., ::-1]) + + +def increment_path(path, exist_ok=False, sep='', mkdir=False): # Increment file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc. 
path = Path(path) # os-agnostic - if not path.exists() or exist_ok: - return str(path) - else: + if path.exists() and not exist_ok: suffix = path.suffix path = path.with_suffix('') dirs = glob.glob(f"{path}{sep}*") # similar paths matches = [re.search(rf"%s{sep}(\d+)" % path.stem, d) for d in dirs] i = [int(m.groups()[0]) for m in matches if m] # indices n = max(i) + 1 if i else 2 # increment number - return f"{path}{sep}{n}{suffix}" # update path + path = Path(f"{path}{sep}{n}{suffix}") # update path + dir = path if path.suffix == '' else path.parent # directory + if not dir.exists() and mkdir: + dir.mkdir(parents=True, exist_ok=True) # make directory + return path From f7bc685c2c0f57005b83355715cb7282e61416eb Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 21 Apr 2021 14:34:45 +0200 Subject: [PATCH 194/254] Implement yaml.safe_load() (#2876) * Implement yaml.safe_load() * yaml.safe_dump() --- data/coco.yaml | 2 +- models/yolo.py | 2 +- test.py | 2 +- train.py | 19 ++++++++++--------- utils/autoanchor.py | 2 +- utils/aws/resume.py | 2 +- utils/general.py | 2 +- utils/plots.py | 2 +- utils/wandb_logging/log_dataset.py | 2 +- utils/wandb_logging/wandb_utils.py | 10 +++++----- 10 files changed, 23 insertions(+), 22 deletions(-) diff --git a/data/coco.yaml b/data/coco.yaml index b9da2bf5919b..fa33a1210004 100644 --- a/data/coco.yaml +++ b/data/coco.yaml @@ -30,6 +30,6 @@ names: [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', ' # Print classes # with open('data/coco.yaml') as f: -# d = yaml.load(f, Loader=yaml.FullLoader) # dict +# d = yaml.safe_load(f) # dict # for i, x in enumerate(d['names']): # print(i, x) diff --git a/models/yolo.py b/models/yolo.py index f730a1efa3b3..7db0e7da2629 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -72,7 +72,7 @@ def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, i import yaml # for torch hub self.yaml_file = Path(cfg).name with open(cfg) as f: - self.yaml = yaml.load(f, 
Loader=yaml.SafeLoader) # model dict + self.yaml = yaml.safe_load(f) # model dict # Define model ch = self.yaml['ch'] = self.yaml.get('ch', ch) # input channels diff --git a/test.py b/test.py index db1651d07f65..43c03cf0e094 100644 --- a/test.py +++ b/test.py @@ -71,7 +71,7 @@ def test(data, if isinstance(data, str): is_coco = data.endswith('coco.yaml') with open(data) as f: - data = yaml.load(f, Loader=yaml.SafeLoader) + data = yaml.safe_load(f) check_dataset(data) # check nc = 1 if single_cls else int(data['nc']) # number of classes iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95 diff --git a/train.py b/train.py index 17b5ac5dda50..acfc9ef5527b 100644 --- a/train.py +++ b/train.py @@ -41,7 +41,7 @@ def train(hyp, opt, device, tb_writer=None): logger.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items())) save_dir, epochs, batch_size, total_batch_size, weights, rank = \ - opt.save_dir, opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank + Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank # Directories wdir = save_dir / 'weights' @@ -52,16 +52,16 @@ def train(hyp, opt, device, tb_writer=None): # Save run settings with open(save_dir / 'hyp.yaml', 'w') as f: - yaml.dump(hyp, f, sort_keys=False) + yaml.safe_dump(hyp, f, sort_keys=False) with open(save_dir / 'opt.yaml', 'w') as f: - yaml.dump(vars(opt), f, sort_keys=False) + yaml.safe_dump(vars(opt), f, sort_keys=False) # Configure plots = not opt.evolve # create plots cuda = device.type != 'cpu' init_seeds(2 + rank) with open(opt.data) as f: - data_dict = yaml.load(f, Loader=yaml.SafeLoader) # data dict + data_dict = yaml.safe_load(f) # data dict is_coco = opt.data.endswith('coco.yaml') # Logging- Doing this before checking the dataset. 
Might update data_dict @@ -506,8 +506,9 @@ def train(hyp, opt, device, tb_writer=None): assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist' apriori = opt.global_rank, opt.local_rank with open(Path(ckpt).parent.parent / 'opt.yaml') as f: - opt = argparse.Namespace(**yaml.load(f, Loader=yaml.SafeLoader)) # replace - opt.cfg, opt.weights, opt.resume, opt.batch_size, opt.global_rank, opt.local_rank = '', ckpt, True, opt.total_batch_size, *apriori # reinstate + opt = argparse.Namespace(**yaml.safe_load(f)) # replace + opt.cfg, opt.weights, opt.resume, opt.batch_size, opt.global_rank, opt.local_rank = \ + '', ckpt, True, opt.total_batch_size, *apriori # reinstate logger.info('Resuming training from %s' % ckpt) else: # opt.hyp = opt.hyp or ('hyp.finetune.yaml' if opt.weights else 'hyp.scratch.yaml') @@ -515,7 +516,7 @@ def train(hyp, opt, device, tb_writer=None): assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified' opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size))) # extend to 2 sizes (train, test) opt.name = 'evolve' if opt.evolve else opt.name - opt.save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok | opt.evolve) # increment run + opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok | opt.evolve)) # DDP mode opt.total_batch_size = opt.batch_size @@ -530,7 +531,7 @@ def train(hyp, opt, device, tb_writer=None): # Hyperparameters with open(opt.hyp) as f: - hyp = yaml.load(f, Loader=yaml.SafeLoader) # load hyps + hyp = yaml.safe_load(f) # load hyps # Train logger.info(opt) @@ -577,7 +578,7 @@ def train(hyp, opt, device, tb_writer=None): assert opt.local_rank == -1, 'DDP mode not implemented for --evolve' opt.notest, opt.nosave = True, True # only test/save final epoch # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices - yaml_file = opt.save_dir / 'hyp_evolved.yaml' # save best result here + yaml_file = 
Path(opt.save_dir) / 'hyp_evolved.yaml' # save best result here if opt.bucket: os.system('gsutil cp gs://%s/evolve.txt .' % opt.bucket) # download evolve.txt if exists diff --git a/utils/autoanchor.py b/utils/autoanchor.py index 57777462e89f..75b350da729c 100644 --- a/utils/autoanchor.py +++ b/utils/autoanchor.py @@ -102,7 +102,7 @@ def print_results(k): if isinstance(path, str): # *.yaml file with open(path) as f: - data_dict = yaml.load(f, Loader=yaml.SafeLoader) # model dict + data_dict = yaml.safe_load(f) # model dict from utils.datasets import LoadImagesAndLabels dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True) else: diff --git a/utils/aws/resume.py b/utils/aws/resume.py index faad8d247411..4b0d4246b594 100644 --- a/utils/aws/resume.py +++ b/utils/aws/resume.py @@ -19,7 +19,7 @@ # Load opt.yaml with open(last.parent.parent / 'opt.yaml') as f: - opt = yaml.load(f, Loader=yaml.SafeLoader) + opt = yaml.safe_load(f) # Get device count d = opt['device'].split(',') # devices diff --git a/utils/general.py b/utils/general.py index 817023f33dd3..9898549d3eaf 100755 --- a/utils/general.py +++ b/utils/general.py @@ -550,7 +550,7 @@ def print_mutation(hyp, results, yaml_file='hyp_evolved.yaml', bucket=''): results = tuple(x[0, :7]) c = '%10.4g' * len(results) % results # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3) f.write('# Hyperparameter Evolution Results\n# Generations: %g\n# Metrics: ' % len(x) + c + '\n\n') - yaml.dump(hyp, f, sort_keys=False) + yaml.safe_dump(hyp, f, sort_keys=False) if bucket: os.system('gsutil cp evolve.txt %s gs://%s' % (yaml_file, bucket)) # upload diff --git a/utils/plots.py b/utils/plots.py index 09b6bcd15a9f..f24513c6998d 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -323,7 +323,7 @@ def plot_labels(labels, names=(), save_dir=Path(''), loggers=None): def plot_evolution(yaml_file='data/hyp.finetune.yaml'): # from utils.plots import *; plot_evolution() # Plot hyperparameter evolution results in 
evolve.txt with open(yaml_file) as f: - hyp = yaml.load(f, Loader=yaml.SafeLoader) + hyp = yaml.safe_load(f) x = np.loadtxt('evolve.txt', ndmin=2) f = fitness(x) # weights = (f - f.min()) ** 2 # for weighted results diff --git a/utils/wandb_logging/log_dataset.py b/utils/wandb_logging/log_dataset.py index d7a521f1414b..f45a23011f15 100644 --- a/utils/wandb_logging/log_dataset.py +++ b/utils/wandb_logging/log_dataset.py @@ -9,7 +9,7 @@ def create_dataset_artifact(opt): with open(opt.data) as f: - data = yaml.load(f, Loader=yaml.SafeLoader) # data dict + data = yaml.safe_load(f) # data dict logger = WandbLogger(opt, '', None, data, job_type='Dataset Creation') diff --git a/utils/wandb_logging/wandb_utils.py b/utils/wandb_logging/wandb_utils.py index d8f50ae8a80e..d8fbd1ef42aa 100644 --- a/utils/wandb_logging/wandb_utils.py +++ b/utils/wandb_logging/wandb_utils.py @@ -55,7 +55,7 @@ def check_wandb_resume(opt): def process_wandb_config_ddp_mode(opt): with open(opt.data) as f: - data_dict = yaml.load(f, Loader=yaml.SafeLoader) # data dict + data_dict = yaml.safe_load(f) # data dict train_dir, val_dir = None, None if isinstance(data_dict['train'], str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX): api = wandb.Api() @@ -73,7 +73,7 @@ def process_wandb_config_ddp_mode(opt): if train_dir or val_dir: ddp_data_path = str(Path(val_dir) / 'wandb_local_data.yaml') with open(ddp_data_path, 'w') as f: - yaml.dump(data_dict, f) + yaml.safe_dump(data_dict, f) opt.data = ddp_data_path @@ -120,7 +120,7 @@ def check_and_upload_dataset(self, opt): 'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem) print("Created dataset config file ", config_path) with open(config_path) as f: - wandb_data_dict = yaml.load(f, Loader=yaml.SafeLoader) + wandb_data_dict = yaml.safe_load(f) return wandb_data_dict def setup_training(self, opt, data_dict): @@ -192,7 +192,7 @@ def log_model(self, path, opt, epoch, fitness_score, best_model=False): def log_dataset_artifact(self, 
data_file, single_cls, project, overwrite_config=False): with open(data_file) as f: - data = yaml.load(f, Loader=yaml.SafeLoader) # data dict + data = yaml.safe_load(f) # data dict nc, names = (1, ['item']) if single_cls else (int(data['nc']), data['names']) names = {k: v for k, v in enumerate(names)} # to index dictionary self.train_artifact = self.create_dataset_table(LoadImagesAndLabels( @@ -206,7 +206,7 @@ def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config= path = data_file if overwrite_config else '_wandb.'.join(data_file.rsplit('.', 1)) # updated data.yaml path data.pop('download', None) with open(path, 'w') as f: - yaml.dump(data, f) + yaml.safe_dump(data, f) if self.job_type == 'Training': # builds correct artifact pipeline graph self.wandb_run.use_artifact(self.val_artifact) From 5f7d39fede4de8af98472bd009c63c3a86568e2d Mon Sep 17 00:00:00 2001 From: JoshSong Date: Wed, 21 Apr 2021 23:50:28 +1000 Subject: [PATCH 195/254] Cleanup load_image() (#2871) * don't resize up in load_image if augmenting * cleanup Co-authored-by: Glenn Jocher --- utils/datasets.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index b81c634dcb7a..3fcdddd7c013 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -634,10 +634,10 @@ def load_image(self, index): img = cv2.imread(path) # BGR assert img is not None, 'Image Not Found ' + path h0, w0 = img.shape[:2] # orig hw - r = self.img_size / max(h0, w0) # resize image to img_size - if r != 1: # always resize down, only resize up if training with augmentation - interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR - img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp) + r = self.img_size / max(h0, w0) # ratio + if r != 1: # if sizes are not equal + img = cv2.resize(img, (int(w0 * r), int(h0 * r)), + interpolation=cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR) return img, (h0, 
w0), img.shape[:2] # img, hw_original, hw_resized else: return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized From d48a34dca72c7df1f684a1ff33c18beebc0f0ed9 Mon Sep 17 00:00:00 2001 From: Michael Heilig <75843816+MichHeilig@users.noreply.github.com> Date: Thu, 22 Apr 2021 00:49:55 +0200 Subject: [PATCH 196/254] bug fix: switched rows and cols for correct detections in confusion matrix (#2883) --- utils/metrics.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/metrics.py b/utils/metrics.py index 666b8c7ec1c0..323c84b6c873 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -145,7 +145,7 @@ def process_batch(self, detections, labels): for i, gc in enumerate(gt_classes): j = m0 == i if n and sum(j) == 1: - self.matrix[gc, detection_classes[m1[j]]] += 1 # correct + self.matrix[detection_classes[m1[j]], gc] += 1 # correct else: self.matrix[self.nc, gc] += 1 # background FP From 78fd0776571589a2a85b9245b15798497ef104d3 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 22 Apr 2021 12:10:26 +0200 Subject: [PATCH 197/254] VisDrone2019-DET Dataset Auto-Download (#2882) * VisDrone Dataset Auto-Download * add visdrone.yaml * cleanup * add VisDrone2019-DET-test-dev * cleanup VOC --- data/argoverse_hd.yaml | 2 +- data/coco.yaml | 2 +- data/coco128.yaml | 2 +- data/scripts/get_argoverse_hd.sh | 2 +- data/scripts/get_coco.sh | 2 +- data/scripts/get_voc.sh | 113 ++++++++++++------------------- data/visdrone.yaml | 65 ++++++++++++++++++ data/voc.yaml | 2 +- utils/general.py | 32 +++++++-- 9 files changed, 144 insertions(+), 78 deletions(-) create mode 100644 data/visdrone.yaml diff --git a/data/argoverse_hd.yaml b/data/argoverse_hd.yaml index df7a9361e769..0ba314d82ce1 100644 --- a/data/argoverse_hd.yaml +++ b/data/argoverse_hd.yaml @@ -1,6 +1,6 @@ # Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ # Train command: python train.py --data argoverse_hd.yaml -# 
Default dataset location is next to /yolov5: +# Default dataset location is next to YOLOv5: # /parent_folder # /argoverse # /yolov5 diff --git a/data/coco.yaml b/data/coco.yaml index fa33a1210004..f818a49ff0fa 100644 --- a/data/coco.yaml +++ b/data/coco.yaml @@ -1,6 +1,6 @@ # COCO 2017 dataset http://cocodataset.org # Train command: python train.py --data coco.yaml -# Default dataset location is next to /yolov5: +# Default dataset location is next to YOLOv5: # /parent_folder # /coco # /yolov5 diff --git a/data/coco128.yaml b/data/coco128.yaml index c41bccf2b8d5..83fbc29d3404 100644 --- a/data/coco128.yaml +++ b/data/coco128.yaml @@ -1,6 +1,6 @@ # COCO 2017 dataset http://cocodataset.org - first 128 training images # Train command: python train.py --data coco128.yaml -# Default dataset location is next to /yolov5: +# Default dataset location is next to YOLOv5: # /parent_folder # /coco128 # /yolov5 diff --git a/data/scripts/get_argoverse_hd.sh b/data/scripts/get_argoverse_hd.sh index caec61efed78..18131a6764d6 100644 --- a/data/scripts/get_argoverse_hd.sh +++ b/data/scripts/get_argoverse_hd.sh @@ -2,7 +2,7 @@ # Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ # Download command: bash data/scripts/get_argoverse_hd.sh # Train command: python train.py --data argoverse_hd.yaml -# Default dataset location is next to /yolov5: +# Default dataset location is next to YOLOv5: # /parent_folder # /argoverse # /yolov5 diff --git a/data/scripts/get_coco.sh b/data/scripts/get_coco.sh index bbb1e9291d5b..caae37504780 100755 --- a/data/scripts/get_coco.sh +++ b/data/scripts/get_coco.sh @@ -2,7 +2,7 @@ # COCO 2017 dataset http://cocodataset.org # Download command: bash data/scripts/get_coco.sh # Train command: python train.py --data coco.yaml -# Default dataset location is next to /yolov5: +# Default dataset location is next to YOLOv5: # /parent_folder # /coco # /yolov5 diff --git a/data/scripts/get_voc.sh b/data/scripts/get_voc.sh index 
13b83c28d706..4c04aaa95a29 100644 --- a/data/scripts/get_voc.sh +++ b/data/scripts/get_voc.sh @@ -2,7 +2,7 @@ # PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC/ # Download command: bash data/scripts/get_voc.sh # Train command: python train.py --data voc.yaml -# Default dataset location is next to /yolov5: +# Default dataset location is next to YOLOv5: # /parent_folder # /VOC # /yolov5 @@ -29,34 +29,27 @@ echo "Completed in" $runtime "seconds" echo "Splitting dataset..." python3 - "$@" <train.txt cat 2007_train.txt 2007_val.txt 2007_test.txt 2012_train.txt 2012_val.txt >train.all.txt -python3 - "$@" < Date: Thu, 22 Apr 2021 16:51:21 +0200 Subject: [PATCH 198/254] Uppercase model filenames enabled (#2890) --- utils/google_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/google_utils.py b/utils/google_utils.py index 0a7ca3b896d6..db36fa9d6822 100644 --- a/utils/google_utils.py +++ b/utils/google_utils.py @@ -18,7 +18,7 @@ def gsutil_getsize(url=''): def attempt_download(file, repo='ultralytics/yolov5'): # Attempt file download if does not exist - file = Path(str(file).strip().replace("'", '').lower()) + file = Path(str(file).strip().replace("'", '')) if not file.exists(): try: From 264d860f8dc36e7d9125d6fc347a02d34a7d5e37 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 22 Apr 2021 17:26:05 +0200 Subject: [PATCH 199/254] ACON activation function (#2893) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * ACON Activation Function ## 🚀 Feature There is a new activation function [ACON (CVPR 2021)](https://arxiv.org/pdf/2009.04759.pdf) that unifies ReLU and Swish. 
ACON is simple but very effective, code is here: https://github.com/nmaac/acon/blob/main/acon.py#L19 ![image](https://user-images.githubusercontent.com/5032208/115676962-a38dfe80-a382-11eb-9883-61fa3216e3e6.png) The improvements are very significant: ![image](https://user-images.githubusercontent.com/5032208/115680180-eac9be80-a385-11eb-9c7a-8643db552c69.png) ## Alternatives It also has an enhanced version meta-ACON that uses a small network to learn beta explicitly, which may influence the speed a bit. ## Additional context [Code](https://github.com/nmaac/acon) and [paper](https://arxiv.org/pdf/2009.04759.pdf). * Update activations.py --- utils/activations.py | 58 +++++++++++++++++++++++++++++++------------- 1 file changed, 41 insertions(+), 17 deletions(-) diff --git a/utils/activations.py b/utils/activations.py index aa3ddf071d28..1d095c1cf0f1 100644 --- a/utils/activations.py +++ b/utils/activations.py @@ -19,23 +19,6 @@ def forward(x): return x * F.hardtanh(x + 3, 0., 6.) / 6. # for torchscript, CoreML and ONNX -class MemoryEfficientSwish(nn.Module): - class F(torch.autograd.Function): - @staticmethod - def forward(ctx, x): - ctx.save_for_backward(x) - return x * torch.sigmoid(x) - - @staticmethod - def backward(ctx, grad_output): - x = ctx.saved_tensors[0] - sx = torch.sigmoid(x) - return grad_output * (sx * (1 + x * (1 - sx))) - - def forward(self, x): - return self.F.apply(x) - - # Mish https://github.com/digantamisra98/Mish -------------------------------------------------------------------------- class Mish(nn.Module): @staticmethod @@ -70,3 +53,44 @@ def __init__(self, c1, k=3): # ch_in, kernel def forward(self, x): return torch.max(x, self.bn(self.conv(x))) + + +# ACON https://arxiv.org/pdf/2009.04759.pdf ---------------------------------------------------------------------------- +class AconC(nn.Module): + r""" ACON activation (activate or not). 
+ AconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is a learnable parameter + according to "Activate or Not: Learning Customized Activation" . + """ + + def __init__(self, c1): + super().__init__() + self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1)) + self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1)) + self.beta = nn.Parameter(torch.ones(1, c1, 1, 1)) + + def forward(self, x): + dpx = (self.p1 - self.p2) * x + return dpx * torch.sigmoid(self.beta * dpx) + self.p2 * x + + +class MetaAconC(nn.Module): + r""" ACON activation (activate or not). + MetaAconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is generated by a small network + according to "Activate or Not: Learning Customized Activation" . + """ + + def __init__(self, c1, k=1, s=1, r=16): # ch_in, kernel, stride, r + super().__init__() + c2 = max(r, c1 // r) + self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1)) + self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1)) + self.fc1 = nn.Conv2d(c1, c2, k, s, bias=False) + self.bn1 = nn.BatchNorm2d(c2) + self.fc2 = nn.Conv2d(c2, c1, k, s, bias=False) + self.bn2 = nn.BatchNorm2d(c1) + + def forward(self, x): + y = x.mean(dim=2, keepdims=True).mean(dim=3, keepdims=True) + beta = torch.sigmoid(self.bn2(self.fc2(self.bn1(self.fc1(y))))) + dpx = (self.p1 - self.p2) * x + return dpx * torch.sigmoid(beta * dpx) + self.p2 * x From b40dd991674e78dc73e67d2f7d415c65073592fc Mon Sep 17 00:00:00 2001 From: fcakyon <34196005+fcakyon@users.noreply.github.com> Date: Thu, 22 Apr 2021 19:17:30 +0300 Subject: [PATCH 200/254] Explicit opt function arguments (#2817) * more explicit function arguments * fix typo in detect.py * revert import order * revert import order * remove default value --- detect.py | 6 +++--- test.py | 8 +++++--- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/detect.py b/detect.py index 081ae3d89e2e..d90d2aa8c4f5 100644 --- a/detect.py +++ b/detect.py @@ -15,7 +15,7 @@ from utils.torch_utils import select_device, load_classifier, 
time_synchronized -def detect(): +def detect(opt): source, weights, view_img, save_txt, imgsz = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size save_img = not opt.nosave and not source.endswith('.txt') # save inference images webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith( @@ -176,7 +176,7 @@ def detect(): with torch.no_grad(): if opt.update: # update all models (to fix SourceChangeWarning) for opt.weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']: - detect() + detect(opt=opt) strip_optimizer(opt.weights) else: - detect() + detect(opt=opt) diff --git a/test.py b/test.py index 43c03cf0e094..91b2b981c45b 100644 --- a/test.py +++ b/test.py @@ -38,7 +38,8 @@ def test(data, wandb_logger=None, compute_loss=None, half_precision=True, - is_coco=False): + is_coco=False, + opt=None): # Initialize/load model and set device training = model is not None if training: # called by train.py @@ -323,11 +324,12 @@ def test(data, save_txt=opt.save_txt | opt.save_hybrid, save_hybrid=opt.save_hybrid, save_conf=opt.save_conf, + opt=opt ) elif opt.task == 'speed': # speed benchmarks for w in opt.weights: - test(opt.data, w, opt.batch_size, opt.img_size, 0.25, 0.45, save_json=False, plots=False) + test(opt.data, w, opt.batch_size, opt.img_size, 0.25, 0.45, save_json=False, plots=False, opt=opt) elif opt.task == 'study': # run over a range of settings and save/plot # python test.py --task study --data coco.yaml --iou 0.7 --weights yolov5s.pt yolov5m.pt yolov5l.pt yolov5x.pt @@ -338,7 +340,7 @@ def test(data, for i in x: # img-size print(f'\nRunning {f} point {i}...') r, _, t = test(opt.data, w, opt.batch_size, i, opt.conf_thres, opt.iou_thres, opt.save_json, - plots=False) + plots=False, opt=opt) y.append(r + t) # results and times np.savetxt(f, y, fmt='%10.4g') # save os.system('zip -r study.zip study_*.txt') From cdb678f4181bdbad01a6c88e2840871e4058b7cb Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 22 Apr 
2021 19:27:21 +0200 Subject: [PATCH 201/254] Update yolo.py (#2899) --- models/yolo.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/models/yolo.py b/models/yolo.py index 7db0e7da2629..36fa27e89134 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -264,14 +264,14 @@ def parse_model(d, ch): # model_dict, input_channels(3) # Create model model = Model(opt.cfg).to(device) model.train() - + # Profile - # img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 640, 640).to(device) + # img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 320, 320).to(device) # y = model(img, profile=True) - # Tensorboard + # Tensorboard (not working https://github.com/ultralytics/yolov5/issues/2898) # from torch.utils.tensorboard import SummaryWriter - # tb_writer = SummaryWriter() - # print("Run 'tensorboard --logdir=models/runs' to view tensorboard at http://localhost:6006/") - # tb_writer.add_graph(model.model, img) # add model to tensorboard + # tb_writer = SummaryWriter('.') + # print("Run 'tensorboard --logdir=models' to view tensorboard at http://localhost:6006/") + # tb_writer.add_graph(torch.jit.trace(model, img, strict=False), []) # add model graph # tb_writer.add_image('test', img[0], dataformats='CWH') # add model to tensorboard From 14d2d2d75fff27a9deb183c9cb76f107f43ca3ad Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 22 Apr 2021 20:27:32 +0200 Subject: [PATCH 202/254] Update google_utils.py (#2900) --- utils/google_utils.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/utils/google_utils.py b/utils/google_utils.py index db36fa9d6822..6a4660bad509 100644 --- a/utils/google_utils.py +++ b/utils/google_utils.py @@ -26,8 +26,12 @@ def attempt_download(file, repo='ultralytics/yolov5'): assets = [x['name'] for x in response['assets']] # release assets, i.e. ['yolov5s.pt', 'yolov5m.pt', ...] tag = response['tag_name'] # i.e. 
'v1.0' except: # fallback plan - assets = ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt'] - tag = subprocess.check_output('git tag', shell=True).decode().split()[-1] + assets = ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt', + 'yolov5s6.pt', 'yolov5m6.pt', 'yolov5l6.pt', 'yolov5x6.pt'] + try: + tag = subprocess.check_output('git tag', shell=True, stderr=subprocess.STDOUT).decode().split()[-1] + except: + tag = 'v5.0' # current release name = file.name if name in assets: From f662c1850739fafaf7e76b1157e9c936032e4cc4 Mon Sep 17 00:00:00 2001 From: Maximilian Peters Date: Fri, 23 Apr 2021 21:07:48 +0200 Subject: [PATCH 203/254] Add detect.py --hide-conf --hide-labels --line-thickness options (#2658) * command line option for line thickness and hiding labels * command line option for line thickness and hiding labels * command line option for line thickness and hiding labels * command line option for line thickness and hiding labels * command line option for line thickness and hiding labels * command line option for hiding confidence values * Update detect.py Co-authored-by: Glenn Jocher --- detect.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/detect.py b/detect.py index d90d2aa8c4f5..358ef9e3eb1c 100644 --- a/detect.py +++ b/detect.py @@ -110,8 +110,9 @@ def detect(opt): if save_img or opt.save_crop or view_img: # Add bbox to image c = int(cls) # integer class - label = f'{names[c]} {conf:.2f}' - plot_one_box(xyxy, im0, label=label, color=colors[c], line_thickness=3) + label = None if opt.hide_labels else (names[c] if opt.hide_conf else f'{names[c]} {conf:.2f}') + + plot_one_box(xyxy, im0, label=label, color=colors[c], line_thickness=opt.line_thickness) if opt.save_crop: save_one_box(xyxy, im0s, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) @@ -169,6 +170,9 @@ def detect(opt): parser.add_argument('--project', default='runs/detect', help='save results to project/name') parser.add_argument('--name', 
default='exp', help='save results to project/name') parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)') + parser.add_argument('--hide-labels', default=True, action='store_true', help='hide labels') + parser.add_argument('--hide-conf', default=True, action='store_true', help='hide confidences') opt = parser.parse_args() print(opt) check_requirements(exclude=('pycocotools', 'thop')) From 28db23763904bf0800fe9647fc7e25b10f4f8e3c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 23 Apr 2021 21:21:58 +0200 Subject: [PATCH 204/254] Default optimize_for_mobile() on TorchScript models (#2908) Per https://pytorch.org/tutorials/recipes/script_optimized.html this should improve performance on torchscript models (and maybe coreml models also since coremltools operates on a torchscript model input, though this still requires testing). --- models/export.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/models/export.py b/models/export.py index c527a47951cb..312e949f56ac 100644 --- a/models/export.py +++ b/models/export.py @@ -12,6 +12,7 @@ import torch import torch.nn as nn +from torch.utils.mobile_optimizer import optimize_for_mobile import models from models.experimental import attempt_load @@ -65,6 +66,7 @@ print(f'\n{prefix} starting export with torch {torch.__version__}...') f = opt.weights.replace('.pt', '.torchscript.pt') # filename ts = torch.jit.trace(model, img, strict=False) + ts = optimize_for_mobile(ts) # https://pytorch.org/tutorials/recipes/script_optimized.html ts.save(f) print(f'{prefix} export success, saved as {f}') except Exception as e: From a2a514dec8a7a96c4442f50885a46abdb4b7fba1 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 23 Apr 2021 23:50:02 +0200 Subject: [PATCH 205/254] Update export.py (#2909) --- models/export.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/models/export.py b/models/export.py index 312e949f56ac..5b7b6bda08ae 100644 --- a/models/export.py +++ b/models/export.py @@ -112,7 +112,7 @@ try: import coremltools as ct - print(f'{prefix} starting export with coremltools {onnx.__version__}...') + print(f'{prefix} starting export with coremltools {ct.__version__}...') # convert model from torchscript and apply pixel scaling as per detect.py model = ct.convert(ts, inputs=[ct.ImageType(name='image', shape=img.shape, scale=1 / 255.0, bias=[0, 0, 0])]) f = opt.weights.replace('.pt', '.mlmodel') # filename From 646386ff09f8fce34cb8665a99dfd523f2dc138c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 24 Apr 2021 00:10:38 +0200 Subject: [PATCH 206/254] Update export.py for 2 dry runs (#2910) * Update export.py for 2 dry runs * Update export.py --- models/export.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/models/export.py b/models/export.py index 5b7b6bda08ae..a820ff94ac17 100644 --- a/models/export.py +++ b/models/export.py @@ -58,7 +58,8 @@ # elif isinstance(m, models.yolo.Detect): # m.forward = m.forward_export # assign forward (optional) model.model[-1].export = not opt.grid # set Detect() layer grid export - y = model(img) # dry run + for _ in range(2): + y = model(img) # dry runs # TorchScript export ----------------------------------------------------------------------------------------------- prefix = colorstr('TorchScript:') @@ -80,7 +81,6 @@ print(f'{prefix} starting export with onnx {onnx.__version__}...') f = opt.weights.replace('.pt', '.onnx') # filename torch.onnx.export(model, img, f, verbose=False, opset_version=12, input_names=['images'], - output_names=['classes', 'boxes'] if y is None else ['output'], dynamic_axes={'images': {0: 'batch', 2: 'height', 3: 'width'}, # size(1,3,640,640) 'output': {0: 'batch', 2: 'y', 3: 'x'}} if opt.dynamic else None) From 1b1ab4cca20aff2b88fedefc01c0482fcdc1a475 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 24 Apr 2021 
01:31:11 +0200 Subject: [PATCH 207/254] Add file_size() function (#2911) * Add file_size() function * Update export.py --- models/export.py | 9 +++++---- utils/general.py | 5 +++++ 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/models/export.py b/models/export.py index a820ff94ac17..38fefa7e891c 100644 --- a/models/export.py +++ b/models/export.py @@ -17,7 +17,7 @@ import models from models.experimental import attempt_load from utils.activations import Hardswish, SiLU -from utils.general import colorstr, check_img_size, check_requirements, set_logging +from utils.general import colorstr, check_img_size, check_requirements, file_size, set_logging from utils.torch_utils import select_device if __name__ == '__main__': @@ -60,6 +60,7 @@ model.model[-1].export = not opt.grid # set Detect() layer grid export for _ in range(2): y = model(img) # dry runs + print(f"\n{colorstr('PyTorch:')} starting from {opt.weights} ({file_size(opt.weights):.1f} MB)") # TorchScript export ----------------------------------------------------------------------------------------------- prefix = colorstr('TorchScript:') @@ -69,7 +70,7 @@ ts = torch.jit.trace(model, img, strict=False) ts = optimize_for_mobile(ts) # https://pytorch.org/tutorials/recipes/script_optimized.html ts.save(f) - print(f'{prefix} export success, saved as {f}') + print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') except Exception as e: print(f'{prefix} export failure: {e}') @@ -103,7 +104,7 @@ onnx.save(model_onnx, f) except Exception as e: print(f'{prefix} simplifier failure: {e}') - print(f'{prefix} export success, saved as {f}') + print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') except Exception as e: print(f'{prefix} export failure: {e}') @@ -117,7 +118,7 @@ model = ct.convert(ts, inputs=[ct.ImageType(name='image', shape=img.shape, scale=1 / 255.0, bias=[0, 0, 0])]) f = opt.weights.replace('.pt', '.mlmodel') # filename model.save(f) - print(f'{prefix} export 
success, saved as {f}') + print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') except Exception as e: print(f'{prefix} export failure: {e}') diff --git a/utils/general.py b/utils/general.py index 92c6ca5df208..ba88759c2983 100755 --- a/utils/general.py +++ b/utils/general.py @@ -61,6 +61,11 @@ def emojis(str=''): return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str +def file_size(file): + # Return file size in MB + return Path(file).stat().st_size / 1e6 + + def check_online(): # Check internet connectivity import socket From 45632b27049734e5c73289b10d90a5dc7c2dd6f3 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 24 Apr 2021 15:53:15 +0200 Subject: [PATCH 208/254] Update download() for tar.gz files (#2919) * Update download() for tar.gz files * Update general.py --- utils/general.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/utils/general.py b/utils/general.py index ba88759c2983..8efeb5ea59cc 100755 --- a/utils/general.py +++ b/utils/general.py @@ -184,14 +184,19 @@ def check_dataset(dict): def download(url, dir='.', multi_thread=False): - # Multi-threaded file download function + # Multi-threaded file download and unzip function def download_one(url, dir): # Download 1 file f = dir / Path(url).name # filename - print(f'Downloading {url} to {f}...') - torch.hub.download_url_to_file(url, f, progress=True) # download - if f.suffix == '.zip': - os.system(f'unzip -qo {f} -d {dir} && rm {f}') # unzip -quiet -overwrite + if not f.exists(): + print(f'Downloading {url} to {f}...') + torch.hub.download_url_to_file(url, f, progress=True) # download + if f.suffix in ('.zip', '.gz'): + print(f'Unzipping {f}...') + if f.suffix == '.zip': + os.system(f'unzip -qo {f} -d {dir} && rm {f}') # unzip -quiet -overwrite + elif f.suffix == '.gz': + os.system(f'tar xfz {f} --directory {f.parent} && rm {f}') # unzip dir = Path(dir) dir.mkdir(parents=True, exist_ok=True) # make directory 
From de19d396e713b8517e555f12d05d906e9d6891b3 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 24 Apr 2021 16:28:27 +0200 Subject: [PATCH 209/254] Update visdrone.yaml (#2921) --- data/visdrone.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/visdrone.yaml b/data/visdrone.yaml index 59f597a9c6f1..c23e6bc286f8 100644 --- a/data/visdrone.yaml +++ b/data/visdrone.yaml @@ -56,7 +56,7 @@ download: | dir = Path('../VisDrone') # dataset directory urls = ['https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-train.zip', 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-val.zip', - 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-dev.zip' + 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-dev.zip', 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-challenge.zip'] download(urls, dir=dir) From eae28a93b026de2ea2d9b9f535b0c9fb747b19f6 Mon Sep 17 00:00:00 2001 From: albinxavi <62288451+albinxavi@users.noreply.github.com> Date: Sat, 24 Apr 2021 23:28:02 +0530 Subject: [PATCH 210/254] Change default value of hide label argument to False (#2923) --- detect.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/detect.py b/detect.py index 358ef9e3eb1c..a2b3045bc500 100644 --- a/detect.py +++ b/detect.py @@ -171,7 +171,7 @@ def detect(opt): parser.add_argument('--name', default='exp', help='save results to project/name') parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)') - parser.add_argument('--hide-labels', default=True, action='store_true', help='hide labels') + parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels') parser.add_argument('--hide-conf', default=True, action='store_true', 
help='hide confidences') opt = parser.parse_args() print(opt) From aa78069c585115e29ba5759a2d856be0222bc12c Mon Sep 17 00:00:00 2001 From: albinxavi <62288451+albinxavi@users.noreply.github.com> Date: Sun, 25 Apr 2021 17:48:14 +0530 Subject: [PATCH 211/254] Change default value of hide-conf argument to false (#2925) --- detect.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/detect.py b/detect.py index a2b3045bc500..f5e53d991504 100644 --- a/detect.py +++ b/detect.py @@ -172,7 +172,7 @@ def detect(opt): parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)') parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels') - parser.add_argument('--hide-conf', default=True, action='store_true', help='hide confidences') + parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences') opt = parser.parse_args() print(opt) check_requirements(exclude=('pycocotools', 'thop')) From 3665c0f59bf00fc8cda90323cf189364f9a28974 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 25 Apr 2021 20:05:16 +0200 Subject: [PATCH 212/254] test.py native --single-cls (#2928) --- test.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/test.py b/test.py index 91b2b981c45b..2b9e90c05367 100644 --- a/test.py +++ b/test.py @@ -119,7 +119,7 @@ def test(data, targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling t = time_synchronized() - out = non_max_suppression(out, conf_thres=conf_thres, iou_thres=iou_thres, labels=lb, multi_label=True) + out = non_max_suppression(out, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls) t1 += time_synchronized() - t # Statistics per image @@ -136,6 +136,8 @@ 
def test(data, continue # Predictions + if single_cls: + pred[:, 5] = 0 predn = pred.clone() scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0], shapes[si][1]) # native-space pred From c0d3f80544ab7a99556eb40c021f817caada9c31 Mon Sep 17 00:00:00 2001 From: NanoCode012 Date: Mon, 26 Apr 2021 03:01:05 +0700 Subject: [PATCH 213/254] Add verbose option to pytorch hub models (#2926) * Add verbose and update print to logging * Fix positonal param * Revert auto formatting changes * Update hubconf.py Co-authored-by: Glenn Jocher --- hubconf.py | 53 +++++++++++++++++++++++++----------------------- models/yolo.py | 22 ++++++++++---------- utils/general.py | 4 ++-- 3 files changed, 41 insertions(+), 38 deletions(-) diff --git a/hubconf.py b/hubconf.py index d89502f4ee76..e42d0b59bd2a 100644 --- a/hubconf.py +++ b/hubconf.py @@ -16,10 +16,9 @@ dependencies = ['torch', 'yaml'] check_requirements(Path(__file__).parent / 'requirements.txt', exclude=('pycocotools', 'thop')) -set_logging() -def create(name, pretrained, channels, classes, autoshape): +def create(name, pretrained, channels, classes, autoshape, verbose): """Creates a specified YOLOv5 model Arguments: @@ -32,6 +31,8 @@ def create(name, pretrained, channels, classes, autoshape): pytorch model """ try: + set_logging(verbose=verbose) + cfg = list((Path(__file__).parent / 'models').rglob(f'{name}.yaml'))[0] # model.yaml path model = Model(cfg, channels, classes) if pretrained: @@ -55,7 +56,7 @@ def create(name, pretrained, channels, classes, autoshape): raise Exception(s) from e -def custom(path_or_model='path/to/model.pt', autoshape=True): +def custom(path_or_model='path/to/model.pt', autoshape=True, verbose=True): """YOLOv5-custom model https://github.com/ultralytics/yolov5 Arguments (3 options): @@ -66,6 +67,8 @@ def custom(path_or_model='path/to/model.pt', autoshape=True): Returns: pytorch model """ + set_logging(verbose=verbose) + model = torch.load(path_or_model) if isinstance(path_or_model, str) else 
path_or_model # load checkpoint if isinstance(model, dict): model = model['ema' if model.get('ema') else 'model'] # load model @@ -79,49 +82,49 @@ def custom(path_or_model='path/to/model.pt', autoshape=True): return hub_model.to(device) -def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True): +def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True): # YOLOv5-small model https://github.com/ultralytics/yolov5 - return create('yolov5s', pretrained, channels, classes, autoshape) + return create('yolov5s', pretrained, channels, classes, autoshape, verbose) -def yolov5m(pretrained=True, channels=3, classes=80, autoshape=True): +def yolov5m(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True): # YOLOv5-medium model https://github.com/ultralytics/yolov5 - return create('yolov5m', pretrained, channels, classes, autoshape) + return create('yolov5m', pretrained, channels, classes, autoshape, verbose) -def yolov5l(pretrained=True, channels=3, classes=80, autoshape=True): +def yolov5l(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True): # YOLOv5-large model https://github.com/ultralytics/yolov5 - return create('yolov5l', pretrained, channels, classes, autoshape) + return create('yolov5l', pretrained, channels, classes, autoshape, verbose) -def yolov5x(pretrained=True, channels=3, classes=80, autoshape=True): +def yolov5x(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True): # YOLOv5-xlarge model https://github.com/ultralytics/yolov5 - return create('yolov5x', pretrained, channels, classes, autoshape) + return create('yolov5x', pretrained, channels, classes, autoshape, verbose) -def yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True): - # YOLOv5-small model https://github.com/ultralytics/yolov5 - return create('yolov5s6', pretrained, channels, classes, autoshape) +def yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True): + # YOLOv5-small-P6 
model https://github.com/ultralytics/yolov5 + return create('yolov5s6', pretrained, channels, classes, autoshape, verbose) -def yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True): - # YOLOv5-medium model https://github.com/ultralytics/yolov5 - return create('yolov5m6', pretrained, channels, classes, autoshape) +def yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True): + # YOLOv5-medium-P6 model https://github.com/ultralytics/yolov5 + return create('yolov5m6', pretrained, channels, classes, autoshape, verbose) -def yolov5l6(pretrained=True, channels=3, classes=80, autoshape=True): - # YOLOv5-large model https://github.com/ultralytics/yolov5 - return create('yolov5l6', pretrained, channels, classes, autoshape) +def yolov5l6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True): + # YOLOv5-large-P6 model https://github.com/ultralytics/yolov5 + return create('yolov5l6', pretrained, channels, classes, autoshape, verbose) -def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True): - # YOLOv5-xlarge model https://github.com/ultralytics/yolov5 - return create('yolov5x6', pretrained, channels, classes, autoshape) +def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True): + # YOLOv5-xlarge-P6 model https://github.com/ultralytics/yolov5 + return create('yolov5x6', pretrained, channels, classes, autoshape, verbose) if __name__ == '__main__': - model = create(name='yolov5s', pretrained=True, channels=3, classes=80, autoshape=True) # pretrained example - # model = custom(path_or_model='path/to/model.pt') # custom example + model = create(name='yolov5s', pretrained=True, channels=3, classes=80, autoshape=True, verbose=True) # pretrained + # model = custom(path_or_model='path/to/model.pt') # custom # Verify inference import cv2 diff --git a/models/yolo.py b/models/yolo.py index 36fa27e89134..dd505e22a68d 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -84,7 +84,7 @@ def 
__init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, i self.yaml['anchors'] = round(anchors) # override yaml value self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]) # model, savelist self.names = [str(i) for i in range(self.yaml['nc'])] # default names - # print([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))]) + # logger.info([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))]) # Build strides, anchors m = self.model[-1] # Detect() @@ -95,7 +95,7 @@ def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, i check_anchor_order(m) self.stride = m.stride self._initialize_biases() # only run once - # print('Strides: %s' % m.stride.tolist()) + # logger.info('Strides: %s' % m.stride.tolist()) # Init weights, biases initialize_weights(self) @@ -134,13 +134,13 @@ def forward_once(self, x, profile=False): for _ in range(10): _ = m(x) dt.append((time_synchronized() - t) * 100) - print('%10.1f%10.0f%10.1fms %-40s' % (o, m.np, dt[-1], m.type)) + logger.info('%10.1f%10.0f%10.1fms %-40s' % (o, m.np, dt[-1], m.type)) x = m(x) # run y.append(x if m.i in self.save else None) # save output if profile: - print('%.1fms total' % sum(dt)) + logger.info('%.1fms total' % sum(dt)) return x def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency @@ -157,15 +157,15 @@ def _print_biases(self): m = self.model[-1] # Detect() module for mi in m.m: # from b = mi.bias.detach().view(m.na, -1).T # conv.bias(255) to (3,85) - print(('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean())) + logger.info(('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean())) # def _print_weights(self): # for m in self.model.modules(): # if type(m) is Bottleneck: - # print('%10.3g' % (m.w.detach().sigmoid() * 2)) # shortcut weights + # logger.info('%10.3g' % (m.w.detach().sigmoid() * 2)) # shortcut weights def fuse(self): 
# fuse model Conv2d() + BatchNorm2d() layers - print('Fusing layers... ') + logger.info('Fusing layers... ') for m in self.model.modules(): if type(m) is Conv and hasattr(m, 'bn'): m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv @@ -177,19 +177,19 @@ def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers def nms(self, mode=True): # add or remove NMS module present = type(self.model[-1]) is NMS # last layer is NMS if mode and not present: - print('Adding NMS... ') + logger.info('Adding NMS... ') m = NMS() # module m.f = -1 # from m.i = self.model[-1].i + 1 # index self.model.add_module(name='%s' % m.i, module=m) # add self.eval() elif not mode and present: - print('Removing NMS... ') + logger.info('Removing NMS... ') self.model = self.model[:-1] # remove return self def autoshape(self): # add autoShape module - print('Adding autoShape... ') + logger.info('Adding autoShape... ') m = autoShape(self) # wrap model copy_attr(m, self, include=('yaml', 'nc', 'hyp', 'names', 'stride'), exclude=()) # copy attributes return m @@ -272,6 +272,6 @@ def parse_model(d, ch): # model_dict, input_channels(3) # Tensorboard (not working https://github.com/ultralytics/yolov5/issues/2898) # from torch.utils.tensorboard import SummaryWriter # tb_writer = SummaryWriter('.') - # print("Run 'tensorboard --logdir=models' to view tensorboard at http://localhost:6006/") + # logger.info("Run 'tensorboard --logdir=models' to view tensorboard at http://localhost:6006/") # tb_writer.add_graph(torch.jit.trace(model, img, strict=False), []) # add model graph # tb_writer.add_image('test', img[0], dataformats='CWH') # add model to tensorboard diff --git a/utils/general.py b/utils/general.py index 8efeb5ea59cc..f77ae3331538 100755 --- a/utils/general.py +++ b/utils/general.py @@ -32,10 +32,10 @@ os.environ['NUMEXPR_MAX_THREADS'] = str(min(os.cpu_count(), 8)) # NumExpr max threads -def set_logging(rank=-1): +def set_logging(rank=-1, verbose=True): logging.basicConfig( format="%(message)s", - 
level=logging.INFO if rank in [-1, 0] else logging.WARN) + level=logging.INFO if (verbose and rank in [-1, 0]) else logging.WARN) def init_seeds(seed=0): From 9c7bb5a52cc716166c2145ce1a878a0ad2cf93be Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 25 Apr 2021 22:54:07 +0200 Subject: [PATCH 214/254] ACON Activation batch-size 1 bug patch (#2901) * ACON Activation batch-size 1 bug path This is not a great solution to https://github.com/nmaac/acon/issues/4 but it's all I could think of at the moment. WARNING: YOLOv5 models with MetaAconC() activations are incapable of running inference at batch-size 1 properly due to a known bug in https://github.com/nmaac/acon/issues/4 with no known solution. * Update activations.py * Update activations.py * Update activations.py * Update activations.py --- utils/activations.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/utils/activations.py b/utils/activations.py index 1d095c1cf0f1..92a3b5eaa54b 100644 --- a/utils/activations.py +++ b/utils/activations.py @@ -84,13 +84,15 @@ def __init__(self, c1, k=1, s=1, r=16): # ch_in, kernel, stride, r c2 = max(r, c1 // r) self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1)) self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1)) - self.fc1 = nn.Conv2d(c1, c2, k, s, bias=False) - self.bn1 = nn.BatchNorm2d(c2) - self.fc2 = nn.Conv2d(c2, c1, k, s, bias=False) - self.bn2 = nn.BatchNorm2d(c1) + self.fc1 = nn.Conv2d(c1, c2, k, s, bias=True) + self.fc2 = nn.Conv2d(c2, c1, k, s, bias=True) + # self.bn1 = nn.BatchNorm2d(c2) + # self.bn2 = nn.BatchNorm2d(c1) def forward(self, x): y = x.mean(dim=2, keepdims=True).mean(dim=3, keepdims=True) - beta = torch.sigmoid(self.bn2(self.fc2(self.bn1(self.fc1(y))))) + # batch-size 1 bug/instabilities https://github.com/ultralytics/yolov5/issues/2891 + # beta = torch.sigmoid(self.bn2(self.fc2(self.bn1(self.fc1(y))))) # bug/unstable + beta = torch.sigmoid(self.fc2(self.fc1(y))) # bug patch BN layers removed dpx = (self.p1 - self.p2) * x 
return dpx * torch.sigmoid(beta * dpx) + self.p2 * x From 184991672636838453e796f72268833dff788d07 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 25 Apr 2021 23:33:25 +0200 Subject: [PATCH 215/254] Check_requirements() enclosing apostrophe bug fix (#2929) This fixes a bug where the '>' symbol in python package requirements was not running correctly with subprocess.check_output() commands. --- utils/general.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/general.py b/utils/general.py index f77ae3331538..fbb99b9e7f99 100755 --- a/utils/general.py +++ b/utils/general.py @@ -117,8 +117,8 @@ def check_requirements(requirements='requirements.txt', exclude=()): pkg.require(r) except Exception as e: # DistributionNotFound or VersionConflict if requirements not met n += 1 - print(f"{prefix} {e.req} not found and is required by YOLOv5, attempting auto-update...") - print(subprocess.check_output(f"pip install {e.req}", shell=True).decode()) + print(f"{prefix} {r} not found and is required by YOLOv5, attempting auto-update...") + print(subprocess.check_output(f"pip install '{r}'", shell=True).decode()) if n: # if packages updated source = file.resolve() if 'file' in locals() else requirements From 6c1290fe034e08cb60790d641507d75dbe3e2d61 Mon Sep 17 00:00:00 2001 From: BZFYS Date: Tue, 27 Apr 2021 05:56:25 +0800 Subject: [PATCH 216/254] Update README.md (#2934) * Update README.md dependencies: ImportError: libGL.so.1: cannot open shared object file: No such file or directory ImportError: libgthread-2.0.so.0: cannot open shared object file: No such file or directory ImportError: libSM.so.6: cannot open shared object file: No such file or directory ImportError: libXrender.so.1: cannot open shared object file: No such file or directory * replace older apt-get with apt Code commented for now until a better understanding of the issue, and also code is not cross-platform compatible. 
Co-authored-by: Glenn Jocher --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 27ea18da1932..d98bd7bfa7da 100755 --- a/README.md +++ b/README.md @@ -58,6 +58,7 @@ Model |size
(pixels) |mAPval
0.5:0.95 |mAPtest
0.5:0.95 ## Requirements Python 3.8 or later with all [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) dependencies installed, including `torch>=1.7`. To install run: + ```bash $ pip install -r requirements.txt ``` From 4890499344e21950d985e1a77e84a0a4161d1db0 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 26 Apr 2021 23:58:21 +0200 Subject: [PATCH 217/254] Improved yolo.py profiling (#2940) * Improved yolo.py profiling Improved column order and labelling. * Update yolo.py --- models/yolo.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/models/yolo.py b/models/yolo.py index dd505e22a68d..d573c5a290e2 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -134,7 +134,9 @@ def forward_once(self, x, profile=False): for _ in range(10): _ = m(x) dt.append((time_synchronized() - t) * 100) - logger.info('%10.1f%10.0f%10.1fms %-40s' % (o, m.np, dt[-1], m.type)) + if m == self.model[0]: + logger.info(f"{'time (ms)':>10s} {'GFLOPS':>10s} {'params':>10s} {'module'}") + logger.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f} {m.type}') x = m(x) # run y.append(x if m.i in self.save else None) # save output @@ -157,7 +159,8 @@ def _print_biases(self): m = self.model[-1] # Detect() module for mi in m.m: # from b = mi.bias.detach().view(m.na, -1).T # conv.bias(255) to (3,85) - logger.info(('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean())) + logger.info( + ('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean())) # def _print_weights(self): # for m in self.model.modules(): From 4200674a136a5589972f352790f76d3f37e98dd6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 27 Apr 2021 17:02:07 +0200 Subject: [PATCH 218/254] Add yolov5/ to sys.path() for *.py subdir exec (#2949) * Add yolov5/ to sys.path() for *.py subdir exec * Update export.py --- models/export.py | 3 ++- models/yolo.py | 5 +++-- 2 files changed, 5 insertions(+), 
3 deletions(-) diff --git a/models/export.py b/models/export.py index 38fefa7e891c..da15079149a1 100644 --- a/models/export.py +++ b/models/export.py @@ -7,8 +7,9 @@ import argparse import sys import time +from pathlib import Path -sys.path.append('./') # to run '$ python *.py' files in subdirectories +sys.path.append(Path(__file__).parent.parent.absolute().__str__()) # to run '$ python *.py' files in subdirectories import torch import torch.nn as nn diff --git a/models/yolo.py b/models/yolo.py index d573c5a290e2..cbff70fc83d4 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -4,8 +4,9 @@ import logging import sys from copy import deepcopy +from pathlib import Path -sys.path.append('./') # to run '$ python *.py' files in subdirectories +sys.path.append(Path(__file__).parent.parent.absolute().__str__()) # to run '$ python *.py' files in subdirectories logger = logging.getLogger(__name__) from models.common import * @@ -267,7 +268,7 @@ def parse_model(d, ch): # model_dict, input_channels(3) # Create model model = Model(opt.cfg).to(device) model.train() - + # Profile # img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 320, 320).to(device) # y = model(img, profile=True) From cb425b0c1bfbacc1db793c4f0bbd40ff90988beb Mon Sep 17 00:00:00 2001 From: Max Kolomeychenko Date: Fri, 7 May 2021 14:28:54 +0300 Subject: [PATCH 219/254] update UI + latest yolov5 sources (#15) * merge latest version done, not tested * split tabs with radio buttons * models table -wip * models table -wip * start split html template to parts * ui refactoring * compile-template wip - paths confusion * compile wip * train/val splits * keep/ignore unlabeled images * models table * training hyperparameters * UI templates - done * unlabeled count in UI * add adam optimizer * convert_project to detection - works * start train/val splits * splits wip * splits done, only simple tests * splits validation * data preprocessing - not tested * download weights - wip * init_script_arguments - not tested 
* init_script_arguments - not tested * prepare weights - wip * not tested * add metrics period * set output * artifacts dirs * train_batches_uploaded flag * pre-release for debug * update config --- requirements.txt | 12 +- supervisely/train/config.json | 2 +- supervisely/train/debug.env | 11 +- supervisely/train/src/gui.html | 355 ++---------------- supervisely/train/src/sly_init_ui.py | 153 -------- supervisely/train/src/sly_metrics.py | 45 +-- supervisely/train/src/sly_train.py | 124 +++--- supervisely/train/src/sly_train_globals.py | 39 +- supervisely/train/src/sly_train_utils.py | 55 +-- supervisely/train/src/sly_train_val_split.py | 55 --- supervisely/train/src/ui/architectures.html | 46 +++ supervisely/train/src/ui/architectures.py | 152 ++++++++ supervisely/train/src/ui/artifacts.html | 14 + supervisely/train/src/ui/artifacts.py | 17 + supervisely/train/src/ui/classes.html | 27 ++ supervisely/train/src/ui/classes.py | 25 ++ supervisely/train/src/ui/hyperparameters.html | 75 ++++ supervisely/train/src/ui/hyperparameters.py | 20 + supervisely/train/src/ui/input_project.html | 8 + supervisely/train/src/ui/input_project.py | 8 + supervisely/train/src/ui/monitoring.html | 115 ++++++ supervisely/train/src/ui/monitoring.py | 44 +++ supervisely/train/src/ui/splits.html | 133 +++++++ supervisely/train/src/ui/splits.py | 71 ++++ supervisely/train/src/ui/ui.py | 18 + .../{sly_prepare_data.py => yolov5_format.py} | 44 ++- train.py | 25 +- 27 files changed, 989 insertions(+), 704 deletions(-) delete mode 100644 supervisely/train/src/sly_init_ui.py delete mode 100644 supervisely/train/src/sly_train_val_split.py create mode 100644 supervisely/train/src/ui/architectures.html create mode 100644 supervisely/train/src/ui/architectures.py create mode 100644 supervisely/train/src/ui/artifacts.html create mode 100644 supervisely/train/src/ui/artifacts.py create mode 100644 supervisely/train/src/ui/classes.html create mode 100644 supervisely/train/src/ui/classes.py create mode 
100644 supervisely/train/src/ui/hyperparameters.html create mode 100644 supervisely/train/src/ui/hyperparameters.py create mode 100644 supervisely/train/src/ui/input_project.html create mode 100644 supervisely/train/src/ui/input_project.py create mode 100644 supervisely/train/src/ui/monitoring.html create mode 100644 supervisely/train/src/ui/monitoring.py create mode 100644 supervisely/train/src/ui/splits.html create mode 100644 supervisely/train/src/ui/splits.py create mode 100644 supervisely/train/src/ui/ui.py rename supervisely/train/src/{sly_prepare_data.py => yolov5_format.py} (57%) diff --git a/requirements.txt b/requirements.txt index 92519641ad8a..3bd62b213f07 100755 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ # pip install -r requirements.txt -supervisely==6.1.52 +supervisely==6.1.74 # base ---------------------------------------- #matplotlib>=3.2.2 @@ -8,21 +8,17 @@ supervisely==6.1.52 #Pillow PyYAML>=5.3.1 #scipy>=1.4.1 -torch>=1.7.0 -torchvision>=0.8.1 +#torch>=1.7.0 +#torchvision>=0.8.1 tqdm>=4.41.0 # logging ------------------------------------- tensorboard>=2.4.1 # wandb -# logging ------------------------------------- -tensorboard>=2.4.1 -# wandb - # plotting ------------------------------------ seaborn==0.11.1 -pandas +#pandas # export -------------------------------------- # coremltools>=4.1 diff --git a/supervisely/train/config.json b/supervisely/train/config.json index 3dcf3042b0ab..f92022b6b8b9 100644 --- a/supervisely/train/config.json +++ b/supervisely/train/config.json @@ -19,5 +19,5 @@ "context_root": "Neural Networks", "context_category": "YOLO v5" }, - "instance_version": "6.3.2" + "instance_version": "6.4.14" } diff --git a/supervisely/train/debug.env b/supervisely/train/debug.env index 561feaa23f0f..cb6f81ed7bb9 100644 --- a/supervisely/train/debug.env +++ b/supervisely/train/debug.env @@ -2,14 +2,13 @@ PYTHONUNBUFFERED=1 DEBUG_APP_DIR="/app_debug_data" DEBUG_CACHE_DIR="/app_cache" +LOG_LEVEL="debug" -TASK_ID=2391 
+TASK_ID=4326 -context.teamId=7 -context.workspaceId=263 -#modal.state.slyProjectId=1843 # coco-128 -modal.state.slyProjectId=1805 # lemons-annotated -#modal.state.slyFile="put your value here" +context.teamId=229 +context.workspaceId=287 +modal.state.slyProjectId=2128 # lemons-annotated SERVER_ADDRESS="put your value here" API_TOKEN="put your value here" diff --git a/supervisely/train/src/gui.html b/supervisely/train/src/gui.html index 463bfe2816ab..be2bb8140293 100644 --- a/supervisely/train/src/gui.html +++ b/supervisely/train/src/gui.html @@ -1,317 +1,40 @@ -
- - - {{data.projectName}} ({{data.projectImagesCount}} images) - - - - - - - - - - - - - - - - - - - Random - Based on image tags (not implemented yet) - Train = Val (not implemented yet) - - -
- - - - - - - - - - - -
-
- - - - - - - - -
If image does not have such tags, it will be assigned to training set
-
-
- All images are in both training and validation sets -
-
- - - - - Pretrained on COCO - From custom model - - - -
- - - - - - - -
-
- - - -
-
-
- - - - - - - - - - - - - - - Multi-scale - - - Single class - - - - - - - - - -
-
Training hyperparameters templates:
- - scratch - finetune - - Restore - Defaults - -
-
Edit settings in YAML format:
- -
-
-
- - - - Start training - -
- 0 training classes are selected -
-
- Path to model weights is not defined -
- -
-
{{data.progressName}}: {{data.currentProgressLabel}} / - {{data.totalProgressLabel}} -
- -
- - - - - - - - - - - - - - - - - - - - - - - -
- - - - - - -
-
- - - - -
-
-
- - - - - -
-
- - -
- Link to the directory with training artifacts will be here once training is finished -
- - - {{data.outputName}} - - - - -
- +
+ + #yolov5-train .el-tabs.el-tabs-cards .el-radio { + display: flex; + align-items: start; + /*margin-bottom: 10px;*/ + margin-left: 0; + white-space: normal; + } + + #yolov5-train .el-tabs.el-tabs-cards .el-radio__label div { + color: #7f858e; + font-size: 13px; + } + + .beautiful-table { border-collapse: collapse; } + .beautiful-table tr:nth-child(2n) { background-color: #f6f8fa; } + .beautiful-table td, .beautiful-table th { + border: 1px solid #dfe2e5; + padding: 6px 13px; + text-align: center; + line-height: 20px; + } + + #yolov5-train .el-tabs.el-tabs-cards { border-radius: 4px; box-shadow: none; } + #yolov5-train .el-tabs.el-tabs-cards .el-tabs__header { background-color: #f6fafd; } + #yolov5-train .el-tabs.el-tabs-cards .el-tabs__nav { float: none; display: flex; justify-content: + space-between; } + #yolov5-train .el-tabs.el-tabs-cards .el-tabs__item { flex: 1; margin-bottom: -3px; padding: 9px 16px 13px; + height: auto; line-height: normal; border-radius: 4px; } + + + {% include 'supervisely/train/src/ui/input_project.html' %} + {% include 'supervisely/train/src/ui/classes.html' %} + {% include 'supervisely/train/src/ui/splits.html' %} + {% include 'supervisely/train/src/ui/architectures.html' %} + {% include 'supervisely/train/src/ui/hyperparameters.html' %} + {% include 'supervisely/train/src/ui/monitoring.html' %} + {% include 'supervisely/train/src/ui/artifacts.html' %}
\ No newline at end of file diff --git a/supervisely/train/src/sly_init_ui.py b/supervisely/train/src/sly_init_ui.py deleted file mode 100644 index a36940f5a4c2..000000000000 --- a/supervisely/train/src/sly_init_ui.py +++ /dev/null @@ -1,153 +0,0 @@ -import os -import supervisely_lib as sly - -import sly_train_globals as globals -import sly_metrics as metrics - - -empty_gallery = { - "content": { - "projectMeta": sly.ProjectMeta().to_json(), - "annotations": {}, - "layout": [] - } -} - - -def init_input_project(data, project_info): - data["projectId"] = globals.project_id - data["projectName"] = project_info.name - data["projectImagesCount"] = project_info.items_count - data["projectPreviewUrl"] = globals.api.image.preview_url(project_info.reference_image_url, 100, 100) - - -def init_classes_stats(data, state, project_meta): - stats = globals.api.project.get_stats(globals.project_id) - class_images = {} - for item in stats["images"]["objectClasses"]: - class_images[item["objectClass"]["name"]] = item["total"] - class_objects = {} - for item in stats["objects"]["items"]: - class_objects[item["objectClass"]["name"]] = item["total"] - - classes_json = project_meta.obj_classes.to_json() - for obj_class in classes_json: - obj_class["imagesCount"] = class_images[obj_class["title"]] - obj_class["objectsCount"] = class_objects[obj_class["title"]] - - data["classes"] = classes_json - state["selectedClasses"] = [] - - state["classes"] = len(classes_json) * [True] - - -def init_random_split(PROJECT, data, state): - data["randomSplit"] = [ - {"name": "train", "type": "success"}, - {"name": "val", "type": "primary"}, - {"name": "total", "type": "gray"}, - ] - data["totalImagesCount"] = PROJECT.items_count - - train_percent = 80 - train_count = int(PROJECT.items_count / 100 * train_percent) - state["randomSplit"] = { - "count": { - "total": PROJECT.items_count, - "train": train_count, - "val": PROJECT.items_count - train_count - }, - "percent": { - "total": 100, - "train": 
train_percent, - "val": 100 - train_percent - }, - "shareImagesBetweenSplits": False, - "sliderDisabled": False, - } - - state["splitMethod"] = 1 - state["trainTagName"] = "" - state["valTagName"] = "" - - -def init_model_settings(data, state): - data["modelSizes"] = [ - {"label": "yolov5s", "config": "yolov5s.yaml", "params": "7.3M"}, - {"label": "yolov5m", "config": "yolov5m.yaml", "params": "21.4M"}, - {"label": "yolov5l", "config": "yolov5l.yaml", "params": "47.0M"}, - {"label": "yolov5x", "config": "yolov5x.yaml", "params": "87.7M"}, - ] - state["modelSize"] = data["modelSizes"][0]["label"] - state["modelWeightsOptions"] = 1 - state["pretrainedWeights"] = f'{data["modelSizes"][0]["label"]}.pt' - - # @TODO: for debug - #state["weightsPath"] = "/yolov5_train/coco128_002/2390/weights/best.pt" - state["weightsPath"] = "" - - -def init_training_hyperparameters(state): - state["epochs"] = 10 - state["batchSize"] = 16 - state["imgSize"] = 640 - state["multiScale"] = False - state["singleClass"] = False - state["device"] = '0' - state["workers"] = 8 # 0 - for debug - state["activeTabName"] = "General" - state["hyp"] = { - "scratch": globals.scratch_str, - "finetune": globals.finetune_str, - } - state["hypRadio"] = "scratch" - - -def init_start_state(state): - state["started"] = False - state["activeNames"] = [] - - -def init_galleries(data): - data["vis"] = empty_gallery - data["labelsVis"] = empty_gallery - data["predVis"] = empty_gallery - data["syncBindings"] = [] - - -def init_progress(data): - data["progressName"] = "" - data["currentProgress"] = 0 - data["totalProgress"] = 0 - data["currentProgressLabel"] = "" - data["totalProgressLabel"] = "" - - -def init_output(data): - data["outputUrl"] = "" - data["outputName"] = "" - - -def init(data, state): - init_input_project(data, globals.project_info) - init_classes_stats(data, state, globals.project_meta) - init_random_split(globals.project_info, data, state) - init_model_settings(data, state) - 
init_training_hyperparameters(state) - init_start_state(state) - init_galleries(data) - init_progress(data) - init_output(data) - metrics.init(data, state) - - -def set_output(): - file_info = globals.api.file.get_info_by_path(globals.team_id, - os.path.join(globals.remote_artifacts_dir, 'results.png')) - fields = [ - {"field": "data.outputUrl", "payload": globals.api.file.get_url(file_info.id)}, - {"field": "data.outputName", "payload": globals.remote_artifacts_dir}, - ] - globals.api.app.set_fields(globals.task_id, fields) - globals.api.task.set_output_directory(globals.task_id, file_info.id, globals.remote_artifacts_dir) - diff --git a/supervisely/train/src/sly_metrics.py b/supervisely/train/src/sly_metrics.py index dd6f3b7ab800..594e96774826 100644 --- a/supervisely/train/src/sly_metrics.py +++ b/supervisely/train/src/sly_metrics.py @@ -1,5 +1,5 @@ import supervisely_lib as sly -import sly_train_globals as globals +import supervisely.train.src.sly_train_globals as globals def init_chart(title, names, xs, ys, smoothing=None): @@ -24,19 +24,19 @@ def init_chart(title, names, xs, ys, smoothing=None): def init(data, state): demo_x = [[], []] #[[1, 2, 3, 4], [2, 4, 6, 8]] demo_y = [[], []] #[[10, 15, 13, 17], [16, 5, 11, 9]] - data["mBox"] = init_chart("Box Loss", - names=["train", "val"], - xs=demo_x, - ys=demo_y, - smoothing=0.6) + data["mGIoU"] = init_chart("GIoU", + names=["train", "val"], + xs=demo_x, + ys=demo_y, + smoothing=0.6) - data["mObjectness"] = init_chart("Obj Loss", + data["mObjectness"] = init_chart("Objectness", names=["train", "val"], xs=demo_x, ys=demo_y, smoothing=0.6) - data["mClassification"] = init_chart("Cls Loss", + data["mClassification"] = init_chart("Classification", names=["train", "val"], xs=demo_x, ys=demo_y, @@ -54,23 +54,24 @@ def init(data, state): state["smoothing"] = 0.6 -def send_metrics(epoch, epochs, metrics): +def send_metrics(epoch, epochs, metrics, log_period=1): sly.logger.debug(f"Metrics: epoch {epoch} / {epochs}", 
extra={"metrics": metrics}) - fields = [ - {"field": "data.mBox.series[0].data", "payload": [[epoch, metrics["train/box_loss"]]], "append": True}, - {"field": "data.mBox.series[1].data", "payload": [[epoch, metrics["val/box_loss"]]], "append": True}, + if epoch % log_period == 0 or epoch == epochs: + fields = [ + {"field": "data.mGIoU.series[0].data", "payload": [[epoch, metrics["train/box_loss"]]], "append": True}, + {"field": "data.mGIoU.series[1].data", "payload": [[epoch, metrics["val/box_loss"]]], "append": True}, - {"field": "data.mObjectness.series[0].data", "payload": [[epoch, metrics["train/obj_loss"]]], "append": True}, - {"field": "data.mObjectness.series[1].data", "payload": [[epoch, metrics["val/obj_loss"]]], "append": True}, + {"field": "data.mObjectness.series[0].data", "payload": [[epoch, metrics["train/obj_loss"]]], "append": True}, + {"field": "data.mObjectness.series[1].data", "payload": [[epoch, metrics["val/obj_loss"]]], "append": True}, - {"field": "data.mClassification.series[0].data", "payload": [[epoch, metrics["train/cls_loss"]]], "append": True}, - {"field": "data.mClassification.series[1].data", "payload": [[epoch, metrics["val/cls_loss"]]], "append": True}, + {"field": "data.mClassification.series[0].data", "payload": [[epoch, metrics["train/cls_loss"]]], "append": True}, + {"field": "data.mClassification.series[1].data", "payload": [[epoch, metrics["val/cls_loss"]]], "append": True}, - {"field": "data.mPR.series[0].data", "payload": [[epoch, metrics["metrics/precision"]]], "append": True}, - {"field": "data.mPR.series[1].data", "payload": [[epoch, metrics["metrics/recall"]]], "append": True}, + {"field": "data.mPR.series[0].data", "payload": [[epoch, metrics["metrics/precision"]]], "append": True}, + {"field": "data.mPR.series[1].data", "payload": [[epoch, metrics["metrics/recall"]]], "append": True}, - {"field": "data.mMAP.series[0].data", "payload": [[epoch, metrics["metrics/mAP_0.5"]]], "append": True}, - {"field": 
"data.mMAP.series[1].data", "payload": [[epoch, metrics["metrics/mAP_0.5:0.95"]]], "append": True}, - ] - globals.api.app.set_fields(globals.task_id, fields) + {"field": "data.mMAP.series[0].data", "payload": [[epoch, metrics["metrics/mAP_0.5"]]], "append": True}, + {"field": "data.mMAP.series[1].data", "payload": [[epoch, metrics["metrics/mAP_0.5:0.95"]]], "append": True}, + ] + globals.api.app.set_fields(globals.task_id, fields) diff --git a/supervisely/train/src/sly_train.py b/supervisely/train/src/sly_train.py index 77d77c00c691..82246c1eaf99 100644 --- a/supervisely/train/src/sly_train.py +++ b/supervisely/train/src/sly_train.py @@ -1,23 +1,20 @@ import os import supervisely_lib as sly -from sly_train_globals import init_project_info_and_meta, \ - my_app, task_id, \ - team_id, workspace_id, project_id, \ - root_source_path, scratch_str, finetune_str - -# to import correct values -# project_info, project_meta, \ -# local_artifacts_dir, remote_artifacts_dir -import sly_train_globals as g - -from sly_train_val_split import train_val_split -import sly_init_ui as ui -from sly_prepare_data import filter_and_transform_labels -from sly_train_utils import init_script_arguments -from sly_utils import get_progress_cb, upload_artifacts +import supervisely.train.src.sly_train_globals as g +from supervisely.train.src.sly_train_globals import \ + my_app, task_id, \ + team_id, workspace_id, project_id, \ + root_source_dir, scratch_str, finetune_str +import ui.ui as ui +from sly_train_utils import init_script_arguments +from sly_utils import get_progress_cb, upload_artifacts +from supervisely.train.src.ui.splits import get_train_val_sets, verify_train_val_sets +import supervisely.train.src.yolov5_format as yolov5_format +from supervisely.train.src.ui.architectures import prepare_weights +from supervisely.train.src.ui.artifacts import set_task_output import train as train_yolov5 @@ -33,48 +30,51 @@ def restore_hyp(api: sly.Api, task_id, context, state, app_logger): 
@my_app.callback("train") @sly.timeit def train(api: sly.Api, task_id, context, state, app_logger): - api.app.set_field(task_id, "state.activeNames", ["labels", "train", "pred", "metrics"]) #"logs", - - # prepare directory for original Supervisely project - project_dir = os.path.join(my_app.data_dir, "sly_project") - sly.fs.mkdir(project_dir) - sly.fs.clean_dir(project_dir) # useful for debug, has no effect in production - - # download Sypervisely project (using cache) - sly.download_project(api, project_id, project_dir, cache=my_app.cache, - progress_cb=get_progress_cb("Download data (using cache)", g.project_info.items_count * 2)) - - # prepare directory for transformed data (nn will use it for training) - yolov5_format_dir = os.path.join(my_app.data_dir, "train_data") - sly.fs.mkdir(yolov5_format_dir) - sly.fs.clean_dir(yolov5_format_dir) # useful for debug, has no effect in production - - # split data to train/val sets, filter objects by classes, convert Supervisely project to YOLOv5 format(COCO) - train_split, val_split = train_val_split(project_dir, state) - train_classes = state["selectedClasses"] - progress_cb = get_progress_cb("Convert Supervisely to YOLOv5 format", g.project_info.items_count) - filter_and_transform_labels(project_dir, train_classes, train_split, val_split, yolov5_format_dir, progress_cb) - - # download initial weights from team files - if state["modelWeightsOptions"] == 2: # transfer learning from custom weights - weights_path_remote = state["weightsPath"] - weights_path_local = os.path.join(my_app.data_dir, sly.fs.get_file_name_with_ext(weights_path_remote)) - file_info = api.file.get_info_by_path(team_id, weights_path_remote) - api.file.download(team_id, weights_path_remote, weights_path_local, my_app.cache, - progress_cb=get_progress_cb("Download weights", file_info.sizeb, is_size=True)) - - # init sys.argv for main training script - init_script_arguments(state, yolov5_format_dir, g.project_info.name) - - # start train script - 
get_progress_cb("YOLOv5: Scanning data ", 1)(1) - train_yolov5.main() - - # upload artifacts directory to Team Files - upload_artifacts(g.local_artifacts_dir, g.remote_artifacts_dir) - - # show path to the artifacts directory in Team Files - ui.set_output() + try: + prepare_weights(state) + + # prepare directory for original Supervisely project + project_dir = os.path.join(my_app.data_dir, "sly_project") + sly.fs.mkdir(project_dir, remove_content_if_exists=True) # clean content for debug, has no effect in prod + + # download and preprocess Sypervisely project (using cache) + download_progress = get_progress_cb("Download data (using cache)", g.project_info.items_count * 2) + sly.download_project(api, project_id, project_dir, cache=my_app.cache, progress_cb=download_progress) + + # preprocessing: transform labels to bboxes, filter classes, ... + sly.Project.to_detection_task(project_dir, inplace=True) + train_classes = state["selectedClasses"] + sly.Project.remove_classes_except(project_dir, classes_to_keep=train_classes, inplace=True) + if state["unlabeledImages"] == "ignore": + sly.Project.remove_items_without_objects(project_dir, inplace=True) + + # split to train / validation sets (paths to images and annotations) + train_set, val_set = get_train_val_sets(project_dir, state) + verify_train_val_sets(train_set, val_set) + + # prepare directory for data in YOLOv5 format (nn will use it for training) + train_data_dir = os.path.join(my_app.data_dir, "train_data") + sly.fs.mkdir(train_data_dir, remove_content_if_exists=True) # clean content for debug, has no effect in prod + + # convert Supervisely project to YOLOv5 format + progress_cb = get_progress_cb("Convert Supervisely to YOLOv5 format", len(train_set) + len(val_set)) + yolov5_format.transform(project_dir, train_data_dir, train_set, val_set, progress_cb) + + # init sys.argv for main training script + init_script_arguments(state, train_data_dir, g.project_info.name) + + # start train script + 
api.app.set_field(task_id, "state.activeNames", ["labels", "train", "pred", "metrics"]) # "logs", + get_progress_cb("YOLOv5: Scanning data ", 1)(1) + train_yolov5.main() + + # upload artifacts directory to Team Files + upload_artifacts(g.local_artifacts_dir, g.remote_artifacts_dir) + set_task_output() + except Exception as e: + my_app.show_modal_window(f"Oops! Something went wrong, please try again or contact tech support. " + f"Find more info in the app logs. Error: {repr(e)}", level="error") + api.app.set_field(task_id, "state.started", False) # stop application get_progress_cb("Finished, app is stopped automatically", 1)(1) @@ -92,8 +92,7 @@ def main(): state = {} data["taskId"] = task_id - # read project information and meta (classes + tags) - init_project_info_and_meta() + my_app.compile_template(g.root_source_dir) # init data for UI widgets ui.init(data, state) @@ -101,13 +100,8 @@ def main(): my_app.run(data=data, state=state) -# @TODO: change pip requirements to quickly skip them (already installed) -# @TODO: handle soft stop event - # New features: -# @TODO: adam or SGD opt? 
-# @TODO: train == val - handle case in data_config.yaml to avoid data duplication # @TODO: resume training -# @TODO: repeat dataset (for small lemons) +# @TODO: save checkpoint every N-th epochs if __name__ == "__main__": sly.main_wrapper("main", main) diff --git a/supervisely/train/src/sly_train_globals.py b/supervisely/train/src/sly_train_globals.py index 549a8976c3f9..00230a36a2e7 100644 --- a/supervisely/train/src/sly_train_globals.py +++ b/supervisely/train/src/sly_train_globals.py @@ -14,30 +14,31 @@ local_artifacts_dir = None remote_artifacts_dir = None +project_info = api.project.get_info_by_id(project_id) +project_meta = sly.ProjectMeta.from_json(api.project.get_meta(project_id)) -project_info = None -project_meta = None +root_source_dir = str(Path(sys.argv[0]).parents[3]) +sly.logger.info(f"Root source directory: {root_source_dir}") +sys.path.append(root_source_dir) -root_source_path = str(Path(sys.argv[0]).parents[3]) -sly.logger.info(f"Root source directory: {root_source_path}") -sys.path.append(root_source_path) +source_path = str(Path(sys.argv[0]).parents[0]) +sly.logger.info(f"Source directory: {source_path}") +sys.path.append(source_path) -# script_path = str(Path(sys.argv[0]).parents[3])) -# root_app_dir = script_path.parent.parent.absolute() -# sly.logger.info(f"Root app directory: {root_app_dir}") -# sys.path.append(root_app_dir) +with open(os.path.join(root_source_dir, "data/hyp.scratch.yaml"), 'r') as file: + scratch_str = file.read() # yaml.safe_load( -def init_project_info_and_meta(): - global project_info, project_meta - project_info = api.project.get_info_by_id(project_id) - project_meta_json = api.project.get_meta(project_id) - project_meta = sly.ProjectMeta.from_json(project_meta_json) - +with open(os.path.join(root_source_dir, "data/hyp.finetune.yaml"), 'r') as file: + finetune_str = file.read() # yaml.safe_load( -with open(os.path.join(root_source_path, "data/hyp.scratch.yaml"), 'r') as file: - scratch_str = file.read() # 
yaml.safe_load( -with open(os.path.join(root_source_path, "data/hyp.finetune.yaml"), 'r') as file: - finetune_str = file.read() # yaml.safe_load( \ No newline at end of file +runs_dir = os.path.join(my_app.data_dir, 'runs') +sly.fs.mkdir(runs_dir, remove_content_if_exists=True) # for debug, does nothing in production +experiment_name = str(task_id) +local_artifacts_dir = os.path.join(runs_dir, experiment_name) +sly.logger.info(f"All training artifacts will be saved to local directory {local_artifacts_dir}") +remote_artifacts_dir = os.path.join("/yolov5_train", project_info.name, experiment_name) +remote_artifacts_dir = api.file.get_free_dir_name(team_id, remote_artifacts_dir) +sly.logger.info(f"After training artifacts will be uploaded to Team Files: {remote_artifacts_dir}") \ No newline at end of file diff --git a/supervisely/train/src/sly_train_utils.py b/supervisely/train/src/sly_train_utils.py index 0d7f6a0e3570..80e7db28d4a3 100644 --- a/supervisely/train/src/sly_train_utils.py +++ b/supervisely/train/src/sly_train_utils.py @@ -11,28 +11,23 @@ def init_script_arguments(state, yolov5_format_dir, input_project_name): global local_artifacts_dir, remote_artifacts_dir + sys.argv.append("--sly") + data_path = os.path.join(yolov5_format_dir, 'data_config.yaml') sys.argv.extend(["--data", data_path]) - try: - hyp_content = yaml.safe_load(state["hyp"][state["hypRadio"]]) - hyp = os.path.join(my_app.data_dir, 'hyp.custom.yaml') - with open(hyp, 'w') as f: - f.write(state["hyp"][state["hypRadio"]]) - except yaml.YAMLError as e: - sly.logger.error(repr(e)) - api.app.set_field(task_id, "state.started", False) - return + hyp_content = yaml.safe_load(state["hyp"][state["hypRadio"]]) + hyp = os.path.join(my_app.data_dir, 'hyp.custom.yaml') + with open(hyp, 'w') as f: + f.write(state["hyp"][state["hypRadio"]]) sys.argv.extend(["--hyp", hyp]) - weights = "" # random (not tested) - if state["modelWeightsOptions"] == 1: - weights = state["pretrainedWeights"] - cfg = 
os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../../models', f"{state['modelSize']}.yaml") + if state["weightsInitialization"] == "coco": + model_name = state['selectedModel'].lower() + _sub_path = "models/hub" if model_name.endswith('6') else "models" + cfg = os.path.join(g.root_source_dir, _sub_path, f"{model_name}.yaml") sys.argv.extend(["--cfg", cfg]) - elif state["modelWeightsOptions"] == 2: - weights = state["weightsPath"] - sys.argv.extend(["--weights", weights]) + sys.argv.extend(["--weights", state["weightsPath"]]) sys.argv.extend(["--epochs", str(state["epochs"])]) sys.argv.extend(["--batch-size", str(state["batchSize"])]) @@ -45,30 +40,12 @@ def init_script_arguments(state, yolov5_format_dir, input_project_name): if "workers" in state: sys.argv.extend(["--workers", str(state["workers"])]) + if state["optimizer"] == "Adam": + sys.argv.append("--adam") - training_dir = os.path.join(my_app.data_dir, 'experiment', input_project_name) - experiment_name = str(task_id) - local_artifacts_dir = os.path.join(training_dir, experiment_name) - _exp_index = 1 - while sly.fs.dir_exists(local_artifacts_dir): - experiment_name = "{}_{:03d}".format(task_id, _exp_index) - local_artifacts_dir = os.path.join(training_dir, experiment_name) - _exp_index += 1 - g.local_artifacts_dir = local_artifacts_dir - - sys.argv.extend(["--project", training_dir]) - sys.argv.extend(["--name", experiment_name]) - - sys.argv.append("--sly") - - remote_experiment_name = str(task_id) - remote_artifacts_dir = os.path.join("/yolov5_train", input_project_name, remote_experiment_name) - _exp_index = 1 - while api.file.dir_exists(team_id, remote_artifacts_dir): - remote_experiment_name = "{}_{:03d}".format(task_id, _exp_index) - remote_artifacts_dir = os.path.join("/yolov5_train", input_project_name, remote_experiment_name) - _exp_index += 1 - g.remote_artifacts_dir = remote_artifacts_dir + sys.argv.extend(["--metrics_period", str(state["metricsPeriod"])]) + 
sys.argv.extend(["--project", g.runs_dir]) + sys.argv.extend(["--name", g.experiment_name]) def send_epoch_log(epoch, epochs, progress): diff --git a/supervisely/train/src/sly_train_val_split.py b/supervisely/train/src/sly_train_val_split.py deleted file mode 100644 index e9807c5c3b0a..000000000000 --- a/supervisely/train/src/sly_train_val_split.py +++ /dev/null @@ -1,55 +0,0 @@ -import random -import supervisely_lib as sly - - -def _list_items(project_dir): - items = [] - project = sly.Project(project_dir, sly.OpenMode.READ) - for dataset in project: - for item_name in dataset: - items.append((dataset.name, item_name)) - return items - - -def _split_random(project_dir, train_count, val_count): - items = _list_items(project_dir) - random.shuffle(items) - train_items = items[:train_count] - val_items = items[train_count:] - if len(val_items) != val_count: - sly.logger.warn("Issue in train/val random split in GUI", extra={ - "train_count": train_count, - "val_count": val_count, - "items_count": len(items), - "train_count + val_count": train_count + val_count - }) - #raise RuntimeError("Incorrect train/val random split") - return train_items, val_items - - -def _split_same(project_dir): - items = _list_items(project_dir) - return items, items.copy() - - -def _split_tags(project_dir, train_tag_name, val_tag_name): - raise NotImplementedError() - - -def train_val_split(project_dir, state): - split_method = state["splitMethod"] - train_count = state["randomSplit"]["count"]["train"] - val_count = state["randomSplit"]["count"]["val"] - - train_split = None - val_split = None - if split_method == 1: # Random - train_split, val_split = _split_random(project_dir, train_count, val_count) - elif split_method == 2: # Based on image tags - train_split, val_split = _split_tags() - elif split_method == 3: # Train = Val - train_split, val_split = _split_same() - else: - raise ValueError(f"Train/val split method: {split_method} unknown") - - return train_split, val_split \ No newline 
at end of file diff --git a/supervisely/train/src/ui/architectures.html b/supervisely/train/src/ui/architectures.html new file mode 100644 index 000000000000..4b31235c18d9 --- /dev/null +++ b/supervisely/train/src/ui/architectures.html @@ -0,0 +1,46 @@ + + + + + Pretrained on COCO +
Default pretrained checkpoints provided by authors of YOLOv5
+
+ + + + + + + + + + + +
+
+
+ {{row["subtitle"]}} +
+
+
+ + {{model[column.key]}} + +
+
+ {{model[column.key]}} +
+
+
+ + + Custom weights + + + + + +
+
\ No newline at end of file diff --git a/supervisely/train/src/ui/architectures.py b/supervisely/train/src/ui/architectures.py new file mode 100644 index 000000000000..ab00109ee5dd --- /dev/null +++ b/supervisely/train/src/ui/architectures.py @@ -0,0 +1,152 @@ +import errno +import os +import supervisely.train.src.sly_train_globals as g +from supervisely.train.src.sly_utils import get_progress_cb +import supervisely_lib as sly + + +def get_models_list(): + return [ + { + "config": "", + "weightsUrl": "", + "Model": "YOLOv5s", + "Size": 640, + "mAP^val": 36.7, + "mAP^test": 36.7, + "mAP^val_0.5": 55.4, + "Speed": 2.0, + "Params": 7.3, + "FLOPS": 17.0, + }, + { + "config": "", + "weightsUrl": "", + "Model": "YOLOv5m", + "Size": 640, + "mAP^val": 44.5, + "mAP^test": 44.5, + "mAP^val_0.5": 63.1, + "Speed": 2.7, + "Params": 21.4, + "FLOPS": 51.3, + }, + { + "config": "", + "weightsUrl": "", + "Model": "YOLOv5l", + "Size": 640, + "mAP^val": 48.2, + "mAP^test": 48.2, + "mAP^val_0.5": 66.9, + "Speed": 3.8, + "Params": 47.0, + "FLOPS": 115.4, + }, + { + "config": "", + "weightsUrl": "", + "Model": "YOLOv5x", + "Size": 640, + "mAP^val": 50.4, + "mAP^test": 50.4, + "mAP^val_0.5": 68.8, + "Speed": 6.1, + "Params": 87.7, + "FLOPS": 218.8, + }, + { + "config": "", + "weightsUrl": "", + "Model": "YOLOv5s6", + "Size": 1280, + "mAP^val": 43.3, + "mAP^test": 43.3, + "mAP^val_0.5": 61.9, + "Speed": 4.3, + "Params": 12.7, + "FLOPS": 17.4, + }, + { + "config": "", + "weightsUrl": "", + "Model": "YOLOv5m6", + "Size": 1280, + "mAP^val": 50.5, + "mAP^test": 50.5, + "mAP^val_0.5": 68.7, + "Speed": 8.4, + "Params": 35.9, + "FLOPS": 52.4, + }, + { + "config": "", + "weightsUrl": "", + "Model": "YOLOv5l6", + "Size": 1280, + "mAP^val": 53.4, + "mAP^test": 53.4, + "mAP^val_0.5": 71.1, + "Speed": 12.3, + "Params": 77.2, + "FLOPS": 117.7, + }, + { + "config": "", + "weightsUrl": "", + "Model": "YOLOv5x6", + "Size": 1280, + "mAP^val": 54.4, + "mAP^test": 54.4, + "mAP^val_0.5": 72.0, + "Speed": 
22.4, + "Params": 141.8, + "FLOPS": 222.9, + }, + ] + + +def get_table_columns(): + return [ + {"key": "Model", "title": "Model", "subtitle": None}, + {"key": "Size", "title": "Size", "subtitle": "(pixels)"}, + {"key": "mAP^val", "title": "mAPval", "subtitle": "0.5:0.95"}, + {"key": "mAP^test", "title": "mAPtest", "subtitle": "0.5:0.95"}, + {"key": "mAP^val_0.5", "title": "mAPval", "subtitle": "0.5"}, + {"key": "Speed", "title": "Speed", "subtitle": "V100 (ms)"}, + {"key": "Params", "title": "Params", "subtitle": "(M)"}, + {"key": "FLOPS", "title": "FLOPS", "subtitle": "640 (B)"}, + ] + + +def init(data, state): + data["models"] = get_models_list() + data["modelColumns"] = get_table_columns() + state["selectedModel"] = "YOLOv5s" + state["weightsInitialization"] = "coco" + + # @TODO: for debug + #state["weightsPath"] = "/yolov5_train/coco128_002/2390/weights/best.pt" + state["weightsPath"] = "" + + +def prepare_weights(state): + if state["weightsInitialization"] == "custom": + # download custom weights + weights_path_remote = state["weightsPath"] + if not weights_path_remote.endswith(".pt"): + raise ValueError(f"Weights file has unsupported extension {sly.fs.get_file_ext(weights_path_remote)}. 
" + f"Supported: '.pt'") + weights_path_local = os.path.join(g.my_app.data_dir, sly.fs.get_file_name_with_ext(weights_path_remote)) + file_info = g.api.file.get_info_by_path(g.team_id, weights_path_remote) + if file_info is None: + raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), weights_path_remote) + progress_cb = get_progress_cb("Download weights", file_info.sizeb, is_size=True) + g.api.file.download(g.team_id, weights_path_remote, weights_path_local, g.my_app.cache, progress_cb) + + state["_weightsPath"] = weights_path_remote + state["weightsPath"] = weights_path_local + else: + model_name = state['selectedModel'].lower() + state["weightsPath"] = f"{model_name}.pt" + sly.logger.info("Pretrained COCO weights will be added automatically") diff --git a/supervisely/train/src/ui/artifacts.html b/supervisely/train/src/ui/artifacts.html new file mode 100644 index 000000000000..437aa169e248 --- /dev/null +++ b/supervisely/train/src/ui/artifacts.html @@ -0,0 +1,14 @@ + +
+ Link to the directory with training artifacts will be here once training is finished +
+ + + {{data.outputName}} + + + + +
\ No newline at end of file diff --git a/supervisely/train/src/ui/artifacts.py b/supervisely/train/src/ui/artifacts.py new file mode 100644 index 000000000000..16b946b32e5c --- /dev/null +++ b/supervisely/train/src/ui/artifacts.py @@ -0,0 +1,17 @@ +import os +import supervisely.train.src.sly_train_globals as g + + +def init(data): + data["outputUrl"] = None + data["outputName"] = None + + +def set_task_output(): + file_info = g.api.file.get_info_by_path(g.team_id, os.path.join(g.remote_artifacts_dir, 'results.png')) + fields = [ + {"field": "data.outputUrl", "payload": g.api.file.get_url(file_info.id)}, + {"field": "data.outputName", "payload": g.remote_artifacts_dir}, + ] + g.api.app.set_fields(g.task_id, fields) + g.api.task.set_output_directory(g.task_id, file_info.id, g.remote_artifacts_dir) \ No newline at end of file diff --git a/supervisely/train/src/ui/classes.html b/supervisely/train/src/ui/classes.html new file mode 100644 index 000000000000..145d4df23632 --- /dev/null +++ b/supervisely/train/src/ui/classes.html @@ -0,0 +1,27 @@ + + + + + + + + + + + \ No newline at end of file diff --git a/supervisely/train/src/ui/classes.py b/supervisely/train/src/ui/classes.py new file mode 100644 index 000000000000..429e68d945bf --- /dev/null +++ b/supervisely/train/src/ui/classes.py @@ -0,0 +1,25 @@ +import supervisely_lib as sly + + +def init(api: sly.Api, data, state, project_id, project_meta: sly.ProjectMeta): + stats = api.project.get_stats(project_id) + class_images = {} + for item in stats["images"]["objectClasses"]: + class_images[item["objectClass"]["name"]] = item["total"] + class_objects = {} + for item in stats["objects"]["items"]: + class_objects[item["objectClass"]["name"]] = item["total"] + + classes_json = project_meta.obj_classes.to_json() + for obj_class in classes_json: + obj_class["imagesCount"] = class_images[obj_class["title"]] + obj_class["objectsCount"] = class_objects[obj_class["title"]] + + unlabeled_count = 0 + for ds_counter in 
stats["images"]["datasets"]: + unlabeled_count += ds_counter["imagesNotMarked"] + + data["classes"] = classes_json + state["selectedClasses"] = [] + state["classes"] = len(classes_json) * [True] + data["unlabeledCount"] = unlabeled_count \ No newline at end of file diff --git a/supervisely/train/src/ui/hyperparameters.html b/supervisely/train/src/ui/hyperparameters.html new file mode 100644 index 000000000000..f346fb020f35 --- /dev/null +++ b/supervisely/train/src/ui/hyperparameters.html @@ -0,0 +1,75 @@ + + + + + + + + + + + + Multi-scale + + + Single class + + + + + + + + +
+ Log metrics every + + epochs +
+
+ + + + + + + + + + + Scratch mode +
Recommended hyperparameters for training from scratch
+
+ + Restore Defaults + + +
+ + + Finetune mode +
Recommended hyperparameters for model finetuning
+
+ + Restore Defaults + + +
+
+
+
\ No newline at end of file diff --git a/supervisely/train/src/ui/hyperparameters.py b/supervisely/train/src/ui/hyperparameters.py new file mode 100644 index 000000000000..8042fba6d013 --- /dev/null +++ b/supervisely/train/src/ui/hyperparameters.py @@ -0,0 +1,20 @@ +import supervisely_lib as sly +import supervisely.train.src.sly_train_globals as g + + +def init(state): + state["epochs"] = 10 + state["batchSize"] = 16 + state["imgSize"] = 640 + state["multiScale"] = False + state["singleClass"] = False + state["device"] = '0' + state["workers"] = 8 # 0 - for debug @TODO: for debug + state["activeTabName"] = "General" + state["hyp"] = { + "scratch": g.scratch_str, + "finetune": g.finetune_str, + } + state["hypRadio"] = "scratch" + state["optimizer"] = "SGD" + state["metricsPeriod"] = 1 diff --git a/supervisely/train/src/ui/input_project.html b/supervisely/train/src/ui/input_project.html new file mode 100644 index 000000000000..ff0ef021bc63 --- /dev/null +++ b/supervisely/train/src/ui/input_project.html @@ -0,0 +1,8 @@ + + + {{data.projectName}} ({{data.projectImagesCount}} + images) + + + \ No newline at end of file diff --git a/supervisely/train/src/ui/input_project.py b/supervisely/train/src/ui/input_project.py new file mode 100644 index 000000000000..e81d822f05c1 --- /dev/null +++ b/supervisely/train/src/ui/input_project.py @@ -0,0 +1,8 @@ +import supervisely.train.src.sly_train_globals as g + + +def init(data): + data["projectId"] = g.project_info.id + data["projectName"] = g.project_info.name + data["projectImagesCount"] = g.project_info.items_count + data["projectPreviewUrl"] = g.api.image.preview_url(g.project_info.reference_image_url, 100, 100) diff --git a/supervisely/train/src/ui/monitoring.html b/supervisely/train/src/ui/monitoring.html new file mode 100644 index 000000000000..26458aa203a6 --- /dev/null +++ b/supervisely/train/src/ui/monitoring.html @@ -0,0 +1,115 @@ + + + Start training + +
+ 0 training classes are selected +
+
+ Path to model weights is not defined +
+
+
{{data.progressName}}: {{data.currentProgressLabel}} / + {{data.totalProgressLabel}} +
+ +
+ + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + +
+
+ + + + +
+
+
+ + + + + +
+
\ No newline at end of file diff --git a/supervisely/train/src/ui/monitoring.py b/supervisely/train/src/ui/monitoring.py new file mode 100644 index 000000000000..0dbe0048bfe5 --- /dev/null +++ b/supervisely/train/src/ui/monitoring.py @@ -0,0 +1,44 @@ +import supervisely_lib as sly +import supervisely.train.src.sly_metrics as metrics + + +empty_gallery = { + "content": { + "projectMeta": sly.ProjectMeta().to_json(), + "annotations": {}, + "layout": [] + } +} + + +def init(data, state): + _init_start_state(state) + _init_galleries(data) + _init_progress(data) + _init_output(data) + metrics.init(data, state) + + +def _init_start_state(state): + state["started"] = False + state["activeNames"] = [] + + +def _init_galleries(data): + data["vis"] = empty_gallery + data["labelsVis"] = empty_gallery + data["predVis"] = empty_gallery + data["syncBindings"] = [] + + +def _init_progress(data): + data["progressName"] = "" + data["currentProgress"] = 0 + data["totalProgress"] = 0 + data["currentProgressLabel"] = "" + data["totalProgressLabel"] = "" + + +def _init_output(data): + data["outputUrl"] = "" + data["outputName"] = "" \ No newline at end of file diff --git a/supervisely/train/src/ui/splits.html b/supervisely/train/src/ui/splits.html new file mode 100644 index 000000000000..a40deb164f45 --- /dev/null +++ b/supervisely/train/src/ui/splits.html @@ -0,0 +1,133 @@ + + + + + Random +
Shuffle data and split with defined probability
+
+ + + + + + + + + + + +
+ + + Based on image tags +
Images should have an assigned train or val tag
+
+ + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + Based on datasets +
Select one or several datasets for every split
+
+ + + + + + + + + + + + + +
+
+ + + + + + + + + + +
\ No newline at end of file diff --git a/supervisely/train/src/ui/splits.py b/supervisely/train/src/ui/splits.py new file mode 100644 index 000000000000..e7e8095988db --- /dev/null +++ b/supervisely/train/src/ui/splits.py @@ -0,0 +1,71 @@ +import supervisely_lib as sly + + +def init(project_info, project_meta: sly.ProjectMeta, data, state): + data["randomSplit"] = [ + {"name": "train", "type": "success"}, + {"name": "val", "type": "primary"}, + {"name": "total", "type": "gray"}, + ] + data["totalImagesCount"] = project_info.items_count + + train_percent = 80 + train_count = int(project_info.items_count / 100 * train_percent) + state["randomSplit"] = { + "count": { + "total": project_info.items_count, + "train": train_count, + "val": project_info.items_count - train_count + }, + "percent": { + "total": 100, + "train": train_percent, + "val": 100 - train_percent + }, + "shareImagesBetweenSplits": False, + "sliderDisabled": False, + } + + state["splitMethod"] = "random" + + state["trainTagName"] = "" + if project_meta.tag_metas.get("train") is not None: + state["trainTagName"] = "train" + state["valTagName"] = "" + if project_meta.tag_metas.get("val") is not None: + state["valTagName"] = "val" + + state["trainDatasets"] = [] + state["valDatasets"] = [] + + state["unlabeledImages"] = "keep" + state["untaggedImages"] = "train" + + +def get_train_val_sets(project_dir, state): + split_method = state["splitMethod"] + if split_method == "random": + train_count = state["randomSplit"]["count"]["train"] + val_count = state["randomSplit"]["count"]["val"] + train_set, val_set = sly.Project.get_train_val_splits_by_count(project_dir, train_count, val_count) + return train_set, val_set + elif split_method == "tags": + train_tag_name = state["trainTagName"] + val_tag_name = state["valTagName"] + add_untagged_to = state["untaggedImages"] + train_set, val_set = sly.Project.get_train_val_splits_by_tag(project_dir, train_tag_name, val_tag_name, add_untagged_to) + return train_set, 
val_set + elif split_method == "datasets": + train_datasets = state["trainDatasets"] + val_datasets = state["valDatasets"] + train_set, val_set = sly.Project.get_train_val_splits_by_dataset(project_dir, train_datasets, val_datasets) + return train_set, val_set + else: + raise ValueError(f"Unknown split method: {split_method}") + + +def verify_train_val_sets(train_set, val_set): + if len(train_set) == 0: + raise ValueError("Train set is empty, check or change split configuration") + if len(val_set) == 0: + raise ValueError("Val set is empty, check or change split configuration") \ No newline at end of file diff --git a/supervisely/train/src/ui/ui.py b/supervisely/train/src/ui/ui.py new file mode 100644 index 000000000000..8566f30013c3 --- /dev/null +++ b/supervisely/train/src/ui/ui.py @@ -0,0 +1,18 @@ +import supervisely.train.src.sly_train_globals as g +import supervisely.train.src.ui.input_project as input_project +import supervisely.train.src.ui.classes as training_classes +import supervisely.train.src.ui.splits as train_val_split +import supervisely.train.src.ui.architectures as model_architectures +import supervisely.train.src.ui.hyperparameters as hyperparameters +import supervisely.train.src.ui.monitoring as monitoring +import supervisely.train.src.ui.artifacts as artifacts + + +def init(data, state): + input_project.init(data) + training_classes.init(g.api, data, state, g.project_id, g.project_meta) + train_val_split.init(g.project_info, g.project_meta, data, state) + model_architectures.init(data, state) + hyperparameters.init(state) + monitoring.init(data, state) + artifacts.init(data) diff --git a/supervisely/train/src/sly_prepare_data.py b/supervisely/train/src/yolov5_format.py similarity index 57% rename from supervisely/train/src/sly_prepare_data.py rename to supervisely/train/src/yolov5_format.py index aab0e6671dcb..5e605e7255cd 100644 --- a/supervisely/train/src/sly_prepare_data.py +++ b/supervisely/train/src/yolov5_format.py @@ -3,7 +3,7 @@ import 
supervisely_lib as sly -def transform_label(class_names, img_size, label: sly.Label): +def _transform_label(class_names, img_size, label: sly.Label): class_number = class_names.index(label.obj_class.name) rect_geometry = label.geometry.to_bbox() center = rect_geometry.center @@ -15,13 +15,12 @@ def transform_label(class_names, img_size, label: sly.Label): return result -def _create_data_config(output_dir, meta: sly.ProjectMeta, keep_classes): +def _create_data_config(output_dir, meta: sly.ProjectMeta): class_names = [] class_colors = [] for obj_class in meta.obj_classes: - if obj_class.name in keep_classes: - class_names.append(obj_class.name) - class_colors.append(obj_class.color) + class_names.append(obj_class.name) + class_colors.append(obj_class.color) data_yaml = { "train": os.path.join(output_dir, "images/train"), @@ -44,11 +43,11 @@ def _create_data_config(output_dir, meta: sly.ProjectMeta, keep_classes): return data_yaml -def transform_annotation(ann, class_names, save_path): +def _transform_annotation(ann, class_names, save_path): yolov5_ann = [] for label in ann.labels: if label.obj_class.name in class_names: - yolov5_ann.append(transform_label(class_names, ann.img_size, label)) + yolov5_ann.append(_transform_label(class_names, ann.img_size, label)) with open(save_path, 'w') as file: file.write("\n".join(yolov5_ann)) @@ -67,9 +66,9 @@ def _process_split(project, class_names, images_dir, labels_dir, split, progress ann = sly.Annotation.from_json(ann_json, project.meta) save_ann_path = os.path.join(labels_dir, f"{sly.fs.get_file_name(item_name)}.txt") - empty = transform_annotation(ann, class_names, save_ann_path) + empty = _transform_annotation(ann, class_names, save_ann_path) if empty: - sly.logger.warning(f"Empty annotation dataset={dataset_name} image={item_name}") + sly.logger.warning(f"Empty annotation: dataset={dataset_name}, image={item_name}") img_path = dataset.get_img_path(item_name) save_img_path = os.path.join(images_dir, item_name) @@ -78,12 
+77,25 @@ def _process_split(project, class_names, images_dir, labels_dir, split, progress progress_cb(len(batch)) -def filter_and_transform_labels(input_dir, train_classes, - train_split, val_split, - output_dir, progress_cb): - project = sly.Project(input_dir, sly.OpenMode.READ) - data_yaml = _create_data_config(output_dir, project.meta, train_classes) +def _transform_set(set_name, data_yaml, project_meta, items, progress_cb): + res_images_dir = data_yaml[set_name] + res_labels_dir = data_yaml[f"labels_{set_name}"] + classes_names = data_yaml["names"] - _process_split(project, data_yaml["names"], data_yaml["train"], data_yaml["labels_train"], train_split, progress_cb) - _process_split(project, data_yaml["names"], data_yaml["val"], data_yaml["labels_val"], val_split, progress_cb) + for batch in sly.batched(items, batch_size=max(int(len(items) / 50), 10)): + for item in batch: + ann = sly.Annotation.load_json_file(item.ann_path, project_meta) + save_ann_path = os.path.join(res_labels_dir, f"{sly.fs.get_file_name(item.name)}.txt") + _transform_annotation(ann, classes_names, save_ann_path) + save_img_path = os.path.join(res_images_dir, sly.fs.get_file_name_with_ext(item.img_path)) + sly.fs.hardlink_or_copy_file(item.img_path, save_img_path) # to speedup and save drive space + progress_cb(len(batch)) + + +def transform(sly_project_dir, yolov5_output_dir, train_set, val_set, progress_cb): + project = sly.Project(sly_project_dir, sly.OpenMode.READ) + data_yaml = _create_data_config(yolov5_output_dir, project.meta) + + _transform_set("train", data_yaml, project.meta, train_set, progress_cb) + _transform_set("val", data_yaml, project.meta, val_set, progress_cb) \ No newline at end of file diff --git a/train.py b/train.py index db810e03fefa..f3f5cd75edca 100644 --- a/train.py +++ b/train.py @@ -44,7 +44,10 @@ import supervisely_lib as sly from supervisely_lib import logger + def train(hyp, opt, device, tb_writer=None): + train_batches_uploaded = False + 
logger.info('hyperparameters', extra=hyp) save_dir, epochs, batch_size, total_batch_size, weights, rank = \ Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank @@ -207,7 +210,6 @@ def train(hyp, opt, device, tb_writer=None): hyp=hyp, cache=opt.cache_images and not opt.notest, rect=True, rank=-1, world_size=opt.world_size, workers=opt.workers, pad=0.5, prefix=colorstr('val: '))[0] - if not opt.resume: labels = np.concatenate(dataset.labels, 0) c = torch.tensor(labels[:, 0]) # classes @@ -215,6 +217,8 @@ def train(hyp, opt, device, tb_writer=None): # model._initialize_biases(cf.to(device)) if plots: plot_labels(labels, names, save_dir, loggers) + if opt.sly: + upload_label_vis() if tb_writer: tb_writer.add_histogram('classes', c, 0) @@ -353,13 +357,18 @@ def train(hyp, opt, device, tb_writer=None): # Plot if plots and ni < 3: f = save_dir / f'train_batch{ni}.jpg' # filename - Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start() + plot_images(imgs, targets, paths, f) + #Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start() # if tb_writer: # tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch) # tb_writer.add_graph(torch.jit.trace(model, imgs, strict=False), []) # add model graph elif plots and ni == 10 and wandb_logger.wandb: wandb_logger.log({"Mosaics": [wandb_logger.wandb.Image(str(x), caption=x.name) for x in save_dir.glob('train*.jpg') if x.exists()]}) + if plots and ni == 10 and opt.sly: + train_batches_uploaded = True + upload_train_data_vis() + # end batch ------------------------------------------------------------------------------------------------ # end epoch ---------------------------------------------------------------------------------------------------- @@ -386,7 +395,8 @@ def train(hyp, opt, device, tb_writer=None): plots=plots and final_epoch, wandb_logger=wandb_logger, compute_loss=compute_loss, - is_coco=is_coco) + is_coco=is_coco, + 
opt_sly=opt.sly) # Write with open(results_file, 'a') as f: @@ -413,7 +423,7 @@ def train(hyp, opt, device, tb_writer=None): metrics[tag] = x if opt.sly: - send_metrics(epoch, epochs, metrics) + send_metrics(epoch, epochs, metrics, opt.metrics_period) # Update best mAP fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95] @@ -443,6 +453,11 @@ def train(hyp, opt, device, tb_writer=None): del ckpt # end epoch ---------------------------------------------------------------------------------------------------- + + if plots and opt.sly and train_batches_uploaded is False: + train_batches_uploaded = True + upload_train_data_vis() + # end training if rank in [-1, 0]: # Plots @@ -524,6 +539,8 @@ def main(): parser.add_argument('--save_period', type=int, default=-1, help='Log model after every "save_period" epoch') parser.add_argument('--artifact_alias', type=str, default="latest", help='version of dataset artifact to be used') parser.add_argument('--sly', action='store_true', help='for Supervisely App integration') + parser.add_argument('--metrics_period', type=int, default=1, help='Log metrics to Supervisely every "metrics_period" epochs') + opt = parser.parse_args() print("Input arguments:", opt) From fb5235a91a74c02ba1359ff6170c353d1d4aa626 Mon Sep 17 00:00:00 2001 From: max kolomeychenko Date: Fri, 7 May 2021 15:06:09 +0300 Subject: [PATCH 220/254] update SDK version --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 3bd62b213f07..45b753554c71 100755 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ # pip install -r requirements.txt -supervisely==6.1.74 +supervisely==6.1.76 # base ---------------------------------------- #matplotlib>=3.2.2 From 3c5350a5820285cdf66662e90b0b5445e5604be5 Mon Sep 17 00:00:00 2001 From: max kolomeychenko Date: Fri, 7 May 2021 15:38:40 +0300 Subject: [PATCH 221/254] fix imports --- 
supervisely/train/src/sly_train.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/supervisely/train/src/sly_train.py b/supervisely/train/src/sly_train.py index 82246c1eaf99..aa5de9e8242d 100644 --- a/supervisely/train/src/sly_train.py +++ b/supervisely/train/src/sly_train.py @@ -1,9 +1,9 @@ import os import supervisely_lib as sly -import supervisely.train.src.sly_train_globals as g +import sly_train_globals as g -from supervisely.train.src.sly_train_globals import \ +from sly_train_globals import \ my_app, task_id, \ team_id, workspace_id, project_id, \ root_source_dir, scratch_str, finetune_str @@ -11,10 +11,10 @@ import ui.ui as ui from sly_train_utils import init_script_arguments from sly_utils import get_progress_cb, upload_artifacts -from supervisely.train.src.ui.splits import get_train_val_sets, verify_train_val_sets -import supervisely.train.src.yolov5_format as yolov5_format -from supervisely.train.src.ui.architectures import prepare_weights -from supervisely.train.src.ui.artifacts import set_task_output +from ui.splits import get_train_val_sets, verify_train_val_sets +import yolov5_format as yolov5_format +from ui.architectures import prepare_weights +from ui.artifacts import set_task_output import train as train_yolov5 @@ -99,7 +99,7 @@ def main(): my_app.run(data=data, state=state) - +# @TODO: double check inference # New features: # @TODO: resume training # @TODO: save checkpoint every N-th epochs From 07872fc6e12903f9b30480a6bfdc1856c5d7937a Mon Sep 17 00:00:00 2001 From: max kolomeychenko Date: Fri, 7 May 2021 15:49:21 +0300 Subject: [PATCH 222/254] change imports --- supervisely/train/debug.env | 2 +- supervisely/train/src/sly_metrics.py | 2 +- supervisely/train/src/ui/architectures.py | 4 ++-- supervisely/train/src/ui/artifacts.py | 2 +- supervisely/train/src/ui/hyperparameters.py | 3 +-- supervisely/train/src/ui/input_project.py | 2 +- supervisely/train/src/ui/monitoring.py | 2 +- supervisely/train/src/ui/ui.py | 
16 ++++++++-------- test_yolov5.py | 2 +- train.py | 4 ++-- 10 files changed, 19 insertions(+), 20 deletions(-) diff --git a/supervisely/train/debug.env b/supervisely/train/debug.env index cb6f81ed7bb9..a916b0944443 100644 --- a/supervisely/train/debug.env +++ b/supervisely/train/debug.env @@ -8,7 +8,7 @@ TASK_ID=4326 context.teamId=229 context.workspaceId=287 -modal.state.slyProjectId=2128 # lemons-annotated +modal.state.slyProjectId=3511 # lemons-annotated SERVER_ADDRESS="put your value here" API_TOKEN="put your value here" diff --git a/supervisely/train/src/sly_metrics.py b/supervisely/train/src/sly_metrics.py index 594e96774826..e07da9764fad 100644 --- a/supervisely/train/src/sly_metrics.py +++ b/supervisely/train/src/sly_metrics.py @@ -1,5 +1,5 @@ import supervisely_lib as sly -import supervisely.train.src.sly_train_globals as globals +import sly_train_globals as globals def init_chart(title, names, xs, ys, smoothing=None): diff --git a/supervisely/train/src/ui/architectures.py b/supervisely/train/src/ui/architectures.py index ab00109ee5dd..aa8b09240041 100644 --- a/supervisely/train/src/ui/architectures.py +++ b/supervisely/train/src/ui/architectures.py @@ -1,7 +1,7 @@ import errno import os -import supervisely.train.src.sly_train_globals as g -from supervisely.train.src.sly_utils import get_progress_cb +import sly_train_globals as g +from sly_utils import get_progress_cb import supervisely_lib as sly diff --git a/supervisely/train/src/ui/artifacts.py b/supervisely/train/src/ui/artifacts.py index 16b946b32e5c..cceafcabc13c 100644 --- a/supervisely/train/src/ui/artifacts.py +++ b/supervisely/train/src/ui/artifacts.py @@ -1,5 +1,5 @@ import os -import supervisely.train.src.sly_train_globals as g +import sly_train_globals as g def init(data): diff --git a/supervisely/train/src/ui/hyperparameters.py b/supervisely/train/src/ui/hyperparameters.py index 8042fba6d013..3b28480d143e 100644 --- a/supervisely/train/src/ui/hyperparameters.py +++ 
b/supervisely/train/src/ui/hyperparameters.py @@ -1,5 +1,4 @@ -import supervisely_lib as sly -import supervisely.train.src.sly_train_globals as g +import sly_train_globals as g def init(state): diff --git a/supervisely/train/src/ui/input_project.py b/supervisely/train/src/ui/input_project.py index e81d822f05c1..aeda250bd7f5 100644 --- a/supervisely/train/src/ui/input_project.py +++ b/supervisely/train/src/ui/input_project.py @@ -1,4 +1,4 @@ -import supervisely.train.src.sly_train_globals as g +import sly_train_globals as g def init(data): diff --git a/supervisely/train/src/ui/monitoring.py b/supervisely/train/src/ui/monitoring.py index 0dbe0048bfe5..ec25c0562b35 100644 --- a/supervisely/train/src/ui/monitoring.py +++ b/supervisely/train/src/ui/monitoring.py @@ -1,5 +1,5 @@ import supervisely_lib as sly -import supervisely.train.src.sly_metrics as metrics +import sly_metrics as metrics empty_gallery = { diff --git a/supervisely/train/src/ui/ui.py b/supervisely/train/src/ui/ui.py index 8566f30013c3..05c0fe58bb01 100644 --- a/supervisely/train/src/ui/ui.py +++ b/supervisely/train/src/ui/ui.py @@ -1,11 +1,11 @@ -import supervisely.train.src.sly_train_globals as g -import supervisely.train.src.ui.input_project as input_project -import supervisely.train.src.ui.classes as training_classes -import supervisely.train.src.ui.splits as train_val_split -import supervisely.train.src.ui.architectures as model_architectures -import supervisely.train.src.ui.hyperparameters as hyperparameters -import supervisely.train.src.ui.monitoring as monitoring -import supervisely.train.src.ui.artifacts as artifacts +import sly_train_globals as g +import input_project as input_project +import classes as training_classes +import splits as train_val_split +import architectures as model_architectures +import hyperparameters as hyperparameters +import monitoring as monitoring +import artifacts as artifacts def init(data, state): diff --git a/test_yolov5.py b/test_yolov5.py index 
c4dcdaf3003b..af412356a064 100644 --- a/test_yolov5.py +++ b/test_yolov5.py @@ -17,7 +17,7 @@ from utils.plots import plot_images, output_to_target, plot_study_txt from utils.torch_utils import select_device, time_synchronized -from supervisely.train.src.sly_train_utils import upload_pred_vis +from sly_train_utils import upload_pred_vis import supervisely_lib as sly diff --git a/train.py b/train.py index f3f5cd75edca..7b356327b846 100644 --- a/train.py +++ b/train.py @@ -37,8 +37,8 @@ from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, is_parallel from utils.wandb_logging.wandb_utils import WandbLogger, check_wandb_resume -from supervisely.train.src.sly_train_utils import send_epoch_log, upload_label_vis, upload_train_data_vis -from supervisely.train.src.sly_metrics import send_metrics +from sly_train_utils import send_epoch_log, upload_label_vis, upload_train_data_vis +from sly_metrics import send_metrics #logger = logging.getLogger(__name__) import supervisely_lib as sly From 6a8599efb1f1f77babc1ff23f3174a97d73cd723 Mon Sep 17 00:00:00 2001 From: max kolomeychenko Date: Fri, 7 May 2021 15:50:03 +0300 Subject: [PATCH 223/254] change imports --- supervisely/train/src/sly_train.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/supervisely/train/src/sly_train.py b/supervisely/train/src/sly_train.py index aa5de9e8242d..db5859d41315 100644 --- a/supervisely/train/src/sly_train.py +++ b/supervisely/train/src/sly_train.py @@ -8,13 +8,13 @@ team_id, workspace_id, project_id, \ root_source_dir, scratch_str, finetune_str -import ui.ui as ui +import ui as ui from sly_train_utils import init_script_arguments from sly_utils import get_progress_cb, upload_artifacts -from ui.splits import get_train_val_sets, verify_train_val_sets +from splits import get_train_val_sets, verify_train_val_sets import yolov5_format as yolov5_format -from ui.architectures import prepare_weights -from ui.artifacts import 
set_task_output +from architectures import prepare_weights +from artifacts import set_task_output import train as train_yolov5 From da24644d42c60f55cc9a9041f0ede777e1c1eb96 Mon Sep 17 00:00:00 2001 From: max kolomeychenko Date: Fri, 7 May 2021 15:53:40 +0300 Subject: [PATCH 224/254] add UI sources directory to sys.path --- supervisely/train/src/sly_train_globals.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/supervisely/train/src/sly_train_globals.py b/supervisely/train/src/sly_train_globals.py index 00230a36a2e7..c89dc9d15535 100644 --- a/supervisely/train/src/sly_train_globals.py +++ b/supervisely/train/src/sly_train_globals.py @@ -26,6 +26,9 @@ sly.logger.info(f"Source directory: {source_path}") sys.path.append(source_path) +ui_sources_dir = os.path.join(source_path, "ui") +sys.path.append(ui_sources_dir) +sly.logger.info(f"Added to sys.path: {ui_sources_dir}") with open(os.path.join(root_source_dir, "data/hyp.scratch.yaml"), 'r') as file: scratch_str = file.read() # yaml.safe_load( From 9407b980376dae38874b638b0e28473bcb97418b Mon Sep 17 00:00:00 2001 From: max kolomeychenko Date: Fri, 7 May 2021 16:10:15 +0300 Subject: [PATCH 225/254] new SDK version --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 45b753554c71..0311341b7ff5 100755 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ # pip install -r requirements.txt -supervisely==6.1.76 +supervisely==6.1.77 # base ---------------------------------------- #matplotlib>=3.2.2 From 14dbc3474b273d6ebe5a3234ad76b3ba8c15b454 Mon Sep 17 00:00:00 2001 From: max kolomeychenko Date: Fri, 7 May 2021 16:23:16 +0300 Subject: [PATCH 226/254] new SDK version --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 0311341b7ff5..5ba58b7016a7 100755 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ # pip install -r requirements.txt 
-supervisely==6.1.77 +supervisely==6.1.78 # base ---------------------------------------- #matplotlib>=3.2.2 From 78d9bec231a3eb49118b6efc5706a6008f8c318b Mon Sep 17 00:00:00 2001 From: max kolomeychenko Date: Fri, 7 May 2021 16:28:16 +0300 Subject: [PATCH 227/254] fix GIoU smoothing --- supervisely/train/src/ui/monitoring.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/supervisely/train/src/ui/monitoring.html b/supervisely/train/src/ui/monitoring.html index 26458aa203a6..0c2d174f2049 100644 --- a/supervisely/train/src/ui/monitoring.html +++ b/supervisely/train/src/ui/monitoring.html @@ -60,7 +60,7 @@ style="width: 450px;" @input="(val)=>{ state.smoothing = val; - data.mBox.options.smoothingWeight = val; + data.mGIoU.options.smoothingWeight = val; data.mObjectness.options.smoothingWeight = val; data.mClassification.options.smoothingWeight = val; }" From ef0e0d75609890806d381578b35ba139f2e1c419 Mon Sep 17 00:00:00 2001 From: max kolomeychenko Date: Fri, 7 May 2021 16:38:50 +0300 Subject: [PATCH 228/254] update smoothing --- supervisely/train/src/ui/monitoring.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/supervisely/train/src/ui/monitoring.html b/supervisely/train/src/ui/monitoring.html index 0c2d174f2049..30fcf6d3c5d8 100644 --- a/supervisely/train/src/ui/monitoring.html +++ b/supervisely/train/src/ui/monitoring.html @@ -56,7 +56,7 @@ Date: Fri, 7 May 2021 17:26:23 +0300 Subject: [PATCH 230/254] save link to app UI --- supervisely/train/src/sly_train.py | 3 ++- supervisely/train/src/sly_utils.py | 10 ++++++++++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/supervisely/train/src/sly_train.py b/supervisely/train/src/sly_train.py index db5859d41315..a01db3ec422f 100644 --- a/supervisely/train/src/sly_train.py +++ b/supervisely/train/src/sly_train.py @@ -99,7 +99,8 @@ def main(): my_app.run(data=data, state=state) -# @TODO: double check inference + +# @TODO: doublecheck inference # New features: # @TODO: 
resume training # @TODO: save checkpoint every N-th epochs diff --git a/supervisely/train/src/sly_utils.py b/supervisely/train/src/sly_utils.py index ba652011a4ea..5dd875647fe9 100644 --- a/supervisely/train/src/sly_utils.py +++ b/supervisely/train/src/sly_utils.py @@ -36,10 +36,20 @@ def update_uploading_progress(count, api: sly.Api, task_id, progress: sly.Progre _update_progress_ui(api, task_id, progress, stdout_print=True) +def _save_link_to_ui(local_dir, app_url): + # save report to file *.lnk (link to report) + name = "open_app.lnk" + local_path = os.path.join(local_dir, name) + sly.fs.ensure_base_path(local_path) + with open(local_path, "w") as text_file: + print(app_url, file=text_file) + + def upload_artifacts(local_dir, remote_dir): def _gen_message(current, total): return f"Upload artifacts to Team Files [{current}/{total}] " + _save_link_to_ui(local_dir, globals.my_app.app_url) local_files = sly.fs.list_files_recursively(local_dir) total_size = sum([sly.fs.get_file_size(file_path) for file_path in local_files]) From 1394349a5dd55d533d366d372fd1eda088686869 Mon Sep 17 00:00:00 2001 From: max kolomeychenko Date: Fri, 7 May 2021 17:42:01 +0300 Subject: [PATCH 231/254] todo --- supervisely/train/src/sly_train.py | 1 + 1 file changed, 1 insertion(+) diff --git a/supervisely/train/src/sly_train.py b/supervisely/train/src/sly_train.py index a01db3ec422f..94a15fb90282 100644 --- a/supervisely/train/src/sly_train.py +++ b/supervisely/train/src/sly_train.py @@ -101,6 +101,7 @@ def main(): # @TODO: doublecheck inference +# @TODO: add to readme - open_app.lnk # New features: # @TODO: resume training # @TODO: save checkpoint every N-th epochs From d207aaca4c64ccbf8eae5fc6171297a91f21b932 Mon Sep 17 00:00:00 2001 From: max kolomeychenko Date: Tue, 11 May 2021 10:17:04 +0300 Subject: [PATCH 232/254] log train/val size --- supervisely/train/src/sly_train.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/supervisely/train/src/sly_train.py 
b/supervisely/train/src/sly_train.py index 94a15fb90282..91d0fb3d1758 100644 --- a/supervisely/train/src/sly_train.py +++ b/supervisely/train/src/sly_train.py @@ -51,6 +51,8 @@ def train(api: sly.Api, task_id, context, state, app_logger): # split to train / validation sets (paths to images and annotations) train_set, val_set = get_train_val_sets(project_dir, state) verify_train_val_sets(train_set, val_set) + sly.logger.info(f"Train set: {len(train_set)} images") + sly.logger.info(f"Val set: {len(val_set)} images") # prepare directory for data in YOLOv5 format (nn will use it for training) train_data_dir = os.path.join(my_app.data_dir, "train_data") From a55ee16bbfdc005d9703e41b23e6032d514c85a6 Mon Sep 17 00:00:00 2001 From: max kolomeychenko Date: Tue, 11 May 2021 11:00:28 +0300 Subject: [PATCH 233/254] sly-to-yolov5 format: fix same names in different datasets --- supervisely/train/debug.env | 2 +- supervisely/train/src/yolov5_format.py | 14 ++++++++++---- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/supervisely/train/debug.env b/supervisely/train/debug.env index a916b0944443..316bb57ec1e8 100644 --- a/supervisely/train/debug.env +++ b/supervisely/train/debug.env @@ -8,7 +8,7 @@ TASK_ID=4326 context.teamId=229 context.workspaceId=287 -modal.state.slyProjectId=3511 # lemons-annotated +modal.state.slyProjectId=3529 # lemons-annotated SERVER_ADDRESS="put your value here" API_TOKEN="put your value here" diff --git a/supervisely/train/src/yolov5_format.py b/supervisely/train/src/yolov5_format.py index 5e605e7255cd..ea519213eec5 100644 --- a/supervisely/train/src/yolov5_format.py +++ b/supervisely/train/src/yolov5_format.py @@ -82,14 +82,20 @@ def _transform_set(set_name, data_yaml, project_meta, items, progress_cb): res_labels_dir = data_yaml[f"labels_{set_name}"] classes_names = data_yaml["names"] + used_names = set() for batch in sly.batched(items, batch_size=max(int(len(items) / 50), 10)): for item in batch: ann = 
sly.Annotation.load_json_file(item.ann_path, project_meta) - save_ann_path = os.path.join(res_labels_dir, f"{sly.fs.get_file_name(item.name)}.txt") - _transform_annotation(ann, classes_names, save_ann_path) + _item_name = sly._utils.generate_free_name(used_names, sly.fs.get_file_name(item.name)) + used_names.add(_item_name) + + _ann_name = f"{_item_name}.txt" + _img_name = f"{_item_name}{sly.fs.get_file_ext(item.img_path)}" - save_img_path = os.path.join(res_images_dir, sly.fs.get_file_name_with_ext(item.img_path)) - sly.fs.hardlink_or_copy_file(item.img_path, save_img_path) # to speedup and save drive space + save_ann_path = os.path.join(res_labels_dir, _ann_name) + _transform_annotation(ann, classes_names, save_ann_path) + save_img_path = os.path.join(res_images_dir, _img_name) + sly.fs.copy_file(item.img_path, save_img_path) # hardlink not working with yolov5 ds caches progress_cb(len(batch)) From e036c79feee70e9a71b9ec1f85779f176aa66c80 Mon Sep 17 00:00:00 2001 From: max kolomeychenko Date: Tue, 11 May 2021 11:20:45 +0300 Subject: [PATCH 234/254] fix inference --- supervisely/serve/src/nn_utils.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/supervisely/serve/src/nn_utils.py b/supervisely/serve/src/nn_utils.py index e0ce21a8e19a..e090ca6cf06e 100644 --- a/supervisely/serve/src/nn_utils.py +++ b/supervisely/serve/src/nn_utils.py @@ -34,9 +34,8 @@ def construct_model_meta(model): return meta -def load_model(weights_path, imgsz=640, device='cpu'): +def load_model(weights_path, imgsz=640, device='cpu', half_precision=False): device = select_device(device) - half = device.type != 'cpu' # half precision only supported on CUDA # Load model model = attempt_load(weights_path, map_location=device) # load FP32 model @@ -48,8 +47,11 @@ def load_model(weights_path, imgsz=640, device='cpu'): else: sly.logger.warning(f"Image size is not found in model checkpoint. 
Use default: {IMG_SIZE}") imgsz = IMG_SIZE - imgsz = check_img_size(imgsz, s=model.stride.max()) # check img_size + gs = max(int(model.stride.max()), 32) # grid size (max stride) + imgsz = check_img_size(imgsz, s=gs) # check img_size + + half = device.type != 'cpu' and half_precision # half precision only supported on CUDA if half: model.half() # to FP16 From 72cb08f32883f48640bb8ce5ccc6c1c1bd1dfe46 Mon Sep 17 00:00:00 2001 From: max kolomeychenko Date: Tue, 11 May 2021 14:28:53 +0300 Subject: [PATCH 235/254] serve not tested --- supervisely/serve/config.json | 112 +++++++++++++++- supervisely/serve/src/modal.html | 42 ++++-- supervisely/serve/src/sly_serve.py | 4 +- supervisely/train/src/sly_metrics_utils.py | 142 --------------------- 4 files changed, 146 insertions(+), 154 deletions(-) delete mode 100644 supervisely/train/src/sly_metrics_utils.py diff --git a/supervisely/serve/config.json b/supervisely/serve/config.json index e5880b477660..3eed9cb775fe 100644 --- a/supervisely/serve/config.json +++ b/supervisely/serve/config.json @@ -12,9 +12,117 @@ "modal_template": "supervisely/serve/src/modal.html", "modal_template_state": { "modelWeightsOptions": "pretrained", - "modelSize": "yolov5s.pt", + "selectedModel": "YOLOv5s", "device": "0", - "weightsPath": "" + "weightsPath": "", + "models": [ + { + "config": "", + "weightsUrl": "", + "Model": "YOLOv5s", + "Size": 640, + "mAP^val": 36.7, + "mAP^test": 36.7, + "mAP^val_0.5": 55.4, + "Speed": 2.0, + "Params": 7.3, + "FLOPS": 17.0 + }, + { + "config": "", + "weightsUrl": "", + "Model": "YOLOv5m", + "Size": 640, + "mAP^val": 44.5, + "mAP^test": 44.5, + "mAP^val_0.5": 63.1, + "Speed": 2.7, + "Params": 21.4, + "FLOPS": 51.3 + }, + { + "config": "", + "weightsUrl": "", + "Model": "YOLOv5l", + "Size": 640, + "mAP^val": 48.2, + "mAP^test": 48.2, + "mAP^val_0.5": 66.9, + "Speed": 3.8, + "Params": 47.0, + "FLOPS": 115.4 + }, + { + "config": "", + "weightsUrl": "", + "Model": "YOLOv5x", + "Size": 640, + "mAP^val": 50.4, + 
"mAP^test": 50.4, + "mAP^val_0.5": 68.8, + "Speed": 6.1, + "Params": 87.7, + "FLOPS": 218.8 + }, + { + "config": "", + "weightsUrl": "", + "Model": "YOLOv5s6", + "Size": 1280, + "mAP^val": 43.3, + "mAP^test": 43.3, + "mAP^val_0.5": 61.9, + "Speed": 4.3, + "Params": 12.7, + "FLOPS": 17.4 + }, + { + "config": "", + "weightsUrl": "", + "Model": "YOLOv5m6", + "Size": 1280, + "mAP^val": 50.5, + "mAP^test": 50.5, + "mAP^val_0.5": 68.7, + "Speed": 8.4, + "Params": 35.9, + "FLOPS": 52.4 + }, + { + "config": "", + "weightsUrl": "", + "Model": "YOLOv5l6", + "Size": 1280, + "mAP^val": 53.4, + "mAP^test": 53.4, + "mAP^val_0.5": 71.1, + "Speed": 12.3, + "Params": 77.2, + "FLOPS": 117.7 + }, + { + "config": "", + "weightsUrl": "", + "Model": "YOLOv5x6", + "Size": 1280, + "mAP^val": 54.4, + "mAP^test": 54.4, + "mAP^val_0.5": 72.0, + "Speed": 22.4, + "Params": 141.8, + "FLOPS": 222.9 + } + ], + "modelColumns": [ + {"key": "Model", "title": "Model", "subtitle": null}, + {"key": "Size", "title": "Size", "subtitle": "(pixels)"}, + {"key": "mAP^val", "title": "mAPval", "subtitle": "0.5:0.95"}, + {"key": "mAP^test", "title": "mAPtest", "subtitle": "0.5:0.95"}, + {"key": "mAP^val_0.5", "title": "mAPval", "subtitle": "0.5"}, + {"key": "Speed", "title": "Speed", "subtitle": "V100 (ms)"}, + {"key": "Params", "title": "Params", "subtitle": "(M)"}, + {"key": "FLOPS", "title": "FLOPS", "subtitle": "640 (B)"} + ] }, "task_location": "application_sessions", "icon": "https://i.imgur.com/2U6HufM.png", diff --git a/supervisely/serve/src/modal.html b/supervisely/serve/src/modal.html index beaccd037238..d26dac8bb355 100644 --- a/supervisely/serve/src/modal.html +++ b/supervisely/serve/src/modal.html @@ -5,14 +5,40 @@ Custom model
- - - - - - - - + + + + + + + + + + + +
+
+
+ {{row["subtitle"]}} +
+
+
+ + {{model[column.key]}} + +
+
+ {{model[column.key]}} +
+
+ + + + + + + +
diff --git a/supervisely/serve/src/sly_serve.py b/supervisely/serve/src/sly_serve.py index eea15f4d2274..f510adca60d7 100644 --- a/supervisely/serve/src/sly_serve.py +++ b/supervisely/serve/src/sly_serve.py @@ -19,7 +19,7 @@ meta: sly.ProjectMeta = None modelWeightsOptions = os.environ['modal.state.modelWeightsOptions'] -pretrained_weights = os.environ['modal.state.modelSize'] +pretrained_weights = os.environ['modal.state.selectedModel'].lower() custom_weights = os.environ['modal.state.weightsPath'] @@ -169,7 +169,7 @@ def preprocess(api: sly.Api, task_id, context, state, app_logger): progress = sly.Progress("Downloading weights", 1, is_size=True, need_info_log=True) local_path = os.path.join(my_app.data_dir, "weights.pt") if modelWeightsOptions == "pretrained": - url = os.path.join("https://github.com/ultralytics/yolov5/releases/download/v4.0/", pretrained_weights) + url = os.path.join("https://github.com/ultralytics/yolov5/releases/download/v5.0/", pretrained_weights) final_weights = url sly.fs.download(url, local_path, my_app.cache, progress) elif modelWeightsOptions == "custom": diff --git a/supervisely/train/src/sly_metrics_utils.py b/supervisely/train/src/sly_metrics_utils.py deleted file mode 100644 index a5d175ae766f..000000000000 --- a/supervisely/train/src/sly_metrics_utils.py +++ /dev/null @@ -1,142 +0,0 @@ -# old plotly implementation -# ================================================================= -# import supervisely_lib as sly -# import sly_train_globals as globals -# -# -# chart_train_style = { -# "name": "train", -# "mode": "lines+markers", -# "line": { -# "color": "rgb(0, 0, 255)", -# "width": 2 -# } -# } -# -# chart_val_style = { -# "name": "val", -# "mode": "lines+markers", -# "line": { -# "color": "rgb(255, 128, 0)", -# "width": 2 -# } -# } -# -# chart_layout = { -# "xaxis": { -# # "title": "epoch", -# "automargin": True -# }, -# "yaxis": { -# # "title": "value", -# "automargin": True -# }, -# "legend": { -# "orientation": "h", -# 
"yanchor": "bottom", -# "y": 0.99, -# "xanchor": "right", -# "x": 1 -# } -# } -# -# -# def init_chart(title, names, colors, xs, ys): -# data = [] -# for name, color, x, y in zip(names, colors, xs, ys): -# data.append({ -# "x": x, -# "y": y, -# "name": name, -# "mode": "lines+markers", -# #"type": "scattergl", -# "line": { -# "color": f"rgb({color[0]}, {color[1]}, {color[2]})", -# "width": 2 -# } -# }) -# -# chart = { -# "data": data, -# "layout": { -# "title": { -# "text": f"{title}", -# "xanchor": "left", -# 'y': 0.97, -# 'x': 0.03, -# "font": { -# "size": 14, -# "color": "rgb(96, 96, 96)", -# #"color": "rgb(0, 150, 0)", -# } -# }, -# **chart_layout -# } -# } -# return chart -# -# -# def init(data): -# demo_x = [[], []] #[[1, 2, 3, 4], [2, 4, 6, 8]] -# demo_y = [[], []] #[[10, 15, 13, 17], [16, 5, 11, 9]] -# data["mBox"] = init_chart("Box Loss", -# names=["train", "val"], -# colors=[[0, 0, 255], [255, 128, 0]], -# xs=demo_x, -# ys=demo_y) -# -# data["mObjectness"] = init_chart("Objectness Loss", -# names=["train", "val"], -# colors=[[0, 0, 255], [255, 128, 0]], -# xs=demo_x, -# ys=demo_y) -# -# data["mClassification"] = init_chart("Classification Loss", -# names=["train", "val"], -# colors=[[0, 0, 255], [255, 128, 0]], -# xs=demo_x, -# ys=demo_y) -# -# data["mPR"] = init_chart("Precision / Recall", -# names=["precision", "recall"], -# colors=[[255, 0, 255], [127, 0, 255]], -# xs=demo_x, -# ys=demo_y) -# -# data["mMAP"] = init_chart("mAP", -# names=["mAP@0.5", "mAP@0.5:0.95"], -# colors=[[255, 0, 255], [0, 255, 255]], -# xs=demo_x, -# ys=demo_y) -# -# -# def send_metrics(epoch, epochs, metrics): -# sly.logger.debug(f"Metrics: epoch {epoch} / {epochs}", extra={"metrics": metrics}) -# -# fields = [ -# {"field": "data.mBox.data[0].x", "payload": epoch, "append": True}, -# {"field": "data.mBox.data[1].x", "payload": epoch, "append": True}, -# {"field": "data.mBox.data[0].y", "payload": metrics["train/box_loss"], "append": True}, -# {"field": "data.mBox.data[1].y", 
"payload": metrics["val/box_loss"], "append": True}, -# -# {"field": "data.mObjectness.data[0].x", "payload": epoch, "append": True}, -# {"field": "data.mObjectness.data[1].x", "payload": epoch, "append": True}, -# {"field": "data.mObjectness.data[0].y", "payload": metrics["train/obj_loss"], "append": True}, -# {"field": "data.mObjectness.data[1].y", "payload": metrics["val/obj_loss"], "append": True}, -# -# {"field": "data.mClassification.data[0].x", "payload": epoch, "append": True}, -# {"field": "data.mClassification.data[1].x", "payload": epoch, "append": True}, -# {"field": "data.mClassification.data[0].y", "payload": metrics["train/cls_loss"], "append": True}, -# {"field": "data.mClassification.data[1].y", "payload": metrics["val/cls_loss"], "append": True}, -# -# {"field": "data.mPR.data[0].x", "payload": epoch, "append": True}, -# {"field": "data.mPR.data[1].x", "payload": epoch, "append": True}, -# {"field": "data.mPR.data[0].y", "payload": metrics["metrics/precision"], "append": True}, -# {"field": "data.mPR.data[1].y", "payload": metrics["metrics/recall"], "append": True}, -# -# {"field": "data.mMAP.data[0].x", "payload": epoch, "append": True}, -# {"field": "data.mMAP.data[1].x", "payload": epoch, "append": True}, -# {"field": "data.mMAP.data[0].y", "payload": metrics["metrics/mAP_0.5"], "append": True}, -# {"field": "data.mMAP.data[1].y", "payload": metrics["metrics/mAP_0.5:0.95"], "append": True}, -# ] -# globals.api.app.set_fields(globals.task_id, fields) From 152b906c9f695c0444739c5c319cd23f67bc0469 Mon Sep 17 00:00:00 2001 From: max kolomeychenko Date: Tue, 11 May 2021 14:31:12 +0300 Subject: [PATCH 236/254] [serve] modal table stat --- supervisely/serve/src/modal.html | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/supervisely/serve/src/modal.html b/supervisely/serve/src/modal.html index d26dac8bb355..fed0f96f53bb 100644 --- a/supervisely/serve/src/modal.html +++ b/supervisely/serve/src/modal.html @@ -1,4 
+1,14 @@
+ + .beautiful-table { border-collapse: collapse; } + .beautiful-table tr:nth-child(2n) { background-color: #f6f8fa; } + .beautiful-table td, .beautiful-table th { + border: 1px solid #dfe2e5; + padding: 6px 13px; + text-align: center; + line-height: 20px; + } + Pretrained on COCO @@ -31,14 +41,6 @@ - - - - - - - -
From 86a634da52feecd303d8cded05be5b6b423839e6 Mon Sep 17 00:00:00 2001 From: max kolomeychenko Date: Tue, 11 May 2021 14:37:09 +0300 Subject: [PATCH 237/254] [serve] modal tabs --- supervisely/serve/config.json | 1 + supervisely/serve/src/modal.html | 88 +++++++++++++++++--------------- 2 files changed, 48 insertions(+), 41 deletions(-) diff --git a/supervisely/serve/config.json b/supervisely/serve/config.json index 3eed9cb775fe..4e5f73f7c213 100644 --- a/supervisely/serve/config.json +++ b/supervisely/serve/config.json @@ -10,6 +10,7 @@ "docker_image": "supervisely/base-pytorch:6.0.21", "main_script": "supervisely/serve/src/sly_serve.py", "modal_template": "supervisely/serve/src/modal.html", + "modal_width": 500, "modal_template_state": { "modelWeightsOptions": "pretrained", "selectedModel": "YOLOv5s", diff --git a/supervisely/serve/src/modal.html b/supervisely/serve/src/modal.html index fed0f96f53bb..ff5723d74afd 100644 --- a/supervisely/serve/src/modal.html +++ b/supervisely/serve/src/modal.html @@ -3,50 +3,56 @@ .beautiful-table { border-collapse: collapse; } .beautiful-table tr:nth-child(2n) { background-color: #f6f8fa; } .beautiful-table td, .beautiful-table th { - border: 1px solid #dfe2e5; - padding: 6px 13px; - text-align: center; - line-height: 20px; + border: 1px solid #dfe2e5; + padding: 6px 13px; + text-align: center; + line-height: 20px; } - - - Pretrained on COCO - Custom model - -
- - - - - - - - - + + +
-
-
- {{row["subtitle"]}} -
-
-
- + + + + + Pretrained on COCO + + + + + + + + + + - - -
+
+
+ {{row["subtitle"]}} +
+
+
+ + {{model[column.key]}} + +
+
{{model[column.key]}} - -
-
- {{model[column.key]}} -
-
-
-
- - - -
+ +
+ + + + + + Custom + + + + + + From bf3180fd6be7b7374658f11c9313689982abbdd0 Mon Sep 17 00:00:00 2001 From: max kolomeychenko Date: Tue, 11 May 2021 14:39:11 +0300 Subject: [PATCH 238/254] [serve] modal tabs --- supervisely/serve/src/modal.html | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/supervisely/serve/src/modal.html b/supervisely/serve/src/modal.html index ff5723d74afd..d3a152edd114 100644 --- a/supervisely/serve/src/modal.html +++ b/supervisely/serve/src/modal.html @@ -11,7 +11,7 @@ - + Pretrained on COCO @@ -44,14 +44,12 @@ - - - Custom - - - - - + + Custom + + + + From 63e011feaf04ef9a756d39c8beed9c285566a763 Mon Sep 17 00:00:00 2001 From: max kolomeychenko Date: Tue, 11 May 2021 14:39:43 +0300 Subject: [PATCH 239/254] [serve] modal width --- supervisely/serve/config.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/supervisely/serve/config.json b/supervisely/serve/config.json index 4e5f73f7c213..638c16b5b281 100644 --- a/supervisely/serve/config.json +++ b/supervisely/serve/config.json @@ -10,7 +10,7 @@ "docker_image": "supervisely/base-pytorch:6.0.21", "main_script": "supervisely/serve/src/sly_serve.py", "modal_template": "supervisely/serve/src/modal.html", - "modal_width": 500, + "modal_width": 550, "modal_template_state": { "modelWeightsOptions": "pretrained", "selectedModel": "YOLOv5s", From 111485f3f68483a238582d9775cf9205b5901161 Mon Sep 17 00:00:00 2001 From: max kolomeychenko Date: Tue, 11 May 2021 14:43:25 +0300 Subject: [PATCH 240/254] [serve] modal tabs style --- supervisely/serve/config.json | 2 +- supervisely/serve/src/modal.html | 47 +++++++++++++++++++++++--------- 2 files changed, 35 insertions(+), 14 deletions(-) diff --git a/supervisely/serve/config.json b/supervisely/serve/config.json index 638c16b5b281..8e7ce5035866 100644 --- a/supervisely/serve/config.json +++ b/supervisely/serve/config.json @@ -10,7 +10,7 @@ "docker_image": "supervisely/base-pytorch:6.0.21", "main_script": 
"supervisely/serve/src/sly_serve.py", "modal_template": "supervisely/serve/src/modal.html", - "modal_width": 550, + "modal_width": 700, "modal_template_state": { "modelWeightsOptions": "pretrained", "selectedModel": "YOLOv5s", diff --git a/supervisely/serve/src/modal.html b/supervisely/serve/src/modal.html index d3a152edd114..7caf05aecc95 100644 --- a/supervisely/serve/src/modal.html +++ b/supervisely/serve/src/modal.html @@ -1,14 +1,35 @@ -
+
+ #yolov5-train .el-tabs.el-tabs-cards .el-radio { + display: flex; + align-items: start; + /*margin-bottom: 10px;*/ + margin-left: 0; + white-space: normal; + } + + #yolov5-train .el-tabs.el-tabs-cards .el-radio__label div { + color: #7f858e; + font-size: 13px; + } + .beautiful-table { border-collapse: collapse; } .beautiful-table tr:nth-child(2n) { background-color: #f6f8fa; } .beautiful-table td, .beautiful-table th { - border: 1px solid #dfe2e5; - padding: 6px 13px; - text-align: center; - line-height: 20px; + border: 1px solid #dfe2e5; + padding: 6px 13px; + text-align: center; + line-height: 20px; } + + #yolov5-train .el-tabs.el-tabs-cards { border-radius: 4px; box-shadow: none; } + #yolov5-train .el-tabs.el-tabs-cards .el-tabs__header { background-color: #f6fafd; } + #yolov5-train .el-tabs.el-tabs-cards .el-tabs__nav { float: none; display: flex; justify-content: + space-between; } + #yolov5-train .el-tabs.el-tabs-cards .el-tabs__item { flex: 1; margin-bottom: -3px; padding: 9px 16px 13px; + height: auto; line-height: normal; border-radius: 4px; } + @@ -42,15 +63,15 @@ + + + Custom + + + + + - - - Custom - - - - - From 20869396184939b7e9a973ff76671bf8626ca8a0 Mon Sep 17 00:00:00 2001 From: max kolomeychenko Date: Tue, 11 May 2021 14:45:30 +0300 Subject: [PATCH 241/254] [serve] fix pretrained weights URL --- supervisely/serve/src/sly_serve.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/supervisely/serve/src/sly_serve.py b/supervisely/serve/src/sly_serve.py index f510adca60d7..580722c9a38b 100644 --- a/supervisely/serve/src/sly_serve.py +++ b/supervisely/serve/src/sly_serve.py @@ -169,7 +169,7 @@ def preprocess(api: sly.Api, task_id, context, state, app_logger): progress = sly.Progress("Downloading weights", 1, is_size=True, need_info_log=True) local_path = os.path.join(my_app.data_dir, "weights.pt") if modelWeightsOptions == "pretrained": - url = os.path.join("https://github.com/ultralytics/yolov5/releases/download/v5.0/", 
pretrained_weights) + url = f"https://github.com/ultralytics/yolov5/releases/download/v5.0/{pretrained_weights}.pt" final_weights = url sly.fs.download(url, local_path, my_app.cache, progress) elif modelWeightsOptions == "custom": From fb5eeed18245e6cc11a1ee0d5cab7e717492c629 Mon Sep 17 00:00:00 2001 From: max kolomeychenko Date: Tue, 11 May 2021 15:17:46 +0300 Subject: [PATCH 242/254] [serve] add stride to serv --- supervisely/serve/src/nn_utils.py | 21 ++++++++++----------- supervisely/serve/src/sly_serve.py | 5 +++-- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/supervisely/serve/src/nn_utils.py b/supervisely/serve/src/nn_utils.py index e090ca6cf06e..a4028a8453e5 100644 --- a/supervisely/serve/src/nn_utils.py +++ b/supervisely/serve/src/nn_utils.py @@ -34,8 +34,9 @@ def construct_model_meta(model): return meta -def load_model(weights_path, imgsz=640, device='cpu', half_precision=False): +def load_model(weights_path, imgsz=640, device='cpu'): device = select_device(device) + half = device.type != 'cpu' # half precision only supported on CUDA # Load model model = attempt_load(weights_path, map_location=device) # load FP32 model @@ -47,27 +48,25 @@ def load_model(weights_path, imgsz=640, device='cpu', half_precision=False): else: sly.logger.warning(f"Image size is not found in model checkpoint. 
Use default: {IMG_SIZE}") imgsz = IMG_SIZE + stride = int(model.stride.max()) # model stride + imgsz = check_img_size(imgsz, s=stride) # check img_size - gs = max(int(model.stride.max()), 32) # grid size (max stride) - imgsz = check_img_size(imgsz, s=gs) # check img_size - - half = device.type != 'cpu' and half_precision # half precision only supported on CUDA if half: model.half() # to FP16 - img = torch.zeros((1, 3, imgsz, imgsz), device=device) # init img - _ = model(img.half() if half else img) if device.type != 'cpu' else None # run once + if device.type != 'cpu': + model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once - return model, half, device, imgsz + return model, half, device, imgsz, stride -def inference(model, half, device, imgsz, image: np.ndarray, meta: sly.ProjectMeta, conf_thres=0.25, iou_thres=0.45, +def inference(model, half, device, imgsz, stride, image: np.ndarray, meta: sly.ProjectMeta, conf_thres=0.25, iou_thres=0.45, augment=False, agnostic_nms=False, debug_visualization=False) -> sly.Annotation: names = model.module.names if hasattr(model, 'module') else model.names - img0 = image + img0 = image # RGB # Padded resize - img = letterbox(img0, new_shape=imgsz)[0] + img = letterbox(img0, new_shape=imgsz, stride=stride)[0] img = img.transpose(2, 0, 1) # to 3x416x416 img = np.ascontiguousarray(img) diff --git a/supervisely/serve/src/sly_serve.py b/supervisely/serve/src/sly_serve.py index 580722c9a38b..8f36c6b5c306 100644 --- a/supervisely/serve/src/sly_serve.py +++ b/supervisely/serve/src/sly_serve.py @@ -29,6 +29,7 @@ half = None device = None imgsz = None +stride = None settings_path = os.path.join(root_source_path, "supervisely/serve/custom_settings.yaml") @@ -98,7 +99,7 @@ def inference_image_path(image_path, context, state, app_logger): } rect = results[0] image = sly.image.crop(image, rect) - ann_json = inference(model, half, device, imgsz, image, meta, + ann_json = inference(model, half, device, 
imgsz, stride, image, meta, conf_thres=conf_thres, iou_thres=iou_thres, augment=augment, debug_visualization=debug_visualization) return ann_json @@ -181,7 +182,7 @@ def preprocess(api: sly.Api, task_id, context, state, app_logger): raise ValueError("Unknown weights option {!r}".format(modelWeightsOptions)) # load model on device - model, half, device, imgsz = load_model(local_path, device=DEVICE_STR) + model, half, device, imgsz, stride = load_model(local_path, device=DEVICE_STR) meta = construct_model_meta(model) sly.logger.info("Model has been successfully deployed") From 8c7a8bae9878ae857c6ef2510002b5558ee46ab6 Mon Sep 17 00:00:00 2001 From: max kolomeychenko Date: Tue, 11 May 2021 16:41:42 +0300 Subject: [PATCH 243/254] [train] readme wip --- supervisely/train/README.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/supervisely/train/README.md b/supervisely/train/README.md index 99fdb494038a..d76b805a95b2 100644 --- a/supervisely/train/README.md +++ b/supervisely/train/README.md @@ -22,6 +22,9 @@ Train YOLOv5 on your custom data. All annotations will be converted to the bounding boxes automatically. Configure Train / Validation splits, model and training hyperparameters. Run on any agent (with GPU) in your team. Monitor progress, metrics, logs and other visualizations withing a single dashboard. +- **May 11, 2021**: [v5.0 release](https://github.com/supervisely-ecosystem/yolov5/tree/v5.0.0): merge updates from original YOLOv5 repo (including new model architectures), split data to train/val based on datasets or tags, and other fixes +- **March 3, 2021**: [v4.0 release](https://github.com/supervisely-ecosystem/yolov5/tree/v4.0.9): YOLOv5 is integrated to Supervisely (train / serve / inference) + # How To Use 1. 
Add app to your team from Ecosystem @@ -45,4 +48,4 @@ Watch short video for more details: # Screenshot - + From 48d4eeb09e82f74947f5f87f84172f0101caeeb4 Mon Sep 17 00:00:00 2001 From: max kolomeychenko Date: Tue, 11 May 2021 16:42:17 +0300 Subject: [PATCH 244/254] [train] readme wip --- supervisely/train/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/supervisely/train/README.md b/supervisely/train/README.md index d76b805a95b2..82732dfe51e3 100644 --- a/supervisely/train/README.md +++ b/supervisely/train/README.md @@ -22,7 +22,7 @@ Train YOLOv5 on your custom data. All annotations will be converted to the bounding boxes automatically. Configure Train / Validation splits, model and training hyperparameters. Run on any agent (with GPU) in your team. Monitor progress, metrics, logs and other visualizations withing a single dashboard. -- **May 11, 2021**: [v5.0 release](https://github.com/supervisely-ecosystem/yolov5/tree/v5.0.0): merge updates from original YOLOv5 repo (including new model architectures), split data to train/val based on datasets or tags, and other fixes +- **May 11, 2021**: [v5.0 release](https://github.com/supervisely-ecosystem/yolov5/tree/v5.0.0): merge updates from original YOLOv5 repo (including new model architectures), split data to train/val based on datasets or tags, update settings in UI, other fixes - **March 3, 2021**: [v4.0 release](https://github.com/supervisely-ecosystem/yolov5/tree/v4.0.9): YOLOv5 is integrated to Supervisely (train / serve / inference) # How To Use From 16b5c20311c637b962a599edaed175cc4cca416b Mon Sep 17 00:00:00 2001 From: max kolomeychenko Date: Tue, 11 May 2021 16:42:36 +0300 Subject: [PATCH 245/254] [train] readme wip --- supervisely/train/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/supervisely/train/README.md b/supervisely/train/README.md index 82732dfe51e3..f26e78f3a841 100644 --- a/supervisely/train/README.md +++ b/supervisely/train/README.md @@ -22,7 
+22,7 @@ Train YOLOv5 on your custom data. All annotations will be converted to the bounding boxes automatically. Configure Train / Validation splits, model and training hyperparameters. Run on any agent (with GPU) in your team. Monitor progress, metrics, logs and other visualizations withing a single dashboard. -- **May 11, 2021**: [v5.0 release](https://github.com/supervisely-ecosystem/yolov5/tree/v5.0.0): merge updates from original YOLOv5 repo (including new model architectures), split data to train/val based on datasets or tags, update settings in UI, other fixes +- **May 11, 2021**: [v5.0 release](https://github.com/supervisely-ecosystem/yolov5/tree/v5.0.0): merge updates from original YOLOv5 repo (including new model architectures), split data to train/val based on datasets or tags, update UI for settings, other fixes - **March 3, 2021**: [v4.0 release](https://github.com/supervisely-ecosystem/yolov5/tree/v4.0.9): YOLOv5 is integrated to Supervisely (train / serve / inference) # How To Use From 7c0e7c725db6a8b22524ba17544f21dca5fa8748 Mon Sep 17 00:00:00 2001 From: max kolomeychenko Date: Thu, 13 May 2021 13:47:30 +0300 Subject: [PATCH 246/254] [serve] change inference_image_id to work with remote storages (s3, azure, ...) 
--- supervisely/serve/src/sly_serve.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/supervisely/serve/src/sly_serve.py b/supervisely/serve/src/sly_serve.py index 8f36c6b5c306..1e3f193f67e2 100644 --- a/supervisely/serve/src/sly_serve.py +++ b/supervisely/serve/src/sly_serve.py @@ -130,8 +130,12 @@ def inference_image_id(api: sly.Api, task_id, context, state, app_logger): app_logger.debug("Input data", extra={"state": state}) image_id = state["image_id"] image_info = api.image.get_info_by_id(image_id) - state["image_url"] = image_info.full_storage_url - inference_image_url(api, task_id, context, state, app_logger) + image_path = os.path.join(my_app.data_dir, sly.rand_str(10) + image_info.name) + api.image.download_path(image_id, image_path) + ann_json = inference_image_path(image_path, context, state, app_logger) + sly.fs.silent_remove(image_path) + request_id = context["request_id"] + my_app.send_response(request_id, data=ann_json) @my_app.callback("inference_batch_ids") From 1ac6bedd21a26fc74044be419386a42463905a48 Mon Sep 17 00:00:00 2001 From: max kolomeychenko Date: Thu, 13 May 2021 14:43:39 +0300 Subject: [PATCH 247/254] [serve] fix stride initialization --- supervisely/serve/src/sly_serve.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/supervisely/serve/src/sly_serve.py b/supervisely/serve/src/sly_serve.py index 1e3f193f67e2..64d3b648e6ae 100644 --- a/supervisely/serve/src/sly_serve.py +++ b/supervisely/serve/src/sly_serve.py @@ -168,7 +168,7 @@ def debug_inference(): @my_app.callback("preprocess") @sly.timeit def preprocess(api: sly.Api, task_id, context, state, app_logger): - global model, half, device, imgsz, meta, final_weights + global model, half, device, imgsz, stride, meta, final_weights # download weights progress = sly.Progress("Downloading weights", 1, is_size=True, need_info_log=True) From f92ed93d1f140c1014c5481a3d24532fa12d4e78 Mon Sep 17 00:00:00 2001 From: max kolomeychenko Date: Thu, 13 May 
2021 15:32:36 +0300 Subject: [PATCH 248/254] [serve] yolov5 serve - fixed --- supervisely/serve/debug.env | 9 +++++---- supervisely/serve/src/sly_serve.py | 3 ++- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/supervisely/serve/debug.env b/supervisely/serve/debug.env index 991b0537373d..df15b44000fa 100644 --- a/supervisely/serve/debug.env +++ b/supervisely/serve/debug.env @@ -4,14 +4,15 @@ DEBUG_APP_DIR="/app_debug_data" DEBUG_CACHE_DIR="/app_cache" LOG_LEVEL="trace" -TASK_ID=2635 +TASK_ID=4585 -context.teamId=7 -context.workspaceId=263 +context.teamId=237 +context.workspaceId=311 modal.state.modelWeightsOptions="pretrained" #"pretrained" "custom" -modal.state.modelSize="yolov5s.pt" +#modal.state.modelSize="yolov5s.pt" +modal.state.selectedModel="YOLOv5s" modal.state.weightsPath="/yolov5_train/lemons_annotated/2472/weights/best.pt" modal.state.device="0" diff --git a/supervisely/serve/src/sly_serve.py b/supervisely/serve/src/sly_serve.py index 64d3b648e6ae..0ba697223b91 100644 --- a/supervisely/serve/src/sly_serve.py +++ b/supervisely/serve/src/sly_serve.py @@ -168,7 +168,8 @@ def debug_inference(): @my_app.callback("preprocess") @sly.timeit def preprocess(api: sly.Api, task_id, context, state, app_logger): - global model, half, device, imgsz, stride, meta, final_weights + global model, half, device, imgsz, meta, final_weights + global stride # download weights progress = sly.Progress("Downloading weights", 1, is_size=True, need_info_log=True) From 7603c865dbfa127e9560a9c1b4c5e115044512e7 Mon Sep 17 00:00:00 2001 From: max kolomeychenko Date: Fri, 14 May 2021 12:34:49 +0300 Subject: [PATCH 249/254] add additional info logs --- supervisely/train/debug.env | 3 ++- supervisely/train/src/ui/splits.py | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/supervisely/train/debug.env b/supervisely/train/debug.env index 316bb57ec1e8..f32ca184fde1 100644 --- a/supervisely/train/debug.env +++ b/supervisely/train/debug.env @@ -8,7 +8,8 @@ 
TASK_ID=4326 context.teamId=229 context.workspaceId=287 -modal.state.slyProjectId=3529 # lemons-annotated +#modal.state.slyProjectId=3529 # lemons-annotated +modal.state.slyProjectId=3643 # lemons with ready train/val tags SERVER_ADDRESS="put your value here" API_TOKEN="put your value here" diff --git a/supervisely/train/src/ui/splits.py b/supervisely/train/src/ui/splits.py index e7e8095988db..6ac50b2f4513 100644 --- a/supervisely/train/src/ui/splits.py +++ b/supervisely/train/src/ui/splits.py @@ -44,6 +44,7 @@ def init(project_info, project_meta: sly.ProjectMeta, data, state): def get_train_val_sets(project_dir, state): split_method = state["splitMethod"] + sly.logger.info(f"Split method for train/val is '{split_method}'") if split_method == "random": train_count = state["randomSplit"]["count"]["train"] val_count = state["randomSplit"]["count"]["val"] From e7955d8fe2bb8b4a6d2cc79dfdd5dcb86ff174c7 Mon Sep 17 00:00:00 2001 From: max kolomeychenko Date: Mon, 17 May 2021 09:42:21 +0300 Subject: [PATCH 250/254] [serve] todo --- supervisely/serve/src/sly_serve.py | 1 + 1 file changed, 1 insertion(+) diff --git a/supervisely/serve/src/sly_serve.py b/supervisely/serve/src/sly_serve.py index 0ba697223b91..cbdfbd3f2ed9 100644 --- a/supervisely/serve/src/sly_serve.py +++ b/supervisely/serve/src/sly_serve.py @@ -204,6 +204,7 @@ def main(): my_app.run(initial_events=[{"command": "preprocess"}]) +#@TODO: move inference methods to SDK #@TODO: augment inference #@TODO: https://pypi.org/project/cachetools/ if __name__ == "__main__": From 22d48c29870a8a3754bd9a35bf2d9135615ae71d Mon Sep 17 00:00:00 2001 From: max kolomeychenko Date: Mon, 17 May 2021 09:51:52 +0300 Subject: [PATCH 251/254] [train] splits - hide notice1 --- supervisely/train/src/ui/splits.html | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/supervisely/train/src/ui/splits.html b/supervisely/train/src/ui/splits.html index a40deb164f45..81130cc43b90 100644 --- 
a/supervisely/train/src/ui/splits.html +++ b/supervisely/train/src/ui/splits.html @@ -48,15 +48,15 @@ Based on image tags
Images should have assigned train or val tag
- - - - - - + + + + + + + + From e16acdb745b7c581c5b68f4b6a2bf572e12ef628 Mon Sep 17 00:00:00 2001 From: max kolomeychenko Date: Mon, 17 May 2021 16:49:49 +0300 Subject: [PATCH 252/254] fix collections readme --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 2904b18376df..0fa30d3e704d 100644 --- a/README.md +++ b/README.md @@ -74,7 +74,7 @@ YOLOv5 is one of the best available detectors. And we are proud to announce its # For Developers - you can use sources of [Serve YOLOv5 app](https://github.com/supervisely-ecosystem/yolov5/tree/master/supervisely/serve) as example of how to prepare weights, initialize model and apply it to a folder with images (or to images URLs) -- This apps collection is based on the original YOLOv5 [release v4.0](https://github.com/ultralytics/yolov5/releases/tag/v4.0). Once a next official release is available, all apps will be synchronized with it and also released with the new versions. Before running any app you can choose what version to use. Also Supervisely Team will pull updates from original master branch from time to time. +- This apps collection is based on the original YOLOv5 [release v5.0](https://github.com/ultralytics/yolov5/releases/tag/v5.0). Once a next official release is available, all apps will be synchronized with it and also released with the new versions. Before running any app you can choose what version to use. Also Supervisely Team will pull updates from original master branch from time to time. 
# Contact & Questions & Suggestions From 6318b1cb10c65718b61f83961fbca19c34e912ef Mon Sep 17 00:00:00 2001 From: max kolomeychenko Date: Mon, 17 May 2021 16:59:55 +0300 Subject: [PATCH 253/254] train readme - new screenshot --- supervisely/train/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/supervisely/train/README.md b/supervisely/train/README.md index f26e78f3a841..7e9653cf2db2 100644 --- a/supervisely/train/README.md +++ b/supervisely/train/README.md @@ -48,4 +48,4 @@ Watch short video for more details: # Screenshot - + From aa60ea45f7062ee55ecc900afeb1ca2eb1a2dbd2 Mon Sep 17 00:00:00 2001 From: max kolomeychenko Date: Mon, 17 May 2021 17:09:37 +0300 Subject: [PATCH 254/254] train readme --- supervisely/train/README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/supervisely/train/README.md b/supervisely/train/README.md index 7e9653cf2db2..c0f6c87578cc 100644 --- a/supervisely/train/README.md +++ b/supervisely/train/README.md @@ -22,7 +22,9 @@ Train YOLOv5 on your custom data. All annotations will be converted to the bounding boxes automatically. Configure Train / Validation splits, model and training hyperparameters. Run on any agent (with GPU) in your team. Monitor progress, metrics, logs and other visualizations withing a single dashboard. 
-- **May 11, 2021**: [v5.0 release](https://github.com/supervisely-ecosystem/yolov5/tree/v5.0.0): merge updates from original YOLOv5 repo (including new model architectures), split data to train/val based on datasets or tags, update UI for settings, other fixes + +Major releases: +- **May 17, 2021**: [v5.0 release](https://github.com/supervisely-ecosystem/yolov5/tree/v5.0.0): merge updates from original YOLOv5 repo (including new model architectures), split data to train/val based on datasets or tags, update UI for settings, other fixes - **March 3, 2021**: [v4.0 release](https://github.com/supervisely-ecosystem/yolov5/tree/v4.0.9): YOLOv5 is integrated to Supervisely (train / serve / inference) # How To Use