From 1752768fb3b3ff4f842eaaecf7eba4808ac124a9 Mon Sep 17 00:00:00 2001 From: Nioolek <40284075+Nioolek@users.noreply.github.com> Date: Wed, 14 Dec 2022 06:48:15 +0800 Subject: [PATCH 001/128] Fix Chinese README (#10465) * Beautify Chinese Documents * Beautify Chinese Documents * Beautify Chinese Documents * Beautify Chinese Documents * add blank * Update translate-readme.yml Disable auto-translation by changing on-push branch to 'translate_readme'. This prevents overwriting of manual fixes. Signed-off-by: Glenn Jocher * Update translate-readme.yml Signed-off-by: Glenn Jocher * fix live doc * Update README.md Signed-off-by: Glenn Jocher * Update README.zh-CN.md Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- .github/workflows/translate-readme.yml | 3 +- README.md | 4 +- README.zh-CN.md | 260 ++++++++++++------------- 3 files changed, 133 insertions(+), 134 deletions(-) diff --git a/.github/workflows/translate-readme.yml b/.github/workflows/translate-readme.yml index 76f59b83e65f..538ff375097e 100644 --- a/.github/workflows/translate-readme.yml +++ b/.github/workflows/translate-readme.yml @@ -6,8 +6,7 @@ name: Translate README on: push: branches: - - main - - master + - translate_readme # replace with 'master' to enable action paths: - README.md diff --git a/README.md b/README.md index 9ee97321082e..21bdc83f349e 100644 --- a/README.md +++ b/README.md @@ -50,9 +50,9 @@ To request an Enterprise License please complete the form at -[Ultralytics Live Session Ep. 2](https://youtu.be/LKpuzZllNpA) ✨ will be streaming live on **Tuesday, December 13th at 19:00 CET** with [Joseph Nelson](https://github.com/josephofiowa) of [Roboflow](https://roboflow.com/?ref=ultralytics) who will join us to discuss the brand new Roboflow x Ultralytics HUB integration. Tune in to ask Glenn and Joseph about how you can make speed up workflows with seamless dataset integration! 🔥 +[Ultralytics Live Session Ep. 
2](https://youtu.be/QGRtEG7UjtE) ✨ will be streaming live on **Tuesday, December 13th at 19:00 CET** with [Joseph Nelson](https://github.com/josephofiowa) of [Roboflow](https://roboflow.com/?ref=ultralytics) who will join us to discuss the brand new Roboflow x Ultralytics HUB integration. Tune in to ask Glenn and Joseph about how you can make speed up workflows with seamless dataset integration! 🔥 - + diff --git a/README.zh-CN.md b/README.zh-CN.md index 0fc77565c5ef..15232be3aa4f 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -4,7 +4,7 @@

-[英语](README.md)\|[简体中文](README.zh-CN.md)
+[英文](README.md)\|[简体中文](README.zh-CN.md)
YOLOv5 CI @@ -17,9 +17,9 @@

-YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表超力对未来视觉 AI 方法的开源研究,结合在数千小时的研究和开发中积累的经验教训和最佳实践。 +YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表 Ultralytics 对未来视觉 AI 方法的开源研究,结合在数千小时的研究和开发中积累的经验教训和最佳实践。 -要申请企业许可证,请填写表格Ultralytics 许可. +如果要申请企业许可证,请填写表格Ultralytics 许可. +##
Ultralytics 直播会议
-[Ultralytics Live Session Ep。 2个](https://youtu.be/LKpuzZllNpA)✨将直播**欧洲中部时间 12 月 13 日星期二 19:00**和[约瑟夫·纳尔逊](https://github.com/josephofiowa)的[机器人流](https://roboflow.com/?ref=ultralytics)谁将与我们一起讨论全新的 Roboflow x Ultralytics HUB 集成。收听 Glenn 和 Joseph 询问如何通过无缝数据集集成来加快工作流程! 🔥 +[Ultralytics Live Session Ep. 2](https://youtu.be/QGRtEG7UjtE) ✨ 将与 [Roboflow](https://roboflow.com/?ref=ultralytics) 的 [Joseph Nelson](https://github.com/josephofiowa) 在 **欧洲中部时间 12 月 13 日星期二的 19:00** ,他将与我们一起讨论全新的 Roboflow x Ultralytics HUB 集成。欢迎收听 Glenn 和 Joseph ,以了解如何通过无缝数据集集成来加快工作流程! 🔥 - +
-##
细分 ⭐ 新
+##
实例分割模型 ⭐ 新
-我们新的 YOLOv5[发布 v7.0](https://github.com/ultralytics/yolov5/releases/v7.0)实例分割模型是世界上最快和最准确的,击败所有当前[SOTA 基准](https://paperswithcode.com/sota/real-time-instance-segmentation-on-mscoco).我们使它们非常易于训练、验证和部署。查看我们的完整详细信息[发行说明](https://github.com/ultralytics/yolov5/releases/v7.0)并访问我们的[YOLOv5 分割 Colab 笔记本](https://github.com/ultralytics/yolov5/blob/master/segment/tutorial.ipynb)快速入门教程。 +我们新的 YOLOv5 [release v7.0](https://github.com/ultralytics/yolov5/releases/v7.0) 实例分割模型是世界上最快和最准确的模型,击败所有当前 [SOTA 基准](https://paperswithcode.com/sota/real-time-instance-segmentation-on-mscoco)。我们使它非常易于训练、验证和部署。更多细节请查看 [发行说明](https://github.com/ultralytics/yolov5/releases/v7.0) 或访问我们的 [YOLOv5 分割 Colab 笔记本](https://github.com/ultralytics/yolov5/blob/master/segment/tutorial.ipynb) 以快速入门。
- Segmentation Checkpoints + 实例分割模型列表
-我们使用 A100 GPU 在 COCO 上以 640 图像大小训练了 300 个时期的 YOLOv5 分割模型。我们将所有模型导出到 ONNX FP32 以进行 CPU 速度测试,并导出到 TensorRT FP16 以进行 GPU 速度测试。我们在 Google 上进行了所有速度测试[协作临](https://colab.research.google.com/signup)便于重现的笔记本。 +我们使用 A100 GPU 在 COCO 上以 640 图像大小训练了 300 epochs 得到 YOLOv5 分割模型。我们将所有模型导出到 ONNX FP32 以进行 CPU 速度测试,并导出到 TensorRT FP16 以进行 GPU 速度测试。为了便于再现,我们在 Google [Colab Pro](https://colab.research.google.com/signup) 上进行了所有速度测试。 -| 模型 | 尺寸
(像素) | 地图盒子
50-95 | 地图面具
50-95 | 火车时间
300个纪元
A100(小时) | 速度
ONNX 中央处理器
(小姐) | 速度
同仁堂A100
(小姐) | 参数
(男) | 失败者
@640(二) | -| ------------------------------------------------------------------------------------------ | --------------- | ------------------ | ------------------ | ------------------------------- | ----------------------------- | -------------------------- | -------------- | ------------------- | -| [YOLOv5n-se](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-seg.pt) | 640 | 27.6 | 23.4 | 80:17 | **62.7** | **1.2** | **2.0** | **7.1** | -| [YOLOv5s-se](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt) | 640 | 37.6 | 31.7 | 88:16 | 173.3 | 1.4 | 7.6 | 26.4 | -| [YOLOv5m段](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-seg.pt) | 640 | 45.0 | 37.1 | 108:36 | 427.0 | 2.2 | 22.0 | 70.8 | -| [YOLOv5l-se](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-seg.pt) | 640 | 49.0 | 39.9 | 我:43(X) | 857.4 | 2.9 | 47.9 | 147.7 | -| [YOLOv5x-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-seg.pt) | 640 | **50.7** | **41.4** | 62:56 (zks) | 1579.2 | 4.5 | 88.8 | 265.7 | +| 模型 | 尺寸
(像素) | mAPbox
50-95 | mAPmask
50-95 | 训练时长
300 epochs
A100 GPU(小时) | 推理速度
ONNX CPU
(ms) | 推理速度
TRT A100
(ms) | 参数量
(M) | FLOPs
@640 (B) | +| ------------------------------------------------------------------------------------------ | ------------------- | -------------------- | --------------------- | --------------------------------------------- | --------------------------------- | --------------------------------- | ----------------- | ---------------------- | +| [YOLOv5n-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-seg.pt) | 640 | 27.6 | 23.4 | 80:17 | **62.7** | **1.2** | **2.0** | **7.1** | +| [YOLOv5s-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt) | 640 | 37.6 | 31.7 | 88:16 | 173.3 | 1.4 | 7.6 | 26.4 | +| [YOLOv5m-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-seg.pt) | 640 | 45.0 | 37.1 | 108:36 | 427.0 | 2.2 | 22.0 | 70.8 | +| [YOLOv5l-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-seg.pt) | 640 | 49.0 | 39.9 | 66:43 (2x) | 857.4 | 2.9 | 47.9 | 147.7 | +| [YOLOv5x-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-seg.pt) | 640 | **50.7** | **41.4** | 62:56 (3x) | 1579.2 | 4.5 | 88.8 | 265.7 | -- 使用 SGD 优化器将所有检查点训练到 300 个时期`lr0=0.01`和`weight_decay=5e-5`在图像大小 640 和所有默认设置。
运行记录到[HTTPS://玩豆瓣.爱/Glenn-就ocher/yo lo V5_V70_official](https://wandb.ai/glenn-jocher/YOLOv5_v70_official) -- **准确性**值适用于 COCO 数据集上的单模型单尺度。
重现者`python segment/val.py --data coco.yaml --weights yolov5s-seg.pt` -- **速度**使用 a 对超过 100 个推理图像进行平均[协作临](https://colab.research.google.com/signup)A100 高 RAM 实例。值仅表示推理速度(NMS 每张图像增加约 1 毫秒)。
重现者`python segment/val.py --data coco.yaml --weights yolov5s-seg.pt --batch 1` -- **出口**到 FP32 的 ONNX 和 FP16 的 TensorRT 完成`export.py`.
重现者`python export.py --weights yolov5s-seg.pt --include engine --device 0 --half` +- 所有模型使用 SGD 优化器训练, 都使用 `lr0=0.01` 和 `weight_decay=5e-5` 参数, 图像大小为 640 。
训练 log 可以查看 https://wandb.ai/glenn-jocher/YOLOv5_v70_official +- **准确性**结果都在 COCO 数据集上,使用单模型单尺度测试得到。
复现命令 `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt` +- **推理速度**是使用 100 张图像推理时间进行平均得到,测试环境使用 [Colab Pro](https://colab.research.google.com/signup) 上 A100 高 RAM 实例。结果仅表示推理速度(NMS 每张图像增加约 1 毫秒)。
复现命令 `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt --batch 1` +- **模型转换**到 FP32 的 ONNX 和 FP16 的 TensorRT 脚本为 `export.py`.
运行命令 `python export.py --weights yolov5s-seg.pt --include engine --device 0 --half`
- Segmentation Usage Examples  Open In Colab + 分割模型使用示例  Open In Colab -### 火车 +### 训练 -YOLOv5分割训练支持自动下载COCO128-seg分割数据集`--data coco128-seg.yaml`COCO-segments 数据集的参数和手动下载`bash data/scripts/get_coco.sh --train --val --segments`接着`python train.py --data coco.yaml`. +YOLOv5分割训练支持自动下载 COCO128-seg 分割数据集,用户仅需在启动指令中包含 `--data coco128-seg.yaml` 参数。 若要手动下载,使用命令 `bash data/scripts/get_coco.sh --train --val --segments`, 在下载完毕后,使用命令 `python train.py --data coco.yaml` 开启训练。 ```bash -# Single-GPU +# 单 GPU python segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 -# Multi-GPU DDP +# 多 GPU, DDP 模式 python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 --device 0,1,2,3 ``` -### 瓦尔 +### 验证 在 COCO 数据集上验证 YOLOv5s-seg mask mAP: ```bash -bash data/scripts/get_coco.sh --val --segments # download COCO val segments split (780MB, 5000 images) -python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # validate +bash data/scripts/get_coco.sh --val --segments # 下载 COCO val segments 数据集 (780MB, 5000 images) +python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # 验证 ``` ### 预测 @@ -119,13 +119,13 @@ python segment/predict.py --weights yolov5m-seg.pt --data data/images/bus.jpg ``` ```python -model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5m-seg.pt') # load from PyTorch Hub (WARNING: inference not yet supported) +model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5m-seg.pt') # 从load from PyTorch Hub 加载模型 (WARNING: 推理暂未支持) ``` | ![zidane](https://user-images.githubusercontent.com/26833433/203113421-decef4c4-183d-4a0a-a6c2-6435b33bc5d3.jpg) | ![bus](https://user-images.githubusercontent.com/26833433/203113416-11fe0025-69f7-4874-a0a6-65d0bfe2999a.jpg) | | ---------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------- | -### 出口 +### 模型导出 将 YOLOv5s-seg 模型导出到 ONNX 和 TensorRT: @@ -137,12 +137,12 @@ python export.py --weights yolov5s-seg.pt --include onnx engine --img 640 --devi ##
文档
-见[YOLOv5 文档](https://docs.ultralytics.com)有关培训、测试和部署的完整文档。请参阅下面的快速入门示例。 +有关训练、测试和部署的完整文档见[YOLOv5 文档](https://docs.ultralytics.com)。请参阅下面的快速入门示例。
-Install +安装 -克隆回购并安装[要求.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt)在一个[**Python>=3.7.0**](https://www.python.org/)环境,包括[**PyTorch>=1.7**](https://pytorch.org/get-started/locally/). +克隆 repo,并要求在 [**Python>=3.7.0**](https://www.python.org/) 环境中安装 [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) ,且要求 [**PyTorch>=1.7**](https://pytorch.org/get-started/locally/) 。 ```bash git clone https://github.com/ultralytics/yolov5 # clone @@ -153,10 +153,10 @@ pip install -r requirements.txt # install
-Inference +推理 -YOLOv5[PyTorch 中心](https://github.com/ultralytics/yolov5/issues/36)推理。[楷模](https://github.com/ultralytics/yolov5/tree/master/models)自动从最新下载 -YOLOv5[发布](https://github.com/ultralytics/yolov5/releases). +使用 YOLOv5 [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36) 推理。最新 [模型](https://github.com/ultralytics/yolov5/tree/master/models) 将自动的从 +YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) 中下载。 ```python import torch @@ -177,10 +177,10 @@ results.print() # or .show(), .save(), .crop(), .pandas(), etc.
-Inference with detect.py +使用 detect.py 推理 -`detect.py`在各种来源上运行推理,下载[楷模](https://github.com/ultralytics/yolov5/tree/master/models)自动从 -最新的YOLOv5[发布](https://github.com/ultralytics/yolov5/releases)并将结果保存到`runs/detect`. +`detect.py` 在各种来源上运行推理, [模型](https://github.com/ultralytics/yolov5/tree/master/models) 自动从 +最新的YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) 中下载,并将结果保存到 `runs/detect` 。 ```bash python detect.py --weights yolov5s.pt --source 0 # webcam @@ -198,13 +198,14 @@ python detect.py --weights yolov5s.pt --source 0 #
-Training +训练 -下面的命令重现 YOLOv5[可可](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh)结果。[楷模](https://github.com/ultralytics/yolov5/tree/master/models)和[数据集](https://github.com/ultralytics/yolov5/tree/master/data)自动从最新下载 -YOLOv5[发布](https://github.com/ultralytics/yolov5/releases). YOLOv5n/s/m/l/x 的训练时间为 -V100 GPU 上 1/2/4/6/8 天([多GPU](https://github.com/ultralytics/yolov5/issues/475)倍快)。使用 -最大的`--batch-size`可能,或通过`--batch-size -1`为了 -YOLOv5[自动批处理](https://github.com/ultralytics/yolov5/pull/5092).显示的批量大小适用于 V100-16GB。 +下面的命令重现 YOLOv5 在 [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh) 数据集上的结果。 +最新的 [模型](https://github.com/ultralytics/yolov5/tree/master/models) 和 [数据集](https://github.com/ultralytics/yolov5/tree/master/data) +将自动的从 YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) 中下载。 +YOLOv5n/s/m/l/x 在 V100 GPU 的训练时间为 1/2/4/6/8 天( [多GPU](https://github.com/ultralytics/yolov5/issues/475) 训练速度更快)。 +尽可能使用更大的 `--batch-size` ,或通过 `--batch-size -1` 实现 +YOLOv5 [自动批处理](https://github.com/ultralytics/yolov5/pull/5092) 。下方显示的 batchsize 适用于 V100-16GB。 ```bash python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml --batch-size 128 @@ -219,16 +220,15 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml -
-Tutorials +教程 - [训练自定义数据](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data)🚀 推荐 -- [获得最佳训练结果的技巧](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results)☘️ - 推荐的 +- [获得最佳训练结果的技巧](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results)☘️ 推荐 - [多 GPU 训练](https://github.com/ultralytics/yolov5/issues/475) -- [PyTorch 中心](https://github.com/ultralytics/yolov5/issues/36)🌟 新 +- [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36)🌟 新 - [TFLite、ONNX、CoreML、TensorRT 导出](https://github.com/ultralytics/yolov5/issues/251)🚀 - [NVIDIA Jetson Nano 部署](https://github.com/ultralytics/yolov5/issues/9627)🌟 新 -- [测试时间增强 (TTA)](https://github.com/ultralytics/yolov5/issues/303) +- [测试时数据增强 (TTA)](https://github.com/ultralytics/yolov5/issues/303) - [模型集成](https://github.com/ultralytics/yolov5/issues/318) - [模型修剪/稀疏度](https://github.com/ultralytics/yolov5/issues/304) - [超参数进化](https://github.com/ultralytics/yolov5/issues/607) @@ -236,12 +236,12 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - - [架构总结](https://github.com/ultralytics/yolov5/issues/6998)🌟 新 - [用于数据集、标签和主动学习的 Roboflow](https://github.com/ultralytics/yolov5/issues/4975)🌟 新 - [ClearML 记录](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml)🌟 新 -- [所以平台](https://github.com/ultralytics/yolov5/wiki/Deci-Platform)🌟 新 -- [彗星记录](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet)🌟 新 +- [Deci 平台](https://github.com/ultralytics/yolov5/wiki/Deci-Platform)🌟 新 +- [Comet Logging](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet)🌟 新
-##
集成
+##
模块集成

@@ -263,118 +263,118 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - -| 机器人流 | ClearML ⭐ 新 | 彗星⭐新 | 所以⭐新 | -| :-------------------------------------------------------------------------: | :-----------------------------------------------------------------------: | :----------------------------------------------------------------------------: | :---------------------------------------------------------------: | -| 将您的自定义数据集标记并直接导出到 YOLOv5 以进行训练[机器人流](https://roboflow.com/?ref=ultralytics) | 使用自动跟踪、可视化甚至远程训练 YOLOv5[清除ML](https://cutt.ly/yolov5-readme-clearml)(开源!) | 永远免费,[彗星](https://bit.ly/yolov5-readme-comet)可让您保存 YOLOv5 模型、恢复训练以及交互式可视化和调试预测 | 一键自动编译量化YOLOv5以获得更好的推理性能[所以](https://bit.ly/yolov5-deci-platform) | +| Roboflow | ClearML ⭐ 新 | Comet ⭐ 新 | Deci ⭐ 新 | +| :-----------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------: | +| 将您的自定义数据集进行标注并直接导出到 YOLOv5 以进行训练 [Roboflow](https://roboflow.com/?ref=ultralytics) | 自动跟踪、可视化甚至远程训练 YOLOv5 [ClearML](https://cutt.ly/yolov5-readme-clearml)(开源!) | 永远免费,[Comet](https://bit.ly/yolov5-readme-comet)可让您保存 YOLOv5 模型、恢复训练以及交互式可视化和调试预测 | 一键自动编译量化 YOLOv5 以获得更好的推理性能[Deci](https://bit.ly/yolov5-deci-platform) | -##
Ultralytics 集线器
+##
Ultralytics HUB
-[Ultralytics 集线器](https://bit.ly/ultralytics_hub)是我们的⭐**新的**用于可视化数据集、训练 YOLOv5 🚀 模型并以无缝体验部署到现实世界的无代码解决方案。开始使用**自由的**现在! +[Ultralytics HUB](https://bit.ly/ultralytics_hub) 是我们的⭐**新的**用于可视化数据集、训练 YOLOv5 🚀 模型并以无缝体验部署到现实世界的无代码解决方案。现在开始 **免费** 使用他! ##
为什么选择 YOLOv5
-YOLOv5 被设计为超级容易上手和简单易学。我们优先考虑现实世界的结果。 +YOLOv5 超级容易上手,简单易学。我们优先考虑现实世界的结果。

- YOLOv5-P5 640 Figure + YOLOv5-P5 640 图

- Figure Notes + 图表笔记 -- **COCO AP 值**表示[map@0.5](mailto:mAP@0.5):0.95 指标在 5000 张图像上测得[COCO val2017](http://cocodataset.org)从 256 到 1536 的各种推理大小的数据集。 -- **显卡速度**测量每张图像的平均推理时间[COCO val2017](http://cocodataset.org)数据集使用[美国销售.Excelerge](https://aws.amazon.com/ec2/instance-types/p3/)批量大小为 32 的 V100 实例。 -- **高效**数据来自[谷歌/汽车](https://github.com/google/automl)批量大小为 8。 -- **复制**经过`python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt` +- **COCO AP val** 表示 mAP@0.5:0.95 指标,在 [COCO val2017](http://cocodataset.org) 数据集的 5000 张图像上测得, 图像包含 256 到 1536 各种推理大小。 +- **显卡推理速度** 为在 [COCO val2017](http://cocodataset.org) 数据集上的平均推理时间,使用 [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) V100实例,batchsize 为 32 。 +- **EfficientDet** 数据来自 [google/automl](https://github.com/google/automl) , batchsize 为32。 +- **复现命令** 为 `python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt`
-### 预训练检查点 - -| 模型 | 尺寸
(像素) | 地图
50-95 | 地图
50 | 速度
处理器b1
(小姐) | 速度
V100 b1
(小姐) | 速度
V100 b32
(小姐) | 参数
(男) | 失败者
@640(二) | -| --------------------------------------------------------------------------------------------------- | --------------- | ----------------- | ---------------- | ------------------------ | -------------------------- | --------------------------- | -------------- | ------------------- | -| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** | -| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 | -| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 | -| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 | -| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 | -| | | | | | | | | | -| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 | -| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | -| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | -| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | -| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x6.pt)
+[电讯局][tta] | 1280
1536 | 55.0
**55.8** | 72.7
**72.7** | 3136
- | 26.2
- | 19.4
- | 140.7
- | 209.8
- | +### 预训练模型 + +| 模型 | 尺寸
(像素) | mAPval
50-95 | mAPval
50 | 推理速度
CPU b1
(ms) | 推理速度
V100 b1
(ms) | 速度
V100 b32
(ms) | 参数量
(M) | FLOPs
@640 (B) | +| --------------------------------------------------------------------------------------------------- | ------------------- | -------------------- | ------------------- | ------------------------------- | -------------------------------- | ------------------------------ | ----------------- | ---------------------- | +| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** | +| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 | +| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 | +| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 | +| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 | +| | | | | | | | | | +| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 | +| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | +| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | +| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | +| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x6.pt)
+[TTA][tta] | 1280
1536 | 55.0
**55.8** | 72.7
**72.7** | 3136
- | 26.2
- | 19.4
- | 140.7
- | 209.8
- |
- Table Notes + 笔记 -- 所有检查点都使用默认设置训练到 300 个时期。纳米和小型型号使用[hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml)hyps,所有其他人都使用[hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml). -- **地图**值适用于单模型单尺度[COCO val2017](http://cocodataset.org)数据集。
重现者`python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` -- **速度**使用 a 对 COCO val 图像进行平均[美国销售.Excelerge](https://aws.amazon.com/ec2/instance-types/p3/)实例。 NMS 时间 (~1 ms/img) 不包括在内。
重现者`python val.py --data coco.yaml --img 640 --task speed --batch 1` -- **电讯局**[测试时间增加](https://github.com/ultralytics/yolov5/issues/303)包括反射和尺度增强。
重现者`python val.py --data coco.yaml --img 1536 --iou 0.7 --augment` +- 所有模型都使用默认配置,训练 300 epochs。n和s模型使用 [hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml) ,其他模型都使用 [hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml) 。 +- **mAPval**在单模型单尺度上计算,数据集使用 [COCO val2017](http://cocodataset.org) 。
复现命令 `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` +- **推理速度**在 COCO val 图像总体时间上进行平均得到,测试环境使用[AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/)实例。 NMS 时间 (大约 1 ms/img) 不包括在内。
复现命令 `python val.py --data coco.yaml --img 640 --task speed --batch 1` +- **TTA** [测试时数据增强](https://github.com/ultralytics/yolov5/issues/303) 包括反射和尺度变换。
复现命令 `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`
-##
分类⭐新
+##
分类网络 ⭐ 新
-YOLOv5[发布 v6.2](https://github.com/ultralytics/yolov5/releases)带来对分类模型训练、验证和部署的支持!查看我们的完整详细信息[发行说明](https://github.com/ultralytics/yolov5/releases/v6.2)并访问我们的[YOLOv5 分类 Colab 笔记本](https://github.com/ultralytics/yolov5/blob/master/classify/tutorial.ipynb)快速入门教程。 +YOLOv5 [release v6.2](https://github.com/ultralytics/yolov5/releases) 带来对分类模型训练、验证和部署的支持!详情请查看 [发行说明](https://github.com/ultralytics/yolov5/releases/v6.2) 或访问我们的 [YOLOv5 分类 Colab 笔记本](https://github.com/ultralytics/yolov5/blob/master/classify/tutorial.ipynb) 以快速入门。
- Classification Checkpoints + 分类网络模型
-我们使用 4xA100 实例在 ImageNet 上训练了 90 个时期的 YOLOv5-cls 分类模型,我们训练了 ResNet 和 EfficientNet 模型以及相同的默认训练设置以进行比较。我们将所有模型导出到 ONNX FP32 以进行 CPU 速度测试,并导出到 TensorRT FP16 以进行 GPU 速度测试。我们在 Google 上进行了所有速度测试[协作临](https://colab.research.google.com/signup)为了便于重现。 - -| 模型 | 尺寸
(像素) | acc
top1 | acc
烹饪 | 训练
90个纪元
4xA100(小时) | 速度
ONNX 中央处理器
(小姐) | 速度
TensorRT V100
(小姐) | 参数
(男) | 失败者
@224(乙) | -| ------------------------------------------------------------------------------------------- | --------------- | ---------------- | -------------- | ------------------------------ | ----------------------------- | -------------------------------- | -------------- | ------------------- | -| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** | -| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 | -| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 | -| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 | -| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 | -| | | | | | | | | | -| [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 | -| [Resnetzch](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 | -| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 | -| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 | -| | | | | | | | | | -| [高效网络\_b0](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 | -| [高效网络 b1](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b1.pt) | 224 | 76.4 | 
93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 | -| [我们将预测](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 | -| [高效Netb3](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 | +我们使用 4xA100 实例在 ImageNet 上训练了 90 个 epochs 得到 YOLOv5-cls 分类模型,我们训练了 ResNet 和 EfficientNet 模型以及相同的默认训练设置以进行比较。我们将所有模型导出到 ONNX FP32 以进行 CPU 速度测试,并导出到 TensorRT FP16 以进行 GPU 速度测试。为了便于重现,我们在 Google 上进行了所有速度测试 [Colab Pro](https://colab.research.google.com/signup) 。 + +| 模型 | 尺寸
(像素) | acc
top1 | acc
top5 | 训练时长
90 epochs
4xA100(小时) | 推理速度
ONNX CPU
(ms) | 推理速度
TensorRT V100
(ms) | 参数
(M) | FLOPs
@640 (B) | +| -------------------------------------------------------------------------------------------------- | ------------------- | ---------------- | ---------------- | ------------------------------------------ | --------------------------------- | -------------------------------------- | --------------- | -----------------------| +| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** | +| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 | +| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 | +| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 | +| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 | +| | | | | | | | | | +| [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 | +| [Resnetzch](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 | +| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 | +| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 | +| | | | | | | | | | +| [EfficientNet_b0](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 | +| 
[EfficientNet_b1](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 | +| [EfficientNet_b2](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 | +| [EfficientNet_b3](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 |
- Table Notes (click to expand) + Table Notes (点击以展开) -- 使用 SGD 优化器将所有检查点训练到 90 个时期`lr0=0.001`和`weight_decay=5e-5`在图像大小 224 和所有默认设置。
运行记录到[HTTPS://玩豆瓣.爱/Glenn-就ocher/yo lo V5-classifier-V6-2](https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2) -- **准确性**值适用于单模型单尺度[ImageNet-1k](https://www.image-net.org/index.php)数据集。
重现者`python classify/val.py --data ../datasets/imagenet --img 224` -- **速度**使用谷歌平均超过 100 个推理图像[协作临](https://colab.research.google.com/signup)V100 高 RAM 实例。
重现者`python classify/val.py --data ../datasets/imagenet --img 224 --batch 1` -- **出口**到 FP32 的 ONNX 和 FP16 的 TensorRT 完成`export.py`.
重现者`python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224` +- 所有模型都使用 SGD 优化器训练 90 个 epochs,都使用 `lr0=0.001` 和 `weight_decay=5e-5` 参数, 图像大小为 224 ,且都使用默认设置。
训练 log 可以查看 https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2 +- **准确性**都在单模型单尺度上计算,数据集使用 [ImageNet-1k](https://www.image-net.org/index.php) 。
复现命令 `python classify/val.py --data ../datasets/imagenet --img 224` +- **推理速度**是使用 100 个推理图像进行平均得到,测试环境使用谷歌 [Colab Pro](https://colab.research.google.com/signup) V100 高 RAM 实例。
复现命令 `python classify/val.py --data ../datasets/imagenet --img 224 --batch 1` +- **模型导出**到 FP32 的 ONNX 和 FP16 的 TensorRT 使用 `export.py` 。
复现命令 `python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224`
- Classification Usage Examples  Open In Colab + 分类训练示例  Open In Colab -### 火车 +### 训练 -YOLOv5 分类训练支持自动下载 MNIST、Fashion-MNIST、CIFAR10、CIFAR100、Imagenette、Imagewoof 和 ImageNet 数据集`--data`争论。开始使用 MNIST 进行训练`--data mnist`. +YOLOv5 分类训练支持自动下载 MNIST、Fashion-MNIST、CIFAR10、CIFAR100、Imagenette、Imagewoof 和 ImageNet 数据集,命令中使用 `--data` 即可。 MNIST 示例 `--data mnist` 。 ```bash -# Single-GPU +# 单 GPU python classify/train.py --model yolov5s-cls.pt --data cifar100 --epochs 5 --img 224 --batch 128 -# Multi-GPU DDP +# 多 GPU, DDP 模式 python -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3 ``` -### 瓦尔 +### 验证 在 ImageNet-1k 数据集上验证 YOLOv5m-cls 的准确性: @@ -395,7 +395,7 @@ python classify/predict.py --weights yolov5s-cls.pt --data data/images/bus.jpg model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s-cls.pt') # load from PyTorch Hub ``` -### 出口 +### 模型导出 将一组经过训练的 YOLOv5s-cls、ResNet 和 EfficientNet 模型导出到 ONNX 和 TensorRT: @@ -407,7 +407,7 @@ python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --inclu ##
环境
-在几秒钟内开始使用我们经过验证的环境。单击下面的每个图标了解详细信息。 +使用下面我们经过验证的环境,在几秒钟内开始使用 YOLOv5 。单击下面的图标了解详细信息。 -##
应用程序
+##
APP
-在您的 iOS 或 Android 设备上运行 YOLOv5 模型[Ultralytics 应用程序](https://ultralytics.com/app_install)! +通过下载 [Ultralytics APP](https://ultralytics.com/app_install) ,以在您的 iOS 或 Android 设备上运行 YOLOv5 模型! Ultralytics mobile app ##
贡献
-我们喜欢您的意见!我们希望尽可能简单和透明地为 YOLOv5 做出贡献。请看我们的[投稿指南](CONTRIBUTING.md)开始,并填写[YOLOv5调查](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey)向我们发送您的体验反馈。感谢我们所有的贡献者! +我们喜欢您的意见或建议!我们希望尽可能简单和透明地为 YOLOv5 做出贡献。请看我们的 [投稿指南](CONTRIBUTING.md),并填写 [YOLOv5调查](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) 向我们发送您的体验反馈。感谢我们所有的贡献者!
-##
执照
+##
License
-YOLOv5 在两种不同的许可下可用: +YOLOv5 在两种不同的 License 下可用: -- **GPL-3.0 许可证**: 看[执照](https://github.com/ultralytics/yolov5/blob/master/LICENSE)文件的详细信息。 -- **企业执照**:在没有 GPL-3.0 开源要求的情况下为商业产品开发提供更大的灵活性。典型用例是将 Ultralytics 软件和 AI 模型嵌入到商业产品和应用程序中。在以下位置申请企业许可证[Ultralytics 许可](https://ultralytics.com/license). +- **GPL-3.0 License**: 查看 [License](https://github.com/ultralytics/yolov5/blob/master/LICENSE) 文件的详细信息。 +- **企业License**:在没有 GPL-3.0 开源要求的情况下为商业产品开发提供更大的灵活性。典型用例是将 Ultralytics 软件和 AI 模型嵌入到商业产品和应用程序中。在以下位置申请企业许可证 [Ultralytics 许可](https://ultralytics.com/license) 。 -##
接触
+##
联系我们
-对于 YOLOv5 错误和功能请求,请访问[GitHub 问题](https://github.com/ultralytics/yolov5/issues).如需专业支持,请[联系我们](https://ultralytics.com/contact). +若发现 YOLOv5 的 bug 或有功能需求,请访问 [GitHub 问题](https://github.com/ultralytics/yolov5/issues) 。如需专业支持,请 [联系我们](https://ultralytics.com/contact) 。
From 1ae91940abe9ca3e064784bb18c12271ab3157b4 Mon Sep 17 00:00:00 2001 From: nerdyespresso <106761627+nerdyespresso@users.noreply.github.com> Date: Thu, 15 Dec 2022 07:56:42 -0500 Subject: [PATCH 002/128] Update Comet hyperlinks (#10500) * Update README.md Signed-off-by: nerdyespresso <106761627+nerdyespresso@users.noreply.github.com> * Update README.md Signed-off-by: nerdyespresso <106761627+nerdyespresso@users.noreply.github.com> * Update README.md Signed-off-by: nerdyespresso <106761627+nerdyespresso@users.noreply.github.com> * Update README.md Signed-off-by: nerdyespresso <106761627+nerdyespresso@users.noreply.github.com> * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher Signed-off-by: nerdyespresso <106761627+nerdyespresso@users.noreply.github.com> Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- README.md | 2 +- classify/tutorial.ipynb | 4 ++-- segment/tutorial.ipynb | 4 ++-- tutorial.ipynb | 2 +- utils/loggers/comet/README.md | 12 ++++++------ 5 files changed, 12 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index 21bdc83f349e..56015b239fc9 100644 --- a/README.md +++ b/README.md @@ -264,7 +264,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - |Roboflow|ClearML ⭐ NEW|Comet ⭐ NEW|Deci ⭐ NEW| |:-:|:-:|:-:|:-:| -|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Free forever, [Comet](https://bit.ly/yolov5-readme-comet) lets you save YOLOv5 models, resume training, and interactively visualise and debug predictions|Automatically compile and quantize YOLOv5 for better inference performance in one 
click at [Deci](https://bit.ly/yolov5-deci-platform)| +|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Free forever, [Comet](https://bit.ly/yolov5-readme-comet2) lets you save YOLOv5 models, resume training, and interactively visualise and debug predictions|Automatically compile and quantize YOLOv5 for better inference performance in one click at [Deci](https://bit.ly/yolov5-deci-platform)| ##
Ultralytics HUB
diff --git a/classify/tutorial.ipynb b/classify/tutorial.ipynb index c6f5d0d88a2d..94bafba00204 100644 --- a/classify/tutorial.ipynb +++ b/classify/tutorial.ipynb @@ -1341,7 +1341,7 @@ }, "source": [ "## Comet Logging and Visualization 🌟 NEW\n", - "[Comet](https://bit.ly/yolov5-readme-comet) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", + "[Comet](https://bit.ly/yolov5-readme-comet2) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", "\n", "Getting started is easy:\n", "```shell\n", @@ -1476,4 +1476,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} \ No newline at end of file +} diff --git a/segment/tutorial.ipynb b/segment/tutorial.ipynb index 09ca963d4b98..e1179ffc1cc6 100644 --- a/segment/tutorial.ipynb +++ b/segment/tutorial.ipynb @@ -454,7 +454,7 @@ }, "source": [ "## Comet Logging and Visualization 🌟 NEW\n", - "[Comet](https://bit.ly/yolov5-readme-comet) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! 
\n", + "[Comet](https://bit.ly/yolov5-readme-comet2) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", "\n", "Getting started is easy:\n", "```shell\n", @@ -590,4 +590,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} \ No newline at end of file +} diff --git a/tutorial.ipynb b/tutorial.ipynb index 6ab0a33366a5..cebcee3dfd24 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -860,7 +860,7 @@ "cell_type": "markdown", "source": [ "## Comet Logging and Visualization 🌟 NEW\n", - "[Comet](https://bit.ly/yolov5-readme-comet) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", + "[Comet](https://bit.ly/yolov5-readme-comet2) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! 
\n", "\n", "Getting started is easy:\n", "```shell\n", diff --git a/utils/loggers/comet/README.md b/utils/loggers/comet/README.md index 8f206cd9830e..8a361e2b211d 100644 --- a/utils/loggers/comet/README.md +++ b/utils/loggers/comet/README.md @@ -2,13 +2,13 @@ # YOLOv5 with Comet -This guide will cover how to use YOLOv5 with [Comet](https://bit.ly/yolov5-readme-comet) +This guide will cover how to use YOLOv5 with [Comet](https://bit.ly/yolov5-readme-comet2) # About Comet Comet builds tools that help data scientists, engineers, and team leaders accelerate and optimize machine learning and deep learning models. -Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! +Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://www.comet.com/docs/v2/guides/comet-dashboard/code-panels/about-panels/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! # Getting Started @@ -54,7 +54,7 @@ That's it! Comet will automatically log your hyperparameters, command line argum yolo-ui # Try out an Example! 
-Check out an example of a [completed run here](https://www.comet.com/examples/comet-example-yolov5/a0e29e0e9b984e4a822db2a62d0cb357?experiment-tab=chart&showOutliers=true&smoothing=0&transformY=smoothing&xAxis=step&ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration) +Check out an example of a [completed run here](https://www.comet.com/examples/comet-example-yolov5/a0e29e0e9b984e4a822db2a62d0cb357?experiment-tab=chart&showOutliers=true&smoothing=0&transformY=smoothing&xAxis=step&utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github) Or better yet, try it out yourself in this Colab Notebook @@ -119,7 +119,7 @@ You can control the frequency of logged predictions and the associated images by **Note:** The YOLOv5 validation dataloader will default to a batch size of 32, so you will have to set the logging frequency accordingly. -Here is an [example project using the Panel](https://www.comet.com/examples/comet-example-yolov5?shareable=YcwMiJaZSXfcEXpGOHDD12vA1&ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration) +Here is an [example project using the Panel](https://www.comet.com/examples/comet-example-yolov5?shareable=YcwMiJaZSXfcEXpGOHDD12vA1&utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github) ```shell @@ -161,7 +161,7 @@ env COMET_LOG_PER_CLASS_METRICS=true python train.py \ ## Uploading a Dataset to Comet Artifacts -If you would like to store your data using [Comet Artifacts](https://www.comet.com/docs/v2/guides/data-management/using-artifacts/#learn-more?ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration), you can do so using the `upload_dataset` flag. 
+If you would like to store your data using [Comet Artifacts](https://www.comet.com/docs/v2/guides/data-management/using-artifacts/#learn-more?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github), you can do so using the `upload_dataset` flag. The dataset be organized in the way described in the [YOLOv5 documentation](https://docs.ultralytics.com/tutorials/train-custom-datasets/#3-organize-directories). The dataset config `yaml` file must follow the same format as that of the `coco128.yaml` file. @@ -251,6 +251,6 @@ comet optimizer -j utils/loggers/comet/hpo.py \ ### Visualizing Results -Comet provides a number of ways to visualize the results of your sweep. Take a look at a [project with a completed sweep here](https://www.comet.com/examples/comet-example-yolov5/view/PrlArHGuuhDTKC1UuBmTtOSXD/panels?ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration) +Comet provides a number of ways to visualize the results of your sweep. Take a look at a [project with a completed sweep here](https://www.comet.com/examples/comet-example-yolov5/view/PrlArHGuuhDTKC1UuBmTtOSXD/panels?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github) hyperparameter-yolo From b564c1f3653a9b11038a80e348a34afbf59943be Mon Sep 17 00:00:00 2001 From: Yonghye Kwon Date: Sat, 17 Dec 2022 20:05:00 +0900 Subject: [PATCH 003/128] Check `conf_thres` and `iou_thres` prior to use (#10515) * Checks conf_thres and iou_thres at beign Why checks conf_thres after operation with it? 
Signed-off-by: Yonghye Kwon * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update general.py Signed-off-by: Yonghye Kwon Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- utils/general.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/utils/general.py b/utils/general.py index e5a843c4a758..6145801ca47f 100644 --- a/utils/general.py +++ b/utils/general.py @@ -898,6 +898,9 @@ def non_max_suppression( list of detections, on (n,6) tensor per image [xyxy, conf, cls] """ + # Checks + assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0' + assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0' if isinstance(prediction, (list, tuple)): # YOLOv5 model in validation model, output = (inference_out, loss_out) prediction = prediction[0] # select only inference output @@ -909,10 +912,6 @@ def non_max_suppression( nc = prediction.shape[2] - nm - 5 # number of classes xc = prediction[..., 4] > conf_thres # candidates - # Checks - assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0' - assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0' - # Settings # min_wh = 2 # (pixels) minimum box width and height max_wh = 7680 # (pixels) maximum box width and height From 8d65f9d8ce274f78949ab88b7359580cc8cabacc Mon Sep 17 00:00:00 2001 From: Yonghye Kwon Date: Sat, 17 Dec 2022 20:10:26 +0900 Subject: [PATCH 004/128] Support extensive shape for functions related to bounding box localization (#10516) * support extensive shape for functions related to bounding box localization Signed-off-by: Yonghye Kwon * merge exp branch updates Signed-off-by: Yonghye Kwon Co-authored-by: Glenn Jocher --- utils/general.py | 54 
++++++++++++++++++++++++------------------------ 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/utils/general.py b/utils/general.py index 6145801ca47f..744abb439ed1 100644 --- a/utils/general.py +++ b/utils/general.py @@ -750,30 +750,30 @@ def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper) def xyxy2xywh(x): # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center - y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center - y[:, 2] = x[:, 2] - x[:, 0] # width - y[:, 3] = x[:, 3] - x[:, 1] # height + y[..., 0] = (x[..., 0] + x[..., 2]) / 2 # x center + y[..., 1] = (x[..., 1] + x[..., 3]) / 2 # y center + y[..., 2] = x[..., 2] - x[..., 0] # width + y[..., 3] = x[..., 3] - x[..., 1] # height return y def xywh2xyxy(x): # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x - y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y - y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x - y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y + y[..., 0] = x[..., 0] - x[..., 2] / 2 # top left x + y[..., 1] = x[..., 1] - x[..., 3] / 2 # top left y + y[..., 2] = x[..., 0] + x[..., 2] / 2 # bottom right x + y[..., 3] = x[..., 1] + x[..., 3] / 2 # bottom right y return y def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0): # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw # top left x - y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh # top left y - y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw # bottom right x - y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh # bottom right y + y[..., 0] = w * (x[..., 0] - x[..., 2] / 2) + 
padw # top left x + y[..., 1] = h * (x[..., 1] - x[..., 3] / 2) + padh # top left y + y[..., 2] = w * (x[..., 0] + x[..., 2] / 2) + padw # bottom right x + y[..., 3] = h * (x[..., 1] + x[..., 3] / 2) + padh # bottom right y return y @@ -782,18 +782,18 @@ def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0): if clip: clip_boxes(x, (h - eps, w - eps)) # warning: inplace clip y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - y[:, 0] = ((x[:, 0] + x[:, 2]) / 2) / w # x center - y[:, 1] = ((x[:, 1] + x[:, 3]) / 2) / h # y center - y[:, 2] = (x[:, 2] - x[:, 0]) / w # width - y[:, 3] = (x[:, 3] - x[:, 1]) / h # height + y[..., 0] = ((x[..., 0] + x[..., 2]) / 2) / w # x center + y[..., 1] = ((x[..., 1] + x[..., 3]) / 2) / h # y center + y[..., 2] = (x[..., 2] - x[..., 0]) / w # width + y[..., 3] = (x[..., 3] - x[..., 1]) / h # height return y def xyn2xy(x, w=640, h=640, padw=0, padh=0): # Convert normalized segments into pixel segments, shape (n,2) y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - y[:, 0] = w * x[:, 0] + padw # top left x - y[:, 1] = h * x[:, 1] + padh # top left y + y[..., 0] = w * x[..., 0] + padw # top left x + y[..., 1] = h * x[..., 1] + padh # top left y return y @@ -833,9 +833,9 @@ def scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None): gain = ratio_pad[0][0] pad = ratio_pad[1] - boxes[:, [0, 2]] -= pad[0] # x padding - boxes[:, [1, 3]] -= pad[1] # y padding - boxes[:, :4] /= gain + boxes[..., [0, 2]] -= pad[0] # x padding + boxes[..., [1, 3]] -= pad[1] # y padding + boxes[..., :4] /= gain clip_boxes(boxes, img0_shape) return boxes @@ -862,13 +862,13 @@ def scale_segments(img1_shape, segments, img0_shape, ratio_pad=None, normalize=F def clip_boxes(boxes, shape): # Clip boxes (xyxy) to image shape (height, width) if isinstance(boxes, torch.Tensor): # faster individually - boxes[:, 0].clamp_(0, shape[1]) # x1 - boxes[:, 1].clamp_(0, shape[0]) # y1 - boxes[:, 2].clamp_(0, shape[1]) # x2 - boxes[:, 3].clamp_(0, shape[0]) 
# y2 + boxes[..., 0].clamp_(0, shape[1]) # x1 + boxes[..., 1].clamp_(0, shape[0]) # y1 + boxes[..., 2].clamp_(0, shape[1]) # x2 + boxes[..., 3].clamp_(0, shape[0]) # y2 else: # np.array (faster grouped) - boxes[:, [0, 2]] = boxes[:, [0, 2]].clip(0, shape[1]) # x1, x2 - boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, shape[0]) # y1, y2 + boxes[..., [0, 2]] = boxes[..., [0, 2]].clip(0, shape[1]) # x1, x2 + boxes[..., [1, 3]] = boxes[..., [1, 3]].clip(0, shape[0]) # y1, y2 def clip_segments(segments, shape): From b2f94e8c356083bb85d76a60ea2b54d5ad9fbe36 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 17 Dec 2022 12:26:57 +0100 Subject: [PATCH 005/128] Update to ONNX opset 17 (#10522) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- export.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/export.py b/export.py index 928992903b0b..baf86f1d9297 100644 --- a/export.py +++ b/export.py @@ -624,7 +624,7 @@ def parse_opt(): parser.add_argument('--int8', action='store_true', help='CoreML/TF INT8 quantization') parser.add_argument('--dynamic', action='store_true', help='ONNX/TF/TensorRT: dynamic axes') parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model') - parser.add_argument('--opset', type=int, default=12, help='ONNX: opset version') + parser.add_argument('--opset', type=int, default=17, help='ONNX: opset version') parser.add_argument('--verbose', action='store_true', help='TensorRT: verbose log') parser.add_argument('--workspace', type=int, default=4, help='TensorRT: workspace size (GB)') parser.add_argument('--nms', action='store_true', help='TF: add NMS to model') From 43623729cc634d690cece1f1d015e4d59e0b9d98 Mon Sep 17 00:00:00 2001 From: Wang Xin Date: Sat, 17 Dec 2022 19:55:08 +0800 Subject: [PATCH 006/128] Update train.py (#10485) Setting `master_port` to 1 may cause `Permission denied` due to failure to bind the port. So it is better to set it to a port greater than 1024. 
Signed-off-by: Wang Xin Signed-off-by: Wang Xin Co-authored-by: Ayush Chaurasia --- classify/train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/classify/train.py b/classify/train.py index a50845a4f781..4767be77bd61 100644 --- a/classify/train.py +++ b/classify/train.py @@ -6,7 +6,7 @@ $ python classify/train.py --model yolov5s-cls.pt --data imagenette160 --epochs 5 --img 224 Usage - Multi-GPU DDP training: - $ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3 + $ python -m torch.distributed.run --nproc_per_node 4 --master_port 2022 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3 Datasets: --data mnist, fashion-mnist, cifar10, cifar100, imagenette, imagewoof, imagenet, or 'path/to/data' YOLOv5-cls models: --model yolov5n-cls.pt, yolov5s-cls.pt, yolov5m-cls.pt, yolov5l-cls.pt, yolov5x-cls.pt From 2c35c1b318ecd4856275039220c052a976d2cfe2 Mon Sep 17 00:00:00 2001 From: Yonghye Kwon Date: Sun, 18 Dec 2022 21:03:01 +0900 Subject: [PATCH 007/128] Limit detections without explicit if condition (#10502) * limit detections without explicit if condition Signed-off-by: Yonghye Kwon * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * cleanup indexing code for limit detections Signed-off-by: Yonghye Kwon Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- utils/general.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/utils/general.py b/utils/general.py index 744abb439ed1..70b6f6446f23 100644 --- a/utils/general.py +++ b/utils/general.py @@ -978,8 +978,7 @@ def non_max_suppression( c = x[:, 5:6] * (0 if agnostic else max_wh) # classes boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores i = torchvision.ops.nms(boxes, scores, iou_thres) # 
NMS - if i.shape[0] > max_det: # limit detections - i = i[:max_det] + i = i[:max_det] # limit detections if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean) # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4) iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix From b2a0f1cdc579bd81b3c4543752abaa4a90a53c8b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 18 Dec 2022 20:06:01 +0100 Subject: [PATCH 008/128] Update `onnx>=1.12.0` (#10526) --- export.py | 2 +- requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/export.py b/export.py index baf86f1d9297..7910178b2338 100644 --- a/export.py +++ b/export.py @@ -132,7 +132,7 @@ def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:' @try_export def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr('ONNX:')): # YOLOv5 ONNX export - check_requirements('onnx') + check_requirements('onnx>=1.12.0') import onnx LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...') diff --git a/requirements.txt b/requirements.txt index 85eb839df8a0..4a8649c696a8 100644 --- a/requirements.txt +++ b/requirements.txt @@ -29,7 +29,7 @@ seaborn>=0.11.0 # Export ---------------------------------------------------------------------- # coremltools>=6.0 # CoreML export -# onnx>=1.9.0 # ONNX export +# onnx>=1.12.0 # ONNX export # onnx-simplifier>=0.4.1 # ONNX simplifier # nvidia-pyindex # TensorRT export # nvidia-tensorrt # TensorRT export From 10e93d295fed1459666409751b4a897521c31b90 Mon Sep 17 00:00:00 2001 From: Yonghye Kwon Date: Mon, 19 Dec 2022 18:27:34 +0900 Subject: [PATCH 009/128] Set a seed of generator with an option for more randomness when training several models with different seeds (#10486) * set seed with parameter Signed-off-by: Yonghye Kwon * make seed to be a large number * set seed with a parameter * set a seed of dataloader with opt for more randomness Signed-off-by: Yonghye Kwon Co-authored-by: Glenn 
Jocher --- train.py | 3 ++- utils/dataloaders.py | 5 +++-- utils/segment/dataloaders.py | 5 +++-- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/train.py b/train.py index 8b5446e58f2d..5d75f22b6335 100644 --- a/train.py +++ b/train.py @@ -198,7 +198,8 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio image_weights=opt.image_weights, quad=opt.quad, prefix=colorstr('train: '), - shuffle=True) + shuffle=True, + seed=opt.seed) labels = np.concatenate(dataset.labels, 0) mlc = int(labels[:, 0].max()) # max label class assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. Possible class labels are 0-{nc - 1}' diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 6d2b27ea5e60..302cc3300d35 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -115,7 +115,8 @@ def create_dataloader(path, image_weights=False, quad=False, prefix='', - shuffle=False): + shuffle=False, + seed=0): if rect and shuffle: LOGGER.warning('WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False') shuffle = False @@ -140,7 +141,7 @@ def create_dataloader(path, sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates generator = torch.Generator() - generator.manual_seed(6148914691236517205 + RANK) + generator.manual_seed(6148914691236517205 + seed + RANK) return loader(dataset, batch_size=batch_size, shuffle=shuffle and sampler is None, diff --git a/utils/segment/dataloaders.py b/utils/segment/dataloaders.py index 9de6f0fbf903..d66b36115e3f 100644 --- a/utils/segment/dataloaders.py +++ b/utils/segment/dataloaders.py @@ -37,7 +37,8 @@ def create_dataloader(path, prefix='', shuffle=False, mask_downsample_ratio=1, - overlap_mask=False): + overlap_mask=False, + seed=0): if rect and shuffle: LOGGER.warning('WARNING ⚠️ --rect is incompatible with DataLoader shuffle, 
setting shuffle=False') shuffle = False @@ -64,7 +65,7 @@ def create_dataloader(path, sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates generator = torch.Generator() - generator.manual_seed(6148914691236517205 + RANK) + generator.manual_seed(6148914691236517205 + seed + RANK) return loader( dataset, batch_size=batch_size, From 5545ff3545d886417b4eff12203d1af4d758cc10 Mon Sep 17 00:00:00 2001 From: Yonghye Kwon Date: Tue, 20 Dec 2022 01:19:14 +0900 Subject: [PATCH 010/128] Sort by confidence and remove excess boxes without explicit if (#10517) * sort by confidence and remove excess boxes without explicit if Signed-off-by: Yonghye Kwon * cleanup indexing boxes for remove excess boxes it is related to https://github.com/ultralytics/yolov5/pull/10502. Signed-off-by: Yonghye Kwon Co-authored-by: Glenn Jocher --- utils/general.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/utils/general.py b/utils/general.py index 70b6f6446f23..0bbcb6e7334c 100644 --- a/utils/general.py +++ b/utils/general.py @@ -969,10 +969,7 @@ def non_max_suppression( n = x.shape[0] # number of boxes if not n: # no boxes continue - elif n > max_nms: # excess boxes - x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence - else: - x = x[x[:, 4].argsort(descending=True)] # sort by confidence + x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence and remove excess boxes # Batched NMS c = x[:, 5:6] * (0 if agnostic else max_wh) # classes From f72f0fec980b35d7f9575d15b326f529b5a9ac0d Mon Sep 17 00:00:00 2001 From: Amir Pourmand Date: Tue, 20 Dec 2022 18:37:43 +0330 Subject: [PATCH 011/128] Add Albumentation Default hyperparameter file (#10529) * add albumentation hyps * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Rename 
hyp.noAugmentation.yaml to hyp.no-augmentation.yaml * Update hyp.no-augmentation.yaml Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- data/hyps/hyp.no-augmentation.yaml | 35 ++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) create mode 100644 data/hyps/hyp.no-augmentation.yaml diff --git a/data/hyps/hyp.no-augmentation.yaml b/data/hyps/hyp.no-augmentation.yaml new file mode 100644 index 000000000000..8fbd5b262afa --- /dev/null +++ b/data/hyps/hyp.no-augmentation.yaml @@ -0,0 +1,35 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# Hyperparameters when using Albumentations frameworks +# python train.py --hyp hyp.no-augmentation.yaml +# See https://github.com/ultralytics/yolov5/pull/3882 for YOLOv5 + Albumentations Usage examples + +lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3) +lrf: 0.1 # final OneCycleLR learning rate (lr0 * lrf) +momentum: 0.937 # SGD momentum/Adam beta1 +weight_decay: 0.0005 # optimizer weight decay 5e-4 +warmup_epochs: 3.0 # warmup epochs (fractions ok) +warmup_momentum: 0.8 # warmup initial momentum +warmup_bias_lr: 0.1 # warmup initial bias lr +box: 0.05 # box loss gain +cls: 0.3 # cls loss gain +cls_pw: 1.0 # cls BCELoss positive_weight +obj: 0.7 # obj loss gain (scale with pixels) +obj_pw: 1.0 # obj BCELoss positive_weight +iou_t: 0.20 # IoU training threshold +anchor_t: 4.0 # anchor-multiple threshold +# anchors: 3 # anchors per output layer (0 to ignore) +# this parameters are all zero since we want to use albumentation framework +fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5) +hsv_h: 0 # image HSV-Hue augmentation (fraction) +hsv_s: 00 # image HSV-Saturation augmentation (fraction) +hsv_v: 0 # image HSV-Value augmentation (fraction) +degrees: 0.0 # image rotation (+/- deg) +translate: 0 # image translation (+/- fraction) +scale: 0 # image scale (+/- gain) +shear: 0 # image shear (+/- deg) +perspective: 0.0 # image 
perspective (+/- fraction), range 0-0.001 +flipud: 0.0 # image flip up-down (probability) +fliplr: 0.0 # image flip left-right (probability) +mosaic: 0.0 # image mosaic (probability) +mixup: 0.0 # image mixup (probability) +copy_paste: 0.0 # segment copy-paste (probability) From 887d95296642b2fdee1cafa80c0c59618ca3c2e7 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 21 Dec 2022 02:17:19 +0100 Subject: [PATCH 012/128] Created using Colaboratory --- segment/tutorial.ipynb | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/segment/tutorial.ipynb b/segment/tutorial.ipynb index e1179ffc1cc6..dc6599415480 100644 --- a/segment/tutorial.ipynb +++ b/segment/tutorial.ipynb @@ -36,7 +36,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -94,7 +94,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -149,7 +149,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -176,7 +176,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -264,7 +264,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -454,7 +454,8 @@ }, "source": [ "## Comet Logging and Visualization 🌟 NEW\n", - "[Comet](https://bit.ly/yolov5-readme-comet2) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! 
Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", + "\n", + "[Comet](https://www.comet.com/site/lp/yolov5-with-comet/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://www.comet.com/docs/v2/guides/comet-dashboard/code-panels/about-panels/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes!\n", "\n", "Getting started is easy:\n", "```shell\n", @@ -462,11 +463,11 @@ "export COMET_API_KEY= # 2. paste API key\n", "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n", "```\n", - "\n", - "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://bit.ly/yolov5-colab-comet-docs). Get started by trying out the Comet Colab Notebook:\n", + "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://www.comet.com/docs/v2/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab). 
Get started by trying out the Comet Colab Notebook:\n", "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", "\n", - "\"yolo-ui\"" + "\n", + "\"Comet" ] }, { @@ -590,4 +591,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} +} \ No newline at end of file From c765b8c274c78676ae351f159953652152725fcc Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 21 Dec 2022 02:18:09 +0100 Subject: [PATCH 013/128] Created using Colaboratory --- classify/tutorial.ipynb | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/classify/tutorial.ipynb b/classify/tutorial.ipynb index 94bafba00204..06af62a1b4c1 100644 --- a/classify/tutorial.ipynb +++ b/classify/tutorial.ipynb @@ -36,7 +36,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -94,7 +94,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -149,7 +149,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -183,7 +183,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -1269,7 +1269,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -1341,7 +1341,8 @@ }, "source": [ "## Comet Logging and Visualization 🌟 NEW\n", - "[Comet](https://bit.ly/yolov5-readme-comet2) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! 
Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", + "\n", + "[Comet](https://www.comet.com/site/lp/yolov5-with-comet/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://www.comet.com/docs/v2/guides/comet-dashboard/code-panels/about-panels/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes!\n", "\n", "Getting started is easy:\n", "```shell\n", @@ -1349,11 +1350,11 @@ "export COMET_API_KEY= # 2. paste API key\n", "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n", "```\n", - "\n", - "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://bit.ly/yolov5-colab-comet-docs). Get started by trying out the Comet Colab Notebook:\n", + "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://www.comet.com/docs/v2/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab). 
Get started by trying out the Comet Colab Notebook:\n", "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", "\n", - "\"yolo-ui\"" + "\n", + "\"Comet" ] }, { @@ -1476,4 +1477,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} +} \ No newline at end of file From 96a71b17a276fa0a0b6fbdf68d579ce0603bfa2f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 21 Dec 2022 02:19:45 +0100 Subject: [PATCH 014/128] Created using Colaboratory --- tutorial.ipynb | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index cebcee3dfd24..e83617e9dce7 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -412,7 +412,7 @@ "import utils\n", "display = utils.notebook_init() # checks" ], - "execution_count": 1, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -465,7 +465,7 @@ "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images\n", "# display.Image(filename='runs/detect/exp/zidane.jpg', width=600)" ], - "execution_count": 2, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -535,7 +535,7 @@ "torch.hub.download_url_to_file('https://ultralytics.com/assets/coco2017val.zip', 'tmp.zip') # download (780M - 5000 images)\n", "!unzip -q tmp.zip -d ../datasets && rm tmp.zip # unzip" ], - "execution_count": 3, + "execution_count": null, "outputs": [ { "output_type": "display_data", @@ -566,7 +566,7 @@ "# Validate YOLOv5s on COCO val\n", "!python val.py --weights yolov5s.pt --data coco.yaml --img 640 --half" ], - "execution_count": 4, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -682,7 +682,7 @@ "# Train YOLOv5s on COCO128 for 3 epochs\n", "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache" ], - "execution_count": 5, + "execution_count": null, "outputs": [ { "output_type": 
"stream", @@ -860,7 +860,8 @@ "cell_type": "markdown", "source": [ "## Comet Logging and Visualization 🌟 NEW\n", - "[Comet](https://bit.ly/yolov5-readme-comet2) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", + "\n", + "[Comet](https://www.comet.com/site/lp/yolov5-with-comet/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://www.comet.com/docs/v2/guides/comet-dashboard/code-panels/about-panels/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes!\n", "\n", "Getting started is easy:\n", "```shell\n", @@ -868,11 +869,11 @@ "export COMET_API_KEY= # 2. paste API key\n", "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n", "```\n", - "\n", - "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://bit.ly/yolov5-colab-comet-docs). 
Get started by trying out the Comet Colab Notebook:\n", + "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://www.comet.com/docs/v2/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab). Get started by trying out the Comet Colab Notebook:\n", "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", "\n", - "\"yolo-ui\"" + "\n", + "\"Comet" ], "metadata": { "id": "nWOsI5wJR1o3" @@ -972,4 +973,4 @@ "outputs": [] } ] -} +} \ No newline at end of file From 2370a5513ebf67bd10b8d15fd6353e008380bc43 Mon Sep 17 00:00:00 2001 From: "Mr.Li" <1055271769@qq.com> Date: Thu, 22 Dec 2022 21:55:09 +0800 Subject: [PATCH 015/128] Bugfix: update dataloaders.py to fix "resize to 0" (#10558) * fix bug "resize to 0" * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Use math.ceil() for resize to enforce min floor of 1 pixel Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- utils/dataloaders.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 302cc3300d35..cbb3114e94d8 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -737,7 +737,7 @@ def load_image(self, i): r = self.img_size / max(h0, w0) # ratio if r != 1: # if sizes are not equal interp = cv2.INTER_LINEAR if (self.augment or r > 1) else cv2.INTER_AREA - im = cv2.resize(im, (int(w0 * r), int(h0 * r)), interpolation=interp) + im = cv2.resize(im, (math.ceil(w0 * r), math.ceil(h0 * r)), interpolation=interp) return 
im, (h0, w0), im.shape[:2] # im, hw_original, hw_resized return self.ims[i], self.im_hw0[i], self.im_hw[i] # im, hw_original, hw_resized From 5f8054c47c4938c6df6c3f1344de774f15a18404 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 24 Dec 2022 18:15:33 +0100 Subject: [PATCH 016/128] FROM nvcr.io/nvidia/pytorch:22.12-py3 (#10588) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index 1ecf4c64f75f..26b3439c1941 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -3,7 +3,7 @@ # Image is CUDA-optimized for YOLOv5 single/multi-GPU training and inference # Start FROM NVIDIA PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch -FROM nvcr.io/nvidia/pytorch:22.11-py3 +FROM nvcr.io/nvidia/pytorch:22.12-py3 RUN rm -rf /opt/pytorch # remove 1.2GB dir # Downloads to user config dir From 3c1afd9ab69f289f46f6ad291e7be3cae15f6c35 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 26 Dec 2022 14:54:43 +0100 Subject: [PATCH 017/128] ENV OMP_NUM_THREADS=1 (#10593) @Laughing-q @AyushExel setting to 1 due to recent issues Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index 26b3439c1941..e0d4411118f0 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -29,7 +29,7 @@ WORKDIR /usr/src/app RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app # Set environment variables -ENV OMP_NUM_THREADS=8 +ENV OMP_NUM_THREADS=1 # Usage Examples ------------------------------------------------------------------------------------------------------- From e72dc1fabaaa47273a825f35ba3a8884bcc2e16b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 27 Dec 2022 14:32:21 +0100 Subject: [PATCH 018/128] Dockerfile uninstall torch 
nightly in favor of stable (#10604) @AyushExel @Laughing-q fix for Docker error ``` AttributeError: Can't get attribute '_rebuild_parameter_v2' on ``` Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index e0d4411118f0..abc3da0ee502 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -12,10 +12,10 @@ ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Aria # Install linux packages RUN apt update && apt install --no-install-recommends -y zip htop screen libgl1-mesa-glx -# Install pip packages +# Install pip packages (uninstall torch nightly in favor of stable) COPY requirements.txt . RUN python -m pip install --upgrade pip wheel -RUN pip uninstall -y Pillow torchtext # torch torchvision +RUN pip uninstall -y Pillow torchtext torch torchvision RUN pip install --no-cache -r requirements.txt ultralytics albumentations comet gsutil notebook Pillow>=9.1.0 \ 'opencv-python<4.6.0.66' \ --extra-index-url https://download.pytorch.org/whl/cu113 From b1e997642cec09f55ce71af8af874b9e7463aeba Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 27 Dec 2022 14:36:32 +0100 Subject: [PATCH 019/128] Bump actions/stale from 6 to 7 (#10590) Bumps [actions/stale](https://github.com/actions/stale) from 6 to 7. - [Release notes](https://github.com/actions/stale/releases) - [Changelog](https://github.com/actions/stale/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/stale/compare/v6...v7) --- updated-dependencies: - dependency-name: actions/stale dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- .github/workflows/stale.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 9067c343608b..b21e9c00e6c5 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -9,7 +9,7 @@ jobs: stale: runs-on: ubuntu-latest steps: - - uses: actions/stale@v6 + - uses: actions/stale@v7 with: repo-token: ${{ secrets.GITHUB_TOKEN }} stale-issue-message: | From 8ca182613499c323a411f559b7b5ea072122c897 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 27 Dec 2022 15:41:03 +0100 Subject: [PATCH 020/128] Update `pycocotools>=2.0.6` (#10605) * Update `pycocotools>=2.0.6` Signed-off-by: Glenn Jocher * Update val.py Signed-off-by: Glenn Jocher * Update val.py Signed-off-by: Glenn Jocher * Update Dockerfile Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- requirements.txt | 2 +- segment/val.py | 2 +- utils/docker/Dockerfile | 4 ++-- val.py | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/requirements.txt b/requirements.txt index 4a8649c696a8..c6bd0f26cabb 100644 --- a/requirements.txt +++ b/requirements.txt @@ -44,6 +44,6 @@ seaborn>=0.11.0 # Extras ---------------------------------------------------------------------- # mss # screenshots # albumentations>=1.0.3 -# pycocotools>=2.0 # COCO mAP +# pycocotools>=2.0.6 # COCO mAP # roboflow # ultralytics # HUB https://hub.ultralytics.com diff --git a/segment/val.py b/segment/val.py index 5cf8ae8b41c1..248d2bee9be1 100644 --- a/segment/val.py +++ b/segment/val.py @@ -159,7 +159,7 @@ def run( callbacks=Callbacks(), ): if save_json: - check_requirements(['pycocotools']) + check_requirements('pycocotools>=2.0.6') process = process_mask_native # more accurate else: process = process_mask # faster diff --git a/utils/docker/Dockerfile 
b/utils/docker/Dockerfile index abc3da0ee502..6f9de5208e7f 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -16,8 +16,8 @@ RUN apt update && apt install --no-install-recommends -y zip htop screen libgl1- COPY requirements.txt . RUN python -m pip install --upgrade pip wheel RUN pip uninstall -y Pillow torchtext torch torchvision -RUN pip install --no-cache -r requirements.txt ultralytics albumentations comet gsutil notebook Pillow>=9.1.0 \ - 'opencv-python<4.6.0.66' \ +RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook 'opencv-python<4.6.0.66' \ + Pillow>=9.1.0 pycocotools>=2.0.6 ultralytics \ --extra-index-url https://download.pytorch.org/whl/cu113 # Create working directory diff --git a/val.py b/val.py index 8d27d9d3dab1..599aa1afdd4a 100644 --- a/val.py +++ b/val.py @@ -309,7 +309,7 @@ def run( json.dump(jdict, f) try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb - check_requirements('pycocotools') + check_requirements('pycocotools>=2.0.6') from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval From 65071da7181e2ede9d3514f20c88e6bd646af07c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 27 Dec 2022 16:47:26 +0100 Subject: [PATCH 021/128] Update Dockerfile `pip install -U pycocotools` (#10606) * Update Dockerfile `pip install -U pycocotools` Previous command not working. Signed-off-by: Glenn Jocher * Update Dockerfile Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index 6f9de5208e7f..98e9c2927b87 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -16,8 +16,9 @@ RUN apt update && apt install --no-install-recommends -y zip htop screen libgl1- COPY requirements.txt . 
RUN python -m pip install --upgrade pip wheel RUN pip uninstall -y Pillow torchtext torch torchvision +RUN pip install --no-cache -U pycocotools # install --upgrade RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook 'opencv-python<4.6.0.66' \ - Pillow>=9.1.0 pycocotools>=2.0.6 ultralytics \ + Pillow>=9.1.0 ultralytics \ --extra-index-url https://download.pytorch.org/whl/cu113 # Create working directory From a389bff3cb0209c4f74c512fc340a414056fc45d Mon Sep 17 00:00:00 2001 From: Hisam Fahri Date: Tue, 3 Jan 2023 03:09:02 +0700 Subject: [PATCH 022/128] docs: remove past Ultralytics Live Session event from readme (#10635) Signed-off-by: Hisam Fahri Signed-off-by: Hisam Fahri --- README.md | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/README.md b/README.md index 56015b239fc9..c32f3d6fe4ae 100644 --- a/README.md +++ b/README.md @@ -45,17 +45,6 @@ To request an Enterprise License please complete the form at Ultralytics Live Session
- - - ##
Segmentation ⭐ NEW
From 632bf485b4ab2adbaef71f4eced5e6b59ecef7e2 Mon Sep 17 00:00:00 2001 From: Yonghye Kwon Date: Tue, 3 Jan 2023 05:10:13 +0900 Subject: [PATCH 023/128] Remove rocket emoji causes cp949 codec errors (#10646) Signed-off-by: Yonghye Kwon Signed-off-by: Yonghye Kwon Co-authored-by: Glenn Jocher --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index c6bd0f26cabb..c0e4a91d7dd1 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -# YOLOv5 🚀 requirements +# YOLOv5 requirements # Usage: pip install -r requirements.txt # Base ------------------------------------------------------------------------ From c0ca1d21f24ced15fcc3ec6e80f5e55d78fde9d8 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 3 Jan 2023 19:21:31 +0100 Subject: [PATCH 024/128] `ultralytics/assets` update `master` to `main` (#10663) --- README.md | 74 ++++++++++++++++++++--------------------- README.zh-CN.md | 74 ++++++++++++++++++++--------------------- classify/tutorial.ipynb | 4 +-- segment/tutorial.ipynb | 4 +-- tutorial.ipynb | 4 +-- 5 files changed, 80 insertions(+), 80 deletions(-) diff --git a/README.md b/README.md index c32f3d6fe4ae..8044252cb74b 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@

- +

[English](README.md) | [简体中文](README.zh-CN.md) @@ -23,25 +23,25 @@ To request an Enterprise License please complete the form at - - + + - - + + - - + + - - + + - - + + - - + + - +
@@ -233,20 +233,20 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml -
- +

- + - + - +
@@ -261,7 +261,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - [Ultralytics HUB](https://bit.ly/ultralytics_hub) is our ⭐ **NEW** no-code solution to visualize datasets, train YOLOv5 🚀 models, and deploy to the real world in a seamless experience. Get started for **Free** now! - + ##
Why YOLOv5
@@ -395,19 +395,19 @@ Get started in seconds with our verified environments. Click each icon below for
- + - + - + - + - +
@@ -443,25 +443,25 @@ For YOLOv5 bugs and feature requests please visit [GitHub Issues](https://github
- - + + - - + + - - + + - - + + - - + + - - + + - +
[tta]: https://github.com/ultralytics/yolov5/issues/303 diff --git a/README.zh-CN.md b/README.zh-CN.md index 15232be3aa4f..ab76afbc5252 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -1,7 +1,7 @@

- +

[英文](README.md)\|[简体中文](README.zh-CN.md)
@@ -23,25 +23,25 @@ YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表 - - + + - - + + - - + + - - + + - - + + - - + + - +
@@ -245,20 +245,20 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml -
- +

- + - + - +
@@ -272,7 +272,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - [Ultralytics HUB](https://bit.ly/ultralytics_hub) 是我们的⭐**新的**用于可视化数据集、训练 YOLOv5 🚀 模型并以无缝体验部署到现实世界的无代码解决方案。现在开始 **免费** 使用他! - + ##
为什么选择 YOLOv5
@@ -412,19 +412,19 @@ python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --inclu
- + - + - + - + - +
@@ -458,25 +458,25 @@ YOLOv5 在两种不同的 License 下可用:
- - + + - - + + - - + + - - + + - - + + - - + + - +
[tta]: https://github.com/ultralytics/yolov5/issues/303 diff --git a/classify/tutorial.ipynb b/classify/tutorial.ipynb index 06af62a1b4c1..03c1dd0bc0de 100644 --- a/classify/tutorial.ipynb +++ b/classify/tutorial.ipynb @@ -9,7 +9,7 @@ "
\n", "\n", " \n", - " \n", + " \n", "\n", "\n", "
\n", @@ -1222,7 +1222,7 @@ "source": [ "# 3. Train\n", "\n", - "

\n", + "

\n", "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", "

\n", "\n", diff --git a/segment/tutorial.ipynb b/segment/tutorial.ipynb index dc6599415480..cb1af34d9f17 100644 --- a/segment/tutorial.ipynb +++ b/segment/tutorial.ipynb @@ -9,7 +9,7 @@ "
\n", "\n", " \n", - " \n", + " \n", "\n", "\n", "
\n", @@ -216,7 +216,7 @@ "source": [ "# 3. Train\n", "\n", - "

\n", + "

\n", "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", "

\n", "\n", diff --git a/tutorial.ipynb b/tutorial.ipynb index e83617e9dce7..6308898b8b71 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -369,7 +369,7 @@ "
\n", "\n", " \n", - " \n", + " \n", "\n", "\n", "
\n", @@ -622,7 +622,7 @@ "source": [ "# 3. Train\n", "\n", - "

\n", + "

\n", "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", "

\n", "\n", From 9fcbf93a1f0afacecb8b41b86fb1304db1942928 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 6 Jan 2023 18:45:31 +0100 Subject: [PATCH 025/128] Created using Colaboratory --- tutorial.ipynb | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 6308898b8b71..c320d699a940 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -651,17 +651,17 @@ "cell_type": "code", "source": [ "#@title Select YOLOv5 🚀 logger {run: 'auto'}\n", - "logger = 'TensorBoard' #@param ['TensorBoard', 'Comet', 'ClearML']\n", + "logger = 'ClearML' #@param ['ClearML', 'Comet', 'TensorBoard']\n", "\n", - "if logger == 'TensorBoard':\n", - " %load_ext tensorboard\n", - " %tensorboard --logdir runs/train\n", + "if logger == 'ClearML':\n", + " %pip install -q clearml\n", + " import clearml; clearml.browser_login()\n", "elif logger == 'Comet':\n", " %pip install -q comet_ml\n", " import comet_ml; comet_ml.init()\n", - "elif logger == 'ClearML':\n", - " %pip install -q clearml\n", - " import clearml; clearml.browser_login()" + "elif logger == 'TensorBoard':\n", + " %load_ext tensorboard\n", + " %tensorboard --logdir runs/train" ], "metadata": { "id": "i3oKtE4g-aNn" From 79c05e5689817645bb12b7f77a3d8318582c0f05 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Sat, 7 Jan 2023 00:19:14 +0530 Subject: [PATCH 026/128] Add Neural Magic DeepSparse tutorial to README (#10698) * Update README.md Signed-off-by: Ayush Chaurasia * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher Signed-off-by: Ayush Chaurasia Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- README.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 8044252cb74b..a2245db46c14 100644 --- a/README.md +++ b/README.md @@ -223,7 +223,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg 
yolov5n.yaml - - [Architecture Summary](https://github.com/ultralytics/yolov5/issues/6998) 🌟 NEW - [Roboflow for Datasets, Labeling, and Active Learning](https://github.com/ultralytics/yolov5/issues/4975)  🌟 NEW - [ClearML Logging](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) 🌟 NEW -- [Deci Platform](https://github.com/ultralytics/yolov5/wiki/Deci-Platform) 🌟 NEW +- [YOLOv5 with Neural Magic's Deepsparse](https://bit.ly/yolov5-neuralmagic) 🌟 NEW - [Comet Logging](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet) 🌟 NEW
@@ -247,13 +247,13 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - - - + + -|Roboflow|ClearML ⭐ NEW|Comet ⭐ NEW|Deci ⭐ NEW| +|Roboflow|ClearML ⭐ NEW|Comet ⭐ NEW|Neural Magic ⭐ NEW| |:-:|:-:|:-:|:-:| -|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Free forever, [Comet](https://bit.ly/yolov5-readme-comet2) lets you save YOLOv5 models, resume training, and interactively visualise and debug predictions|Automatically compile and quantize YOLOv5 for better inference performance in one click at [Deci](https://bit.ly/yolov5-deci-platform)| +|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Free forever, [Comet](https://bit.ly/yolov5-readme-comet2) lets you save YOLOv5 models, resume training, and interactively visualise and debug predictions|Run YOLOv5 inference up to 6x faster with [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic)| ##
Ultralytics HUB
From fdc35b119ad21c7f205596dbb238f780c87040ec Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 6 Jan 2023 20:04:42 +0100 Subject: [PATCH 027/128] Update Ultralytics App banner URL (#10704) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index a2245db46c14..80a4da2ade8e 100644 --- a/README.md +++ b/README.md @@ -417,7 +417,7 @@ Get started in seconds with our verified environments. Click each icon below for Run YOLOv5 models on your iOS or Android device by downloading the [Ultralytics App](https://ultralytics.com/app_install)! -Ultralytics mobile app +Ultralytics mobile app ##
Contribute
From 1ea901bd5257e8688a122a27afcb21d74b7c5fbc Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 9 Jan 2023 14:42:57 +0100 Subject: [PATCH 028/128] Migrate policies to ultralytics/.github (#10721) --- .github/CODE_OF_CONDUCT.md | 128 ------------------------------------- .github/SECURITY.md | 7 -- 2 files changed, 135 deletions(-) delete mode 100644 .github/CODE_OF_CONDUCT.md delete mode 100644 .github/SECURITY.md diff --git a/.github/CODE_OF_CONDUCT.md b/.github/CODE_OF_CONDUCT.md deleted file mode 100644 index 27e59e9aab38..000000000000 --- a/.github/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,128 +0,0 @@ -# YOLOv5 🚀 Contributor Covenant Code of Conduct - -## Our Pledge - -We as members, contributors, and leaders pledge to make participation in our -community a harassment-free experience for everyone, regardless of age, body -size, visible or invisible disability, ethnicity, sex characteristics, gender -identity and expression, level of experience, education, socio-economic status, -nationality, personal appearance, race, religion, or sexual identity -and orientation. - -We pledge to act and interact in ways that contribute to an open, welcoming, -diverse, inclusive, and healthy community. 
- -## Our Standards - -Examples of behavior that contributes to a positive environment for our -community include: - -- Demonstrating empathy and kindness toward other people -- Being respectful of differing opinions, viewpoints, and experiences -- Giving and gracefully accepting constructive feedback -- Accepting responsibility and apologizing to those affected by our mistakes, - and learning from the experience -- Focusing on what is best not just for us as individuals, but for the - overall community - -Examples of unacceptable behavior include: - -- The use of sexualized language or imagery, and sexual attention or - advances of any kind -- Trolling, insulting or derogatory comments, and personal or political attacks -- Public or private harassment -- Publishing others' private information, such as a physical or email - address, without their explicit permission -- Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Enforcement Responsibilities - -Community leaders are responsible for clarifying and enforcing our standards of -acceptable behavior and will take appropriate and fair corrective action in -response to any behavior that they deem inappropriate, threatening, offensive, -or harmful. - -Community leaders have the right and responsibility to remove, edit, or reject -comments, commits, code, wiki edits, issues, and other contributions that are -not aligned to this Code of Conduct, and will communicate reasons for moderation -decisions when appropriate. - -## Scope - -This Code of Conduct applies within all community spaces, and also applies when -an individual is officially representing the community in public spaces. -Examples of representing our community include using an official e-mail address, -posting via an official social media account, or acting as an appointed -representative at an online or offline event. 
- -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported to the community leaders responsible for enforcement at -hello@ultralytics.com. -All complaints will be reviewed and investigated promptly and fairly. - -All community leaders are obligated to respect the privacy and security of the -reporter of any incident. - -## Enforcement Guidelines - -Community leaders will follow these Community Impact Guidelines in determining -the consequences for any action they deem in violation of this Code of Conduct: - -### 1. Correction - -**Community Impact**: Use of inappropriate language or other behavior deemed -unprofessional or unwelcome in the community. - -**Consequence**: A private, written warning from community leaders, providing -clarity around the nature of the violation and an explanation of why the -behavior was inappropriate. A public apology may be requested. - -### 2. Warning - -**Community Impact**: A violation through a single incident or series -of actions. - -**Consequence**: A warning with consequences for continued behavior. No -interaction with the people involved, including unsolicited interaction with -those enforcing the Code of Conduct, for a specified period of time. This -includes avoiding interactions in community spaces as well as external channels -like social media. Violating these terms may lead to a temporary or -permanent ban. - -### 3. Temporary Ban - -**Community Impact**: A serious violation of community standards, including -sustained inappropriate behavior. - -**Consequence**: A temporary ban from any sort of interaction or public -communication with the community for a specified period of time. No public or -private interaction with the people involved, including unsolicited interaction -with those enforcing the Code of Conduct, is allowed during this period. -Violating these terms may lead to a permanent ban. - -### 4. 
Permanent Ban - -**Community Impact**: Demonstrating a pattern of violation of community -standards, including sustained inappropriate behavior, harassment of an -individual, or aggression toward or disparagement of classes of individuals. - -**Consequence**: A permanent ban from any sort of public interaction within -the community. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], -version 2.0, available at -https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. - -Community Impact Guidelines were inspired by [Mozilla's code of conduct -enforcement ladder](https://github.com/mozilla/diversity). - -For answers to common questions about this code of conduct, see the FAQ at -https://www.contributor-covenant.org/faq. Translations are available at -https://www.contributor-covenant.org/translations. - -[homepage]: https://www.contributor-covenant.org diff --git a/.github/SECURITY.md b/.github/SECURITY.md deleted file mode 100644 index aa3e8409da6b..000000000000 --- a/.github/SECURITY.md +++ /dev/null @@ -1,7 +0,0 @@ -# Security Policy - -We aim to make YOLOv5 🚀 as secure as possible! If you find potential vulnerabilities or have any concerns please let us know so we can investigate and take corrective action if needed. - -### Reporting a Vulnerability - -To report vulnerabilities please email us at hello@ultralytics.com or visit https://ultralytics.com/contact. Thank you! 
From caba2aed4a6c2ad85712acb7cb1dd22ed886dc95 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 9 Jan 2023 20:35:02 +0100 Subject: [PATCH 029/128] Update translate-readme.yml (#10725) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- .github/workflows/translate-readme.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/translate-readme.yml b/.github/workflows/translate-readme.yml index 538ff375097e..2bb351ec7e81 100644 --- a/.github/workflows/translate-readme.yml +++ b/.github/workflows/translate-readme.yml @@ -19,7 +19,7 @@ jobs: uses: actions/setup-node@v3 with: node-version: 16 - # ISO Langusge Codes: https://cloud.google.com/translate/docs/languages + # ISO Language Codes: https://cloud.google.com/translate/docs/languages - name: Adding README - Chinese Simplified uses: dephraiim/translate-readme@main with: From 37d1e5e5df33f4a9bef75661e5a075927b058540 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9B=BE=E9=80=B8=E5=A4=AB=EF=BC=88Zeng=20Yifu=EF=BC=89?= <41098760+Zengyf-CVer@users.noreply.github.com> Date: Tue, 10 Jan 2023 16:40:17 +0800 Subject: [PATCH 030/128] Update some Chinese content of Neural Magic (#10727) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Update README.zh-CN.md Signed-off-by: 曾逸夫(Zeng Yifu) <41098760+Zengyf-CVer@users.noreply.github.com> Signed-off-by: 曾逸夫(Zeng Yifu) <41098760+Zengyf-CVer@users.noreply.github.com> --- README.zh-CN.md | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) diff --git a/README.zh-CN.md b/README.zh-CN.md index ab76afbc5252..8c6efadfd242 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -45,16 +45,6 @@ YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表
Ultralytics 直播会议 - - - ##
实例分割模型 ⭐ 新
@@ -260,12 +250,12 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - - +
-| Roboflow | ClearML ⭐ 新 | Comet ⭐ 新 | Deci ⭐ 新 | +| Roboflow | ClearML ⭐ 新 | Comet ⭐ 新 | Neural Magic ⭐ 新 | | :-----------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------: | -| 将您的自定义数据集进行标注并直接导出到 YOLOv5 以进行训练 [Roboflow](https://roboflow.com/?ref=ultralytics) | 自动跟踪、可视化甚至远程训练 YOLOv5 [ClearML](https://cutt.ly/yolov5-readme-clearml)(开源!) | 永远免费,[Comet](https://bit.ly/yolov5-readme-comet)可让您保存 YOLOv5 模型、恢复训练以及交互式可视化和调试预测 | 一键自动编译量化 YOLOv5 以获得更好的推理性能[Deci](https://bit.ly/yolov5-deci-platform) | +| 将您的自定义数据集进行标注并直接导出到 YOLOv5 以进行训练 [Roboflow](https://roboflow.com/?ref=ultralytics) | 自动跟踪、可视化甚至远程训练 YOLOv5 [ClearML](https://cutt.ly/yolov5-readme-clearml)(开源!) | 永远免费,[Comet](https://bit.ly/yolov5-readme-comet)可让您保存 YOLOv5 模型、恢复训练以及交互式可视化和调试预测 | 使用 [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic),运行 YOLOv5 推理的速度最高可提高6倍 | ##
Ultralytics HUB
From cdd804d39ff84b413bde36a84006f51769b6043b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9B=BE=E9=80=B8=E5=A4=AB=EF=BC=88Zeng=20Yifu=EF=BC=89?= <41098760+Zengyf-CVer@users.noreply.github.com> Date: Tue, 10 Jan 2023 22:05:41 +0800 Subject: [PATCH 031/128] Fix logo-neuralmagic.png image link (#10731) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Update README.zh-CN.md Signed-off-by: 曾逸夫(Zeng Yifu) <41098760+Zengyf-CVer@users.noreply.github.com> Signed-off-by: 曾逸夫(Zeng Yifu) <41098760+Zengyf-CVer@users.noreply.github.com> --- README.zh-CN.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.zh-CN.md b/README.zh-CN.md index 8c6efadfd242..c406f35820a7 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -249,7 +249,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - - + From 0e24b7e2f584beea3f573ddb82c3b93558daeb1f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 12 Jan 2023 17:43:12 +0100 Subject: [PATCH 032/128] PIL `.get_size()` deprecation fix (#10754) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/plots.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/plots.py b/utils/plots.py index d2f232de0e97..41a387200ba4 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -88,7 +88,7 @@ def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 2 if self.pil or not is_ascii(label): self.draw.rectangle(box, width=self.lw, outline=color) # box if label: - w, h = self.font.getsize(label) # text width, height + _, _, w, h = self.font.getbbox(label) # text width, height outside = box[1] - h >= 0 # label fits outside box self.draw.rectangle( (box[0], box[1] - h if outside else box[1], box[0] + w + 1, From bd10f0f6c72d3a0135b72f31b51057eb74c116eb Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 12 Jan 2023 18:01:36 +0100 Subject: [PATCH 033/128] Revert PIL deprecation fix Signed-off-by: Glenn Jocher --- 
utils/plots.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/plots.py b/utils/plots.py index 41a387200ba4..f84aed9fb5c7 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -88,7 +88,8 @@ def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 2 if self.pil or not is_ascii(label): self.draw.rectangle(box, width=self.lw, outline=color) # box if label: - _, _, w, h = self.font.getbbox(label) # text width, height + w, h = self.font.getsize(label) # text width, height (WARNING: deprecated) in 9.2.0 + # _, _, w, h = self.font.getbbox(label) # text width, height (New) outside = box[1] - h >= 0 # label fits outside box self.draw.rectangle( (box[0], box[1] - h if outside else box[1], box[0] + w + 1, From 9650f16f41248b24a72276e2287185350939285d Mon Sep 17 00:00:00 2001 From: Wang Xin Date: Fri, 13 Jan 2023 02:35:05 +0800 Subject: [PATCH 034/128] Ignore *_paddle_model/ dir (#10745) Signed-off-by: Wang Xin Signed-off-by: Wang Xin --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 69a00843ea42..6bcedfac610d 100755 --- a/.gitignore +++ b/.gitignore @@ -60,6 +60,7 @@ VOC/ *_saved_model/ *_web_model/ *_openvino_model/ +*_paddle_model/ darknet53.conv.74 yolov3-tiny.conv.15 From 2b356c0ab24bc945d69ab66b67e8af755697b611 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 14 Jan 2023 12:40:15 +0100 Subject: [PATCH 035/128] Update Dockerfile (#10768) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index 98e9c2927b87..c8b88357cb6d 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -54,7 +54,7 @@ ENV OMP_NUM_THREADS=1 # t=ultralytics/yolov5:latest tnew=ultralytics/yolov5:v6.2 && sudo docker pull $t && sudo docker tag $t $tnew && sudo docker push $tnew # Clean up -# docker system prune -a --volumes +# sudo docker system 
prune -a --volumes # Update Ubuntu drivers # https://www.maketecheasier.com/install-nvidia-drivers-ubuntu/ From 3a059125dd7b01c76b7a02b59814ed5bc32d9ac4 Mon Sep 17 00:00:00 2001 From: ZhuGeRoastedFish <77224640+ZhuGeRoastedFish@users.noreply.github.com> Date: Sat, 14 Jan 2023 21:43:27 +0800 Subject: [PATCH 036/128] Fx confusion-matrix xlabel typo (#10692) fix confusion-matrix xlabel typo Signed-off-by: ZhuGeRoastedFish <77224640+ZhuGeRoastedFish@users.noreply.github.com> Signed-off-by: ZhuGeRoastedFish <77224640+ZhuGeRoastedFish@users.noreply.github.com> Co-authored-by: Glenn Jocher --- utils/metrics.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/metrics.py b/utils/metrics.py index c01f823a77a1..7fb077774384 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -208,7 +208,7 @@ def plot(self, normalize=True, save_dir='', names=()): vmin=0.0, xticklabels=ticklabels, yticklabels=ticklabels).set_facecolor((1, 1, 1)) - ax.set_ylabel('True') + ax.set_xlabel('True') ax.set_ylabel('Predicted') ax.set_title('Confusion Matrix') fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250) From 3b6e27ad0ad990cc69c519e969a6094aacfb9e3e Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Sat, 14 Jan 2023 14:46:56 +0100 Subject: [PATCH 037/128] [pre-commit.ci] pre-commit suggestions (#10655) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/asottile/pyupgrade: v3.3.0 → v3.3.1](https://github.com/asottile/pyupgrade/compare/v3.3.0...v3.3.1) - [github.com/PyCQA/isort: 5.10.1 → 5.11.4](https://github.com/PyCQA/isort/compare/5.10.1...5.11.4) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 
28dbc89223cf..f7ae077ee272 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -24,14 +24,14 @@ repos: - id: check-docstring-first - repo: https://github.com/asottile/pyupgrade - rev: v3.3.0 + rev: v3.3.1 hooks: - id: pyupgrade name: Upgrade code args: [ --py37-plus ] - repo: https://github.com/PyCQA/isort - rev: 5.10.1 + rev: 5.11.4 hooks: - id: isort name: Sort imports From 589edc7b012d45a5c8ad6231d7716f88cb6e43ca Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 15 Jan 2023 18:43:06 +0100 Subject: [PATCH 038/128] Ultralytics Live Session 3 - YOLOv8 https://youtu.be/IPcpYO5ITa8 (#10769) * Ultralytics Live Session 3 - YOLOv8 https://youtu.be/IPcpYO5ITa8 Ultralytics Live Session Ep.3 is here! Join us on January 18th at 18 CET as we dive into the latest advancements in YOLOv8, and demonstrate how to use this cutting-edge, SOTA model to improve your object detection, image segmentation, and image classification projects. See firsthand how YOLOv8's speed, accuracy, and ease of use make it a top choice for professionals and researchers alike. In addition to learning about the exciting new features and improvements of Ultralytics YOLOv8, you will also have the opportunity to ask questions and interact with our team during the live Q&A session. We encourage all of you to come prepared with any questions you may have. Don't miss out on this opportunity! To join the webinar, visit our YouTube Channel and turn on your notifications! 
https://youtu.be/IPcpYO5ITa8 Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Paula Derrenger <107626595+pderrenger@users.noreply.github.com> Signed-off-by: Glenn Jocher Signed-off-by: Paula Derrenger <107626595+pderrenger@users.noreply.github.com> Co-authored-by: Paula Derrenger <107626595+pderrenger@users.noreply.github.com> --- README.md | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/README.md b/README.md index 80a4da2ade8e..399ebe5666e2 100644 --- a/README.md +++ b/README.md @@ -45,6 +45,20 @@ To request an Enterprise License please complete the form at Ultralytics Live Session + +
+ +[Ultralytics Live Session 3](https://youtu.be/IPcpYO5ITa8) ✨ is here! Join us on January 18th at 18 CET as we dive into the latest advancements in YOLOv8, and demonstrate how to use this cutting-edge, SOTA model to improve your object detection, instance segmentation, and image classification projects. See firsthand how YOLOv8's speed, accuracy, and ease of use make it a top choice for professionals and researchers alike. + +In addition to learning about the exciting new features and improvements of Ultralytics YOLOv8, you will also have the opportunity to ask questions and interact with our team during the live Q&A session. We encourage all of you to come prepared with any questions you may have. + +Don't miss out on this opportunity! To join the webinar, visit our YouTube Channel and turn on your notifications! https://youtu.be/IPcpYO5ITa8 + + + +
+ ##
Segmentation ⭐ NEW
From c442a2e99321ebd72b242bc961824f82d46e4fd3 Mon Sep 17 00:00:00 2001 From: Paula Derrenger <107626595+pderrenger@users.noreply.github.com> Date: Tue, 17 Jan 2023 14:40:03 +0100 Subject: [PATCH 039/128] Update Ultralytics Live Session 3 - https://youtu.be/IPcpYO5ITa8 (#10782) * Update Date of Ultralytics Live Session 3 Signed-off-by: Paula Derrenger <107626595+pderrenger@users.noreply.github.com> * Update README.md Signed-off-by: Glenn Jocher Signed-off-by: Paula Derrenger <107626595+pderrenger@users.noreply.github.com> Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 399ebe5666e2..f19130f6f094 100644 --- a/README.md +++ b/README.md @@ -49,14 +49,14 @@ To request an Enterprise License please complete the form at -[Ultralytics Live Session 3](https://youtu.be/IPcpYO5ITa8) ✨ is here! Join us on January 18th at 18 CET as we dive into the latest advancements in YOLOv8, and demonstrate how to use this cutting-edge, SOTA model to improve your object detection, instance segmentation, and image classification projects. See firsthand how YOLOv8's speed, accuracy, and ease of use make it a top choice for professionals and researchers alike. +[Ultralytics Live Session 3](https://youtu.be/IPcpYO5ITa8) ✨ is here! Join us on January 24th at 18 CET as we dive into the latest advancements in YOLOv8, and demonstrate how to use this cutting-edge, SOTA model to improve your object detection, instance segmentation, and image classification projects. See firsthand how YOLOv8's speed, accuracy, and ease of use make it a top choice for professionals and researchers alike. -In addition to learning about the exciting new features and improvements of Ultralytics YOLOv8, you will also have the opportunity to ask questions and interact with our team during the live Q&A session. We encourage all of you to come prepared with any questions you may have. 
+In addition to learning about the exciting new features and improvements of Ultralytics YOLOv8, you will also have the opportunity to ask questions and interact with our team during the live Q&A session. We encourage you to come prepared with any questions you may have. -Don't miss out on this opportunity! To join the webinar, visit our YouTube Channel and turn on your notifications! https://youtu.be/IPcpYO5ITa8 +To join the webinar, visit our [YouTube Channel](https://www.youtube.com/@Ultralytics/streams) and turn on your notifications! - +
##
Segmentation ⭐ NEW
From 064365d8683fd002e9ad789c1e91fa3d021b44f0 Mon Sep 17 00:00:00 2001 From: Johan Bergman <35481994+duran67@users.noreply.github.com> Date: Fri, 20 Jan 2023 23:49:43 +0100 Subject: [PATCH 040/128] Update parse_opt() in export.py to work as in train.py (#10789) Update parse_opt() to work as in train.py Change parse_opt() be able to use parse_known_args(), same as in train.py, so export.main() can be called from other script without error. e.g.: from yolov5 import export opt = export.parse_opt(True) opt.weights = opt.include = ("torchscript", "onnx") opt.data = opt.imgsz = [, ] export.main(opt) Signed-off-by: Johan Bergman <35481994+duran67@users.noreply.github.com> Signed-off-by: Johan Bergman <35481994+duran67@users.noreply.github.com> --- export.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/export.py b/export.py index 7910178b2338..9ca3441bc66a 100644 --- a/export.py +++ b/export.py @@ -610,7 +610,7 @@ def run( return f # return list of exported files/dirs -def parse_opt(): +def parse_opt(known=False): parser = argparse.ArgumentParser() parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model.pt path(s)') @@ -638,7 +638,7 @@ def parse_opt(): nargs='+', default=['torchscript'], help='torchscript, onnx, openvino, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle') - opt = parser.parse_args() + opt = parser.parse_known_args()[0] if known else parser.parse_args() print_args(vars(opt)) return opt From 6a62c94190583cca257bb091c6ced9d9c3b2dd3d Mon Sep 17 00:00:00 2001 From: Laughing <61612323+Laughing-q@users.noreply.github.com> Date: Sat, 4 Feb 2023 20:21:30 +0800 Subject: [PATCH 041/128] fix zero labels (#10820) update --- utils/augmentations.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/augmentations.py b/utils/augmentations.py index 1eae5db8f816..7ab75f17fb18 
100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -201,7 +201,7 @@ def random_perspective(im, # Transform label coordinates n = len(targets) if n: - use_segments = any(x.any() for x in segments) + use_segments = any(x.any() for x in segments) and len(segments) == n new = np.zeros((n, 4)) if use_segments: # warp segments segments = resample_segments(segments) # upsample From d02ee60512c50d9573bb7a136d8baade8a0bd332 Mon Sep 17 00:00:00 2001 From: Talia Bender <85292283+taliabender@users.noreply.github.com> Date: Sat, 4 Feb 2023 13:30:10 +0100 Subject: [PATCH 042/128] Update README.md (#10893) * Update README.md Signed-off-by: Talia Bender <85292283+taliabender@users.noreply.github.com> * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher --------- Signed-off-by: Talia Bender <85292283+taliabender@users.noreply.github.com> Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- README.md | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index f19130f6f094..2938710214b4 100644 --- a/README.md +++ b/README.md @@ -49,14 +49,17 @@ To request an Enterprise License please complete the form at -[Ultralytics Live Session 3](https://youtu.be/IPcpYO5ITa8) ✨ is here! Join us on January 24th at 18 CET as we dive into the latest advancements in YOLOv8, and demonstrate how to use this cutting-edge, SOTA model to improve your object detection, instance segmentation, and image classification projects. See firsthand how YOLOv8's speed, accuracy, and ease of use make it a top choice for professionals and researchers alike. +⚡️ Stay tuned for [Ultralytics Live Session 4](https://www.youtube.com/watch?v=FXIbVnat2eU) ⚡️ -In addition to learning about the exciting new features and improvements of Ultralytics YOLOv8, you will also have the opportunity to ask questions and interact with our team during the live Q&A session. 
We encourage you to come prepared with any questions you may have. +Over the past couple of years we found that 22% percent of you experience difficulty in deploying your vision AI models. To improve this step in the ML pipeline, we've partnered with [Neural Magic](https://bit.ly/yolov5-neuralmagic), whose DeepSparse tool takes advantage of sparsity and low-precision arithmetic within neural networks to offer exceptional performance on commodity hardware. + +Glenn will be joined by Michael Goin of Neural Magic on February 8th at 12 EST/18 CET to discuss how to achieve GPU-class performance for YOLOv5 on CPUs. Be sure to come prepared with any questions you have about the model deployment process! To join the webinar, visit our [YouTube Channel](https://www.youtube.com/@Ultralytics/streams) and turn on your notifications! - - + + + ##
Segmentation ⭐ NEW
From b8a2c47fa94011260e0980a217dd7ec0d537414e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 6 Feb 2023 15:11:32 +0400 Subject: [PATCH 043/128] Update Dockerfile `FROM pytorch/pytorch:latest` (#10902) * Update Dockerfile `FROM pytorch/pytorch:latest` Signed-off-by: Glenn Jocher * isort * precommit * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * spelling * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update .pre-commit-config.yaml Signed-off-by: Glenn Jocher * Cleanup * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Cleanup * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Cleanup * Cleanup * Cleanup * Cleanup --------- Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 34 ++- README.md | 107 ++++--- README.zh-CN.md | 168 +++++------ classify/tutorial.ipynb | 2 +- classify/val.py | 6 +- utils/docker/Dockerfile | 33 ++- utils/loggers/__init__.py | 24 +- utils/loggers/clearml/README.md | 39 +-- utils/loggers/comet/README.md | 10 +- utils/loggers/wandb/README.md | 162 ----------- utils/loggers/wandb/log_dataset.py | 27 -- utils/loggers/wandb/sweep.py | 41 --- utils/loggers/wandb/sweep.yaml | 143 ---------- utils/loggers/wandb/wandb_utils.py | 434 ++--------------------------- 14 files changed, 250 insertions(+), 980 deletions(-) delete mode 100644 utils/loggers/wandb/README.md delete mode 100644 utils/loggers/wandb/log_dataset.py delete mode 100644 utils/loggers/wandb/sweep.py delete mode 100644 utils/loggers/wandb/sweep.yaml diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index f7ae077ee272..83425ad6cf78 100644 --- 
a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -4,6 +4,7 @@ default_language_version: python: python3.8 +exclude: 'docs/' # Define bot property if installed via https://github.com/marketplace/pre-commit-ci ci: autofix_prs: true @@ -28,13 +29,13 @@ repos: hooks: - id: pyupgrade name: Upgrade code - args: [ --py37-plus ] + args: [--py37-plus] - - repo: https://github.com/PyCQA/isort - rev: 5.11.4 - hooks: - - id: isort - name: Sort imports + # - repo: https://github.com/PyCQA/isort + # rev: 5.11.4 + # hooks: + # - id: isort + # name: Sort imports - repo: https://github.com/pre-commit/mirrors-yapf rev: v0.32.0 @@ -50,15 +51,22 @@ repos: additional_dependencies: - mdformat-gfm - mdformat-black - exclude: "README.md|README.zh-CN.md" - - - repo: https://github.com/asottile/yesqa - rev: v1.4.0 - hooks: - - id: yesqa + # exclude: "README.md|README.zh-CN.md|CONTRIBUTING.md" - repo: https://github.com/PyCQA/flake8 - rev: 6.0.0 + rev: 5.0.4 hooks: - id: flake8 name: PEP8 + + #- repo: https://github.com/codespell-project/codespell + # rev: v2.2.2 + # hooks: + # - id: codespell + # args: + # - --ignore-words-list=crate,nd + + #- repo: https://github.com/asottile/yesqa + # rev: v1.4.0 + # hooks: + # - id: yesqa diff --git a/README.md b/README.md index 2938710214b4..e836abf6d551 100644 --- a/README.md +++ b/README.md @@ -4,9 +4,10 @@

- [English](README.md) | [简体中文](README.zh-CN.md) -
-
+[English](README.md) | [简体中文](README.zh-CN.md) +
+ +
YOLOv5 CI YOLOv5 Citation Docker Pulls @@ -21,7 +22,7 @@ YOLOv5 🚀 is the world's most loved vision AI, representing Ultralytics Licensing. -
+
@@ -49,7 +50,7 @@ To request an Enterprise License please complete the form at -⚡️ Stay tuned for [Ultralytics Live Session 4](https://www.youtube.com/watch?v=FXIbVnat2eU) ⚡️ +⚡️ Stay tuned for [Ultralytics Live Session 4](https://www.youtube.com/watch?v=FXIbVnat2eU) ⚡️ Over the past couple of years we found that 22% percent of you experience difficulty in deploying your vision AI models. To improve this step in the ML pipeline, we've partnered with [Neural Magic](https://bit.ly/yolov5-neuralmagic), whose DeepSparse tool takes advantage of sparsity and low-precision arithmetic within neural networks to offer exceptional performance on commodity hardware. @@ -78,13 +79,13 @@ Our new YOLOv5 [release v7.0](https://github.com/ultralytics/yolov5/releases/v7. We trained YOLOv5 segmentations models on COCO for 300 epochs at image size 640 using A100 GPUs. We exported all models to ONNX FP32 for CPU speed tests and to TensorRT FP16 for GPU speed tests. We ran all speed tests on Google [Colab Pro](https://colab.research.google.com/signup) notebooks for easy reproducibility. -| Model | size
(pixels) | mAPbox
50-95 | mAPmask
50-95 | Train time
300 epochs
A100 (hours) | Speed
ONNX CPU
(ms) | Speed
TRT A100
(ms) | params
(M) | FLOPs
@640 (B) | -|----------------------------------------------------------------------------------------------------|-----------------------|----------------------|-----------------------|-----------------------------------------------|--------------------------------|--------------------------------|--------------------|------------------------| -| [YOLOv5n-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-seg.pt) | 640 | 27.6 | 23.4 | 80:17 | **62.7** | **1.2** | **2.0** | **7.1** | -| [YOLOv5s-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt) | 640 | 37.6 | 31.7 | 88:16 | 173.3 | 1.4 | 7.6 | 26.4 | -| [YOLOv5m-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-seg.pt) | 640 | 45.0 | 37.1 | 108:36 | 427.0 | 2.2 | 22.0 | 70.8 | -| [YOLOv5l-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-seg.pt) | 640 | 49.0 | 39.9 | 66:43 (2x) | 857.4 | 2.9 | 47.9 | 147.7 | -| [YOLOv5x-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-seg.pt) | 640 | **50.7** | **41.4** | 62:56 (3x) | 1579.2 | 4.5 | 88.8 | 265.7 | +| Model | size
(pixels) | mAPbox
50-95 | mAPmask
50-95 | Train time
300 epochs
A100 (hours) | Speed
ONNX CPU
(ms) | Speed
TRT A100
(ms) | params
(M) | FLOPs
@640 (B) | +| ------------------------------------------------------------------------------------------ | --------------------- | -------------------- | --------------------- | --------------------------------------------- | ------------------------------ | ------------------------------ | ------------------ | ---------------------- | +| [YOLOv5n-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-seg.pt) | 640 | 27.6 | 23.4 | 80:17 | **62.7** | **1.2** | **2.0** | **7.1** | +| [YOLOv5s-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt) | 640 | 37.6 | 31.7 | 88:16 | 173.3 | 1.4 | 7.6 | 26.4 | +| [YOLOv5m-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-seg.pt) | 640 | 45.0 | 37.1 | 108:36 | 427.0 | 2.2 | 22.0 | 70.8 | +| [YOLOv5l-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-seg.pt) | 640 | 49.0 | 39.9 | 66:43 (2x) | 857.4 | 2.9 | 47.9 | 147.7 | +| [YOLOv5x-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-seg.pt) | 640 | **50.7** | **41.4** | 62:56 (3x) | 1579.2 | 4.5 | 88.8 | 265.7 | - All checkpoints are trained to 300 epochs with SGD optimizer with `lr0=0.01` and `weight_decay=5e-5` at image size 640 and all default settings.
Runs logged to https://wandb.ai/glenn-jocher/YOLOv5_v70_official - **Accuracy** values are for single-model single-scale on COCO dataset.
Reproduce by `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt` @@ -97,6 +98,7 @@ We trained YOLOv5 segmentations models on COCO for 300 epochs at image size 640
Segmentation Usage Examples  Open In Colab ### Train + YOLOv5 segmentation training supports auto-download COCO128-seg segmentation dataset with `--data coco128-seg.yaml` argument and manual download of COCO-segments dataset with `bash data/scripts/get_coco.sh --train --val --segments` and then `python train.py --data coco.yaml`. ```bash @@ -108,33 +110,41 @@ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train ``` ### Val + Validate YOLOv5s-seg mask mAP on COCO dataset: + ```bash bash data/scripts/get_coco.sh --val --segments # download COCO val segments split (780MB, 5000 images) python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # validate ``` ### Predict + Use pretrained YOLOv5m-seg.pt to predict bus.jpg: + ```bash python segment/predict.py --weights yolov5m-seg.pt --data data/images/bus.jpg ``` + ```python -model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5m-seg.pt') # load from PyTorch Hub (WARNING: inference not yet supported) +model = torch.hub.load( + "ultralytics/yolov5", "custom", "yolov5m-seg.pt" +) # load from PyTorch Hub (WARNING: inference not yet supported) ``` -![zidane](https://user-images.githubusercontent.com/26833433/203113421-decef4c4-183d-4a0a-a6c2-6435b33bc5d3.jpg) | ![bus](https://user-images.githubusercontent.com/26833433/203113416-11fe0025-69f7-4874-a0a6-65d0bfe2999a.jpg) ---- |--- +| ![zidane](https://user-images.githubusercontent.com/26833433/203113421-decef4c4-183d-4a0a-a6c2-6435b33bc5d3.jpg) | ![bus](https://user-images.githubusercontent.com/26833433/203113416-11fe0025-69f7-4874-a0a6-65d0bfe2999a.jpg) | +| ---------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | ### Export + Export YOLOv5s-seg model to ONNX and TensorRT: + ```bash python export.py --weights yolov5s-seg.pt --include onnx engine --img 640 
--device 0 ```
- ##
Documentation
See the [YOLOv5 Docs](https://docs.ultralytics.com) for full documentation on training, testing and deployment. See below for quickstart examples. @@ -164,10 +174,10 @@ YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). import torch # Model -model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # or yolov5n - yolov5x6, custom +model = torch.hub.load("ultralytics/yolov5", "yolov5s") # or yolov5n - yolov5x6, custom # Images -img = 'https://ultralytics.com/images/zidane.jpg' # or file, Path, PIL, OpenCV, numpy, list +img = "https://ultralytics.com/images/zidane.jpg" # or file, Path, PIL, OpenCV, numpy, list # Inference results = model(img) @@ -245,7 +255,6 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - - ##
Integrations

@@ -268,10 +277,9 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - -|Roboflow|ClearML ⭐ NEW|Comet ⭐ NEW|Neural Magic ⭐ NEW| -|:-:|:-:|:-:|:-:| -|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Free forever, [Comet](https://bit.ly/yolov5-readme-comet2) lets you save YOLOv5 models, resume training, and interactively visualise and debug predictions|Run YOLOv5 inference up to 6x faster with [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic)| - +| Roboflow | ClearML ⭐ NEW | Comet ⭐ NEW | Neural Magic ⭐ NEW | +| :--------------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------: | +| Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) | Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!) | Free forever, [Comet](https://bit.ly/yolov5-readme-comet2) lets you save YOLOv5 models, resume training, and interactively visualise and debug predictions | Run YOLOv5 inference up to 6x faster with [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic) | ##
Ultralytics HUB
@@ -280,7 +288,6 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - - ##
Why YOLOv5
YOLOv5 has been designed to be super easy to get started and simple to learn. We prioritize real-world results. @@ -303,19 +310,19 @@ YOLOv5 has been designed to be super easy to get started and simple to learn. We ### Pretrained Checkpoints -| Model | size
(pixels) | mAPval
50-95 | mAPval
50 | Speed
CPU b1
(ms) | Speed
V100 b1
(ms) | Speed
V100 b32
(ms) | params
(M) | FLOPs
@640 (B) | -|------------------------------------------------------------------------------------------------------|-----------------------|----------------------|-------------------|------------------------------|-------------------------------|--------------------------------|--------------------|------------------------| -| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** | -| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 | -| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 | -| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 | -| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 | -| | | | | | | | | | -| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 | -| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | -| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | -| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | -| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x6.pt)
+ [TTA][tta] | 1280
1536 | 55.0
**55.8** | 72.7
**72.7** | 3136
- | 26.2
- | 19.4
- | 140.7
- | 209.8
- | +| Model | size
(pixels) | mAPval
50-95 | mAPval
50 | Speed
CPU b1
(ms) | Speed
V100 b1
(ms) | Speed
V100 b32
(ms) | params
(M) | FLOPs
@640 (B) | +| ----------------------------------------------------------------------------------------------- | --------------------- | -------------------- | ----------------- | ---------------------------- | ----------------------------- | ------------------------------ | ------------------ | ---------------------- | +| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** | +| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 | +| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 | +| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 | +| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 | +| | | | | | | | | | +| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 | +| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | +| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | +| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | +| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x6.pt)
+ [TTA] | 1280
1536 | 55.0
**55.8** | 72.7
**72.7** | 3136
- | 26.2
- | 19.4
- | 140.7
- | 209.8
- |
Table Notes @@ -327,7 +334,6 @@ YOLOv5 has been designed to be super easy to get started and simple to learn. We
- ##
Classification ⭐ NEW
YOLOv5 [release v6.2](https://github.com/ultralytics/yolov5/releases) brings support for classification model training, validation and deployment! See full details in our [Release Notes](https://github.com/ultralytics/yolov5/releases/v6.2) and visit our [YOLOv5 Classification Colab Notebook](https://github.com/ultralytics/yolov5/blob/master/classify/tutorial.ipynb) for quickstart tutorials. @@ -340,18 +346,18 @@ YOLOv5 [release v6.2](https://github.com/ultralytics/yolov5/releases) brings sup We trained YOLOv5-cls classification models on ImageNet for 90 epochs using a 4xA100 instance, and we trained ResNet and EfficientNet models alongside with the same default training settings to compare. We exported all models to ONNX FP32 for CPU speed tests and to TensorRT FP16 for GPU speed tests. We ran all speed tests on Google [Colab Pro](https://colab.research.google.com/signup) for easy reproducibility. | Model | size
(pixels) | acc
top1 | acc
top5 | Training
90 epochs
4xA100 (hours) | Speed
ONNX CPU
(ms) | Speed
TensorRT V100
(ms) | params
(M) | FLOPs
@224 (B) | -|----------------------------------------------------------------------------------------------------|-----------------------|------------------|------------------|----------------------------------------------|--------------------------------|-------------------------------------|--------------------|------------------------| +| -------------------------------------------------------------------------------------------------- | --------------------- | ---------------- | ---------------- | -------------------------------------------- | ------------------------------ | ----------------------------------- | ------------------ | ---------------------- | | [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** | | [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 | | [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 | | [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 | | [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 | -| | +| | | | | | | | | | | [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 | | [ResNet34](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 | | [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 | | 
[ResNet101](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 | -| | +| | | | | | | | | | | [EfficientNet_b0](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 | | [EfficientNet_b1](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 | | [EfficientNet_b2](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 | @@ -364,6 +370,7 @@ We trained YOLOv5-cls classification models on ImageNet for 90 epochs using a 4x - **Accuracy** values are for single-model single-scale on [ImageNet-1k](https://www.image-net.org/index.php) dataset.
Reproduce by `python classify/val.py --data ../datasets/imagenet --img 224` - **Speed** averaged over 100 inference images using a Google [Colab Pro](https://colab.research.google.com/signup) V100 High-RAM instance.
Reproduce by `python classify/val.py --data ../datasets/imagenet --img 224 --batch 1` - **Export** to ONNX at FP32 and TensorRT at FP16 done with `export.py`.
Reproduce by `python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224` + @@ -371,6 +378,7 @@ We trained YOLOv5-cls classification models on ImageNet for 90 epochs using a 4x Classification Usage Examples  Open In Colab ### Train + YOLOv5 classification training supports auto-download of MNIST, Fashion-MNIST, CIFAR10, CIFAR100, Imagenette, Imagewoof, and ImageNet datasets with the `--data` argument. To start training on MNIST for example use `--data mnist`. ```bash @@ -382,28 +390,37 @@ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/trai ``` ### Val + Validate YOLOv5m-cls accuracy on ImageNet-1k dataset: + ```bash bash data/scripts/get_imagenet.sh --val # download ImageNet val split (6.3G, 50000 images) python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224 # validate ``` ### Predict + Use pretrained YOLOv5s-cls.pt to predict bus.jpg: + ```bash python classify/predict.py --weights yolov5s-cls.pt --data data/images/bus.jpg ``` + ```python -model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s-cls.pt') # load from PyTorch Hub +model = torch.hub.load( + "ultralytics/yolov5", "custom", "yolov5s-cls.pt" +) # load from PyTorch Hub ``` ### Export + Export a group of trained YOLOv5s-cls, ResNet and EfficientNet models to ONNX and TensorRT: + ```bash python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --include onnx engine --img 224 ``` - + ##
Environments
@@ -436,14 +453,13 @@ Run YOLOv5 models on your iOS or Android device by downloading the [Ultralytics Ultralytics mobile app - ##
Contribute
We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible. Please see our [Contributing Guide](CONTRIBUTING.md) to get started, and fill out the [YOLOv5 Survey](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experiences. Thank you to all our contributors! -
+ ##
License
@@ -452,7 +468,6 @@ YOLOv5 is available under two different licenses: - **GPL-3.0 License**: See [LICENSE](https://github.com/ultralytics/yolov5/blob/master/LICENSE) file for details. - **Enterprise License**: Provides greater flexibility for commercial product development without the open-source requirements of GPL-3.0. Typical use cases are embedding Ultralytics software and AI models in commercial products and applications. Request an Enterprise License at [Ultralytics Licensing](https://ultralytics.com/license). - ##
Contact
For YOLOv5 bugs and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues). For professional support please [Contact Us](https://ultralytics.com/contact). diff --git a/README.zh-CN.md b/README.zh-CN.md index c406f35820a7..b69d3921df99 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -4,9 +4,9 @@

-[英文](README.md)\|[简体中文](README.zh-CN.md)
+[英文](README.md)|[简体中文](README.zh-CN.md)
-
+
YOLOv5 CI YOLOv5 Citation Docker Pulls @@ -21,7 +21,7 @@ YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表Ultralytics 许可. -
+
@@ -61,18 +61,18 @@ YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表 教程 -- [训练自定义数据](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data)🚀 推荐 -- [获得最佳训练结果的技巧](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results)☘️ 推荐 -- [多 GPU 训练](https://github.com/ultralytics/yolov5/issues/475) -- [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36)🌟 新 -- [TFLite、ONNX、CoreML、TensorRT 导出](https://github.com/ultralytics/yolov5/issues/251)🚀 -- [NVIDIA Jetson Nano 部署](https://github.com/ultralytics/yolov5/issues/9627)🌟 新 -- [测试时数据增强 (TTA)](https://github.com/ultralytics/yolov5/issues/303) -- [模型集成](https://github.com/ultralytics/yolov5/issues/318) -- [模型修剪/稀疏度](https://github.com/ultralytics/yolov5/issues/304) -- [超参数进化](https://github.com/ultralytics/yolov5/issues/607) -- [使用冻结层进行迁移学习](https://github.com/ultralytics/yolov5/issues/1314) -- [架构总结](https://github.com/ultralytics/yolov5/issues/6998)🌟 新 -- [用于数据集、标签和主动学习的 Roboflow](https://github.com/ultralytics/yolov5/issues/4975)🌟 新 -- [ClearML 记录](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml)🌟 新 -- [Deci 平台](https://github.com/ultralytics/yolov5/wiki/Deci-Platform)🌟 新 -- [Comet Logging](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet)🌟 新 +- [训练自定义数据](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data)🚀 推荐 +- [获得最佳训练结果的技巧](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results)☘️ 推荐 +- [多 GPU 训练](https://github.com/ultralytics/yolov5/issues/475) +- [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36)🌟 新 +- [TFLite、ONNX、CoreML、TensorRT 导出](https://github.com/ultralytics/yolov5/issues/251)🚀 +- [NVIDIA Jetson Nano 部署](https://github.com/ultralytics/yolov5/issues/9627)🌟 新 +- [测试时数据增强 (TTA)](https://github.com/ultralytics/yolov5/issues/303) +- [模型集成](https://github.com/ultralytics/yolov5/issues/318) +- 
[模型修剪/稀疏度](https://github.com/ultralytics/yolov5/issues/304) +- [超参数进化](https://github.com/ultralytics/yolov5/issues/607) +- [使用冻结层进行迁移学习](https://github.com/ultralytics/yolov5/issues/1314) +- [架构总结](https://github.com/ultralytics/yolov5/issues/6998)🌟 新 +- [用于数据集、标签和主动学习的 Roboflow](https://github.com/ultralytics/yolov5/issues/4975)🌟 新 +- [ClearML 记录](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml)🌟 新 +- [Deci 平台](https://github.com/ultralytics/yolov5/wiki/Deci-Platform)🌟 新 +- [Comet Logging](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet)🌟 新 @@ -253,8 +255,8 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml -
-| Roboflow | ClearML ⭐ 新 | Comet ⭐ 新 | Neural Magic ⭐ 新 | -| :-----------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------: | +| Roboflow | ClearML ⭐ 新 | Comet ⭐ 新 | Neural Magic ⭐ 新 | +| :--------------------------------------------------------------------------------: | :-------------------------------------------------------------------------: | :-------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------: | | 将您的自定义数据集进行标注并直接导出到 YOLOv5 以进行训练 [Roboflow](https://roboflow.com/?ref=ultralytics) | 自动跟踪、可视化甚至远程训练 YOLOv5 [ClearML](https://cutt.ly/yolov5-readme-clearml)(开源!) | 永远免费,[Comet](https://bit.ly/yolov5-readme-comet)可让您保存 YOLOv5 模型、恢复训练以及交互式可视化和调试预测 | 使用 [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic),运行 YOLOv5 推理的速度最高可提高6倍 | ##
Ultralytics HUB
@@ -277,36 +279,36 @@ YOLOv5 超级容易上手,简单易学。我们优先考虑现实世界的结
图表笔记 -- **COCO AP val** 表示 mAP@0.5:0.95 指标,在 [COCO val2017](http://cocodataset.org) 数据集的 5000 张图像上测得, 图像包含 256 到 1536 各种推理大小。 -- **显卡推理速度** 为在 [COCO val2017](http://cocodataset.org) 数据集上的平均推理时间,使用 [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) V100实例,batchsize 为 32 。 -- **EfficientDet** 数据来自 [google/automl](https://github.com/google/automl) , batchsize 为32。 -- **复现命令** 为 `python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt` +- **COCO AP val** 表示 mAP@0.5:0.95 指标,在 [COCO val2017](http://cocodataset.org) 数据集的 5000 张图像上测得, 图像包含 256 到 1536 各种推理大小。 +- **显卡推理速度** 为在 [COCO val2017](http://cocodataset.org) 数据集上的平均推理时间,使用 [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) V100实例,batchsize 为 32 。 +- **EfficientDet** 数据来自 [google/automl](https://github.com/google/automl) , batchsize 为32。 +- **复现命令** 为 `python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt`
### 预训练模型 -| 模型 | 尺寸
(像素) | mAPval
50-95 | mAPval
50 | 推理速度
CPU b1
(ms) | 推理速度
V100 b1
(ms) | 速度
V100 b32
(ms) | 参数量
(M) | FLOPs
@640 (B) | -| --------------------------------------------------------------------------------------------------- | ------------------- | -------------------- | ------------------- | ------------------------------- | -------------------------------- | ------------------------------ | ----------------- | ---------------------- | -| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** | -| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 | -| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 | -| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 | -| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 | -| | | | | | | | | | -| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 | -| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | -| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | -| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | -| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x6.pt)
+[TTA][tta] | 1280
1536 | 55.0
**55.8** | 72.7
**72.7** | 3136
- | 26.2
- | 19.4
- | 140.7
- | 209.8
- | +| 模型 | 尺寸
(像素) | mAPval
50-95 | mAPval
50 | 推理速度
CPU b1
(ms) | 推理速度
V100 b1
(ms) | 速度
V100 b32
(ms) | 参数量
(M) | FLOPs
@640 (B) | +| ---------------------------------------------------------------------------------------------- | --------------- | -------------------- | ----------------- | --------------------------- | ---------------------------- | --------------------------- | --------------- | ---------------------- | +| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** | +| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 | +| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 | +| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 | +| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 | +| | | | | | | | | | +| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 | +| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | +| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | +| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | +| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x6.pt)
+[TTA] | 1280
1536 | 55.0
**55.8** | 72.7
**72.7** | 3136
- | 26.2
- | 19.4
- | 140.7
- | 209.8
- |
笔记 -- 所有模型都使用默认配置,训练 300 epochs。n和s模型使用 [hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml) ,其他模型都使用 [hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml) 。 -- **mAPval**在单模型单尺度上计算,数据集使用 [COCO val2017](http://cocodataset.org) 。
复现命令 `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` -- **推理速度**在 COCO val 图像总体时间上进行平均得到,测试环境使用[AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/)实例。 NMS 时间 (大约 1 ms/img) 不包括在内。
复现命令 `python val.py --data coco.yaml --img 640 --task speed --batch 1` -- **TTA** [测试时数据增强](https://github.com/ultralytics/yolov5/issues/303) 包括反射和尺度变换。
复现命令 `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment` +- 所有模型都使用默认配置,训练 300 epochs。n和s模型使用 [hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml) ,其他模型都使用 [hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml) 。 +- \*\*mAPval\*\*在单模型单尺度上计算,数据集使用 [COCO val2017](http://cocodataset.org) 。
复现命令 `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` +- **推理速度**在 COCO val 图像总体时间上进行平均得到,测试环境使用[AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/)实例。 NMS 时间 (大约 1 ms/img) 不包括在内。
复现命令 `python val.py --data coco.yaml --img 640 --task speed --batch 1` +- **TTA** [测试时数据增强](https://github.com/ultralytics/yolov5/issues/303) 包括反射和尺度变换。
复现命令 `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`
@@ -321,33 +323,33 @@ YOLOv5 [release v6.2](https://github.com/ultralytics/yolov5/releases) 带来对 我们使用 4xA100 实例在 ImageNet 上训练了 90 个 epochs 得到 YOLOv5-cls 分类模型,我们训练了 ResNet 和 EfficientNet 模型以及相同的默认训练设置以进行比较。我们将所有模型导出到 ONNX FP32 以进行 CPU 速度测试,并导出到 TensorRT FP16 以进行 GPU 速度测试。为了便于重现,我们在 Google 上进行了所有速度测试 [Colab Pro](https://colab.research.google.com/signup) 。 -| 模型 | 尺寸
(像素) | acc
top1 | acc
top5 | 训练时长
90 epochs
4xA100(小时) | 推理速度
ONNX CPU
(ms) | 推理速度
TensorRT V100
(ms) | 参数
(M) | FLOPs
@640 (B) | -| -------------------------------------------------------------------------------------------------- | ------------------- | ---------------- | ---------------- | ------------------------------------------ | --------------------------------- | -------------------------------------- | --------------- | -----------------------| -| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** | -| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 | -| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 | -| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 | -| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 | -| | | | | | | | | | -| [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 | -| [Resnetzch](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 | -| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 | -| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 | -| | | | | | | | | | -| [EfficientNet_b0](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 | -| 
[EfficientNet_b1](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 | -| [EfficientNet_b2](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 | -| [EfficientNet_b3](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 | +| 模型 | 尺寸
(像素) | acc
top1 | acc
top5 | 训练时长
90 epochs
4xA100(小时) | 推理速度
ONNX CPU
(ms) | 推理速度
TensorRT V100
(ms) | 参数
(M) | FLOPs
@640 (B) | +| -------------------------------------------------------------------------------------------------- | --------------- | ---------------- | ---------------- | ------------------------------------ | ----------------------------- | ---------------------------------- | -------------- | ---------------------- | +| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** | +| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 | +| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 | +| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 | +| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 | +| | | | | | | | | | +| [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 | +| [Resnetzch](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 | +| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 | +| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 | +| | | | | | | | | | +| [EfficientNet_b0](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 | +| 
[EfficientNet_b1](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 | +| [EfficientNet_b2](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 | +| [EfficientNet_b3](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 |
Table Notes (点击以展开) -- 所有模型都使用 SGD 优化器训练 90 个 epochs,都使用 `lr0=0.001` 和 `weight_decay=5e-5` 参数, 图像大小为 224 ,且都使用默认设置。
训练 log 可以查看 https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2 -- **准确性**都在单模型单尺度上计算,数据集使用 [ImageNet-1k](https://www.image-net.org/index.php) 。
复现命令 `python classify/val.py --data ../datasets/imagenet --img 224` -- **推理速度**是使用 100 个推理图像进行平均得到,测试环境使用谷歌 [Colab Pro](https://colab.research.google.com/signup) V100 高 RAM 实例。
复现命令 `python classify/val.py --data ../datasets/imagenet --img 224 --batch 1` -- **模型导出**到 FP32 的 ONNX 和 FP16 的 TensorRT 使用 `export.py` 。
复现命令 `python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224` -
- +- 所有模型都使用 SGD 优化器训练 90 个 epochs,都使用 `lr0=0.001` 和 `weight_decay=5e-5` 参数, 图像大小为 224 ,且都使用默认设置。
训练 log 可以查看 https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2 +- **准确性**都在单模型单尺度上计算,数据集使用 [ImageNet-1k](https://www.image-net.org/index.php) 。
复现命令 `python classify/val.py --data ../datasets/imagenet --img 224` +- **推理速度**是使用 100 个推理图像进行平均得到,测试环境使用谷歌 [Colab Pro](https://colab.research.google.com/signup) V100 高 RAM 实例。
复现命令 `python classify/val.py --data ../datasets/imagenet --img 224 --batch 1` +- **模型导出**到 FP32 的 ONNX 和 FP16 的 TensorRT 使用 `export.py` 。
复现命令 `python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224` + +
分类训练示例  Open In Colab @@ -382,7 +384,9 @@ python classify/predict.py --weights yolov5s-cls.pt --data data/images/bus.jpg ``` ```python -model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s-cls.pt') # load from PyTorch Hub +model = torch.hub.load( + "ultralytics/yolov5", "custom", "yolov5s-cls.pt" +) # load from PyTorch Hub ``` ### 模型导出 @@ -438,8 +442,8 @@ python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --inclu YOLOv5 在两种不同的 License 下可用: -- **GPL-3.0 License**: 查看 [License](https://github.com/ultralytics/yolov5/blob/master/LICENSE) 文件的详细信息。 -- **企业License**:在没有 GPL-3.0 开源要求的情况下为商业产品开发提供更大的灵活性。典型用例是将 Ultralytics 软件和 AI 模型嵌入到商业产品和应用程序中。在以下位置申请企业许可证 [Ultralytics 许可](https://ultralytics.com/license) 。 +- **GPL-3.0 License**: 查看 [License](https://github.com/ultralytics/yolov5/blob/master/LICENSE) 文件的详细信息。 +- **企业License**:在没有 GPL-3.0 开源要求的情况下为商业产品开发提供更大的灵活性。典型用例是将 Ultralytics 软件和 AI 模型嵌入到商业产品和应用程序中。在以下位置申请企业许可证 [Ultralytics 许可](https://ultralytics.com/license) 。 ##
联系我们
diff --git a/classify/tutorial.ipynb b/classify/tutorial.ipynb index 03c1dd0bc0de..cc18aa934039 100644 --- a/classify/tutorial.ipynb +++ b/classify/tutorial.ipynb @@ -78,7 +78,7 @@ "source": [ "# 1. Predict\n", "\n", - "`classify/predict.py` runs YOLOv5 Classifcation inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/predict-cls`. Example inference sources are:\n", + "`classify/predict.py` runs YOLOv5 Classification inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/predict-cls`. Example inference sources are:\n", "\n", "```shell\n", "python classify/predict.py --source 0 # webcam\n", diff --git a/classify/val.py b/classify/val.py index 8657036fb2a2..03ba817d5ea2 100644 --- a/classify/val.py +++ b/classify/val.py @@ -128,9 +128,9 @@ def run( LOGGER.info(f"{'Class':>24}{'Images':>12}{'top1_acc':>12}{'top5_acc':>12}") LOGGER.info(f"{'all':>24}{targets.shape[0]:>12}{top1:>12.3g}{top5:>12.3g}") for i, c in model.names.items(): - aci = acc[targets == i] - top1i, top5i = aci.mean(0).tolist() - LOGGER.info(f"{c:>24}{aci.shape[0]:>12}{top1i:>12.3g}{top5i:>12.3g}") + acc_i = acc[targets == i] + top1i, top5i = acc_i.mean(0).tolist() + LOGGER.info(f"{c:>24}{acc_i.shape[0]:>12}{top1i:>12.3g}{top5i:>12.3g}") # Print results t = tuple(x.t / len(dataloader.dataset.samples) * 1E3 for x in dt) # speeds per image diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index c8b88357cb6d..e18b2ac69678 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -3,23 +3,33 @@ # Image is CUDA-optimized for YOLOv5 single/multi-GPU training and inference # Start FROM NVIDIA PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch -FROM nvcr.io/nvidia/pytorch:22.12-py3 -RUN rm -rf /opt/pytorch # 
remove 1.2GB dir +# FROM docker.io/pytorch/pytorch:latest +FROM pytorch/pytorch:latest # Downloads to user config dir ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ # Install linux packages -RUN apt update && apt install --no-install-recommends -y zip htop screen libgl1-mesa-glx +ENV DEBIAN_FRONTEND noninteractive +RUN apt update +RUN TZ=Etc/UTC apt install -y tzdata +RUN apt install --no-install-recommends -y gcc git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg +# RUN alias python=python3 -# Install pip packages (uninstall torch nightly in favor of stable) +# Create working directory +RUN mkdir -p /usr/src/app +WORKDIR /usr/src/app + +# Copy contents +# COPY . /usr/src/app (issues as not a .git directory) +RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app + +# Install pip packages COPY requirements.txt . -RUN python -m pip install --upgrade pip wheel -RUN pip uninstall -y Pillow torchtext torch torchvision -RUN pip install --no-cache -U pycocotools # install --upgrade -RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook 'opencv-python<4.6.0.66' \ - Pillow>=9.1.0 ultralytics \ - --extra-index-url https://download.pytorch.org/whl/cu113 +RUN python3 -m pip install --upgrade pip wheel +RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook \ + coremltools onnx onnx-simplifier onnxruntime openvino-dev>=2022.3 + # tensorflow tensorflowjs \ # Create working directory RUN mkdir -p /usr/src/app @@ -32,6 +42,9 @@ RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app # Set environment variables ENV OMP_NUM_THREADS=1 +# Cleanup +ENV DEBIAN_FRONTEND teletype + # Usage Examples ------------------------------------------------------------------------------------------------------- diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 22da87034f24..1e7f38e0d677 100644 --- 
a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -84,10 +84,6 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, self.csv = True # always log to csv # Messages - # if not wandb: - # prefix = colorstr('Weights & Biases: ') - # s = f"{prefix}run 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs in Weights & Biases" - # self.logger.info(s) if not clearml: prefix = colorstr('ClearML: ') s = f"{prefix}run 'pip install clearml' to automatically track, visualize and remotely train YOLOv5 🚀 in ClearML" @@ -105,14 +101,8 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, # W&B if wandb and 'wandb' in self.include: - wandb_artifact_resume = isinstance(self.opt.resume, str) and self.opt.resume.startswith('wandb-artifact://') - run_id = torch.load(self.weights).get('wandb_id') if self.opt.resume and not wandb_artifact_resume else None self.opt.hyp = self.hyp # add hyperparameters - self.wandb = WandbLogger(self.opt, run_id) - # temp warn. because nested artifacts not supported after 0.12.10 - # if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.11'): - # s = "YOLOv5 temporarily requires wandb version 0.12.10 or below. Some features may not work as expected." 
- # self.logger.warning(s) + self.wandb = WandbLogger(self.opt) else: self.wandb = None @@ -175,7 +165,7 @@ def on_pretrain_routine_end(self, labels, names): self.comet_logger.on_pretrain_routine_end(paths) def on_train_batch_end(self, model, ni, imgs, targets, paths, vals): - log_dict = dict(zip(self.keys[0:3], vals)) + log_dict = dict(zip(self.keys[:3], vals)) # Callback runs on train batch end # ni: number integrated batches (since train start) if self.plots: @@ -221,10 +211,10 @@ def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix) # Callback runs on val end if self.wandb or self.clearml: files = sorted(self.save_dir.glob('val*.jpg')) - if self.wandb: - self.wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in files]}) - if self.clearml: - self.clearml.log_debug_samples(files, title='Validation') + if self.wandb: + self.wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in files]}) + if self.clearml: + self.clearml.log_debug_samples(files, title='Validation') if self.comet_logger: self.comet_logger.on_val_end(nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix) @@ -253,7 +243,7 @@ def on_fit_epoch_end(self, vals, epoch, best_fitness, fi): for i, name in enumerate(self.best_keys): self.wandb.wandb_run.summary[name] = best_results[i] # log best results in the summary self.wandb.log(x) - self.wandb.end_epoch(best_result=best_fitness == fi) + self.wandb.end_epoch() if self.clearml: self.clearml.current_epoch_logged_images = set() # reset epoch image limit diff --git a/utils/loggers/clearml/README.md b/utils/loggers/clearml/README.md index 3cf4c268583f..ca41c040193c 100644 --- a/utils/loggers/clearml/README.md +++ b/utils/loggers/clearml/README.md @@ -23,7 +23,6 @@ And so much more. It's up to you how many of these tools you want to use, you ca ![ClearML scalars dashboard](https://github.com/thepycoder/clearml_screenshots/raw/main/experiment_manager_with_compare.gif) -

@@ -35,15 +34,15 @@ Either sign up for free to the [ClearML Hosted Service](https://cutt.ly/yolov5-t 1. Install the `clearml` python package: - ```bash - pip install clearml - ``` + ```bash + pip install clearml + ``` 1. Connect the ClearML SDK to the server by [creating credentials](https://app.clear.ml/settings/workspace-configuration) (go right top to Settings -> Workspace -> Create new credentials), then execute the command below and follow the instructions: - ```bash - clearml-init - ``` + ```bash + clearml-init + ``` That's it! You're done 😎 @@ -60,18 +59,20 @@ pip install clearml>=1.2.0 This will enable integration with the YOLOv5 training script. Every training run from now on, will be captured and stored by the ClearML experiment manager. If you want to change the `project_name` or `task_name`, use the `--project` and `--name` arguments of the `train.py` script, by default the project will be called `YOLOv5` and the task `Training`. -PLEASE NOTE: ClearML uses `/` as a delimter for subprojects, so be careful when using `/` in your project name! +PLEASE NOTE: ClearML uses `/` as a delimiter for subprojects, so be careful when using `/` in your project name! ```bash python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache ``` or with custom project and task name: + ```bash python train.py --project my_project --name my_training --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache ``` This will capture: + - Source code + uncommitted changes - Installed packages - (Hyper)parameters @@ -94,7 +95,7 @@ There even more we can do with all of this information, like hyperparameter opti ## 🔗 Dataset Version Management -Versioning your data separately from your code is generally a good idea and makes it easy to aqcuire the latest version too. This repository supports supplying a dataset version ID and it will make sure to get the data if it's not there yet. 
Next to that, this workflow also saves the used dataset ID as part of the task parameters, so you will always know for sure which data was used in which experiment! +Versioning your data separately from your code is generally a good idea and makes it easy to acquire the latest version too. This repository supports supplying a dataset version ID, and it will make sure to get the data if it's not there yet. Next to that, this workflow also saves the used dataset ID as part of the task parameters, so you will always know for sure which data was used in which experiment! ![ClearML Dataset Interface](https://github.com/thepycoder/clearml_screenshots/raw/main/clearml_data.gif) @@ -112,6 +113,7 @@ The YOLOv5 repository supports a number of different datasets by using yaml file |_ LICENSE |_ README.txt ``` + But this can be any dataset you wish. Feel free to use your own, as long as you keep to this folder structure. Next, ⚠️**copy the corresponding yaml file to the root of the dataset folder**⚠️. This yaml files contains the information ClearML will need to properly use the dataset. You can make this yourself too, of course, just follow the structure of the example yamls. @@ -132,13 +134,15 @@ Basically we need the following keys: `path`, `train`, `test`, `val`, `nc`, `nam ### Upload Your Dataset -To get this dataset into ClearML as a versionned dataset, go to the dataset root folder and run the following command: +To get this dataset into ClearML as a versioned dataset, go to the dataset root folder and run the following command: + ```bash cd coco128 clearml-data sync --project YOLOv5 --name coco128 --folder . ``` The command `clearml-data sync` is actually a shorthand command. You could also run these commands one after the other: + ```bash # Optionally add --parent if you want to base # this version on another dataset version, so no duplicate files are uploaded! 
@@ -177,7 +181,7 @@ python utils/loggers/clearml/hpo.py ## 🤯 Remote Execution (advanced) -Running HPO locally is really handy, but what if we want to run our experiments on a remote machine instead? Maybe you have access to a very powerful GPU machine on-site or you have some budget to use cloud GPUs. +Running HPO locally is really handy, but what if we want to run our experiments on a remote machine instead? Maybe you have access to a very powerful GPU machine on-site, or you have some budget to use cloud GPUs. This is where the ClearML Agent comes into play. Check out what the agent can do here: - [YouTube video](https://youtu.be/MX3BrXnaULs) @@ -186,6 +190,7 @@ This is where the ClearML Agent comes into play. Check out what the agent can do In short: every experiment tracked by the experiment manager contains enough information to reproduce it on a different machine (installed packages, uncommitted changes etc.). So a ClearML agent does just that: it listens to a queue for incoming tasks and when it finds one, it recreates the environment and runs it while still reporting scalars, plots etc. to the experiment manager. You can turn any machine (a cloud VM, a local GPU machine, your own laptop ... ) into a ClearML agent by simply running: + ```bash clearml-agent daemon --queue [--docker] ``` @@ -194,11 +199,11 @@ clearml-agent daemon --queue [--docker] With our agent running, we can give it some work. Remember from the HPO section that we can clone a task and edit the hyperparameters? We can do that from the interface too! -🪄 Clone the experiment by right clicking it +🪄 Clone the experiment by right-clicking it 🎯 Edit the hyperparameters to what you wish them to be -⏳ Enqueue the task to any of the queues by right clicking it +⏳ Enqueue the task to any of the queues by right-clicking it ![Enqueue a task from the UI](https://github.com/thepycoder/clearml_screenshots/raw/main/enqueue.gif) @@ -206,7 +211,8 @@ With our agent running, we can give it some work. 
Remember from the HPO section Now you can clone a task like we explained above, or simply mark your current script by adding `task.execute_remotely()` and on execution it will be put into a queue, for the agent to start working on! -To run the YOLOv5 training script remotely, all you have to do is add this line to the training.py script after the clearml logger has been instatiated: +To run the YOLOv5 training script remotely, all you have to do is add this line to the training.py script after the clearml logger has been instantiated: + ```python # ... # Loggers @@ -214,16 +220,17 @@ data_dict = None if RANK in {-1, 0}: loggers = Loggers(save_dir, weights, opt, hyp, LOGGER) # loggers instance if loggers.clearml: - loggers.clearml.task.execute_remotely(queue='my_queue') # <------ ADD THIS LINE + loggers.clearml.task.execute_remotely(queue="my_queue") # <------ ADD THIS LINE # Data_dict is either None is user did not choose for ClearML dataset or is filled in by ClearML data_dict = loggers.clearml.data_dict # ... ``` + When running the training script after this change, python will run the script up until that line, after which it will package the code and send it to the queue instead! ### Autoscaling workers -ClearML comes with autoscalers too! This tool will automatically spin up new remote machines in the cloud of your choice (AWS, GCP, Azure) and turn them into ClearML agents for you whenever there are experiments detected in the queue. Once the tasks are processed, the autoscaler will automatically shut down the remote machines and you stop paying! +ClearML comes with autoscalers too! This tool will automatically spin up new remote machines in the cloud of your choice (AWS, GCP, Azure) and turn them into ClearML agents for you whenever there are experiments detected in the queue. Once the tasks are processed, the autoscaler will automatically shut down the remote machines, and you stop paying! Check out the autoscalers getting started video below. 
diff --git a/utils/loggers/comet/README.md b/utils/loggers/comet/README.md index 8a361e2b211d..47e6a45654b8 100644 --- a/utils/loggers/comet/README.md +++ b/utils/loggers/comet/README.md @@ -23,7 +23,7 @@ pip install comet_ml There are two ways to configure Comet with YOLOv5. -You can either set your credentials through enviroment variables +You can either set your credentials through environment variables **Environment Variables** @@ -49,11 +49,12 @@ project_name= # This will default to 'yolov5' python train.py --img 640 --batch 16 --epochs 5 --data coco128.yaml --weights yolov5s.pt ``` -That's it! Comet will automatically log your hyperparameters, command line arguments, training and valiation metrics. You can visualize and analyze your runs in the Comet UI +That's it! Comet will automatically log your hyperparameters, command line arguments, training and validation metrics. You can visualize and analyze your runs in the Comet UI yolo-ui # Try out an Example! + Check out an example of a [completed run here](https://www.comet.com/examples/comet-example-yolov5/a0e29e0e9b984e4a822db2a62d0cb357?experiment-tab=chart&showOutliers=true&smoothing=0&transformY=smoothing&xAxis=step&utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github) Or better yet, try it out yourself in this Colab Notebook @@ -65,6 +66,7 @@ Or better yet, try it out yourself in this Colab Notebook By default, Comet will log the following items ## Metrics + - Box Loss, Object Loss, Classification Loss for the training and validation data - mAP_0.5, mAP_0.5:0.95 metrics for the validation data. 
- Precision and Recall for the validation data @@ -121,7 +123,6 @@ You can control the frequency of logged predictions and the associated images by Here is an [example project using the Panel](https://www.comet.com/examples/comet-example-yolov5?shareable=YcwMiJaZSXfcEXpGOHDD12vA1&utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github) - ```shell python train.py \ --img 640 \ @@ -192,6 +193,7 @@ If you would like to use a dataset from Comet Artifacts, set the `path` variable # contents of artifact.yaml file path: "comet:///:" ``` + Then pass this file to your training script in the following way ```shell @@ -221,7 +223,7 @@ python train.py \ ## Hyperparameter Search with the Comet Optimizer -YOLOv5 is also integrated with Comet's Optimizer, making is simple to visualie hyperparameter sweeps in the Comet UI. +YOLOv5 is also integrated with Comet's Optimizer, making is simple to visualize hyperparameter sweeps in the Comet UI. ### Configuring an Optimizer Sweep diff --git a/utils/loggers/wandb/README.md b/utils/loggers/wandb/README.md deleted file mode 100644 index d78324b4c8e9..000000000000 --- a/utils/loggers/wandb/README.md +++ /dev/null @@ -1,162 +0,0 @@ -📚 This guide explains how to use **Weights & Biases** (W&B) with YOLOv5 🚀. UPDATED 29 September 2021. - -- [About Weights & Biases](#about-weights-&-biases) -- [First-Time Setup](#first-time-setup) -- [Viewing runs](#viewing-runs) -- [Disabling wandb](#disabling-wandb) -- [Advanced Usage: Dataset Versioning and Evaluation](#advanced-usage) -- [Reports: Share your work with the world!](#reports) - -## About Weights & Biases - -Think of [W&B](https://wandb.ai/site?utm_campaign=repo_yolo_wandbtutorial) like GitHub for machine learning models. With a few lines of code, save everything you need to debug, compare and reproduce your models — architecture, hyperparameters, git commits, model weights, GPU usage, and even datasets and predictions. 
- -Used by top researchers including teams at OpenAI, Lyft, Github, and MILA, W&B is part of the new standard of best practices for machine learning. How W&B can help you optimize your machine learning workflows: - -- [Debug](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#Free-2) model performance in real time -- [GPU usage](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#System-4) visualized automatically -- [Custom charts](https://wandb.ai/wandb/customizable-charts/reports/Powerful-Custom-Charts-To-Debug-Model-Peformance--VmlldzoyNzY4ODI) for powerful, extensible visualization -- [Share insights](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#Share-8) interactively with collaborators -- [Optimize hyperparameters](https://docs.wandb.com/sweeps) efficiently -- [Track](https://docs.wandb.com/artifacts) datasets, pipelines, and production models - -## First-Time Setup - -
- Toggle Details -When you first train, W&B will prompt you to create a new account and will generate an **API key** for you. If you are an existing user you can retrieve your key from https://wandb.ai/authorize. This key is used to tell W&B where to log your data. You only need to supply your key once, and then it is remembered on the same device. - -W&B will create a cloud **project** (default is 'YOLOv5') for your training runs, and each new training run will be provided a unique run **name** within that project as project/name. You can also manually set your project and run name as: - -```shell -$ python train.py --project ... --name ... -``` - -YOLOv5 notebook example: Open In Colab Open In Kaggle -Screen Shot 2021-09-29 at 10 23 13 PM - -
- -## Viewing Runs - -
- Toggle Details -Run information streams from your environment to the W&B cloud console as you train. This allows you to monitor and even cancel runs in realtime . All important information is logged: - -- Training & Validation losses -- Metrics: Precision, Recall, mAP@0.5, mAP@0.5:0.95 -- Learning Rate over time -- A bounding box debugging panel, showing the training progress over time -- GPU: Type, **GPU Utilization**, power, temperature, **CUDA memory usage** -- System: Disk I/0, CPU utilization, RAM memory usage -- Your trained model as W&B Artifact -- Environment: OS and Python types, Git repository and state, **training command** - -

Weights & Biases dashboard

-
- -## Disabling wandb - -- training after running `wandb disabled` inside that directory creates no wandb run - ![Screenshot (84)](https://user-images.githubusercontent.com/15766192/143441777-c780bdd7-7cb4-4404-9559-b4316030a985.png) - -- To enable wandb again, run `wandb online` - ![Screenshot (85)](https://user-images.githubusercontent.com/15766192/143441866-7191b2cb-22f0-4e0f-ae64-2dc47dc13078.png) - -## Advanced Usage - -You can leverage W&B artifacts and Tables integration to easily visualize and manage your datasets, models and training evaluations. Here are some quick examples to get you started. - -
-

1: Train and Log Evaluation simultaneousy

- This is an extension of the previous section, but it'll also training after uploading the dataset. This also evaluation Table - Evaluation table compares your predictions and ground truths across the validation set for each epoch. It uses the references to the already uploaded datasets, - so no images will be uploaded from your system more than once. -
- Usage - Code $ python train.py --upload_data val - -![Screenshot from 2021-11-21 17-40-06](https://user-images.githubusercontent.com/15766192/142761183-c1696d8c-3f38-45ab-991a-bb0dfd98ae7d.png) - -
- -

2. Visualize and Version Datasets

- Log, visualize, dynamically query, and understand your data with W&B Tables. You can use the following command to log your dataset as a W&B Table. This will generate a {dataset}_wandb.yaml file which can be used to train from dataset artifact. -
- Usage - Code $ python utils/logger/wandb/log_dataset.py --project ... --name ... --data .. - -![Screenshot (64)](https://user-images.githubusercontent.com/15766192/128486078-d8433890-98a3-4d12-8986-b6c0e3fc64b9.png) - -
- -

3: Train using dataset artifact

- When you upload a dataset as described in the first section, you get a new config file with an added `_wandb` to its name. This file contains the information that - can be used to train a model directly from the dataset artifact. This also logs evaluation -
- Usage - Code $ python train.py --data {data}_wandb.yaml - -![Screenshot (72)](https://user-images.githubusercontent.com/15766192/128979739-4cf63aeb-a76f-483f-8861-1c0100b938a5.png) - -
- -

4: Save model checkpoints as artifacts

- To enable saving and versioning checkpoints of your experiment, pass `--save_period n` with the base cammand, where `n` represents checkpoint interval. - You can also log both the dataset and model checkpoints simultaneously. If not passed, only the final model will be logged - -
- Usage - Code $ python train.py --save_period 1 - -![Screenshot (68)](https://user-images.githubusercontent.com/15766192/128726138-ec6c1f60-639d-437d-b4ee-3acd9de47ef3.png) - -
- -
- -

5: Resume runs from checkpoint artifacts.

-Any run can be resumed using artifacts if the --resume argument starts with wandb-artifact:// prefix followed by the run path, i.e, wandb-artifact://username/project/runid . This doesn't require the model checkpoint to be present on the local system. - -
- Usage - Code $ python train.py --resume wandb-artifact://{run_path} - -![Screenshot (70)](https://user-images.githubusercontent.com/15766192/128728988-4e84b355-6c87-41ae-a591-14aecf45343e.png) - -
- -

6: Resume runs from dataset artifact & checkpoint artifacts.

- Local dataset or model checkpoints are not required. This can be used to resume runs directly on a different device - The syntax is same as the previous section, but you'll need to lof both the dataset and model checkpoints as artifacts, i.e, set bot --upload_dataset or - train from _wandb.yaml file and set --save_period - -
- Usage - Code $ python train.py --resume wandb-artifact://{run_path} - -![Screenshot (70)](https://user-images.githubusercontent.com/15766192/128728988-4e84b355-6c87-41ae-a591-14aecf45343e.png) - -
- -
- -

Reports

-W&B Reports can be created from your saved runs for sharing online. Once a report is created you will receive a link you can use to publically share your results. Here is an example report created from the COCO128 tutorial trainings of all four YOLOv5 models ([link](https://wandb.ai/glenn-jocher/yolov5_tutorial/reports/YOLOv5-COCO128-Tutorial-Results--VmlldzozMDI5OTY)). - -Weights & Biases Reports - -## Environments - -YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled): - -- **Google Colab and Kaggle** notebooks with free GPU: Open In Colab Open In Kaggle -- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart) -- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart) -- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) Docker Pulls - -## Status - -![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg) - -If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), validation ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit. 
diff --git a/utils/loggers/wandb/log_dataset.py b/utils/loggers/wandb/log_dataset.py deleted file mode 100644 index 06e81fb69307..000000000000 --- a/utils/loggers/wandb/log_dataset.py +++ /dev/null @@ -1,27 +0,0 @@ -import argparse - -from wandb_utils import WandbLogger - -from utils.general import LOGGER - -WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' - - -def create_dataset_artifact(opt): - logger = WandbLogger(opt, None, job_type='Dataset Creation') # TODO: return value unused - if not logger.wandb: - LOGGER.info("install wandb using `pip install wandb` to log the dataset") - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path') - parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset') - parser.add_argument('--project', type=str, default='YOLOv5', help='name of W&B Project') - parser.add_argument('--entity', default=None, help='W&B entity') - parser.add_argument('--name', type=str, default='log dataset', help='name of W&B run') - - opt = parser.parse_args() - opt.resume = False # Explicitly disallow resume check for dataset upload job - - create_dataset_artifact(opt) diff --git a/utils/loggers/wandb/sweep.py b/utils/loggers/wandb/sweep.py deleted file mode 100644 index d49ea6f2778b..000000000000 --- a/utils/loggers/wandb/sweep.py +++ /dev/null @@ -1,41 +0,0 @@ -import sys -from pathlib import Path - -import wandb - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[3] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH - -from train import parse_opt, train -from utils.callbacks import Callbacks -from utils.general import increment_path -from utils.torch_utils import select_device - - -def sweep(): - wandb.init() - # Get hyp dict from sweep agent. Copy because train() modifies parameters which confused wandb. 
- hyp_dict = vars(wandb.config).get("_items").copy() - - # Workaround: get necessary opt args - opt = parse_opt(known=True) - opt.batch_size = hyp_dict.get("batch_size") - opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve)) - opt.epochs = hyp_dict.get("epochs") - opt.nosave = True - opt.data = hyp_dict.get("data") - opt.weights = str(opt.weights) - opt.cfg = str(opt.cfg) - opt.data = str(opt.data) - opt.hyp = str(opt.hyp) - opt.project = str(opt.project) - device = select_device(opt.device, batch_size=opt.batch_size) - - # train - train(hyp_dict, opt, device, callbacks=Callbacks()) - - -if __name__ == "__main__": - sweep() diff --git a/utils/loggers/wandb/sweep.yaml b/utils/loggers/wandb/sweep.yaml deleted file mode 100644 index 688b1ea0285f..000000000000 --- a/utils/loggers/wandb/sweep.yaml +++ /dev/null @@ -1,143 +0,0 @@ -# Hyperparameters for training -# To set range- -# Provide min and max values as: -# parameter: -# -# min: scalar -# max: scalar -# OR -# -# Set a specific list of search space- -# parameter: -# values: [scalar1, scalar2, scalar3...] 
-# -# You can use grid, bayesian and hyperopt search strategy -# For more info on configuring sweeps visit - https://docs.wandb.ai/guides/sweeps/configuration - -program: utils/loggers/wandb/sweep.py -method: random -metric: - name: metrics/mAP_0.5 - goal: maximize - -parameters: - # hyperparameters: set either min, max range or values list - data: - value: "data/coco128.yaml" - batch_size: - values: [64] - epochs: - values: [10] - - lr0: - distribution: uniform - min: 1e-5 - max: 1e-1 - lrf: - distribution: uniform - min: 0.01 - max: 1.0 - momentum: - distribution: uniform - min: 0.6 - max: 0.98 - weight_decay: - distribution: uniform - min: 0.0 - max: 0.001 - warmup_epochs: - distribution: uniform - min: 0.0 - max: 5.0 - warmup_momentum: - distribution: uniform - min: 0.0 - max: 0.95 - warmup_bias_lr: - distribution: uniform - min: 0.0 - max: 0.2 - box: - distribution: uniform - min: 0.02 - max: 0.2 - cls: - distribution: uniform - min: 0.2 - max: 4.0 - cls_pw: - distribution: uniform - min: 0.5 - max: 2.0 - obj: - distribution: uniform - min: 0.2 - max: 4.0 - obj_pw: - distribution: uniform - min: 0.5 - max: 2.0 - iou_t: - distribution: uniform - min: 0.1 - max: 0.7 - anchor_t: - distribution: uniform - min: 2.0 - max: 8.0 - fl_gamma: - distribution: uniform - min: 0.0 - max: 4.0 - hsv_h: - distribution: uniform - min: 0.0 - max: 0.1 - hsv_s: - distribution: uniform - min: 0.0 - max: 0.9 - hsv_v: - distribution: uniform - min: 0.0 - max: 0.9 - degrees: - distribution: uniform - min: 0.0 - max: 45.0 - translate: - distribution: uniform - min: 0.0 - max: 0.9 - scale: - distribution: uniform - min: 0.0 - max: 0.9 - shear: - distribution: uniform - min: 0.0 - max: 10.0 - perspective: - distribution: uniform - min: 0.0 - max: 0.001 - flipud: - distribution: uniform - min: 0.0 - max: 1.0 - fliplr: - distribution: uniform - min: 0.0 - max: 1.0 - mosaic: - distribution: uniform - min: 0.0 - max: 1.0 - mixup: - distribution: uniform - min: 0.0 - max: 1.0 - copy_paste: - 
distribution: uniform - min: 0.0 - max: 1.0 diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index 238f4edbf2a0..6bc2ec510d0a 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -1,110 +1,32 @@ -"""Utilities and tools for tracking runs with Weights & Biases.""" +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# WARNING ⚠️ wandb is deprecated and will be removed in future release. +# See supported integrations at https://github.com/ultralytics/yolov5#integrations import logging import os import sys from contextlib import contextmanager from pathlib import Path -from typing import Dict -import yaml -from tqdm import tqdm +from utils.general import LOGGER, colorstr FILE = Path(__file__).resolve() ROOT = FILE.parents[3] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH - -from utils.dataloaders import LoadImagesAndLabels, img2label_paths -from utils.general import LOGGER, check_dataset, check_file +RANK = int(os.getenv('RANK', -1)) +DEPRECATION_WARNING = f"{colorstr('wandb')}: WARNING ⚠️ wandb is deprecated and will be removed in a future release. " \ + f"See supported integrations at https://github.com/ultralytics/yolov5#integrations." 
try: import wandb assert hasattr(wandb, '__version__') # verify package import not local dir + LOGGER.warning(DEPRECATION_WARNING) except (ImportError, AssertionError): wandb = None -RANK = int(os.getenv('RANK', -1)) -WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' - - -def remove_prefix(from_string, prefix=WANDB_ARTIFACT_PREFIX): - return from_string[len(prefix):] - - -def check_wandb_config_file(data_config_file): - wandb_config = '_wandb.'.join(data_config_file.rsplit('.', 1)) # updated data.yaml path - if Path(wandb_config).is_file(): - return wandb_config - return data_config_file - - -def check_wandb_dataset(data_file): - is_trainset_wandb_artifact = False - is_valset_wandb_artifact = False - if isinstance(data_file, dict): - # In that case another dataset manager has already processed it and we don't have to - return data_file - if check_file(data_file) and data_file.endswith('.yaml'): - with open(data_file, errors='ignore') as f: - data_dict = yaml.safe_load(f) - is_trainset_wandb_artifact = isinstance(data_dict['train'], - str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX) - is_valset_wandb_artifact = isinstance(data_dict['val'], - str) and data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX) - if is_trainset_wandb_artifact or is_valset_wandb_artifact: - return data_dict - else: - return check_dataset(data_file) - - -def get_run_info(run_path): - run_path = Path(remove_prefix(run_path, WANDB_ARTIFACT_PREFIX)) - run_id = run_path.stem - project = run_path.parent.stem - entity = run_path.parent.parent.stem - model_artifact_name = 'run_' + run_id + '_model' - return entity, project, run_id, model_artifact_name - - -def check_wandb_resume(opt): - process_wandb_config_ddp_mode(opt) if RANK not in [-1, 0] else None - if isinstance(opt.resume, str): - if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): - if RANK not in [-1, 0]: # For resuming DDP runs - entity, project, run_id, model_artifact_name = get_run_info(opt.resume) - api = wandb.Api() - artifact = 
api.artifact(entity + '/' + project + '/' + model_artifact_name + ':latest') - modeldir = artifact.download() - opt.weights = str(Path(modeldir) / "last.pt") - return True - return None - - -def process_wandb_config_ddp_mode(opt): - with open(check_file(opt.data), errors='ignore') as f: - data_dict = yaml.safe_load(f) # data dict - train_dir, val_dir = None, None - if isinstance(data_dict['train'], str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX): - api = wandb.Api() - train_artifact = api.artifact(remove_prefix(data_dict['train']) + ':' + opt.artifact_alias) - train_dir = train_artifact.download() - train_path = Path(train_dir) / 'data/images/' - data_dict['train'] = str(train_path) - - if isinstance(data_dict['val'], str) and data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX): - api = wandb.Api() - val_artifact = api.artifact(remove_prefix(data_dict['val']) + ':' + opt.artifact_alias) - val_dir = val_artifact.download() - val_path = Path(val_dir) / 'data/images/' - data_dict['val'] = str(val_path) - if train_dir or val_dir: - ddp_data_path = str(Path(val_dir) / 'wandb_local_data.yaml') - with open(ddp_data_path, 'w') as f: - yaml.safe_dump(data_dict, f) - opt.data = ddp_data_path - class WandbLogger(): """Log training runs, datasets, models, and predictions to Weights & Biases. 
@@ -132,38 +54,16 @@ def __init__(self, opt, run_id=None, job_type='Training'): job_type (str) -- To set the job_type for this run """ - # Temporary-fix - if opt.upload_dataset: - opt.upload_dataset = False - # LOGGER.info("Uploading Dataset functionality is not being supported temporarily due to a bug.") - # Pre-training routine -- self.job_type = job_type - self.wandb, self.wandb_run = wandb, None if not wandb else wandb.run + self.wandb, self.wandb_run = wandb, wandb.run if wandb else None self.val_artifact, self.train_artifact = None, None self.train_artifact_path, self.val_artifact_path = None, None self.result_artifact = None self.val_table, self.result_table = None, None - self.bbox_media_panel_images = [] - self.val_table_path_map = None self.max_imgs_to_log = 16 - self.wandb_artifact_data_dict = None self.data_dict = None - # It's more elegant to stick to 1 wandb.init call, - # but useful config data is overwritten in the WandbLogger's wandb.init call - if isinstance(opt.resume, str): # checks resume from artifact - if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): - entity, project, run_id, model_artifact_name = get_run_info(opt.resume) - model_artifact_name = WANDB_ARTIFACT_PREFIX + model_artifact_name - assert wandb, 'install wandb to resume wandb runs' - # Resume wandb-artifact:// runs here| workaround for not overwriting wandb.config - self.wandb_run = wandb.init(id=run_id, - project=project, - entity=entity, - resume='allow', - allow_val_change=True) - opt.resume = model_artifact_name - elif self.wandb: + if self.wandb: self.wandb_run = wandb.init(config=opt, resume="allow", project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem, @@ -172,51 +72,15 @@ def __init__(self, opt, run_id=None, job_type='Training'): job_type=job_type, id=run_id, allow_val_change=True) if not wandb.run else wandb.run + if self.wandb_run: if self.job_type == 'Training': - if opt.upload_dataset: - if not opt.resume: - self.wandb_artifact_data_dict = 
self.check_and_upload_dataset(opt) - if isinstance(opt.data, dict): # This means another dataset manager has already processed the dataset info (e.g. ClearML) # and they will have stored the already processed dict in opt.data self.data_dict = opt.data - elif opt.resume: - # resume from artifact - if isinstance(opt.resume, str) and opt.resume.startswith(WANDB_ARTIFACT_PREFIX): - self.data_dict = dict(self.wandb_run.config.data_dict) - else: # local resume - self.data_dict = check_wandb_dataset(opt.data) - else: - self.data_dict = check_wandb_dataset(opt.data) - self.wandb_artifact_data_dict = self.wandb_artifact_data_dict or self.data_dict - - # write data_dict to config. useful for resuming from artifacts. Do this only when not resuming. - self.wandb_run.config.update({'data_dict': self.wandb_artifact_data_dict}, allow_val_change=True) self.setup_training(opt) - if self.job_type == 'Dataset Creation': - self.wandb_run.config.update({"upload_dataset": True}) - self.data_dict = self.check_and_upload_dataset(opt) - - def check_and_upload_dataset(self, opt): - """ - Check if the dataset format is compatible and upload it as W&B artifact - - arguments: - opt (namespace)-- Commandline arguments for current run - - returns: - Updated dataset info dictionary where local dataset paths are replaced by WAND_ARFACT_PREFIX links. 
- """ - assert wandb, 'Install wandb to upload dataset' - config_path = self.log_dataset_artifact(opt.data, opt.single_cls, - 'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem) - with open(config_path, errors='ignore') as f: - wandb_data_dict = yaml.safe_load(f) - return wandb_data_dict - def setup_training(self, opt): """ Setup the necessary processes for training YOLO models: @@ -231,81 +95,18 @@ def setup_training(self, opt): self.log_dict, self.current_epoch = {}, 0 self.bbox_interval = opt.bbox_interval if isinstance(opt.resume, str): - modeldir, _ = self.download_model_artifact(opt) - if modeldir: - self.weights = Path(modeldir) / "last.pt" + model_dir, _ = self.download_model_artifact(opt) + if model_dir: + self.weights = Path(model_dir) / "last.pt" config = self.wandb_run.config opt.weights, opt.save_period, opt.batch_size, opt.bbox_interval, opt.epochs, opt.hyp, opt.imgsz = str( - self.weights), config.save_period, config.batch_size, config.bbox_interval, config.epochs,\ + self.weights), config.save_period, config.batch_size, config.bbox_interval, config.epochs, \ config.hyp, config.imgsz - data_dict = self.data_dict - if self.val_artifact is None: # If --upload_dataset is set, use the existing artifact, don't download - self.train_artifact_path, self.train_artifact = self.download_dataset_artifact( - data_dict.get('train'), opt.artifact_alias) - self.val_artifact_path, self.val_artifact = self.download_dataset_artifact( - data_dict.get('val'), opt.artifact_alias) - if self.train_artifact_path is not None: - train_path = Path(self.train_artifact_path) / 'data/images/' - data_dict['train'] = str(train_path) - if self.val_artifact_path is not None: - val_path = Path(self.val_artifact_path) / 'data/images/' - data_dict['val'] = str(val_path) - - if self.val_artifact is not None: - self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") - columns = ["epoch", "id", "ground truth", "prediction"] - 
columns.extend(self.data_dict['names']) - self.result_table = wandb.Table(columns) - self.val_table = self.val_artifact.get("val") - if self.val_table_path_map is None: - self.map_val_table_path() if opt.bbox_interval == -1: self.bbox_interval = opt.bbox_interval = (opt.epochs // 10) if opt.epochs > 10 else 1 if opt.evolve or opt.noplots: self.bbox_interval = opt.bbox_interval = opt.epochs + 1 # disable bbox_interval - train_from_artifact = self.train_artifact_path is not None and self.val_artifact_path is not None - # Update the the data_dict to point to local artifacts dir - if train_from_artifact: - self.data_dict = data_dict - - def download_dataset_artifact(self, path, alias): - """ - download the model checkpoint artifact if the path starts with WANDB_ARTIFACT_PREFIX - - arguments: - path -- path of the dataset to be used for training - alias (str)-- alias of the artifact to be download/used for training - - returns: - (str, wandb.Artifact) -- path of the downladed dataset and it's corresponding artifact object if dataset - is found otherwise returns (None, None) - """ - if isinstance(path, str) and path.startswith(WANDB_ARTIFACT_PREFIX): - artifact_path = Path(remove_prefix(path, WANDB_ARTIFACT_PREFIX) + ":" + alias) - dataset_artifact = wandb.use_artifact(artifact_path.as_posix().replace("\\", "/")) - assert dataset_artifact is not None, "'Error: W&B dataset artifact doesn\'t exist'" - datadir = dataset_artifact.download() - return datadir, dataset_artifact - return None, None - - def download_model_artifact(self, opt): - """ - download the model checkpoint artifact if the resume path starts with WANDB_ARTIFACT_PREFIX - - arguments: - opt (namespace) -- Commandline arguments for this run - """ - if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): - model_artifact = wandb.use_artifact(remove_prefix(opt.resume, WANDB_ARTIFACT_PREFIX) + ":latest") - assert model_artifact is not None, 'Error: W&B model artifact doesn\'t exist' - modeldir = 
model_artifact.download() - # epochs_trained = model_artifact.metadata.get('epochs_trained') - total_epochs = model_artifact.metadata.get('total_epochs') - is_finished = total_epochs is None - assert not is_finished, 'training is finished, can only resume incomplete runs.' - return modeldir, model_artifact - return None, None def log_model(self, path, opt, epoch, fitness_score, best_model=False): """ @@ -332,190 +133,8 @@ def log_model(self, path, opt, epoch, fitness_score, best_model=False): aliases=['latest', 'last', 'epoch ' + str(self.current_epoch), 'best' if best_model else '']) LOGGER.info(f"Saving model artifact on epoch {epoch + 1}") - def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=False): - """ - Log the dataset as W&B artifact and return the new data file with W&B links - - arguments: - data_file (str) -- the .yaml file with information about the dataset like - path, classes etc. - single_class (boolean) -- train multi-class data as single-class - project (str) -- project name. Used to construct the artifact path - overwrite_config (boolean) -- overwrites the data.yaml file if set to true otherwise creates a new - file with _wandb postfix. Eg -> data_wandb.yaml - - returns: - the new .yaml file with artifact links. 
it can be used to start training directly from artifacts - """ - upload_dataset = self.wandb_run.config.upload_dataset - log_val_only = isinstance(upload_dataset, str) and upload_dataset == 'val' - self.data_dict = check_dataset(data_file) # parse and check - data = dict(self.data_dict) - nc, names = (1, ['item']) if single_cls else (int(data['nc']), data['names']) - names = {k: v for k, v in enumerate(names)} # to index dictionary - - # log train set - if not log_val_only: - self.train_artifact = self.create_dataset_table(LoadImagesAndLabels(data['train'], rect=True, batch_size=1), - names, - name='train') if data.get('train') else None - if data.get('train'): - data['train'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'train') - - self.val_artifact = self.create_dataset_table( - LoadImagesAndLabels(data['val'], rect=True, batch_size=1), names, name='val') if data.get('val') else None - if data.get('val'): - data['val'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'val') - - path = Path(data_file) - # create a _wandb.yaml file with artifacts links if both train and test set are logged - if not log_val_only: - path = (path.stem if overwrite_config else path.stem + '_wandb') + '.yaml' # updated data.yaml path - path = ROOT / 'data' / path - data.pop('download', None) - data.pop('path', None) - with open(path, 'w') as f: - yaml.safe_dump(data, f) - LOGGER.info(f"Created dataset config file {path}") - - if self.job_type == 'Training': # builds correct artifact pipeline graph - if not log_val_only: - self.wandb_run.log_artifact( - self.train_artifact) # calling use_artifact downloads the dataset. NOT NEEDED! 
- self.wandb_run.use_artifact(self.val_artifact) - self.val_artifact.wait() - self.val_table = self.val_artifact.get('val') - self.map_val_table_path() - else: - self.wandb_run.log_artifact(self.train_artifact) - self.wandb_run.log_artifact(self.val_artifact) - return path - - def map_val_table_path(self): - """ - Map the validation dataset Table like name of file -> it's id in the W&B Table. - Useful for - referencing artifacts for evaluation. - """ - self.val_table_path_map = {} - LOGGER.info("Mapping dataset") - for i, data in enumerate(tqdm(self.val_table.data)): - self.val_table_path_map[data[3]] = data[0] - - def create_dataset_table(self, dataset: LoadImagesAndLabels, class_to_id: Dict[int, str], name: str = 'dataset'): - """ - Create and return W&B artifact containing W&B Table of the dataset. - - arguments: - dataset -- instance of LoadImagesAndLabels class used to iterate over the data to build Table - class_to_id -- hash map that maps class ids to labels - name -- name of the artifact - - returns: - dataset artifact to be logged or used - """ - # TODO: Explore multiprocessing to slpit this loop parallely| This is essential for speeding up the the logging - artifact = wandb.Artifact(name=name, type="dataset") - img_files = tqdm([dataset.path]) if isinstance(dataset.path, str) and Path(dataset.path).is_dir() else None - img_files = tqdm(dataset.im_files) if not img_files else img_files - for img_file in img_files: - if Path(img_file).is_dir(): - artifact.add_dir(img_file, name='data/images') - labels_path = 'labels'.join(dataset.path.rsplit('images', 1)) - artifact.add_dir(labels_path, name='data/labels') - else: - artifact.add_file(img_file, name='data/images/' + Path(img_file).name) - label_file = Path(img2label_paths([img_file])[0]) - artifact.add_file(str(label_file), name='data/labels/' + - label_file.name) if label_file.exists() else None - table = wandb.Table(columns=["id", "train_image", "Classes", "name"]) - class_set = wandb.Classes([{'id': id, 
'name': name} for id, name in class_to_id.items()]) - for si, (img, labels, paths, shapes) in enumerate(tqdm(dataset)): - box_data, img_classes = [], {} - for cls, *xywh in labels[:, 1:].tolist(): - cls = int(cls) - box_data.append({ - "position": { - "middle": [xywh[0], xywh[1]], - "width": xywh[2], - "height": xywh[3]}, - "class_id": cls, - "box_caption": "%s" % (class_to_id[cls])}) - img_classes[cls] = class_to_id[cls] - boxes = {"ground_truth": {"box_data": box_data, "class_labels": class_to_id}} # inference-space - table.add_data(si, wandb.Image(paths, classes=class_set, boxes=boxes), list(img_classes.values()), - Path(paths).name) - artifact.add(table, name) - return artifact - - def log_training_progress(self, predn, path, names): - """ - Build evaluation Table. Uses reference from validation dataset table. - - arguments: - predn (list): list of predictions in the native space in the format - [xmin, ymin, xmax, ymax, confidence, class] - path (str): local path of the current evaluation image - names (dict(int, str)): hash map that maps class ids to labels - """ - class_set = wandb.Classes([{'id': id, 'name': name} for id, name in names.items()]) - box_data = [] - avg_conf_per_class = [0] * len(self.data_dict['names']) - pred_class_count = {} - for *xyxy, conf, cls in predn.tolist(): - if conf >= 0.25: - cls = int(cls) - box_data.append({ - "position": { - "minX": xyxy[0], - "minY": xyxy[1], - "maxX": xyxy[2], - "maxY": xyxy[3]}, - "class_id": cls, - "box_caption": f"{names[cls]} {conf:.3f}", - "scores": { - "class_score": conf}, - "domain": "pixel"}) - avg_conf_per_class[cls] += conf - - if cls in pred_class_count: - pred_class_count[cls] += 1 - else: - pred_class_count[cls] = 1 - - for pred_class in pred_class_count.keys(): - avg_conf_per_class[pred_class] = avg_conf_per_class[pred_class] / pred_class_count[pred_class] - - boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space - id = self.val_table_path_map[Path(path).name] 
- self.result_table.add_data(self.current_epoch, id, self.val_table.data[id][1], - wandb.Image(self.val_table.data[id][1], boxes=boxes, classes=class_set), - *avg_conf_per_class) - def val_one_image(self, pred, predn, path, names, im): - """ - Log validation data for one image. updates the result Table if validation dataset is uploaded and log bbox media panel - - arguments: - pred (list): list of scaled predictions in the format - [xmin, ymin, xmax, ymax, confidence, class] - predn (list): list of predictions in the native space - [xmin, ymin, xmax, ymax, confidence, class] - path (str): local path of the current evaluation image - """ - if self.val_table and self.result_table: # Log Table if Val dataset is uploaded as artifact - self.log_training_progress(predn, path, names) - - if len(self.bbox_media_panel_images) < self.max_imgs_to_log and self.current_epoch > 0: - if self.current_epoch % self.bbox_interval == 0: - box_data = [{ - "position": { - "minX": xyxy[0], - "minY": xyxy[1], - "maxX": xyxy[2], - "maxY": xyxy[3]}, - "class_id": int(cls), - "box_caption": f"{names[int(cls)]} {conf:.3f}", - "scores": { - "class_score": conf}, - "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()] - boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space - self.bbox_media_panel_images.append(wandb.Image(im, boxes=boxes, caption=path.name)) + pass def log(self, log_dict): """ @@ -528,7 +147,7 @@ def log(self, log_dict): for key, value in log_dict.items(): self.log_dict[key] = value - def end_epoch(self, best_result=False): + def end_epoch(self): """ commit the log_dict, model artifacts and Tables to W&B and flush the log_dict. 
@@ -537,8 +156,6 @@ def end_epoch(self, best_result=False): """ if self.wandb_run: with all_logging_disabled(): - if self.bbox_media_panel_images: - self.log_dict["BoundingBoxDebugger"] = self.bbox_media_panel_images try: wandb.log(self.log_dict) except BaseException as e: @@ -547,21 +164,7 @@ def end_epoch(self, best_result=False): ) self.wandb_run.finish() self.wandb_run = None - self.log_dict = {} - self.bbox_media_panel_images = [] - if self.result_artifact: - self.result_artifact.add(self.result_table, 'result') - wandb.log_artifact(self.result_artifact, - aliases=[ - 'latest', 'last', 'epoch ' + str(self.current_epoch), - ('best' if best_result else '')]) - - wandb.log({"evaluation": self.result_table}) - columns = ["epoch", "id", "ground truth", "prediction"] - columns.extend(self.data_dict['names']) - self.result_table = wandb.Table(columns) - self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") def finish_run(self): """ @@ -572,6 +175,7 @@ def finish_run(self): with all_logging_disabled(): wandb.log(self.log_dict) wandb.run.finish() + LOGGER.warning(DEPRECATION_WARNING) @contextmanager From b1a3126e5d9ffaddd2ae11362a0087c5541f08f1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 6 Feb 2023 22:12:26 +0400 Subject: [PATCH 044/128] Bump docker/build-push-action from 3 to 4 (#10911) * Bump docker/build-push-action from 3 to 4 Bumps [docker/build-push-action](https://github.com/docker/build-push-action) from 3 to 4. - [Release notes](https://github.com/docker/build-push-action/releases) - [Commits](https://github.com/docker/build-push-action/compare/v3...v4) --- updated-dependencies: - dependency-name: docker/build-push-action dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] * Docker (#10913) * Dockerfile standardizations and improvements * README fixes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .github/workflows/docker.yml | 6 +++--- README.md | 7 ------- utils/docker/Dockerfile | 2 +- utils/docker/Dockerfile-arm64 | 8 +++----- utils/docker/Dockerfile-cpu | 6 +++--- 5 files changed, 10 insertions(+), 19 deletions(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 1d0bd30b22cb..4f7fff00677c 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -29,7 +29,7 @@ jobs: password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Build and push arm64 image - uses: docker/build-push-action@v3 + uses: docker/build-push-action@v4 continue-on-error: true with: context: . @@ -39,7 +39,7 @@ jobs: tags: ultralytics/yolov5:latest-arm64 - name: Build and push CPU image - uses: docker/build-push-action@v3 + uses: docker/build-push-action@v4 continue-on-error: true with: context: . @@ -48,7 +48,7 @@ jobs: tags: ultralytics/yolov5:latest-cpu - name: Build and push GPU image - uses: docker/build-push-action@v3 + uses: docker/build-push-action@v4 continue-on-error: true with: context: . diff --git a/README.md b/README.md index e836abf6d551..33468d0635ad 100644 --- a/README.md +++ b/README.md @@ -446,13 +446,6 @@ Get started in seconds with our verified environments. Click each icon below for
-##
App
- -Run YOLOv5 models on your iOS or Android device by downloading the [Ultralytics App](https://ultralytics.com/app_install)! - - -Ultralytics mobile app - ##
Contribute
We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible. Please see our [Contributing Guide](CONTRIBUTING.md) to get started, and fill out the [YOLOv5 Survey](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experiences. Thank you to all our contributors! diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index e18b2ac69678..b9448101b94c 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -28,7 +28,7 @@ RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app COPY requirements.txt . RUN python3 -m pip install --upgrade pip wheel RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook \ - coremltools onnx onnx-simplifier onnxruntime openvino-dev>=2022.3 + coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2022.3' # tensorflow tensorflowjs \ # Create working directory diff --git a/utils/docker/Dockerfile-arm64 b/utils/docker/Dockerfile-arm64 index eed1410793a1..aea764d3b86b 100644 --- a/utils/docker/Dockerfile-arm64 +++ b/utils/docker/Dockerfile-arm64 @@ -18,11 +18,9 @@ RUN apt install --no-install-recommends -y python3-pip git zip curl htop gcc lib # Install pip packages COPY requirements.txt . 
RUN python3 -m pip install --upgrade pip wheel -RUN pip install --no-cache -r requirements.txt ultralytics gsutil notebook \ - tensorflow-aarch64 - # tensorflowjs \ - # onnx onnx-simplifier onnxruntime \ - # coremltools openvino-dev \ +RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook \ + coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2022.3' + # tensorflow-aarch64 tensorflowjs \ # Create working directory RUN mkdir -p /usr/src/app diff --git a/utils/docker/Dockerfile-cpu b/utils/docker/Dockerfile-cpu index 558f81f00584..356c06df727d 100644 --- a/utils/docker/Dockerfile-cpu +++ b/utils/docker/Dockerfile-cpu @@ -18,9 +18,9 @@ RUN apt install --no-install-recommends -y python3-pip git zip curl htop libgl1- # Install pip packages COPY requirements.txt . RUN python3 -m pip install --upgrade pip wheel -RUN pip install --no-cache -r requirements.txt ultralytics albumentations gsutil notebook \ - coremltools onnx onnx-simplifier onnxruntime tensorflow-cpu tensorflowjs \ - # openvino-dev \ +RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook \ + coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2022.3' \ + # tensorflow tensorflowjs \ --extra-index-url https://download.pytorch.org/whl/cpu # Create working directory From 8b5a7d417929ac51ce27a1fb1264b01dab72d612 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 6 Feb 2023 22:41:03 +0400 Subject: [PATCH 045/128] Update Dockerfile (#10916) Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index b9448101b94c..c68b8dcdfd62 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -17,7 +17,7 @@ RUN apt install --no-install-recommends -y gcc git zip curl htop libgl1-mesa-glx # RUN alias python=python3 # Create working directory -RUN mkdir -p /usr/src/app +RUN rm -rf /usr/src/app && mkdir -p /usr/src/app 
WORKDIR /usr/src/app # Copy contents From 90f23519c854b96cf108a6179d214c54b3b5bda3 Mon Sep 17 00:00:00 2001 From: Izam Mohammed <106471909+izam-mohammed@users.noreply.github.com> Date: Tue, 7 Feb 2023 00:11:23 +0530 Subject: [PATCH 046/128] Improved the language in CONTRIBUTING.md (#10906) Signed-off-by: Izam Mohammed <106471909+izam-mohammed@users.noreply.github.com> Co-authored-by: Glenn Jocher --- CONTRIBUTING.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 7498f8995d40..71857faddb89 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -23,13 +23,13 @@ Select `requirements.txt` to update by clicking on it in GitHub. ### 2. Click 'Edit this file' -Button is in top-right corner. +The button is in the top-right corner.

PR_step2

### 3. Make Changes -Change `matplotlib` version from `3.2.2` to `3.3`. +Change the `matplotlib` version from `3.2.2` to `3.3`.

PR_step3

@@ -62,7 +62,7 @@ To allow your work to be integrated as seamlessly as possible, we advise you to: If you spot a problem with YOLOv5 please submit a Bug Report! For us to start investigating a possible problem we need to be able to reproduce it ourselves first. We've created a few -short guidelines below to help users provide what we need in order to get started. +short guidelines below to help users provide what we need to get started. When asking a question, people will be better able to provide help if you provide **code** that they can easily understand and use to **reproduce** the problem. This is referred to by community members as creating @@ -76,14 +76,14 @@ the problem should be: In addition to the above requirements, for [Ultralytics](https://ultralytics.com/) to provide assistance your code should be: -- ✅ **Current** – Verify that your code is up-to-date with current +- ✅ **Current** – Verify that your code is up-to-date with the current GitHub [master](https://github.com/ultralytics/yolov5/tree/master), and if necessary `git pull` or `git clone` a new copy to ensure your problem has not already been resolved by previous commits. - ✅ **Unmodified** – Your problem must be reproducible without any modifications to the codebase in this repository. [Ultralytics](https://ultralytics.com/) does not provide support for custom code ⚠️. If you believe your problem meets all of the above criteria, please close this issue and raise a new one using the 🐛 -**Bug Report** [template](https://github.com/ultralytics/yolov5/issues/new/choose) and providing +**Bug Report** [template](https://github.com/ultralytics/yolov5/issues/new/choose) and provide a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example) to help us better understand and diagnose your problem. 
From 9ba18266b2e0ae085d975a987eb68d98a87155ce Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 7 Feb 2023 01:58:47 +0400 Subject: [PATCH 047/128] Update Dockerfile (#10917) * Update Dockerfile Signed-off-by: Glenn Jocher * Update Dockerfile-arm64 Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile | 8 -------- utils/docker/Dockerfile-arm64 | 2 +- 2 files changed, 1 insertion(+), 9 deletions(-) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index c68b8dcdfd62..0349c50526e0 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -31,14 +31,6 @@ RUN pip install --no-cache -r requirements.txt albumentations comet gsutil noteb coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2022.3' # tensorflow tensorflowjs \ -# Create working directory -RUN mkdir -p /usr/src/app -WORKDIR /usr/src/app - -# Copy contents -# COPY . /usr/src/app (issues as not a .git directory) -RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app - # Set environment variables ENV OMP_NUM_THREADS=1 diff --git a/utils/docker/Dockerfile-arm64 b/utils/docker/Dockerfile-arm64 index aea764d3b86b..2b08f2baaf76 100644 --- a/utils/docker/Dockerfile-arm64 +++ b/utils/docker/Dockerfile-arm64 @@ -19,7 +19,7 @@ RUN apt install --no-install-recommends -y python3-pip git zip curl htop gcc lib COPY requirements.txt . 
RUN python3 -m pip install --upgrade pip wheel RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook \ - coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2022.3' + coremltools onnx onnxruntime 'openvino-dev>=2022.3' # tensorflow-aarch64 tensorflowjs \ # Create working directory From c3c8e71d7a58c8d07db5e015b5311a5fffda7f00 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 7 Feb 2023 02:15:54 +0400 Subject: [PATCH 048/128] Update Dockerfile-arm64 (#10918) Docker fixes --- utils/docker/Dockerfile-arm64 | 4 ++-- utils/docker/Dockerfile-cpu | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/utils/docker/Dockerfile-arm64 b/utils/docker/Dockerfile-arm64 index 2b08f2baaf76..0279dfb8c997 100644 --- a/utils/docker/Dockerfile-arm64 +++ b/utils/docker/Dockerfile-arm64 @@ -18,8 +18,8 @@ RUN apt install --no-install-recommends -y python3-pip git zip curl htop gcc lib # Install pip packages COPY requirements.txt . RUN python3 -m pip install --upgrade pip wheel -RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook \ - coremltools onnx onnxruntime 'openvino-dev>=2022.3' +RUN pip install --no-cache -r requirements.txt albumentations gsutil notebook \ + coremltools onnx onnxruntime # tensorflow-aarch64 tensorflowjs \ # Create working directory diff --git a/utils/docker/Dockerfile-cpu b/utils/docker/Dockerfile-cpu index 356c06df727d..19b2962d4cab 100644 --- a/utils/docker/Dockerfile-cpu +++ b/utils/docker/Dockerfile-cpu @@ -18,7 +18,7 @@ RUN apt install --no-install-recommends -y python3-pip git zip curl htop libgl1- # Install pip packages COPY requirements.txt . 
RUN python3 -m pip install --upgrade pip wheel -RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook \ +RUN pip install --no-cache -r requirements.txt albumentations gsutil notebook \ coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2022.3' \ # tensorflow tensorflowjs \ --extra-index-url https://download.pytorch.org/whl/cpu From ea8508a638affa3cb150542ed733fc3aa70be3c2 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 8 Feb 2023 11:27:08 +0400 Subject: [PATCH 049/128] [pre-commit.ci] pre-commit suggestions (#10919) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - https://github.com/pre-commit/mirrors-yapf → https://github.com/google/yapf - [github.com/PyCQA/flake8: 5.0.4 → 6.0.0](https://github.com/PyCQA/flake8/compare/5.0.4...6.0.0) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 83425ad6cf78..b188048e63a6 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -37,7 +37,7 @@ repos: # - id: isort # name: Sort imports - - repo: https://github.com/pre-commit/mirrors-yapf + - repo: https://github.com/google/yapf rev: v0.32.0 hooks: - id: yapf @@ -54,7 +54,7 @@ repos: # exclude: "README.md|README.zh-CN.md|CONTRIBUTING.md" - repo: https://github.com/PyCQA/flake8 - rev: 5.0.4 + rev: 6.0.0 hooks: - id: flake8 name: PEP8 From cec1b9bc923cdd235baa3b9b5c80e3700bc9b1dc Mon Sep 17 00:00:00 2001 From: Mahmoud Hegab Date: Tue, 7 Feb 2023 23:32:29 -0800 Subject: [PATCH 050/128] add the dropout_p parameter (#10805) * add the dropout_p parameter Signed-off-by: Mahmoud Hegab * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- 
Signed-off-by: Mahmoud Hegab Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- models/common.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/models/common.py b/models/common.py index 8b5ec1c786d8..71340688d2e0 100644 --- a/models/common.py +++ b/models/common.py @@ -846,12 +846,19 @@ def forward(self, x): class Classify(nn.Module): # YOLOv5 classification head, i.e. x(b,c1,20,20) to x(b,c2) - def __init__(self, c1, c2, k=1, s=1, p=None, g=1): # ch_in, ch_out, kernel, stride, padding, groups + def __init__(self, + c1, + c2, + k=1, + s=1, + p=None, + g=1, + dropout_p=0.0): # ch_in, ch_out, kernel, stride, padding, groups, dropout probability super().__init__() c_ = 1280 # efficientnet_b0 size self.conv = Conv(c1, c_, k, s, autopad(k, p), g) self.pool = nn.AdaptiveAvgPool2d(1) # to x(b,c_,1,1) - self.drop = nn.Dropout(p=0.0, inplace=True) + self.drop = nn.Dropout(p=dropout_p, inplace=True) self.linear = nn.Linear(c_, c2) # to x(b,c2) def forward(self, x): From a3c0fd05216a1fdb9f1ba0aff2e5421819b871ed Mon Sep 17 00:00:00 2001 From: Snyk bot Date: Thu, 9 Feb 2023 07:45:58 +0000 Subject: [PATCH 051/128] [Snyk] Fix for 2 vulnerabilities (#10931) * fix: requirements.txt to reduce vulnerabilities The following vulnerabilities are fixed by pinning transitive dependencies: - https://snyk.io/vuln/SNYK-PYTHON-SETUPTOOLS-3180412 - https://snyk.io/vuln/SNYK-PYTHON-WHEEL-3180413 * Update requirements.txt Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index c0e4a91d7dd1..ce205f43c5dd 100644 --- a/requirements.txt +++ b/requirements.txt @@ -39,6 +39,7 @@ seaborn>=0.11.0 # openvino-dev # OpenVINO export # Deploy ---------------------------------------------------------------------- +wheel>=0.38.0 # Snyk vulnerability fix # 
tritonclient[all]~=2.24.0 # Extras ---------------------------------------------------------------------- From 976fa99e5c1d7f5b49f8e7ae458ff3bf93459135 Mon Sep 17 00:00:00 2001 From: Snyk bot Date: Thu, 9 Feb 2023 07:52:22 +0000 Subject: [PATCH 052/128] [Snyk] Security upgrade gunicorn from 19.9.0 to 19.10.0 (#10933) fix: utils/google_app_engine/additional_requirements.txt to reduce vulnerabilities The following vulnerabilities are fixed by pinning transitive dependencies: - https://snyk.io/vuln/SNYK-PYTHON-GUNICORN-541164 --- utils/google_app_engine/additional_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/google_app_engine/additional_requirements.txt b/utils/google_app_engine/additional_requirements.txt index 42d7ffc0eed8..b6b496feaa7b 100644 --- a/utils/google_app_engine/additional_requirements.txt +++ b/utils/google_app_engine/additional_requirements.txt @@ -1,4 +1,4 @@ # add these requirements in your app on top of the existing ones pip==21.1 Flask==1.0.2 -gunicorn==19.9.0 +gunicorn==19.10.0 From a270b4f1252b65bf60f3996cf9ec9ac01ce3a466 Mon Sep 17 00:00:00 2001 From: Snyk bot Date: Thu, 9 Feb 2023 07:54:04 +0000 Subject: [PATCH 053/128] [Snyk] Security upgrade setuptools from 39.0.1 to 65.5.1 (#10934) * fix: requirements.txt to reduce vulnerabilities The following vulnerabilities are fixed by pinning transitive dependencies: - https://snyk.io/vuln/SNYK-PYTHON-SETUPTOOLS-3180412 * Update requirements.txt Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index ce205f43c5dd..eee15ddf93c4 100644 --- a/requirements.txt +++ b/requirements.txt @@ -39,6 +39,7 @@ seaborn>=0.11.0 # openvino-dev # OpenVINO export # Deploy ---------------------------------------------------------------------- +setuptools>=65.5.1 # Snyk vulnerability fix wheel>=0.38.0 # Snyk vulnerability fix # 
tritonclient[all]~=2.24.0 From e326252ee4af03b4514f20262b719bf0a9468161 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 9 Feb 2023 16:57:18 +0400 Subject: [PATCH 054/128] Security improvements (#10942) * Security improvements * Security improvements * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/dataloaders.py | 2 +- utils/general.py | 11 ++++++----- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index cbb3114e94d8..02c2a79f5747 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -52,7 +52,7 @@ def get_hash(paths): # Returns a single hash value of a list of paths (files or dirs) size = sum(os.path.getsize(p) for p in paths if os.path.exists(p)) # sizes - h = hashlib.md5(str(size).encode()) # hash sizes + h = hashlib.sha256(str(size).encode()) # hash sizes h.update(''.join(paths).encode()) # hash paths return h.hexdigest() # return hash diff --git a/utils/general.py b/utils/general.py index 0bbcb6e7334c..63cc29bfb35d 100644 --- a/utils/general.py +++ b/utils/general.py @@ -14,6 +14,7 @@ import random import re import signal +import subprocess import sys import time import urllib @@ -551,7 +552,7 @@ def check_dataset(data, autodownload=True): r = None # success elif s.startswith('bash '): # bash script LOGGER.info(f'Running {s} ...') - r = os.system(s) + r = subprocess.run(s, shell=True) else: # python script r = exec(s, {'yaml': data}) # return None dt = f'({round(time.time() - t, 1)}s)' @@ -648,9 +649,9 @@ def download_one(url, dir): if is_zipfile(f): unzip_file(f, dir) # unzip elif is_tarfile(f): - os.system(f'tar xf {f} --directory {f.parent}') # unzip + subprocess.run(f'tar xf {f} --directory {f.parent}', shell=True) # unzip elif f.suffix == '.gz': - os.system(f'tar xfz {f} --directory {f.parent}') # unzip + 
subprocess.run(f'tar xfz {f} --directory {f.parent}', shell=True) # unzip if delete: f.unlink() # remove zip @@ -1022,7 +1023,7 @@ def print_mutation(keys, results, hyp, save_dir, bucket, prefix=colorstr('evolve if bucket: url = f'gs://{bucket}/evolve.csv' if gsutil_getsize(url) > (evolve_csv.stat().st_size if evolve_csv.exists() else 0): - os.system(f'gsutil cp {url} {save_dir}') # download evolve.csv if larger than local + subprocess.run(f'gsutil cp {url} {save_dir}', shell=True) # download evolve.csv if larger than local # Log to evolve.csv s = '' if evolve_csv.exists() else (('%20s,' * n % keys).rstrip(',') + '\n') # add header @@ -1046,7 +1047,7 @@ def print_mutation(keys, results, hyp, save_dir, bucket, prefix=colorstr('evolve for x in vals) + '\n\n') if bucket: - os.system(f'gsutil cp {evolve_csv} {evolve_yaml} gs://{bucket}') # upload + subprocess.run(f'gsutil cp {evolve_csv} {evolve_yaml} gs://{bucket}', shell=True) # upload def apply_classifier(x, model, img, im0): From 61407c93cc0cbabcfbd6de51a3c8293b99219e2e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 9 Feb 2023 17:18:27 +0400 Subject: [PATCH 055/128] Security improvements for subprocess.run() (#10943) * Security improvements * Security improvements * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/general.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/utils/general.py b/utils/general.py index 63cc29bfb35d..4d5e94bc98f9 100644 --- a/utils/general.py +++ b/utils/general.py @@ -649,9 +649,9 @@ def download_one(url, dir): if is_zipfile(f): unzip_file(f, dir) # unzip elif is_tarfile(f): - subprocess.run(f'tar xf {f} --directory {f.parent}', shell=True) # unzip + subprocess.run(['tar', 'xf', f, '--directory', f.parent], check=True) # unzip elif f.suffix == '.gz': - 
subprocess.run(f'tar xfz {f} --directory {f.parent}', shell=True) # unzip + subprocess.run(['tar', 'xfz', f, '--directory', f.parent], check=True) # unzip if delete: f.unlink() # remove zip @@ -1023,7 +1023,7 @@ def print_mutation(keys, results, hyp, save_dir, bucket, prefix=colorstr('evolve if bucket: url = f'gs://{bucket}/evolve.csv' if gsutil_getsize(url) > (evolve_csv.stat().st_size if evolve_csv.exists() else 0): - subprocess.run(f'gsutil cp {url} {save_dir}', shell=True) # download evolve.csv if larger than local + subprocess.run(['gsutil', 'cp', f'{url}', f'{save_dir}']) # download evolve.csv if larger than local # Log to evolve.csv s = '' if evolve_csv.exists() else (('%20s,' * n % keys).rstrip(',') + '\n') # add header @@ -1047,7 +1047,7 @@ def print_mutation(keys, results, hyp, save_dir, bucket, prefix=colorstr('evolve for x in vals) + '\n\n') if bucket: - subprocess.run(f'gsutil cp {evolve_csv} {evolve_yaml} gs://{bucket}', shell=True) # upload + subprocess.run(['gsutil', 'cp', f'{evolve_csv}', f'{evolve_yaml}', f'gs://{bucket}']) # upload def apply_classifier(x, model, img, im0): From 238da321cb365533a99d36a1e768d1d4259b6766 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 9 Feb 2023 17:58:24 +0400 Subject: [PATCH 056/128] Security3 (#10944) * Security improvements * Security improvements * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- segment/train.py | 4 +++- segment/val.py | 3 ++- train.py | 4 +++- utils/downloads.py | 3 ++- utils/general.py | 3 +-- val.py | 3 ++- 6 files changed, 13 insertions(+), 7 deletions(-) diff --git a/segment/train.py b/segment/train.py index 3f32d2100a75..883c8b0a2b62 100644 --- a/segment/train.py +++ b/segment/train.py @@ -19,6 +19,7 @@ import math import os import random +import subprocess import sys import time from copy import deepcopy @@ -597,7 +598,8 @@ def 
main(opt, callbacks=Callbacks()): # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv' if opt.bucket: - os.system(f'gsutil cp gs://{opt.bucket}/evolve.csv {evolve_csv}') # download evolve.csv if exists + subprocess.run( + f'gsutil cp gs://{opt.bucket}/evolve.csv {evolve_csv}'.split()) # download evolve.csv if exists for _ in range(opt.evolve): # generations to evolve if evolve_csv.exists(): # if evolve.csv exists: select best hyps and mutate diff --git a/segment/val.py b/segment/val.py index 248d2bee9be1..8168b5407c1d 100644 --- a/segment/val.py +++ b/segment/val.py @@ -23,6 +23,7 @@ import argparse import json import os +import subprocess import sys from multiprocessing.pool import ThreadPool from pathlib import Path @@ -461,7 +462,7 @@ def main(opt): r, _, t = run(**vars(opt), plots=False) y.append(r + t) # results and times np.savetxt(f, y, fmt='%10.4g') # save - os.system('zip -r study.zip study_*.txt') + subprocess.run('zip -r study.zip study_*.txt'.split()) plot_val_study(x=x) # plot else: raise NotImplementedError(f'--task {opt.task} not in ("train", "val", "test", "speed", "study")') diff --git a/train.py b/train.py index 5d75f22b6335..db65f2c74c6c 100644 --- a/train.py +++ b/train.py @@ -19,6 +19,7 @@ import math import os import random +import subprocess import sys import time from copy import deepcopy @@ -571,7 +572,8 @@ def main(opt, callbacks=Callbacks()): # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv' if opt.bucket: - os.system(f'gsutil cp gs://{opt.bucket}/evolve.csv {evolve_csv}') # download evolve.csv if exists + subprocess.run( + f'gsutil cp gs://{opt.bucket}/evolve.csv {evolve_csv}'.split()) # download evolve.csv if exists for _ in range(opt.evolve): # generations to evolve if evolve_csv.exists(): # if evolve.csv exists: select best 
hyps and mutate diff --git a/utils/downloads.py b/utils/downloads.py index 72ea87340eb9..a3ff9274066e 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -50,7 +50,8 @@ def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''): if file.exists(): file.unlink() # remove partial downloads LOGGER.info(f'ERROR: {e}\nRe-attempting {url2 or url} to {file}...') - os.system(f"curl -# -L '{url2 or url}' -o '{file}' --retry 3 -C -") # curl download, retry and resume on fail + subprocess.run( + f"curl -# -L '{url2 or url}' -o '{file}' --retry 3 -C -".split()) # curl download, retry and resume on fail finally: if not file.exists() or file.stat().st_size < min_bytes: # check if file.exists(): diff --git a/utils/general.py b/utils/general.py index 4d5e94bc98f9..4e5c7147fd40 100644 --- a/utils/general.py +++ b/utils/general.py @@ -631,8 +631,7 @@ def download_one(url, dir): for i in range(retry + 1): if curl: s = 'sS' if threads > 1 else '' # silent - r = os.system( - f'curl -# -{s}L "{url}" -o "{f}" --retry 9 -C -') # curl download with retry, continue + r = subprocess.run(f'curl -# -{s}L "{url}" -o "{f}" --retry 9 -C -'.split()) success = r == 0 else: torch.hub.download_url_to_file(url, f, progress=threads == 1) # torch download diff --git a/val.py b/val.py index 599aa1afdd4a..62fa2c980988 100644 --- a/val.py +++ b/val.py @@ -22,6 +22,7 @@ import argparse import json import os +import subprocess import sys from pathlib import Path @@ -397,7 +398,7 @@ def main(opt): r, _, t = run(**vars(opt), plots=False) y.append(r + t) # results and times np.savetxt(f, y, fmt='%10.4g') # save - os.system('zip -r study.zip study_*.txt') + subprocess.run('zip -r study.zip study_*.txt'.split()) plot_val_study(x=x) # plot else: raise NotImplementedError(f'--task {opt.task} not in ("train", "val", "test", "speed", "study")') From 35d6d9f408e5f1e02e5edc8f4bd6976bcf3bff8b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 9 Feb 2023 20:32:58 +0400 Subject: [PATCH 057/128] Update 
Dockerfile-arm64 (#10945) * Update Dockerfile-arm64 Signed-off-by: Glenn Jocher * Update Dockerfile-cpu Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile-arm64 | 2 +- utils/docker/Dockerfile-cpu | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/docker/Dockerfile-arm64 b/utils/docker/Dockerfile-arm64 index 0279dfb8c997..b2e381f089d2 100644 --- a/utils/docker/Dockerfile-arm64 +++ b/utils/docker/Dockerfile-arm64 @@ -3,7 +3,7 @@ # Image is aarch64-compatible for Apple M1 and other ARM architectures i.e. Jetson Nano and Raspberry Pi # Start FROM Ubuntu image https://hub.docker.com/_/ubuntu -FROM arm64v8/ubuntu:20.04 +FROM arm64v8/ubuntu:latest # Downloads to user config dir ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ diff --git a/utils/docker/Dockerfile-cpu b/utils/docker/Dockerfile-cpu index 19b2962d4cab..dcc71924564b 100644 --- a/utils/docker/Dockerfile-cpu +++ b/utils/docker/Dockerfile-cpu @@ -3,7 +3,7 @@ # Image is CPU-optimized for ONNX, OpenVINO and PyTorch YOLOv5 deployments # Start FROM Ubuntu image https://hub.docker.com/_/ubuntu -FROM ubuntu:20.04 +FROM ubuntu:latest # Downloads to user config dir ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ From a895e98172a595252d1f8b5064de344b7ecafbec Mon Sep 17 00:00:00 2001 From: Snyk bot Date: Thu, 9 Feb 2023 20:21:38 +0000 Subject: [PATCH 058/128] [Snyk] Security upgrade ubuntu from latest to rolling (#10946) * fix: utils/docker/Dockerfile-cpu to reduce vulnerabilities The following vulnerabilities are fixed with an upgrade: - https://snyk.io/vuln/SNYK-UBUNTU2204-OPENSSL-3314744 - https://snyk.io/vuln/SNYK-UBUNTU2204-OPENSSL-3314768 - https://snyk.io/vuln/SNYK-UBUNTU2204-OPENSSL-3314792 - https://snyk.io/vuln/SNYK-UBUNTU2204-OPENSSL-3314816 - https://snyk.io/vuln/SNYK-UBUNTU2204-OPENSSL-3314840 * Update 
Dockerfile-arm64 Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- utils/docker/Dockerfile-arm64 | 2 +- utils/docker/Dockerfile-cpu | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/docker/Dockerfile-arm64 b/utils/docker/Dockerfile-arm64 index b2e381f089d2..7023c6a4bb1f 100644 --- a/utils/docker/Dockerfile-arm64 +++ b/utils/docker/Dockerfile-arm64 @@ -3,7 +3,7 @@ # Image is aarch64-compatible for Apple M1 and other ARM architectures i.e. Jetson Nano and Raspberry Pi # Start FROM Ubuntu image https://hub.docker.com/_/ubuntu -FROM arm64v8/ubuntu:latest +FROM arm64v8/ubuntu:rolling # Downloads to user config dir ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ diff --git a/utils/docker/Dockerfile-cpu b/utils/docker/Dockerfile-cpu index dcc71924564b..06bad9a3790d 100644 --- a/utils/docker/Dockerfile-cpu +++ b/utils/docker/Dockerfile-cpu @@ -3,7 +3,7 @@ # Image is CPU-optimized for ONNX, OpenVINO and PyTorch YOLOv5 deployments # Start FROM Ubuntu image https://hub.docker.com/_/ubuntu -FROM ubuntu:latest +FROM ubuntu:rolling # Downloads to user config dir ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ From fd38767ea84453107ec3a19893fb2dd4e5034216 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Fri, 10 Feb 2023 20:00:40 +0530 Subject: [PATCH 059/128] Update README and greetings with YOLOv8 info (#10735) * update * update * update * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Ayush Chaurasia * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update greetings.yml Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see 
https://pre-commit.ci * Update greetings.yml Signed-off-by: Glenn Jocher * Update greetings.yml Signed-off-by: Glenn Jocher * Update greetings.yml Signed-off-by: Glenn Jocher * Update README * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update README * Update README * Update README with YOLOv8 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update README with YOLOv8 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update README with YOLOv8 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update README with YOLOv8 * Update README with YOLOv8 --------- Signed-off-by: Glenn Jocher Signed-off-by: Ayush Chaurasia Co-authored-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .github/workflows/greetings.yml | 12 +- README.md | 234 ++++++++++++++++---------------- README.zh-CN.md | 46 +++---- 3 files changed, 148 insertions(+), 144 deletions(-) diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index 5e1589c340ed..8d780a23e2b5 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -25,11 +25,9 @@ jobs: issue-message: | 👋 Hello @${{ github.actor }}, thank you for your interest in YOLOv5 🚀! Please visit our ⭐️ [Tutorials](https://github.com/ultralytics/yolov5/wiki#tutorials) to get started, where you can find quickstart guides for simple tasks like [Custom Data Training](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data) all the way to advanced concepts like [Hyperparameter Evolution](https://github.com/ultralytics/yolov5/issues/607). - If this is a 🐛 Bug Report, please provide screenshots and **minimum viable code to reproduce your issue**, otherwise we can not help you. 
+ If this is a 🐛 Bug Report, please provide a **minimum reproducible example** to help us debug it. - If this is a custom training ❓ Question, please provide as much information as possible, including dataset images, training logs, screenshots, and a public link to online [W&B logging](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data#visualize) if available. - - For business inquiries or professional support requests please visit https://ultralytics.com or email support@ultralytics.com. + If this is a custom training ❓ Question, please provide as much information as possible, including dataset image examples and training logs, and verify you are following our [Tips for Best Training Results](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results). ## Requirements @@ -55,3 +53,9 @@ jobs: If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on MacOS, Windows, and Ubuntu every 24 hours and on every commit. + ## YOLOv8 + + Ultralytics YOLOv8 🚀 is our new cutting-edge, state-of-the-art (SOTA) model released at [https://github.com/ultralytics/ultralytics](https://github.com/ultralytics/ultralytics). YOLOv8 is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of object detection, image segmentation and image classification tasks. 
See the [YOLOv8 Docs] for details and get started with: + ```bash + pip install ultralytics + ``` diff --git a/README.md b/README.md index 33468d0635ad..e00ec478a85b 100644 --- a/README.md +++ b/README.md @@ -45,105 +45,25 @@ To request an Enterprise License please complete the form at
- -##
Ultralytics Live Session
- -
- -⚡️ Stay tuned for [Ultralytics Live Session 4](https://www.youtube.com/watch?v=FXIbVnat2eU) ⚡️ - -Over the past couple of years we found that 22% percent of you experience difficulty in deploying your vision AI models. To improve this step in the ML pipeline, we've partnered with [Neural Magic](https://bit.ly/yolov5-neuralmagic), whose DeepSparse tool takes advantage of sparsity and low-precision arithmetic within neural networks to offer exceptional performance on commodity hardware. - -Glenn will be joined by Michael Goin of Neural Magic on February 8th at 12 EST/18 CET to discuss how to achieve GPU-class performance for YOLOv5 on CPUs. Be sure to come prepared with any questions you have about the model deployment process! - -To join the webinar, visit our [YouTube Channel](https://www.youtube.com/@Ultralytics/streams) and turn on your notifications! - - - - -
- -##
Segmentation ⭐ NEW
- -
- - -
- -Our new YOLOv5 [release v7.0](https://github.com/ultralytics/yolov5/releases/v7.0) instance segmentation models are the fastest and most accurate in the world, beating all current [SOTA benchmarks](https://paperswithcode.com/sota/real-time-instance-segmentation-on-mscoco). We've made them super simple to train, validate and deploy. See full details in our [Release Notes](https://github.com/ultralytics/yolov5/releases/v7.0) and visit our [YOLOv5 Segmentation Colab Notebook](https://github.com/ultralytics/yolov5/blob/master/segment/tutorial.ipynb) for quickstart tutorials. - -
- Segmentation Checkpoints -
-We trained YOLOv5 segmentations models on COCO for 300 epochs at image size 640 using A100 GPUs. We exported all models to ONNX FP32 for CPU speed tests and to TensorRT FP16 for GPU speed tests. We ran all speed tests on Google [Colab Pro](https://colab.research.google.com/signup) notebooks for easy reproducibility. +##
YOLOv8 🚀 NEW
-| Model | size
(pixels) | mAPbox
50-95 | mAPmask
50-95 | Train time
300 epochs
A100 (hours) | Speed
ONNX CPU
(ms) | Speed
TRT A100
(ms) | params
(M) | FLOPs
@640 (B) | -| ------------------------------------------------------------------------------------------ | --------------------- | -------------------- | --------------------- | --------------------------------------------- | ------------------------------ | ------------------------------ | ------------------ | ---------------------- | -| [YOLOv5n-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-seg.pt) | 640 | 27.6 | 23.4 | 80:17 | **62.7** | **1.2** | **2.0** | **7.1** | -| [YOLOv5s-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt) | 640 | 37.6 | 31.7 | 88:16 | 173.3 | 1.4 | 7.6 | 26.4 | -| [YOLOv5m-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-seg.pt) | 640 | 45.0 | 37.1 | 108:36 | 427.0 | 2.2 | 22.0 | 70.8 | -| [YOLOv5l-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-seg.pt) | 640 | 49.0 | 39.9 | 66:43 (2x) | 857.4 | 2.9 | 47.9 | 147.7 | -| [YOLOv5x-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-seg.pt) | 640 | **50.7** | **41.4** | 62:56 (3x) | 1579.2 | 4.5 | 88.8 | 265.7 | +We are thrilled to announce the launch of Ultralytics YOLOv8 🚀, our NEW cutting-edge, state-of-the-art (SOTA) model +released at **[https://github.com/ultralytics/ultralytics](https://github.com/ultralytics/ultralytics)**. +YOLOv8 is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of +object detection, image segmentation and image classification tasks. -- All checkpoints are trained to 300 epochs with SGD optimizer with `lr0=0.01` and `weight_decay=5e-5` at image size 640 and all default settings.
Runs logged to https://wandb.ai/glenn-jocher/YOLOv5_v70_official -- **Accuracy** values are for single-model single-scale on COCO dataset.
Reproduce by `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt` -- **Speed** averaged over 100 inference images using a [Colab Pro](https://colab.research.google.com/signup) A100 High-RAM instance. Values indicate inference speed only (NMS adds about 1ms per image).
Reproduce by `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt --batch 1` -- **Export** to ONNX at FP32 and TensorRT at FP16 done with `export.py`.
Reproduce by `python export.py --weights yolov5s-seg.pt --include engine --device 0 --half` - -
- -
- Segmentation Usage Examples  Open In Colab +See the [YOLOv8 Docs](https://docs.ultralytics.com) for details and get started with: -### Train - -YOLOv5 segmentation training supports auto-download COCO128-seg segmentation dataset with `--data coco128-seg.yaml` argument and manual download of COCO-segments dataset with `bash data/scripts/get_coco.sh --train --val --segments` and then `python train.py --data coco.yaml`. - -```bash -# Single-GPU -python segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 - -# Multi-GPU DDP -python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 --device 0,1,2,3 -``` - -### Val - -Validate YOLOv5s-seg mask mAP on COCO dataset: - -```bash -bash data/scripts/get_coco.sh --val --segments # download COCO val segments split (780MB, 5000 images) -python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # validate -``` - -### Predict - -Use pretrained YOLOv5m-seg.pt to predict bus.jpg: - -```bash -python segment/predict.py --weights yolov5m-seg.pt --data data/images/bus.jpg -``` - -```python -model = torch.hub.load( - "ultralytics/yolov5", "custom", "yolov5m-seg.pt" -) # load from PyTorch Hub (WARNING: inference not yet supported) -``` - -| ![zidane](https://user-images.githubusercontent.com/26833433/203113421-decef4c4-183d-4a0a-a6c2-6435b33bc5d3.jpg) | ![bus](https://user-images.githubusercontent.com/26833433/203113416-11fe0025-69f7-4874-a0a6-65d0bfe2999a.jpg) | -| ---------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | - -### Export - -Export YOLOv5s-seg model to ONNX and TensorRT: - -```bash -python export.py --weights yolov5s-seg.pt --include onnx engine --img 640 --device 0 +```commandline +pip install ultralytics ``` -
+
+ + +
##
Documentation
@@ -312,17 +232,17 @@ YOLOv5 has been designed to be super easy to get started and simple to learn. We | Model | size
(pixels) | mAPval
50-95 | mAPval
50 | Speed
CPU b1
(ms) | Speed
V100 b1
(ms) | Speed
V100 b32
(ms) | params
(M) | FLOPs
@640 (B) | | ----------------------------------------------------------------------------------------------- | --------------------- | -------------------- | ----------------- | ---------------------------- | ----------------------------- | ------------------------------ | ------------------ | ---------------------- | -| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** | -| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 | -| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 | -| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 | -| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 | +| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** | +| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 | +| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 | +| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 | +| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 | | | | | | | | | | | -| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 | -| 
[YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | -| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | -| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | -| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x6.pt)
+ [TTA] | 1280
1536 | 55.0
**55.8** | 72.7
**72.7** | 3136
- | 26.2
- | 19.4
- | 140.7
- | 209.8
- | +| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 | +| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | +| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | +| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | +| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x6.pt)
+ [TTA] | 1280
1536 | 55.0
**55.8** | 72.7
**72.7** | 3136
- | 26.2
- | 19.4
- | 140.7
- | 209.8
- |
Table Notes @@ -334,7 +254,87 @@ YOLOv5 has been designed to be super easy to get started and simple to learn. We
-##
Classification ⭐ NEW
+##
Segmentation
+ +Our new YOLOv5 [release v7.0](https://github.com/ultralytics/yolov5/releases/v7.0) instance segmentation models are the fastest and most accurate in the world, beating all current [SOTA benchmarks](https://paperswithcode.com/sota/real-time-instance-segmentation-on-mscoco). We've made them super simple to train, validate and deploy. See full details in our [Release Notes](https://github.com/ultralytics/yolov5/releases/v7.0) and visit our [YOLOv5 Segmentation Colab Notebook](https://github.com/ultralytics/yolov5/blob/master/segment/tutorial.ipynb) for quickstart tutorials. + +
+ Segmentation Checkpoints + +
+ + +
+
+We trained YOLOv5 segmentation models on COCO for 300 epochs at image size 640 using A100 GPUs. We exported all models to ONNX FP32 for CPU speed tests and to TensorRT FP16 for GPU speed tests. We ran all speed tests on Google [Colab Pro](https://colab.research.google.com/signup) notebooks for easy reproducibility.
+
+| Model | size
(pixels) | mAPbox
50-95 | mAPmask
50-95 | Train time
300 epochs
A100 (hours) | Speed
ONNX CPU
(ms) | Speed
TRT A100
(ms) | params
(M) | FLOPs
@640 (B) | +| ------------------------------------------------------------------------------------------ | --------------------- | -------------------- | --------------------- | --------------------------------------------- | ------------------------------ | ------------------------------ | ------------------ | ---------------------- | +| [YOLOv5n-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-seg.pt) | 640 | 27.6 | 23.4 | 80:17 | **62.7** | **1.2** | **2.0** | **7.1** | +| [YOLOv5s-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt) | 640 | 37.6 | 31.7 | 88:16 | 173.3 | 1.4 | 7.6 | 26.4 | +| [YOLOv5m-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-seg.pt) | 640 | 45.0 | 37.1 | 108:36 | 427.0 | 2.2 | 22.0 | 70.8 | +| [YOLOv5l-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-seg.pt) | 640 | 49.0 | 39.9 | 66:43 (2x) | 857.4 | 2.9 | 47.9 | 147.7 | +| [YOLOv5x-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-seg.pt) | 640 | **50.7** | **41.4** | 62:56 (3x) | 1579.2 | 4.5 | 88.8 | 265.7 | + +- All checkpoints are trained to 300 epochs with SGD optimizer with `lr0=0.01` and `weight_decay=5e-5` at image size 640 and all default settings.
Runs logged to https://wandb.ai/glenn-jocher/YOLOv5_v70_official +- **Accuracy** values are for single-model single-scale on COCO dataset.
Reproduce by `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt` +- **Speed** averaged over 100 inference images using a [Colab Pro](https://colab.research.google.com/signup) A100 High-RAM instance. Values indicate inference speed only (NMS adds about 1ms per image).
Reproduce by `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt --batch 1` +- **Export** to ONNX at FP32 and TensorRT at FP16 done with `export.py`.
Reproduce by `python export.py --weights yolov5s-seg.pt --include engine --device 0 --half` + +
+ +
+ Segmentation Usage Examples  Open In Colab + +### Train + +YOLOv5 segmentation training supports auto-download COCO128-seg segmentation dataset with `--data coco128-seg.yaml` argument and manual download of COCO-segments dataset with `bash data/scripts/get_coco.sh --train --val --segments` and then `python train.py --data coco.yaml`. + +```bash +# Single-GPU +python segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 + +# Multi-GPU DDP +python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 --device 0,1,2,3 +``` + +### Val + +Validate YOLOv5s-seg mask mAP on COCO dataset: + +```bash +bash data/scripts/get_coco.sh --val --segments # download COCO val segments split (780MB, 5000 images) +python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # validate +``` + +### Predict + +Use pretrained YOLOv5m-seg.pt to predict bus.jpg: + +```bash +python segment/predict.py --weights yolov5m-seg.pt --data data/images/bus.jpg +``` + +```python +model = torch.hub.load( + "ultralytics/yolov5", "custom", "yolov5m-seg.pt" +) # load from PyTorch Hub (WARNING: inference not yet supported) +``` + +| ![zidane](https://user-images.githubusercontent.com/26833433/203113421-decef4c4-183d-4a0a-a6c2-6435b33bc5d3.jpg) | ![bus](https://user-images.githubusercontent.com/26833433/203113416-11fe0025-69f7-4874-a0a6-65d0bfe2999a.jpg) | +| ---------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | + +### Export + +Export YOLOv5s-seg model to ONNX and TensorRT: + +```bash +python export.py --weights yolov5s-seg.pt --include onnx engine --img 640 --device 0 +``` + +
+ +##
Classification
YOLOv5 [release v6.2](https://github.com/ultralytics/yolov5/releases) brings support for classification model training, validation and deployment! See full details in our [Release Notes](https://github.com/ultralytics/yolov5/releases/v6.2) and visit our [YOLOv5 Classification Colab Notebook](https://github.com/ultralytics/yolov5/blob/master/classify/tutorial.ipynb) for quickstart tutorials. @@ -347,21 +347,21 @@ We trained YOLOv5-cls classification models on ImageNet for 90 epochs using a 4x | Model | size
(pixels) | acc
top1 | acc
top5 | Training
90 epochs
4xA100 (hours) | Speed
ONNX CPU
(ms) | Speed
TensorRT V100
(ms) | params
(M) | FLOPs
@224 (B) | | -------------------------------------------------------------------------------------------------- | --------------------- | ---------------- | ---------------- | -------------------------------------------- | ------------------------------ | ----------------------------------- | ------------------ | ---------------------- | -| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** | -| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 | -| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 | -| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 | -| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 | +| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** | +| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 | +| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 | +| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 | +| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 | | | | | | | | | | | -| 
[ResNet18](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 | -| [ResNet34](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 | -| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 | -| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 | +| [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 | +| [ResNet34](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 | +| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 | +| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 | | | | | | | | | | | -| [EfficientNet_b0](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 | -| [EfficientNet_b1](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 | -| [EfficientNet_b2](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 | -| [EfficientNet_b3](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 | +| [EfficientNet_b0](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 
1.3 | 5.3 | 1.0 | +| [EfficientNet_b1](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 | +| [EfficientNet_b2](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 | +| [EfficientNet_b3](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 |
Table Notes (click to expand) @@ -463,7 +463,7 @@ YOLOv5 is available under two different licenses: ##
Contact
-For YOLOv5 bugs and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues). For professional support please [Contact Us](https://ultralytics.com/contact). +For YOLOv5 bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues) or the [Ultralytics Community Forum](https://community.ultralytics.com/).
diff --git a/README.zh-CN.md b/README.zh-CN.md index b69d3921df99..bd38e8f457be 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -290,17 +290,17 @@ YOLOv5 超级容易上手,简单易学。我们优先考虑现实世界的结 | 模型 | 尺寸
(像素) | mAPval
50-95 | mAPval
50 | 推理速度
CPU b1
(ms) | 推理速度
V100 b1
(ms) | 速度
V100 b32
(ms) | 参数量
(M) | FLOPs
@640 (B) | | ---------------------------------------------------------------------------------------------- | --------------- | -------------------- | ----------------- | --------------------------- | ---------------------------- | --------------------------- | --------------- | ---------------------- | -| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** | -| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 | -| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 | -| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 | -| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 | +| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** | +| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 | +| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 | +| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 | +| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 | | | | | | | | | | | -| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 | -| 
[YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | -| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | -| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | -| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x6.pt)
+[TTA] | 1280
1536 | 55.0
**55.8** | 72.7
**72.7** | 3136
- | 26.2
- | 19.4
- | 140.7
- | 209.8
- | +| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 | +| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | +| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | +| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | +| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x6.pt)
+[TTA] | 1280
1536 | 55.0
**55.8** | 72.7
**72.7** | 3136
- | 26.2
- | 19.4
- | 140.7
- | 209.8
- |
笔记 @@ -325,21 +325,21 @@ YOLOv5 [release v6.2](https://github.com/ultralytics/yolov5/releases) 带来对 | 模型 | 尺寸
(像素) | acc
top1 | acc
top5 | 训练时长
90 epochs
4xA100(小时) | 推理速度
ONNX CPU
(ms) | 推理速度
TensorRT V100
(ms) | 参数
(M) | FLOPs
@640 (B) | | -------------------------------------------------------------------------------------------------- | --------------- | ---------------- | ---------------- | ------------------------------------ | ----------------------------- | ---------------------------------- | -------------- | ---------------------- | -| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** | -| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 | -| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 | -| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 | -| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 | +| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** | +| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 | +| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 | +| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 | +| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 | | | | | | | | | | | -| 
[ResNet18](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 | -| [Resnetzch](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 | -| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 | -| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 | +| [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 | +| [Resnetzch](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 | +| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 | +| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 | | | | | | | | | | | -| [EfficientNet_b0](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 | -| [EfficientNet_b1](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 | -| [EfficientNet_b2](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 | -| [EfficientNet_b3](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 | +| [EfficientNet_b0](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 
| 1.3 | 5.3 | 1.0 | +| [EfficientNet_b1](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 | +| [EfficientNet_b2](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 | +| [EfficientNet_b3](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 |
Table Notes (点击以展开) From 80e54473905c08b1c4c771056a0f5c1a261736d8 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 10 Feb 2023 18:59:22 +0400 Subject: [PATCH 060/128] Dockerfile `openssl` security update (#10949) Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index 0349c50526e0..54927c03eb80 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -16,6 +16,10 @@ RUN TZ=Etc/UTC apt install -y tzdata RUN apt install --no-install-recommends -y gcc git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg # RUN alias python=python3 +# Security updates +# https://security.snyk.io/vuln/SNYK-UBUNTU1804-OPENSSL-3314796 +RUN apt install --no-install-recommends -y openssl + # Create working directory RUN rm -rf /usr/src/app && mkdir -p /usr/src/app WORKDIR /usr/src/app From d389840f66bb95c150d8c0e4d97759b07d21e821 Mon Sep 17 00:00:00 2001 From: David Strahm Date: Fri, 10 Feb 2023 16:11:08 +0100 Subject: [PATCH 061/128] Allow int8 quantization for export_tfjs (#10948) * Allow int8 quantization for export_tfjs --int8 param currently has no effect on export_tfjs. With this change, ` python export.py --weights ../path/to/best.pt --include tfjs --int8` will add the --quantize_uint8 param to the tensorflowjs_converter script, greatly reducing model size for web usage. 
Signed-off-by: David Strahm * Update Dockerfile Signed-off-by: Glenn Jocher --------- Signed-off-by: David Strahm Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- export.py | 8 +++++--- utils/docker/Dockerfile | 9 ++++----- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/export.py b/export.py index 9ca3441bc66a..1bf0532dde34 100644 --- a/export.py +++ b/export.py @@ -426,7 +426,7 @@ def export_edgetpu(file, prefix=colorstr('Edge TPU:')): @try_export -def export_tfjs(file, prefix=colorstr('TensorFlow.js:')): +def export_tfjs(file, int8, prefix=colorstr('TensorFlow.js:')): # YOLOv5 TensorFlow.js export check_requirements('tensorflowjs') import tensorflowjs as tfjs @@ -436,7 +436,9 @@ def export_tfjs(file, prefix=colorstr('TensorFlow.js:')): f_pb = file.with_suffix('.pb') # *.pb path f_json = f'{f}/model.json' # *.json path - cmd = f'tensorflowjs_converter --input_format=tf_frozen_model ' \ + int8_export = ' --quantize_uint8 ' if int8 else '' + + cmd = f'tensorflowjs_converter --input_format=tf_frozen_model {int8_export}' \ f'--output_node_names=Identity,Identity_1,Identity_2,Identity_3 {f_pb} {f}' subprocess.run(cmd.split()) @@ -588,7 +590,7 @@ def run( f[8], _ = export_edgetpu(file) add_tflite_metadata(f[8] or f[7], metadata, num_outputs=len(s_model.outputs)) if tfjs: - f[9], _ = export_tfjs(file) + f[9], _ = export_tfjs(file, int8) if paddle: # PaddlePaddle f[10], _ = export_paddle(model, im, file, metadata) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index 54927c03eb80..cfe7b0635c28 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -13,13 +13,12 @@ ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Aria ENV DEBIAN_FRONTEND noninteractive RUN apt update RUN TZ=Etc/UTC apt install -y tzdata -RUN apt install --no-install-recommends -y gcc git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg +RUN apt install --no-install-recommends -y gcc git zip curl htop 
libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg \ + # Security updates + # https://security.snyk.io/vuln/SNYK-UBUNTU1804-OPENSSL-3314796 + openssl # RUN alias python=python3 -# Security updates -# https://security.snyk.io/vuln/SNYK-UBUNTU1804-OPENSSL-3314796 -RUN apt install --no-install-recommends -y openssl - # Create working directory RUN rm -rf /usr/src/app && mkdir -p /usr/src/app WORKDIR /usr/src/app From 5c3eba664e228d0416285e94954a8a42751bf98b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 10 Feb 2023 21:19:08 +0400 Subject: [PATCH 062/128] Update Dockerfile `apt upgrade openssl` (#10951) Update Dockerfile upgrade `openssl` Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index cfe7b0635c28..b5d2af9fb08e 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -13,12 +13,13 @@ ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Aria ENV DEBIAN_FRONTEND noninteractive RUN apt update RUN TZ=Etc/UTC apt install -y tzdata -RUN apt install --no-install-recommends -y gcc git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg \ - # Security updates - # https://security.snyk.io/vuln/SNYK-UBUNTU1804-OPENSSL-3314796 - openssl +RUN apt install --no-install-recommends -y gcc git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg # RUN alias python=python3 +# Security updates +# https://security.snyk.io/vuln/SNYK-UBUNTU1804-OPENSSL-3314796 +RUN apt upgrade --no-install-recommends -y openssl + # Create working directory RUN rm -rf /usr/src/app && mkdir -p /usr/src/app WORKDIR /usr/src/app From 416a132ceab4d0e2cd4857e8c1e02950c10d80d7 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 11 Feb 2023 03:07:13 +0400 Subject: [PATCH 063/128] Update README.md (#10952) * Update README.md Signed-off-by: Glenn Jocher * Update README.zh-CN.md Signed-off-by: Glenn Jocher * 
Update partner logo hosting * Update partner logo hosting --------- Signed-off-by: Glenn Jocher --- README.md | 11 ++++++----- README.zh-CN.md | 11 ++++++----- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index e00ec478a85b..25d12b289d09 100644 --- a/README.md +++ b/README.md @@ -185,16 +185,16 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - | Roboflow | ClearML ⭐ NEW | Comet ⭐ NEW | Neural Magic ⭐ NEW | @@ -452,7 +452,8 @@ We love your input! We want to make contributing to YOLOv5 as easy and transpare - + + ##
License
diff --git a/README.zh-CN.md b/README.zh-CN.md index bd38e8f457be..c581842c6556 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -243,16 +243,16 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - | Roboflow | ClearML ⭐ 新 | Comet ⭐ 新 | Neural Magic ⭐ 新 | @@ -436,7 +436,8 @@ python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --inclu - + + ##
License
From 25c17370dd0bc6f6b42cc29592750cf3334797dd Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 11 Feb 2023 12:45:18 +0400 Subject: [PATCH 064/128] Update greetings.yml (#10955) * Update greetings.yml Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .github/workflows/greetings.yml | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index 8d780a23e2b5..42a2463585a8 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -53,9 +53,13 @@ jobs: If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on MacOS, Windows, and Ubuntu every 24 hours and on every commit. - ## YOLOv8 + ## Introducing YOLOv8 🚀 - Ultralytics YOLOv8 🚀 is our new cutting-edge, state-of-the-art (SOTA) model released at [https://github.com/ultralytics/ultralytics](https://github.com/ultralytics/ultralytics). YOLOv8 is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of object detection, image segmentation and image classification tasks. 
See the [YOLOv8 Docs] for details and get started with: + We're excited to announce the launch of our latest state-of-the-art (SOTA) object detection model for 2023 - [YOLOv8](https://github.com/ultralytics/ultralytics) 🚀! + + Designed to be fast, accurate, and easy to use, YOLOv8 is an ideal choice for a wide range of object detection, image segmentation and image classification tasks. With YOLOv8, you'll be able to quickly and accurately detect objects in real-time, streamline your workflows, and achieve new levels of accuracy in your projects. + + Check out our [YOLOv8 Docs](https://docs.ultralytics.com/) for details and get started with: ```bash pip install ultralytics ``` From fa4bdbe14d33b3aa74e2eac5bdb940cc4b337198 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 11 Feb 2023 14:24:49 +0400 Subject: [PATCH 065/128] Update README.zh-CN.md (#10956) * Update README.zh-CN.md Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update README.md Signed-off-by: Glenn Jocher * Update README.zh-CN.md Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- README.md | 2 +- README.zh-CN.md | 181 +++++++++++++++++++++++++----------------------- 2 files changed, 97 insertions(+), 86 deletions(-) diff --git a/README.md b/README.md index 25d12b289d09..3a0e2fe1a188 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@

- +

[English](README.md) | [简体中文](README.zh-CN.md) diff --git a/README.zh-CN.md b/README.zh-CN.md index c581842c6556..c25dc0c3326a 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -1,7 +1,7 @@

- +

[英文](README.md)|[简体中文](README.zh-CN.md)
@@ -45,87 +45,23 @@ YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表实例分割模型 ⭐ 新
- - - -我们新的 YOLOv5 [release v7.0](https://github.com/ultralytics/yolov5/releases/v7.0) 实例分割模型是世界上最快和最准确的模型,击败所有当前 [SOTA 基准](https://paperswithcode.com/sota/real-time-instance-segmentation-on-mscoco)。我们使它非常易于训练、验证和部署。更多细节请查看 [发行说明](https://github.com/ultralytics/yolov5/releases/v7.0) 或访问我们的 [YOLOv5 分割 Colab 笔记本](https://github.com/ultralytics/yolov5/blob/master/segment/tutorial.ipynb) 以快速入门。 - -
- 实例分割模型列表 - -
- -我们使用 A100 GPU 在 COCO 上以 640 图像大小训练了 300 epochs 得到 YOLOv5 分割模型。我们将所有模型导出到 ONNX FP32 以进行 CPU 速度测试,并导出到 TensorRT FP16 以进行 GPU 速度测试。为了便于再现,我们在 Google [Colab Pro](https://colab.research.google.com/signup) 上进行了所有速度测试。 - -| 模型 | 尺寸
(像素) | mAPbox
50-95 | mAPmask
50-95 | 训练时长
300 epochs
A100 GPU(小时) | 推理速度
ONNX CPU
(ms) | 推理速度
TRT A100
(ms) | 参数量
(M) | FLOPs
@640 (B) | -| ------------------------------------------------------------------------------------------ | --------------- | -------------------- | --------------------- | --------------------------------------- | ----------------------------- | ----------------------------- | --------------- | ---------------------- | -| [YOLOv5n-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-seg.pt) | 640 | 27.6 | 23.4 | 80:17 | **62.7** | **1.2** | **2.0** | **7.1** | -| [YOLOv5s-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt) | 640 | 37.6 | 31.7 | 88:16 | 173.3 | 1.4 | 7.6 | 26.4 | -| [YOLOv5m-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-seg.pt) | 640 | 45.0 | 37.1 | 108:36 | 427.0 | 2.2 | 22.0 | 70.8 | -| [YOLOv5l-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-seg.pt) | 640 | 49.0 | 39.9 | 66:43 (2x) | 857.4 | 2.9 | 47.9 | 147.7 | -| [YOLOv5x-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-seg.pt) | 640 | **50.7** | **41.4** | 62:56 (3x) | 1579.2 | 4.5 | 88.8 | 265.7 | - -- 所有模型使用 SGD 优化器训练, 都使用 `lr0=0.01` 和 `weight_decay=5e-5` 参数, 图像大小为 640 。
训练 log 可以查看 https://wandb.ai/glenn-jocher/YOLOv5_v70_official -- **准确性**结果都在 COCO 数据集上,使用单模型单尺度测试得到。
复现命令 `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt` -- **推理速度**是使用 100 张图像推理时间进行平均得到,测试环境使用 [Colab Pro](https://colab.research.google.com/signup) 上 A100 高 RAM 实例。结果仅表示推理速度(NMS 每张图像增加约 1 毫秒)。
复现命令 `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt --batch 1` -- **模型转换**到 FP32 的 ONNX 和 FP16 的 TensorRT 脚本为 `export.py`.
运行命令 `python export.py --weights yolov5s-seg.pt --include engine --device 0 --half` - -
- -
- 分割模型使用示例  Open In Colab - -### 训练 - -YOLOv5分割训练支持自动下载 COCO128-seg 分割数据集,用户仅需在启动指令中包含 `--data coco128-seg.yaml` 参数。 若要手动下载,使用命令 `bash data/scripts/get_coco.sh --train --val --segments`, 在下载完毕后,使用命令 `python train.py --data coco.yaml` 开启训练。 - -```bash -# 单 GPU -python segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 +##
YOLOv8 🚀 NEW
-# 多 GPU, DDP 模式 -python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 --device 0,1,2,3 -``` +We are thrilled to announce the launch of Ultralytics YOLOv8 🚀, our NEW cutting-edge, state-of-the-art (SOTA) model +released at **[https://github.com/ultralytics/ultralytics](https://github.com/ultralytics/ultralytics)**. +YOLOv8 is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of +object detection, image segmentation and image classification tasks. -### 验证 - -在 COCO 数据集上验证 YOLOv5s-seg mask mAP: +See the [YOLOv8 Docs](https://docs.ultralytics.com) for details and get started with: -```bash -bash data/scripts/get_coco.sh --val --segments # 下载 COCO val segments 数据集 (780MB, 5000 images) -python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # 验证 +```commandline +pip install ultralytics ``` -### 预测 - -使用预训练的 YOLOv5m-seg.pt 来预测 bus.jpg: - -```bash -python segment/predict.py --weights yolov5m-seg.pt --data data/images/bus.jpg -``` - -```python -model = torch.hub.load( - "ultralytics/yolov5", "custom", "yolov5m-seg.pt" -) # 从load from PyTorch Hub 加载模型 (WARNING: 推理暂未支持) -``` - -| ![zidane](https://user-images.githubusercontent.com/26833433/203113421-decef4c4-183d-4a0a-a6c2-6435b33bc5d3.jpg) | ![bus](https://user-images.githubusercontent.com/26833433/203113416-11fe0025-69f7-4874-a0a6-65d0bfe2999a.jpg) | -| ---------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | - -### 模型导出 - -将 YOLOv5s-seg 模型导出到 ONNX 和 TensorRT: - -```bash -python export.py --weights yolov5s-seg.pt --include onnx engine --img 640 --device 0 -``` - -
+
+ + +
##
文档
@@ -312,6 +248,88 @@ YOLOv5 超级容易上手,简单易学。我们优先考虑现实世界的结
+##
实例分割模型 ⭐ 新
+ +我们新的 YOLOv5 [release v7.0](https://github.com/ultralytics/yolov5/releases/v7.0) 实例分割模型是世界上最快和最准确的模型,击败所有当前 [SOTA 基准](https://paperswithcode.com/sota/real-time-instance-segmentation-on-mscoco)。我们使它非常易于训练、验证和部署。更多细节请查看 [发行说明](https://github.com/ultralytics/yolov5/releases/v7.0) 或访问我们的 [YOLOv5 分割 Colab 笔记本](https://github.com/ultralytics/yolov5/blob/master/segment/tutorial.ipynb) 以快速入门。 + +
+ 实例分割模型列表 + +
+ +
+ + +
+ +我们使用 A100 GPU 在 COCO 上以 640 图像大小训练了 300 epochs 得到 YOLOv5 分割模型。我们将所有模型导出到 ONNX FP32 以进行 CPU 速度测试,并导出到 TensorRT FP16 以进行 GPU 速度测试。为了便于再现,我们在 Google [Colab Pro](https://colab.research.google.com/signup) 上进行了所有速度测试。 + +| 模型 | 尺寸
(像素) | mAPbox
50-95 | mAPmask
50-95 | 训练时长
300 epochs
A100 GPU(小时) | 推理速度
ONNX CPU
(ms) | 推理速度
TRT A100
(ms) | 参数量
(M) | FLOPs
@640 (B) | +| ------------------------------------------------------------------------------------------ | --------------- | -------------------- | --------------------- | --------------------------------------- | ----------------------------- | ----------------------------- | --------------- | ---------------------- | +| [YOLOv5n-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-seg.pt) | 640 | 27.6 | 23.4 | 80:17 | **62.7** | **1.2** | **2.0** | **7.1** | +| [YOLOv5s-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt) | 640 | 37.6 | 31.7 | 88:16 | 173.3 | 1.4 | 7.6 | 26.4 | +| [YOLOv5m-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-seg.pt) | 640 | 45.0 | 37.1 | 108:36 | 427.0 | 2.2 | 22.0 | 70.8 | +| [YOLOv5l-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-seg.pt) | 640 | 49.0 | 39.9 | 66:43 (2x) | 857.4 | 2.9 | 47.9 | 147.7 | +| [YOLOv5x-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-seg.pt) | 640 | **50.7** | **41.4** | 62:56 (3x) | 1579.2 | 4.5 | 88.8 | 265.7 | + +- 所有模型使用 SGD 优化器训练, 都使用 `lr0=0.01` 和 `weight_decay=5e-5` 参数, 图像大小为 640 。
训练 log 可以查看 https://wandb.ai/glenn-jocher/YOLOv5_v70_official +- **准确性**结果都在 COCO 数据集上,使用单模型单尺度测试得到。
复现命令 `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt` +- **推理速度**是使用 100 张图像推理时间进行平均得到,测试环境使用 [Colab Pro](https://colab.research.google.com/signup) 上 A100 高 RAM 实例。结果仅表示推理速度(NMS 每张图像增加约 1 毫秒)。
复现命令 `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt --batch 1` +- **模型转换**到 FP32 的 ONNX 和 FP16 的 TensorRT 脚本为 `export.py`.
运行命令 `python export.py --weights yolov5s-seg.pt --include engine --device 0 --half` + +
+ +
+ 分割模型使用示例  Open In Colab + +### 训练 + +YOLOv5分割训练支持自动下载 COCO128-seg 分割数据集,用户仅需在启动指令中包含 `--data coco128-seg.yaml` 参数。 若要手动下载,使用命令 `bash data/scripts/get_coco.sh --train --val --segments`, 在下载完毕后,使用命令 `python train.py --data coco.yaml` 开启训练。 + +```bash +# 单 GPU +python segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 + +# 多 GPU, DDP 模式 +python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 --device 0,1,2,3 +``` + +### 验证 + +在 COCO 数据集上验证 YOLOv5s-seg mask mAP: + +```bash +bash data/scripts/get_coco.sh --val --segments # 下载 COCO val segments 数据集 (780MB, 5000 images) +python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # 验证 +``` + +### 预测 + +使用预训练的 YOLOv5m-seg.pt 来预测 bus.jpg: + +```bash +python segment/predict.py --weights yolov5m-seg.pt --data data/images/bus.jpg +``` + +```python +model = torch.hub.load( + "ultralytics/yolov5", "custom", "yolov5m-seg.pt" +) # 从load from PyTorch Hub 加载模型 (WARNING: 推理暂未支持) +``` + +| ![zidane](https://user-images.githubusercontent.com/26833433/203113421-decef4c4-183d-4a0a-a6c2-6435b33bc5d3.jpg) | ![bus](https://user-images.githubusercontent.com/26833433/203113416-11fe0025-69f7-4874-a0a6-65d0bfe2999a.jpg) | +| ---------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | + +### 模型导出 + +将 YOLOv5s-seg 模型导出到 ONNX 和 TensorRT: + +```bash +python export.py --weights yolov5s-seg.pt --include onnx engine --img 640 --device 0 +``` + +
+ ##
分类网络 ⭐ 新
YOLOv5 [release v6.2](https://github.com/ultralytics/yolov5/releases) 带来对分类模型训练、验证和部署的支持!详情请查看 [发行说明](https://github.com/ultralytics/yolov5/releases/v6.2) 或访问我们的 [YOLOv5 分类 Colab 笔记本](https://github.com/ultralytics/yolov5/blob/master/classify/tutorial.ipynb) 以快速入门。 @@ -423,13 +441,6 @@ python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --inclu
-##
APP
- -通过下载 [Ultralytics APP](https://ultralytics.com/app_install) ,以在您的 iOS 或 Android 设备上运行 YOLOv5 模型! - - -Ultralytics mobile app - ##
贡献
我们喜欢您的意见或建议!我们希望尽可能简单和透明地为 YOLOv5 做出贡献。请看我们的 [投稿指南](CONTRIBUTING.md),并填写 [YOLOv5调查](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) 向我们发送您的体验反馈。感谢我们所有的贡献者! @@ -448,7 +459,7 @@ YOLOv5 在两种不同的 License 下可用: ##
联系我们
-若发现 YOLOv5 的 bug 或有功能需求,请访问 [GitHub 问题](https://github.com/ultralytics/yolov5/issues) 。如需专业支持,请 [联系我们](https://ultralytics.com/contact) 。
+请访问 [GitHub Issues](https://github.com/ultralytics/yolov5/issues) 或 [Ultralytics Community Forum](https://community.ultralytics.com) 以报告 YOLOv5 错误和请求功能。
From 1a2eb532cec4b5f0a4b295554b3c73ae9f7fff3a Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Mon, 13 Feb 2023 15:38:27 +0200 Subject: [PATCH 066/128] Fix return value check for subprocess.run (#10972) Subprocess.run does not return an integer. Regressed in #10944 --- utils/general.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/general.py b/utils/general.py index 4e5c7147fd40..01f0a3bddc7d 100644 --- a/utils/general.py +++ b/utils/general.py @@ -631,8 +631,8 @@ def download_one(url, dir): for i in range(retry + 1): if curl: s = 'sS' if threads > 1 else '' # silent - r = subprocess.run(f'curl -# -{s}L "{url}" -o "{f}" --retry 9 -C -'.split()) - success = r == 0 + proc = subprocess.run(f'curl -# -{s}L "{url}" -o "{f}" --retry 9 -C -'.split()) + success = proc.returncode == 0 else: torch.hub.download_url_to_file(url, f, progress=threads == 1) # torch download success = f.is_file() From a2de5c5bf61f1165ffeb4af51dc5b24e8d04bff6 Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Mon, 13 Feb 2023 16:00:31 +0200 Subject: [PATCH 067/128] Subprocess improvements (#10973) * Use list-form arguments for subprocess.run calls where possible Augments #10944 * Deduplicate curl code * Avoid eval() to parse integer --------- Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- classify/train.py | 2 +- export.py | 35 ++++++++++++++++++++++++++--------- segment/train.py | 8 ++++++-- segment/val.py | 2 +- train.py | 8 ++++++-- utils/downloads.py | 29 +++++++++++++++++++++++++---- utils/general.py | 6 ++---- val.py | 2 +- 8 files changed, 68 insertions(+), 24 deletions(-) diff --git a/classify/train.py b/classify/train.py index 4767be77bd61..8ae2fdd52828 100644 --- a/classify/train.py +++ b/classify/train.py @@ -78,7 +78,7 @@ def train(opt, device): LOGGER.info(f'\nDataset not found ⚠️, missing path {data_dir}, attempting download...') t = time.time() if str(data) == 'imagenet': - subprocess.run(f"bash {ROOT / 'data/scripts/get_imagenet.sh'}", 
shell=True, check=True) + subprocess.run(["bash", str(ROOT / 'data/scripts/get_imagenet.sh')], shell=True, check=True) else: url = f'https://github.com/ultralytics/yolov5/releases/download/v1.0/{data}.zip' download(url, dir=data_dir.parent) diff --git a/export.py b/export.py index 1bf0532dde34..2c9fb77d17be 100644 --- a/export.py +++ b/export.py @@ -194,8 +194,15 @@ def export_openvino(file, metadata, half, prefix=colorstr('OpenVINO:')): LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...') f = str(file).replace('.pt', f'_openvino_model{os.sep}') - cmd = f"mo --input_model {file.with_suffix('.onnx')} --output_dir {f} --data_type {'FP16' if half else 'FP32'}" - subprocess.run(cmd.split(), check=True, env=os.environ) # export + args = [ + "mo", + "--input_model", + str(file.with_suffix('.onnx')), + "--output_dir", + f, + "--data_type", + ("FP16" if half else "FP32"),] + subprocess.run(args, check=True, env=os.environ) # export yaml_save(Path(f) / file.with_suffix('.yaml').name, metadata) # add metadata.yaml return f, None @@ -420,8 +427,15 @@ def export_edgetpu(file, prefix=colorstr('Edge TPU:')): f = str(file).replace('.pt', '-int8_edgetpu.tflite') # Edge TPU model f_tfl = str(file).replace('.pt', '-int8.tflite') # TFLite model - cmd = f"edgetpu_compiler -s -d -k 10 --out_dir {file.parent} {f_tfl}" - subprocess.run(cmd.split(), check=True) + subprocess.run([ + 'edgetpu_compiler', + '-s', + '-d', + '-k', + '10', + '--out_dir', + str(file.parent), + f_tfl,], check=True) return f, None @@ -436,11 +450,14 @@ def export_tfjs(file, int8, prefix=colorstr('TensorFlow.js:')): f_pb = file.with_suffix('.pb') # *.pb path f_json = f'{f}/model.json' # *.json path - int8_export = ' --quantize_uint8 ' if int8 else '' - - cmd = f'tensorflowjs_converter --input_format=tf_frozen_model {int8_export}' \ - f'--output_node_names=Identity,Identity_1,Identity_2,Identity_3 {f_pb} {f}' - subprocess.run(cmd.split()) + args = [ + 'tensorflowjs_converter', + 
'--input_format=tf_frozen_model', + '--quantize_uint8' if int8 else '', + '--output_node_names=Identity,Identity_1,Identity_2,Identity_3', + str(f_pb), + str(f),] + subprocess.run([arg for arg in args if arg], check=True) json = Path(f_json).read_text() with open(f_json, 'w') as j: # sort JSON Identity_* in ascending order diff --git a/segment/train.py b/segment/train.py index 883c8b0a2b62..4914f9613a3d 100644 --- a/segment/train.py +++ b/segment/train.py @@ -598,8 +598,12 @@ def main(opt, callbacks=Callbacks()): # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv' if opt.bucket: - subprocess.run( - f'gsutil cp gs://{opt.bucket}/evolve.csv {evolve_csv}'.split()) # download evolve.csv if exists + # download evolve.csv if exists + subprocess.run([ + 'gsutil', + 'cp', + f'gs://{opt.bucket}/evolve.csv', + str(evolve_csv),]) for _ in range(opt.evolve): # generations to evolve if evolve_csv.exists(): # if evolve.csv exists: select best hyps and mutate diff --git a/segment/val.py b/segment/val.py index 8168b5407c1d..665b540a5490 100644 --- a/segment/val.py +++ b/segment/val.py @@ -462,7 +462,7 @@ def main(opt): r, _, t = run(**vars(opt), plots=False) y.append(r + t) # results and times np.savetxt(f, y, fmt='%10.4g') # save - subprocess.run('zip -r study.zip study_*.txt'.split()) + subprocess.run(['zip', '-r', 'study.zip', 'study_*.txt']) plot_val_study(x=x) # plot else: raise NotImplementedError(f'--task {opt.task} not in ("train", "val", "test", "speed", "study")') diff --git a/train.py b/train.py index db65f2c74c6c..ccda0a7fe2e3 100644 --- a/train.py +++ b/train.py @@ -572,8 +572,12 @@ def main(opt, callbacks=Callbacks()): # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv' if opt.bucket: - subprocess.run( - f'gsutil cp gs://{opt.bucket}/evolve.csv 
{evolve_csv}'.split()) # download evolve.csv if exists + # download evolve.csv if exists + subprocess.run([ + 'gsutil', + 'cp', + f'gs://{opt.bucket}/evolve.csv', + str(evolve_csv),]) for _ in range(opt.evolve): # generations to evolve if evolve_csv.exists(): # if evolve.csv exists: select best hyps and mutate diff --git a/utils/downloads.py b/utils/downloads.py index a3ff9274066e..2610f3c66aac 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -26,8 +26,10 @@ def is_url(url, check=True): def gsutil_getsize(url=''): # gs://bucket/file size https://cloud.google.com/storage/docs/gsutil/commands/du - s = subprocess.check_output(f'gsutil du {url}', shell=True).decode('utf-8') - return eval(s.split(' ')[0]) if len(s) else 0 # bytes + output = subprocess.check_output(['gsutil', 'du', url], shell=True, encoding='utf-8') + if output: + return int(output.split()[0]) + return 0 def url_getsize(url='https://ultralytics.com/images/bus.jpg'): @@ -36,6 +38,25 @@ def url_getsize(url='https://ultralytics.com/images/bus.jpg'): return int(response.headers.get('content-length', -1)) +def curl_download(url, filename, *, silent: bool = False) -> bool: + """ + Download a file from a url to a filename using curl. 
+ """ + silent_option = 'sS' if silent else '' # silent + proc = subprocess.run([ + 'curl', + '-#', + f'-{silent_option}L', + url, + '--output', + filename, + '--retry', + '9', + '-C', + '-',]) + return proc.returncode == 0 + + def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''): # Attempts to download file from url or url2, checks and removes incomplete downloads < min_bytes from utils.general import LOGGER @@ -50,8 +71,8 @@ def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''): if file.exists(): file.unlink() # remove partial downloads LOGGER.info(f'ERROR: {e}\nRe-attempting {url2 or url} to {file}...') - subprocess.run( - f"curl -# -L '{url2 or url}' -o '{file}' --retry 3 -C -".split()) # curl download, retry and resume on fail + # curl download, retry and resume on fail + curl_download(url2 or url, file) finally: if not file.exists() or file.stat().st_size < min_bytes: # check if file.exists(): diff --git a/utils/general.py b/utils/general.py index 01f0a3bddc7d..a6af4f3216dd 100644 --- a/utils/general.py +++ b/utils/general.py @@ -38,7 +38,7 @@ import yaml from utils import TryExcept, emojis -from utils.downloads import gsutil_getsize +from utils.downloads import gsutil_getsize, curl_download from utils.metrics import box_iou, fitness FILE = Path(__file__).resolve() @@ -630,9 +630,7 @@ def download_one(url, dir): LOGGER.info(f'Downloading {url} to {f}...') for i in range(retry + 1): if curl: - s = 'sS' if threads > 1 else '' # silent - proc = subprocess.run(f'curl -# -{s}L "{url}" -o "{f}" --retry 9 -C -'.split()) - success = proc.returncode == 0 + success = curl_download(url, f, silent=(threads > 1)) else: torch.hub.download_url_to_file(url, f, progress=threads == 1) # torch download success = f.is_file() diff --git a/val.py b/val.py index 62fa2c980988..7829afb68b79 100644 --- a/val.py +++ b/val.py @@ -398,7 +398,7 @@ def main(opt): r, _, t = run(**vars(opt), plots=False) y.append(r + t) # results and times np.savetxt(f, y, 
fmt='%10.4g') # save - subprocess.run('zip -r study.zip study_*.txt'.split()) + subprocess.run(['zip', '-r', 'study.zip', 'study_*.txt']) plot_val_study(x=x) # plot else: raise NotImplementedError(f'--task {opt.task} not in ("train", "val", "test", "speed", "study")') From 4d28fec3b8b663fa8225634ca8eeb4446505527e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 13 Feb 2023 20:27:22 +0400 Subject: [PATCH 068/128] Update README.md (#10975) @pderrenger YOLOv5 HUB copy updates Signed-off-by: Glenn Jocher --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 3a0e2fe1a188..2a28ea11490a 100644 --- a/README.md +++ b/README.md @@ -203,7 +203,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - ##
Ultralytics HUB
-[Ultralytics HUB](https://bit.ly/ultralytics_hub) is our ⭐ **NEW** no-code solution to visualize datasets, train YOLOv5 🚀 models, and deploy to the real world in a seamless experience. Get started for **Free** now! +Experience seamless AI with [Ultralytics HUB](https://bit.ly/ultralytics_hub) ⭐, the all-in-one solution for data visualization, YOLOv5 🚀 model training and deployment, without any coding. Transform images into actionable insights and bring your AI visions to life with ease using our cutting-edge platform and user-friendly [Ultralytics App](https://ultralytics.com/app_install). Start your journey for **Free** now!
From e7b60999ad88a40bfb84c539bed1e6ec11249af2 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 15 Feb 2023 20:28:20 +0400 Subject: [PATCH 069/128] Fix Comet link (#10990) @DN6 fixes YOLOv5 Comet link we chatted about Signed-off-by: Glenn Jocher --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 2a28ea11490a..16dfd9fca085 100644 --- a/README.md +++ b/README.md @@ -190,7 +190,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - - + From e4d836080f68dd14ae9becaa7b50c510ac1db54f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 15 Feb 2023 20:31:41 +0400 Subject: [PATCH 070/128] Update README.zh-CN.md (#10991) Signed-off-by: Glenn Jocher --- README.zh-CN.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.zh-CN.md b/README.zh-CN.md index c25dc0c3326a..17c046c8d98d 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -184,7 +184,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - - + @@ -193,7 +193,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - | Roboflow | ClearML ⭐ 新 | Comet ⭐ 新 | Neural Magic ⭐ 新 | | :--------------------------------------------------------------------------------: | :-------------------------------------------------------------------------: | :-------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------: | -| 将您的自定义数据集进行标注并直接导出到 YOLOv5 以进行训练 [Roboflow](https://roboflow.com/?ref=ultralytics) | 自动跟踪、可视化甚至远程训练 YOLOv5 [ClearML](https://cutt.ly/yolov5-readme-clearml)(开源!) 
| 永远免费,[Comet](https://bit.ly/yolov5-readme-comet)可让您保存 YOLOv5 模型、恢复训练以及交互式可视化和调试预测 | 使用 [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic),运行 YOLOv5 推理的速度最高可提高6倍 | +| 将您的自定义数据集进行标注并直接导出到 YOLOv5 以进行训练 [Roboflow](https://roboflow.com/?ref=ultralytics) | 自动跟踪、可视化甚至远程训练 YOLOv5 [ClearML](https://cutt.ly/yolov5-readme-clearml)(开源!) | 永远免费,[Comet](https://bit.ly/yolov5-readme-comet2)可让您保存 YOLOv5 模型、恢复训练以及交互式可视化和调试预测 | 使用 [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic),运行 YOLOv5 推理的速度最高可提高6倍 | ##
Ultralytics HUB
From 4dd1caaf9af97ca56d7938a4baf3be8d0ea0a3ce Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 16 Feb 2023 21:07:55 +0400 Subject: [PATCH 071/128] Update README.md (#10992) * Update README.md Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- README.zh-CN.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.zh-CN.md b/README.zh-CN.md index 17c046c8d98d..800a670cfb4f 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -191,8 +191,8 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml -
-| Roboflow | ClearML ⭐ 新 | Comet ⭐ 新 | Neural Magic ⭐ 新 | -| :--------------------------------------------------------------------------------: | :-------------------------------------------------------------------------: | :-------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------: | +| Roboflow | ClearML ⭐ 新 | Comet ⭐ 新 | Neural Magic ⭐ 新 | +| :--------------------------------------------------------------------------------: | :-------------------------------------------------------------------------: | :--------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------: | | 将您的自定义数据集进行标注并直接导出到 YOLOv5 以进行训练 [Roboflow](https://roboflow.com/?ref=ultralytics) | 自动跟踪、可视化甚至远程训练 YOLOv5 [ClearML](https://cutt.ly/yolov5-readme-clearml)(开源!) | 永远免费,[Comet](https://bit.ly/yolov5-readme-comet2)可让您保存 YOLOv5 模型、恢复训练以及交互式可视化和调试预测 | 使用 [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic),运行 YOLOv5 推理的速度最高可提高6倍 | ##
Ultralytics HUB
From 6d283ec167b60a0160eb275323a9b13b563ff804 Mon Sep 17 00:00:00 2001 From: Snyk bot Date: Thu, 16 Feb 2023 17:08:13 +0000 Subject: [PATCH 072/128] [Snyk] Security upgrade werkzeug from 1.0.1 to 2.2.3 (#10995) * fix: utils/google_app_engine/additional_requirements.txt to reduce vulnerabilities The following vulnerabilities are fixed by pinning transitive dependencies: - https://snyk.io/vuln/SNYK-PYTHON-WERKZEUG-3319935 - https://snyk.io/vuln/SNYK-PYTHON-WERKZEUG-3319936 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- utils/google_app_engine/additional_requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/google_app_engine/additional_requirements.txt b/utils/google_app_engine/additional_requirements.txt index b6b496feaa7b..d5b76758c876 100644 --- a/utils/google_app_engine/additional_requirements.txt +++ b/utils/google_app_engine/additional_requirements.txt @@ -2,3 +2,4 @@ pip==21.1 Flask==1.0.2 gunicorn==19.10.0 +werkzeug>=2.2.3 # not directly required, pinned by Snyk to avoid a vulnerability From 226a5e43cbceff5de43a71c4fb3f3f7478a9bb03 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 16 Feb 2023 23:48:42 +0400 Subject: [PATCH 073/128] Update ci-testing.yml benchmarks to Python 3.10 (#10997) Signed-off-by: Glenn Jocher --- .github/workflows/ci-testing.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index f31bb6e6ce3c..f9c62d623042 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -18,7 +18,7 @@ jobs: fail-fast: false matrix: os: [ ubuntu-latest ] - python-version: [ '3.9' ] # requires python<=3.9 + python-version: [ '3.10' ] # requires python<=3.10 model: [ yolov5n ] steps: - uses: actions/checkout@v3 From 
34e1bc8ee3cabc809bb3302b0cc6de4f6dcce10e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 17 Feb 2023 13:53:45 +0400 Subject: [PATCH 074/128] Update downloads.py (#11005) * Update downloads.py Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/downloads.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/utils/downloads.py b/utils/downloads.py index 2610f3c66aac..e739919540b4 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -120,11 +120,9 @@ def github_assets(repository, version='latest'): file.parent.mkdir(parents=True, exist_ok=True) # make parent dir (if required) if name in assets: - url3 = 'https://drive.google.com/drive/folders/1EFQTEUeXWSFww0luse2jB9M1QNZQGwNl' # backup gdrive mirror - safe_download( - file, - url=f'https://github.com/{repo}/releases/download/{tag}/{name}', - min_bytes=1E5, - error_msg=f'{file} missing, try downloading from https://github.com/{repo}/releases/{tag} or {url3}') + safe_download(file, + url=f'https://github.com/{repo}/releases/download/{tag}/{name}', + min_bytes=1E5, + error_msg=f'{file} missing, try downloading from https://github.com/{repo}/releases/{tag}') return str(file) From 7a972e86c4e5009830d5e6faacadfe6e1ed2efff Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 18 Feb 2023 01:06:24 +0100 Subject: [PATCH 075/128] Update .pre-commit-config.yaml (#11009) * Update .pre-commit-config.yaml Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update __init__.py Signed-off-by: Glenn Jocher * Update .pre-commit-config.yaml Signed-off-by: Glenn Jocher * Pre-commit updates * Pre-commit updates --------- Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] 
<66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 35 ++--- benchmarks.py | 2 +- classify/predict.py | 4 +- classify/train.py | 26 ++-- classify/tutorial.ipynb | 2 +- classify/val.py | 8 +- detect.py | 2 +- export.py | 26 ++-- models/common.py | 16 +- models/segment/yolov5m-seg.yaml | 2 +- models/segment/yolov5s-seg.yaml | 2 +- models/tf.py | 12 +- segment/predict.py | 2 +- segment/train.py | 14 +- segment/tutorial.ipynb | 2 +- segment/val.py | 16 +- train.py | 6 +- tutorial.ipynb | 2 +- utils/__init__.py | 2 +- utils/dataloaders.py | 34 ++--- utils/downloads.py | 2 +- utils/flask_rest_api/example_request.py | 8 +- utils/flask_rest_api/restapi.py | 22 +-- utils/general.py | 48 +++--- utils/loggers/__init__.py | 16 +- utils/loggers/clearml/clearml_utils.py | 6 +- utils/loggers/comet/__init__.py | 192 ++++++++++++------------ utils/loggers/comet/comet_utils.py | 42 +++--- utils/loggers/comet/hpo.py | 32 ++-- utils/loggers/wandb/wandb_utils.py | 10 +- utils/metrics.py | 10 +- utils/plots.py | 2 +- utils/segment/dataloaders.py | 32 ++-- utils/segment/loss.py | 12 +- utils/segment/metrics.py | 90 +++++------ utils/segment/plots.py | 20 +-- utils/torch_utils.py | 4 +- utils/triton.py | 14 +- val.py | 4 +- 39 files changed, 389 insertions(+), 392 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index b188048e63a6..c5162378ab81 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,8 +1,5 @@ -# Define hooks for code formations -# Will be applied on any updated commit files if a user has installed and linked commit hook - -default_language_version: - python: python3.8 +# Ultralytics YOLO 🚀, GPL-3.0 license +# Pre-commit hooks. 
For more information see https://github.com/pre-commit/pre-commit-hooks/blob/main/README.md exclude: 'docs/' # Define bot property if installed via https://github.com/marketplace/pre-commit-ci @@ -16,13 +13,13 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.4.0 hooks: - # - id: end-of-file-fixer + - id: end-of-file-fixer - id: trailing-whitespace - id: check-case-conflict - id: check-yaml - - id: check-toml - - id: pretty-format-json - id: check-docstring-first + - id: double-quote-string-fixer + - id: detect-private-key - repo: https://github.com/asottile/pyupgrade rev: v3.3.1 @@ -31,11 +28,11 @@ repos: name: Upgrade code args: [--py37-plus] - # - repo: https://github.com/PyCQA/isort - # rev: 5.11.4 - # hooks: - # - id: isort - # name: Sort imports + - repo: https://github.com/PyCQA/isort + rev: 5.12.0 + hooks: + - id: isort + name: Sort imports - repo: https://github.com/google/yapf rev: v0.32.0 @@ -59,12 +56,12 @@ repos: - id: flake8 name: PEP8 - #- repo: https://github.com/codespell-project/codespell - # rev: v2.2.2 - # hooks: - # - id: codespell - # args: - # - --ignore-words-list=crate,nd + - repo: https://github.com/codespell-project/codespell + rev: v2.2.2 + hooks: + - id: codespell + args: + - --ignore-words-list=crate,nd,strack,dota #- repo: https://github.com/asottile/yesqa # rev: v1.4.0 diff --git a/benchmarks.py b/benchmarks.py index 03d7d693a936..09108b8a7cc4 100644 --- a/benchmarks.py +++ b/benchmarks.py @@ -164,6 +164,6 @@ def main(opt): test(**vars(opt)) if opt.test else run(**vars(opt)) -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) diff --git a/classify/predict.py b/classify/predict.py index 5a5edabda42c..5f0d40787b52 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -179,7 +179,7 @@ def run( vid_writer[i].write(im0) # Print time (inference-only) - LOGGER.info(f"{s}{dt[1].dt * 1E3:.1f}ms") + LOGGER.info(f'{s}{dt[1].dt * 1E3:.1f}ms') # 
Print results t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image @@ -221,6 +221,6 @@ def main(opt): run(**vars(opt)) -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) diff --git a/classify/train.py b/classify/train.py index 8ae2fdd52828..b752a3c1fe32 100644 --- a/classify/train.py +++ b/classify/train.py @@ -78,7 +78,7 @@ def train(opt, device): LOGGER.info(f'\nDataset not found ⚠️, missing path {data_dir}, attempting download...') t = time.time() if str(data) == 'imagenet': - subprocess.run(["bash", str(ROOT / 'data/scripts/get_imagenet.sh')], shell=True, check=True) + subprocess.run(['bash', str(ROOT / 'data/scripts/get_imagenet.sh')], shell=True, check=True) else: url = f'https://github.com/ultralytics/yolov5/releases/download/v1.0/{data}.zip' download(url, dir=data_dir.parent) @@ -220,11 +220,11 @@ def train(opt, device): # Log metrics = { - "train/loss": tloss, - f"{val}/loss": vloss, - "metrics/accuracy_top1": top1, - "metrics/accuracy_top5": top5, - "lr/0": optimizer.param_groups[0]['lr']} # learning rate + 'train/loss': tloss, + f'{val}/loss': vloss, + 'metrics/accuracy_top1': top1, + 'metrics/accuracy_top5': top5, + 'lr/0': optimizer.param_groups[0]['lr']} # learning rate logger.log_metrics(metrics, epoch) # Save model @@ -251,11 +251,11 @@ def train(opt, device): if RANK in {-1, 0} and final_epoch: LOGGER.info(f'\nTraining complete ({(time.time() - t0) / 3600:.3f} hours)' f"\nResults saved to {colorstr('bold', save_dir)}" - f"\nPredict: python classify/predict.py --weights {best} --source im.jpg" - f"\nValidate: python classify/val.py --weights {best} --data {data_dir}" - f"\nExport: python export.py --weights {best} --include onnx" + f'\nPredict: python classify/predict.py --weights {best} --source im.jpg' + f'\nValidate: python classify/val.py --weights {best} --data {data_dir}' + f'\nExport: python export.py --weights {best} --include onnx' f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 
'custom', '{best}')" - f"\nVisualize: https://netron.app\n") + f'\nVisualize: https://netron.app\n') # Plot examples images, labels = (x[:25] for x in next(iter(testloader))) # first 25 images and labels @@ -263,7 +263,7 @@ def train(opt, device): file = imshow_cls(images, labels, pred, model.names, verbose=False, f=save_dir / 'test_images.jpg') # Log results - meta = {"epochs": epochs, "top1_acc": best_fitness, "date": datetime.now().isoformat()} + meta = {'epochs': epochs, 'top1_acc': best_fitness, 'date': datetime.now().isoformat()} logger.log_images(file, name='Test Examples (true-predicted)', epoch=epoch) logger.log_model(best, epochs, metadata=meta) @@ -310,7 +310,7 @@ def main(opt): assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' torch.cuda.set_device(LOCAL_RANK) device = torch.device('cuda', LOCAL_RANK) - dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") + dist.init_process_group(backend='nccl' if dist.is_nccl_available() else 'gloo') # Parameters opt.save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok) # increment run @@ -328,6 +328,6 @@ def run(**kwargs): return opt -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) diff --git a/classify/tutorial.ipynb b/classify/tutorial.ipynb index cc18aa934039..58723608bdbe 100644 --- a/classify/tutorial.ipynb +++ b/classify/tutorial.ipynb @@ -1477,4 +1477,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} \ No newline at end of file +} diff --git a/classify/val.py b/classify/val.py index 03ba817d5ea2..4edd5a1f5e9e 100644 --- a/classify/val.py +++ b/classify/val.py @@ -100,7 +100,7 @@ def run( pred, targets, loss, dt = [], [], 0, (Profile(), Profile(), Profile()) n = len(dataloader) # number of batches action = 'validating' if dataloader.dataset.root.stem == 'val' else 'testing' - desc = f"{pbar.desc[:-36]}{action:>36}" if pbar else f"{action}" + desc = f'{pbar.desc[:-36]}{action:>36}' if pbar 
else f'{action}' bar = tqdm(dataloader, desc, n, not training, bar_format=TQDM_BAR_FORMAT, position=0) with torch.cuda.amp.autocast(enabled=device.type != 'cpu'): for images, labels in bar: @@ -123,14 +123,14 @@ def run( top1, top5 = acc.mean(0).tolist() if pbar: - pbar.desc = f"{pbar.desc[:-36]}{loss:>12.3g}{top1:>12.3g}{top5:>12.3g}" + pbar.desc = f'{pbar.desc[:-36]}{loss:>12.3g}{top1:>12.3g}{top5:>12.3g}' if verbose: # all classes LOGGER.info(f"{'Class':>24}{'Images':>12}{'top1_acc':>12}{'top5_acc':>12}") LOGGER.info(f"{'all':>24}{targets.shape[0]:>12}{top1:>12.3g}{top5:>12.3g}") for i, c in model.names.items(): acc_i = acc[targets == i] top1i, top5i = acc_i.mean(0).tolist() - LOGGER.info(f"{c:>24}{acc_i.shape[0]:>12}{top1i:>12.3g}{top5i:>12.3g}") + LOGGER.info(f'{c:>24}{acc_i.shape[0]:>12}{top1i:>12.3g}{top5i:>12.3g}') # Print results t = tuple(x.t / len(dataloader.dataset.samples) * 1E3 for x in dt) # speeds per image @@ -165,6 +165,6 @@ def main(opt): run(**vars(opt)) -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) diff --git a/detect.py b/detect.py index 2d13401f78bd..3f32d7a50d6b 100644 --- a/detect.py +++ b/detect.py @@ -256,6 +256,6 @@ def main(opt): run(**vars(opt)) -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) diff --git a/export.py b/export.py index 2c9fb77d17be..e8287704866a 100644 --- a/export.py +++ b/export.py @@ -120,7 +120,7 @@ def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:' f = file.with_suffix('.torchscript') ts = torch.jit.trace(model, im, strict=False) - d = {"shape": im.shape, "stride": int(max(model.stride)), "names": model.names} + d = {'shape': im.shape, 'stride': int(max(model.stride)), 'names': model.names} extra_files = {'config.txt': json.dumps(d)} # torch._C.ExtraFilesMap() if optimize: # https://pytorch.org/tutorials/recipes/mobile_interpreter.html optimize_for_mobile(ts)._save_for_lite_interpreter(str(f), 
_extra_files=extra_files) @@ -195,13 +195,13 @@ def export_openvino(file, metadata, half, prefix=colorstr('OpenVINO:')): f = str(file).replace('.pt', f'_openvino_model{os.sep}') args = [ - "mo", - "--input_model", + 'mo', + '--input_model', str(file.with_suffix('.onnx')), - "--output_dir", + '--output_dir', f, - "--data_type", - ("FP16" if half else "FP32"),] + '--data_type', + ('FP16' if half else 'FP32'),] subprocess.run(args, check=True, env=os.environ) # export yaml_save(Path(f) / file.with_suffix('.yaml').name, metadata) # add metadata.yaml return f, None @@ -237,7 +237,7 @@ def export_coreml(model, im, file, int8, half, prefix=colorstr('CoreML:')): if bits < 32: if MACOS: # quantization only supported on macOS with warnings.catch_warnings(): - warnings.filterwarnings("ignore", category=DeprecationWarning) # suppress numpy==1.20 float warning + warnings.filterwarnings('ignore', category=DeprecationWarning) # suppress numpy==1.20 float warning ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode) else: print(f'{prefix} quantization only supported on macOS, skipping...') @@ -293,7 +293,7 @@ def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose if dynamic: if im.shape[0] <= 1: - LOGGER.warning(f"{prefix} WARNING ⚠️ --dynamic model requires maximum --batch-size argument") + LOGGER.warning(f'{prefix} WARNING ⚠️ --dynamic model requires maximum --batch-size argument') profile = builder.create_optimization_profile() for inp in inputs: profile.set_shape(inp.name, (1, *im.shape[1:]), (max(1, im.shape[0] // 2), *im.shape[1:]), im.shape) @@ -403,7 +403,7 @@ def export_tflite(keras_model, im, file, int8, data, nms, agnostic_nms, prefix=c converter.target_spec.supported_ops.append(tf.lite.OpsSet.SELECT_TF_OPS) tflite_model = converter.convert() - open(f, "wb").write(tflite_model) + open(f, 'wb').write(tflite_model) return f, None @@ -618,14 +618,14 @@ def run( det &= not seg # segmentation models inherit 
from SegmentationModel(DetectionModel) dir = Path('segment' if seg else 'classify' if cls else '') h = '--half' if half else '' # --half FP16 inference arg - s = "# WARNING ⚠️ ClassificationModel not yet supported for PyTorch Hub AutoShape inference" if cls else \ - "# WARNING ⚠️ SegmentationModel not yet supported for PyTorch Hub AutoShape inference" if seg else '' + s = '# WARNING ⚠️ ClassificationModel not yet supported for PyTorch Hub AutoShape inference' if cls else \ + '# WARNING ⚠️ SegmentationModel not yet supported for PyTorch Hub AutoShape inference' if seg else '' LOGGER.info(f'\nExport complete ({time.time() - t:.1f}s)' f"\nResults saved to {colorstr('bold', file.parent.resolve())}" f"\nDetect: python {dir / ('detect.py' if det else 'predict.py')} --weights {f[-1]} {h}" f"\nValidate: python {dir / 'val.py'} --weights {f[-1]} {h}" f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{f[-1]}') {s}" - f"\nVisualize: https://netron.app") + f'\nVisualize: https://netron.app') return f # return list of exported files/dirs @@ -667,6 +667,6 @@ def main(opt): run(**vars(opt)) -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) diff --git a/models/common.py b/models/common.py index 71340688d2e0..f416ddf25eb8 100644 --- a/models/common.py +++ b/models/common.py @@ -380,11 +380,11 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, w = next(Path(w).glob('*.xml')) # get *.xml file from *_openvino_model dir network = ie.read_model(model=w, weights=Path(w).with_suffix('.bin')) if network.get_parameters()[0].get_layout().empty: - network.get_parameters()[0].set_layout(Layout("NCHW")) + network.get_parameters()[0].set_layout(Layout('NCHW')) batch_dim = get_batch(network) if batch_dim.is_static: batch_size = batch_dim.get_length() - executable_network = ie.compile_model(network, device_name="CPU") # device_name="MYRIAD" for Intel NCS2 + executable_network = ie.compile_model(network, 
device_name='CPU') # device_name="MYRIAD" for Intel NCS2 stride, names = self._load_metadata(Path(w).with_suffix('.yaml')) # load metadata elif engine: # TensorRT LOGGER.info(f'Loading {w} for TensorRT inference...') @@ -431,7 +431,7 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, import tensorflow as tf def wrap_frozen_graph(gd, inputs, outputs): - x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=""), []) # wrapped + x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=''), []) # wrapped ge = x.graph.as_graph_element return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs)) @@ -445,7 +445,7 @@ def gd_outputs(gd): gd = tf.Graph().as_graph_def() # TF GraphDef with open(w, 'rb') as f: gd.ParseFromString(f.read()) - frozen_func = wrap_frozen_graph(gd, inputs="x:0", outputs=gd_outputs(gd)) + frozen_func = wrap_frozen_graph(gd, inputs='x:0', outputs=gd_outputs(gd)) elif tflite or edgetpu: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu from tflite_runtime.interpreter import Interpreter, load_delegate @@ -467,9 +467,9 @@ def gd_outputs(gd): output_details = interpreter.get_output_details() # outputs # load metadata with contextlib.suppress(zipfile.BadZipFile): - with zipfile.ZipFile(w, "r") as model: + with zipfile.ZipFile(w, 'r') as model: meta_file = model.namelist()[0] - meta = ast.literal_eval(model.read(meta_file).decode("utf-8")) + meta = ast.literal_eval(model.read(meta_file).decode('utf-8')) stride, names = int(meta['stride']), meta['names'] elif tfjs: # TF.js raise NotImplementedError('ERROR: YOLOv5 TF.js inference is not supported') @@ -491,7 +491,7 @@ def gd_outputs(gd): check_requirements('tritonclient[all]') from utils.triton import TritonRemoteModel model = TritonRemoteModel(url=w) - nhwc = 
model.runtime.startswith("tensorflow") + nhwc = model.runtime.startswith('tensorflow') else: raise NotImplementedError(f'ERROR: {w} is not a supported format') @@ -608,7 +608,7 @@ def _model_type(p='path/to/model.pt'): url = urlparse(p) # if url may be Triton inference server types = [s in Path(p).name for s in sf] types[8] &= not types[9] # tflite &= not edgetpu - triton = not any(types) and all([any(s in url.scheme for s in ["http", "grpc"]), url.netloc]) + triton = not any(types) and all([any(s in url.scheme for s in ['http', 'grpc']), url.netloc]) return types + [triton] @staticmethod diff --git a/models/segment/yolov5m-seg.yaml b/models/segment/yolov5m-seg.yaml index f73d1992ac19..07ec25ba264d 100644 --- a/models/segment/yolov5m-seg.yaml +++ b/models/segment/yolov5m-seg.yaml @@ -45,4 +45,4 @@ head: [-1, 3, C3, [1024, False]], # 23 (P5/32-large) [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Detect(P3, P4, P5) - ] \ No newline at end of file + ] diff --git a/models/segment/yolov5s-seg.yaml b/models/segment/yolov5s-seg.yaml index 7cbdb36b425c..a827814e1399 100644 --- a/models/segment/yolov5s-seg.yaml +++ b/models/segment/yolov5s-seg.yaml @@ -45,4 +45,4 @@ head: [-1, 3, C3, [1024, False]], # 23 (P5/32-large) [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Detect(P3, P4, P5) - ] \ No newline at end of file + ] diff --git a/models/tf.py b/models/tf.py index 3f3dc8dbe7e7..8290cf2e57f5 100644 --- a/models/tf.py +++ b/models/tf.py @@ -356,7 +356,7 @@ class TFUpsample(keras.layers.Layer): # TF version of torch.nn.Upsample() def __init__(self, size, scale_factor, mode, w=None): # warning: all arguments needed including 'w' super().__init__() - assert scale_factor % 2 == 0, "scale_factor must be multiple of 2" + assert scale_factor % 2 == 0, 'scale_factor must be multiple of 2' self.upsample = lambda x: tf.image.resize(x, (x.shape[1] * scale_factor, x.shape[2] * scale_factor), mode) # self.upsample = keras.layers.UpSampling2D(size=scale_factor, 
interpolation=mode) # with default arguments: align_corners=False, half_pixel_centers=False @@ -371,7 +371,7 @@ class TFConcat(keras.layers.Layer): # TF version of torch.concat() def __init__(self, dimension=1, w=None): super().__init__() - assert dimension == 1, "convert only NCHW to NHWC concat" + assert dimension == 1, 'convert only NCHW to NHWC concat' self.d = 3 def call(self, inputs): @@ -523,17 +523,17 @@ def _nms(x, topk_all=100, iou_thres=0.45, conf_thres=0.25): # agnostic NMS selected_boxes = tf.gather(boxes, selected_inds) padded_boxes = tf.pad(selected_boxes, paddings=[[0, topk_all - tf.shape(selected_boxes)[0]], [0, 0]], - mode="CONSTANT", + mode='CONSTANT', constant_values=0.0) selected_scores = tf.gather(scores_inp, selected_inds) padded_scores = tf.pad(selected_scores, paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]], - mode="CONSTANT", + mode='CONSTANT', constant_values=-1.0) selected_classes = tf.gather(class_inds, selected_inds) padded_classes = tf.pad(selected_classes, paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]], - mode="CONSTANT", + mode='CONSTANT', constant_values=-1.0) valid_detections = tf.shape(selected_inds)[0] return padded_boxes, padded_scores, padded_classes, valid_detections @@ -603,6 +603,6 @@ def main(opt): run(**vars(opt)) -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) diff --git a/segment/predict.py b/segment/predict.py index e9093baa1cc7..d82df89a85b0 100644 --- a/segment/predict.py +++ b/segment/predict.py @@ -279,6 +279,6 @@ def main(opt): run(**vars(opt)) -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) diff --git a/segment/train.py b/segment/train.py index 4914f9613a3d..2e71de131a8d 100644 --- a/segment/train.py +++ b/segment/train.py @@ -139,7 +139,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio # Batch size if RANK == -1 and batch_size == -1: # single-GPU only, estimate best batch size batch_size = 
check_train_batch_size(model, imgsz, amp) - logger.update_params({"batch_size": batch_size}) + logger.update_params({'batch_size': batch_size}) # loggers.on_params_update({"batch_size": batch_size}) # Optimizer @@ -341,10 +341,10 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio # Mosaic plots if plots: if ni < 3: - plot_images_and_masks(imgs, targets, masks, paths, save_dir / f"train_batch{ni}.jpg") + plot_images_and_masks(imgs, targets, masks, paths, save_dir / f'train_batch{ni}.jpg') if ni == 10: files = sorted(save_dir.glob('train*.jpg')) - logger.log_images(files, "Mosaics", epoch) + logger.log_images(files, 'Mosaics', epoch) # end batch ------------------------------------------------------------------------------------------------ # Scheduler @@ -454,8 +454,8 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio files = ['results.png', 'confusion_matrix.png', *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))] files = [(save_dir / f) for f in files if (save_dir / f).exists()] # filter LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}") - logger.log_images(files, "Results", epoch + 1) - logger.log_images(sorted(save_dir.glob('val*.jpg')), "Validation", epoch + 1) + logger.log_images(files, 'Results', epoch + 1) + logger.log_images(sorted(save_dir.glob('val*.jpg')), 'Validation', epoch + 1) torch.cuda.empty_cache() return results @@ -548,7 +548,7 @@ def main(opt, callbacks=Callbacks()): assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' torch.cuda.set_device(LOCAL_RANK) device = torch.device('cuda', LOCAL_RANK) - dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") + dist.init_process_group(backend='nccl' if dist.is_nccl_available() else 'gloo') # Train if not opt.evolve: @@ -659,6 +659,6 @@ def run(**kwargs): return opt -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) diff --git 
a/segment/tutorial.ipynb b/segment/tutorial.ipynb index cb1af34d9f17..cb52045bcb25 100644 --- a/segment/tutorial.ipynb +++ b/segment/tutorial.ipynb @@ -591,4 +591,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} \ No newline at end of file +} diff --git a/segment/val.py b/segment/val.py index 665b540a5490..a7f95fe9b6fc 100644 --- a/segment/val.py +++ b/segment/val.py @@ -70,8 +70,8 @@ def save_one_json(predn, jdict, path, class_map, pred_masks): from pycocotools.mask import encode def single_encode(x): - rle = encode(np.asarray(x[:, :, None], order="F", dtype="uint8"))[0] - rle["counts"] = rle["counts"].decode("utf-8") + rle = encode(np.asarray(x[:, :, None], order='F', dtype='uint8'))[0] + rle['counts'] = rle['counts'].decode('utf-8') return rle image_id = int(path.stem) if path.stem.isnumeric() else path.stem @@ -105,7 +105,7 @@ def process_batch(detections, labels, iouv, pred_masks=None, gt_masks=None, over gt_masks = gt_masks.repeat(nl, 1, 1) # shape(1,640,640) -> (n,640,640) gt_masks = torch.where(gt_masks == index, 1.0, 0.0) if gt_masks.shape[1:] != pred_masks.shape[1:]: - gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode="bilinear", align_corners=False)[0] + gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode='bilinear', align_corners=False)[0] gt_masks = gt_masks.gt_(0.5) iou = mask_iou(gt_masks.view(gt_masks.shape[0], -1), pred_masks.view(pred_masks.shape[0], -1)) else: # boxes @@ -231,8 +231,8 @@ def run( if isinstance(names, (list, tuple)): # old format names = dict(enumerate(names)) class_map = coco80_to_coco91_class() if is_coco else list(range(1000)) - s = ('%22s' + '%11s' * 10) % ('Class', 'Images', 'Instances', 'Box(P', "R", "mAP50", "mAP50-95)", "Mask(P", "R", - "mAP50", "mAP50-95)") + s = ('%22s' + '%11s' * 10) % ('Class', 'Images', 'Instances', 'Box(P', 'R', 'mAP50', 'mAP50-95)', 'Mask(P', 'R', + 'mAP50', 'mAP50-95)') dt = Profile(), Profile(), Profile() metrics = Metrics() loss = torch.zeros(4, device=device) @@ 
-343,7 +343,7 @@ def run( # Print results pf = '%22s' + '%11i' * 2 + '%11.3g' * 8 # print format - LOGGER.info(pf % ("all", seen, nt.sum(), *metrics.mean_results())) + LOGGER.info(pf % ('all', seen, nt.sum(), *metrics.mean_results())) if nt.sum() == 0: LOGGER.warning(f'WARNING ⚠️ no labels found in {task} set, can not compute metrics without labels') @@ -369,7 +369,7 @@ def run( if save_json and len(jdict): w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights anno_json = str(Path('../datasets/coco/annotations/instances_val2017.json')) # annotations - pred_json = str(save_dir / f"{w}_predictions.json") # predictions + pred_json = str(save_dir / f'{w}_predictions.json') # predictions LOGGER.info(f'\nEvaluating pycocotools mAP... saving {pred_json}...') with open(pred_json, 'w') as f: json.dump(jdict, f) @@ -468,6 +468,6 @@ def main(opt): raise NotImplementedError(f'--task {opt.task} not in ("train", "val", "test", "speed", "study")') -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) diff --git a/train.py b/train.py index ccda0a7fe2e3..c4e3aac3561a 100644 --- a/train.py +++ b/train.py @@ -148,7 +148,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio # Batch size if RANK == -1 and batch_size == -1: # single-GPU only, estimate best batch size batch_size = check_train_batch_size(model, imgsz, amp) - loggers.on_params_update({"batch_size": batch_size}) + loggers.on_params_update({'batch_size': batch_size}) # Optimizer nbs = 64 # nominal batch size @@ -522,7 +522,7 @@ def main(opt, callbacks=Callbacks()): assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' torch.cuda.set_device(LOCAL_RANK) device = torch.device('cuda', LOCAL_RANK) - dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") + dist.init_process_group(backend='nccl' if dist.is_nccl_available() else 'gloo') # Train if not 
opt.evolve: @@ -635,6 +635,6 @@ def run(**kwargs): return opt -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) diff --git a/tutorial.ipynb b/tutorial.ipynb index c320d699a940..32af68b57945 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -973,4 +973,4 @@ "outputs": [] } ] -} \ No newline at end of file +} diff --git a/utils/__init__.py b/utils/__init__.py index 7bf3efe6b8c7..d158c5515a12 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -69,7 +69,7 @@ def notebook_init(verbose=True): if verbose: gb = 1 << 30 # bytes to GiB (1024 ** 3) ram = psutil.virtual_memory().total - total, used, free = shutil.disk_usage("/") + total, used, free = shutil.disk_usage('/') display.clear_output() s = f'({os.cpu_count()} CPUs, {ram / gb:.1f} GB RAM, {(total - free) / gb:.1f}/{total / gb:.1f} GB disk)' else: diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 02c2a79f5747..7687a2ba2665 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -89,7 +89,7 @@ def exif_transpose(image): if method is not None: image = image.transpose(method) del exif[0x0112] - image.info["exif"] = exif.tobytes() + image.info['exif'] = exif.tobytes() return image @@ -212,11 +212,11 @@ def __init__(self, source, img_size=640, stride=32, auto=True, transforms=None): # Parse monitor shape monitor = self.sct.monitors[self.screen] - self.top = monitor["top"] if top is None else (monitor["top"] + top) - self.left = monitor["left"] if left is None else (monitor["left"] + left) - self.width = width or monitor["width"] - self.height = height or monitor["height"] - self.monitor = {"left": self.left, "top": self.top, "width": self.width, "height": self.height} + self.top = monitor['top'] if top is None else (monitor['top'] + top) + self.left = monitor['left'] if left is None else (monitor['left'] + left) + self.width = width or monitor['width'] + self.height = height or monitor['height'] + self.monitor = {'left': self.left, 'top': self.top, 'width': 
self.width, 'height': self.height} def __iter__(self): return self @@ -224,7 +224,7 @@ def __iter__(self): def __next__(self): # mss screen capture: get raw pixels from the screen as np array im0 = np.array(self.sct.grab(self.monitor))[:, :, :3] # [:, :, :3] BGRA to BGR - s = f"screen {self.screen} (LTWH): {self.left},{self.top},{self.width},{self.height}: " + s = f'screen {self.screen} (LTWH): {self.left},{self.top},{self.width},{self.height}: ' if self.transforms: im = self.transforms(im0) # transforms @@ -239,7 +239,7 @@ def __next__(self): class LoadImages: # YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4` def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None, vid_stride=1): - if isinstance(path, str) and Path(path).suffix == ".txt": # *.txt file with img/vid/dir on each line + if isinstance(path, str) and Path(path).suffix == '.txt': # *.txt file with img/vid/dir on each line path = Path(path).read_text().rsplit() files = [] for p in sorted(path) if isinstance(path, (list, tuple)) else [path]: @@ -358,7 +358,7 @@ def __init__(self, sources='file.streams', img_size=640, stride=32, auto=True, t # YouTube format i.e. 'https://www.youtube.com/watch?v=Zgi9g1ksQHc' or 'https://youtu.be/Zgi9g1ksQHc' check_requirements(('pafy', 'youtube_dl==2020.12.2')) import pafy - s = pafy.new(s).getbest(preftype="mp4").url # YouTube URL + s = pafy.new(s).getbest(preftype='mp4').url # YouTube URL s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam if s == 0: assert not is_colab(), '--source 0 webcam unsupported on Colab. Rerun command in a local environment.' 
@@ -373,7 +373,7 @@ def __init__(self, sources='file.streams', img_size=640, stride=32, auto=True, t _, self.imgs[i] = cap.read() # guarantee first frame self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True) - LOGGER.info(f"{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)") + LOGGER.info(f'{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)') self.threads[i].start() LOGGER.info('') # newline @@ -495,7 +495,7 @@ def __init__(self, # Display cache nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupt, total if exists and LOCAL_RANK in {-1, 0}: - d = f"Scanning {cache_path}... {nf} images, {nm + ne} backgrounds, {nc} corrupt" + d = f'Scanning {cache_path}... {nf} images, {nm + ne} backgrounds, {nc} corrupt' tqdm(None, desc=prefix + d, total=n, initial=n, bar_format=TQDM_BAR_FORMAT) # display cache results if cache['msgs']: LOGGER.info('\n'.join(cache['msgs'])) # display warnings @@ -598,8 +598,8 @@ def check_cache_ram(self, safety_margin=0.1, prefix=''): mem = psutil.virtual_memory() cache = mem_required * (1 + safety_margin) < mem.available # to cache or not to cache, that is the question if not cache: - LOGGER.info(f"{prefix}{mem_required / gb:.1f}GB RAM required, " - f"{mem.available / gb:.1f}/{mem.total / gb:.1f}GB available, " + LOGGER.info(f'{prefix}{mem_required / gb:.1f}GB RAM required, ' + f'{mem.available / gb:.1f}/{mem.total / gb:.1f}GB available, ' f"{'caching images ✅' if cache else 'not caching images ⚠️'}") return cache @@ -607,7 +607,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): # Cache dataset labels, check images and read shapes x = {} # dict nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupt, messages - desc = f"{prefix}Scanning {path.parent / path.stem}..." + desc = f'{prefix}Scanning {path.parent / path.stem}...' 
with Pool(NUM_THREADS) as pool: pbar = tqdm(pool.imap(verify_image_label, zip(self.im_files, self.label_files, repeat(prefix))), desc=desc, @@ -622,7 +622,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): x[im_file] = [lb, shape, segments] if msg: msgs.append(msg) - pbar.desc = f"{desc} {nf} images, {nm + ne} backgrounds, {nc} corrupt" + pbar.desc = f'{desc} {nf} images, {nm + ne} backgrounds, {nc} corrupt' pbar.close() if msgs: @@ -1063,7 +1063,7 @@ def __init__(self, path='coco128.yaml', autodownload=False): if zipped: data['path'] = data_dir except Exception as e: - raise Exception("error/HUB/dataset_stats/yaml_load") from e + raise Exception('error/HUB/dataset_stats/yaml_load') from e check_dataset(data, autodownload) # download dataset if missing self.hub_dir = Path(data['path'] + '-hub') @@ -1188,7 +1188,7 @@ def __getitem__(self, i): else: # read image im = cv2.imread(f) # BGR if self.album_transforms: - sample = self.album_transforms(image=cv2.cvtColor(im, cv2.COLOR_BGR2RGB))["image"] + sample = self.album_transforms(image=cv2.cvtColor(im, cv2.COLOR_BGR2RGB))['image'] else: sample = self.torch_transforms(im) return sample, j diff --git a/utils/downloads.py b/utils/downloads.py index e739919540b4..643b529fba3b 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -77,7 +77,7 @@ def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''): if not file.exists() or file.stat().st_size < min_bytes: # check if file.exists(): file.unlink() # remove partial downloads - LOGGER.info(f"ERROR: {assert_msg}\n{error_msg}") + LOGGER.info(f'ERROR: {assert_msg}\n{error_msg}') LOGGER.info('') diff --git a/utils/flask_rest_api/example_request.py b/utils/flask_rest_api/example_request.py index 773ad8932967..952e5dcb90fa 100644 --- a/utils/flask_rest_api/example_request.py +++ b/utils/flask_rest_api/example_request.py @@ -7,13 +7,13 @@ import requests -DETECTION_URL = "http://localhost:5000/v1/object-detection/yolov5s" -IMAGE = "zidane.jpg" 
+DETECTION_URL = 'http://localhost:5000/v1/object-detection/yolov5s' +IMAGE = 'zidane.jpg' # Read image -with open(IMAGE, "rb") as f: +with open(IMAGE, 'rb') as f: image_data = f.read() -response = requests.post(DETECTION_URL, files={"image": image_data}).json() +response = requests.post(DETECTION_URL, files={'image': image_data}).json() pprint.pprint(response) diff --git a/utils/flask_rest_api/restapi.py b/utils/flask_rest_api/restapi.py index 8482435c861e..9258b1a68860 100644 --- a/utils/flask_rest_api/restapi.py +++ b/utils/flask_rest_api/restapi.py @@ -13,36 +13,36 @@ app = Flask(__name__) models = {} -DETECTION_URL = "/v1/object-detection/" +DETECTION_URL = '/v1/object-detection/' -@app.route(DETECTION_URL, methods=["POST"]) +@app.route(DETECTION_URL, methods=['POST']) def predict(model): - if request.method != "POST": + if request.method != 'POST': return - if request.files.get("image"): + if request.files.get('image'): # Method 1 # with request.files["image"] as f: # im = Image.open(io.BytesIO(f.read())) # Method 2 - im_file = request.files["image"] + im_file = request.files['image'] im_bytes = im_file.read() im = Image.open(io.BytesIO(im_bytes)) if model in models: results = models[model](im, size=640) # reduce size=320 for faster inference - return results.pandas().xyxy[0].to_json(orient="records") + return results.pandas().xyxy[0].to_json(orient='records') -if __name__ == "__main__": - parser = argparse.ArgumentParser(description="Flask API exposing YOLOv5 model") - parser.add_argument("--port", default=5000, type=int, help="port number") +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Flask API exposing YOLOv5 model') + parser.add_argument('--port', default=5000, type=int, help='port number') parser.add_argument('--model', nargs='+', default=['yolov5s'], help='model(s) to run, i.e. 
--model yolov5n yolov5s') opt = parser.parse_args() for m in opt.model: - models[m] = torch.hub.load("ultralytics/yolov5", m, force_reload=True, skip_validation=True) + models[m] = torch.hub.load('ultralytics/yolov5', m, force_reload=True, skip_validation=True) - app.run(host="0.0.0.0", port=opt.port) # debug=True causes Restarting with stat + app.run(host='0.0.0.0', port=opt.port) # debug=True causes Restarting with stat diff --git a/utils/general.py b/utils/general.py index a6af4f3216dd..b6efe6bb8732 100644 --- a/utils/general.py +++ b/utils/general.py @@ -38,7 +38,7 @@ import yaml from utils import TryExcept, emojis -from utils.downloads import gsutil_getsize, curl_download +from utils.downloads import curl_download, gsutil_getsize from utils.metrics import box_iou, fitness FILE = Path(__file__).resolve() @@ -90,11 +90,11 @@ def is_kaggle(): def is_docker() -> bool: """Check if the process runs inside a docker container.""" - if Path("/.dockerenv").exists(): + if Path('/.dockerenv').exists(): return True try: # check if docker is in control groups - with open("/proc/self/cgroup") as file: - return any("docker" in line for line in file) + with open('/proc/self/cgroup') as file: + return any('docker' in line for line in file) except OSError: return False @@ -113,7 +113,7 @@ def is_writeable(dir, test=False): return False -LOGGING_NAME = "yolov5" +LOGGING_NAME = 'yolov5' def set_logging(name=LOGGING_NAME, verbose=True): @@ -121,21 +121,21 @@ def set_logging(name=LOGGING_NAME, verbose=True): rank = int(os.getenv('RANK', -1)) # rank in world for Multi-GPU trainings level = logging.INFO if verbose and rank in {-1, 0} else logging.ERROR logging.config.dictConfig({ - "version": 1, - "disable_existing_loggers": False, - "formatters": { + 'version': 1, + 'disable_existing_loggers': False, + 'formatters': { name: { - "format": "%(message)s"}}, - "handlers": { + 'format': '%(message)s'}}, + 'handlers': { name: { - "class": "logging.StreamHandler", - "formatter": name, - 
"level": level,}}, - "loggers": { + 'class': 'logging.StreamHandler', + 'formatter': name, + 'level': level,}}, + 'loggers': { name: { - "level": level, - "handlers": [name], - "propagate": False,}}}) + 'level': level, + 'handlers': [name], + 'propagate': False,}}}) set_logging(LOGGING_NAME) # run before defining LOGGER @@ -218,7 +218,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): def methods(instance): # Get class/instance methods - return [f for f in dir(instance) if callable(getattr(instance, f)) and not f.startswith("__")] + return [f for f in dir(instance) if callable(getattr(instance, f)) and not f.startswith('__')] def print_args(args: Optional[dict] = None, show_file=True, show_func=False): @@ -299,7 +299,7 @@ def check_online(): def run_once(): # Check once try: - socket.create_connection(("1.1.1.1", 443), 5) # check host accessibility + socket.create_connection(('1.1.1.1', 443), 5) # check host accessibility return True except OSError: return False @@ -386,7 +386,7 @@ def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), insta check_python() # check python version if isinstance(requirements, Path): # requirements.txt file file = requirements.resolve() - assert file.exists(), f"{prefix} {file} not found, check failed." + assert file.exists(), f'{prefix} {file} not found, check failed.' 
with file.open() as f: requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(f) if x.name not in exclude] elif isinstance(requirements, str): @@ -450,7 +450,7 @@ def check_suffix(file='yolov5s.pt', suffix=('.pt',), msg=''): for f in file if isinstance(file, (list, tuple)) else [file]: s = Path(f).suffix.lower() # file suffix if len(s): - assert s in suffix, f"{msg}{f} acceptable suffix is {suffix}" + assert s in suffix, f'{msg}{f} acceptable suffix is {suffix}' def check_yaml(file, suffix=('.yaml', '.yml')): @@ -556,8 +556,8 @@ def check_dataset(data, autodownload=True): else: # python script r = exec(s, {'yaml': data}) # return None dt = f'({round(time.time() - t, 1)}s)' - s = f"success ✅ {dt}, saved to {colorstr('bold', DATASETS_DIR)}" if r in (0, None) else f"failure {dt} ❌" - LOGGER.info(f"Dataset download {s}") + s = f"success ✅ {dt}, saved to {colorstr('bold', DATASETS_DIR)}" if r in (0, None) else f'failure {dt} ❌' + LOGGER.info(f'Dataset download {s}') check_font('Arial.ttf' if is_ascii(data['names']) else 'Arial.Unicode.ttf', progress=True) # download fonts return data # dictionary @@ -673,7 +673,7 @@ def make_divisible(x, divisor): def clean_str(s): # Cleans a string by replacing special characters with underscore _ - return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s) + return re.sub(pattern='[|@#!¡·$€%&()=?¿^*;:,¨´><+]', repl='_', string=s) def one_cycle(y1=0.0, y2=1.0, steps=100): diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 1e7f38e0d677..9de1f226233c 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -121,8 +121,8 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, # Comet if comet_ml and 'comet' in self.include: - if isinstance(self.opt.resume, str) and self.opt.resume.startswith("comet://"): - run_id = self.opt.resume.split("/")[-1] + if isinstance(self.opt.resume, str) and self.opt.resume.startswith('comet://'): + run_id = 
self.opt.resume.split('/')[-1] self.comet_logger = CometLogger(self.opt, self.hyp, run_id=run_id) else: @@ -158,7 +158,7 @@ def on_pretrain_routine_end(self, labels, names): plot_labels(labels, names, self.save_dir) paths = self.save_dir.glob('*labels*.jpg') # training labels if self.wandb: - self.wandb.log({"Labels": [wandb.Image(str(x), caption=x.name) for x in paths]}) + self.wandb.log({'Labels': [wandb.Image(str(x), caption=x.name) for x in paths]}) # if self.clearml: # pass # ClearML saves these images automatically using hooks if self.comet_logger: @@ -212,7 +212,7 @@ def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix) if self.wandb or self.clearml: files = sorted(self.save_dir.glob('val*.jpg')) if self.wandb: - self.wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in files]}) + self.wandb.log({'Validation': [wandb.Image(str(f), caption=f.name) for f in files]}) if self.clearml: self.clearml.log_debug_samples(files, title='Validation') @@ -279,7 +279,7 @@ def on_train_end(self, last, best, epoch, results): if self.wandb: self.wandb.log(dict(zip(self.keys[3:10], results))) - self.wandb.log({"Results": [wandb.Image(str(f), caption=f.name) for f in files]}) + self.wandb.log({'Results': [wandb.Image(str(f), caption=f.name) for f in files]}) # Calling wandb.log. 
TODO: Refactor this into WandbLogger.log_model if not self.opt.evolve: wandb.log_artifact(str(best if best.exists() else last), @@ -329,7 +329,7 @@ def __init__(self, opt, console_logger, include=('tb', 'wandb')): if wandb and 'wandb' in self.include: self.wandb = wandb.init(project=web_project_name(str(opt.project)), - name=None if opt.name == "exp" else opt.name, + name=None if opt.name == 'exp' else opt.name, config=opt) else: self.wandb = None @@ -370,12 +370,12 @@ def log_graph(self, model, imgsz=(640, 640)): def log_model(self, model_path, epoch=0, metadata={}): # Log model to all loggers if self.wandb: - art = wandb.Artifact(name=f"run_{wandb.run.id}_model", type="model", metadata=metadata) + art = wandb.Artifact(name=f'run_{wandb.run.id}_model', type='model', metadata=metadata) art.add_file(str(model_path)) wandb.log_artifact(art) def update_params(self, params): - # Update the paramters logged + # Update the parameters logged if self.wandb: wandb.run.config.update(params, allow_val_change=True) diff --git a/utils/loggers/clearml/clearml_utils.py b/utils/loggers/clearml/clearml_utils.py index 3457727a96a4..2764abe90da8 100644 --- a/utils/loggers/clearml/clearml_utils.py +++ b/utils/loggers/clearml/clearml_utils.py @@ -25,7 +25,7 @@ def construct_dataset(clearml_info_string): dataset_root_path = Path(dataset.get_local_copy()) # We'll search for the yaml file definition in the dataset - yaml_filenames = list(glob.glob(str(dataset_root_path / "*.yaml")) + glob.glob(str(dataset_root_path / "*.yml"))) + yaml_filenames = list(glob.glob(str(dataset_root_path / '*.yaml')) + glob.glob(str(dataset_root_path / '*.yml'))) if len(yaml_filenames) > 1: raise ValueError('More than one yaml file was found in the dataset root, cannot determine which one contains ' 'the dataset definition this way.') @@ -100,7 +100,7 @@ def __init__(self, opt, hyp): self.task.connect(opt, name='Args') # Make sure the code is easily remotely runnable by setting the docker image to use by the 
remote agent - self.task.set_base_docker("ultralytics/yolov5:latest", + self.task.set_base_docker('ultralytics/yolov5:latest', docker_arguments='--ipc=host -e="CLEARML_AGENT_SKIP_PYTHON_ENV_INSTALL=1"', docker_setup_bash_script='pip install clearml') @@ -150,7 +150,7 @@ def log_image_with_boxes(self, image_path, boxes, class_names, image, conf_thres class_name = class_names[int(class_nr)] confidence_percentage = round(float(conf) * 100, 2) - label = f"{class_name}: {confidence_percentage}%" + label = f'{class_name}: {confidence_percentage}%' if conf > conf_threshold: annotator.rectangle(box.cpu().numpy(), outline=color) diff --git a/utils/loggers/comet/__init__.py b/utils/loggers/comet/__init__.py index b0318f88d6a6..d4599841c9fc 100644 --- a/utils/loggers/comet/__init__.py +++ b/utils/loggers/comet/__init__.py @@ -17,7 +17,7 @@ # Project Configuration config = comet_ml.config.get_config() - COMET_PROJECT_NAME = config.get_string(os.getenv("COMET_PROJECT_NAME"), "comet.project_name", default="yolov5") + COMET_PROJECT_NAME = config.get_string(os.getenv('COMET_PROJECT_NAME'), 'comet.project_name', default='yolov5') except (ModuleNotFoundError, ImportError): comet_ml = None COMET_PROJECT_NAME = None @@ -31,32 +31,32 @@ from utils.general import check_dataset, scale_boxes, xywh2xyxy from utils.metrics import box_iou -COMET_PREFIX = "comet://" +COMET_PREFIX = 'comet://' -COMET_MODE = os.getenv("COMET_MODE", "online") +COMET_MODE = os.getenv('COMET_MODE', 'online') # Model Saving Settings -COMET_MODEL_NAME = os.getenv("COMET_MODEL_NAME", "yolov5") +COMET_MODEL_NAME = os.getenv('COMET_MODEL_NAME', 'yolov5') # Dataset Artifact Settings -COMET_UPLOAD_DATASET = os.getenv("COMET_UPLOAD_DATASET", "false").lower() == "true" +COMET_UPLOAD_DATASET = os.getenv('COMET_UPLOAD_DATASET', 'false').lower() == 'true' # Evaluation Settings -COMET_LOG_CONFUSION_MATRIX = os.getenv("COMET_LOG_CONFUSION_MATRIX", "true").lower() == "true" -COMET_LOG_PREDICTIONS = 
os.getenv("COMET_LOG_PREDICTIONS", "true").lower() == "true" -COMET_MAX_IMAGE_UPLOADS = int(os.getenv("COMET_MAX_IMAGE_UPLOADS", 100)) +COMET_LOG_CONFUSION_MATRIX = os.getenv('COMET_LOG_CONFUSION_MATRIX', 'true').lower() == 'true' +COMET_LOG_PREDICTIONS = os.getenv('COMET_LOG_PREDICTIONS', 'true').lower() == 'true' +COMET_MAX_IMAGE_UPLOADS = int(os.getenv('COMET_MAX_IMAGE_UPLOADS', 100)) # Confusion Matrix Settings -CONF_THRES = float(os.getenv("CONF_THRES", 0.001)) -IOU_THRES = float(os.getenv("IOU_THRES", 0.6)) +CONF_THRES = float(os.getenv('CONF_THRES', 0.001)) +IOU_THRES = float(os.getenv('IOU_THRES', 0.6)) # Batch Logging Settings -COMET_LOG_BATCH_METRICS = os.getenv("COMET_LOG_BATCH_METRICS", "false").lower() == "true" -COMET_BATCH_LOGGING_INTERVAL = os.getenv("COMET_BATCH_LOGGING_INTERVAL", 1) -COMET_PREDICTION_LOGGING_INTERVAL = os.getenv("COMET_PREDICTION_LOGGING_INTERVAL", 1) -COMET_LOG_PER_CLASS_METRICS = os.getenv("COMET_LOG_PER_CLASS_METRICS", "false").lower() == "true" +COMET_LOG_BATCH_METRICS = os.getenv('COMET_LOG_BATCH_METRICS', 'false').lower() == 'true' +COMET_BATCH_LOGGING_INTERVAL = os.getenv('COMET_BATCH_LOGGING_INTERVAL', 1) +COMET_PREDICTION_LOGGING_INTERVAL = os.getenv('COMET_PREDICTION_LOGGING_INTERVAL', 1) +COMET_LOG_PER_CLASS_METRICS = os.getenv('COMET_LOG_PER_CLASS_METRICS', 'false').lower() == 'true' -RANK = int(os.getenv("RANK", -1)) +RANK = int(os.getenv('RANK', -1)) to_pil = T.ToPILImage() @@ -66,7 +66,7 @@ class CometLogger: with Comet """ - def __init__(self, opt, hyp, run_id=None, job_type="Training", **experiment_kwargs) -> None: + def __init__(self, opt, hyp, run_id=None, job_type='Training', **experiment_kwargs) -> None: self.job_type = job_type self.opt = opt self.hyp = hyp @@ -87,52 +87,52 @@ def __init__(self, opt, hyp, run_id=None, job_type="Training", **experiment_kwar # Default parameters to pass to Experiment objects self.default_experiment_kwargs = { - "log_code": False, - "log_env_gpu": True, - "log_env_cpu": True, - 
"project_name": COMET_PROJECT_NAME,} + 'log_code': False, + 'log_env_gpu': True, + 'log_env_cpu': True, + 'project_name': COMET_PROJECT_NAME,} self.default_experiment_kwargs.update(experiment_kwargs) self.experiment = self._get_experiment(self.comet_mode, run_id) self.data_dict = self.check_dataset(self.opt.data) - self.class_names = self.data_dict["names"] - self.num_classes = self.data_dict["nc"] + self.class_names = self.data_dict['names'] + self.num_classes = self.data_dict['nc'] self.logged_images_count = 0 self.max_images = COMET_MAX_IMAGE_UPLOADS if run_id is None: - self.experiment.log_other("Created from", "YOLOv5") + self.experiment.log_other('Created from', 'YOLOv5') if not isinstance(self.experiment, comet_ml.OfflineExperiment): - workspace, project_name, experiment_id = self.experiment.url.split("/")[-3:] + workspace, project_name, experiment_id = self.experiment.url.split('/')[-3:] self.experiment.log_other( - "Run Path", - f"{workspace}/{project_name}/{experiment_id}", + 'Run Path', + f'{workspace}/{project_name}/{experiment_id}', ) self.log_parameters(vars(opt)) self.log_parameters(self.opt.hyp) self.log_asset_data( self.opt.hyp, - name="hyperparameters.json", - metadata={"type": "hyp-config-file"}, + name='hyperparameters.json', + metadata={'type': 'hyp-config-file'}, ) self.log_asset( - f"{self.opt.save_dir}/opt.yaml", - metadata={"type": "opt-config-file"}, + f'{self.opt.save_dir}/opt.yaml', + metadata={'type': 'opt-config-file'}, ) self.comet_log_confusion_matrix = COMET_LOG_CONFUSION_MATRIX - if hasattr(self.opt, "conf_thres"): + if hasattr(self.opt, 'conf_thres'): self.conf_thres = self.opt.conf_thres else: self.conf_thres = CONF_THRES - if hasattr(self.opt, "iou_thres"): + if hasattr(self.opt, 'iou_thres'): self.iou_thres = self.opt.iou_thres else: self.iou_thres = IOU_THRES - self.log_parameters({"val_iou_threshold": self.iou_thres, "val_conf_threshold": self.conf_thres}) + self.log_parameters({'val_iou_threshold': self.iou_thres, 
'val_conf_threshold': self.conf_thres}) self.comet_log_predictions = COMET_LOG_PREDICTIONS if self.opt.bbox_interval == -1: @@ -147,22 +147,22 @@ def __init__(self, opt, hyp, run_id=None, job_type="Training", **experiment_kwar self.comet_log_per_class_metrics = COMET_LOG_PER_CLASS_METRICS self.experiment.log_others({ - "comet_mode": COMET_MODE, - "comet_max_image_uploads": COMET_MAX_IMAGE_UPLOADS, - "comet_log_per_class_metrics": COMET_LOG_PER_CLASS_METRICS, - "comet_log_batch_metrics": COMET_LOG_BATCH_METRICS, - "comet_log_confusion_matrix": COMET_LOG_CONFUSION_MATRIX, - "comet_model_name": COMET_MODEL_NAME,}) + 'comet_mode': COMET_MODE, + 'comet_max_image_uploads': COMET_MAX_IMAGE_UPLOADS, + 'comet_log_per_class_metrics': COMET_LOG_PER_CLASS_METRICS, + 'comet_log_batch_metrics': COMET_LOG_BATCH_METRICS, + 'comet_log_confusion_matrix': COMET_LOG_CONFUSION_MATRIX, + 'comet_model_name': COMET_MODEL_NAME,}) # Check if running the Experiment with the Comet Optimizer - if hasattr(self.opt, "comet_optimizer_id"): - self.experiment.log_other("optimizer_id", self.opt.comet_optimizer_id) - self.experiment.log_other("optimizer_objective", self.opt.comet_optimizer_objective) - self.experiment.log_other("optimizer_metric", self.opt.comet_optimizer_metric) - self.experiment.log_other("optimizer_parameters", json.dumps(self.hyp)) + if hasattr(self.opt, 'comet_optimizer_id'): + self.experiment.log_other('optimizer_id', self.opt.comet_optimizer_id) + self.experiment.log_other('optimizer_objective', self.opt.comet_optimizer_objective) + self.experiment.log_other('optimizer_metric', self.opt.comet_optimizer_metric) + self.experiment.log_other('optimizer_parameters', json.dumps(self.hyp)) def _get_experiment(self, mode, experiment_id=None): - if mode == "offline": + if mode == 'offline': if experiment_id is not None: return comet_ml.ExistingOfflineExperiment( previous_experiment=experiment_id, @@ -182,11 +182,11 @@ def _get_experiment(self, mode, experiment_id=None): return 
comet_ml.Experiment(**self.default_experiment_kwargs) except ValueError: - logger.warning("COMET WARNING: " - "Comet credentials have not been set. " - "Comet will default to offline logging. " - "Please set your credentials to enable online logging.") - return self._get_experiment("offline", experiment_id) + logger.warning('COMET WARNING: ' + 'Comet credentials have not been set. ' + 'Comet will default to offline logging. ' + 'Please set your credentials to enable online logging.') + return self._get_experiment('offline', experiment_id) return @@ -210,12 +210,12 @@ def log_model(self, path, opt, epoch, fitness_score, best_model=False): return model_metadata = { - "fitness_score": fitness_score[-1], - "epochs_trained": epoch + 1, - "save_period": opt.save_period, - "total_epochs": opt.epochs,} + 'fitness_score': fitness_score[-1], + 'epochs_trained': epoch + 1, + 'save_period': opt.save_period, + 'total_epochs': opt.epochs,} - model_files = glob.glob(f"{path}/*.pt") + model_files = glob.glob(f'{path}/*.pt') for model_path in model_files: name = Path(model_path).name @@ -232,12 +232,12 @@ def check_dataset(self, data_file): data_config = yaml.safe_load(f) if data_config['path'].startswith(COMET_PREFIX): - path = data_config['path'].replace(COMET_PREFIX, "") + path = data_config['path'].replace(COMET_PREFIX, '') data_dict = self.download_dataset_artifact(path) return data_dict - self.log_asset(self.opt.data, metadata={"type": "data-config-file"}) + self.log_asset(self.opt.data, metadata={'type': 'data-config-file'}) return check_dataset(data_file) @@ -253,8 +253,8 @@ def log_predictions(self, image, labelsn, path, shape, predn): filtered_detections = detections[mask] filtered_labels = labelsn[mask] - image_id = path.split("/")[-1].split(".")[0] - image_name = f"{image_id}_curr_epoch_{self.experiment.curr_epoch}" + image_id = path.split('/')[-1].split('.')[0] + image_name = f'{image_id}_curr_epoch_{self.experiment.curr_epoch}' if image_name not in 
self.logged_image_names: native_scale_image = PIL.Image.open(path) self.log_image(native_scale_image, name=image_name) @@ -263,22 +263,22 @@ def log_predictions(self, image, labelsn, path, shape, predn): metadata = [] for cls, *xyxy in filtered_labels.tolist(): metadata.append({ - "label": f"{self.class_names[int(cls)]}-gt", - "score": 100, - "box": { - "x": xyxy[0], - "y": xyxy[1], - "x2": xyxy[2], - "y2": xyxy[3]},}) + 'label': f'{self.class_names[int(cls)]}-gt', + 'score': 100, + 'box': { + 'x': xyxy[0], + 'y': xyxy[1], + 'x2': xyxy[2], + 'y2': xyxy[3]},}) for *xyxy, conf, cls in filtered_detections.tolist(): metadata.append({ - "label": f"{self.class_names[int(cls)]}", - "score": conf * 100, - "box": { - "x": xyxy[0], - "y": xyxy[1], - "x2": xyxy[2], - "y2": xyxy[3]},}) + 'label': f'{self.class_names[int(cls)]}', + 'score': conf * 100, + 'box': { + 'x': xyxy[0], + 'y': xyxy[1], + 'x2': xyxy[2], + 'y2': xyxy[3]},}) self.metadata_dict[image_name] = metadata self.logged_images_count += 1 @@ -305,35 +305,35 @@ def preprocess_prediction(self, image, labels, shape, pred): return predn, labelsn def add_assets_to_artifact(self, artifact, path, asset_path, split): - img_paths = sorted(glob.glob(f"{asset_path}/*")) + img_paths = sorted(glob.glob(f'{asset_path}/*')) label_paths = img2label_paths(img_paths) for image_file, label_file in zip(img_paths, label_paths): image_logical_path, label_logical_path = map(lambda x: os.path.relpath(x, path), [image_file, label_file]) try: - artifact.add(image_file, logical_path=image_logical_path, metadata={"split": split}) - artifact.add(label_file, logical_path=label_logical_path, metadata={"split": split}) + artifact.add(image_file, logical_path=image_logical_path, metadata={'split': split}) + artifact.add(label_file, logical_path=label_logical_path, metadata={'split': split}) except ValueError as e: logger.error('COMET ERROR: Error adding file to Artifact. 
Skipping file.') - logger.error(f"COMET ERROR: {e}") + logger.error(f'COMET ERROR: {e}') continue return artifact def upload_dataset_artifact(self): - dataset_name = self.data_dict.get("dataset_name", "yolov5-dataset") - path = str((ROOT / Path(self.data_dict["path"])).resolve()) + dataset_name = self.data_dict.get('dataset_name', 'yolov5-dataset') + path = str((ROOT / Path(self.data_dict['path'])).resolve()) metadata = self.data_dict.copy() - for key in ["train", "val", "test"]: + for key in ['train', 'val', 'test']: split_path = metadata.get(key) if split_path is not None: - metadata[key] = split_path.replace(path, "") + metadata[key] = split_path.replace(path, '') - artifact = comet_ml.Artifact(name=dataset_name, artifact_type="dataset", metadata=metadata) + artifact = comet_ml.Artifact(name=dataset_name, artifact_type='dataset', metadata=metadata) for key in metadata.keys(): - if key in ["train", "val", "test"]: + if key in ['train', 'val', 'test']: if isinstance(self.upload_dataset, str) and (key != self.upload_dataset): continue @@ -352,13 +352,13 @@ def download_dataset_artifact(self, artifact_path): metadata = logged_artifact.metadata data_dict = metadata.copy() - data_dict["path"] = artifact_save_dir + data_dict['path'] = artifact_save_dir - metadata_names = metadata.get("names") + metadata_names = metadata.get('names') if type(metadata_names) == dict: - data_dict["names"] = {int(k): v for k, v in metadata.get("names").items()} + data_dict['names'] = {int(k): v for k, v in metadata.get('names').items()} elif type(metadata_names) == list: - data_dict["names"] = {int(k): v for k, v in zip(range(len(metadata_names)), metadata_names)} + data_dict['names'] = {int(k): v for k, v in zip(range(len(metadata_names)), metadata_names)} else: raise "Invalid 'names' field in dataset yaml file. 
Please use a list or dictionary" @@ -366,13 +366,13 @@ def download_dataset_artifact(self, artifact_path): return data_dict def update_data_paths(self, data_dict): - path = data_dict.get("path", "") + path = data_dict.get('path', '') - for split in ["train", "val", "test"]: + for split in ['train', 'val', 'test']: if data_dict.get(split): split_path = data_dict.get(split) - data_dict[split] = (f"{path}/{split_path}" if isinstance(split, str) else [ - f"{path}/{x}" for x in split_path]) + data_dict[split] = (f'{path}/{split_path}' if isinstance(split, str) else [ + f'{path}/{x}' for x in split_path]) return data_dict @@ -413,11 +413,11 @@ def on_train_batch_end(self, log_dict, step): def on_train_end(self, files, save_dir, last, best, epoch, results): if self.comet_log_predictions: curr_epoch = self.experiment.curr_epoch - self.experiment.log_asset_data(self.metadata_dict, "image-metadata.json", epoch=curr_epoch) + self.experiment.log_asset_data(self.metadata_dict, 'image-metadata.json', epoch=curr_epoch) for f in files: - self.log_asset(f, metadata={"epoch": epoch}) - self.log_asset(f"{save_dir}/results.csv", metadata={"epoch": epoch}) + self.log_asset(f, metadata={'epoch': epoch}) + self.log_asset(f'{save_dir}/results.csv', metadata={'epoch': epoch}) if not self.opt.evolve: model_path = str(best if best.exists() else last) @@ -481,7 +481,7 @@ def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix) if self.comet_log_confusion_matrix: epoch = self.experiment.curr_epoch class_names = list(self.class_names.values()) - class_names.append("background") + class_names.append('background') num_classes = len(class_names) self.experiment.log_confusion_matrix( @@ -491,7 +491,7 @@ def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix) epoch=epoch, column_label='Actual Category', row_label='Predicted Category', - file_name=f"confusion-matrix-epoch-{epoch}.json", + file_name=f'confusion-matrix-epoch-{epoch}.json', ) def 
on_fit_epoch_end(self, result, epoch): diff --git a/utils/loggers/comet/comet_utils.py b/utils/loggers/comet/comet_utils.py index 3cbd45156b57..27600761ad28 100644 --- a/utils/loggers/comet/comet_utils.py +++ b/utils/loggers/comet/comet_utils.py @@ -11,28 +11,28 @@ logger = logging.getLogger(__name__) -COMET_PREFIX = "comet://" -COMET_MODEL_NAME = os.getenv("COMET_MODEL_NAME", "yolov5") -COMET_DEFAULT_CHECKPOINT_FILENAME = os.getenv("COMET_DEFAULT_CHECKPOINT_FILENAME", "last.pt") +COMET_PREFIX = 'comet://' +COMET_MODEL_NAME = os.getenv('COMET_MODEL_NAME', 'yolov5') +COMET_DEFAULT_CHECKPOINT_FILENAME = os.getenv('COMET_DEFAULT_CHECKPOINT_FILENAME', 'last.pt') def download_model_checkpoint(opt, experiment): - model_dir = f"{opt.project}/{experiment.name}" + model_dir = f'{opt.project}/{experiment.name}' os.makedirs(model_dir, exist_ok=True) model_name = COMET_MODEL_NAME model_asset_list = experiment.get_model_asset_list(model_name) if len(model_asset_list) == 0: - logger.error(f"COMET ERROR: No checkpoints found for model name : {model_name}") + logger.error(f'COMET ERROR: No checkpoints found for model name : {model_name}') return model_asset_list = sorted( model_asset_list, - key=lambda x: x["step"], + key=lambda x: x['step'], reverse=True, ) - logged_checkpoint_map = {asset["fileName"]: asset["assetId"] for asset in model_asset_list} + logged_checkpoint_map = {asset['fileName']: asset['assetId'] for asset in model_asset_list} resource_url = urlparse(opt.weights) checkpoint_filename = resource_url.query @@ -44,22 +44,22 @@ def download_model_checkpoint(opt, experiment): checkpoint_filename = COMET_DEFAULT_CHECKPOINT_FILENAME if asset_id is None: - logger.error(f"COMET ERROR: Checkpoint {checkpoint_filename} not found in the given Experiment") + logger.error(f'COMET ERROR: Checkpoint {checkpoint_filename} not found in the given Experiment') return try: - logger.info(f"COMET INFO: Downloading checkpoint {checkpoint_filename}") + logger.info(f'COMET INFO: Downloading 
checkpoint {checkpoint_filename}') asset_filename = checkpoint_filename - model_binary = experiment.get_asset(asset_id, return_type="binary", stream=False) - model_download_path = f"{model_dir}/{asset_filename}" - with open(model_download_path, "wb") as f: + model_binary = experiment.get_asset(asset_id, return_type='binary', stream=False) + model_download_path = f'{model_dir}/{asset_filename}' + with open(model_download_path, 'wb') as f: f.write(model_binary) opt.weights = model_download_path except Exception as e: - logger.warning("COMET WARNING: Unable to download checkpoint from Comet") + logger.warning('COMET WARNING: Unable to download checkpoint from Comet') logger.exception(e) @@ -75,9 +75,9 @@ def set_opt_parameters(opt, experiment): resume_string = opt.resume for asset in asset_list: - if asset["fileName"] == "opt.yaml": - asset_id = asset["assetId"] - asset_binary = experiment.get_asset(asset_id, return_type="binary", stream=False) + if asset['fileName'] == 'opt.yaml': + asset_id = asset['assetId'] + asset_binary = experiment.get_asset(asset_id, return_type='binary', stream=False) opt_dict = yaml.safe_load(asset_binary) for key, value in opt_dict.items(): setattr(opt, key, value) @@ -85,11 +85,11 @@ def set_opt_parameters(opt, experiment): # Save hyperparameters to YAML file # Necessary to pass checks in training script - save_dir = f"{opt.project}/{experiment.name}" + save_dir = f'{opt.project}/{experiment.name}' os.makedirs(save_dir, exist_ok=True) - hyp_yaml_path = f"{save_dir}/hyp.yaml" - with open(hyp_yaml_path, "w") as f: + hyp_yaml_path = f'{save_dir}/hyp.yaml' + with open(hyp_yaml_path, 'w') as f: yaml.dump(opt.hyp, f) opt.hyp = hyp_yaml_path @@ -113,7 +113,7 @@ def check_comet_weights(opt): if opt.weights.startswith(COMET_PREFIX): api = comet_ml.API() resource = urlparse(opt.weights) - experiment_path = f"{resource.netloc}{resource.path}" + experiment_path = f'{resource.netloc}{resource.path}' experiment = api.get(experiment_path) 
download_model_checkpoint(opt, experiment) return True @@ -140,7 +140,7 @@ def check_comet_resume(opt): if opt.resume.startswith(COMET_PREFIX): api = comet_ml.API() resource = urlparse(opt.resume) - experiment_path = f"{resource.netloc}{resource.path}" + experiment_path = f'{resource.netloc}{resource.path}' experiment = api.get(experiment_path) set_opt_parameters(opt, experiment) download_model_checkpoint(opt, experiment) diff --git a/utils/loggers/comet/hpo.py b/utils/loggers/comet/hpo.py index 7dd5c92e8de1..fc49115c1358 100644 --- a/utils/loggers/comet/hpo.py +++ b/utils/loggers/comet/hpo.py @@ -21,7 +21,7 @@ # Project Configuration config = comet_ml.config.get_config() -COMET_PROJECT_NAME = config.get_string(os.getenv("COMET_PROJECT_NAME"), "comet.project_name", default="yolov5") +COMET_PROJECT_NAME = config.get_string(os.getenv('COMET_PROJECT_NAME'), 'comet.project_name', default='yolov5') def get_args(known=False): @@ -68,30 +68,30 @@ def get_args(known=False): parser.add_argument('--artifact_alias', type=str, default='latest', help='W&B: Version of dataset artifact to use') # Comet Arguments - parser.add_argument("--comet_optimizer_config", type=str, help="Comet: Path to a Comet Optimizer Config File.") - parser.add_argument("--comet_optimizer_id", type=str, help="Comet: ID of the Comet Optimizer sweep.") - parser.add_argument("--comet_optimizer_objective", type=str, help="Comet: Set to 'minimize' or 'maximize'.") - parser.add_argument("--comet_optimizer_metric", type=str, help="Comet: Metric to Optimize.") - parser.add_argument("--comet_optimizer_workers", + parser.add_argument('--comet_optimizer_config', type=str, help='Comet: Path to a Comet Optimizer Config File.') + parser.add_argument('--comet_optimizer_id', type=str, help='Comet: ID of the Comet Optimizer sweep.') + parser.add_argument('--comet_optimizer_objective', type=str, help="Comet: Set to 'minimize' or 'maximize'.") + parser.add_argument('--comet_optimizer_metric', type=str, help='Comet: Metric 
to Optimize.') + parser.add_argument('--comet_optimizer_workers', type=int, default=1, - help="Comet: Number of Parallel Workers to use with the Comet Optimizer.") + help='Comet: Number of Parallel Workers to use with the Comet Optimizer.') return parser.parse_known_args()[0] if known else parser.parse_args() def run(parameters, opt): - hyp_dict = {k: v for k, v in parameters.items() if k not in ["epochs", "batch_size"]} + hyp_dict = {k: v for k, v in parameters.items() if k not in ['epochs', 'batch_size']} opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve)) - opt.batch_size = parameters.get("batch_size") - opt.epochs = parameters.get("epochs") + opt.batch_size = parameters.get('batch_size') + opt.epochs = parameters.get('epochs') device = select_device(opt.device, batch_size=opt.batch_size) train(hyp_dict, opt, device, callbacks=Callbacks()) -if __name__ == "__main__": +if __name__ == '__main__': opt = get_args(known=True) opt.weights = str(opt.weights) @@ -99,7 +99,7 @@ def run(parameters, opt): opt.data = str(opt.data) opt.project = str(opt.project) - optimizer_id = os.getenv("COMET_OPTIMIZER_ID") + optimizer_id = os.getenv('COMET_OPTIMIZER_ID') if optimizer_id is None: with open(opt.comet_optimizer_config) as f: optimizer_config = json.load(f) @@ -110,9 +110,9 @@ def run(parameters, opt): opt.comet_optimizer_id = optimizer.id status = optimizer.status() - opt.comet_optimizer_objective = status["spec"]["objective"] - opt.comet_optimizer_metric = status["spec"]["metric"] + opt.comet_optimizer_objective = status['spec']['objective'] + opt.comet_optimizer_metric = status['spec']['metric'] - logger.info("COMET INFO: Starting Hyperparameter Sweep") + logger.info('COMET INFO: Starting Hyperparameter Sweep') for parameter in optimizer.get_parameters(): - run(parameter["parameters"], opt) + run(parameter['parameters'], opt) diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index 
6bc2ec510d0a..c8ab38197381 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -17,7 +17,7 @@ sys.path.append(str(ROOT)) # add ROOT to PATH RANK = int(os.getenv('RANK', -1)) DEPRECATION_WARNING = f"{colorstr('wandb')}: WARNING ⚠️ wandb is deprecated and will be removed in a future release. " \ - f"See supported integrations at https://github.com/ultralytics/yolov5#integrations." + f'See supported integrations at https://github.com/ultralytics/yolov5#integrations.' try: import wandb @@ -65,7 +65,7 @@ def __init__(self, opt, run_id=None, job_type='Training'): self.data_dict = None if self.wandb: self.wandb_run = wandb.init(config=opt, - resume="allow", + resume='allow', project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem, entity=opt.entity, name=opt.name if opt.name != 'exp' else None, @@ -97,7 +97,7 @@ def setup_training(self, opt): if isinstance(opt.resume, str): model_dir, _ = self.download_model_artifact(opt) if model_dir: - self.weights = Path(model_dir) / "last.pt" + self.weights = Path(model_dir) / 'last.pt' config = self.wandb_run.config opt.weights, opt.save_period, opt.batch_size, opt.bbox_interval, opt.epochs, opt.hyp, opt.imgsz = str( self.weights), config.save_period, config.batch_size, config.bbox_interval, config.epochs, \ @@ -131,7 +131,7 @@ def log_model(self, path, opt, epoch, fitness_score, best_model=False): model_artifact.add_file(str(path / 'last.pt'), name='last.pt') wandb.log_artifact(model_artifact, aliases=['latest', 'last', 'epoch ' + str(self.current_epoch), 'best' if best_model else '']) - LOGGER.info(f"Saving model artifact on epoch {epoch + 1}") + LOGGER.info(f'Saving model artifact on epoch {epoch + 1}') def val_one_image(self, pred, predn, path, names, im): pass @@ -160,7 +160,7 @@ def end_epoch(self): wandb.log(self.log_dict) except BaseException as e: LOGGER.info( - f"An error occurred in wandb logger. The training will proceed without interruption. 
More info\n{e}" + f'An error occurred in wandb logger. The training will proceed without interruption. More info\n{e}' ) self.wandb_run.finish() self.wandb_run = None diff --git a/utils/metrics.py b/utils/metrics.py index 7fb077774384..95f364c23f34 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -28,7 +28,7 @@ def smooth(y, f=0.05): return np.convolve(yp, np.ones(nf) / nf, mode='valid') # y-smoothed -def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=(), eps=1e-16, prefix=""): +def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=(), eps=1e-16, prefix=''): """ Compute the average precision, given the recall and precision curves. Source: https://github.com/rafaelpadilla/Object-Detection-Metrics. # Arguments @@ -194,14 +194,14 @@ def plot(self, normalize=True, save_dir='', names=()): nc, nn = self.nc, len(names) # number of classes, names sn.set(font_scale=1.0 if nc < 50 else 0.8) # for label size labels = (0 < nn < 99) and (nn == nc) # apply names to ticklabels - ticklabels = (names + ['background']) if labels else "auto" + ticklabels = (names + ['background']) if labels else 'auto' with warnings.catch_warnings(): warnings.simplefilter('ignore') # suppress empty matrix RuntimeWarning: All-NaN slice encountered sn.heatmap(array, ax=ax, annot=nc < 30, annot_kws={ - "size": 8}, + 'size': 8}, cmap='Blues', fmt='.2f', square=True, @@ -331,7 +331,7 @@ def plot_pr_curve(px, py, ap, save_dir=Path('pr_curve.png'), names=()): ax.set_ylabel('Precision') ax.set_xlim(0, 1) ax.set_ylim(0, 1) - ax.legend(bbox_to_anchor=(1.04, 1), loc="upper left") + ax.legend(bbox_to_anchor=(1.04, 1), loc='upper left') ax.set_title('Precision-Recall Curve') fig.savefig(save_dir, dpi=250) plt.close(fig) @@ -354,7 +354,7 @@ def plot_mc_curve(px, py, save_dir=Path('mc_curve.png'), names=(), xlabel='Confi ax.set_ylabel(ylabel) ax.set_xlim(0, 1) ax.set_ylim(0, 1) - ax.legend(bbox_to_anchor=(1.04, 1), loc="upper left") + 
ax.legend(bbox_to_anchor=(1.04, 1), loc='upper left') ax.set_title(f'{ylabel}-Confidence Curve') fig.savefig(save_dir, dpi=250) plt.close(fig) diff --git a/utils/plots.py b/utils/plots.py index f84aed9fb5c7..24c618c80b59 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -450,7 +450,7 @@ def imshow_cls(im, labels=None, pred=None, names=None, nmax=25, verbose=False, f plt.savefig(f, dpi=300, bbox_inches='tight') plt.close() if verbose: - LOGGER.info(f"Saving {f}") + LOGGER.info(f'Saving {f}') if labels is not None: LOGGER.info('True: ' + ' '.join(f'{names[i]:3s}' for i in labels[:nmax])) if pred is not None: diff --git a/utils/segment/dataloaders.py b/utils/segment/dataloaders.py index d66b36115e3f..097a5d5cb058 100644 --- a/utils/segment/dataloaders.py +++ b/utils/segment/dataloaders.py @@ -95,7 +95,7 @@ def __init__( stride=32, pad=0, min_items=0, - prefix="", + prefix='', downsample_ratio=1, overlap=False, ): @@ -116,7 +116,7 @@ def __getitem__(self, index): shapes = None # MixUp augmentation - if random.random() < hyp["mixup"]: + if random.random() < hyp['mixup']: img, labels, segments = mixup(img, labels, segments, *self.load_mosaic(random.randint(0, self.n - 1))) else: @@ -147,11 +147,11 @@ def __getitem__(self, index): img, labels, segments = random_perspective(img, labels, segments=segments, - degrees=hyp["degrees"], - translate=hyp["translate"], - scale=hyp["scale"], - shear=hyp["shear"], - perspective=hyp["perspective"]) + degrees=hyp['degrees'], + translate=hyp['translate'], + scale=hyp['scale'], + shear=hyp['shear'], + perspective=hyp['perspective']) nl = len(labels) # number of labels if nl: @@ -177,17 +177,17 @@ def __getitem__(self, index): nl = len(labels) # update after albumentations # HSV color-space - augment_hsv(img, hgain=hyp["hsv_h"], sgain=hyp["hsv_s"], vgain=hyp["hsv_v"]) + augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v']) # Flip up-down - if random.random() < hyp["flipud"]: + if random.random() < hyp['flipud']: 
img = np.flipud(img) if nl: labels[:, 2] = 1 - labels[:, 2] masks = torch.flip(masks, dims=[1]) # Flip left-right - if random.random() < hyp["fliplr"]: + if random.random() < hyp['fliplr']: img = np.fliplr(img) if nl: labels[:, 1] = 1 - labels[:, 1] @@ -251,15 +251,15 @@ def load_mosaic(self, index): # img4, labels4 = replicate(img4, labels4) # replicate # Augment - img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp["copy_paste"]) + img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp['copy_paste']) img4, labels4, segments4 = random_perspective(img4, labels4, segments4, - degrees=self.hyp["degrees"], - translate=self.hyp["translate"], - scale=self.hyp["scale"], - shear=self.hyp["shear"], - perspective=self.hyp["perspective"], + degrees=self.hyp['degrees'], + translate=self.hyp['translate'], + scale=self.hyp['scale'], + shear=self.hyp['shear'], + perspective=self.hyp['perspective'], border=self.mosaic_border) # border to remove return img4, labels4, segments4 diff --git a/utils/segment/loss.py b/utils/segment/loss.py index b45b2c27e0a0..2a8a4c680f6f 100644 --- a/utils/segment/loss.py +++ b/utils/segment/loss.py @@ -83,7 +83,7 @@ def __call__(self, preds, targets, masks): # predictions, targets, model # Mask regression if tuple(masks.shape[-2:]) != (mask_h, mask_w): # downsample - masks = F.interpolate(masks[None], (mask_h, mask_w), mode="nearest")[0] + masks = F.interpolate(masks[None], (mask_h, mask_w), mode='nearest')[0] marea = xywhn[i][:, 2:].prod(1) # mask width, height normalized mxyxy = xywh2xyxy(xywhn[i] * torch.tensor([mask_w, mask_h, mask_w, mask_h], device=self.device)) for bi in b.unique(): @@ -101,10 +101,10 @@ def __call__(self, preds, targets, masks): # predictions, targets, model if self.autobalance: self.balance = [x / self.balance[self.ssi] for x in self.balance] - lbox *= self.hyp["box"] - lobj *= self.hyp["obj"] - lcls *= self.hyp["cls"] - lseg *= self.hyp["box"] / bs + lbox *= self.hyp['box'] + lobj 
*= self.hyp['obj'] + lcls *= self.hyp['cls'] + lseg *= self.hyp['box'] / bs loss = lbox + lobj + lcls + lseg return loss * bs, torch.cat((lbox, lseg, lobj, lcls)).detach() @@ -112,7 +112,7 @@ def __call__(self, preds, targets, masks): # predictions, targets, model def single_mask_loss(self, gt_mask, pred, proto, xyxy, area): # Mask loss for one image pred_mask = (pred @ proto.view(self.nm, -1)).view(-1, *proto.shape[1:]) # (n,32) @ (32,80,80) -> (n,80,80) - loss = F.binary_cross_entropy_with_logits(pred_mask, gt_mask, reduction="none") + loss = F.binary_cross_entropy_with_logits(pred_mask, gt_mask, reduction='none') return (crop_mask(loss, xyxy).mean(dim=(1, 2)) / area).mean() def build_targets(self, p, targets): diff --git a/utils/segment/metrics.py b/utils/segment/metrics.py index b09ce23fb9e3..c9f137e38ead 100644 --- a/utils/segment/metrics.py +++ b/utils/segment/metrics.py @@ -21,7 +21,7 @@ def ap_per_class_box_and_mask( pred_cls, target_cls, plot=False, - save_dir=".", + save_dir='.', names=(), ): """ @@ -37,7 +37,7 @@ def ap_per_class_box_and_mask( plot=plot, save_dir=save_dir, names=names, - prefix="Box")[2:] + prefix='Box')[2:] results_masks = ap_per_class(tp_m, conf, pred_cls, @@ -45,21 +45,21 @@ def ap_per_class_box_and_mask( plot=plot, save_dir=save_dir, names=names, - prefix="Mask")[2:] + prefix='Mask')[2:] results = { - "boxes": { - "p": results_boxes[0], - "r": results_boxes[1], - "ap": results_boxes[3], - "f1": results_boxes[2], - "ap_class": results_boxes[4]}, - "masks": { - "p": results_masks[0], - "r": results_masks[1], - "ap": results_masks[3], - "f1": results_masks[2], - "ap_class": results_masks[4]}} + 'boxes': { + 'p': results_boxes[0], + 'r': results_boxes[1], + 'ap': results_boxes[3], + 'f1': results_boxes[2], + 'ap_class': results_boxes[4]}, + 'masks': { + 'p': results_masks[0], + 'r': results_masks[1], + 'ap': results_masks[3], + 'f1': results_masks[2], + 'ap_class': results_masks[4]}} return results @@ -159,8 +159,8 @@ def update(self, 
results): Args: results: Dict{'boxes': Dict{}, 'masks': Dict{}} """ - self.metric_box.update(list(results["boxes"].values())) - self.metric_mask.update(list(results["masks"].values())) + self.metric_box.update(list(results['boxes'].values())) + self.metric_mask.update(list(results['masks'].values())) def mean_results(self): return self.metric_box.mean_results() + self.metric_mask.mean_results() @@ -178,33 +178,33 @@ def ap_class_index(self): KEYS = [ - "train/box_loss", - "train/seg_loss", # train loss - "train/obj_loss", - "train/cls_loss", - "metrics/precision(B)", - "metrics/recall(B)", - "metrics/mAP_0.5(B)", - "metrics/mAP_0.5:0.95(B)", # metrics - "metrics/precision(M)", - "metrics/recall(M)", - "metrics/mAP_0.5(M)", - "metrics/mAP_0.5:0.95(M)", # metrics - "val/box_loss", - "val/seg_loss", # val loss - "val/obj_loss", - "val/cls_loss", - "x/lr0", - "x/lr1", - "x/lr2",] + 'train/box_loss', + 'train/seg_loss', # train loss + 'train/obj_loss', + 'train/cls_loss', + 'metrics/precision(B)', + 'metrics/recall(B)', + 'metrics/mAP_0.5(B)', + 'metrics/mAP_0.5:0.95(B)', # metrics + 'metrics/precision(M)', + 'metrics/recall(M)', + 'metrics/mAP_0.5(M)', + 'metrics/mAP_0.5:0.95(M)', # metrics + 'val/box_loss', + 'val/seg_loss', # val loss + 'val/obj_loss', + 'val/cls_loss', + 'x/lr0', + 'x/lr1', + 'x/lr2',] BEST_KEYS = [ - "best/epoch", - "best/precision(B)", - "best/recall(B)", - "best/mAP_0.5(B)", - "best/mAP_0.5:0.95(B)", - "best/precision(M)", - "best/recall(M)", - "best/mAP_0.5(M)", - "best/mAP_0.5:0.95(M)",] + 'best/epoch', + 'best/precision(B)', + 'best/recall(B)', + 'best/mAP_0.5(B)', + 'best/mAP_0.5:0.95(B)', + 'best/precision(M)', + 'best/recall(M)', + 'best/mAP_0.5(M)', + 'best/mAP_0.5:0.95(M)',] diff --git a/utils/segment/plots.py b/utils/segment/plots.py index 9b90900b3772..3ba097624fcd 100644 --- a/utils/segment/plots.py +++ b/utils/segment/plots.py @@ -108,13 +108,13 @@ def plot_images_and_masks(images, targets, masks, paths=None, fname='images.jpg' 
annotator.im.save(fname) # save -def plot_results_with_masks(file="path/to/results.csv", dir="", best=True): +def plot_results_with_masks(file='path/to/results.csv', dir='', best=True): # Plot training results.csv. Usage: from utils.plots import *; plot_results('path/to/results.csv') save_dir = Path(file).parent if file else Path(dir) fig, ax = plt.subplots(2, 8, figsize=(18, 6), tight_layout=True) ax = ax.ravel() - files = list(save_dir.glob("results*.csv")) - assert len(files), f"No results.csv files found in {save_dir.resolve()}, nothing to plot." + files = list(save_dir.glob('results*.csv')) + assert len(files), f'No results.csv files found in {save_dir.resolve()}, nothing to plot.' for f in files: try: data = pd.read_csv(f) @@ -125,19 +125,19 @@ def plot_results_with_masks(file="path/to/results.csv", dir="", best=True): for i, j in enumerate([1, 2, 3, 4, 5, 6, 9, 10, 13, 14, 15, 16, 7, 8, 11, 12]): y = data.values[:, j] # y[y == 0] = np.nan # don't show zero values - ax[i].plot(x, y, marker=".", label=f.stem, linewidth=2, markersize=2) + ax[i].plot(x, y, marker='.', label=f.stem, linewidth=2, markersize=2) if best: # best - ax[i].scatter(index, y[index], color="r", label=f"best:{index}", marker="*", linewidth=3) - ax[i].set_title(s[j] + f"\n{round(y[index], 5)}") + ax[i].scatter(index, y[index], color='r', label=f'best:{index}', marker='*', linewidth=3) + ax[i].set_title(s[j] + f'\n{round(y[index], 5)}') else: # last - ax[i].scatter(x[-1], y[-1], color="r", label="last", marker="*", linewidth=3) - ax[i].set_title(s[j] + f"\n{round(y[-1], 5)}") + ax[i].scatter(x[-1], y[-1], color='r', label='last', marker='*', linewidth=3) + ax[i].set_title(s[j] + f'\n{round(y[-1], 5)}') # if j in [8, 9, 10]: # share train and val loss y axes # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5]) except Exception as e: - print(f"Warning: Plotting error for {f}: {e}") + print(f'Warning: Plotting error for {f}: {e}') ax[1].legend() - fig.savefig(save_dir / "results.png", dpi=200) + 
fig.savefig(save_dir / 'results.png', dpi=200) plt.close() diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 77549b005ceb..5b67b3fa7a06 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -291,7 +291,7 @@ def model_info(model, verbose=False, imgsz=640): fs = '' name = Path(model.yaml_file).stem.replace('yolov5', 'YOLOv5') if hasattr(model, 'yaml_file') else 'Model' - LOGGER.info(f"{name} summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}") + LOGGER.info(f'{name} summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}') def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416) @@ -342,7 +342,7 @@ def smart_optimizer(model, name='Adam', lr=0.001, momentum=0.9, decay=1e-5): optimizer.add_param_group({'params': g[0], 'weight_decay': decay}) # add g0 with weight_decay optimizer.add_param_group({'params': g[1], 'weight_decay': 0.0}) # add g1 (BatchNorm2d weights) LOGGER.info(f"{colorstr('optimizer:')} {type(optimizer).__name__}(lr={lr}) with parameter groups " - f"{len(g[1])} weight(decay=0.0), {len(g[0])} weight(decay={decay}), {len(g[2])} bias") + f'{len(g[1])} weight(decay=0.0), {len(g[0])} weight(decay={decay}), {len(g[2])} bias') return optimizer diff --git a/utils/triton.py b/utils/triton.py index a94ef0ad197d..25928021477e 100644 --- a/utils/triton.py +++ b/utils/triton.py @@ -21,7 +21,7 @@ def __init__(self, url: str): """ parsed_url = urlparse(url) - if parsed_url.scheme == "grpc": + if parsed_url.scheme == 'grpc': from tritonclient.grpc import InferenceServerClient, InferInput self.client = InferenceServerClient(parsed_url.netloc) # Triton GRPC client @@ -31,7 +31,7 @@ def __init__(self, url: str): def create_input_placeholders() -> typing.List[InferInput]: return [ - InferInput(i['name'], [int(s) for s in i["shape"]], i['datatype']) for i in self.metadata['inputs']] + InferInput(i['name'], [int(s) for s in i['shape']], i['datatype']) for i in 
self.metadata['inputs']] else: from tritonclient.http import InferenceServerClient, InferInput @@ -43,14 +43,14 @@ def create_input_placeholders() -> typing.List[InferInput]: def create_input_placeholders() -> typing.List[InferInput]: return [ - InferInput(i['name'], [int(s) for s in i["shape"]], i['datatype']) for i in self.metadata['inputs']] + InferInput(i['name'], [int(s) for s in i['shape']], i['datatype']) for i in self.metadata['inputs']] self._create_input_placeholders_fn = create_input_placeholders @property def runtime(self): """Returns the model runtime""" - return self.metadata.get("backend", self.metadata.get("platform")) + return self.metadata.get('backend', self.metadata.get('platform')) def __call__(self, *args, **kwargs) -> typing.Union[torch.Tensor, typing.Tuple[torch.Tensor, ...]]: """ Invokes the model. Parameters can be provided via args or kwargs. @@ -68,14 +68,14 @@ def __call__(self, *args, **kwargs) -> typing.Union[torch.Tensor, typing.Tuple[t def _create_inputs(self, *args, **kwargs): args_len, kwargs_len = len(args), len(kwargs) if not args_len and not kwargs_len: - raise RuntimeError("No inputs provided.") + raise RuntimeError('No inputs provided.') if args_len and kwargs_len: - raise RuntimeError("Cannot specify args and kwargs at the same time") + raise RuntimeError('Cannot specify args and kwargs at the same time') placeholders = self._create_input_placeholders_fn() if args_len: if args_len != len(placeholders): - raise RuntimeError(f"Expected {len(placeholders)} inputs, got {args_len}.") + raise RuntimeError(f'Expected {len(placeholders)} inputs, got {args_len}.') for input, value in zip(placeholders, args): input.set_data_from_numpy(value.cpu().numpy()) else: diff --git a/val.py b/val.py index 7829afb68b79..d4073b42fe78 100644 --- a/val.py +++ b/val.py @@ -304,7 +304,7 @@ def run( if save_json and len(jdict): w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights anno_json = 
str(Path('../datasets/coco/annotations/instances_val2017.json')) # annotations - pred_json = str(save_dir / f"{w}_predictions.json") # predictions + pred_json = str(save_dir / f'{w}_predictions.json') # predictions LOGGER.info(f'\nEvaluating pycocotools mAP... saving {pred_json}...') with open(pred_json, 'w') as f: json.dump(jdict, f) @@ -404,6 +404,6 @@ def main(opt): raise NotImplementedError(f'--task {opt.task} not in ("train", "val", "test", "speed", "study")') -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) From 4db6757ef9d43f49a780ff29deb06b28e96fbe84 Mon Sep 17 00:00:00 2001 From: imyhxy Date: Mon, 20 Feb 2023 18:23:13 +0800 Subject: [PATCH 076/128] Fixed access 'names' from a DistributedDataParallel module (#11023) --- classify/train.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/classify/train.py b/classify/train.py index b752a3c1fe32..ae2363ccf056 100644 --- a/classify/train.py +++ b/classify/train.py @@ -44,7 +44,7 @@ check_requirements, colorstr, download, increment_path, init_seeds, print_args, yaml_save) from utils.loggers import GenericLogger from utils.plots import imshow_cls -from utils.torch_utils import (ModelEMA, model_info, reshape_classifier_output, select_device, smart_DDP, +from utils.torch_utils import (ModelEMA, de_parallel, model_info, reshape_classifier_output, select_device, smart_DDP, smart_optimizer, smartCrossEntropyLoss, torch_distributed_zero_first) LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html @@ -260,7 +260,7 @@ def train(opt, device): # Plot examples images, labels = (x[:25] for x in next(iter(testloader))) # first 25 images and labels pred = torch.max(ema.ema(images.to(device)), 1)[1] - file = imshow_cls(images, labels, pred, model.names, verbose=False, f=save_dir / 'test_images.jpg') + file = imshow_cls(images, labels, pred, de_parallel(model).names, verbose=False, f=save_dir / 'test_images.jpg') # Log results meta 
= {'epochs': epochs, 'top1_acc': best_fitness, 'date': datetime.now().isoformat()} From feca55719bab7dad14284f77a096da387094dbde Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 24 Feb 2023 20:09:02 -0800 Subject: [PATCH 077/128] Update "YOLOv5 is out of date" msg (#11061) --- utils/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index b6efe6bb8732..b7e38b3a1a50 100644 --- a/utils/general.py +++ b/utils/general.py @@ -338,7 +338,7 @@ def check_git_status(repo='ultralytics/yolov5', branch='master'): n = int(check_output(f'git rev-list {local_branch}..{remote}/{branch} --count', shell=True)) # commits behind if n > 0: pull = 'git pull' if remote == 'origin' else f'git pull {remote} {branch}' - s += f"⚠️ YOLOv5 is out of date by {n} commit{'s' * (n > 1)}. Use `{pull}` or `git clone {url}` to update." + s += f"⚠️ YOLOv5 is out of date by {n} commit{'s' * (n > 1)}. Use '{pull}' or 'git clone {url}' to update." else: s += f'up to date with {url} ✅' LOGGER.info(s) From 6559d8fcebd1c6abe4f5e100cff82d8fdda3f232 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 24 Feb 2023 21:34:04 -0800 Subject: [PATCH 078/128] Update ci-testing.yml (#11062) * Update ci-testing.yml Signed-off-by: Glenn Jocher * Update ci-testing.yml Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher --- .github/workflows/ci-testing.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index f9c62d623042..83438094b6f6 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -79,10 +79,10 @@ jobs: - uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} - - name: Get cache dir - # https://github.com/actions/cache/blob/master/examples.md#multiple-oss-in-a-workflow + - name: Get cache dir # 
https://github.com/actions/cache/blob/master/examples.md#multiple-oss-in-a-workflow id: pip-cache - run: echo "::set-output name=dir::$(pip cache dir)" + run: echo "dir=$(pip cache dir)" >> $GITHUB_OUTPUT + shell: bash # for Windows compatibility - name: Cache pip uses: actions/cache@v3 with: From b8731d855fce77120bf6401f689fb0accd66c2a6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 25 Feb 2023 14:16:03 -0800 Subject: [PATCH 079/128] Update requirements.txt (#11065) Signed-off-by: Glenn Jocher --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index eee15ddf93c4..3e6e39d8cc07 100644 --- a/requirements.txt +++ b/requirements.txt @@ -40,7 +40,7 @@ seaborn>=0.11.0 # Deploy ---------------------------------------------------------------------- setuptools>=65.5.1 # Snyk vulnerability fix -wheel>=0.38.0 # Snyk vulnerability fix +wheel>=0.38.4 # Snyk vulnerability fix # tritonclient[all]~=2.24.0 # Extras ---------------------------------------------------------------------- From b005788f36fd329a840879fcfb5975bc5902ada8 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 25 Feb 2023 15:15:50 -0800 Subject: [PATCH 080/128] Update requirements.txt (#11067) Signed-off-by: Glenn Jocher --- requirements.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 3e6e39d8cc07..7aa4732d6d78 100644 --- a/requirements.txt +++ b/requirements.txt @@ -40,7 +40,6 @@ seaborn>=0.11.0 # Deploy ---------------------------------------------------------------------- setuptools>=65.5.1 # Snyk vulnerability fix -wheel>=0.38.4 # Snyk vulnerability fix # tritonclient[all]~=2.24.0 # Extras ---------------------------------------------------------------------- From 7dee52f94d28e09142717ffff95ee689982364d1 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 25 Feb 2023 15:58:59 -0800 Subject: [PATCH 081/128] Update requirements.txt (#11068) Signed-off-by: Glenn Jocher --- 
requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 7aa4732d6d78..d67c44c9d812 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,7 +2,7 @@ # Usage: pip install -r requirements.txt # Base ------------------------------------------------------------------------ -gitpython +gitpython>=3.1.30 ipython # interactive notebook matplotlib>=3.2.2 numpy>=1.18.5 From 3c0a6e664bc3847ab9cca3df66195de6acfeb012 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 25 Feb 2023 16:15:07 -0800 Subject: [PATCH 082/128] Security fixes for IPython (#11069) Signed-off-by: Glenn Jocher --- models/common.py | 9 ++++++--- requirements.txt | 2 +- utils/__init__.py | 6 ++++-- utils/general.py | 19 +++++++++++++------ 4 files changed, 24 insertions(+), 12 deletions(-) diff --git a/models/common.py b/models/common.py index f416ddf25eb8..aa8ae674eb47 100644 --- a/models/common.py +++ b/models/common.py @@ -21,14 +21,13 @@ import requests import torch import torch.nn as nn -from IPython.display import display from PIL import Image from torch.cuda import amp from utils import TryExcept from utils.dataloaders import exif_transpose, letterbox from utils.general import (LOGGER, ROOT, Profile, check_requirements, check_suffix, check_version, colorstr, - increment_path, is_notebook, make_divisible, non_max_suppression, scale_boxes, xywh2xyxy, + increment_path, is_jupyter, make_divisible, non_max_suppression, scale_boxes, xywh2xyxy, xyxy2xywh, yaml_load) from utils.plots import Annotator, colors, save_one_box from utils.torch_utils import copy_attr, smart_inference_mode @@ -767,7 +766,11 @@ def _run(self, pprint=False, show=False, save=False, crop=False, render=False, l im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np if show: - display(im) if is_notebook() else im.show(self.files[i]) + if is_jupyter(): + from IPython.display import display + display(im) + else: + 
im.show(self.files[i]) if save: f = self.files[i] im.save(save_dir / f) # save diff --git a/requirements.txt b/requirements.txt index d67c44c9d812..11cb9aaaf99e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,7 +3,6 @@ # Base ------------------------------------------------------------------------ gitpython>=3.1.30 -ipython # interactive notebook matplotlib>=3.2.2 numpy>=1.18.5 opencv-python>=4.1.1 @@ -43,6 +42,7 @@ setuptools>=65.5.1 # Snyk vulnerability fix # tritonclient[all]~=2.24.0 # Extras ---------------------------------------------------------------------- +# ipython # interactive notebook # mss # screenshots # albumentations>=1.0.3 # pycocotools>=2.0.6 # COCO mAP diff --git a/utils/__init__.py b/utils/__init__.py index d158c5515a12..5b9fcd517e03 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -60,17 +60,19 @@ def notebook_init(verbose=True): check_font() import psutil - from IPython import display # to display images and clear console output if is_colab(): shutil.rmtree('/content/sample_data', ignore_errors=True) # remove colab /sample_data directory # System info + display = None if verbose: gb = 1 << 30 # bytes to GiB (1024 ** 3) ram = psutil.virtual_memory().total total, used, free = shutil.disk_usage('/') - display.clear_output() + with contextlib.suppress(Exception): # clear display if ipython is installed + from IPython import display + display.clear_output() s = f'({os.cpu_count()} CPUs, {ram / gb:.1f} GB RAM, {(total - free) / gb:.1f}/{total / gb:.1f} GB disk)' else: s = '' diff --git a/utils/general.py b/utils/general.py index b7e38b3a1a50..74620460070e 100644 --- a/utils/general.py +++ b/utils/general.py @@ -29,7 +29,6 @@ from zipfile import ZipFile, is_zipfile import cv2 -import IPython import numpy as np import pandas as pd import pkg_resources as pkg @@ -77,10 +76,18 @@ def is_colab(): return 'google.colab' in sys.modules -def is_notebook(): - # Is environment a Jupyter notebook? 
Verified on Colab, Jupyterlab, Kaggle, Paperspace - ipython_type = str(type(IPython.get_ipython())) - return 'colab' in ipython_type or 'zmqshell' in ipython_type +def is_jupyter(): + """ + Check if the current script is running inside a Jupyter Notebook. + Verified on Colab, Jupyterlab, Kaggle, Paperspace. + + Returns: + bool: True if running inside a Jupyter Notebook, False otherwise. + """ + with contextlib.suppress(Exception): + from IPython import get_ipython + return get_ipython() is not None + return False def is_kaggle(): @@ -429,7 +436,7 @@ def check_img_size(imgsz, s=32, floor=0): def check_imshow(warn=False): # Check if environment supports image displays try: - assert not is_notebook() + assert not is_jupyter() assert not is_docker() cv2.imshow('test', np.zeros((1, 1, 3))) cv2.waitKey(1) From 5ca8e822c8e75cde1d613dea8bfa49009fdc3618 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 27 Feb 2023 10:55:05 -0800 Subject: [PATCH 083/128] Update export.py (#11077) Signed-off-by: Glenn Jocher --- export.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/export.py b/export.py index e8287704866a..e167b2088cb1 100644 --- a/export.py +++ b/export.py @@ -413,7 +413,7 @@ def export_edgetpu(file, prefix=colorstr('Edge TPU:')): cmd = 'edgetpu_compiler --version' help_url = 'https://coral.ai/docs/edgetpu/compiler/' assert platform.system() == 'Linux', f'export only supported on Linux. See {help_url}' - if subprocess.run(f'{cmd} >/dev/null', shell=True).returncode != 0: + if subprocess.run(f'{cmd} > /dev/null 2>&1', shell=True).returncode != 0: LOGGER.info(f'\n{prefix} export requires Edge TPU compiler. 
Attempting install from {help_url}') sudo = subprocess.run('sudo --version >/dev/null', shell=True).returncode == 0 # sudo installed on system for c in ( From 5c91daeaecaeca709b8b6d13bd571d068fdbd003 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 27 Feb 2023 19:55:23 -0800 Subject: [PATCH 084/128] Update ci-testing.yml (#11079) * Update ci-testing.yml Signed-off-by: Glenn Jocher * Update ci-testing.yml Signed-off-by: Glenn Jocher * Update ci-testing.yml Signed-off-by: Glenn Jocher * Update ci-testing.yml Signed-off-by: Glenn Jocher * Update ci-testing.yml Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher --- .github/workflows/ci-testing.yml | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 83438094b6f6..7c74fe6fe652 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -25,12 +25,16 @@ jobs: - uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} - #- name: Cache pip - # uses: actions/cache@v3 - # with: - # path: ~/.cache/pip - # key: ${{ runner.os }}-Benchmarks-${{ hashFiles('requirements.txt') }} - # restore-keys: ${{ runner.os }}-Benchmarks- + - name: Get cache dir # https://github.com/actions/cache/blob/master/examples.md#multiple-oss-in-a-workflow + id: pip-cache + run: echo "dir=$(pip cache dir)" >> $GITHUB_OUTPUT + shell: bash # for Windows compatibility + - name: Cache pip + uses: actions/cache@v3 + with: + path: ${{ steps.pip-cache.outputs.dir }} + key: ${{ runner.os }}-${{ matrix.python-version }}-pip-${{ hashFiles('requirements.txt') }} + restore-keys: ${{ runner.os }}-${{ matrix.python-version }}-pip- - name: Install requirements run: | python -m pip install --upgrade pip wheel From 85f6019e5af2641e33139e97415b7bd1dc72d779 Mon Sep 17 00:00:00 2001 From: Iker Lluvia Date: Mon, 6 Mar 2023 22:54:34 +0100 Subject: [PATCH 085/128] Rename evolve folder if default 
project name (#11108) Save logs to 'runs/evolve-seg' if default project name, 'runs/train-seg' Signed-off-by: Iker Lluvia --- segment/train.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/segment/train.py b/segment/train.py index 2e71de131a8d..c6ac2d5e23d2 100644 --- a/segment/train.py +++ b/segment/train.py @@ -530,8 +530,8 @@ def main(opt, callbacks=Callbacks()): check_file(opt.data), check_yaml(opt.cfg), check_yaml(opt.hyp), str(opt.weights), str(opt.project) # checks assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified' if opt.evolve: - if opt.project == str(ROOT / 'runs/train'): # if default project name, rename to runs/evolve - opt.project = str(ROOT / 'runs/evolve') + if opt.project == str(ROOT / 'runs/train-seg'): # if default project name, rename to runs/evolve-seg + opt.project = str(ROOT / 'runs/evolve-seg') opt.exist_ok, opt.resume = opt.resume, False # pass resume to exist_ok and disable resume if opt.name == 'cfg': opt.name = Path(opt.cfg).stem # use model.yaml as name From ea05d5cb6c0dc01ef254761f0b140ceab17f9fd3 Mon Sep 17 00:00:00 2001 From: Iker Lluvia Date: Thu, 9 Mar 2023 23:47:53 +0100 Subject: [PATCH 086/128] Correct mutation adding the missing parameters (#11109) * Correct mutation adding the missing parameters Correct mutation considering the higher number of segmentation parameters compared to object detection. 
Fixes #9730 Signed-off-by: Iker Lluvia * Use already defined segmentation keys from segment/metrics.py --------- Signed-off-by: Iker Lluvia Co-authored-by: Glenn Jocher --- segment/train.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/segment/train.py b/segment/train.py index c6ac2d5e23d2..8ed75ba63e7c 100644 --- a/segment/train.py +++ b/segment/train.py @@ -629,7 +629,7 @@ def main(opt, callbacks=Callbacks()): while all(v == 1): # mutate until a change occurs (prevent duplicates) v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0) for i, k in enumerate(hyp.keys()): # plt.hist(v.ravel(), 300) - hyp[k] = float(x[i + 7] * v[i]) # mutate + hyp[k] = float(x[i + 12] * v[i]) # mutate # Constrain to limits for k, v in meta.items(): @@ -641,7 +641,7 @@ def main(opt, callbacks=Callbacks()): results = train(hyp.copy(), opt, device, callbacks) callbacks = Callbacks() # Write mutation results - print_mutation(KEYS, results, hyp.copy(), save_dir, opt.bucket) + print_mutation(KEYS[4:16], results, hyp.copy(), save_dir, opt.bucket) # Plot results plot_evolve(evolve_csv) From 5543b89466d072a9f8f2e31f8257a1ccc7f588e9 Mon Sep 17 00:00:00 2001 From: Sheng Hu Date: Fri, 10 Mar 2023 06:55:02 +0800 Subject: [PATCH 087/128] Fix a visualization bug (#11134) Fix a visualization bug reported here: https://github.com/ultralytics/yolov5/issues/11133 Signed-off-by: Sheng Hu Co-authored-by: Glenn Jocher --- utils/segment/plots.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/segment/plots.py b/utils/segment/plots.py index 3ba097624fcd..1b22ec838ac9 100644 --- a/utils/segment/plots.py +++ b/utils/segment/plots.py @@ -54,7 +54,7 @@ def plot_images_and_masks(images, targets, masks, paths=None, fname='images.jpg' x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders if paths: - annotator.text((x + 5, y + 5 + h), 
text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames + annotator.text((x + 5, y + 5), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames if len(targets) > 0: idx = targets[:, 0] == i ti = targets[idx] # image targets From 3e55763d45f9c5f8217e4dad5ba1e6c1f42e3bf8 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 12 Mar 2023 02:10:38 +0100 Subject: [PATCH 088/128] Update ci-testing.yml (#11154) Signed-off-by: Glenn Jocher --- .github/workflows/ci-testing.yml | 22 ++-------------------- 1 file changed, 2 insertions(+), 20 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 7c74fe6fe652..a6f47bb8811c 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -25,16 +25,7 @@ jobs: - uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} - - name: Get cache dir # https://github.com/actions/cache/blob/master/examples.md#multiple-oss-in-a-workflow - id: pip-cache - run: echo "dir=$(pip cache dir)" >> $GITHUB_OUTPUT - shell: bash # for Windows compatibility - - name: Cache pip - uses: actions/cache@v3 - with: - path: ${{ steps.pip-cache.outputs.dir }} - key: ${{ runner.os }}-${{ matrix.python-version }}-pip-${{ hashFiles('requirements.txt') }} - restore-keys: ${{ runner.os }}-${{ matrix.python-version }}-pip- + cache: 'pip' # caching pip dependencies - name: Install requirements run: | python -m pip install --upgrade pip wheel @@ -83,16 +74,7 @@ jobs: - uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} - - name: Get cache dir # https://github.com/actions/cache/blob/master/examples.md#multiple-oss-in-a-workflow - id: pip-cache - run: echo "dir=$(pip cache dir)" >> $GITHUB_OUTPUT - shell: bash # for Windows compatibility - - name: Cache pip - uses: actions/cache@v3 - with: - path: ${{ steps.pip-cache.outputs.dir }} - key: ${{ runner.os }}-${{ matrix.python-version }}-pip-${{ 
hashFiles('requirements.txt') }} - restore-keys: ${{ runner.os }}-${{ matrix.python-version }}-pip- + cache: 'pip' # caching pip dependencies - name: Install requirements run: | python -m pip install --upgrade pip wheel From ef7039ea1806669c4cd4e8eb5abd3182a66883f9 Mon Sep 17 00:00:00 2001 From: Jonas Heinle Date: Thu, 23 Mar 2023 11:28:31 +0100 Subject: [PATCH 089/128] Error in tensor shape of docstring (#11206) Corrected the tensor shape in the doc string. The incoming masks are stacked in dim=0 therefore the doc is wrong Signed-off-by: Jonas Heinle --- utils/segment/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/segment/general.py b/utils/segment/general.py index 9da894538665..f1b2f1dd120f 100644 --- a/utils/segment/general.py +++ b/utils/segment/general.py @@ -10,7 +10,7 @@ def crop_mask(masks, boxes): Vectorized by Chong (thanks Chong). Args: - - masks should be a size [h, w, n] tensor of masks + - masks should be a size [n, h, w] tensor of masks - boxes should be a size [n, 4] tensor of bbox coords in relative point form """ From 78a90c9661a05e8e1b7cc52a0989e4895fd96981 Mon Sep 17 00:00:00 2001 From: Sheng Hu Date: Thu, 23 Mar 2023 18:29:44 +0800 Subject: [PATCH 090/128] Remove duplicate assignment code (#11178) Signed-off-by: Sheng Hu --- utils/segment/loss.py | 1 - 1 file changed, 1 deletion(-) diff --git a/utils/segment/loss.py b/utils/segment/loss.py index 2a8a4c680f6f..caeff3cad586 100644 --- a/utils/segment/loss.py +++ b/utils/segment/loss.py @@ -16,7 +16,6 @@ def __init__(self, model, autobalance=False, overlap=False): self.overlap = overlap device = next(model.parameters()).device # get model device h = model.hyp # hyperparameters - self.device = device # Define criteria BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device)) From d223460f3a4b4151437b15ac83990cea4b0f42e2 Mon Sep 17 00:00:00 2001 From: imyhxy Date: Fri, 24 Mar 2023 03:27:46 +0800 Subject: [PATCH 091/128] Fixed creation of 
empty directories in path was entered mistaken (#11174) FFixed creation of empty directories in path was entered mistaken Co-authored-by: Glenn Jocher --- utils/downloads.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/downloads.py b/utils/downloads.py index 643b529fba3b..88f523742b5b 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -118,8 +118,8 @@ def github_assets(repository, version='latest'): except Exception: tag = release - file.parent.mkdir(parents=True, exist_ok=True) # make parent dir (if required) if name in assets: + file.parent.mkdir(parents=True, exist_ok=True) # make parent dir (if required) safe_download(file, url=f'https://github.com/{repo}/releases/download/{tag}/{name}', min_bytes=1E5, From 6dd17516c8610fca8a75b8c003866c1bbe921daa Mon Sep 17 00:00:00 2001 From: Eljas Hyyrynen Date: Thu, 23 Mar 2023 21:29:11 +0200 Subject: [PATCH 092/128] dataloaders: fix class filtering for segmentation (#11171) * dataloaders: fix class filtering for segmentation self.segments[i] and segment[j] are lists so they cannot be indexed with booleans self.segments is a tuple so it has to be converted into a list first * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- utils/dataloaders.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 7687a2ba2665..28d5b7974cf8 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -531,13 +531,14 @@ def __init__(self, # Update labels include_class = [] # filter labels to include only these classes (optional) + self.segments = list(self.segments) include_class_array = np.array(include_class).reshape(1, -1) for i, (label, segment) in enumerate(zip(self.labels, self.segments)): if include_class: j = (label[:, 0:1] == 
include_class_array).any(1) self.labels[i] = label[j] if segment: - self.segments[i] = segment[j] + self.segments[i] = [segment[idx] for idx, elem in enumerate(j) if elem] if single_cls: # single-class training, merge all classes into 0 self.labels[i][:, 0] = 0 From 81f81d3e97e78bf69c88652b4717edcd037c2f4b Mon Sep 17 00:00:00 2001 From: imyhxy Date: Fri, 24 Mar 2023 03:30:10 +0800 Subject: [PATCH 093/128] Fixed randrange warnings in Python>=3.10 (#11161) Co-authored-by: Glenn Jocher --- segment/train.py | 2 +- train.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/segment/train.py b/segment/train.py index 8ed75ba63e7c..de5f7035e6b6 100644 --- a/segment/train.py +++ b/segment/train.py @@ -299,7 +299,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio # Multi-scale if opt.multi_scale: - sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size + sz = random.randrange(int(imgsz * 0.5), int(imgsz * 1.5) + gs) // gs * gs # size sf = sz / max(imgs.shape[2:]) # scale factor if sf != 1: ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple) diff --git a/train.py b/train.py index c4e3aac3561a..960f24c3ecc7 100644 --- a/train.py +++ b/train.py @@ -299,7 +299,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio # Multi-scale if opt.multi_scale: - sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size + sz = random.randrange(int(imgsz * 0.5), int(imgsz * 1.5) + gs) // gs * gs # size sf = sz / max(imgs.shape[2:]) # scale factor if sf != 1: ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple) From f095258e0cdda729e5d67a84ea4115921b3e7c3d Mon Sep 17 00:00:00 2001 From: Grzegorz K <2612193+grzegorzk@users.noreply.github.com> Date: Thu, 23 Mar 2023 20:34:03 +0100 Subject: [PATCH 094/128] Sync signature of cv2.imread and cv2.imwrite (#11209) Co-authored-by: Glenn Jocher --- 
utils/general.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/utils/general.py b/utils/general.py index 74620460070e..adb924257162 100644 --- a/utils/general.py +++ b/utils/general.py @@ -1119,13 +1119,13 @@ def increment_path(path, exist_ok=False, sep='', mkdir=False): imshow_ = cv2.imshow # copy to avoid recursion errors -def imread(path, flags=cv2.IMREAD_COLOR): - return cv2.imdecode(np.fromfile(path, np.uint8), flags) +def imread(filename, flags=cv2.IMREAD_COLOR): + return cv2.imdecode(np.fromfile(filename, np.uint8), flags) -def imwrite(path, im): +def imwrite(filename, img): try: - cv2.imencode(Path(path).suffix, im)[1].tofile(path) + cv2.imencode(Path(filename).suffix, img)[1].tofile(filename) return True except Exception: return False From 52e2fde1b3af9fbdbb4abb63d0ed40c1f0096a39 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 23 Mar 2023 20:35:13 +0100 Subject: [PATCH 095/128] Update social icons: add TikTok, remove Facebook (#11226) Add TikTok, remove Facebook Signed-off-by: Glenn Jocher --- README.md | 47 +++++++++++++++++++++-------------------------- 1 file changed, 21 insertions(+), 26 deletions(-) diff --git a/README.md b/README.md index 16dfd9fca085..7ddf47272962 100644 --- a/README.md +++ b/README.md @@ -23,27 +23,25 @@ YOLOv5 🚀 is the world's most loved vision AI, representing Ultralytics Licensing.
- - - - - - - - - - - - - - - - - - - - -
+ + + + + + + + + + + + + + + + + + +
@@ -477,14 +475,11 @@ For YOLOv5 bug reports and feature requests please visit [GitHub Issues](https:/ - - - - - + + From b96f35ce75effc96f1a20efddd836fa17501b4f5 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 23 Mar 2023 20:39:57 +0100 Subject: [PATCH 096/128] Update README.zh-CN.md social icons, add TikTok and remove Facebook (#11227) Update README.zh-CN.md Signed-off-by: Glenn Jocher --- README.zh-CN.md | 46 ++++++++++++++++++++-------------------------- 1 file changed, 20 insertions(+), 26 deletions(-) diff --git a/README.zh-CN.md b/README.zh-CN.md index 800a670cfb4f..30d5ece9728e 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -22,27 +22,24 @@ YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表Ultralytics 许可.
- - - - - - - - - - - - - - - - - - - - -
+ + + + + + + + + + + + + + + + + + ##
YOLOv8 🚀 NEW
@@ -472,14 +469,11 @@ YOLOv5 在两种不同的 License 下可用: - - - - - + + From b54fd0ac28b921756d8eaa66cdd12f1ba55833df Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 26 Mar 2023 20:27:43 +0200 Subject: [PATCH 097/128] FROM pytorch/pytorch:2.0.0-cuda11.7-cudnn8-runtime (#11246) Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index b5d2af9fb08e..811ad4a6c9cb 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -2,9 +2,8 @@ # Builds ultralytics/yolov5:latest image on DockerHub https://hub.docker.com/r/ultralytics/yolov5 # Image is CUDA-optimized for YOLOv5 single/multi-GPU training and inference -# Start FROM NVIDIA PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch -# FROM docker.io/pytorch/pytorch:latest -FROM pytorch/pytorch:latest +# Start FROM PyTorch image https://hub.docker.com/r/pytorch/pytorch +FROM pytorch/pytorch:2.0.0-cuda11.7-cudnn8-runtime # Downloads to user config dir ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ From 789551ffd1ff4c43f53129454e39e5a35d1ec905 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 27 Mar 2023 13:48:32 +0200 Subject: [PATCH 098/128] Bump actions/stale from 7 to 8 (#11249) Bumps [actions/stale](https://github.com/actions/stale) from 7 to 8. - [Release notes](https://github.com/actions/stale/releases) - [Changelog](https://github.com/actions/stale/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/stale/compare/v7...v8) --- updated-dependencies: - dependency-name: actions/stale dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/stale.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index b21e9c00e6c5..470dc6197b51 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -9,7 +9,7 @@ jobs: stale: runs-on: ubuntu-latest steps: - - uses: actions/stale@v7 + - uses: actions/stale@v8 with: repo-token: ${{ secrets.GITHUB_TOKEN }} stale-issue-message: | From 0c8de3fca4a702f8ff5c435e67f378d1fce70243 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 28 Mar 2023 02:38:50 +0200 Subject: [PATCH 099/128] Update YOLOv5 tutorials on docs.ultralytics.com (#11254) * Update README.md Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update README.md Signed-off-by: Glenn Jocher * Update README.zh-CN.md Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- README.md | 35 +++++++++++++++++------------------ README.zh-CN.md | 32 ++++++++++++++++---------------- 2 files changed, 33 insertions(+), 34 deletions(-) diff --git a/README.md b/README.md index 7ddf47272962..cb1540737a14 100644 --- a/README.md +++ b/README.md @@ -65,7 +65,7 @@ pip install ultralytics ##
Documentation
-See the [YOLOv5 Docs](https://docs.ultralytics.com) for full documentation on training, testing and deployment. See below for quickstart examples. +See the [YOLOv5 Docs](https://docs.ultralytics.com/yolov5) for full documentation on training, testing and deployment. See below for quickstart examples.
Install @@ -153,23 +153,22 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml -
Tutorials -- [Train Custom Data](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data)  🚀 RECOMMENDED -- [Tips for Best Training Results](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results)  ☘️ - RECOMMENDED -- [Multi-GPU Training](https://github.com/ultralytics/yolov5/issues/475) -- [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36) 🌟 NEW -- [TFLite, ONNX, CoreML, TensorRT Export](https://github.com/ultralytics/yolov5/issues/251) 🚀 -- [NVIDIA Jetson Nano Deployment](https://github.com/ultralytics/yolov5/issues/9627) 🌟 NEW -- [Test-Time Augmentation (TTA)](https://github.com/ultralytics/yolov5/issues/303) -- [Model Ensembling](https://github.com/ultralytics/yolov5/issues/318) -- [Model Pruning/Sparsity](https://github.com/ultralytics/yolov5/issues/304) -- [Hyperparameter Evolution](https://github.com/ultralytics/yolov5/issues/607) -- [Transfer Learning with Frozen Layers](https://github.com/ultralytics/yolov5/issues/1314) -- [Architecture Summary](https://github.com/ultralytics/yolov5/issues/6998) 🌟 NEW -- [Roboflow for Datasets, Labeling, and Active Learning](https://github.com/ultralytics/yolov5/issues/4975)  🌟 NEW -- [ClearML Logging](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) 🌟 NEW -- [YOLOv5 with Neural Magic's Deepsparse](https://bit.ly/yolov5-neuralmagic) 🌟 NEW -- [Comet Logging](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet) 🌟 NEW +- [Train Custom Data](https://docs.ultralytics.com/yolov5/train_custom_data) 🚀 RECOMMENDED +- [Tips for Best Training Results](https://docs.ultralytics.com/yolov5/tips_for_best_training_results) ☘️ RECOMMENDED +- [Multi-GPU Training](https://docs.ultralytics.com/yolov5/multi_gpu_training) +- [PyTorch Hub](https://docs.ultralytics.com/yolov5/pytorch_hub) 🌟 NEW +- [TFLite, ONNX, CoreML, TensorRT Export](https://docs.ultralytics.com/yolov5/export) 🚀 +- [NVIDIA Jetson platform 
Deployment](https://docs.ultralytics.com/yolov5/jetson_nano) 🌟 NEW +- [Test-Time Augmentation (TTA)](https://docs.ultralytics.com/yolov5/tta) +- [Model Ensembling](https://docs.ultralytics.com/yolov5/ensemble) +- [Model Pruning/Sparsity](https://docs.ultralytics.com/yolov5/pruning_sparsity) +- [Hyperparameter Evolution](https://docs.ultralytics.com/yolov5/hyp_evolution) +- [Transfer Learning with Frozen Layers](https://docs.ultralytics.com/yolov5/transfer_learn_frozen) +- [Architecture Summary](https://docs.ultralytics.com/yolov5/architecture) 🌟 NEW +- [Roboflow for Datasets, Labeling, and Active Learning](https://docs.ultralytics.com/yolov5/roboflow) +- [ClearML Logging](https://docs.ultralytics.com/yolov5/clearml) 🌟 NEW +- [YOLOv5 with Neural Magic's Deepsparse](https://docs.ultralytics.com/yolov5/neural_magic) 🌟 NEW +- [Comet Logging](https://docs.ultralytics.com/yolov5/comet) 🌟 NEW
diff --git a/README.zh-CN.md b/README.zh-CN.md index 30d5ece9728e..9a819598be7e 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -147,22 +147,22 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml -
教程 -- [训练自定义数据](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data)🚀 推荐 -- [获得最佳训练结果的技巧](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results)☘️ 推荐 -- [多 GPU 训练](https://github.com/ultralytics/yolov5/issues/475) -- [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36)🌟 新 -- [TFLite、ONNX、CoreML、TensorRT 导出](https://github.com/ultralytics/yolov5/issues/251)🚀 -- [NVIDIA Jetson Nano 部署](https://github.com/ultralytics/yolov5/issues/9627)🌟 新 -- [测试时数据增强 (TTA)](https://github.com/ultralytics/yolov5/issues/303) -- [模型集成](https://github.com/ultralytics/yolov5/issues/318) -- [模型修剪/稀疏度](https://github.com/ultralytics/yolov5/issues/304) -- [超参数进化](https://github.com/ultralytics/yolov5/issues/607) -- [使用冻结层进行迁移学习](https://github.com/ultralytics/yolov5/issues/1314) -- [架构总结](https://github.com/ultralytics/yolov5/issues/6998)🌟 新 -- [用于数据集、标签和主动学习的 Roboflow](https://github.com/ultralytics/yolov5/issues/4975)🌟 新 -- [ClearML 记录](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml)🌟 新 -- [Deci 平台](https://github.com/ultralytics/yolov5/wiki/Deci-Platform)🌟 新 -- [Comet Logging](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet)🌟 新 +- [训练自定义数据](https://docs.ultralytics.com/yolov5/train_custom_data) 🚀 推荐 +- [获得最佳训练结果的技巧](https://docs.ultralytics.com/yolov5/tips_for_best_training_results) ☘️ 推荐 +- [多 GPU 训练](https://docs.ultralytics.com/yolov5/multi_gpu_training) +- [PyTorch Hub](https://docs.ultralytics.com/yolov5/pytorch_hub) 🌟 新 +- [TFLite, ONNX, CoreML, TensorRT 导出](https://docs.ultralytics.com/yolov5/export) 🚀 +- [NVIDIA Jetson 平台部署](https://docs.ultralytics.com/yolov5/jetson_nano) 🌟 新 +- [测试时增强(TTA)](https://docs.ultralytics.com/yolov5/tta) +- [模型集成](https://docs.ultralytics.com/yolov5/ensemble) +- [模型剪枝/稀疏性](https://docs.ultralytics.com/yolov5/pruning_sparsity) +- [超参数进化](https://docs.ultralytics.com/yolov5/hyp_evolution) +- 
[冻结层的迁移学习](https://docs.ultralytics.com/yolov5/transfer_learn_frozen) +- [架构概述](https://docs.ultralytics.com/yolov5/architecture) 🌟 新 +- [Roboflow 用于数据集、标签和主动学习](https://docs.ultralytics.com/yolov5/roboflow) +- [ClearML 日志记录](https://docs.ultralytics.com/yolov5/clearml) 🌟 新 +- [YOLOv5 与 Neural Magic 的 Deepsparse](https://docs.ultralytics.com/yolov5/neural_magic) 🌟 新 +- [Comet 日志记录](https://docs.ultralytics.com/yolov5/comet) 🌟 新
From a82132c10bdc0463815a83884fdd85267fc58fdb Mon Sep 17 00:00:00 2001 From: Grzegorz K <2612193+grzegorzk@users.noreply.github.com> Date: Thu, 30 Mar 2023 11:39:11 +0200 Subject: [PATCH 100/128] Do not monkey-patch cv2 methods for unaware caller (#11210) --- utils/general.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index adb924257162..68c0736f65fa 100644 --- a/utils/general.py +++ b/utils/general.py @@ -1135,6 +1135,7 @@ def imshow(path, im): imshow_(path.encode('unicode_escape').decode(), im) -cv2.imread, cv2.imwrite, cv2.imshow = imread, imwrite, imshow # redefine +if Path(inspect.stack()[0].filename).parent.parent.as_posix() in inspect.stack()[-1].filename: + cv2.imread, cv2.imwrite, cv2.imshow = imread, imwrite, imshow # redefine # Variables ------------------------------------------------------------------------------------------------------------ From cca5e21995679c4fce32d67a69e2ec89fe131c0e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 1 Apr 2023 13:02:51 +0200 Subject: [PATCH 101/128] Update greetings.yml (#11287) * Update greeting * Cleanup README * Created using Colaboratory * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Created using Colaboratory * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .github/workflows/greetings.yml | 2 +- README.md | 2 +- README.zh-CN.md | 2 +- requirements.txt | 1 - tutorial.ipynb | 12 +++--------- 5 files changed, 6 insertions(+), 13 deletions(-) diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index 42a2463585a8..a4eca919a5b3 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -23,7 +23,7 @@ jobs: - ✅ Reduce changes to the absolute **minimum** required for your bug fix or 
feature addition. _"It is not daily increase but daily decrease, hack away the unessential. The closer to the source, the less wastage there is."_ — Bruce Lee issue-message: | - 👋 Hello @${{ github.actor }}, thank you for your interest in YOLOv5 🚀! Please visit our ⭐️ [Tutorials](https://github.com/ultralytics/yolov5/wiki#tutorials) to get started, where you can find quickstart guides for simple tasks like [Custom Data Training](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data) all the way to advanced concepts like [Hyperparameter Evolution](https://github.com/ultralytics/yolov5/issues/607). + 👋 Hello @${{ github.actor }}, thank you for your interest in YOLOv5 🚀! Please visit our ⭐️ [Tutorials](https://docs.ultralytics.com/yolov5/#tutorials) to get started, where you can find quickstart guides for simple tasks like [Custom Data Training](https://docs.ultralytics.com/yolov5/train_custom_data/) all the way to advanced concepts like [Hyperparameter Evolution](https://docs.ultralytics.com/yolov5/hyp_evolution/). If this is a 🐛 Bug Report, please provide a **minimum reproducible example** to help us debug it. 
diff --git a/README.md b/README.md index cb1540737a14..9c991abf0179 100644 --- a/README.md +++ b/README.md @@ -165,7 +165,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - - [Hyperparameter Evolution](https://docs.ultralytics.com/yolov5/hyp_evolution) - [Transfer Learning with Frozen Layers](https://docs.ultralytics.com/yolov5/transfer_learn_frozen) - [Architecture Summary](https://docs.ultralytics.com/yolov5/architecture) 🌟 NEW -- [Roboflow for Datasets, Labeling, and Active Learning](https://docs.ultralytics.com/yolov5/roboflow) +- [Roboflow for Datasets](https://docs.ultralytics.com/yolov5/roboflow) - [ClearML Logging](https://docs.ultralytics.com/yolov5/clearml) 🌟 NEW - [YOLOv5 with Neural Magic's Deepsparse](https://docs.ultralytics.com/yolov5/neural_magic) 🌟 NEW - [Comet Logging](https://docs.ultralytics.com/yolov5/comet) 🌟 NEW diff --git a/README.zh-CN.md b/README.zh-CN.md index 9a819598be7e..761e61634dfb 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -159,7 +159,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - - [超参数进化](https://docs.ultralytics.com/yolov5/hyp_evolution) - [冻结层的迁移学习](https://docs.ultralytics.com/yolov5/transfer_learn_frozen) - [架构概述](https://docs.ultralytics.com/yolov5/architecture) 🌟 新 -- [Roboflow 用于数据集、标签和主动学习](https://docs.ultralytics.com/yolov5/roboflow) +- [Roboflow](https://docs.ultralytics.com/yolov5/roboflow) - [ClearML 日志记录](https://docs.ultralytics.com/yolov5/clearml) 🌟 新 - [YOLOv5 与 Neural Magic 的 Deepsparse](https://docs.ultralytics.com/yolov5/neural_magic) 🌟 新 - [Comet 日志记录](https://docs.ultralytics.com/yolov5/comet) 🌟 新 diff --git a/requirements.txt b/requirements.txt index 11cb9aaaf99e..fc7193604607 100644 --- a/requirements.txt +++ b/requirements.txt @@ -46,5 +46,4 @@ setuptools>=65.5.1 # Snyk vulnerability fix # mss # screenshots # albumentations>=1.0.3 # pycocotools>=2.0.6 # COCO mAP -# roboflow # ultralytics # HUB https://hub.ultralytics.com 
diff --git a/tutorial.ipynb b/tutorial.ipynb index 32af68b57945..0d1f387cf040 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -632,19 +632,13 @@ "automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\n", "- **[Datasets](https://github.com/ultralytics/yolov5/tree/master/data)** available for autodownload include: [COCO](https://github.com/ultralytics/yolov5/blob/master/data/coco.yaml), [COCO128](https://github.com/ultralytics/yolov5/blob/master/data/coco128.yaml), [VOC](https://github.com/ultralytics/yolov5/blob/master/data/VOC.yaml), [Argoverse](https://github.com/ultralytics/yolov5/blob/master/data/Argoverse.yaml), [VisDrone](https://github.com/ultralytics/yolov5/blob/master/data/VisDrone.yaml), [GlobalWheat](https://github.com/ultralytics/yolov5/blob/master/data/GlobalWheat2020.yaml), [xView](https://github.com/ultralytics/yolov5/blob/master/data/xView.yaml), [Objects365](https://github.com/ultralytics/yolov5/blob/master/data/Objects365.yaml), [SKU-110K](https://github.com/ultralytics/yolov5/blob/master/data/SKU-110K.yaml).\n", "- **Training Results** are saved to `runs/train/` with incrementing run directories, i.e. `runs/train/exp2`, `runs/train/exp3` etc.\n", - "

\n", + "
\n", "\n", "A **Mosaic Dataloader** is used for training which combines 4 images into 1 mosaic.\n", "\n", - "## Train on Custom Data with Roboflow 🌟 NEW\n", - "\n", - "[Roboflow](https://roboflow.com/?ref=ultralytics) enables you to easily **organize, label, and prepare** a high quality dataset with your own custom data. Roboflow also makes it easy to establish an active learning pipeline, collaborate with your team on dataset improvement, and integrate directly into your model building workflow with the `roboflow` pip package.\n", - "\n", - "- Custom Training Example: [https://blog.roboflow.com/how-to-train-yolov5-on-a-custom-dataset/](https://blog.roboflow.com/how-to-train-yolov5-on-a-custom-dataset/?ref=ultralytics)\n", - "- Custom Training Notebook: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/roboflow-ai/yolov5-custom-training-tutorial/blob/main/yolov5-custom-training.ipynb)\n", - "
\n", + "## Label a dataset on Roboflow (optional)\n", "\n", - "

Label images lightning fast (including with model-assisted labeling)" + "[Roboflow](https://roboflow.com/?ref=ultralytics) enables you to easily **organize, label, and prepare** a high quality dataset with your own custom data. Roboflow also makes it easy to establish an active learning pipeline, collaborate with your team on dataset improvement, and integrate directly into your model building workflow with the `roboflow` pip package." ] }, { From 23c492321290266810e08fa5ee9a23fc9d6a571f Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 4 Apr 2023 13:12:53 +0200 Subject: [PATCH 102/128] [pre-commit.ci] pre-commit suggestions (#11293) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/codespell-project/codespell: v2.2.2 → v2.2.4](https://github.com/codespell-project/codespell/compare/v2.2.2...v2.2.4) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index c5162378ab81..750ced690531 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -57,7 +57,7 @@ repos: name: PEP8 - repo: https://github.com/codespell-project/codespell - rev: v2.2.2 + rev: v2.2.4 hooks: - id: codespell args: From 9dd0ad30c0668cc467b4496f45de2a85a901830d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 8 Apr 2023 18:07:38 +0200 Subject: [PATCH 103/128] Update issue YAMLs (#11318) * Update issue YAMLs Signed-off-by: Glenn Jocher * Update config.yml Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher --- .github/ISSUE_TEMPLATE/config.yml | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index 4db7cefb2707..776e2b37cadb 100644 --- 
a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -1,8 +1,11 @@ blank_issues_enabled: true contact_links: + - name: 📄 Docs + url: https://docs.ultralytics.com/yolov5 + about: Full Ultralytics YOLOv5 Documentation - name: 💬 Forum url: https://community.ultralytics.com/ about: Ask on Ultralytics Community Forum - - name: Stack Overflow - url: https://stackoverflow.com/search?q=YOLOv5 - about: Ask on Stack Overflow with 'YOLOv5' tag + - name: 🎧 Discord + url: https://discord.gg/n6cFeSPZdD + about: Ask on Ultralytics Discord From 71244aed2ccc57646f3c944cf6a95f20a5a03088 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 9 Apr 2023 13:31:57 +0200 Subject: [PATCH 104/128] Update config.yml (#11321) Signed-off-by: Glenn Jocher --- .github/ISSUE_TEMPLATE/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index 776e2b37cadb..743feb957ff1 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -2,7 +2,7 @@ blank_issues_enabled: true contact_links: - name: 📄 Docs url: https://docs.ultralytics.com/yolov5 - about: Full Ultralytics YOLOv5 Documentation + about: View Ultralytics YOLOv5 Docs - name: 💬 Forum url: https://community.ultralytics.com/ about: Ask on Ultralytics Community Forum From 20a7368373b592d9832a10f79a7a527d1976e321 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 9 Apr 2023 16:18:24 +0200 Subject: [PATCH 105/128] Created using Colaboratory --- tutorial.ipynb | 633 +++++++++++-------------------------------------- 1 file changed, 134 insertions(+), 499 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 0d1f387cf040..8753a3205d90 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -4,360 +4,13 @@ "metadata": { "colab": { "name": "YOLOv5 Tutorial", - "provenance": [], - "toc_visible": true + "provenance": [] }, "kernelspec": { "name": "python3", "display_name": "Python 3" }, - 
"accelerator": "GPU", - "widgets": { - "application/vnd.jupyter.widget-state+json": { - "1f7df330663048998adcf8a45bc8f69b": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HBoxModel", - "model_module_version": "1.5.0", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HBoxModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HBoxView", - "box_style": "", - "children": [ - "IPY_MODEL_e896e6096dd244c59d7955e2035cd729", - "IPY_MODEL_a6ff238c29984b24bf6d0bd175c19430", - "IPY_MODEL_3c085ba3f3fd4c3c8a6bb41b41ce1479" - ], - "layout": "IPY_MODEL_16b0c8aa6e0f427e8a54d3791abb7504" - } - }, - "e896e6096dd244c59d7955e2035cd729": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HTMLModel", - "model_module_version": "1.5.0", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HTMLView", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_c7b2dd0f78384cad8e400b282996cdf5", - "placeholder": "​", - "style": "IPY_MODEL_6a27e43b0e434edd82ee63f0a91036ca", - "value": "100%" - } - }, - "a6ff238c29984b24bf6d0bd175c19430": { - "model_module": "@jupyter-widgets/controls", - "model_name": "FloatProgressModel", - "model_module_version": "1.5.0", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "FloatProgressModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "ProgressView", - "bar_style": "success", - "description": "", - "description_tooltip": null, - "layout": 
"IPY_MODEL_cce0e6c0c4ec442cb47e65c674e02e92", - "max": 818322941, - "min": 0, - "orientation": "horizontal", - "style": "IPY_MODEL_c5b9f38e2f0d4f9aa97fe87265263743", - "value": 818322941 - } - }, - "3c085ba3f3fd4c3c8a6bb41b41ce1479": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HTMLModel", - "model_module_version": "1.5.0", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HTMLView", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_df554fb955c7454696beac5a82889386", - "placeholder": "​", - "style": "IPY_MODEL_74e9112a87a242f4831b7d68c7da6333", - "value": " 780M/780M [00:05<00:00, 126MB/s]" - } - }, - "16b0c8aa6e0f427e8a54d3791abb7504": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "model_module_version": "1.2.0", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - 
"overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "c7b2dd0f78384cad8e400b282996cdf5": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "model_module_version": "1.2.0", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "6a27e43b0e434edd82ee63f0a91036ca": { - "model_module": "@jupyter-widgets/controls", - "model_name": "DescriptionStyleModel", - "model_module_version": "1.5.0", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "description_width": "" - } - }, - "cce0e6c0c4ec442cb47e65c674e02e92": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "model_module_version": "1.2.0", - "state": { - 
"_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "c5b9f38e2f0d4f9aa97fe87265263743": { - "model_module": "@jupyter-widgets/controls", - "model_name": "ProgressStyleModel", - "model_module_version": "1.5.0", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "ProgressStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "bar_color": null, - "description_width": "" - } - }, - "df554fb955c7454696beac5a82889386": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "model_module_version": "1.2.0", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - 
"align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "74e9112a87a242f4831b7d68c7da6333": { - "model_module": "@jupyter-widgets/controls", - "model_name": "DescriptionStyleModel", - "model_module_version": "1.5.0", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "description_width": "" - } - } - } - } + "accelerator": "GPU" }, "cells": [ { @@ -378,7 +31,7 @@ " \"Open\n", "
\n", "\n", - "This YOLOv5 🚀 notebook by Ultralytics presents simple train, validate and predict examples to help start your AI adventure.
See GitHub for community support or contact us for professional support.\n", + "This YOLOv5 🚀 notebook by Ultralytics presents simple train, validate and predict examples to help start your AI adventure.
We hope that the resources in this notebook will help you get the most out of YOLOv5. Please browse the YOLOv5 Docs for details, raise an issue on GitHub for support, and join our Discord community for questions and discussions!\n", "\n", "" ] @@ -401,7 +54,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "f9f016ad-3dcf-4bd2-e1c3-d5b79efc6f32" + "outputId": "e8225db4-e61d-4640-8b1f-8bfce3331cea" }, "source": [ "!git clone https://github.com/ultralytics/yolov5 # clone\n", @@ -412,20 +65,20 @@ "import utils\n", "display = utils.notebook_init() # checks" ], - "execution_count": null, + "execution_count": 1, "outputs": [ { "output_type": "stream", "name": "stderr", "text": [ - "YOLOv5 🚀 v7.0-1-gb32f67f Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n" + "YOLOv5 🚀 v7.0-136-g71244ae Python-3.9.16 torch-2.0.0+cu118 CUDA:0 (Tesla T4, 15102MiB)\n" ] }, { "output_type": "stream", "name": "stdout", "text": [ - "Setup complete ✅ (2 CPUs, 12.7 GB RAM, 22.6/78.2 GB disk)\n" + "Setup complete ✅ (2 CPUs, 12.7 GB RAM, 23.3/166.8 GB disk)\n" ] } ] @@ -459,29 +112,29 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "b4db5c49-f501-4505-cf0d-a1d35236c485" + "outputId": "284ef04b-1596-412f-88f6-948828dd2b49" }, "source": [ "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images\n", "# display.Image(filename='runs/detect/exp/zidane.jpg', width=600)" ], - "execution_count": null, + "execution_count": 13, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "\u001b[34m\u001b[1mdetect: \u001b[0mweights=['yolov5s.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/detect, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, 
half=False, dnn=False, vid_stride=1\n", - "YOLOv5 🚀 v7.0-1-gb32f67f Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "YOLOv5 🚀 v7.0-136-g71244ae Python-3.9.16 torch-2.0.0+cu118 CUDA:0 (Tesla T4, 15102MiB)\n", "\n", "Downloading https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s.pt to yolov5s.pt...\n", - "100% 14.1M/14.1M [00:00<00:00, 116MB/s] \n", + "100% 14.1M/14.1M [00:00<00:00, 24.5MB/s]\n", "\n", "Fusing layers... \n", "YOLOv5s summary: 213 layers, 7225885 parameters, 0 gradients\n", - "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 17.0ms\n", - "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, 14.3ms\n", - "Speed: 0.5ms pre-process, 15.7ms inference, 18.6ms NMS per image at shape (1, 3, 640, 640)\n", + "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 41.5ms\n", + "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, 60.0ms\n", + "Speed: 0.5ms pre-process, 50.8ms inference, 37.7ms NMS per image at shape (1, 3, 640, 640)\n", "Results saved to \u001b[1mruns/detect/exp\u001b[0m\n" ] } @@ -512,44 +165,23 @@ "metadata": { "id": "WQPtK1QYVaD_", "colab": { - "base_uri": "https://localhost:8080/", - "height": 49, - "referenced_widgets": [ - "1f7df330663048998adcf8a45bc8f69b", - "e896e6096dd244c59d7955e2035cd729", - "a6ff238c29984b24bf6d0bd175c19430", - "3c085ba3f3fd4c3c8a6bb41b41ce1479", - "16b0c8aa6e0f427e8a54d3791abb7504", - "c7b2dd0f78384cad8e400b282996cdf5", - "6a27e43b0e434edd82ee63f0a91036ca", - "cce0e6c0c4ec442cb47e65c674e02e92", - "c5b9f38e2f0d4f9aa97fe87265263743", - "df554fb955c7454696beac5a82889386", - "74e9112a87a242f4831b7d68c7da6333" - ] + "base_uri": "https://localhost:8080/" }, - "outputId": "c7d0a0d2-abfb-44c3-d60d-f99d0e7aabad" + "outputId": "cf7d52f0-281c-4c96-a488-79f5908f8426" }, "source": [ "# Download COCO val\n", "torch.hub.download_url_to_file('https://ultralytics.com/assets/coco2017val.zip', 'tmp.zip') 
# download (780M - 5000 images)\n", "!unzip -q tmp.zip -d ../datasets && rm tmp.zip # unzip" ], - "execution_count": null, + "execution_count": 3, "outputs": [ { - "output_type": "display_data", - "data": { - "text/plain": [ - " 0%| | 0.00/780M [00:00

\n", + "

\n", "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", "

\n", "\n", @@ -645,14 +277,14 @@ "cell_type": "code", "source": [ "#@title Select YOLOv5 🚀 logger {run: 'auto'}\n", - "logger = 'ClearML' #@param ['ClearML', 'Comet', 'TensorBoard']\n", + "logger = 'Comet' #@param ['Comet', 'ClearML', 'TensorBoard']\n", "\n", - "if logger == 'ClearML':\n", - " %pip install -q clearml\n", - " import clearml; clearml.browser_login()\n", - "elif logger == 'Comet':\n", + "if logger == 'Comet':\n", " %pip install -q comet_ml\n", " import comet_ml; comet_ml.init()\n", + "elif logger == 'ClearML':\n", + " %pip install -q clearml\n", + " import clearml; clearml.browser_login()\n", "elif logger == 'TensorBoard':\n", " %load_ext tensorboard\n", " %tensorboard --logdir runs/train" @@ -670,21 +302,24 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "721b9028-767f-4a05-c964-692c245f7398" + "outputId": "bbeeea2b-04fc-4185-aa64-258690495b5a" }, "source": [ "# Train YOLOv5s on COCO128 for 3 epochs\n", "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache" ], - "execution_count": null, + "execution_count": 5, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ + "2023-04-09 14:11:38.063605: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n", + "To enable the following instructions: AVX2 AVX512F FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n", + "2023-04-09 14:11:39.026661: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n", "\u001b[34m\u001b[1mtrain: \u001b[0mweights=yolov5s.pt, cfg=, data=coco128.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, 
sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=-1, artifact_alias=latest\n", "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", - "YOLOv5 🚀 v7.0-1-gb32f67f Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "YOLOv5 🚀 v7.0-136-g71244ae Python-3.9.16 torch-2.0.0+cu118 CUDA:0 (Tesla T4, 15102MiB)\n", "\n", "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n", "\u001b[34m\u001b[1mClearML: \u001b[0mrun 'pip install clearml' to automatically track, visualize and remotely train YOLOv5 🚀 in ClearML\n", @@ -693,8 +328,8 @@ "\n", "Dataset not found ⚠️, missing paths ['/content/datasets/coco128/images/train2017']\n", "Downloading https://ultralytics.com/assets/coco128.zip to coco128.zip...\n", - "100% 6.66M/6.66M [00:00<00:00, 261MB/s]\n", - "Dataset download success ✅ (0.3s), saved to \u001b[1m/content/datasets\u001b[0m\n", + "100% 6.66M/6.66M [00:00<00:00, 75.6MB/s]\n", + "Dataset download success ✅ (0.6s), saved to \u001b[1m/content/datasets\u001b[0m\n", "\n", " from n params module arguments \n", " 0 -1 1 3520 models.common.Conv [3, 32, 6, 2, 2] \n", @@ -728,11 +363,11 @@ "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n", "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n", "\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(p=0.01, blur_limit=(3, 7)), MedianBlur(p=0.01, 
blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /content/datasets/coco128/labels/train2017... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00<00:00, 1911.57it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /content/datasets/coco128/labels/train2017... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00<00:00, 1709.36it/s]\n", "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /content/datasets/coco128/labels/train2017.cache\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 229.69it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 264.35it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco128/labels/train2017.cache... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00 Date: Sun, 9 Apr 2023 22:34:18 +0800 Subject: [PATCH 106/128] Update requirements.txt (#11294) * Update requirements.txt Signed-off-by: whx-s <127172288+whx-s@users.noreply.github.com> * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Signed-off-by: whx-s <127172288+whx-s@users.noreply.github.com> Co-authored-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- requirements.txt | 2 +- tutorial.ipynb | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index fc7193604607..baf7a2757f1d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,7 +3,7 @@ # Base ------------------------------------------------------------------------ gitpython>=3.1.30 -matplotlib>=3.2.2 +matplotlib>=3.3 numpy>=1.18.5 opencv-python>=4.1.1 Pillow>=7.1.2 diff --git a/tutorial.ipynb b/tutorial.ipynb index 8753a3205d90..d2b54c9c60ef 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -602,4 +602,4 @@ "outputs": 
[] } ] -} \ No newline at end of file +} From f7c656278cec73d9dd987629c3486924fe612957 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 9 Apr 2023 17:51:25 +0200 Subject: [PATCH 107/128] Update general.py (#11322) * Update general.py Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/general.py | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/general.py b/utils/general.py index 68c0736f65fa..0e76792a08d5 100644 --- a/utils/general.py +++ b/utils/general.py @@ -58,6 +58,7 @@ cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader) os.environ['NUMEXPR_MAX_THREADS'] = str(NUM_THREADS) # NumExpr max threads os.environ['OMP_NUM_THREADS'] = '1' if platform.system() == 'darwin' else str(NUM_THREADS) # OpenMP (PyTorch and SciPy) +os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # suppress verbose TF compiler warnings in Colab def is_ascii(s=''): From 1db95338cf5091db8e3e67395e4487da0e1ee51d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 9 Apr 2023 18:15:53 +0200 Subject: [PATCH 108/128] Update PULL_REQUEST_TEMPLATE.md (#11323) * Update PULL_REQUEST_TEMPLATE.md Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update PULL_REQUEST_TEMPLATE.md Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .github/PULL_REQUEST_TEMPLATE.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index f25b017ace8b..51f9803a57a5 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -6,4 +6,8 @@ Thank you for submitting a YOLOv5 🚀 Pull 
Request! We want to make contributin - Provide before and after profiling/inference/training results to help us quantify the improvement your PR provides (if applicable). Please see our ✅ [Contributing Guide](https://github.com/ultralytics/yolov5/blob/master/CONTRIBUTING.md) for more details. + +Note that Copilot will summarize this PR below, do not modify the 'copilot:all' line. --> + +copilot:all From 34cf749958d2dd3ed1205f6bb07e0f20f6e2372d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 14 Apr 2023 14:36:16 +0200 Subject: [PATCH 109/128] Update LICENSE to AGPL-3.0 (#11359) * Update LICENSE to AGPL-3.0 This pull request updates the license of the YOLOv5 project from GNU General Public License v3.0 (GPL-3.0) to GNU Affero General Public License v3.0 (AGPL-3.0). We at Ultralytics have decided to make this change in order to better protect our intellectual property and ensure that any modifications made to the YOLOv5 source code will be shared back with the community when used over a network. AGPL-3.0 is very similar to GPL-3.0, but with an additional clause to address the use of software over a network. This change ensures that if someone modifies YOLOv5 and provides it as a service over a network (e.g., through a web application or API), they must also make the source code of their modified version available to users of the service. This update includes the following changes: - Replace the `LICENSE` file with the AGPL-3.0 license text - Update the license reference in the `README.md` file - Update the license headers in source code files We believe that this change will promote a more collaborative environment and help drive further innovation within the YOLOv5 community. Please review the changes and let us know if you have any questions or concerns. 
Signed-off-by: Glenn Jocher * Update headers to AGPL-3.0 --------- Signed-off-by: Glenn Jocher --- .github/workflows/ci-testing.yml | 2 +- .github/workflows/docker.yml | 2 +- .github/workflows/greetings.yml | 2 +- .github/workflows/stale.yml | 2 +- .github/workflows/translate-readme.yml | 2 +- .pre-commit-config.yaml | 2 +- CITATION.cff | 2 +- CONTRIBUTING.md | 2 +- LICENSE | 153 +++++++++++------------- README.md | 4 +- README.zh-CN.md | 4 +- benchmarks.py | 2 +- classify/predict.py | 2 +- classify/train.py | 2 +- classify/val.py | 2 +- data/Argoverse.yaml | 2 +- data/GlobalWheat2020.yaml | 2 +- data/ImageNet.yaml | 2 +- data/Objects365.yaml | 2 +- data/SKU-110K.yaml | 2 +- data/VOC.yaml | 2 +- data/VisDrone.yaml | 2 +- data/coco.yaml | 2 +- data/coco128-seg.yaml | 2 +- data/coco128.yaml | 2 +- data/hyps/hyp.Objects365.yaml | 2 +- data/hyps/hyp.VOC.yaml | 2 +- data/hyps/hyp.no-augmentation.yaml | 2 +- data/hyps/hyp.scratch-high.yaml | 2 +- data/hyps/hyp.scratch-low.yaml | 2 +- data/hyps/hyp.scratch-med.yaml | 2 +- data/scripts/download_weights.sh | 2 +- data/scripts/get_coco.sh | 2 +- data/scripts/get_coco128.sh | 2 +- data/scripts/get_imagenet.sh | 2 +- data/xView.yaml | 2 +- detect.py | 2 +- export.py | 2 +- hubconf.py | 2 +- models/common.py | 2 +- models/experimental.py | 2 +- models/hub/anchors.yaml | 2 +- models/hub/yolov3-spp.yaml | 2 +- models/hub/yolov3-tiny.yaml | 2 +- models/hub/yolov3.yaml | 2 +- models/hub/yolov5-bifpn.yaml | 2 +- models/hub/yolov5-fpn.yaml | 2 +- models/hub/yolov5-p2.yaml | 2 +- models/hub/yolov5-p34.yaml | 2 +- models/hub/yolov5-p6.yaml | 2 +- models/hub/yolov5-p7.yaml | 2 +- models/hub/yolov5-panet.yaml | 2 +- models/hub/yolov5l6.yaml | 2 +- models/hub/yolov5m6.yaml | 2 +- models/hub/yolov5n6.yaml | 2 +- models/hub/yolov5s-LeakyReLU.yaml | 2 +- models/hub/yolov5s-ghost.yaml | 2 +- models/hub/yolov5s-transformer.yaml | 2 +- models/hub/yolov5s6.yaml | 2 +- models/hub/yolov5x6.yaml | 2 +- models/segment/yolov5l-seg.yaml | 2 +- 
models/segment/yolov5m-seg.yaml | 2 +- models/segment/yolov5n-seg.yaml | 2 +- models/segment/yolov5s-seg.yaml | 2 +- models/segment/yolov5x-seg.yaml | 2 +- models/tf.py | 2 +- models/yolo.py | 2 +- models/yolov5l.yaml | 2 +- models/yolov5m.yaml | 2 +- models/yolov5n.yaml | 2 +- models/yolov5s.yaml | 2 +- models/yolov5x.yaml | 2 +- segment/predict.py | 2 +- segment/train.py | 2 +- segment/val.py | 2 +- train.py | 2 +- utils/__init__.py | 2 +- utils/activations.py | 2 +- utils/augmentations.py | 2 +- utils/autoanchor.py | 2 +- utils/autobatch.py | 2 +- utils/callbacks.py | 2 +- utils/dataloaders.py | 2 +- utils/docker/Dockerfile | 2 +- utils/docker/Dockerfile-arm64 | 2 +- utils/docker/Dockerfile-cpu | 2 +- utils/downloads.py | 2 +- utils/flask_rest_api/example_request.py | 2 +- utils/flask_rest_api/restapi.py | 2 +- utils/general.py | 2 +- utils/loggers/__init__.py | 2 +- utils/loggers/wandb/wandb_utils.py | 2 +- utils/loss.py | 2 +- utils/metrics.py | 2 +- utils/plots.py | 2 +- utils/segment/augmentations.py | 2 +- utils/segment/dataloaders.py | 2 +- utils/segment/metrics.py | 2 +- utils/torch_utils.py | 2 +- utils/triton.py | 2 +- val.py | 2 +- 101 files changed, 172 insertions(+), 185 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index a6f47bb8811c..bff95f654552 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # YOLOv5 Continuous Integration (CI) GitHub Actions tests name: YOLOv5 CI diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 4f7fff00677c..190b48875fa6 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Builds ultralytics/yolov5:latest images on DockerHub https://hub.docker.com/r/ultralytics/yolov5 name: Publish 
Docker Images diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index a4eca919a5b3..337a563803db 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license name: Greetings diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 470dc6197b51..734350441c61 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license name: Close stale issues on: diff --git a/.github/workflows/translate-readme.yml b/.github/workflows/translate-readme.yml index 2bb351ec7e81..d5e2be26f523 100644 --- a/.github/workflows/translate-readme.yml +++ b/.github/workflows/translate-readme.yml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # README translation action to translate README.md to Chinese as README.zh-CN.md on any change to README.md name: Translate README diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 750ced690531..8bd40484c522 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLO 🚀, GPL-3.0 license +# Ultralytics YOLO 🚀, AGPL-3.0 license # Pre-commit hooks. For more information see https://github.com/pre-commit/pre-commit-hooks/blob/main/README.md exclude: 'docs/' diff --git a/CITATION.cff b/CITATION.cff index 8e2cf1148b92..c277230d922f 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -10,5 +10,5 @@ preferred-citation: version: 7.0 doi: 10.5281/zenodo.3908559 date-released: 2020-5-29 - license: GPL-3.0 + license: AGPL-3.0 url: "https://github.com/ultralytics/yolov5" diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 71857faddb89..6e9ce5998d9f 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -90,4 +90,4 @@ understand and diagnose your problem. 
## License By contributing, you agree that your contributions will be licensed under -the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/) +the [AGPL-3.0 license](https://choosealicense.com/licenses/agpl-3.0/) diff --git a/LICENSE b/LICENSE index 92b370f0e0e1..be3f7b28e564 100644 --- a/LICENSE +++ b/LICENSE @@ -1,23 +1,21 @@ -GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 - Copyright (C) 2007 Free Software Foundation, Inc. + Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble - The GNU General Public License is a free, copyleft license for -software and other kinds of works. + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, -the GNU General Public License is intended to guarantee your freedom to +our General Public Licenses are intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free -software for all its users. We, the Free Software Foundation, use the -GNU General Public License for most of our software; it applies also to -any other work released this way by its authors. You can apply it to -your programs, too. +software for all its users. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you @@ -26,44 +24,34 @@ them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. 
- To protect your rights, we need to prevent others from denying you -these rights or asking you to surrender the rights. Therefore, you have -certain responsibilities if you distribute copies of the software, or if -you modify it: responsibilities to respect the freedom of others. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must pass on to the recipients the same -freedoms that you received. You must make sure that they, too, receive -or can get the source code. And you must show them these terms so they -know their rights. - - Developers that use the GNU GPL protect your rights with two steps: -(1) assert copyright on the software, and (2) offer you this License -giving you legal permission to copy, distribute and/or modify it. - - For the developers' and authors' protection, the GPL clearly explains -that there is no warranty for this free software. For both users' and -authors' sake, the GPL requires that modified versions be marked as -changed, so that their problems will not be attributed erroneously to -authors of previous versions. - - Some devices are designed to deny users access to install or run -modified versions of the software inside them, although the manufacturer -can do so. This is fundamentally incompatible with the aim of -protecting users' freedom to change the software. The systematic -pattern of such abuse occurs in the area of products for individuals to -use, which is precisely where it is most unacceptable. Therefore, we -have designed this version of the GPL to prohibit the practice for those -products. If such problems arise substantially in other domains, we -stand ready to extend this provision to those domains in future versions -of the GPL, as needed to protect the freedom of users. - - Finally, every program is threatened constantly by software patents. 
-States should not allow patents to restrict development and use of -software on general-purpose computers, but in those that do, we wish to -avoid the special danger that patents applied to a free program could -make it effectively proprietary. To prevent this, the GPL assures that -patents cannot be used to render the program non-free. + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. The precise terms and conditions for copying, distribution and modification follow. 
@@ -72,7 +60,7 @@ modification follow. 0. Definitions. - "This License" refers to version 3 of the GNU General Public License. + "This License" refers to version 3 of the GNU Affero General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. @@ -549,35 +537,45 @@ to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. - 13. Use with the GNU Affero General Public License. + 13. Remote Network Interaction; Use with the GNU General Public License. + + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed -under version 3 of the GNU Affero General Public License into a single +under version 3 of the GNU General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, -but the special requirements of the GNU Affero General Public License, -section 13, concerning interaction through a network will apply to the -combination as such. 
+but the work with which it is combined will remain governed by version +3 of the GNU General Public License. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of -the GNU General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU General +Program specifies that a certain numbered version of the GNU Affero General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the -GNU General Public License, you may choose any version ever published +GNU Affero General Public License, you may choose any version ever published by the Free Software Foundation. If the Program specifies that a proxy can decide which future -versions of the GNU General Public License can be used, that proxy's +versions of the GNU Affero General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. @@ -635,40 +633,29 @@ the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by + it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. + GNU Affero General Public License for more details. - You should have received a copy of the GNU General Public License - along with this program. If not, see . + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see . Also add information on how to contact you by electronic and paper mail. - If the program does terminal interaction, make it output a short -notice like this when it starts in an interactive mode: - - Copyright (C) - This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, your program's commands -might be different; for a GUI interface, you would use an "about box". + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. -For more information on this, and how to apply and follow the GNU GPL, see -. - - The GNU General Public License does not permit incorporating your program -into proprietary programs. 
If your program is a subroutine library, you -may consider it more useful to permit linking proprietary applications with -the library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. But first, please read -. +For more information on this, and how to apply and follow the GNU AGPL, see +. diff --git a/README.md b/README.md index 9c991abf0179..f48a4dd73122 100644 --- a/README.md +++ b/README.md @@ -456,8 +456,8 @@ We love your input! We want to make contributing to YOLOv5 as easy and transpare YOLOv5 is available under two different licenses: -- **GPL-3.0 License**: See [LICENSE](https://github.com/ultralytics/yolov5/blob/master/LICENSE) file for details. -- **Enterprise License**: Provides greater flexibility for commercial product development without the open-source requirements of GPL-3.0. Typical use cases are embedding Ultralytics software and AI models in commercial products and applications. Request an Enterprise License at [Ultralytics Licensing](https://ultralytics.com/license). +- **AGPL-3.0 License**: See [LICENSE](https://github.com/ultralytics/yolov5/blob/master/LICENSE) file for details. +- **Enterprise License**: Provides greater flexibility for commercial product development without the open-source requirements of AGPL-3.0. Typical use cases are embedding Ultralytics software and AI models in commercial products and applications. Request an Enterprise License at [Ultralytics Licensing](https://ultralytics.com/license). ##
Contact
diff --git a/README.zh-CN.md b/README.zh-CN.md index 761e61634dfb..0a696e591d0d 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -451,8 +451,8 @@ python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --inclu YOLOv5 在两种不同的 License 下可用: -- **GPL-3.0 License**: 查看 [License](https://github.com/ultralytics/yolov5/blob/master/LICENSE) 文件的详细信息。 -- **企业License**:在没有 GPL-3.0 开源要求的情况下为商业产品开发提供更大的灵活性。典型用例是将 Ultralytics 软件和 AI 模型嵌入到商业产品和应用程序中。在以下位置申请企业许可证 [Ultralytics 许可](https://ultralytics.com/license) 。 +- **AGPL-3.0 License**: 查看 [License](https://github.com/ultralytics/yolov5/blob/master/LICENSE) 文件的详细信息。 +- **企业License**:在没有 AGPL-3.0 开源要求的情况下为商业产品开发提供更大的灵活性。典型用例是将 Ultralytics 软件和 AI 模型嵌入到商业产品和应用程序中。在以下位置申请企业许可证 [Ultralytics 许可](https://ultralytics.com/license) 。 ##
联系我们
diff --git a/benchmarks.py b/benchmarks.py index 09108b8a7cc4..fc3073965ab3 100644 --- a/benchmarks.py +++ b/benchmarks.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Run YOLOv5 benchmarks on all supported export formats diff --git a/classify/predict.py b/classify/predict.py index 5f0d40787b52..c1b6650d4bd0 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Run YOLOv5 classification inference on images, videos, directories, globs, YouTube, webcam, streams, etc. diff --git a/classify/train.py b/classify/train.py index ae2363ccf056..8b8327f173ef 100644 --- a/classify/train.py +++ b/classify/train.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Train a YOLOv5 classifier model on a classification dataset diff --git a/classify/val.py b/classify/val.py index 4edd5a1f5e9e..643489d64d36 100644 --- a/classify/val.py +++ b/classify/val.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Validate a trained YOLOv5 classification model on a classification dataset diff --git a/data/Argoverse.yaml b/data/Argoverse.yaml index 558151dc849e..8a65407a6333 100644 --- a/data/Argoverse.yaml +++ b/data/Argoverse.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ by Argo AI # Example usage: python train.py --data Argoverse.yaml # parent diff --git a/data/GlobalWheat2020.yaml b/data/GlobalWheat2020.yaml index 01812d031bc5..7b02ac95dd95 100644 --- a/data/GlobalWheat2020.yaml +++ b/data/GlobalWheat2020.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Global Wheat 2020 dataset 
http://www.global-wheat.com/ by University of Saskatchewan # Example usage: python train.py --data GlobalWheat2020.yaml # parent diff --git a/data/ImageNet.yaml b/data/ImageNet.yaml index 14f12950605f..5fdcb63f89a5 100644 --- a/data/ImageNet.yaml +++ b/data/ImageNet.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # ImageNet-1k dataset https://www.image-net.org/index.php by Stanford University # Simplified class names from https://github.com/anishathalye/imagenet-simple-labels # Example usage: python classify/train.py --data imagenet diff --git a/data/Objects365.yaml b/data/Objects365.yaml index 05b26a1f4796..bb2aa34cd4a4 100644 --- a/data/Objects365.yaml +++ b/data/Objects365.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Objects365 dataset https://www.objects365.org/ by Megvii # Example usage: python train.py --data Objects365.yaml # parent diff --git a/data/SKU-110K.yaml b/data/SKU-110K.yaml index edae7171c660..a943eecdeee6 100644 --- a/data/SKU-110K.yaml +++ b/data/SKU-110K.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # SKU-110K retail items dataset https://github.com/eg4000/SKU110K_CVPR19 by Trax Retail # Example usage: python train.py --data SKU-110K.yaml # parent diff --git a/data/VOC.yaml b/data/VOC.yaml index 27d38109c53a..104856f0c9c7 100644 --- a/data/VOC.yaml +++ b/data/VOC.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC by University of Oxford # Example usage: python train.py --data VOC.yaml # parent diff --git a/data/VisDrone.yaml b/data/VisDrone.yaml index a8bcf8e628ec..2a13904dc8dd 100644 --- a/data/VisDrone.yaml +++ b/data/VisDrone.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 
license # VisDrone2019-DET dataset https://github.com/VisDrone/VisDrone-Dataset by Tianjin University # Example usage: python train.py --data VisDrone.yaml # parent diff --git a/data/coco.yaml b/data/coco.yaml index d64dfc7fed76..ea32cb6269a3 100644 --- a/data/coco.yaml +++ b/data/coco.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # COCO 2017 dataset http://cocodataset.org by Microsoft # Example usage: python train.py --data coco.yaml # parent diff --git a/data/coco128-seg.yaml b/data/coco128-seg.yaml index 5e81910cc456..0a2499c00a1a 100644 --- a/data/coco128-seg.yaml +++ b/data/coco128-seg.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # COCO128-seg dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics # Example usage: python train.py --data coco128.yaml # parent diff --git a/data/coco128.yaml b/data/coco128.yaml index 12556736a571..0cb53120be2c 100644 --- a/data/coco128.yaml +++ b/data/coco128.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics # Example usage: python train.py --data coco128.yaml # parent diff --git a/data/hyps/hyp.Objects365.yaml b/data/hyps/hyp.Objects365.yaml index 74971740f7c7..c4b6e8051d7b 100644 --- a/data/hyps/hyp.Objects365.yaml +++ b/data/hyps/hyp.Objects365.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Hyperparameters for Objects365 training # python train.py --weights yolov5m.pt --data Objects365.yaml --evolve # See Hyperparameter Evolution tutorial for details https://github.com/ultralytics/yolov5#tutorials diff --git a/data/hyps/hyp.VOC.yaml b/data/hyps/hyp.VOC.yaml index 0aa4e7d9f8f5..ce20dbbddbdb 100644 --- 
a/data/hyps/hyp.VOC.yaml +++ b/data/hyps/hyp.VOC.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Hyperparameters for VOC training # python train.py --batch 128 --weights yolov5m6.pt --data VOC.yaml --epochs 50 --img 512 --hyp hyp.scratch-med.yaml --evolve # See Hyperparameter Evolution tutorial for details https://github.com/ultralytics/yolov5#tutorials diff --git a/data/hyps/hyp.no-augmentation.yaml b/data/hyps/hyp.no-augmentation.yaml index 8fbd5b262afa..0ae796c16dc2 100644 --- a/data/hyps/hyp.no-augmentation.yaml +++ b/data/hyps/hyp.no-augmentation.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Hyperparameters when using Albumentations frameworks # python train.py --hyp hyp.no-augmentation.yaml # See https://github.com/ultralytics/yolov5/pull/3882 for YOLOv5 + Albumentations Usage examples diff --git a/data/hyps/hyp.scratch-high.yaml b/data/hyps/hyp.scratch-high.yaml index 123cc8407413..0a0f4ec21621 100644 --- a/data/hyps/hyp.scratch-high.yaml +++ b/data/hyps/hyp.scratch-high.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Hyperparameters for high-augmentation COCO training from scratch # python train.py --batch 32 --cfg yolov5m6.yaml --weights '' --data coco.yaml --img 1280 --epochs 300 # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials diff --git a/data/hyps/hyp.scratch-low.yaml b/data/hyps/hyp.scratch-low.yaml index b9ef1d55a3b6..9d722568f526 100644 --- a/data/hyps/hyp.scratch-low.yaml +++ b/data/hyps/hyp.scratch-low.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Hyperparameters for low-augmentation COCO training from scratch # python train.py --batch 64 --cfg yolov5n6.yaml --weights '' --data coco.yaml --img 640 --epochs 300 --linear # See tutorials 
for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials diff --git a/data/hyps/hyp.scratch-med.yaml b/data/hyps/hyp.scratch-med.yaml index d6867d7557ba..f6abb090bb04 100644 --- a/data/hyps/hyp.scratch-med.yaml +++ b/data/hyps/hyp.scratch-med.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Hyperparameters for medium-augmentation COCO training from scratch # python train.py --batch 32 --cfg yolov5m6.yaml --weights '' --data coco.yaml --img 1280 --epochs 300 # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials diff --git a/data/scripts/download_weights.sh b/data/scripts/download_weights.sh index 31e0a15569f2..e408959b32b2 100755 --- a/data/scripts/download_weights.sh +++ b/data/scripts/download_weights.sh @@ -1,5 +1,5 @@ #!/bin/bash -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Download latest models from https://github.com/ultralytics/yolov5/releases # Example usage: bash data/scripts/download_weights.sh # parent diff --git a/data/scripts/get_coco.sh b/data/scripts/get_coco.sh index 0d388b0a12a8..0bb276140b07 100755 --- a/data/scripts/get_coco.sh +++ b/data/scripts/get_coco.sh @@ -1,5 +1,5 @@ #!/bin/bash -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Download COCO 2017 dataset http://cocodataset.org # Example usage: bash data/scripts/get_coco.sh # parent diff --git a/data/scripts/get_coco128.sh b/data/scripts/get_coco128.sh index e7ddce89b115..2bfd6a2b32ed 100755 --- a/data/scripts/get_coco128.sh +++ b/data/scripts/get_coco128.sh @@ -1,5 +1,5 @@ #!/bin/bash -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Download COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) # Example usage: bash data/scripts/get_coco128.sh # parent diff --git 
a/data/scripts/get_imagenet.sh b/data/scripts/get_imagenet.sh index 6026d502e8f3..1df0fc7b66cc 100755 --- a/data/scripts/get_imagenet.sh +++ b/data/scripts/get_imagenet.sh @@ -1,5 +1,5 @@ #!/bin/bash -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Download ILSVRC2012 ImageNet dataset https://image-net.org # Example usage: bash data/scripts/get_imagenet.sh # parent diff --git a/data/xView.yaml b/data/xView.yaml index 770ab7870449..5e013ac9056d 100644 --- a/data/xView.yaml +++ b/data/xView.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # DIUx xView 2018 Challenge https://challenge.xviewdataset.org by U.S. National Geospatial-Intelligence Agency (NGA) # -------- DOWNLOAD DATA MANUALLY and jar xf val_images.zip to 'datasets/xView' before running train command! -------- # Example usage: python train.py --data xView.yaml diff --git a/detect.py b/detect.py index 3f32d7a50d6b..64d6f149a614 100644 --- a/detect.py +++ b/detect.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Run YOLOv5 detection inference on images, videos, directories, globs, YouTube, webcam, streams, etc. diff --git a/export.py b/export.py index e167b2088cb1..73f4bb8c0870 100644 --- a/export.py +++ b/export.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Export a YOLOv5 PyTorch model to other formats. 
TensorFlow exports authored by https://github.com/zldrobit diff --git a/hubconf.py b/hubconf.py index 41af8e39d14d..9d820a54f290 100644 --- a/hubconf.py +++ b/hubconf.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ PyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5 diff --git a/models/common.py b/models/common.py index aa8ae674eb47..b1c24ad378dc 100644 --- a/models/common.py +++ b/models/common.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Common modules """ diff --git a/models/experimental.py b/models/experimental.py index 02d35b9ebd11..d60d1808da11 100644 --- a/models/experimental.py +++ b/models/experimental.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Experimental modules """ diff --git a/models/hub/anchors.yaml b/models/hub/anchors.yaml index e4d7beb06e07..df2f668b022c 100644 --- a/models/hub/anchors.yaml +++ b/models/hub/anchors.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Default anchors for COCO data diff --git a/models/hub/yolov3-spp.yaml b/models/hub/yolov3-spp.yaml index c66982158ce8..4a71ed405277 100644 --- a/models/hub/yolov3-spp.yaml +++ b/models/hub/yolov3-spp.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov3-tiny.yaml b/models/hub/yolov3-tiny.yaml index b28b44315248..50b47e282df4 100644 --- a/models/hub/yolov3-tiny.yaml +++ b/models/hub/yolov3-tiny.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov3.yaml b/models/hub/yolov3.yaml index d1ef91290a8d..c5e21098f893 100644 --- a/models/hub/yolov3.yaml +++ 
b/models/hub/yolov3.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5-bifpn.yaml b/models/hub/yolov5-bifpn.yaml index 504815f5cfa0..9dbdd4ee0580 100644 --- a/models/hub/yolov5-bifpn.yaml +++ b/models/hub/yolov5-bifpn.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5-fpn.yaml b/models/hub/yolov5-fpn.yaml index a23e9c6fbf9f..2292eb1185a0 100644 --- a/models/hub/yolov5-fpn.yaml +++ b/models/hub/yolov5-fpn.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5-p2.yaml b/models/hub/yolov5-p2.yaml index 554117dda59a..2c0ae44841cc 100644 --- a/models/hub/yolov5-p2.yaml +++ b/models/hub/yolov5-p2.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5-p34.yaml b/models/hub/yolov5-p34.yaml index dbf0f850083e..60ae3b4b6f30 100644 --- a/models/hub/yolov5-p34.yaml +++ b/models/hub/yolov5-p34.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5-p6.yaml b/models/hub/yolov5-p6.yaml index a17202f22044..a9e1b5f90c72 100644 --- a/models/hub/yolov5-p6.yaml +++ b/models/hub/yolov5-p6.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5-p7.yaml b/models/hub/yolov5-p7.yaml index edd7d13a34a6..a502412f0887 100644 --- a/models/hub/yolov5-p7.yaml +++ b/models/hub/yolov5-p7.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 
license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5-panet.yaml b/models/hub/yolov5-panet.yaml index ccfbf900691c..5595e2573823 100644 --- a/models/hub/yolov5-panet.yaml +++ b/models/hub/yolov5-panet.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5l6.yaml b/models/hub/yolov5l6.yaml index 632c2cb699e3..651dbb0251ae 100644 --- a/models/hub/yolov5l6.yaml +++ b/models/hub/yolov5l6.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5m6.yaml b/models/hub/yolov5m6.yaml index ecc53fd68ba6..059b12b46929 100644 --- a/models/hub/yolov5m6.yaml +++ b/models/hub/yolov5m6.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5n6.yaml b/models/hub/yolov5n6.yaml index 0c0c71d32551..5052e7cbfc8b 100644 --- a/models/hub/yolov5n6.yaml +++ b/models/hub/yolov5n6.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5s-LeakyReLU.yaml b/models/hub/yolov5s-LeakyReLU.yaml index 3a179bf3311c..0368a78dcbb4 100644 --- a/models/hub/yolov5s-LeakyReLU.yaml +++ b/models/hub/yolov5s-LeakyReLU.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5s-ghost.yaml b/models/hub/yolov5s-ghost.yaml index ff9519c3f1aa..ce5238fa5dfc 100644 --- a/models/hub/yolov5s-ghost.yaml +++ b/models/hub/yolov5s-ghost.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # 
Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5s-transformer.yaml b/models/hub/yolov5s-transformer.yaml index 100d7c447527..f5267163453c 100644 --- a/models/hub/yolov5s-transformer.yaml +++ b/models/hub/yolov5s-transformer.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5s6.yaml b/models/hub/yolov5s6.yaml index a28fb559482b..2f39b0379e74 100644 --- a/models/hub/yolov5s6.yaml +++ b/models/hub/yolov5s6.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5x6.yaml b/models/hub/yolov5x6.yaml index ba795c4aad31..e1edbcb8634c 100644 --- a/models/hub/yolov5x6.yaml +++ b/models/hub/yolov5x6.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/segment/yolov5l-seg.yaml b/models/segment/yolov5l-seg.yaml index 4782de11dd2d..71f80cc08054 100644 --- a/models/segment/yolov5l-seg.yaml +++ b/models/segment/yolov5l-seg.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/segment/yolov5m-seg.yaml b/models/segment/yolov5m-seg.yaml index 07ec25ba264d..2b8e1db2818a 100644 --- a/models/segment/yolov5m-seg.yaml +++ b/models/segment/yolov5m-seg.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/segment/yolov5n-seg.yaml b/models/segment/yolov5n-seg.yaml index c28225ab4a50..1f67f8e3dfb0 100644 --- a/models/segment/yolov5n-seg.yaml +++ b/models/segment/yolov5n-seg.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # 
Parameters nc: 80 # number of classes diff --git a/models/segment/yolov5s-seg.yaml b/models/segment/yolov5s-seg.yaml index a827814e1399..2ff2524ca9b5 100644 --- a/models/segment/yolov5s-seg.yaml +++ b/models/segment/yolov5s-seg.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/segment/yolov5x-seg.yaml b/models/segment/yolov5x-seg.yaml index 5d0c4524a99c..589f65c76f95 100644 --- a/models/segment/yolov5x-seg.yaml +++ b/models/segment/yolov5x-seg.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/tf.py b/models/tf.py index 8290cf2e57f5..bc0a465d7edd 100644 --- a/models/tf.py +++ b/models/tf.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ TensorFlow, Keras and TFLite versions of YOLOv5 Authored by https://github.com/zldrobit in PR https://github.com/ultralytics/yolov5/pull/1127 diff --git a/models/yolo.py b/models/yolo.py index ed21c067ee93..18d2542bfb48 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ YOLO-specific modules diff --git a/models/yolov5l.yaml b/models/yolov5l.yaml index ce8a5de46a27..31362f876932 100644 --- a/models/yolov5l.yaml +++ b/models/yolov5l.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/yolov5m.yaml b/models/yolov5m.yaml index ad13ab370ff6..a76900c5a2e2 100644 --- a/models/yolov5m.yaml +++ b/models/yolov5m.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/yolov5n.yaml b/models/yolov5n.yaml index 
8a28a40d6e20..aba96cfc54f4 100644 --- a/models/yolov5n.yaml +++ b/models/yolov5n.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/yolov5s.yaml b/models/yolov5s.yaml index f35beabb1e1c..5d05364c4936 100644 --- a/models/yolov5s.yaml +++ b/models/yolov5s.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/yolov5x.yaml b/models/yolov5x.yaml index f617a027d8a2..4bdd93915da5 100644 --- a/models/yolov5x.yaml +++ b/models/yolov5x.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/segment/predict.py b/segment/predict.py index d82df89a85b0..4d4d6036358a 100644 --- a/segment/predict.py +++ b/segment/predict.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Run YOLOv5 segmentation inference on images, videos, directories, streams, etc. diff --git a/segment/train.py b/segment/train.py index de5f7035e6b6..7e600f77d571 100644 --- a/segment/train.py +++ b/segment/train.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Train a YOLOv5 segment model on a segment dataset Models and datasets download automatically from the latest YOLOv5 release. 
diff --git a/segment/val.py b/segment/val.py index a7f95fe9b6fc..c0575fd59a91 100644 --- a/segment/val.py +++ b/segment/val.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Validate a trained YOLOv5 segment model on a segment dataset diff --git a/train.py b/train.py index 960f24c3ecc7..7c403ee6d680 100644 --- a/train.py +++ b/train.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Train a YOLOv5 model on a custom dataset. Models and datasets download automatically from the latest YOLOv5 release. diff --git a/utils/__init__.py b/utils/__init__.py index 5b9fcd517e03..6c10857df079 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ utils/initialization """ diff --git a/utils/activations.py b/utils/activations.py index 084ce8c41230..e4d4bbde5ec8 100644 --- a/utils/activations.py +++ b/utils/activations.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Activation functions """ diff --git a/utils/augmentations.py b/utils/augmentations.py index 7ab75f17fb18..52e2e346e36e 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Image augmentation functions """ diff --git a/utils/autoanchor.py b/utils/autoanchor.py index bb5cf6e6965e..4c11ab3decec 100644 --- a/utils/autoanchor.py +++ b/utils/autoanchor.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ AutoAnchor utils """ diff --git a/utils/autobatch.py b/utils/autobatch.py index bdeb91c3d2bd..aa763b888462 100644 --- a/utils/autobatch.py +++ b/utils/autobatch.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, 
AGPL-3.0 license """ Auto-batch utils """ diff --git a/utils/callbacks.py b/utils/callbacks.py index 166d8938322d..ccebba02bcaa 100644 --- a/utils/callbacks.py +++ b/utils/callbacks.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Callback utils """ diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 28d5b7974cf8..a5bd86d49d7e 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Dataloaders and dataset utils """ diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index 811ad4a6c9cb..ff657dea2bf2 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Builds ultralytics/yolov5:latest image on DockerHub https://hub.docker.com/r/ultralytics/yolov5 # Image is CUDA-optimized for YOLOv5 single/multi-GPU training and inference diff --git a/utils/docker/Dockerfile-arm64 b/utils/docker/Dockerfile-arm64 index 7023c6a4bb1f..556ae59a8700 100644 --- a/utils/docker/Dockerfile-arm64 +++ b/utils/docker/Dockerfile-arm64 @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Builds ultralytics/yolov5:latest-arm64 image on DockerHub https://hub.docker.com/r/ultralytics/yolov5 # Image is aarch64-compatible for Apple M1 and other ARM architectures i.e. 
Jetson Nano and Raspberry Pi diff --git a/utils/docker/Dockerfile-cpu b/utils/docker/Dockerfile-cpu index 06bad9a3790d..8e8c23bf952f 100644 --- a/utils/docker/Dockerfile-cpu +++ b/utils/docker/Dockerfile-cpu @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Builds ultralytics/yolov5:latest-cpu image on DockerHub https://hub.docker.com/r/ultralytics/yolov5 # Image is CPU-optimized for ONNX, OpenVINO and PyTorch YOLOv5 deployments diff --git a/utils/downloads.py b/utils/downloads.py index 88f523742b5b..629537d5ade6 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Download utils """ diff --git a/utils/flask_rest_api/example_request.py b/utils/flask_rest_api/example_request.py index 952e5dcb90fa..256ad1319c82 100644 --- a/utils/flask_rest_api/example_request.py +++ b/utils/flask_rest_api/example_request.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Perform test request """ diff --git a/utils/flask_rest_api/restapi.py b/utils/flask_rest_api/restapi.py index 9258b1a68860..ae4756b276e4 100644 --- a/utils/flask_rest_api/restapi.py +++ b/utils/flask_rest_api/restapi.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Run a Flask REST API exposing one or more YOLOv5s models """ diff --git a/utils/general.py b/utils/general.py index 0e76792a08d5..053aeacd651d 100644 --- a/utils/general.py +++ b/utils/general.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ General utils """ diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 9de1f226233c..d428cdba6196 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by 
Ultralytics, AGPL-3.0 license """ Logging utils """ diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index c8ab38197381..4ea32b1d4c6e 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # WARNING ⚠️ wandb is deprecated and will be removed in future release. # See supported integrations at https://github.com/ultralytics/yolov5#integrations diff --git a/utils/loss.py b/utils/loss.py index 9b9c3d9f8018..26cca8797315 100644 --- a/utils/loss.py +++ b/utils/loss.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Loss functions """ diff --git a/utils/metrics.py b/utils/metrics.py index 95f364c23f34..5646f40e9860 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Model validation metrics """ diff --git a/utils/plots.py b/utils/plots.py index 24c618c80b59..d1284b950269 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Plotting utils """ diff --git a/utils/segment/augmentations.py b/utils/segment/augmentations.py index 169addedf0f5..f8154b834869 100644 --- a/utils/segment/augmentations.py +++ b/utils/segment/augmentations.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Image augmentation functions """ diff --git a/utils/segment/dataloaders.py b/utils/segment/dataloaders.py index 097a5d5cb058..3ee826dba69c 100644 --- a/utils/segment/dataloaders.py +++ b/utils/segment/dataloaders.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Dataloaders """ diff --git a/utils/segment/metrics.py 
b/utils/segment/metrics.py index c9f137e38ead..6020fa062ba5 100644 --- a/utils/segment/metrics.py +++ b/utils/segment/metrics.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Model validation metrics """ diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 5b67b3fa7a06..d9e060ab99df 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ PyTorch utils """ diff --git a/utils/triton.py b/utils/triton.py index 25928021477e..b5153dad940d 100644 --- a/utils/triton.py +++ b/utils/triton.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Utils to interact with the Triton Inference Server """ diff --git a/val.py b/val.py index d4073b42fe78..3d01f1a5996d 100644 --- a/val.py +++ b/val.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Validate a trained YOLOv5 detection model on a detection dataset From 2118e3b88e16f24f600959886f9576fd873cc293 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 14 Apr 2023 14:37:02 +0200 Subject: [PATCH 110/128] Update check_requirements.py (#11358) Update general.py Signed-off-by: Glenn Jocher --- utils/general.py | 38 +++++++++++++++++++++++++++----------- 1 file changed, 27 insertions(+), 11 deletions(-) diff --git a/utils/general.py b/utils/general.py index 053aeacd651d..221191005754 100644 --- a/utils/general.py +++ b/utils/general.py @@ -388,10 +388,23 @@ def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=Fals @TryExcept() -def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True, cmds=''): - # Check installed dependencies meet YOLOv5 requirements (pass *.txt file or list of packages or single package str) +def check_requirements(requirements=ROOT.parent / 
'requirements.txt', exclude=(), install=True, cmds=''): + """ + Check if installed dependencies meet YOLOv5 requirements and attempt to auto-update if needed. + + Args: + requirements (Union[Path, str, List[str]]): Path to a requirements.txt file, a single package requirement as a + string, or a list of package requirements as strings. + exclude (Tuple[str]): Tuple of package names to exclude from checking. + install (bool): If True, attempt to auto-update packages that don't meet requirements. + cmds (str): Additional commands to pass to the pip install command when auto-updating. + + Returns: + None + """ prefix = colorstr('red', 'bold', 'requirements:') check_python() # check python version + file = None if isinstance(requirements, Path): # requirements.txt file file = requirements.resolve() assert file.exists(), f'{prefix} {file} not found, check failed.' @@ -400,22 +413,25 @@ def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), insta elif isinstance(requirements, str): requirements = [requirements] - s = '' - n = 0 + s = '' # console string + n = 0 # number of packages updates for r in requirements: try: pkg.require(r) except (pkg.VersionConflict, pkg.DistributionNotFound): # exception if requirements not met - s += f'"{r}" ' - n += 1 + try: # attempt to import (slower but more accurate) + import importlib + importlib.import_module(next(pkg.parse_requirements(r)).name) + except ImportError: + s += f'"{r}" ' + n += 1 if s and install and AUTOINSTALL: # check environment variable - LOGGER.info(f"{prefix} YOLOv5 requirement{'s' * (n > 1)} {s}not found, attempting AutoUpdate...") + LOGGER.info(f"{prefix} YOLOv8 requirement{'s' * (n > 1)} {s}not found, attempting AutoUpdate...") try: - # assert check_online(), "AutoUpdate skipped (offline)" - LOGGER.info(check_output(f'pip install {s} {cmds}', shell=True).decode()) - source = file if 'file' in locals() else requirements - s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \ + 
assert check_online(), 'AutoUpdate skipped (offline)' + LOGGER.info(subprocess.check_output(f'pip install {s} {cmds}', shell=True).decode()) + s = f"{prefix} {n} package{'s' * (n > 1)} updated per {file or requirements}\n" \ f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n" LOGGER.info(s) except Exception as e: From 60e29e2d86e25c8678232652edcd920e35e836bd Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 14 Apr 2023 14:47:07 +0200 Subject: [PATCH 111/128] Update check_requirements() (#11360) Update general.py Signed-off-by: Glenn Jocher --- utils/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index 221191005754..3d7fd20c48d1 100644 --- a/utils/general.py +++ b/utils/general.py @@ -427,7 +427,7 @@ def check_requirements(requirements=ROOT.parent / 'requirements.txt', exclude=() n += 1 if s and install and AUTOINSTALL: # check environment variable - LOGGER.info(f"{prefix} YOLOv8 requirement{'s' * (n > 1)} {s}not found, attempting AutoUpdate...") + LOGGER.info(f"{prefix} YOLOv5 requirement{'s' * (n > 1)} {s}not found, attempting AutoUpdate...") try: assert check_online(), 'AutoUpdate skipped (offline)' LOGGER.info(subprocess.check_output(f'pip install {s} {cmds}', shell=True).decode()) From a66fa8314c7527043b010135aa9dd25c95900e8f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 14 Apr 2023 18:11:15 +0200 Subject: [PATCH 112/128] Add NMS to CoreML exports (#11361) * Add NMS to CoreML exports * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- export.py | 150 +++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 148 insertions(+), 2 deletions(-) diff --git a/export.py b/export.py index 73f4bb8c0870..5f8e1c4821da 100644 --- a/export.py +++ b/export.py @@ -77,6 +77,25 @@ MACOS = 
platform.system() == 'Darwin' # macOS environment +class iOSModel(torch.nn.Module): + + def __init__(self, model, im): + super().__init__() + b, c, h, w = im.shape # batch, channel, height, width + self.model = model + self.nc = model.nc # number of classes + if w == h: + self.normalize = 1. / w + else: + self.normalize = torch.tensor([1. / w, 1. / h, 1. / w, 1. / h]) # broadcast (slower, smaller) + # np = model(im)[0].shape[1] # number of points + # self.normalize = torch.tensor([1. / w, 1. / h, 1. / w, 1. / h]).expand(np, 4) # explicit (faster, larger) + + def forward(self, x): + xywh, conf, cls = self.model(x)[0].squeeze().split((4, 1, self.nc), 1) + return cls * conf, xywh * self.normalize # confidence (3780, 80), coordinates (3780, 4) + + def export_formats(): # YOLOv5 export formats x = [ @@ -223,7 +242,7 @@ def export_paddle(model, im, file, metadata, prefix=colorstr('PaddlePaddle:')): @try_export -def export_coreml(model, im, file, int8, half, prefix=colorstr('CoreML:')): +def export_coreml(model, im, file, int8, half, nms, prefix=colorstr('CoreML:')): # YOLOv5 CoreML export check_requirements('coremltools') import coremltools as ct @@ -231,6 +250,8 @@ def export_coreml(model, im, file, int8, half, prefix=colorstr('CoreML:')): LOGGER.info(f'\n{prefix} starting export with coremltools {ct.__version__}...') f = file.with_suffix('.mlmodel') + if nms: + model = iOSModel(model, im) ts = torch.jit.trace(model, im, strict=False) # TorchScript model ct_model = ct.convert(ts, inputs=[ct.ImageType('image', shape=im.shape, scale=1 / 255, bias=[0, 0, 0])]) bits, mode = (8, 'kmeans_lut') if int8 else (16, 'linear') if half else (32, None) @@ -506,6 +527,129 @@ def add_tflite_metadata(file, metadata, num_outputs): tmp_file.unlink() +def pipeline_coreml(model, im, file, names, y, prefix=colorstr('CoreML Pipeline:')): + # YOLOv5 CoreML pipeline + import coremltools as ct + from PIL import Image + + print(f'{prefix} starting pipeline with coremltools {ct.__version__}...') + 
batch_size, ch, h, w = list(im.shape) # BCHW + t = time.time() + + # Output shapes + spec = model.get_spec() + out0, out1 = iter(spec.description.output) + if platform.system() == 'Darwin': + img = Image.new('RGB', (w, h)) # img(192 width, 320 height) + # img = torch.zeros((*opt.img_size, 3)).numpy() # img size(320,192,3) iDetection + out = model.predict({'image': img}) + out0_shape, out1_shape = out[out0.name].shape, out[out1.name].shape + else: # linux and windows can not run model.predict(), get sizes from pytorch output y + s = tuple(y[0].shape) + out0_shape, out1_shape = (s[1], s[2] - 5), (s[1], 4) # (3780, 80), (3780, 4) + + # Checks + nx, ny = spec.description.input[0].type.imageType.width, spec.description.input[0].type.imageType.height + na, nc = out0_shape + # na, nc = out0.type.multiArrayType.shape # number anchors, classes + assert len(names) == nc, f'{len(names)} names found for nc={nc}' # check + + # Define output shapes (missing) + out0.type.multiArrayType.shape[:] = out0_shape # (3780, 80) + out1.type.multiArrayType.shape[:] = out1_shape # (3780, 4) + # spec.neuralNetwork.preprocessing[0].featureName = '0' + + # Flexible input shapes + # from coremltools.models.neural_network import flexible_shape_utils + # s = [] # shapes + # s.append(flexible_shape_utils.NeuralNetworkImageSize(320, 192)) + # s.append(flexible_shape_utils.NeuralNetworkImageSize(640, 384)) # (height, width) + # flexible_shape_utils.add_enumerated_image_sizes(spec, feature_name='image', sizes=s) + # r = flexible_shape_utils.NeuralNetworkImageSizeRange() # shape ranges + # r.add_height_range((192, 640)) + # r.add_width_range((192, 640)) + # flexible_shape_utils.update_image_size_range(spec, feature_name='image', size_range=r) + + # Print + print(spec.description) + + # Model from spec + model = ct.models.MLModel(spec) + + # 3. 
Create NMS protobuf + nms_spec = ct.proto.Model_pb2.Model() + nms_spec.specificationVersion = 5 + for i in range(2): + decoder_output = model._spec.description.output[i].SerializeToString() + nms_spec.description.input.add() + nms_spec.description.input[i].ParseFromString(decoder_output) + nms_spec.description.output.add() + nms_spec.description.output[i].ParseFromString(decoder_output) + + nms_spec.description.output[0].name = 'confidence' + nms_spec.description.output[1].name = 'coordinates' + + output_sizes = [nc, 4] + for i in range(2): + ma_type = nms_spec.description.output[i].type.multiArrayType + ma_type.shapeRange.sizeRanges.add() + ma_type.shapeRange.sizeRanges[0].lowerBound = 0 + ma_type.shapeRange.sizeRanges[0].upperBound = -1 + ma_type.shapeRange.sizeRanges.add() + ma_type.shapeRange.sizeRanges[1].lowerBound = output_sizes[i] + ma_type.shapeRange.sizeRanges[1].upperBound = output_sizes[i] + del ma_type.shape[:] + + nms = nms_spec.nonMaximumSuppression + nms.confidenceInputFeatureName = out0.name # 1x507x80 + nms.coordinatesInputFeatureName = out1.name # 1x507x4 + nms.confidenceOutputFeatureName = 'confidence' + nms.coordinatesOutputFeatureName = 'coordinates' + nms.iouThresholdInputFeatureName = 'iouThreshold' + nms.confidenceThresholdInputFeatureName = 'confidenceThreshold' + nms.iouThreshold = 0.45 + nms.confidenceThreshold = 0.25 + nms.pickTop.perClass = True + nms.stringClassLabels.vector.extend(names.values()) + nms_model = ct.models.MLModel(nms_spec) + + # 4. 
Pipeline models together + pipeline = ct.models.pipeline.Pipeline(input_features=[('image', ct.models.datatypes.Array(3, ny, nx)), + ('iouThreshold', ct.models.datatypes.Double()), + ('confidenceThreshold', ct.models.datatypes.Double())], + output_features=['confidence', 'coordinates']) + pipeline.add_model(model) + pipeline.add_model(nms_model) + + # Correct datatypes + pipeline.spec.description.input[0].ParseFromString(model._spec.description.input[0].SerializeToString()) + pipeline.spec.description.output[0].ParseFromString(nms_model._spec.description.output[0].SerializeToString()) + pipeline.spec.description.output[1].ParseFromString(nms_model._spec.description.output[1].SerializeToString()) + + # Update metadata + pipeline.spec.specificationVersion = 5 + pipeline.spec.description.metadata.versionString = 'https://github.com/ultralytics/yolov5' + pipeline.spec.description.metadata.shortDescription = 'https://github.com/ultralytics/yolov5' + pipeline.spec.description.metadata.author = 'glenn.jocher@ultralytics.com' + pipeline.spec.description.metadata.license = 'https://github.com/ultralytics/yolov5/blob/master/LICENSE' + pipeline.spec.description.metadata.userDefined.update({ + 'classes': ','.join(names.values()), + 'iou_threshold': str(nms.iouThreshold), + 'confidence_threshold': str(nms.confidenceThreshold)}) + + # Save the model + f = file.with_suffix('.mlmodel') # filename + model = ct.models.MLModel(pipeline.spec) + model.input_description['image'] = 'Input image' + model.input_description['iouThreshold'] = f'(optional) IOU Threshold override (default: {nms.iouThreshold})' + model.input_description['confidenceThreshold'] = \ + f'(optional) Confidence Threshold override (default: {nms.confidenceThreshold})' + model.output_description['confidence'] = 'Boxes × Class confidence (see user-defined metadata "classes")' + model.output_description['coordinates'] = 'Boxes × [x, y, width, height] (relative to image size)' + model.save(f) # pipelined + 
print(f'{prefix} pipeline success ({time.time() - t:.2f}s), saved as {f} ({file_size(f):.1f} MB)') + + @smart_inference_mode() def run( data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' @@ -584,7 +728,9 @@ def run( if xml: # OpenVINO f[3], _ = export_openvino(file, metadata, half) if coreml: # CoreML - f[4], _ = export_coreml(model, im, file, int8, half) + f[4], ct_model = export_coreml(model, im, file, int8, half, nms) + if nms: + pipeline_coreml(ct_model, im, file, model.names, y) if any((saved_model, pb, tflite, edgetpu, tfjs)): # TensorFlow formats assert not tflite or not tfjs, 'TFLite and TF.js models must be exported separately, please pass only one type.' assert not isinstance(model, ClassificationModel), 'ClassificationModel export to TF formats not yet supported.' From 94714fe6addff41e4984ff510f70cc415a131725 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 15 Apr 2023 00:51:17 +0200 Subject: [PATCH 113/128] Update requirements.txt to comment tensorboard (#11362) * Update requirements.txt comment tensorboard Signed-off-by: Glenn Jocher * Make tensorboard optional * Update __init__.py Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher --- requirements.txt | 2 +- utils/loggers/__init__.py | 6 +++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index baf7a2757f1d..e1d98a82ceac 100644 --- a/requirements.txt +++ b/requirements.txt @@ -18,7 +18,7 @@ tqdm>=4.64.0 # protobuf<=3.20.1 # https://github.com/ultralytics/yolov5/issues/8012 # Logging --------------------------------------------------------------------- -tensorboard>=2.4.1 +# tensorboard>=2.4.1 # clearml>=1.2.0 # comet diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index d428cdba6196..9b4c1d13b778 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -9,7 +9,6 @@ import pkg_resources as pkg import torch -from torch.utils.tensorboard import SummaryWriter from utils.general import 
LOGGER, colorstr, cv2 from utils.loggers.clearml.clearml_utils import ClearmlLogger @@ -20,6 +19,11 @@ LOGGERS = ('csv', 'tb', 'wandb', 'clearml', 'comet') # *.csv, TensorBoard, Weights & Biases, ClearML RANK = int(os.getenv('RANK', -1)) +try: + from torch.utils.tensorboard import SummaryWriter +except ImportError: + SummaryWriter = lambda *args: None # None = SummaryWriter(str) + try: import wandb From 4408d02e7aef15b6f7c524a7dd7ee8ac9715f715 Mon Sep 17 00:00:00 2001 From: abuod0 <97100904+abuod0@users.noreply.github.com> Date: Tue, 18 Apr 2023 18:21:35 +0800 Subject: [PATCH 114/128] Fixing onnx and Tensorflow versionsm for Jetson (#11377) Signed-off-by: abuod0 <97100904+abuod0@users.noreply.github.com> --- requirements.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index e1d98a82ceac..65924c9feec4 100644 --- a/requirements.txt +++ b/requirements.txt @@ -28,12 +28,12 @@ seaborn>=0.11.0 # Export ---------------------------------------------------------------------- # coremltools>=6.0 # CoreML export -# onnx>=1.12.0 # ONNX export +# onnx>=1.10.0 # ONNX export # onnx-simplifier>=0.4.1 # ONNX simplifier # nvidia-pyindex # TensorRT export # nvidia-tensorrt # TensorRT export # scikit-learn<=1.1.2 # CoreML quantization -# tensorflow>=2.4.1 # TF exports (-cpu, -aarch64, -macos) +# tensorflow>=2.4.0 # TF exports (-cpu, -aarch64, -macos) # tensorflowjs>=3.9.0 # TF.js export # openvino-dev # OpenVINO export From aa7c45c2cff71d498f42e9762c73cb9e5b0c699a Mon Sep 17 00:00:00 2001 From: Paula Derrenger <107626595+pderrenger@users.noreply.github.com> Date: Tue, 18 Apr 2023 12:22:14 +0200 Subject: [PATCH 115/128] Update Ultralytics HUB Copy with Mention of YOLOv8 (#11375) Signed-off-by: Paula Derrenger <107626595+pderrenger@users.noreply.github.com> Co-authored-by: Glenn Jocher --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index f48a4dd73122..e4258aa32592 100644 
--- a/README.md +++ b/README.md @@ -200,7 +200,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - ##
Ultralytics HUB
-Experience seamless AI with [Ultralytics HUB](https://bit.ly/ultralytics_hub) ⭐, the all-in-one solution for data visualization, YOLOv5 🚀 model training and deployment, without any coding. Transform images into actionable insights and bring your AI visions to life with ease using our cutting-edge platform and user-friendly [Ultralytics App](https://ultralytics.com/app_install). Start your journey for **Free** now! +Experience seamless AI with [Ultralytics HUB](https://bit.ly/ultralytics_hub) ⭐, the all-in-one solution for data visualization, YOLOv5 and YOLOv8 🚀 model training and deployment, without any coding. Transform images into actionable insights and bring your AI visions to life with ease using our cutting-edge platform and user-friendly [Ultralytics App](https://ultralytics.com/app_install). Start your journey for **Free** now! From fb1e746b8a0a2219d767184c4982a5e4c9ed5067 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 21 Apr 2023 13:50:28 +0200 Subject: [PATCH 116/128] Docs update for new YOLOv5 URLs (#11409) * Update YOLOv5 Docs Tutorials * Update YOLOv5 Docs Tutorials * Update actions * Update actions * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update actions * Update actions * Update actions * Update actions * Update actions * Update actions --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .github/workflows/greetings.yml | 12 +++++------ .github/workflows/links.yml | 38 +++++++++++++++++++++++++++++++++ .github/workflows/stale.yml | 33 +++++++++++++++++----------- README.md | 36 +++++++++++++++---------------- README.zh-CN.md | 38 ++++++++++++++++----------------- utils/loggers/comet/README.md | 2 +- 6 files changed, 102 insertions(+), 57 deletions(-) create mode 100644 .github/workflows/links.yml diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index 337a563803db..3712ea9dec8a 100644 --- 
a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -23,11 +23,11 @@ jobs: - ✅ Reduce changes to the absolute **minimum** required for your bug fix or feature addition. _"It is not daily increase but daily decrease, hack away the unessential. The closer to the source, the less wastage there is."_ — Bruce Lee issue-message: | - 👋 Hello @${{ github.actor }}, thank you for your interest in YOLOv5 🚀! Please visit our ⭐️ [Tutorials](https://docs.ultralytics.com/yolov5/#tutorials) to get started, where you can find quickstart guides for simple tasks like [Custom Data Training](https://docs.ultralytics.com/yolov5/train_custom_data/) all the way to advanced concepts like [Hyperparameter Evolution](https://docs.ultralytics.com/yolov5/hyp_evolution/). + 👋 Hello @${{ github.actor }}, thank you for your interest in YOLOv5 🚀! Please visit our ⭐️ [Tutorials](https://docs.ultralytics.com/yolov5/) to get started, where you can find quickstart guides for simple tasks like [Custom Data Training](https://docs.ultralytics.com/yolov5/tutorials/train_custom_data/) all the way to advanced concepts like [Hyperparameter Evolution](https://docs.ultralytics.com/yolov5/tutorials/hyperparameter_evolution/). If this is a 🐛 Bug Report, please provide a **minimum reproducible example** to help us debug it. - If this is a custom training ❓ Question, please provide as much information as possible, including dataset image examples and training logs, and verify you are following our [Tips for Best Training Results](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results). + If this is a custom training ❓ Question, please provide as much information as possible, including dataset image examples and training logs, and verify you are following our [Tips for Best Training Results](https://docs.ultralytics.com/yolov5/tutorials/tips_for_best_training_results/). 
## Requirements @@ -43,15 +43,15 @@ jobs: YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled): - **Notebooks** with free GPU: Run on Gradient Open In Colab Open In Kaggle - - **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart) - - **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart) - - **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) Docker Pulls + - **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/) + - **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://docs.ultralytics.com/aws_quickstart_tutorial/) + - **Docker Image**. See [Docker Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/docker_image_quickstart_tutorial/) Docker Pulls ## Status YOLOv5 CI - If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on MacOS, Windows, and Ubuntu every 24 hours and on every commit. 
+ If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on macOS, Windows, and Ubuntu every 24 hours and on every commit. ## Introducing YOLOv8 🚀 diff --git a/.github/workflows/links.yml b/.github/workflows/links.yml new file mode 100644 index 000000000000..b3e82df34947 --- /dev/null +++ b/.github/workflows/links.yml @@ -0,0 +1,38 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# YOLO Continuous Integration (CI) GitHub Actions tests + +name: Check Broken links + +on: + push: + branches: [master] + pull_request: + branches: [master] + workflow_dispatch: + schedule: + - cron: '0 0 * * *' # runs at 00:00 UTC every day + +jobs: + Links: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + + - name: Test Markdown and HTML links + uses: lycheeverse/lychee-action@v1.6.1 + with: + fail: true + # accept 429(Instagram, 'too many requests'), 999(LinkedIn, 'unknown status code'), Timeout(Twitter) + args: --accept 429,999 --exclude-loopback --exclude twitter.com --exclude-mail './**/*.md' './**/*.html' + env: + GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} + + - name: Test Markdown, HTML, YAML, Python and Notebook links + if: github.event_name == 'workflow_dispatch' + uses: lycheeverse/lychee-action@v1.6.1 + with: + fail: true + # accept 429(Instagram, 'too many requests'), 999(LinkedIn, 'unknown status code'), Timeout(Twitter) + args: --accept 429,999 --exclude-loopback --exclude twitter.com,url.com --exclude-mail './**/*.md' './**/*.html' 
'./**/*.yml' './**/*.yaml' './**/*.py' './**/*.ipynb' + env: + GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 734350441c61..65c8f70798f1 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -12,26 +12,33 @@ jobs: - uses: actions/stale@v8 with: repo-token: ${{ secrets.GITHUB_TOKEN }} + stale-issue-message: | - 👋 Hello, this issue has been automatically marked as stale because it has not had recent activity. Please note it will be closed if no further activity occurs. + 👋 Hello there! We wanted to give you a friendly reminder that this issue has not had any recent activity and may be closed soon, but don't worry - you can always reopen it if needed. If you still have any questions or concerns, please feel free to let us know how we can help. - Access additional [YOLOv5](https://ultralytics.com/yolov5) 🚀 resources: - - **Wiki** – https://github.com/ultralytics/yolov5/wiki - - **Tutorials** – https://github.com/ultralytics/yolov5#tutorials - - **Docs** – https://docs.ultralytics.com + For additional resources and information, please see the links below: - Access additional [Ultralytics](https://ultralytics.com) ⚡ resources: - - **Ultralytics HUB** – https://ultralytics.com/hub - - **Vision API** – https://ultralytics.com/yolov5 - - **About Us** – https://ultralytics.com/about - - **Join Our Team** – https://ultralytics.com/work - - **Contact Us** – https://ultralytics.com/contact + - **Docs**: https://docs.ultralytics.com + - **HUB**: https://hub.ultralytics.com + - **Community**: https://community.ultralytics.com Feel free to inform us of any other **issues** you discover or **feature requests** that come to mind in the future. Pull Requests (PRs) are also always welcomed! - Thank you for your contributions to YOLOv5 🚀 and Vision AI ⭐! + Thank you for your contributions to YOLO 🚀 and Vision AI ⭐ + + stale-pr-message: | + 👋 Hello there! 
We wanted to let you know that we've decided to close this pull request due to inactivity. We appreciate the effort you put into contributing to our project, but unfortunately, not all contributions are suitable or aligned with our product roadmap. + + We hope you understand our decision, and please don't let it discourage you from contributing to open source projects in the future. We value all of our community members and their contributions, and we encourage you to keep exploring new projects and ways to get involved. + + For additional resources and information, please see the links below: + + - **Docs**: https://docs.ultralytics.com + - **HUB**: https://hub.ultralytics.com + - **Community**: https://community.ultralytics.com + + Thank you for your contributions to YOLO 🚀 and Vision AI ⭐ - stale-pr-message: 'This pull request has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions YOLOv5 🚀 and Vision AI ⭐.' days-before-issue-stale: 30 days-before-issue-close: 10 days-before-pr-stale: 90 diff --git a/README.md b/README.md index e4258aa32592..7a43245a1f45 100644 --- a/README.md +++ b/README.md @@ -153,22 +153,22 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml -
Tutorials -- [Train Custom Data](https://docs.ultralytics.com/yolov5/train_custom_data) 🚀 RECOMMENDED -- [Tips for Best Training Results](https://docs.ultralytics.com/yolov5/tips_for_best_training_results) ☘️ RECOMMENDED -- [Multi-GPU Training](https://docs.ultralytics.com/yolov5/multi_gpu_training) -- [PyTorch Hub](https://docs.ultralytics.com/yolov5/pytorch_hub) 🌟 NEW -- [TFLite, ONNX, CoreML, TensorRT Export](https://docs.ultralytics.com/yolov5/export) 🚀 -- [NVIDIA Jetson platform Deployment](https://docs.ultralytics.com/yolov5/jetson_nano) 🌟 NEW -- [Test-Time Augmentation (TTA)](https://docs.ultralytics.com/yolov5/tta) -- [Model Ensembling](https://docs.ultralytics.com/yolov5/ensemble) -- [Model Pruning/Sparsity](https://docs.ultralytics.com/yolov5/pruning_sparsity) -- [Hyperparameter Evolution](https://docs.ultralytics.com/yolov5/hyp_evolution) -- [Transfer Learning with Frozen Layers](https://docs.ultralytics.com/yolov5/transfer_learn_frozen) -- [Architecture Summary](https://docs.ultralytics.com/yolov5/architecture) 🌟 NEW -- [Roboflow for Datasets](https://docs.ultralytics.com/yolov5/roboflow) -- [ClearML Logging](https://docs.ultralytics.com/yolov5/clearml) 🌟 NEW -- [YOLOv5 with Neural Magic's Deepsparse](https://docs.ultralytics.com/yolov5/neural_magic) 🌟 NEW -- [Comet Logging](https://docs.ultralytics.com/yolov5/comet) 🌟 NEW +- [Train Custom Data](https://docs.ultralytics.com/yolov5/tutorials/train_custom_data) 🚀 RECOMMENDED +- [Tips for Best Training Results](https://docs.ultralytics.com/yolov5/tutorials/tips_for_best_training_results) ☘️ +- [Multi-GPU Training](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training) +- [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading) 🌟 NEW +- [TFLite, ONNX, CoreML, TensorRT Export](https://docs.ultralytics.com/yolov5/tutorials/model_export) 🚀 +- [NVIDIA Jetson platform Deployment](https://docs.ultralytics.com/yolov5/tutorials/running_on_jetson_nano) 🌟 NEW +- [Test-Time 
Augmentation (TTA)](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation) +- [Model Ensembling](https://docs.ultralytics.com/yolov5/tutorials/model_ensembling) +- [Model Pruning/Sparsity](https://docs.ultralytics.com/yolov5/tutorials/model_pruning_and_sparsity) +- [Hyperparameter Evolution](https://docs.ultralytics.com/yolov5/tutorials/hyperparameter_evolution) +- [Transfer Learning with Frozen Layers](https://docs.ultralytics.com/yolov5/tutorials/transfer_learning_with_frozen_layers) +- [Architecture Summary](https://docs.ultralytics.com/yolov5/tutorials/architecture_description) 🌟 NEW +- [Roboflow for Datasets, Labeling, and Active Learning](https://docs.ultralytics.com/yolov5/tutorials/roboflow_datasets_integration) +- [ClearML Logging](https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration) 🌟 NEW +- [YOLOv5 with Neural Magic's Deepsparse](https://docs.ultralytics.com/yolov5/tutorials/neural_magic_pruning_quantization) 🌟 NEW +- [Comet Logging](https://docs.ultralytics.com/yolov5/tutorials/comet_logging_integration) 🌟 NEW
@@ -436,10 +436,10 @@ Get started in seconds with our verified environments. Click each icon below for - + - + diff --git a/README.zh-CN.md b/README.zh-CN.md index 0a696e591d0d..6396ecffbe79 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -147,22 +147,22 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml -
教程 -- [训练自定义数据](https://docs.ultralytics.com/yolov5/train_custom_data) 🚀 推荐 -- [获得最佳训练结果的技巧](https://docs.ultralytics.com/yolov5/tips_for_best_training_results) ☘️ 推荐 -- [多 GPU 训练](https://docs.ultralytics.com/yolov5/multi_gpu_training) -- [PyTorch Hub](https://docs.ultralytics.com/yolov5/pytorch_hub) 🌟 新 -- [TFLite, ONNX, CoreML, TensorRT 导出](https://docs.ultralytics.com/yolov5/export) 🚀 -- [NVIDIA Jetson 平台部署](https://docs.ultralytics.com/yolov5/jetson_nano) 🌟 新 -- [测试时增强(TTA)](https://docs.ultralytics.com/yolov5/tta) -- [模型集成](https://docs.ultralytics.com/yolov5/ensemble) -- [模型剪枝/稀疏性](https://docs.ultralytics.com/yolov5/pruning_sparsity) -- [超参数进化](https://docs.ultralytics.com/yolov5/hyp_evolution) -- [冻结层的迁移学习](https://docs.ultralytics.com/yolov5/transfer_learn_frozen) -- [架构概述](https://docs.ultralytics.com/yolov5/architecture) 🌟 新 -- [Roboflow](https://docs.ultralytics.com/yolov5/roboflow) -- [ClearML 日志记录](https://docs.ultralytics.com/yolov5/clearml) 🌟 新 -- [YOLOv5 与 Neural Magic 的 Deepsparse](https://docs.ultralytics.com/yolov5/neural_magic) 🌟 新 -- [Comet 日志记录](https://docs.ultralytics.com/yolov5/comet) 🌟 新 +- [训练自定义数据](https://docs.ultralytics.com/yolov5/tutorials/train_custom_data) 🚀 推荐 +- [获得最佳训练结果的技巧](https://docs.ultralytics.com/yolov5/tutorials/tips_for_best_training_results) ☘️ +- [多GPU训练](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training) +- [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading) 🌟 新 +- [TFLite,ONNX,CoreML,TensorRT导出](https://docs.ultralytics.com/yolov5/tutorials/model_export) 🚀 +- [NVIDIA Jetson平台部署](https://docs.ultralytics.com/yolov5/tutorials/running_on_jetson_nano) 🌟 新 +- [测试时增强 (TTA)](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation) +- [模型集成](https://docs.ultralytics.com/yolov5/tutorials/model_ensembling) +- [模型剪枝/稀疏](https://docs.ultralytics.com/yolov5/tutorials/model_pruning_and_sparsity) +- 
[超参数进化](https://docs.ultralytics.com/yolov5/tutorials/hyperparameter_evolution) +- [冻结层的迁移学习](https://docs.ultralytics.com/yolov5/tutorials/transfer_learning_with_frozen_layers) +- [架构概述](https://docs.ultralytics.com/yolov5/tutorials/architecture_description) 🌟 新 +- [Roboflow用于数据集、标注和主动学习](https://docs.ultralytics.com/yolov5/tutorials/roboflow_datasets_integration) +- [ClearML日志记录](https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration) 🌟 新 +- [使用Neural Magic的Deepsparse的YOLOv5](https://docs.ultralytics.com/yolov5/tutorials/neural_magic_pruning_quantization) 🌟 新 +- [Comet日志记录](https://docs.ultralytics.com/yolov5/tutorials/comet_logging_integration) 🌟 新
@@ -431,10 +431,10 @@ python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --inclu - + - + @@ -456,7 +456,7 @@ YOLOv5 在两种不同的 License 下可用: ##
联系我们
-请访问 [GitHub Issues](https://github.com/ultralytics/yolov5/issues) 或 [Ultralytics Community Forum](https://community.ultralytis.com) 以报告 YOLOv5 错误和请求功能。 +请访问 [GitHub Issues](https://github.com/ultralytics/yolov5/issues) 或 [Ultralytics Community Forum](https://community.ultralytics.com) 以报告 YOLOv5 错误和请求功能。
diff --git a/utils/loggers/comet/README.md b/utils/loggers/comet/README.md index 47e6a45654b8..aee8d16a336c 100644 --- a/utils/loggers/comet/README.md +++ b/utils/loggers/comet/README.md @@ -164,7 +164,7 @@ env COMET_LOG_PER_CLASS_METRICS=true python train.py \ If you would like to store your data using [Comet Artifacts](https://www.comet.com/docs/v2/guides/data-management/using-artifacts/#learn-more?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github), you can do so using the `upload_dataset` flag. -The dataset be organized in the way described in the [YOLOv5 documentation](https://docs.ultralytics.com/tutorials/train-custom-datasets/#3-organize-directories). The dataset config `yaml` file must follow the same format as that of the `coco128.yaml` file. +The dataset be organized in the way described in the [YOLOv5 documentation](https://docs.ultralytics.com/yolov5/tutorials/train_custom_data/). The dataset config `yaml` file must follow the same format as that of the `coco128.yaml` file. ```shell python train.py \ From ea191def0a9f04b03feb1a535c816c6107c0ff7b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 21 Apr 2023 22:11:03 +0200 Subject: [PATCH 117/128] Update greetings.yml (#11411) Signed-off-by: Glenn Jocher --- .github/workflows/greetings.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index 3712ea9dec8a..8aca12d3c370 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -44,7 +44,7 @@ jobs: - **Notebooks** with free GPU: Run on Gradient Open In Colab Open In Kaggle - **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/) - - **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://docs.ultralytics.com/aws_quickstart_tutorial/) + - **Amazon** Deep Learning AMI. 
See [AWS Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/) - **Docker Image**. See [Docker Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/docker_image_quickstart_tutorial/) Docker Pulls ## Status From be61a64c47b9324ed2a0c0c1af007c06966da915 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 21 Apr 2023 23:47:29 +0200 Subject: [PATCH 118/128] Update links to https://docs.ultralytics.com (#11412) * Update links * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- README.md | 8 ++++---- README.zh-CN.md | 8 ++++---- classify/tutorial.ipynb | 10 +++++----- hubconf.py | 2 +- segment/train.py | 8 +++++--- segment/tutorial.ipynb | 10 +++++----- train.py | 8 +++++--- tutorial.ipynb | 10 +++++----- utils/dataloaders.py | 2 +- utils/loggers/__init__.py | 2 +- 10 files changed, 36 insertions(+), 32 deletions(-) diff --git a/README.md b/README.md index 7a43245a1f45..7bc7a6ce8ca4 100644 --- a/README.md +++ b/README.md @@ -85,7 +85,7 @@ pip install -r requirements.txt # install
Inference -YOLOv5 [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36) inference. [Models](https://github.com/ultralytics/yolov5/tree/master/models) download automatically from the latest +YOLOv5 [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading) inference. [Models](https://github.com/ultralytics/yolov5/tree/master/models) download automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). ```python @@ -134,7 +134,7 @@ The commands below reproduce YOLOv5 [COCO](https://github.com/ultralytics/yolov5 results. [Models](https://github.com/ultralytics/yolov5/tree/master/models) and [datasets](https://github.com/ultralytics/yolov5/tree/master/data) download automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). Training times for YOLOv5n/s/m/l/x are -1/2/4/6/8 days on a V100 GPU ([Multi-GPU](https://github.com/ultralytics/yolov5/issues/475) times faster). Use the +1/2/4/6/8 days on a V100 GPU ([Multi-GPU](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training) times faster). Use the largest `--batch-size` possible, or pass `--batch-size -1` for YOLOv5 [AutoBatch](https://github.com/ultralytics/yolov5/pull/5092). Batch sizes shown for V100-16GB. @@ -247,7 +247,7 @@ YOLOv5 has been designed to be super easy to get started and simple to learn. We - All checkpoints are trained to 300 epochs with default settings. Nano and Small models use [hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml) hyps, all others use [hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml). - **mAPval** values are for single-model single-scale on [COCO val2017](http://cocodataset.org) dataset.
Reproduce by `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` - **Speed** averaged over COCO val images using a [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) instance. NMS times (~1 ms/img) not included.
Reproduce by `python val.py --data coco.yaml --img 640 --task speed --batch 1` -- **TTA** [Test Time Augmentation](https://github.com/ultralytics/yolov5/issues/303) includes reflection and scale augmentations.
Reproduce by `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment` +- **TTA** [Test Time Augmentation](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation) includes reflection and scale augmentations.
Reproduce by `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`
@@ -484,4 +484,4 @@ For YOLOv5 bug reports and feature requests please visit [GitHub Issues](https:/
-[tta]: https://github.com/ultralytics/yolov5/issues/303 +[tta]: https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation diff --git a/README.zh-CN.md b/README.zh-CN.md index 6396ecffbe79..c7733488bd02 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -80,7 +80,7 @@ pip install -r requirements.txt # install
推理 -使用 YOLOv5 [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36) 推理。最新 [模型](https://github.com/ultralytics/yolov5/tree/master/models) 将自动的从 +使用 YOLOv5 [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading) 推理。最新 [模型](https://github.com/ultralytics/yolov5/tree/master/models) 将自动的从 YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) 中下载。 ```python @@ -128,7 +128,7 @@ python detect.py --weights yolov5s.pt --source 0 # 下面的命令重现 YOLOv5 在 [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh) 数据集上的结果。 最新的 [模型](https://github.com/ultralytics/yolov5/tree/master/models) 和 [数据集](https://github.com/ultralytics/yolov5/tree/master/data) 将自动的从 YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) 中下载。 -YOLOv5n/s/m/l/x 在 V100 GPU 的训练时间为 1/2/4/6/8 天( [多GPU](https://github.com/ultralytics/yolov5/issues/475) 训练速度更快)。 +YOLOv5n/s/m/l/x 在 V100 GPU 的训练时间为 1/2/4/6/8 天( [多GPU](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training) 训练速度更快)。 尽可能使用更大的 `--batch-size` ,或通过 `--batch-size -1` 实现 YOLOv5 [自动批处理](https://github.com/ultralytics/yolov5/pull/5092) 。下方显示的 batchsize 适用于 V100-16GB。 @@ -241,7 +241,7 @@ YOLOv5 超级容易上手,简单易学。我们优先考虑现实世界的结 - 所有模型都使用默认配置,训练 300 epochs。n和s模型使用 [hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml) ,其他模型都使用 [hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml) 。 - \*\*mAPval\*\*在单模型单尺度上计算,数据集使用 [COCO val2017](http://cocodataset.org) 。
复现命令 `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` - **推理速度**在 COCO val 图像总体时间上进行平均得到,测试环境使用[AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/)实例。 NMS 时间 (大约 1 ms/img) 不包括在内。
复现命令 `python val.py --data coco.yaml --img 640 --task speed --batch 1` -- **TTA** [测试时数据增强](https://github.com/ultralytics/yolov5/issues/303) 包括反射和尺度变换。
复现命令 `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment` +- **TTA** [测试时数据增强](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation) 包括反射和尺度变换。
复现命令 `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`
@@ -479,4 +479,4 @@ YOLOv5 在两种不同的 License 下可用: -[tta]: https://github.com/ultralytics/yolov5/issues/303 +[tta]: https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation diff --git a/classify/tutorial.ipynb b/classify/tutorial.ipynb index 58723608bdbe..ddf67c5519f5 100644 --- a/classify/tutorial.ipynb +++ b/classify/tutorial.ipynb @@ -1350,7 +1350,7 @@ "export COMET_API_KEY= # 2. paste API key\n", "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n", "```\n", - "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://www.comet.com/docs/v2/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab). Get started by trying out the Comet Colab Notebook:\n", + "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://docs.ultralytics.com/yolov5/tutorials/comet_logging_integration). If you'd like to learn more about Comet, head over to our [documentation](https://www.comet.com/docs/v2/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab). Get started by trying out the Comet Colab Notebook:\n", "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", "\n", "\n", @@ -1372,7 +1372,7 @@ "\n", "You'll get all the great expected features from an experiment manager: live updates, model upload, experiment comparison etc. but ClearML also tracks uncommitted changes and installed packages for example. Thanks to that ClearML Tasks (which is what we call experiments) are also reproducible on different machines! 
With only 1 extra line, we can schedule a YOLOv5 training task on a queue to be executed by any number of ClearML Agents (workers).\n", "\n", - "You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. This will help you keep track of your data without adding extra hassle. Explore the [ClearML Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) for details!\n", + "You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. This will help you keep track of your data without adding extra hassle. Explore the [ClearML Tutorial](https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration) for details!\n", "\n", "\n", "\"ClearML" @@ -1404,9 +1404,9 @@ "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n", "\n", "- **Notebooks** with free GPU: \"Run \"Open \"Open\n", - "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)\n", - "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)\n", - "- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) \"Docker\n" + "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/)\n", + "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/)\n", + "- **Docker Image**. 
See [Docker Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/docker_image_quickstart_tutorial/) \"Docker\n" ] }, { diff --git a/hubconf.py b/hubconf.py index 9d820a54f290..73caf06685da 100644 --- a/hubconf.py +++ b/hubconf.py @@ -73,7 +73,7 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo return model.to(device) except Exception as e: - help_url = 'https://github.com/ultralytics/yolov5/issues/36' + help_url = 'https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading' s = f'{e}. Cache may be out of date, try `force_reload=True` or see {help_url} for help.' raise Exception(s) from e diff --git a/segment/train.py b/segment/train.py index 7e600f77d571..073fc742005b 100644 --- a/segment/train.py +++ b/segment/train.py @@ -12,7 +12,7 @@ Models: https://github.com/ultralytics/yolov5/tree/master/models Datasets: https://github.com/ultralytics/yolov5/tree/master/data -Tutorial: https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data +Tutorial: https://docs.ultralytics.com/yolov5/tutorials/train_custom_data """ import argparse @@ -167,8 +167,10 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio # DP mode if cuda and RANK == -1 and torch.cuda.device_count() > 1: - LOGGER.warning('WARNING ⚠️ DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n' - 'See Multi-GPU Tutorial at https://github.com/ultralytics/yolov5/issues/475 to get started.') + LOGGER.warning( + 'WARNING ⚠️ DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n' + 'See Multi-GPU Tutorial at https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training to get started.' + ) model = torch.nn.DataParallel(model) # SyncBatchNorm diff --git a/segment/tutorial.ipynb b/segment/tutorial.ipynb index cb52045bcb25..6e5caf53b8ff 100644 --- a/segment/tutorial.ipynb +++ b/segment/tutorial.ipynb @@ -463,7 +463,7 @@ "export COMET_API_KEY= # 2. 
paste API key\n", "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n", "```\n", - "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://www.comet.com/docs/v2/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab). Get started by trying out the Comet Colab Notebook:\n", + "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://docs.ultralytics.com/yolov5/tutorials/comet_logging_integration). If you'd like to learn more about Comet, head over to our [documentation](https://www.comet.com/docs/v2/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab). Get started by trying out the Comet Colab Notebook:\n", "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", "\n", "\n", @@ -485,7 +485,7 @@ "\n", "You'll get all the great expected features from an experiment manager: live updates, model upload, experiment comparison etc. but ClearML also tracks uncommitted changes and installed packages for example. Thanks to that ClearML Tasks (which is what we call experiments) are also reproducible on different machines! With only 1 extra line, we can schedule a YOLOv5 training task on a queue to be executed by any number of ClearML Agents (workers).\n", "\n", - "You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. This will help you keep track of your data without adding extra hassle. 
Explore the [ClearML Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) for details!\n", + "You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. This will help you keep track of your data without adding extra hassle. Explore the [ClearML Tutorial](https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration) for details!\n", "\n", "\n", "\"ClearML" @@ -517,9 +517,9 @@ "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n", "\n", "- **Notebooks** with free GPU: \"Run \"Open \"Open\n", - "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)\n", - "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)\n", - "- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) \"Docker\n" + "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/)\n", + "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/)\n", + "- **Docker Image**. 
See [Docker Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/docker_image_quickstart_tutorial/) \"Docker\n" ] }, { diff --git a/train.py b/train.py index 7c403ee6d680..216da6399028 100644 --- a/train.py +++ b/train.py @@ -12,7 +12,7 @@ Models: https://github.com/ultralytics/yolov5/tree/master/models Datasets: https://github.com/ultralytics/yolov5/tree/master/data -Tutorial: https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data +Tutorial: https://docs.ultralytics.com/yolov5/tutorials/train_custom_data """ import argparse @@ -175,8 +175,10 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio # DP mode if cuda and RANK == -1 and torch.cuda.device_count() > 1: - LOGGER.warning('WARNING ⚠️ DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n' - 'See Multi-GPU Tutorial at https://github.com/ultralytics/yolov5/issues/475 to get started.') + LOGGER.warning( + 'WARNING ⚠️ DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n' + 'See Multi-GPU Tutorial at https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training to get started.' + ) model = torch.nn.DataParallel(model) # SyncBatchNorm diff --git a/tutorial.ipynb b/tutorial.ipynb index d2b54c9c60ef..be87068822af 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -498,7 +498,7 @@ "export COMET_API_KEY= # 2. paste API key\n", "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n", "```\n", - "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://www.comet.com/docs/v2/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab). 
Get started by trying out the Comet Colab Notebook:\n", + "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://docs.ultralytics.com/yolov5/tutorials/comet_logging_integration). If you'd like to learn more about Comet, head over to our [documentation](https://www.comet.com/docs/v2/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab). Get started by trying out the Comet Colab Notebook:\n", "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", "\n", "\n", @@ -520,7 +520,7 @@ "\n", "You'll get all the great expected features from an experiment manager: live updates, model upload, experiment comparison etc. but ClearML also tracks uncommitted changes and installed packages for example. Thanks to that ClearML Tasks (which is what we call experiments) are also reproducible on different machines! With only 1 extra line, we can schedule a YOLOv5 training task on a queue to be executed by any number of ClearML Agents (workers).\n", "\n", - "You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. This will help you keep track of your data without adding extra hassle. Explore the [ClearML Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) for details!\n", + "You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. This will help you keep track of your data without adding extra hassle. 
Explore the [ClearML Tutorial](https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration) for details!\n", "\n", "\n", "\"ClearML" @@ -555,9 +555,9 @@ "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n", "\n", "- **Notebooks** with free GPU: \"Run \"Open \"Open\n", - "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)\n", - "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)\n", - "- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) \"Docker\n" + "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/)\n", + "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/)\n", + "- **Docker Image**. 
See [Docker Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/docker_image_quickstart_tutorial/) \"Docker\n" ] }, { diff --git a/utils/dataloaders.py b/utils/dataloaders.py index a5bd86d49d7e..26201c3c78fc 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -36,7 +36,7 @@ from utils.torch_utils import torch_distributed_zero_first # Parameters -HELP_URL = 'See https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' +HELP_URL = 'See https://docs.ultralytics.com/yolov5/tutorials/train_custom_data' IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp', 'pfm' # include image suffixes VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 9b4c1d13b778..c7c283b728ac 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -118,7 +118,7 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, self.clearml = None prefix = colorstr('ClearML: ') LOGGER.warning(f'{prefix}WARNING ⚠️ ClearML is installed but not configured, skipping ClearML logging.' 
- f' See https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml#readme') + f' See https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration#readme') else: self.clearml = None From 3e14883825bb0f981e82aecb1da0e43e109fea68 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 23 Apr 2023 01:49:44 +0200 Subject: [PATCH 119/128] [Snyk] Security upgrade ubuntu from rolling to 22.10 (#11389) * fix: utils/docker/Dockerfile-cpu to reduce vulnerabilities The following vulnerabilities are fixed with an upgrade: - https://snyk.io/vuln/SNYK-UBUNTU2210-GNUTLS28-3319585 - https://snyk.io/vuln/SNYK-UBUNTU2210-SYSTEMD-3148007 - https://snyk.io/vuln/SNYK-UBUNTU2210-SYSTEMD-3148007 - https://snyk.io/vuln/SNYK-UBUNTU2210-SYSTEMD-3180315 - https://snyk.io/vuln/SNYK-UBUNTU2210-TAR-3261142 * Update Dockerfile-arm64 Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher Co-authored-by: snyk-bot --- utils/docker/Dockerfile-arm64 | 2 +- utils/docker/Dockerfile-cpu | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/docker/Dockerfile-arm64 b/utils/docker/Dockerfile-arm64 index 556ae59a8700..7b5c610e5071 100644 --- a/utils/docker/Dockerfile-arm64 +++ b/utils/docker/Dockerfile-arm64 @@ -3,7 +3,7 @@ # Image is aarch64-compatible for Apple M1 and other ARM architectures i.e. 
Jetson Nano and Raspberry Pi # Start FROM Ubuntu image https://hub.docker.com/_/ubuntu -FROM arm64v8/ubuntu:rolling +FROM arm64v8/ubuntu:22.10 # Downloads to user config dir ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ diff --git a/utils/docker/Dockerfile-cpu b/utils/docker/Dockerfile-cpu index 8e8c23bf952f..613bdffa4768 100644 --- a/utils/docker/Dockerfile-cpu +++ b/utils/docker/Dockerfile-cpu @@ -3,7 +3,7 @@ # Image is CPU-optimized for ONNX, OpenVINO and PyTorch YOLOv5 deployments # Start FROM Ubuntu image https://hub.docker.com/_/ubuntu -FROM ubuntu:rolling +FROM ubuntu:22.10 # Downloads to user config dir ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ From c35d9aedecdab9df481e7ab4059557c852f34f00 Mon Sep 17 00:00:00 2001 From: Shayan Mousavinia <45814390+ShAmoNiA@users.noreply.github.com> Date: Sun, 23 Apr 2023 19:34:17 +0330 Subject: [PATCH 120/128] Update hyp.no-augmentation.yaml (#11420) remove one zero in "hsv_s: 00" Signed-off-by: Shayan Mousavinia <45814390+ShAmoNiA@users.noreply.github.com> --- data/hyps/hyp.no-augmentation.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/hyps/hyp.no-augmentation.yaml b/data/hyps/hyp.no-augmentation.yaml index 0ae796c16dc2..8da18150538b 100644 --- a/data/hyps/hyp.no-augmentation.yaml +++ b/data/hyps/hyp.no-augmentation.yaml @@ -21,7 +21,7 @@ anchor_t: 4.0 # anchor-multiple threshold # this parameters are all zero since we want to use albumentation framework fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5) hsv_h: 0 # image HSV-Hue augmentation (fraction) -hsv_s: 00 # image HSV-Saturation augmentation (fraction) +hsv_s: 0 # image HSV-Saturation augmentation (fraction) hsv_v: 0 # image HSV-Value augmentation (fraction) degrees: 0.0 # image rotation (+/- deg) translate: 0 # image translation (+/- fraction) From 
ff6a9ac842f3a09941ac3dca5355cfa896b5f5d7 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 23 Apr 2023 18:38:25 +0200 Subject: [PATCH 121/128] Add Slack notification of CI errors (#11421) * Update links.yml Signed-off-by: Glenn Jocher * Update links * Update links --------- Signed-off-by: Glenn Jocher --- .github/workflows/ci-testing.yml | 16 ++++++++++++++++ .github/workflows/links.yml | 4 ++-- 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index bff95f654552..1ad6087921d6 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -45,6 +45,14 @@ jobs: python detect.py --weights ${{ matrix.model }}.onnx --img 320 python segment/predict.py --weights ${{ matrix.model }}-seg.onnx --img 320 python classify/predict.py --weights ${{ matrix.model }}-cls.onnx --img 224 + - name: Notify on failure + if: failure() && github.repository == 'ultralytics/yolov5' && (github.event_name == 'schedule' || github.event_name == 'push') + uses: slackapi/slack-github-action@v1.23.0 + with: + payload: | + {"text": " GitHub Actions error for ${{ github.workflow }} ❌\n\n\n*Repository:* https://github.com/${{ github.repository }}\n*Action:* https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}\n*Author:* ${{ github.actor }}\n*Event:* ${{ github.event_name }}\n*Job Status:* ${{ job.status }}\n"} + env: + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_YOLO }} Tests: timeout-minutes: 60 @@ -151,3 +159,11 @@ jobs: for path in '$m', '$b': model = torch.hub.load('.', 'custom', path=path, source='local') EOF + - name: Notify on failure + if: failure() && github.repository == 'ultralytics/yolov5' && (github.event_name == 'schedule' || github.event_name == 'push') + uses: slackapi/slack-github-action@v1.23.0 + with: + payload: | + {"text": " GitHub Actions error for ${{ github.workflow }} ❌\n\n\n*Repository:* https://github.com/${{ 
github.repository }}\n*Action:* https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}\n*Author:* ${{ github.actor }}\n*Event:* ${{ github.event_name }}\n*Job Status:* ${{ job.status }}\n"} + env: + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_YOLO }} diff --git a/.github/workflows/links.yml b/.github/workflows/links.yml index b3e82df34947..f64037201660 100644 --- a/.github/workflows/links.yml +++ b/.github/workflows/links.yml @@ -23,7 +23,7 @@ jobs: with: fail: true # accept 429(Instagram, 'too many requests'), 999(LinkedIn, 'unknown status code'), Timeout(Twitter) - args: --accept 429,999 --exclude-loopback --exclude twitter.com --exclude-mail './**/*.md' './**/*.html' + args: --accept 429,999 --exclude-loopback --exclude twitter.com --exclude-path '**/ci-testing.yaml' --exclude-mail './**/*.md' './**/*.html' env: GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} @@ -33,6 +33,6 @@ jobs: with: fail: true # accept 429(Instagram, 'too many requests'), 999(LinkedIn, 'unknown status code'), Timeout(Twitter) - args: --accept 429,999 --exclude-loopback --exclude twitter.com,url.com --exclude-mail './**/*.md' './**/*.html' './**/*.yml' './**/*.yaml' './**/*.py' './**/*.ipynb' + args: --accept 429,999 --exclude-loopback --exclude twitter.com,url.com --exclude-path '**/ci-testing.yaml' --exclude-mail './**/*.md' './**/*.html' './**/*.yml' './**/*.yaml' './**/*.py' './**/*.ipynb' env: GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} From f3ee5960671f7d48c2a71cf666a97318661192af Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 24 Apr 2023 17:14:31 +0200 Subject: [PATCH 122/128] Bump lycheeverse/lychee-action from 1.6.1 to 1.7.0 (#11427) Bumps [lycheeverse/lychee-action](https://github.com/lycheeverse/lychee-action) from 1.6.1 to 1.7.0. 
- [Release notes](https://github.com/lycheeverse/lychee-action/releases) - [Commits](https://github.com/lycheeverse/lychee-action/compare/v1.6.1...v1.7.0) --- updated-dependencies: - dependency-name: lycheeverse/lychee-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/links.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/links.yml b/.github/workflows/links.yml index f64037201660..a5413318030f 100644 --- a/.github/workflows/links.yml +++ b/.github/workflows/links.yml @@ -19,7 +19,7 @@ jobs: - uses: actions/checkout@v3 - name: Test Markdown and HTML links - uses: lycheeverse/lychee-action@v1.6.1 + uses: lycheeverse/lychee-action@v1.7.0 with: fail: true # accept 429(Instagram, 'too many requests'), 999(LinkedIn, 'unknown status code'), Timeout(Twitter) @@ -29,7 +29,7 @@ jobs: - name: Test Markdown, HTML, YAML, Python and Notebook links if: github.event_name == 'workflow_dispatch' - uses: lycheeverse/lychee-action@v1.6.1 + uses: lycheeverse/lychee-action@v1.7.0 with: fail: true # accept 429(Instagram, 'too many requests'), 999(LinkedIn, 'unknown status code'), Timeout(Twitter) From 8ecc7276ecdd9c409b3dc8b9051142569009c6f4 Mon Sep 17 00:00:00 2001 From: Hongxiao Xiang <83959468+xhx787@users.noreply.github.com> Date: Tue, 25 Apr 2023 23:45:42 +0800 Subject: [PATCH 123/128] Update README.md and README.zh-CN.md (#11434) revise readme.md --- README.md | 4 ++-- README.zh-CN.md | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 7bc7a6ce8ca4..5326816ce52c 100644 --- a/README.md +++ b/README.md @@ -309,7 +309,7 @@ python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # val Use pretrained YOLOv5m-seg.pt to predict bus.jpg: ```bash -python segment/predict.py --weights yolov5m-seg.pt --data 
data/images/bus.jpg +python segment/predict.py --weights yolov5m-seg.pt --source data/images/bus.jpg ``` ```python @@ -400,7 +400,7 @@ python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --im Use pretrained YOLOv5s-cls.pt to predict bus.jpg: ```bash -python classify/predict.py --weights yolov5s-cls.pt --data data/images/bus.jpg +python classify/predict.py --weights yolov5s-cls.pt --source data/images/bus.jpg ``` ```python diff --git a/README.zh-CN.md b/README.zh-CN.md index c7733488bd02..913f817a3c14 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -305,7 +305,7 @@ python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # 验 使用预训练的 YOLOv5m-seg.pt 来预测 bus.jpg: ```bash -python segment/predict.py --weights yolov5m-seg.pt --data data/images/bus.jpg +python segment/predict.py --weights yolov5m-seg.pt --source data/images/bus.jpg ``` ```python @@ -395,7 +395,7 @@ python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --im 使用预训练的 YOLOv5s-cls.pt 来预测 bus.jpg: ```bash -python classify/predict.py --weights yolov5s-cls.pt --data data/images/bus.jpg +python classify/predict.py --weights yolov5s-cls.pt --source data/images/bus.jpg ``` ```python From 22a3cbb97df70fd40221e17738cba2d97e266fad Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 29 Apr 2023 13:50:49 +0200 Subject: [PATCH 124/128] Improve Slack notification (#11458) * Improve Slack notification * Improve Slack notification * Improve Slack notification --- .github/workflows/ci-testing.yml | 20 +++++++++----------- .github/workflows/codeql-analysis.yml | 1 + .github/workflows/docker.yml | 1 + 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 1ad6087921d6..7de084fef06d 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -45,14 +45,6 @@ jobs: python detect.py --weights ${{ matrix.model }}.onnx --img 320 python segment/predict.py 
--weights ${{ matrix.model }}-seg.onnx --img 320 python classify/predict.py --weights ${{ matrix.model }}-cls.onnx --img 224 - - name: Notify on failure - if: failure() && github.repository == 'ultralytics/yolov5' && (github.event_name == 'schedule' || github.event_name == 'push') - uses: slackapi/slack-github-action@v1.23.0 - with: - payload: | - {"text": " GitHub Actions error for ${{ github.workflow }} ❌\n\n\n*Repository:* https://github.com/${{ github.repository }}\n*Action:* https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}\n*Author:* ${{ github.actor }}\n*Event:* ${{ github.event_name }}\n*Job Status:* ${{ job.status }}\n"} - env: - SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_YOLO }} Tests: timeout-minutes: 60 @@ -159,11 +151,17 @@ jobs: for path in '$m', '$b': model = torch.hub.load('.', 'custom', path=path, source='local') EOF - - name: Notify on failure - if: failure() && github.repository == 'ultralytics/yolov5' && (github.event_name == 'schedule' || github.event_name == 'push') + + Summary: + runs-on: ubuntu-latest + needs: [Benchmarks, Tests] # Add job names that you want to check for failure + if: always() # This ensures the job runs even if previous jobs fail + steps: + - name: Check for failure and notify + if: ${{ needs.Benchmarks.result == 'failure' || needs.Tests.result == 'failure' }} # Check if any of the jobs failed uses: slackapi/slack-github-action@v1.23.0 with: payload: | - {"text": " GitHub Actions error for ${{ github.workflow }} ❌\n\n\n*Repository:* https://github.com/${{ github.repository }}\n*Action:* https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}\n*Author:* ${{ github.actor }}\n*Event:* ${{ github.event_name }}\n*Job Status:* ${{ job.status }}\n"} + {"text": " GitHub Actions error for ${{ github.workflow }} ❌\n\n\n*Repository:* https://github.com/${{ github.repository }}\n*Action:* https://github.com/${{ github.repository }}/actions/runs/${{ 
github.run_id }}\n*Author:* ${{ github.actor }}\n*Event:* ${{ github.event_name }}\n"} env: SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_YOLO }} diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index b6f751096d9a..05db12dabd1a 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -6,6 +6,7 @@ name: "CodeQL" on: schedule: - cron: '0 0 1 * *' # Runs at 00:00 UTC on the 1st of every month + workflow_dispatch: jobs: analyze: diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 190b48875fa6..13e79216fc20 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -6,6 +6,7 @@ name: Publish Docker Images on: push: branches: [ master ] + workflow_dispatch: jobs: docker: From 5178d415fb4b1346b953ca79a7d52b806b6d261b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 29 Apr 2023 17:56:10 +0200 Subject: [PATCH 125/128] Add discord links (#11459) * Add discord links * Update README.md Signed-off-by: Glenn Jocher * Add discord links --------- Signed-off-by: Glenn Jocher --- README.md | 12 ++++++++++-- README.zh-CN.md | 12 ++++++++++-- 2 files changed, 20 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 5326816ce52c..0cefb443b90a 100644 --- a/README.md +++ b/README.md @@ -20,7 +20,9 @@ YOLOv5 🚀 is the world's most loved vision AI, representing Ultralytics open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development. -To request an Enterprise License please complete the form at Ultralytics Licensing. +We hope that the resources here will help you get the most out of YOLOv5. Please browse the YOLOv5 Docs for details, raise an issue on GitHub for support, and join our Discord community for questions and discussions! 
+ +To request an Enterprise License please complete the form at [Ultralytics Licensing](https://ultralytics.com/license). @@ -461,7 +466,7 @@ YOLOv5 is available under two different licenses: ##
Contact
-For YOLOv5 bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues) or the [Ultralytics Community Forum](https://community.ultralytics.com/). +For YOLOv5 bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues), and join our [Discord](https://discord.gg/n6cFeSPZdD) community for questions and discussions!
@@ -482,6 +487,9 @@ For YOLOv5 bug reports and feature requests please visit [GitHub Issues](https:/ + + +
[tta]: https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation diff --git a/README.zh-CN.md b/README.zh-CN.md index 913f817a3c14..4b4d9500ae9a 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -19,7 +19,9 @@ YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表 Ultralytics 对未来视觉 AI 方法的开源研究,结合在数千小时的研究和开发中积累的经验教训和最佳实践。 -如果要申请企业许可证,请填写表格Ultralytics 许可. +我们希望这里的资源能帮助您充分利用 YOLOv5。请浏览 YOLOv5 文档 了解详细信息,在 GitHub 上提交问题以获得支持,并加入我们的 Discord 社区进行问题和讨论! + +如需申请企业许可,请在 [Ultralytics Licensing](https://ultralytics.com/license) 处填写表格 @@ -456,7 +461,7 @@ YOLOv5 在两种不同的 License 下可用: ##
联系我们
-请访问 [GitHub Issues](https://github.com/ultralytics/yolov5/issues) 或 [Ultralytics Community Forum](https://community.ultralytics.com) 以报告 YOLOv5 错误和请求功能。 +对于 YOLOv5 的错误报告和功能请求,请访问 [GitHub Issues](https://github.com/ultralytics/yolov5/issues),并加入我们的 [Discord](https://discord.gg/n6cFeSPZdD) 社区进行问题和讨论!
@@ -477,6 +482,9 @@ YOLOv5 在两种不同的 License 下可用: + + +
[tta]: https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation From 8211a033ef9abe51ac0885989e0807f87e215abf Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 29 Apr 2023 20:51:02 +0200 Subject: [PATCH 126/128] Add https://docs.ultralytics.com/help/contributing (#11460) * Add https://docs.ultralytics.com/help/contributing * Add https://docs.ultralytics.com/help/contributing --- .github/ISSUE_TEMPLATE/bug-report.yml | 2 +- .github/ISSUE_TEMPLATE/feature-request.yml | 2 +- .github/PULL_REQUEST_TEMPLATE.md | 2 +- README.md | 2 +- README.zh-CN.md | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml index fcb64138b088..b82e55a2ec2f 100644 --- a/.github/ISSUE_TEMPLATE/bug-report.yml +++ b/.github/ISSUE_TEMPLATE/bug-report.yml @@ -80,6 +80,6 @@ body: label: Are you willing to submit a PR? description: > (Optional) We encourage you to submit a [Pull Request](https://github.com/ultralytics/yolov5/pulls) (PR) to help improve YOLOv5 for everyone, especially if you have a good understanding of how to implement a fix or feature. - See the YOLOv5 [Contributing Guide](https://github.com/ultralytics/yolov5/blob/master/CONTRIBUTING.md) to get started. + See the YOLOv5 [Contributing Guide](https://docs.ultralytics.com/help/contributing) to get started. options: - label: Yes I'd like to help by submitting a PR! diff --git a/.github/ISSUE_TEMPLATE/feature-request.yml b/.github/ISSUE_TEMPLATE/feature-request.yml index 68ef985186ef..1d3d53df217e 100644 --- a/.github/ISSUE_TEMPLATE/feature-request.yml +++ b/.github/ISSUE_TEMPLATE/feature-request.yml @@ -45,6 +45,6 @@ body: label: Are you willing to submit a PR? description: > (Optional) We encourage you to submit a [Pull Request](https://github.com/ultralytics/yolov5/pulls) (PR) to help improve YOLOv5 for everyone, especially if you have a good understanding of how to implement a fix or feature. 
- See the YOLOv5 [Contributing Guide](https://github.com/ultralytics/yolov5/blob/master/CONTRIBUTING.md) to get started. + See the YOLOv5 [Contributing Guide](https://docs.ultralytics.com/help/contributing) to get started. options: - label: Yes I'd like to help by submitting a PR! diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 51f9803a57a5..d96d5afd2836 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -5,7 +5,7 @@ Thank you for submitting a YOLOv5 🚀 Pull Request! We want to make contributin - Link this PR to a YOLOv5 [issue](https://github.com/ultralytics/yolov5/issues) to help us understand what bug fix or feature is being implemented. - Provide before and after profiling/inference/training results to help us quantify the improvement your PR provides (if applicable). -Please see our ✅ [Contributing Guide](https://github.com/ultralytics/yolov5/blob/master/CONTRIBUTING.md) for more details. +Please see our ✅ [Contributing Guide](https://docs.ultralytics.com/help/contributing) for more details. Note that Copilot will summarize this PR below, do not modify the 'copilot:all' line. --> diff --git a/README.md b/README.md index 0cefb443b90a..37f683343f53 100644 --- a/README.md +++ b/README.md @@ -450,7 +450,7 @@ Get started in seconds with our verified environments. Click each icon below for ##
Contribute
-We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible. Please see our [Contributing Guide](CONTRIBUTING.md) to get started, and fill out the [YOLOv5 Survey](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experiences. Thank you to all our contributors! +We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible. Please see our [Contributing Guide](https://docs.ultralytics.com/help/contributing/) to get started, and fill out the [YOLOv5 Survey](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experiences. Thank you to all our contributors! diff --git a/README.zh-CN.md b/README.zh-CN.md index 4b4d9500ae9a..da60d3fe0573 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -445,7 +445,7 @@ python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --inclu ##
贡献
-我们喜欢您的意见或建议!我们希望尽可能简单和透明地为 YOLOv5 做出贡献。请看我们的 [投稿指南](CONTRIBUTING.md),并填写 [YOLOv5调查](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) 向我们发送您的体验反馈。感谢我们所有的贡献者! +我们喜欢您的意见或建议!我们希望尽可能简单和透明地为 YOLOv5 做出贡献。请看我们的 [投稿指南](https://docs.ultralytics.com/help/contributing/),并填写 [YOLOv5调查](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) 向我们发送您的体验反馈。感谢我们所有的贡献者! From 1d65a5ac9069b3b60b97c3c805d1250f2993d5e2 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 1 May 2023 10:35:51 +0200 Subject: [PATCH 127/128] Add links to https://docs.ultralytics.com/help/ (#11462) --- .github/ISSUE_TEMPLATE/bug-report.yml | 2 +- CONTRIBUTING.md | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml index b82e55a2ec2f..04f9c76fde1f 100644 --- a/.github/ISSUE_TEMPLATE/bug-report.yml +++ b/.github/ISSUE_TEMPLATE/bug-report.yml @@ -62,7 +62,7 @@ body: label: Minimal Reproducible Example description: > When asking a question, people will be better able to provide help if you provide code that they can easily understand and use to **reproduce** the problem. - This is referred to by community members as creating a [minimal reproducible example](https://stackoverflow.com/help/minimal-reproducible-example). + This is referred to by community members as creating a [minimal reproducible example](https://docs.ultralytics.com/help/minimum_reproducible_example/). placeholder: | ``` # Code to reproduce your issue here diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 6e9ce5998d9f..95d88b9830d6 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -66,7 +66,7 @@ short guidelines below to help users provide what we need to get started. When asking a question, people will be better able to provide help if you provide **code** that they can easily understand and use to **reproduce** the problem. 
This is referred to by community members as creating -a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example). Your code that reproduces +a [minimum reproducible example](https://docs.ultralytics.com/help/minimum_reproducible_example/). Your code that reproduces the problem should be: - ✅ **Minimal** – Use as little code as possible that still produces the same problem @@ -84,7 +84,7 @@ should be: If you believe your problem meets all of the above criteria, please close this issue and raise a new one using the 🐛 **Bug Report** [template](https://github.com/ultralytics/yolov5/issues/new/choose) and provide -a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example) to help us better +a [minimum reproducible example](https://docs.ultralytics.com/help/minimum_reproducible_example/) to help us better understand and diagnose your problem. ## License From 867f7f0e22c268590dcf036167b3dc5e87f8d8a2 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 1 May 2023 14:33:31 +0200 Subject: [PATCH 128/128] Update links.yml (#11463) * Update links.yml Signed-off-by: Glenn Jocher * Update links.yml Signed-off-by: Glenn Jocher * Update ci-testing.yml Signed-off-by: Glenn Jocher * Update links.yml Signed-off-by: Glenn Jocher * Update links.yml Signed-off-by: Glenn Jocher * Update ci-testing.yml Signed-off-by: Glenn Jocher * updates --------- Signed-off-by: Glenn Jocher --- .github/workflows/ci-testing.yml | 2 +- .github/workflows/links.yml | 36 +++++++++++++++++++------------- 2 files changed, 22 insertions(+), 16 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 7de084fef06d..e71a4b8f16ac 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -158,7 +158,7 @@ jobs: if: always() # This ensures the job runs even if previous jobs fail steps: - name: Check for failure and notify - if: ${{ needs.Benchmarks.result == 'failure' || 
needs.Tests.result == 'failure' }} # Check if any of the jobs failed + if: (needs.Benchmarks.result == 'failure' || needs.Tests.result == 'failure') && github.repository == 'ultralytics/yolov5' && (github.event_name == 'schedule' || github.event_name == 'push') uses: slackapi/slack-github-action@v1.23.0 with: payload: | diff --git a/.github/workflows/links.yml b/.github/workflows/links.yml index a5413318030f..306689f46507 100644 --- a/.github/workflows/links.yml +++ b/.github/workflows/links.yml @@ -1,5 +1,6 @@ # Ultralytics YOLO 🚀, AGPL-3.0 license -# YOLO Continuous Integration (CI) GitHub Actions tests +# YOLO Continuous Integration (CI) GitHub Actions tests broken link checker +# Accept 429(Instagram, 'too many requests'), 999(LinkedIn, 'unknown status code'), Timeout(Twitter) name: Check Broken links @@ -18,21 +19,26 @@ jobs: steps: - uses: actions/checkout@v3 - - name: Test Markdown and HTML links - uses: lycheeverse/lychee-action@v1.7.0 + - name: Download and install lychee + run: | + LYCHEE_URL=$(curl -s https://github.com/gitapi/repos/lycheeverse/lychee/releases/latest | grep "browser_download_url" | grep "x86_64-unknown-linux-gnu.tar.gz" | cut -d '"' -f 4) + curl -L $LYCHEE_URL -o lychee.tar.gz + tar xzf lychee.tar.gz + sudo mv lychee /usr/local/bin + + - name: Test Markdown and HTML links with retry + uses: nick-invision/retry@v2 with: - fail: true - # accept 429(Instagram, 'too many requests'), 999(LinkedIn, 'unknown status code'), Timeout(Twitter) - args: --accept 429,999 --exclude-loopback --exclude twitter.com --exclude-path '**/ci-testing.yaml' --exclude-mail './**/*.md' './**/*.html' - env: - GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} + timeout_minutes: 5 + retry_wait_seconds: 60 + max_attempts: 3 + command: lychee --accept 429,999 --exclude-loopback --exclude twitter.com --exclude-path '**/ci.yaml' --exclude-mail --github-token ${{ secrets.GITHUB_TOKEN }} './**/*.md' './**/*.html' - - name: Test Markdown, HTML, YAML, Python and Notebook links + - 
name: Test Markdown, HTML, YAML, Python and Notebook links with retry if: github.event_name == 'workflow_dispatch' - uses: lycheeverse/lychee-action@v1.7.0 + uses: nick-invision/retry@v2 with: - fail: true - # accept 429(Instagram, 'too many requests'), 999(LinkedIn, 'unknown status code'), Timeout(Twitter) - args: --accept 429,999 --exclude-loopback --exclude twitter.com,url.com --exclude-path '**/ci-testing.yaml' --exclude-mail './**/*.md' './**/*.html' './**/*.yml' './**/*.yaml' './**/*.py' './**/*.ipynb' - env: - GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} + timeout_minutes: 5 + retry_wait_seconds: 60 + max_attempts: 3 + command: lychee --accept 429,999 --exclude-loopback --exclude twitter.com,url.com --exclude-path '**/ci.yaml' --exclude-mail --github-token ${{ secrets.GITHUB_TOKEN }} './**/*.md' './**/*.html' './**/*.yml' './**/*.yaml' './**/*.py' './**/*.ipynb'