diff --git a/.github/CODE_OF_CONDUCT.md b/.github/CODE_OF_CONDUCT.md deleted file mode 100644 index 27e59e9aab38..000000000000 --- a/.github/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,128 +0,0 @@ -# YOLOv5 ๐Ÿš€ Contributor Covenant Code of Conduct - -## Our Pledge - -We as members, contributors, and leaders pledge to make participation in our -community a harassment-free experience for everyone, regardless of age, body -size, visible or invisible disability, ethnicity, sex characteristics, gender -identity and expression, level of experience, education, socio-economic status, -nationality, personal appearance, race, religion, or sexual identity -and orientation. - -We pledge to act and interact in ways that contribute to an open, welcoming, -diverse, inclusive, and healthy community. - -## Our Standards - -Examples of behavior that contributes to a positive environment for our -community include: - -- Demonstrating empathy and kindness toward other people -- Being respectful of differing opinions, viewpoints, and experiences -- Giving and gracefully accepting constructive feedback -- Accepting responsibility and apologizing to those affected by our mistakes, - and learning from the experience -- Focusing on what is best not just for us as individuals, but for the - overall community - -Examples of unacceptable behavior include: - -- The use of sexualized language or imagery, and sexual attention or - advances of any kind -- Trolling, insulting or derogatory comments, and personal or political attacks -- Public or private harassment -- Publishing others' private information, such as a physical or email - address, without their explicit permission -- Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Enforcement Responsibilities - -Community leaders are responsible for clarifying and enforcing our standards of -acceptable behavior and will take appropriate and fair corrective action in -response to any behavior that they deem inappropriate, threatening, offensive, -or harmful. - -Community leaders have the right and responsibility to remove, edit, or reject -comments, commits, code, wiki edits, issues, and other contributions that are -not aligned to this Code of Conduct, and will communicate reasons for moderation -decisions when appropriate. - -## Scope - -This Code of Conduct applies within all community spaces, and also applies when -an individual is officially representing the community in public spaces. -Examples of representing our community include using an official e-mail address, -posting via an official social media account, or acting as an appointed -representative at an online or offline event. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported to the community leaders responsible for enforcement at -hello@ultralytics.com. -All complaints will be reviewed and investigated promptly and fairly. - -All community leaders are obligated to respect the privacy and security of the -reporter of any incident. - -## Enforcement Guidelines - -Community leaders will follow these Community Impact Guidelines in determining -the consequences for any action they deem in violation of this Code of Conduct: - -### 1. Correction - -**Community Impact**: Use of inappropriate language or other behavior deemed -unprofessional or unwelcome in the community. 
- -**Consequence**: A private, written warning from community leaders, providing -clarity around the nature of the violation and an explanation of why the -behavior was inappropriate. A public apology may be requested. - -### 2. Warning - -**Community Impact**: A violation through a single incident or series -of actions. - -**Consequence**: A warning with consequences for continued behavior. No -interaction with the people involved, including unsolicited interaction with -those enforcing the Code of Conduct, for a specified period of time. This -includes avoiding interactions in community spaces as well as external channels -like social media. Violating these terms may lead to a temporary or -permanent ban. - -### 3. Temporary Ban - -**Community Impact**: A serious violation of community standards, including -sustained inappropriate behavior. - -**Consequence**: A temporary ban from any sort of interaction or public -communication with the community for a specified period of time. No public or -private interaction with the people involved, including unsolicited interaction -with those enforcing the Code of Conduct, is allowed during this period. -Violating these terms may lead to a permanent ban. - -### 4. Permanent Ban - -**Community Impact**: Demonstrating a pattern of violation of community -standards, including sustained inappropriate behavior, harassment of an -individual, or aggression toward or disparagement of classes of individuals. - -**Consequence**: A permanent ban from any sort of public interaction within -the community. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], -version 2.0, available at -https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. - -Community Impact Guidelines were inspired by [Mozilla's code of conduct -enforcement ladder](https://github.com/mozilla/diversity). - -For answers to common questions about this code of conduct, see the FAQ at -https://www.contributor-covenant.org/faq. Translations are available at -https://www.contributor-covenant.org/translations. - -[homepage]: https://www.contributor-covenant.org diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml index fcb64138b088..04f9c76fde1f 100644 --- a/.github/ISSUE_TEMPLATE/bug-report.yml +++ b/.github/ISSUE_TEMPLATE/bug-report.yml @@ -62,7 +62,7 @@ body: label: Minimal Reproducible Example description: > When asking a question, people will be better able to provide help if you provide code that they can easily understand and use to **reproduce** the problem. - This is referred to by community members as creating a [minimal reproducible example](https://stackoverflow.com/help/minimal-reproducible-example). + This is referred to by community members as creating a [minimal reproducible example](https://docs.ultralytics.com/help/minimum_reproducible_example/). placeholder: | ``` # Code to reproduce your issue here @@ -80,6 +80,6 @@ body: label: Are you willing to submit a PR? description: > (Optional) We encourage you to submit a [Pull Request](https://github.com/ultralytics/yolov5/pulls) (PR) to help improve YOLOv5 for everyone, especially if you have a good understanding of how to implement a fix or feature. - See the YOLOv5 [Contributing Guide](https://github.com/ultralytics/yolov5/blob/master/CONTRIBUTING.md) to get started. + See the YOLOv5 [Contributing Guide](https://docs.ultralytics.com/help/contributing) to get started. options: - label: Yes I'd like to help by submitting a PR! 
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index 4db7cefb2707..743feb957ff1 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -1,8 +1,11 @@ blank_issues_enabled: true contact_links: + - name: ๐Ÿ“„ Docs + url: https://docs.ultralytics.com/yolov5 + about: View Ultralytics YOLOv5 Docs - name: ๐Ÿ’ฌ Forum url: https://community.ultralytics.com/ about: Ask on Ultralytics Community Forum - - name: Stack Overflow - url: https://stackoverflow.com/search?q=YOLOv5 - about: Ask on Stack Overflow with 'YOLOv5' tag + - name: ๐ŸŽง Discord + url: https://discord.gg/n6cFeSPZdD + about: Ask on Ultralytics Discord diff --git a/.github/ISSUE_TEMPLATE/feature-request.yml b/.github/ISSUE_TEMPLATE/feature-request.yml index 68ef985186ef..1d3d53df217e 100644 --- a/.github/ISSUE_TEMPLATE/feature-request.yml +++ b/.github/ISSUE_TEMPLATE/feature-request.yml @@ -45,6 +45,6 @@ body: label: Are you willing to submit a PR? description: > (Optional) We encourage you to submit a [Pull Request](https://github.com/ultralytics/yolov5/pulls) (PR) to help improve YOLOv5 for everyone, especially if you have a good understanding of how to implement a fix or feature. - See the YOLOv5 [Contributing Guide](https://github.com/ultralytics/yolov5/blob/master/CONTRIBUTING.md) to get started. + See the YOLOv5 [Contributing Guide](https://docs.ultralytics.com/help/contributing) to get started. options: - label: Yes I'd like to help by submitting a PR! diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index f25b017ace8b..d96d5afd2836 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -5,5 +5,9 @@ Thank you for submitting a YOLOv5 ๐Ÿš€ Pull Request! We want to make contributin - Link this PR to a YOLOv5 [issue](https://github.com/ultralytics/yolov5/issues) to help us understand what bug fix or feature is being implemented. - Provide before and after profiling/inference/training results to help us quantify the improvement your PR provides (if applicable). -Please see our โœ… [Contributing Guide](https://github.com/ultralytics/yolov5/blob/master/CONTRIBUTING.md) for more details. +Please see our โœ… [Contributing Guide](https://docs.ultralytics.com/help/contributing) for more details. + +Note that Copilot will summarize this PR below, do not modify the 'copilot:all' line. --> + +copilot:all diff --git a/.github/SECURITY.md b/.github/SECURITY.md deleted file mode 100644 index aa3e8409da6b..000000000000 --- a/.github/SECURITY.md +++ /dev/null @@ -1,7 +0,0 @@ -# Security Policy - -We aim to make YOLOv5 ๐Ÿš€ as secure as possible! If you find potential vulnerabilities or have any concerns please let us know so we can investigate and take corrective action if needed. - -### Reporting a Vulnerability - -To report vulnerabilities please email us at hello@ultralytics.com or visit https://ultralytics.com/contact. Thank you! 
diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index f31bb6e6ce3c..e71a4b8f16ac 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license # YOLOv5 Continuous Integration (CI) GitHub Actions tests name: YOLOv5 CI @@ -18,19 +18,14 @@ jobs: fail-fast: false matrix: os: [ ubuntu-latest ] - python-version: [ '3.9' ] # requires python<=3.9 + python-version: [ '3.10' ] # requires python<=3.10 model: [ yolov5n ] steps: - uses: actions/checkout@v3 - uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} - #- name: Cache pip - # uses: actions/cache@v3 - # with: - # path: ~/.cache/pip - # key: ${{ runner.os }}-Benchmarks-${{ hashFiles('requirements.txt') }} - # restore-keys: ${{ runner.os }}-Benchmarks- + cache: 'pip' # caching pip dependencies - name: Install requirements run: | python -m pip install --upgrade pip wheel @@ -79,16 +74,7 @@ jobs: - uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} - - name: Get cache dir - # https://github.com/actions/cache/blob/master/examples.md#multiple-oss-in-a-workflow - id: pip-cache - run: echo "::set-output name=dir::$(pip cache dir)" - - name: Cache pip - uses: actions/cache@v3 - with: - path: ${{ steps.pip-cache.outputs.dir }} - key: ${{ runner.os }}-${{ matrix.python-version }}-pip-${{ hashFiles('requirements.txt') }} - restore-keys: ${{ runner.os }}-${{ matrix.python-version }}-pip- + cache: 'pip' # caching pip dependencies - name: Install requirements run: | python -m pip install --upgrade pip wheel @@ -165,3 +151,17 @@ jobs: for path in '$m', '$b': model = torch.hub.load('.', 'custom', path=path, source='local') EOF + + Summary: + runs-on: ubuntu-latest + needs: [Benchmarks, Tests] # Add job names that you want to check for failure + if: always() # This ensures the job runs even if previous jobs fail + steps: + - name: Check for failure and notify + if: (needs.Benchmarks.result == 'failure' || needs.Tests.result == 'failure') && github.repository == 'ultralytics/yolov5' && (github.event_name == 'schedule' || github.event_name == 'push') + uses: slackapi/slack-github-action@v1.23.0 + with: + payload: | + {"text": " GitHub Actions error for ${{ github.workflow }} โŒ\n\n\n*Repository:* https://github.com/${{ github.repository }}\n*Action:* https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}\n*Author:* ${{ github.actor }}\n*Event:* ${{ github.event_name }}\n"} + env: + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_YOLO }} diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index b6f751096d9a..05db12dabd1a 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -6,6 +6,7 @@ name: "CodeQL" on: schedule: - cron: '0 0 1 * *' # Runs at 00:00 UTC on the 1st of every month + workflow_dispatch: jobs: analyze: diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 1d0bd30b22cb..13e79216fc20 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license # Builds ultralytics/yolov5:latest images on DockerHub https://hub.docker.com/r/ultralytics/yolov5 name: Publish Docker Images @@ -6,6 +6,7 @@ name: Publish Docker Images on: push: branches: [ master ] + workflow_dispatch: 
jobs: docker: @@ -29,7 +30,7 @@ jobs: password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Build and push arm64 image - uses: docker/build-push-action@v3 + uses: docker/build-push-action@v4 continue-on-error: true with: context: . @@ -39,7 +40,7 @@ jobs: tags: ultralytics/yolov5:latest-arm64 - name: Build and push CPU image - uses: docker/build-push-action@v3 + uses: docker/build-push-action@v4 continue-on-error: true with: context: . @@ -48,7 +49,7 @@ jobs: tags: ultralytics/yolov5:latest-cpu - name: Build and push GPU image - uses: docker/build-push-action@v3 + uses: docker/build-push-action@v4 continue-on-error: true with: context: . diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index 5e1589c340ed..8aca12d3c370 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license name: Greetings @@ -23,13 +23,11 @@ jobs: - โœ… Reduce changes to the absolute **minimum** required for your bug fix or feature addition. _"It is not daily increase but daily decrease, hack away the unessential. The closer to the source, the less wastage there is."_ โ€” Bruce Lee issue-message: | - ๐Ÿ‘‹ Hello @${{ github.actor }}, thank you for your interest in YOLOv5 ๐Ÿš€! Please visit our โญ๏ธ [Tutorials](https://github.com/ultralytics/yolov5/wiki#tutorials) to get started, where you can find quickstart guides for simple tasks like [Custom Data Training](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data) all the way to advanced concepts like [Hyperparameter Evolution](https://github.com/ultralytics/yolov5/issues/607). + ๐Ÿ‘‹ Hello @${{ github.actor }}, thank you for your interest in YOLOv5 ๐Ÿš€! Please visit our โญ๏ธ [Tutorials](https://docs.ultralytics.com/yolov5/) to get started, where you can find quickstart guides for simple tasks like [Custom Data Training](https://docs.ultralytics.com/yolov5/tutorials/train_custom_data/) all the way to advanced concepts like [Hyperparameter Evolution](https://docs.ultralytics.com/yolov5/tutorials/hyperparameter_evolution/). - If this is a ๐Ÿ› Bug Report, please provide screenshots and **minimum viable code to reproduce your issue**, otherwise we can not help you. + If this is a ๐Ÿ› Bug Report, please provide a **minimum reproducible example** to help us debug it. - If this is a custom training โ“ Question, please provide as much information as possible, including dataset images, training logs, screenshots, and a public link to online [W&B logging](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data#visualize) if available. - - For business inquiries or professional support requests please visit https://ultralytics.com or email support@ultralytics.com. + If this is a custom training โ“ Question, please provide as much information as possible, including dataset image examples and training logs, and verify you are following our [Tips for Best Training Results](https://docs.ultralytics.com/yolov5/tutorials/tips_for_best_training_results/). ## Requirements @@ -45,13 +43,23 @@ jobs: YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled): - **Notebooks** with free GPU: Run on Gradient Open In Colab Open In Kaggle - - **Google Cloud** Deep Learning VM. 
See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart) - - **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart) - - **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) Docker Pulls + - **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/) + - **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/) + - **Docker Image**. See [Docker Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/docker_image_quickstart_tutorial/) Docker Pulls ## Status YOLOv5 CI - If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on MacOS, Windows, and Ubuntu every 24 hours and on every commit. + If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on macOS, Windows, and Ubuntu every 24 hours and on every commit. + + ## Introducing YOLOv8 ๐Ÿš€ + + We're excited to announce the launch of our latest state-of-the-art (SOTA) object detection model for 2023 - [YOLOv8](https://github.com/ultralytics/ultralytics) ๐Ÿš€! + Designed to be fast, accurate, and easy to use, YOLOv8 is an ideal choice for a wide range of object detection, image segmentation and image classification tasks. With YOLOv8, you'll be able to quickly and accurately detect objects in real-time, streamline your workflows, and achieve new levels of accuracy in your projects. 
+ + Check out our [YOLOv8 Docs](https://docs.ultralytics.com/) for details and get started with: + ```bash + pip install ultralytics + ``` diff --git a/.github/workflows/links.yml b/.github/workflows/links.yml new file mode 100644 index 000000000000..306689f46507 --- /dev/null +++ b/.github/workflows/links.yml @@ -0,0 +1,44 @@ +# Ultralytics YOLO ๐Ÿš€, AGPL-3.0 license +# YOLO Continuous Integration (CI) GitHub Actions tests broken link checker +# Accept 429(Instagram, 'too many requests'), 999(LinkedIn, 'unknown status code'), Timeout(Twitter) + +name: Check Broken links + +on: + push: + branches: [master] + pull_request: + branches: [master] + workflow_dispatch: + schedule: + - cron: '0 0 * * *' # runs at 00:00 UTC every day + +jobs: + Links: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + + - name: Download and install lychee + run: | + LYCHEE_URL=$(curl -s https://github.com/gitapi/repos/lycheeverse/lychee/releases/latest | grep "browser_download_url" | grep "x86_64-unknown-linux-gnu.tar.gz" | cut -d '"' -f 4) + curl -L $LYCHEE_URL -o lychee.tar.gz + tar xzf lychee.tar.gz + sudo mv lychee /usr/local/bin + + - name: Test Markdown and HTML links with retry + uses: nick-invision/retry@v2 + with: + timeout_minutes: 5 + retry_wait_seconds: 60 + max_attempts: 3 + command: lychee --accept 429,999 --exclude-loopback --exclude twitter.com --exclude-path '**/ci.yaml' --exclude-mail --github-token ${{ secrets.GITHUB_TOKEN }} './**/*.md' './**/*.html' + + - name: Test Markdown, HTML, YAML, Python and Notebook links with retry + if: github.event_name == 'workflow_dispatch' + uses: nick-invision/retry@v2 + with: + timeout_minutes: 5 + retry_wait_seconds: 60 + max_attempts: 3 + command: lychee --accept 429,999 --exclude-loopback --exclude twitter.com,url.com --exclude-path '**/ci.yaml' --exclude-mail --github-token ${{ secrets.GITHUB_TOKEN }} './**/*.md' './**/*.html' './**/*.yml' './**/*.yaml' './**/*.py' './**/*.ipynb' diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 9067c343608b..65c8f70798f1 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license name: Close stale issues on: @@ -9,29 +9,36 @@ jobs: stale: runs-on: ubuntu-latest steps: - - uses: actions/stale@v6 + - uses: actions/stale@v8 with: repo-token: ${{ secrets.GITHUB_TOKEN }} + stale-issue-message: | - ๐Ÿ‘‹ Hello, this issue has been automatically marked as stale because it has not had recent activity. Please note it will be closed if no further activity occurs. + ๐Ÿ‘‹ Hello there! We wanted to give you a friendly reminder that this issue has not had any recent activity and may be closed soon, but don't worry - you can always reopen it if needed. If you still have any questions or concerns, please feel free to let us know how we can help. 
- Access additional [YOLOv5](https://ultralytics.com/yolov5) ๐Ÿš€ resources: - - **Wiki** โ€“ https://github.com/ultralytics/yolov5/wiki - - **Tutorials** โ€“ https://github.com/ultralytics/yolov5#tutorials - - **Docs** โ€“ https://docs.ultralytics.com + For additional resources and information, please see the links below: - Access additional [Ultralytics](https://ultralytics.com) โšก resources: - - **Ultralytics HUB** โ€“ https://ultralytics.com/hub - - **Vision API** โ€“ https://ultralytics.com/yolov5 - - **About Us** โ€“ https://ultralytics.com/about - - **Join Our Team** โ€“ https://ultralytics.com/work - - **Contact Us** โ€“ https://ultralytics.com/contact + - **Docs**: https://docs.ultralytics.com + - **HUB**: https://hub.ultralytics.com + - **Community**: https://community.ultralytics.com Feel free to inform us of any other **issues** you discover or **feature requests** that come to mind in the future. Pull Requests (PRs) are also always welcomed! - Thank you for your contributions to YOLOv5 ๐Ÿš€ and Vision AI โญ! + Thank you for your contributions to YOLO ๐Ÿš€ and Vision AI โญ + + stale-pr-message: | + ๐Ÿ‘‹ Hello there! We wanted to let you know that we've decided to close this pull request due to inactivity. We appreciate the effort you put into contributing to our project, but unfortunately, not all contributions are suitable or aligned with our product roadmap. + + We hope you understand our decision, and please don't let it discourage you from contributing to open source projects in the future. We value all of our community members and their contributions, and we encourage you to keep exploring new projects and ways to get involved. + + For additional resources and information, please see the links below: + + - **Docs**: https://docs.ultralytics.com + - **HUB**: https://hub.ultralytics.com + - **Community**: https://community.ultralytics.com + + Thank you for your contributions to YOLO ๐Ÿš€ and Vision AI โญ - stale-pr-message: 'This pull request has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions YOLOv5 ๐Ÿš€ and Vision AI โญ.' 
days-before-issue-stale: 30 days-before-issue-close: 10 days-before-pr-stale: 90 diff --git a/.github/workflows/translate-readme.yml b/.github/workflows/translate-readme.yml index 76f59b83e65f..d5e2be26f523 100644 --- a/.github/workflows/translate-readme.yml +++ b/.github/workflows/translate-readme.yml @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license # README translation action to translate README.md to Chinese as README.zh-CN.md on any change to README.md name: Translate README @@ -6,8 +6,7 @@ name: Translate README on: push: branches: - - main - - master + - translate_readme # replace with 'master' to enable action paths: - README.md @@ -20,7 +19,7 @@ jobs: uses: actions/setup-node@v3 with: node-version: 16 - # ISO Langusge Codes: https://cloud.google.com/translate/docs/languages + # ISO Language Codes: https://cloud.google.com/translate/docs/languages - name: Adding README - Chinese Simplified uses: dephraiim/translate-readme@main with: diff --git a/.gitignore b/.gitignore index 69a00843ea42..6bcedfac610d 100755 --- a/.gitignore +++ b/.gitignore @@ -60,6 +60,7 @@ VOC/ *_saved_model/ *_web_model/ *_openvino_model/ +*_paddle_model/ darknet53.conv.74 yolov3-tiny.conv.15 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 28dbc89223cf..8bd40484c522 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,9 +1,7 @@ -# Define hooks for code formations -# Will be applied on any updated commit files if a user has installed and linked commit hook - -default_language_version: - python: python3.8 +# Ultralytics YOLO ๐Ÿš€, AGPL-3.0 license +# Pre-commit hooks. For more information see https://github.com/pre-commit/pre-commit-hooks/blob/main/README.md +exclude: 'docs/' # Define bot property if installed via https://github.com/marketplace/pre-commit-ci ci: autofix_prs: true @@ -15,28 +13,28 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.4.0 hooks: - # - id: end-of-file-fixer + - id: end-of-file-fixer - id: trailing-whitespace - id: check-case-conflict - id: check-yaml - - id: check-toml - - id: pretty-format-json - id: check-docstring-first + - id: double-quote-string-fixer + - id: detect-private-key - repo: https://github.com/asottile/pyupgrade - rev: v3.3.0 + rev: v3.3.1 hooks: - id: pyupgrade name: Upgrade code - args: [ --py37-plus ] + args: [--py37-plus] - repo: https://github.com/PyCQA/isort - rev: 5.10.1 + rev: 5.12.0 hooks: - id: isort name: Sort imports - - repo: https://github.com/pre-commit/mirrors-yapf + - repo: https://github.com/google/yapf rev: v0.32.0 hooks: - id: yapf @@ -50,15 +48,22 @@ repos: additional_dependencies: - mdformat-gfm - mdformat-black - exclude: "README.md|README.zh-CN.md" - - - repo: https://github.com/asottile/yesqa - rev: v1.4.0 - hooks: - - id: yesqa + # exclude: "README.md|README.zh-CN.md|CONTRIBUTING.md" - repo: https://github.com/PyCQA/flake8 rev: 6.0.0 hooks: - id: flake8 name: PEP8 + + - repo: https://github.com/codespell-project/codespell + rev: v2.2.4 + hooks: + - id: codespell + args: + - --ignore-words-list=crate,nd,strack,dota + + #- repo: https://github.com/asottile/yesqa + # rev: v1.4.0 + # hooks: + # - id: yesqa diff --git a/CITATION.cff b/CITATION.cff index 8e2cf1148b92..c277230d922f 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -10,5 +10,5 @@ preferred-citation: version: 7.0 doi: 10.5281/zenodo.3908559 date-released: 2020-5-29 - license: GPL-3.0 + license: AGPL-3.0 url: 
"https://github.com/ultralytics/yolov5" diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 7498f8995d40..95d88b9830d6 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -23,13 +23,13 @@ Select `requirements.txt` to update by clicking on it in GitHub. ### 2. Click 'Edit this file' -Button is in top-right corner. +The button is in the top-right corner.

PR_step2

### 3. Make Changes -Change `matplotlib` version from `3.2.2` to `3.3`. +Change the `matplotlib` version from `3.2.2` to `3.3`.

PR_step3

@@ -62,11 +62,11 @@ To allow your work to be integrated as seamlessly as possible, we advise you to: If you spot a problem with YOLOv5 please submit a Bug Report! For us to start investigating a possible problem we need to be able to reproduce it ourselves first. We've created a few -short guidelines below to help users provide what we need in order to get started. +short guidelines below to help users provide what we need to get started. When asking a question, people will be better able to provide help if you provide **code** that they can easily understand and use to **reproduce** the problem. This is referred to by community members as creating -a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example). Your code that reproduces +a [minimum reproducible example](https://docs.ultralytics.com/help/minimum_reproducible_example/). Your code that reproduces the problem should be: - โœ… **Minimal** โ€“ Use as little code as possible that still produces the same problem @@ -76,18 +76,18 @@ the problem should be: In addition to the above requirements, for [Ultralytics](https://ultralytics.com/) to provide assistance your code should be: -- โœ… **Current** โ€“ Verify that your code is up-to-date with current +- โœ… **Current** โ€“ Verify that your code is up-to-date with the current GitHub [master](https://github.com/ultralytics/yolov5/tree/master), and if necessary `git pull` or `git clone` a new copy to ensure your problem has not already been resolved by previous commits. - โœ… **Unmodified** โ€“ Your problem must be reproducible without any modifications to the codebase in this repository. [Ultralytics](https://ultralytics.com/) does not provide support for custom code โš ๏ธ. If you believe your problem meets all of the above criteria, please close this issue and raise a new one using the ๐Ÿ› -**Bug Report** [template](https://github.com/ultralytics/yolov5/issues/new/choose) and providing -a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example) to help us better +**Bug Report** [template](https://github.com/ultralytics/yolov5/issues/new/choose) and provide +a [minimum reproducible example](https://docs.ultralytics.com/help/minimum_reproducible_example/) to help us better understand and diagnose your problem. ## License By contributing, you agree that your contributions will be licensed under -the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/) +the [AGPL-3.0 license](https://choosealicense.com/licenses/agpl-3.0/) diff --git a/LICENSE b/LICENSE index 92b370f0e0e1..be3f7b28e564 100644 --- a/LICENSE +++ b/LICENSE @@ -1,23 +1,21 @@ -GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 - Copyright (C) 2007 Free Software Foundation, Inc. + Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble - The GNU General Public License is a free, copyleft license for -software and other kinds of works. + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. The licenses for most software and other practical works are designed to take away your freedom to share and change the works. 
By contrast, -the GNU General Public License is intended to guarantee your freedom to +our General Public Licenses are intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free -software for all its users. We, the Free Software Foundation, use the -GNU General Public License for most of our software; it applies also to -any other work released this way by its authors. You can apply it to -your programs, too. +software for all its users. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you @@ -26,44 +24,34 @@ them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. - To protect your rights, we need to prevent others from denying you -these rights or asking you to surrender the rights. Therefore, you have -certain responsibilities if you distribute copies of the software, or if -you modify it: responsibilities to respect the freedom of others. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must pass on to the recipients the same -freedoms that you received. You must make sure that they, too, receive -or can get the source code. And you must show them these terms so they -know their rights. - - Developers that use the GNU GPL protect your rights with two steps: -(1) assert copyright on the software, and (2) offer you this License -giving you legal permission to copy, distribute and/or modify it. - - For the developers' and authors' protection, the GPL clearly explains -that there is no warranty for this free software. For both users' and -authors' sake, the GPL requires that modified versions be marked as -changed, so that their problems will not be attributed erroneously to -authors of previous versions. - - Some devices are designed to deny users access to install or run -modified versions of the software inside them, although the manufacturer -can do so. This is fundamentally incompatible with the aim of -protecting users' freedom to change the software. The systematic -pattern of such abuse occurs in the area of products for individuals to -use, which is precisely where it is most unacceptable. Therefore, we -have designed this version of the GPL to prohibit the practice for those -products. If such problems arise substantially in other domains, we -stand ready to extend this provision to those domains in future versions -of the GPL, as needed to protect the freedom of users. - - Finally, every program is threatened constantly by software patents. -States should not allow patents to restrict development and use of -software on general-purpose computers, but in those that do, we wish to -avoid the special danger that patents applied to a free program could -make it effectively proprietary. To prevent this, the GPL assures that -patents cannot be used to render the program non-free. + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. 
Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. The precise terms and conditions for copying, distribution and modification follow. @@ -72,7 +60,7 @@ modification follow. 0. Definitions. - "This License" refers to version 3 of the GNU General Public License. + "This License" refers to version 3 of the GNU Affero General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. @@ -549,35 +537,45 @@ to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. - 13. Use with the GNU Affero General Public License. + 13. Remote Network Interaction; Use with the GNU General Public License. + + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed -under version 3 of the GNU Affero General Public License into a single +under version 3 of the GNU General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, -but the special requirements of the GNU Affero General Public License, -section 13, concerning interaction through a network will apply to the -combination as such. +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of -the GNU General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to +the GNU Affero General Public License from time to time. 
Such new versions +will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU General +Program specifies that a certain numbered version of the GNU Affero General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the -GNU General Public License, you may choose any version ever published +GNU Affero General Public License, you may choose any version ever published by the Free Software Foundation. If the Program specifies that a proxy can decide which future -versions of the GNU General Public License can be used, that proxy's +versions of the GNU Affero General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. @@ -635,40 +633,29 @@ the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by + it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. + GNU Affero General Public License for more details. - You should have received a copy of the GNU General Public License - along with this program. If not, see . + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see . Also add information on how to contact you by electronic and paper mail. - If the program does terminal interaction, make it output a short -notice like this when it starts in an interactive mode: - - Copyright (C) - This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, your program's commands -might be different; for a GUI interface, you would use an "about box". + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. -For more information on this, and how to apply and follow the GNU GPL, see -. - - The GNU General Public License does not permit incorporating your program -into proprietary programs. 
If your program is a subroutine library, you -may consider it more useful to permit linking proprietary applications with -the library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. But first, please read -. +For more information on this, and how to apply and follow the GNU AGPL, see +. diff --git a/README.md b/README.md index 9ee97321082e..37f683343f53 100644 --- a/README.md +++ b/README.md @@ -1,12 +1,13 @@

- +

- [English](README.md) | [็ฎ€ไฝ“ไธญๆ–‡](README.zh-CN.md) -
-
+[English](README.md) | [็ฎ€ไฝ“ไธญๆ–‡](README.zh-CN.md) +
+ +
YOLOv5 CI YOLOv5 Citation Docker Pulls @@ -19,119 +20,57 @@ YOLOv5 ๐Ÿš€ is the world's most loved vision AI, representing Ultralytics open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development. -To request an Enterprise License please complete the form at Ultralytics Licensing. - -
- - - - - - - - - - - - - - - - - - - - -
-
- +We hope that the resources here will help you get the most out of YOLOv5. Please browse the YOLOv5 Docs for details, raise an issue on GitHub for support, and join our Discord community for questions and discussions! -##
Ultralytics Live Session
+To request an Enterprise License please complete the form at [Ultralytics Licensing](https://ultralytics.com/license).
- -[Ultralytics Live Session Ep. 2](https://youtu.be/LKpuzZllNpA) โœจ will be streaming live on **Tuesday, December 13th at 19:00 CET** with [Joseph Nelson](https://github.com/josephofiowa) of [Roboflow](https://roboflow.com/?ref=ultralytics) who will join us to discuss the brand new Roboflow x Ultralytics HUB integration. Tune in to ask Glenn and Joseph about how you can make speed up workflows with seamless dataset integration! ๐Ÿ”ฅ - - - + + + + + + + + + + + + + + + + + + + +
-##
Segmentation โญ NEW
- -
- -
- -Our new YOLOv5 [release v7.0](https://github.com/ultralytics/yolov5/releases/v7.0) instance segmentation models are the fastest and most accurate in the world, beating all current [SOTA benchmarks](https://paperswithcode.com/sota/real-time-instance-segmentation-on-mscoco). We've made them super simple to train, validate and deploy. See full details in our [Release Notes](https://github.com/ultralytics/yolov5/releases/v7.0) and visit our [YOLOv5 Segmentation Colab Notebook](https://github.com/ultralytics/yolov5/blob/master/segment/tutorial.ipynb) for quickstart tutorials. - -
- Segmentation Checkpoints -
-We trained YOLOv5 segmentations models on COCO for 300 epochs at image size 640 using A100 GPUs. We exported all models to ONNX FP32 for CPU speed tests and to TensorRT FP16 for GPU speed tests. We ran all speed tests on Google [Colab Pro](https://colab.research.google.com/signup) notebooks for easy reproducibility. - -| Model | size
(pixels) | mAPbox
50-95 | mAPmask
50-95 | Train time
300 epochs
A100 (hours) | Speed
ONNX CPU
(ms) | Speed
TRT A100
(ms) | params
(M) | FLOPs
@640 (B) | -|----------------------------------------------------------------------------------------------------|-----------------------|----------------------|-----------------------|-----------------------------------------------|--------------------------------|--------------------------------|--------------------|------------------------| -| [YOLOv5n-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-seg.pt) | 640 | 27.6 | 23.4 | 80:17 | **62.7** | **1.2** | **2.0** | **7.1** | -| [YOLOv5s-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt) | 640 | 37.6 | 31.7 | 88:16 | 173.3 | 1.4 | 7.6 | 26.4 | -| [YOLOv5m-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-seg.pt) | 640 | 45.0 | 37.1 | 108:36 | 427.0 | 2.2 | 22.0 | 70.8 | -| [YOLOv5l-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-seg.pt) | 640 | 49.0 | 39.9 | 66:43 (2x) | 857.4 | 2.9 | 47.9 | 147.7 | -| [YOLOv5x-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-seg.pt) | 640 | **50.7** | **41.4** | 62:56 (3x) | 1579.2 | 4.5 | 88.8 | 265.7 | - -- All checkpoints are trained to 300 epochs with SGD optimizer with `lr0=0.01` and `weight_decay=5e-5` at image size 640 and all default settings.
Runs logged to https://wandb.ai/glenn-jocher/YOLOv5_v70_official -- **Accuracy** values are for single-model single-scale on COCO dataset.
Reproduce by `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt` -- **Speed** averaged over 100 inference images using a [Colab Pro](https://colab.research.google.com/signup) A100 High-RAM instance. Values indicate inference speed only (NMS adds about 1ms per image).
Reproduce by `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt --batch 1` -- **Export** to ONNX at FP32 and TensorRT at FP16 done with `export.py`.
Reproduce by `python export.py --weights yolov5s-seg.pt --include engine --device 0 --half` - -
- -
- Segmentation Usage Examples  Open In Colab - -### Train -YOLOv5 segmentation training supports auto-download COCO128-seg segmentation dataset with `--data coco128-seg.yaml` argument and manual download of COCO-segments dataset with `bash data/scripts/get_coco.sh --train --val --segments` and then `python train.py --data coco.yaml`. - -```bash -# Single-GPU -python segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 - -# Multi-GPU DDP -python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 --device 0,1,2,3 -``` +##
YOLOv8 ๐Ÿš€ NEW
-### Val -Validate YOLOv5s-seg mask mAP on COCO dataset: -```bash -bash data/scripts/get_coco.sh --val --segments # download COCO val segments split (780MB, 5000 images) -python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # validate -``` - -### Predict -Use pretrained YOLOv5m-seg.pt to predict bus.jpg: -```bash -python segment/predict.py --weights yolov5m-seg.pt --data data/images/bus.jpg -``` -```python -model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5m-seg.pt') # load from PyTorch Hub (WARNING: inference not yet supported) -``` +We are thrilled to announce the launch of Ultralytics YOLOv8 ๐Ÿš€, our NEW cutting-edge, state-of-the-art (SOTA) model +released at **[https://github.com/ultralytics/ultralytics](https://github.com/ultralytics/ultralytics)**. +YOLOv8 is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of +object detection, image segmentation and image classification tasks. -![zidane](https://user-images.githubusercontent.com/26833433/203113421-decef4c4-183d-4a0a-a6c2-6435b33bc5d3.jpg) | ![bus](https://user-images.githubusercontent.com/26833433/203113416-11fe0025-69f7-4874-a0a6-65d0bfe2999a.jpg) ---- |--- +See the [YOLOv8 Docs](https://docs.ultralytics.com) for details and get started with: -### Export -Export YOLOv5s-seg model to ONNX and TensorRT: -```bash -python export.py --weights yolov5s-seg.pt --include onnx engine --img 640 --device 0 +```commandline +pip install ultralytics ``` -
- +
+ + +
##
Documentation
-See the [YOLOv5 Docs](https://docs.ultralytics.com) for full documentation on training, testing and deployment. See below for quickstart examples. +See the [YOLOv5 Docs](https://docs.ultralytics.com/yolov5) for full documentation on training, testing and deployment. See below for quickstart examples.
Install @@ -151,17 +90,17 @@ pip install -r requirements.txt # install
Inference -YOLOv5 [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36) inference. [Models](https://github.com/ultralytics/yolov5/tree/master/models) download automatically from the latest +YOLOv5 [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading) inference. [Models](https://github.com/ultralytics/yolov5/tree/master/models) download automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). ```python import torch # Model -model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # or yolov5n - yolov5x6, custom +model = torch.hub.load("ultralytics/yolov5", "yolov5s") # or yolov5n - yolov5x6, custom # Images -img = 'https://ultralytics.com/images/zidane.jpg' # or file, Path, PIL, OpenCV, numpy, list +img = "https://ultralytics.com/images/zidane.jpg" # or file, Path, PIL, OpenCV, numpy, list # Inference results = model(img) @@ -200,7 +139,7 @@ The commands below reproduce YOLOv5 [COCO](https://github.com/ultralytics/yolov5 results. [Models](https://github.com/ultralytics/yolov5/tree/master/models) and [datasets](https://github.com/ultralytics/yolov5/tree/master/data) download automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). Training times for YOLOv5n/s/m/l/x are -1/2/4/6/8 days on a V100 GPU ([Multi-GPU](https://github.com/ultralytics/yolov5/issues/475) times faster). Use the +1/2/4/6/8 days on a V100 GPU ([Multi-GPU](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training) times faster). Use the largest `--batch-size` possible, or pass `--batch-size -1` for YOLOv5 [AutoBatch](https://github.com/ultralytics/yolov5/pull/5092). Batch sizes shown for V100-16GB. @@ -219,61 +158,57 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml -
Tutorials -- [Train Custom Data](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data)ย  ๐Ÿš€ RECOMMENDED -- [Tips for Best Training Results](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results)ย  โ˜˜๏ธ - RECOMMENDED -- [Multi-GPU Training](https://github.com/ultralytics/yolov5/issues/475) -- [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36) ๐ŸŒŸ NEW -- [TFLite, ONNX, CoreML, TensorRT Export](https://github.com/ultralytics/yolov5/issues/251) ๐Ÿš€ -- [NVIDIA Jetson Nano Deployment](https://github.com/ultralytics/yolov5/issues/9627) ๐ŸŒŸ NEW -- [Test-Time Augmentation (TTA)](https://github.com/ultralytics/yolov5/issues/303) -- [Model Ensembling](https://github.com/ultralytics/yolov5/issues/318) -- [Model Pruning/Sparsity](https://github.com/ultralytics/yolov5/issues/304) -- [Hyperparameter Evolution](https://github.com/ultralytics/yolov5/issues/607) -- [Transfer Learning with Frozen Layers](https://github.com/ultralytics/yolov5/issues/1314) -- [Architecture Summary](https://github.com/ultralytics/yolov5/issues/6998) ๐ŸŒŸ NEW -- [Roboflow for Datasets, Labeling, and Active Learning](https://github.com/ultralytics/yolov5/issues/4975)ย  ๐ŸŒŸ NEW -- [ClearML Logging](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) ๐ŸŒŸ NEW -- [Deci Platform](https://github.com/ultralytics/yolov5/wiki/Deci-Platform) ๐ŸŒŸ NEW -- [Comet Logging](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet) ๐ŸŒŸ NEW +- [Train Custom Data](https://docs.ultralytics.com/yolov5/tutorials/train_custom_data) ๐Ÿš€ RECOMMENDED +- [Tips for Best Training Results](https://docs.ultralytics.com/yolov5/tutorials/tips_for_best_training_results) โ˜˜๏ธ +- [Multi-GPU Training](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training) +- [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading) ๐ŸŒŸ NEW +- [TFLite, ONNX, CoreML, TensorRT Export](https://docs.ultralytics.com/yolov5/tutorials/model_export) ๐Ÿš€ +- [NVIDIA Jetson platform Deployment](https://docs.ultralytics.com/yolov5/tutorials/running_on_jetson_nano) ๐ŸŒŸ NEW +- [Test-Time Augmentation (TTA)](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation) +- [Model Ensembling](https://docs.ultralytics.com/yolov5/tutorials/model_ensembling) +- [Model Pruning/Sparsity](https://docs.ultralytics.com/yolov5/tutorials/model_pruning_and_sparsity) +- [Hyperparameter Evolution](https://docs.ultralytics.com/yolov5/tutorials/hyperparameter_evolution) +- [Transfer Learning with Frozen Layers](https://docs.ultralytics.com/yolov5/tutorials/transfer_learning_with_frozen_layers) +- [Architecture Summary](https://docs.ultralytics.com/yolov5/tutorials/architecture_description) ๐ŸŒŸ NEW +- [Roboflow for Datasets, Labeling, and Active Learning](https://docs.ultralytics.com/yolov5/tutorials/roboflow_datasets_integration) +- [ClearML Logging](https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration) ๐ŸŒŸ NEW +- [YOLOv5 with Neural Magic's Deepsparse](https://docs.ultralytics.com/yolov5/tutorials/neural_magic_pruning_quantization) ๐ŸŒŸ NEW +- [Comet Logging](https://docs.ultralytics.com/yolov5/tutorials/comet_logging_integration) ๐ŸŒŸ NEW
- ##
Integrations

- +

- - + + - - - - - - - + + + + + + +
-|Roboflow|ClearML โญ NEW|Comet โญ NEW|Deci โญ NEW| -|:-:|:-:|:-:|:-:| -|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Free forever, [Comet](https://bit.ly/yolov5-readme-comet) lets you save YOLOv5 models, resume training, and interactively visualise and debug predictions|Automatically compile and quantize YOLOv5 for better inference performance in one click at [Deci](https://bit.ly/yolov5-deci-platform)| - +| Roboflow | ClearML โญ NEW | Comet โญ NEW | Neural Magic โญ NEW | +| :--------------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------: | +| Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) | Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!) | Free forever, [Comet](https://bit.ly/yolov5-readme-comet2) lets you save YOLOv5 models, resume training, and interactively visualise and debug predictions | Run YOLOv5 inference up to 6x faster with [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic) | ##
Ultralytics HUB
-[Ultralytics HUB](https://bit.ly/ultralytics_hub) is our โญ **NEW** no-code solution to visualize datasets, train YOLOv5 ๐Ÿš€ models, and deploy to the real world in a seamless experience. Get started for **Free** now! +Experience seamless AI with [Ultralytics HUB](https://bit.ly/ultralytics_hub) โญ, the all-in-one solution for data visualization, YOLOv5 and YOLOv8 ๐Ÿš€ model training and deployment, without any coding. Transform images into actionable insights and bring your AI visions to life with ease using our cutting-edge platform and user-friendly [Ultralytics App](https://ultralytics.com/app_install). Start your journey for **Free** now! - - + ##
Why YOLOv5
@@ -297,19 +232,19 @@ YOLOv5 has been designed to be super easy to get started and simple to learn. We ### Pretrained Checkpoints -| Model | size
(pixels) | mAPval
50-95 | mAPval
50 | Speed
CPU b1
(ms) | Speed
V100 b1
(ms) | Speed
V100 b32
(ms) | params
(M) | FLOPs
@640 (B) | -|------------------------------------------------------------------------------------------------------|-----------------------|----------------------|-------------------|------------------------------|-------------------------------|--------------------------------|--------------------|------------------------| -| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** | -| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 | -| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 | -| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 | -| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 | -| | | | | | | | | | -| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 | -| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | -| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | -| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | -| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x6.pt)
+ [TTA][tta] | 1280
1536 | 55.0
**55.8** | 72.7
**72.7** | 3136
- | 26.2
- | 19.4
- | 140.7
- | 209.8
- | +| Model | size
(pixels) | mAPval
50-95 | mAPval
50 | Speed
CPU b1
(ms) | Speed
V100 b1
(ms) | Speed
V100 b32
(ms) | params
(M) | FLOPs
@640 (B) | +| ----------------------------------------------------------------------------------------------- | --------------------- | -------------------- | ----------------- | ---------------------------- | ----------------------------- | ------------------------------ | ------------------ | ---------------------- | +| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** | +| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 | +| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 | +| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 | +| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 | +| | | | | | | | | | +| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 | +| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | +| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | +| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | +| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x6.pt)
+ [TTA] | 1280
1536 | 55.0
**55.8** | 72.7
**72.7** | 3136
- | 26.2
- | 19.4
- | 140.7
- | 209.8
- |
Table Notes @@ -317,12 +252,91 @@ YOLOv5 has been designed to be super easy to get started and simple to learn. We - All checkpoints are trained to 300 epochs with default settings. Nano and Small models use [hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml) hyps, all others use [hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml). - **mAPval** values are for single-model single-scale on [COCO val2017](http://cocodataset.org) dataset.
Reproduce by `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65`
- **Speed** averaged over COCO val images using an [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) instance. NMS times (~1 ms/img) not included.<br>
Reproduce by `python val.py --data coco.yaml --img 640 --task speed --batch 1` -- **TTA** [Test Time Augmentation](https://github.com/ultralytics/yolov5/issues/303) includes reflection and scale augmentations.
Reproduce by `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment` +- **TTA** [Test Time Augmentation](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation) includes reflection and scale augmentations.
Reproduce by `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`
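+
+For a quick interactive check, TTA can also be switched on through the PyTorch Hub interface. A minimal sketch (illustrative only, not an official snippet; `augment=True` applies the same reflection and scale augmentation used by `val.py --augment`):
+
+```python
+import torch
+
+model = torch.hub.load("ultralytics/yolov5", "yolov5s")  # standard Hub load
+img = "https://ultralytics.com/images/zidane.jpg"
+results = model(img, augment=True)  # Test-Time Augmentation on a single image
+results.print()
+```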
+##
Segmentation
-##
Classification โญ NEW
+Our new YOLOv5 [release v7.0](https://github.com/ultralytics/yolov5/releases/v7.0) instance segmentation models are the fastest and most accurate in the world, beating all current [SOTA benchmarks](https://paperswithcode.com/sota/real-time-instance-segmentation-on-mscoco). We've made them super simple to train, validate and deploy. See full details in our [Release Notes](https://github.com/ultralytics/yolov5/releases/v7.0) and visit our [YOLOv5 Segmentation Colab Notebook](https://github.com/ultralytics/yolov5/blob/master/segment/tutorial.ipynb) for quickstart tutorials. + +
+ Segmentation Checkpoints + +
+ + +
+ +We trained YOLOv5 segmentation models on COCO for 300 epochs at image size 640 using A100 GPUs. We exported all models to ONNX FP32 for CPU speed tests and to TensorRT FP16 for GPU speed tests. We ran all speed tests on Google [Colab Pro](https://colab.research.google.com/signup) notebooks for easy reproducibility. + +| Model | size<br>
(pixels) | mAPbox
50-95 | mAPmask
50-95 | Train time
300 epochs
A100 (hours) | Speed
ONNX CPU
(ms) | Speed
TRT A100
(ms) | params
(M) | FLOPs
@640 (B) | +| ------------------------------------------------------------------------------------------ | --------------------- | -------------------- | --------------------- | --------------------------------------------- | ------------------------------ | ------------------------------ | ------------------ | ---------------------- | +| [YOLOv5n-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-seg.pt) | 640 | 27.6 | 23.4 | 80:17 | **62.7** | **1.2** | **2.0** | **7.1** | +| [YOLOv5s-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt) | 640 | 37.6 | 31.7 | 88:16 | 173.3 | 1.4 | 7.6 | 26.4 | +| [YOLOv5m-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-seg.pt) | 640 | 45.0 | 37.1 | 108:36 | 427.0 | 2.2 | 22.0 | 70.8 | +| [YOLOv5l-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-seg.pt) | 640 | 49.0 | 39.9 | 66:43 (2x) | 857.4 | 2.9 | 47.9 | 147.7 | +| [YOLOv5x-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-seg.pt) | 640 | **50.7** | **41.4** | 62:56 (3x) | 1579.2 | 4.5 | 88.8 | 265.7 | + +- All checkpoints are trained to 300 epochs with SGD optimizer with `lr0=0.01` and `weight_decay=5e-5` at image size 640 and all default settings.
Runs logged to https://wandb.ai/glenn-jocher/YOLOv5_v70_official +- **Accuracy** values are for single-model single-scale on COCO dataset.
Reproduce by `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt` +- **Speed** averaged over 100 inference images using a [Colab Pro](https://colab.research.google.com/signup) A100 High-RAM instance. Values indicate inference speed only (NMS adds about 1ms per image).
Reproduce by `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt --batch 1` +- **Export** to ONNX at FP32 and TensorRT at FP16 done with `export.py`.
Reproduce by `python export.py --weights yolov5s-seg.pt --include engine --device 0 --half` + +
+ +
+ Segmentation Usage Examples  Open In Colab + +### Train + +YOLOv5 segmentation training supports auto-download COCO128-seg segmentation dataset with `--data coco128-seg.yaml` argument and manual download of COCO-segments dataset with `bash data/scripts/get_coco.sh --train --val --segments` and then `python train.py --data coco.yaml`. + +```bash +# Single-GPU +python segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 + +# Multi-GPU DDP +python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 --device 0,1,2,3 +``` + +### Val + +Validate YOLOv5s-seg mask mAP on COCO dataset: + +```bash +bash data/scripts/get_coco.sh --val --segments # download COCO val segments split (780MB, 5000 images) +python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # validate +``` + +### Predict + +Use pretrained YOLOv5m-seg.pt to predict bus.jpg: + +```bash +python segment/predict.py --weights yolov5m-seg.pt --source data/images/bus.jpg +``` + +```python +model = torch.hub.load( + "ultralytics/yolov5", "custom", "yolov5m-seg.pt" +) # load from PyTorch Hub (WARNING: inference not yet supported) +``` + +| ![zidane](https://user-images.githubusercontent.com/26833433/203113421-decef4c4-183d-4a0a-a6c2-6435b33bc5d3.jpg) | ![bus](https://user-images.githubusercontent.com/26833433/203113416-11fe0025-69f7-4874-a0a6-65d0bfe2999a.jpg) | +| ---------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | + +### Export + +Export YOLOv5s-seg model to ONNX and TensorRT: + +```bash +python export.py --weights yolov5s-seg.pt --include onnx engine --img 640 --device 0 +``` + +
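+
+The exported ONNX model can be sanity-checked from plain ONNX Runtime before deployment. A minimal sketch (assumes the default 640x640 FP32 export produced by the command above; letterbox preprocessing and NMS postprocessing are omitted for brevity):
+
+```python
+import numpy as np
+import onnxruntime as ort
+
+session = ort.InferenceSession("yolov5s-seg.onnx", providers=["CPUExecutionProvider"])
+image = np.zeros((1, 3, 640, 640), dtype=np.float32)  # stand-in for a normalized RGB batch
+outputs = session.run(None, {session.get_inputs()[0].name: image})
+print([o.shape for o in outputs])  # detection outputs plus mask prototypes
+```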
+ +##
Classification
YOLOv5 [release v6.2](https://github.com/ultralytics/yolov5/releases) brings support for classification model training, validation and deployment! See full details in our [Release Notes](https://github.com/ultralytics/yolov5/releases/v6.2) and visit our [YOLOv5 Classification Colab Notebook](https://github.com/ultralytics/yolov5/blob/master/classify/tutorial.ipynb) for quickstart tutorials. @@ -334,22 +348,22 @@ YOLOv5 [release v6.2](https://github.com/ultralytics/yolov5/releases) brings sup We trained YOLOv5-cls classification models on ImageNet for 90 epochs using a 4xA100 instance, and we trained ResNet and EfficientNet models alongside with the same default training settings to compare. We exported all models to ONNX FP32 for CPU speed tests and to TensorRT FP16 for GPU speed tests. We ran all speed tests on Google [Colab Pro](https://colab.research.google.com/signup) for easy reproducibility. | Model | size
(pixels) | acc
top1 | acc
top5 | Training
90 epochs
4xA100 (hours) | Speed
ONNX CPU
(ms) | Speed
TensorRT V100
(ms) | params
(M) | FLOPs
@224 (B) | -|----------------------------------------------------------------------------------------------------|-----------------------|------------------|------------------|----------------------------------------------|--------------------------------|-------------------------------------|--------------------|------------------------| -| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** | -| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 | -| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 | -| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 | -| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 | -| | -| [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 | -| [ResNet34](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 | -| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 | -| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 | -| | -| [EfficientNet_b0](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 | -| [EfficientNet_b1](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 | -| [EfficientNet_b2](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 | -| [EfficientNet_b3](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 | +| -------------------------------------------------------------------------------------------------- | --------------------- | ---------------- | ---------------- | -------------------------------------------- | ------------------------------ | ----------------------------------- | ------------------ | ---------------------- | +| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** | +| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 | +| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 | +| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 | +| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 | +| | | | | | | | | | +| 
[ResNet18](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 | +| [ResNet34](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 | +| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 | +| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 | +| | | | | | | | | | +| [EfficientNet_b0](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 | +| [EfficientNet_b1](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 | +| [EfficientNet_b2](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 | +| [EfficientNet_b3](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 |
Table Notes (click to expand) @@ -358,6 +372,7 @@ We trained YOLOv5-cls classification models on ImageNet for 90 epochs using a 4x - **Accuracy** values are for single-model single-scale on [ImageNet-1k](https://www.image-net.org/index.php) dataset.
Reproduce by `python classify/val.py --data ../datasets/imagenet --img 224` - **Speed** averaged over 100 inference images using a Google [Colab Pro](https://colab.research.google.com/signup) V100 High-RAM instance.
Reproduce by `python classify/val.py --data ../datasets/imagenet --img 224 --batch 1` - **Export** to ONNX at FP32 and TensorRT at FP16 done with `export.py`.
Reproduce by `python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224` +
@@ -365,6 +380,7 @@ We trained YOLOv5-cls classification models on ImageNet for 90 epochs using a 4x Classification Usage Examples  Open In Colab ### Train + YOLOv5 classification training supports auto-download of MNIST, Fashion-MNIST, CIFAR10, CIFAR100, Imagenette, Imagewoof, and ImageNet datasets with the `--data` argument. To start training on MNIST for example use `--data mnist`. ```bash @@ -376,28 +392,37 @@ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/trai ``` ### Val + Validate YOLOv5m-cls accuracy on ImageNet-1k dataset: + ```bash bash data/scripts/get_imagenet.sh --val # download ImageNet val split (6.3G, 50000 images) python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224 # validate ``` ### Predict + Use pretrained YOLOv5s-cls.pt to predict bus.jpg: + ```bash -python classify/predict.py --weights yolov5s-cls.pt --data data/images/bus.jpg +python classify/predict.py --weights yolov5s-cls.pt --source data/images/bus.jpg ``` + ```python -model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s-cls.pt') # load from PyTorch Hub +model = torch.hub.load( + "ultralytics/yolov5", "custom", "yolov5s-cls.pt" +) # load from PyTorch Hub ``` ### Export + Export a group of trained YOLOv5s-cls, ResNet and EfficientNet models to ONNX and TensorRT: + ```bash python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --include onnx engine --img 224 ``` -
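+
+The Hub call above only loads the classifier; unlike the detection weights it is not AutoShape-wrapped, so it expects preprocessed BCHW tensors. A minimal sketch (assumes a 224x224 input and raw logits over the 1,000 ImageNet classes):
+
+```python
+import torch
+
+model = torch.hub.load("ultralytics/yolov5", "custom", "yolov5s-cls.pt")
+model.eval()
+x = torch.zeros(1, 3, 224, 224)  # stand-in for a preprocessed image batch
+with torch.no_grad():
+    probs = model(x).softmax(dim=1)  # convert logits to class probabilities
+print(int(probs.argmax()))  # predicted ImageNet class index
+```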
+ ##
Environments
@@ -406,73 +431,65 @@ Get started in seconds with our verified environments. Click each icon below for
- + - + - + - - + + - - + +
-##
App
- -Run YOLOv5 models on your iOS or Android device by downloading the [Ultralytics App](https://ultralytics.com/app_install)! - - -Ultralytics mobile app - - ##
Contribute
-We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible. Please see our [Contributing Guide](CONTRIBUTING.md) to get started, and fill out the [YOLOv5 Survey](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experiences. Thank you to all our contributors! +We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible. Please see our [Contributing Guide](https://docs.ultralytics.com/help/contributing/) to get started, and fill out the [YOLOv5 Survey](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experiences. Thank you to all our contributors! -
+ + ##
License
YOLOv5 is available under two different licenses: -- **GPL-3.0 License**: See [LICENSE](https://github.com/ultralytics/yolov5/blob/master/LICENSE) file for details. -- **Enterprise License**: Provides greater flexibility for commercial product development without the open-source requirements of GPL-3.0. Typical use cases are embedding Ultralytics software and AI models in commercial products and applications. Request an Enterprise License at [Ultralytics Licensing](https://ultralytics.com/license). - +- **AGPL-3.0 License**: See [LICENSE](https://github.com/ultralytics/yolov5/blob/master/LICENSE) file for details. +- **Enterprise License**: Provides greater flexibility for commercial product development without the open-source requirements of AGPL-3.0. Typical use cases are embedding Ultralytics software and AI models in commercial products and applications. Request an Enterprise License at [Ultralytics Licensing](https://ultralytics.com/license). ##
Contact
-For YOLOv5 bugs and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues). For professional support please [Contact Us](https://ultralytics.com/contact). +For YOLOv5 bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues), and join our [Discord](https://discord.gg/n6cFeSPZdD) community for questions and discussions!
- - + + - - + + - - - - - + + - - - - - + + + + + - + + + +
-[tta]: https://github.com/ultralytics/yolov5/issues/303 +[tta]: https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation diff --git a/README.zh-CN.md b/README.zh-CN.md index 0fc77565c5ef..da60d3fe0573 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -1,12 +1,12 @@

- +

-[่‹ฑ่ฏญ](README.md)\|[็ฎ€ไฝ“ไธญๆ–‡](README.zh-CN.md)
+[่‹ฑๆ–‡](README.md)|[็ฎ€ไฝ“ไธญๆ–‡](README.zh-CN.md)
-
+
YOLOv5 CI YOLOv5 Citation Docker Pulls @@ -17,132 +17,62 @@

-YOLOv5 ๐Ÿš€ ๆ˜ฏไธ–็•ŒไธŠๆœ€ๅ—ๆฌข่ฟŽ็š„่ง†่ง‰ AI๏ผŒไปฃ่กจ่ถ…ๅŠ›ๅฏนๆœชๆฅ่ง†่ง‰ AI ๆ–นๆณ•็š„ๅผ€ๆบ็ ”็ฉถ๏ผŒ็ป“ๅˆๅœจๆ•ฐๅƒๅฐๆ—ถ็š„็ ”็ฉถๅ’Œๅผ€ๅ‘ไธญ็งฏ็ดฏ็š„็ป้ชŒๆ•™่ฎญๅ’Œๆœ€ไฝณๅฎž่ทตใ€‚ - -่ฆ็”ณ่ฏทไผไธš่ฎธๅฏ่ฏ๏ผŒ่ฏทๅกซๅ†™่กจๆ ผUltralytics ่ฎธๅฏ. - -
- - - - - - - - - - - - - - - - - - - - -
-
- -##
Ultralytics ็Žฐๅœบไผš่ฎฎ
- -
- -[Ultralytics Live Session Epใ€‚ 2ไธช](https://youtu.be/LKpuzZllNpA)โœจๅฐ†็›ดๆ’ญ**ๆฌงๆดฒไธญ้ƒจๆ—ถ้—ด 12 ๆœˆ 13 ๆ—ฅๆ˜ŸๆœŸไบŒ 19:00**ๅ’Œ[็บฆ็‘Ÿๅคซยท็บณๅฐ”้€Š](https://github.com/josephofiowa)็š„[ๆœบๅ™จไบบๆต](https://roboflow.com/?ref=ultralytics)่ฐๅฐ†ไธŽๆˆ‘ไปฌไธ€่ตท่ฎจ่ฎบๅ…จๆ–ฐ็š„ Roboflow x Ultralytics HUB ้›†ๆˆใ€‚ๆ”ถๅฌ Glenn ๅ’Œ Joseph ่ฏข้—ฎๅฆ‚ไฝ•้€š่ฟ‡ๆ— ็ผๆ•ฐๆฎ้›†้›†ๆˆๆฅๅŠ ๅฟซๅทฅไฝœๆต็จ‹๏ผ ๐Ÿ”ฅ +YOLOv5 ๐Ÿš€ ๆ˜ฏไธ–็•ŒไธŠๆœ€ๅ—ๆฌข่ฟŽ็š„่ง†่ง‰ AI๏ผŒไปฃ่กจ Ultralytics ๅฏนๆœชๆฅ่ง†่ง‰ AI ๆ–นๆณ•็š„ๅผ€ๆบ็ ”็ฉถ๏ผŒ็ป“ๅˆๅœจๆ•ฐๅƒๅฐๆ—ถ็š„็ ”็ฉถๅ’Œๅผ€ๅ‘ไธญ็งฏ็ดฏ็š„็ป้ชŒๆ•™่ฎญๅ’Œๆœ€ไฝณๅฎž่ทตใ€‚ - - -
+ๆˆ‘ไปฌๅธŒๆœ›่ฟ™้‡Œ็š„่ต„ๆบ่ƒฝๅธฎๅŠฉๆ‚จๅ……ๅˆ†ๅˆฉ็”จ YOLOv5ใ€‚่ฏทๆต่งˆ YOLOv5 ๆ–‡ๆกฃ ไบ†่งฃ่ฏฆ็ป†ไฟกๆฏ๏ผŒๅœจ GitHub ไธŠๆไบค้—ฎ้ข˜ไปฅ่Žทๅพ—ๆ”ฏๆŒ๏ผŒๅนถๅŠ ๅ…ฅๆˆ‘ไปฌ็š„ Discord ็คพๅŒบ่ฟ›่กŒ้—ฎ้ข˜ๅ’Œ่ฎจ่ฎบ๏ผ -##
็ป†ๅˆ† โญ ๆ–ฐ
+ๅฆ‚้œ€็”ณ่ฏทไผไธš่ฎธๅฏ๏ผŒ่ฏทๅœจ [Ultralytics Licensing](https://ultralytics.com/license) ๅค„ๅกซๅ†™่กจๆ ผ
- - + + + + + + + + + + + + + + + + + + + + +
-ๆˆ‘ไปฌๆ–ฐ็š„ YOLOv5[ๅ‘ๅธƒ v7.0](https://github.com/ultralytics/yolov5/releases/v7.0)ๅฎžไพ‹ๅˆ†ๅ‰ฒๆจกๅž‹ๆ˜ฏไธ–็•ŒไธŠๆœ€ๅฟซๅ’Œๆœ€ๅ‡†็กฎ็š„๏ผŒๅ‡ป่ดฅๆ‰€ๆœ‰ๅฝ“ๅ‰[SOTA ๅŸบๅ‡†](https://paperswithcode.com/sota/real-time-instance-segmentation-on-mscoco).ๆˆ‘ไปฌไฝฟๅฎƒไปฌ้žๅธธๆ˜“ไบŽ่ฎญ็ปƒใ€้ชŒ่ฏๅ’Œ้ƒจ็ฝฒใ€‚ๆŸฅ็œ‹ๆˆ‘ไปฌ็š„ๅฎŒๆ•ด่ฏฆ็ป†ไฟกๆฏ[ๅ‘่กŒ่ฏดๆ˜Ž](https://github.com/ultralytics/yolov5/releases/v7.0)ๅนถ่ฎฟ้—ฎๆˆ‘ไปฌ็š„[YOLOv5 ๅˆ†ๅ‰ฒ Colab ็ฌ”่ฎฐๆœฌ](https://github.com/ultralytics/yolov5/blob/master/segment/tutorial.ipynb)ๅฟซ้€Ÿๅ…ฅ้—จๆ•™็จ‹ใ€‚ - -
- Segmentation Checkpoints - -
- -ๆˆ‘ไปฌไฝฟ็”จ A100 GPU ๅœจ COCO ไธŠไปฅ 640 ๅ›พๅƒๅคงๅฐ่ฎญ็ปƒไบ† 300 ไธชๆ—ถๆœŸ็š„ YOLOv5 ๅˆ†ๅ‰ฒๆจกๅž‹ใ€‚ๆˆ‘ไปฌๅฐ†ๆ‰€ๆœ‰ๆจกๅž‹ๅฏผๅ‡บๅˆฐ ONNX FP32 ไปฅ่ฟ›่กŒ CPU ้€Ÿๅบฆๆต‹่ฏ•๏ผŒๅนถๅฏผๅ‡บๅˆฐ TensorRT FP16 ไปฅ่ฟ›่กŒ GPU ้€Ÿๅบฆๆต‹่ฏ•ใ€‚ๆˆ‘ไปฌๅœจ Google ไธŠ่ฟ›่กŒไบ†ๆ‰€ๆœ‰้€Ÿๅบฆๆต‹่ฏ•[ๅไฝœไธด](https://colab.research.google.com/signup)ไพฟไบŽ้‡็Žฐ็š„็ฌ”่ฎฐๆœฌใ€‚ - -| ๆจกๅž‹ | ๅฐบๅฏธ
๏ผˆๅƒ็ด ๏ผ‰ | ๅœฐๅ›พ็›’ๅญ
50-95 | ๅœฐๅ›พ้ขๅ…ท
50-95 | ็ซ่ฝฆๆ—ถ้—ด
300ไธช็บชๅ…ƒ
A100๏ผˆๅฐๆ—ถ๏ผ‰ | ้€Ÿๅบฆ
ONNX ไธญๅคฎๅค„็†ๅ™จ
๏ผˆๅฐๅง๏ผ‰ | ้€Ÿๅบฆ
ๅŒไปๅ ‚A100
๏ผˆๅฐๅง๏ผ‰ | ๅ‚ๆ•ฐ
(็”ท) | ๅคฑ่ดฅ่€…
@640๏ผˆไบŒ๏ผ‰ | -| ------------------------------------------------------------------------------------------ | --------------- | ------------------ | ------------------ | ------------------------------- | ----------------------------- | -------------------------- | -------------- | ------------------- | -| [YOLOv5n-se](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-seg.pt) | 640 | 27.6 | 23.4 | 80:17 | **62.7** | **1.2** | **2.0** | **7.1** | -| [YOLOv5s-se](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt) | 640 | 37.6 | 31.7 | 88:16 | 173.3 | 1.4 | 7.6 | 26.4 | -| [YOLOv5mๆฎต](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-seg.pt) | 640 | 45.0 | 37.1 | 108:36 | 427.0 | 2.2 | 22.0 | 70.8 | -| [YOLOv5l-se](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-seg.pt) | 640 | 49.0 | 39.9 | ๆˆ‘๏ผš43๏ผˆX๏ผ‰ | 857.4 | 2.9 | 47.9 | 147.7 | -| [YOLOv5x-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-seg.pt) | 640 | **50.7** | **41.4** | 62:56 (zks) | 1579.2 | 4.5 | 88.8 | 265.7 | - -- ไฝฟ็”จ SGD ไผ˜ๅŒ–ๅ™จๅฐ†ๆ‰€ๆœ‰ๆฃ€ๆŸฅ็‚น่ฎญ็ปƒๅˆฐ 300 ไธชๆ—ถๆœŸ`lr0=0.01`ๅ’Œ`weight_decay=5e-5`ๅœจๅ›พๅƒๅคงๅฐ 640 ๅ’Œๆ‰€ๆœ‰้ป˜่ฎค่ฎพ็ฝฎใ€‚
่ฟ่กŒ่ฎฐๅฝ•ๅˆฐ[HTTPS://็Žฉ่ฑ†็“ฃ.็ˆฑ/Glenn-ๅฐฑocher/yo lo V5_V70_official](https://wandb.ai/glenn-jocher/YOLOv5_v70_official) -- **ๅ‡†็กฎๆ€ง**ๅ€ผ้€‚็”จไบŽ COCO ๆ•ฐๆฎ้›†ไธŠ็š„ๅ•ๆจกๅž‹ๅ•ๅฐบๅบฆใ€‚
้‡็Žฐ่€…`python segment/val.py --data coco.yaml --weights yolov5s-seg.pt` -- **้€Ÿๅบฆ**ไฝฟ็”จ a ๅฏน่ถ…่ฟ‡ 100 ไธชๆŽจ็†ๅ›พๅƒ่ฟ›่กŒๅนณๅ‡[ๅไฝœไธด](https://colab.research.google.com/signup)A100 ้ซ˜ RAM ๅฎžไพ‹ใ€‚ๅ€ผไป…่กจ็คบๆŽจ็†้€Ÿๅบฆ๏ผˆNMS ๆฏๅผ ๅ›พๅƒๅขžๅŠ ็บฆ 1 ๆฏซ็ง’๏ผ‰ใ€‚
้‡็Žฐ่€…`python segment/val.py --data coco.yaml --weights yolov5s-seg.pt --batch 1` -- **ๅ‡บๅฃ**ๅˆฐ FP32 ็š„ ONNX ๅ’Œ FP16 ็š„ TensorRT ๅฎŒๆˆ`export.py`.
้‡็Žฐ่€…`python export.py --weights yolov5s-seg.pt --include engine --device 0 --half` - -
- -
- Segmentation Usage Examples  Open In Colab - -### ็ซ่ฝฆ - -YOLOv5ๅˆ†ๅ‰ฒ่ฎญ็ปƒๆ”ฏๆŒ่‡ชๅŠจไธ‹่ฝฝCOCO128-segๅˆ†ๅ‰ฒๆ•ฐๆฎ้›†`--data coco128-seg.yaml`COCO-segments ๆ•ฐๆฎ้›†็š„ๅ‚ๆ•ฐๅ’Œๆ‰‹ๅŠจไธ‹่ฝฝ`bash data/scripts/get_coco.sh --train --val --segments`ๆŽฅ็€`python train.py --data coco.yaml`. - -```bash -# Single-GPU -python segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 - -# Multi-GPU DDP -python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 --device 0,1,2,3 -``` - -### ็“ฆๅฐ” - -ๅœจ COCO ๆ•ฐๆฎ้›†ไธŠ้ชŒ่ฏ YOLOv5s-seg mask mAP๏ผš - -```bash -bash data/scripts/get_coco.sh --val --segments # download COCO val segments split (780MB, 5000 images) -python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # validate -``` - -### ้ข„ๆต‹ - -ไฝฟ็”จ้ข„่ฎญ็ปƒ็š„ YOLOv5m-seg.pt ๆฅ้ข„ๆต‹ bus.jpg๏ผš - -```bash -python segment/predict.py --weights yolov5m-seg.pt --data data/images/bus.jpg -``` - -```python -model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5m-seg.pt') # load from PyTorch Hub (WARNING: inference not yet supported) -``` - -| ![zidane](https://user-images.githubusercontent.com/26833433/203113421-decef4c4-183d-4a0a-a6c2-6435b33bc5d3.jpg) | ![bus](https://user-images.githubusercontent.com/26833433/203113416-11fe0025-69f7-4874-a0a6-65d0bfe2999a.jpg) | -| ---------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | +##
YOLOv8 ๐Ÿš€ NEW
-### ๅ‡บๅฃ +We are thrilled to announce the launch of Ultralytics YOLOv8 ๐Ÿš€, our NEW cutting-edge, state-of-the-art (SOTA) model +released at **[https://github.com/ultralytics/ultralytics](https://github.com/ultralytics/ultralytics)**. +YOLOv8 is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of +object detection, image segmentation and image classification tasks. -ๅฐ† YOLOv5s-seg ๆจกๅž‹ๅฏผๅ‡บๅˆฐ ONNX ๅ’Œ TensorRT๏ผš +See the [YOLOv8 Docs](https://docs.ultralytics.com) for details and get started with: -```bash -python export.py --weights yolov5s-seg.pt --include onnx engine --img 640 --device 0 +```commandline +pip install ultralytics ``` -
+
+ + +
##
ๆ–‡ๆกฃ
-่ง[YOLOv5 ๆ–‡ๆกฃ](https://docs.ultralytics.com)ๆœ‰ๅ…ณๅŸน่ฎญใ€ๆต‹่ฏ•ๅ’Œ้ƒจ็ฝฒ็š„ๅฎŒๆ•ดๆ–‡ๆกฃใ€‚่ฏทๅ‚้˜…ไธ‹้ข็š„ๅฟซ้€Ÿๅ…ฅ้—จ็คบไพ‹ใ€‚ +ๆœ‰ๅ…ณ่ฎญ็ปƒใ€ๆต‹่ฏ•ๅ’Œ้ƒจ็ฝฒ็š„ๅฎŒๆ•ดๆ–‡ๆกฃ่ง[YOLOv5 ๆ–‡ๆกฃ](https://docs.ultralytics.com)ใ€‚่ฏทๅ‚้˜…ไธ‹้ข็š„ๅฟซ้€Ÿๅ…ฅ้—จ็คบไพ‹ใ€‚
-Install +ๅฎ‰่ฃ…
-ๅ…‹้š†ๅ›ž่ดญๅนถๅฎ‰่ฃ…[่ฆๆฑ‚.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt)ๅœจไธ€ไธช[**Python>=3.7.0**](https://www.python.org/)็Žฏๅขƒ๏ผŒๅŒ…ๆ‹ฌ[**PyTorch>=1.7**](https://pytorch.org/get-started/locally/).
+ๅ…‹้š† repo๏ผŒๅนถๅœจ [**Python>=3.7.0**](https://www.python.org/) ็Žฏๅขƒไธญๅฎ‰่ฃ… [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt)๏ผŒๅ…ถไธญๅŒ…ๆ‹ฌ [**PyTorch>=1.7**](https://pytorch.org/get-started/locally/)ใ€‚
```bash
git clone https://github.com/ultralytics/yolov5 # clone
@@ -153,19 +83,19 @@ pip install -r requirements.txt # install<br>
-Inference +ๆŽจ็† -YOLOv5[PyTorch ไธญๅฟƒ](https://github.com/ultralytics/yolov5/issues/36)ๆŽจ็†ใ€‚[ๆฅทๆจก](https://github.com/ultralytics/yolov5/tree/master/models)่‡ชๅŠจไปŽๆœ€ๆ–ฐไธ‹่ฝฝ -YOLOv5[ๅ‘ๅธƒ](https://github.com/ultralytics/yolov5/releases). +ไฝฟ็”จ YOLOv5 [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading) ๆŽจ็†ใ€‚ๆœ€ๆ–ฐ [ๆจกๅž‹](https://github.com/ultralytics/yolov5/tree/master/models) ๅฐ†่‡ชๅŠจ็š„ไปŽ +YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) ไธญไธ‹่ฝฝใ€‚ ```python import torch # Model -model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # or yolov5n - yolov5x6, custom +model = torch.hub.load("ultralytics/yolov5", "yolov5s") # or yolov5n - yolov5x6, custom # Images -img = 'https://ultralytics.com/images/zidane.jpg' # or file, Path, PIL, OpenCV, numpy, list +img = "https://ultralytics.com/images/zidane.jpg" # or file, Path, PIL, OpenCV, numpy, list # Inference results = model(img) @@ -177,10 +107,10 @@ results.print() # or .show(), .save(), .crop(), .pandas(), etc.
-Inference with detect.py +ไฝฟ็”จ detect.py ๆŽจ็† -`detect.py`ๅœจๅ„็งๆฅๆบไธŠ่ฟ่กŒๆŽจ็†๏ผŒไธ‹่ฝฝ[ๆฅทๆจก](https://github.com/ultralytics/yolov5/tree/master/models)่‡ชๅŠจไปŽ -ๆœ€ๆ–ฐ็š„YOLOv5[ๅ‘ๅธƒ](https://github.com/ultralytics/yolov5/releases)ๅนถๅฐ†็ป“ๆžœไฟๅญ˜ๅˆฐ`runs/detect`. +`detect.py` ๅœจๅ„็งๆฅๆบไธŠ่ฟ่กŒๆŽจ็†๏ผŒ [ๆจกๅž‹](https://github.com/ultralytics/yolov5/tree/master/models) ่‡ชๅŠจไปŽ +ๆœ€ๆ–ฐ็š„YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) ไธญไธ‹่ฝฝ๏ผŒๅนถๅฐ†็ป“ๆžœไฟๅญ˜ๅˆฐ `runs/detect` ใ€‚ ```bash python detect.py --weights yolov5s.pt --source 0 # webcam @@ -198,13 +128,14 @@ python detect.py --weights yolov5s.pt --source 0 #
-Training +่ฎญ็ปƒ
-ไธ‹้ข็š„ๅ‘ฝไปค้‡็Žฐ YOLOv5[ๅฏๅฏ](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh)็ป“ๆžœใ€‚[ๆฅทๆจก](https://github.com/ultralytics/yolov5/tree/master/models)ๅ’Œ[ๆ•ฐๆฎ้›†](https://github.com/ultralytics/yolov5/tree/master/data)่‡ชๅŠจไปŽๆœ€ๆ–ฐไธ‹่ฝฝ
-YOLOv5[ๅ‘ๅธƒ](https://github.com/ultralytics/yolov5/releases). YOLOv5n/s/m/l/x ็š„่ฎญ็ปƒๆ—ถ้—ดไธบ
-V100 GPU ไธŠ 1/2/4/6/8 ๅคฉ๏ผˆ[ๅคšGPU](https://github.com/ultralytics/yolov5/issues/475)ๅ€ๅฟซ๏ผ‰ใ€‚ไฝฟ็”จ
-ๆœ€ๅคง็š„`--batch-size`ๅฏ่ƒฝ๏ผŒๆˆ–้€š่ฟ‡`--batch-size -1`ไธบไบ†
-YOLOv5[่‡ชๅŠจๆ‰นๅค„็†](https://github.com/ultralytics/yolov5/pull/5092).ๆ˜พ็คบ็š„ๆ‰น้‡ๅคงๅฐ้€‚็”จไบŽ V100-16GBใ€‚
+ไธ‹้ข็š„ๅ‘ฝไปค้‡็Žฐ YOLOv5 ๅœจ [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh) ๆ•ฐๆฎ้›†ไธŠ็š„็ป“ๆžœใ€‚
+ๆœ€ๆ–ฐ็š„ [ๆจกๅž‹](https://github.com/ultralytics/yolov5/tree/master/models) ๅ’Œ [ๆ•ฐๆฎ้›†](https://github.com/ultralytics/yolov5/tree/master/data)
+ๅฐ†่‡ชๅŠจไปŽ YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) ไธญไธ‹่ฝฝใ€‚
+YOLOv5n/s/m/l/x ๅœจ V100 GPU ็š„่ฎญ็ปƒๆ—ถ้—ดไธบ 1/2/4/6/8 ๅคฉ๏ผˆ [ๅคšGPU](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training) ่ฎญ็ปƒ้€Ÿๅบฆๆ›ดๅฟซ๏ผ‰ใ€‚
+ๅฐฝๅฏ่ƒฝไฝฟ็”จๆ›ดๅคง็š„ `--batch-size` ๏ผŒๆˆ–้€š่ฟ‡ `--batch-size -1` ๅฎž็Žฐ
+YOLOv5 [่‡ชๅŠจๆ‰นๅค„็†](https://github.com/ultralytics/yolov5/pull/5092) ใ€‚ไธ‹ๆ–นๆ˜พ็คบ็š„ batchsize ้€‚็”จไบŽ V100-16GBใ€‚
```bash
python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml --batch-size 128
@@ -219,162 +150,243 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml -<br>
-Tutorials - -- [่ฎญ็ปƒ่‡ชๅฎšไน‰ๆ•ฐๆฎ](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data)๐Ÿš€ ๆŽจ่ -- [่Žทๅพ—ๆœ€ไฝณ่ฎญ็ปƒ็ป“ๆžœ็š„ๆŠ€ๅทง](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results)โ˜˜๏ธ - ๆŽจ่็š„ -- [ๅคš GPU ่ฎญ็ปƒ](https://github.com/ultralytics/yolov5/issues/475) -- [PyTorch ไธญๅฟƒ](https://github.com/ultralytics/yolov5/issues/36)๐ŸŒŸ ๆ–ฐ -- [TFLiteใ€ONNXใ€CoreMLใ€TensorRT ๅฏผๅ‡บ](https://github.com/ultralytics/yolov5/issues/251)๐Ÿš€ -- [NVIDIA Jetson Nano ้ƒจ็ฝฒ](https://github.com/ultralytics/yolov5/issues/9627)๐ŸŒŸ ๆ–ฐ -- [ๆต‹่ฏ•ๆ—ถ้—ดๅขžๅผบ (TTA)](https://github.com/ultralytics/yolov5/issues/303) -- [ๆจกๅž‹้›†ๆˆ](https://github.com/ultralytics/yolov5/issues/318) -- [ๆจกๅž‹ไฟฎๅ‰ช/็จ€็–ๅบฆ](https://github.com/ultralytics/yolov5/issues/304) -- [่ถ…ๅ‚ๆ•ฐ่ฟ›ๅŒ–](https://github.com/ultralytics/yolov5/issues/607) -- [ไฝฟ็”จๅ†ป็ป“ๅฑ‚่ฟ›่กŒ่ฟ็งปๅญฆไน ](https://github.com/ultralytics/yolov5/issues/1314) -- [ๆžถๆž„ๆ€ป็ป“](https://github.com/ultralytics/yolov5/issues/6998)๐ŸŒŸ ๆ–ฐ -- [็”จไบŽๆ•ฐๆฎ้›†ใ€ๆ ‡็ญพๅ’ŒไธปๅŠจๅญฆไน ็š„ Roboflow](https://github.com/ultralytics/yolov5/issues/4975)๐ŸŒŸ ๆ–ฐ -- [ClearML ่ฎฐๅฝ•](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml)๐ŸŒŸ ๆ–ฐ -- [ๆ‰€ไปฅๅนณๅฐ](https://github.com/ultralytics/yolov5/wiki/Deci-Platform)๐ŸŒŸ ๆ–ฐ -- [ๅฝ—ๆ˜Ÿ่ฎฐๅฝ•](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet)๐ŸŒŸ ๆ–ฐ +ๆ•™็จ‹ + +- [่ฎญ็ปƒ่‡ชๅฎšไน‰ๆ•ฐๆฎ](https://docs.ultralytics.com/yolov5/tutorials/train_custom_data) ๐Ÿš€ ๆŽจ่ +- [่Žทๅพ—ๆœ€ไฝณ่ฎญ็ปƒ็ป“ๆžœ็š„ๆŠ€ๅทง](https://docs.ultralytics.com/yolov5/tutorials/tips_for_best_training_results) โ˜˜๏ธ +- [ๅคšGPU่ฎญ็ปƒ](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training) +- [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading) ๐ŸŒŸ ๆ–ฐ +- [TFLite๏ผŒONNX๏ผŒCoreML๏ผŒTensorRTๅฏผๅ‡บ](https://docs.ultralytics.com/yolov5/tutorials/model_export) ๐Ÿš€ +- [NVIDIA Jetsonๅนณๅฐ้ƒจ็ฝฒ](https://docs.ultralytics.com/yolov5/tutorials/running_on_jetson_nano) ๐ŸŒŸ ๆ–ฐ +- [ๆต‹่ฏ•ๆ—ถๅขžๅผบ (TTA)](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation) +- [ๆจกๅž‹้›†ๆˆ](https://docs.ultralytics.com/yolov5/tutorials/model_ensembling) +- [ๆจกๅž‹ๅ‰ชๆž/็จ€็–](https://docs.ultralytics.com/yolov5/tutorials/model_pruning_and_sparsity) +- [่ถ…ๅ‚ๆ•ฐ่ฟ›ๅŒ–](https://docs.ultralytics.com/yolov5/tutorials/hyperparameter_evolution) +- [ๅ†ป็ป“ๅฑ‚็š„่ฟ็งปๅญฆไน ](https://docs.ultralytics.com/yolov5/tutorials/transfer_learning_with_frozen_layers) +- [ๆžถๆž„ๆฆ‚่ฟฐ](https://docs.ultralytics.com/yolov5/tutorials/architecture_description) ๐ŸŒŸ ๆ–ฐ +- [Roboflow็”จไบŽๆ•ฐๆฎ้›†ใ€ๆ ‡ๆณจๅ’ŒไธปๅŠจๅญฆไน ](https://docs.ultralytics.com/yolov5/tutorials/roboflow_datasets_integration) +- [ClearMLๆ—ฅๅฟ—่ฎฐๅฝ•](https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration) ๐ŸŒŸ ๆ–ฐ +- [ไฝฟ็”จNeural Magic็š„Deepsparse็š„YOLOv5](https://docs.ultralytics.com/yolov5/tutorials/neural_magic_pruning_quantization) ๐ŸŒŸ ๆ–ฐ +- [Cometๆ—ฅๅฟ—่ฎฐๅฝ•](https://docs.ultralytics.com/yolov5/tutorials/comet_logging_integration) ๐ŸŒŸ ๆ–ฐ
-##
้›†ๆˆ
+##
ๆจกๅ—้›†ๆˆ

- +

- - + + - - - - - - - + + + + + + +
-| ๆœบๅ™จไบบๆต | ClearML โญ ๆ–ฐ | ๅฝ—ๆ˜Ÿโญๆ–ฐ | ๆ‰€ไปฅโญๆ–ฐ | -| :-------------------------------------------------------------------------: | :-----------------------------------------------------------------------: | :----------------------------------------------------------------------------: | :---------------------------------------------------------------: | -| ๅฐ†ๆ‚จ็š„่‡ชๅฎšไน‰ๆ•ฐๆฎ้›†ๆ ‡่ฎฐๅนถ็›ดๆŽฅๅฏผๅ‡บๅˆฐ YOLOv5 ไปฅ่ฟ›่กŒ่ฎญ็ปƒ[ๆœบๅ™จไบบๆต](https://roboflow.com/?ref=ultralytics) | ไฝฟ็”จ่‡ชๅŠจ่ทŸ่ธชใ€ๅฏ่ง†ๅŒ–็”š่‡ณ่ฟœ็จ‹่ฎญ็ปƒ YOLOv5[ๆธ…้™คML](https://cutt.ly/yolov5-readme-clearml)๏ผˆๅผ€ๆบ๏ผ๏ผ‰ | ๆฐธ่ฟœๅ…่ดน๏ผŒ[ๅฝ—ๆ˜Ÿ](https://bit.ly/yolov5-readme-comet)ๅฏ่ฎฉๆ‚จไฟๅญ˜ YOLOv5 ๆจกๅž‹ใ€ๆขๅค่ฎญ็ปƒไปฅๅŠไบคไบ’ๅผๅฏ่ง†ๅŒ–ๅ’Œ่ฐƒ่ฏ•้ข„ๆต‹ | ไธ€้”ฎ่‡ชๅŠจ็ผ–่ฏ‘้‡ๅŒ–YOLOv5ไปฅ่Žทๅพ—ๆ›ดๅฅฝ็š„ๆŽจ็†ๆ€ง่ƒฝ[ๆ‰€ไปฅ](https://bit.ly/yolov5-deci-platform) | +| Roboflow | ClearML โญ ๆ–ฐ | Comet โญ ๆ–ฐ | Neural Magic โญ ๆ–ฐ | +| :--------------------------------------------------------------------------------: | :-------------------------------------------------------------------------: | :--------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------: | +| ๅฐ†ๆ‚จ็š„่‡ชๅฎšไน‰ๆ•ฐๆฎ้›†่ฟ›่กŒๆ ‡ๆณจๅนถ็›ดๆŽฅๅฏผๅ‡บๅˆฐ YOLOv5 ไปฅ่ฟ›่กŒ่ฎญ็ปƒ [Roboflow](https://roboflow.com/?ref=ultralytics) | ่‡ชๅŠจ่ทŸ่ธชใ€ๅฏ่ง†ๅŒ–็”š่‡ณ่ฟœ็จ‹่ฎญ็ปƒ YOLOv5 [ClearML](https://cutt.ly/yolov5-readme-clearml)๏ผˆๅผ€ๆบ๏ผ๏ผ‰ | ๆฐธ่ฟœๅ…่ดน๏ผŒ[Comet](https://bit.ly/yolov5-readme-comet2)ๅฏ่ฎฉๆ‚จไฟๅญ˜ YOLOv5 ๆจกๅž‹ใ€ๆขๅค่ฎญ็ปƒไปฅๅŠไบคไบ’ๅผๅฏ่ง†ๅŒ–ๅ’Œ่ฐƒ่ฏ•้ข„ๆต‹ | ไฝฟ็”จ [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic)๏ผŒ่ฟ่กŒ YOLOv5 ๆŽจ็†็š„้€Ÿๅบฆๆœ€้ซ˜ๅฏๆ้ซ˜6ๅ€ | -##
Ultralytics ้›†็บฟๅ™จ
+##
Ultralytics HUB
-[Ultralytics ้›†็บฟๅ™จ](https://bit.ly/ultralytics_hub)ๆ˜ฏๆˆ‘ไปฌ็š„โญ**ๆ–ฐ็š„**็”จไบŽๅฏ่ง†ๅŒ–ๆ•ฐๆฎ้›†ใ€่ฎญ็ปƒ YOLOv5 ๐Ÿš€ ๆจกๅž‹ๅนถไปฅๆ— ็ผไฝ“้ชŒ้ƒจ็ฝฒๅˆฐ็Žฐๅฎžไธ–็•Œ็š„ๆ— ไปฃ็ ่งฃๅ†ณๆ–นๆกˆใ€‚ๅผ€ๅง‹ไฝฟ็”จ**่‡ช็”ฑ็š„**็Žฐๅœจ๏ผ
+[Ultralytics HUB](https://bit.ly/ultralytics_hub) ๆ˜ฏๆˆ‘ไปฌ็š„โญ**ๆ–ฐ็š„**็”จไบŽๅฏ่ง†ๅŒ–ๆ•ฐๆฎ้›†ใ€่ฎญ็ปƒ YOLOv5 ๐Ÿš€ ๆจกๅž‹ๅนถไปฅๆ— ็ผไฝ“้ชŒ้ƒจ็ฝฒๅˆฐ็Žฐๅฎžไธ–็•Œ็š„ๆ— ไปฃ็ ่งฃๅ†ณๆ–นๆกˆใ€‚็Žฐๅœจๅผ€ๅง‹ **ๅ…่ดน** ไฝฟ็”จๅฎƒ๏ผ
- + ##<br>
ไธบไป€ไนˆ้€‰ๆ‹ฉ YOLOv5
-YOLOv5 ่ขซ่ฎพ่ฎกไธบ่ถ…็บงๅฎนๆ˜“ไธŠๆ‰‹ๅ’Œ็ฎ€ๅ•ๆ˜“ๅญฆใ€‚ๆˆ‘ไปฌไผ˜ๅ…ˆ่€ƒ่™‘็Žฐๅฎžไธ–็•Œ็š„็ป“ๆžœใ€‚ +YOLOv5 ่ถ…็บงๅฎนๆ˜“ไธŠๆ‰‹๏ผŒ็ฎ€ๅ•ๆ˜“ๅญฆใ€‚ๆˆ‘ไปฌไผ˜ๅ…ˆ่€ƒ่™‘็Žฐๅฎžไธ–็•Œ็š„็ป“ๆžœใ€‚

- YOLOv5-P5 640 Figure + YOLOv5-P5 640 ๅ›พ

- Figure Notes + ๅ›พ่กจ็ฌ”่ฎฐ + +- **COCO AP val** ่กจ็คบ mAP@0.5:0.95 ๆŒ‡ๆ ‡๏ผŒๅœจ [COCO val2017](http://cocodataset.org) ๆ•ฐๆฎ้›†็š„ 5000 ๅผ ๅ›พๅƒไธŠๆต‹ๅพ—๏ผŒ ๅ›พๅƒๅŒ…ๅซ 256 ๅˆฐ 1536 ๅ„็งๆŽจ็†ๅคงๅฐใ€‚ +- **ๆ˜พๅกๆŽจ็†้€Ÿๅบฆ** ไธบๅœจ [COCO val2017](http://cocodataset.org) ๆ•ฐๆฎ้›†ไธŠ็š„ๅนณๅ‡ๆŽจ็†ๆ—ถ้—ด๏ผŒไฝฟ็”จ [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) V100ๅฎžไพ‹๏ผŒbatchsize ไธบ 32 ใ€‚ +- **EfficientDet** ๆ•ฐๆฎๆฅ่‡ช [google/automl](https://github.com/google/automl) ๏ผŒ batchsize ไธบ32ใ€‚ +- **ๅค็Žฐๅ‘ฝไปค** ไธบ `python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt` + +
+ +### ้ข„่ฎญ็ปƒๆจกๅž‹ + +| ๆจกๅž‹ | ๅฐบๅฏธ
๏ผˆๅƒ็ด ๏ผ‰ | mAPval
50-95 | mAPval
50 | ๆŽจ็†้€Ÿๅบฆ
CPU b1
๏ผˆms๏ผ‰ | ๆŽจ็†้€Ÿๅบฆ
V100 b1
๏ผˆms๏ผ‰ | ้€Ÿๅบฆ
V100 b32
๏ผˆms๏ผ‰ | ๅ‚ๆ•ฐ้‡
(M) | FLOPs
@640 (B) | +| ---------------------------------------------------------------------------------------------- | --------------- | -------------------- | ----------------- | --------------------------- | ---------------------------- | --------------------------- | --------------- | ---------------------- | +| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** | +| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 | +| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 | +| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 | +| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 | +| | | | | | | | | | +| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 | +| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | +| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | +| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | +| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x6.pt)
+[TTA] | 1280
1536 | 55.0
**55.8** | 72.7
**72.7** | 3136
- | 26.2
- | 19.4
- | 140.7
- | 209.8
- | + +
็ฌ”่ฎฐ
-- **COCO AP ๅ€ผ**่กจ็คบ[map@0.5](mailto:mAP@0.5):0.95 ๆŒ‡ๆ ‡ๅœจ 5000 ๅผ ๅ›พๅƒไธŠๆต‹ๅพ—[COCO val2017](http://cocodataset.org)ไปŽ 256 ๅˆฐ 1536 ็š„ๅ„็งๆŽจ็†ๅคงๅฐ็š„ๆ•ฐๆฎ้›†ใ€‚
-- **ๆ˜พๅก้€Ÿๅบฆ**ๆต‹้‡ๆฏๅผ ๅ›พๅƒ็š„ๅนณๅ‡ๆŽจ็†ๆ—ถ้—ด[COCO val2017](http://cocodataset.org)ๆ•ฐๆฎ้›†ไฝฟ็”จ[็พŽๅ›ฝ้”€ๅ”ฎ.Excelerge](https://aws.amazon.com/ec2/instance-types/p3/)ๆ‰น้‡ๅคงๅฐไธบ 32 ็š„ V100 ๅฎžไพ‹ใ€‚
-- **้ซ˜ๆ•ˆ**ๆ•ฐๆฎๆฅ่‡ช[่ฐทๆญŒ/ๆฑฝ่ฝฆ](https://github.com/google/automl)ๆ‰น้‡ๅคงๅฐไธบ 8ใ€‚
-- **ๅคๅˆถ**็ป่ฟ‡`python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt`
+- ๆ‰€ๆœ‰ๆจกๅž‹้ƒฝไฝฟ็”จ้ป˜่ฎค้…็ฝฎ๏ผŒ่ฎญ็ปƒ 300 epochsใ€‚nๅ’Œsๆจกๅž‹ไฝฟ็”จ [hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml) ๏ผŒๅ…ถไป–ๆจกๅž‹้ƒฝไฝฟ็”จ [hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml) ใ€‚
+- **mAPval** ๅœจๅ•ๆจกๅž‹ๅ•ๅฐบๅบฆไธŠ่ฎก็ฎ—๏ผŒๆ•ฐๆฎ้›†ไฝฟ็”จ [COCO val2017](http://cocodataset.org) ใ€‚<br>
ๅค็Žฐๅ‘ฝไปค `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` +- **ๆŽจ็†้€Ÿๅบฆ**ๅœจ COCO val ๅ›พๅƒๆ€ปไฝ“ๆ—ถ้—ดไธŠ่ฟ›่กŒๅนณๅ‡ๅพ—ๅˆฐ๏ผŒๆต‹่ฏ•็Žฏๅขƒไฝฟ็”จ[AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/)ๅฎžไพ‹ใ€‚ NMS ๆ—ถ้—ด (ๅคง็บฆ 1 ms/img) ไธๅŒ…ๆ‹ฌๅœจๅ†…ใ€‚
ๅค็Žฐๅ‘ฝไปค `python val.py --data coco.yaml --img 640 --task speed --batch 1` +- **TTA** [ๆต‹่ฏ•ๆ—ถๆ•ฐๆฎๅขžๅผบ](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation) ๅŒ…ๆ‹ฌๅๅฐ„ๅ’Œๅฐบๅบฆๅ˜ๆขใ€‚
ๅค็Žฐๅ‘ฝไปค `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`
-### ้ข„่ฎญ็ปƒๆฃ€ๆŸฅ็‚น - -| ๆจกๅž‹ | ๅฐบๅฏธ
๏ผˆๅƒ็ด ๏ผ‰ | ๅœฐๅ›พๅ€ผ
50-95 | ๅœฐๅ›พๅ€ผ
50 | ้€Ÿๅบฆ
ๅค„็†ๅ™จb1
๏ผˆๅฐๅง๏ผ‰ | ้€Ÿๅบฆ
V100 b1
๏ผˆๅฐๅง๏ผ‰ | ้€Ÿๅบฆ
V100 b32
๏ผˆๅฐๅง๏ผ‰ | ๅ‚ๆ•ฐ
(็”ท) | ๅคฑ่ดฅ่€…
@640๏ผˆไบŒ๏ผ‰ | -| --------------------------------------------------------------------------------------------------- | --------------- | ----------------- | ---------------- | ------------------------ | -------------------------- | --------------------------- | -------------- | ------------------- | -| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** | -| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 | -| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 | -| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 | -| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 | -| | | | | | | | | | -| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 | -| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | -| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | -| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | -| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x6.pt)
+[็”ต่ฎฏๅฑ€][tta] | 1280
1536 | 55.0
**55.8** | 72.7
**72.7** | 3136
- | 26.2
- | 19.4
- | 140.7
- | 209.8
- | +##
ๅฎžไพ‹ๅˆ†ๅ‰ฒๆจกๅž‹ โญ ๆ–ฐ
+ +ๆˆ‘ไปฌๆ–ฐ็š„ YOLOv5 [release v7.0](https://github.com/ultralytics/yolov5/releases/v7.0) ๅฎžไพ‹ๅˆ†ๅ‰ฒๆจกๅž‹ๆ˜ฏไธ–็•ŒไธŠๆœ€ๅฟซๅ’Œๆœ€ๅ‡†็กฎ็š„ๆจกๅž‹๏ผŒๅ‡ป่ดฅๆ‰€ๆœ‰ๅฝ“ๅ‰ [SOTA ๅŸบๅ‡†](https://paperswithcode.com/sota/real-time-instance-segmentation-on-mscoco)ใ€‚ๆˆ‘ไปฌไฝฟๅฎƒ้žๅธธๆ˜“ไบŽ่ฎญ็ปƒใ€้ชŒ่ฏๅ’Œ้ƒจ็ฝฒใ€‚ๆ›ดๅคš็ป†่Š‚่ฏทๆŸฅ็œ‹ [ๅ‘่กŒ่ฏดๆ˜Ž](https://github.com/ultralytics/yolov5/releases/v7.0) ๆˆ–่ฎฟ้—ฎๆˆ‘ไปฌ็š„ [YOLOv5 ๅˆ†ๅ‰ฒ Colab ็ฌ”่ฎฐๆœฌ](https://github.com/ultralytics/yolov5/blob/master/segment/tutorial.ipynb) ไปฅๅฟซ้€Ÿๅ…ฅ้—จใ€‚
- Table Notes + ๅฎžไพ‹ๅˆ†ๅ‰ฒๆจกๅž‹ๅˆ—่กจ + +
+ +
+ + +
+ +ๆˆ‘ไปฌไฝฟ็”จ A100 GPU ๅœจ COCO ไธŠไปฅ 640 ๅ›พๅƒๅคงๅฐ่ฎญ็ปƒไบ† 300 epochs ๅพ—ๅˆฐ YOLOv5 ๅˆ†ๅ‰ฒๆจกๅž‹ใ€‚ๆˆ‘ไปฌๅฐ†ๆ‰€ๆœ‰ๆจกๅž‹ๅฏผๅ‡บๅˆฐ ONNX FP32 ไปฅ่ฟ›่กŒ CPU ้€Ÿๅบฆๆต‹่ฏ•๏ผŒๅนถๅฏผๅ‡บๅˆฐ TensorRT FP16 ไปฅ่ฟ›่กŒ GPU ้€Ÿๅบฆๆต‹่ฏ•ใ€‚ไธบไบ†ไพฟไบŽๅ†็Žฐ๏ผŒๆˆ‘ไปฌๅœจ Google [Colab Pro](https://colab.research.google.com/signup) ไธŠ่ฟ›่กŒไบ†ๆ‰€ๆœ‰้€Ÿๅบฆๆต‹่ฏ•ใ€‚ + +| ๆจกๅž‹ | ๅฐบๅฏธ
๏ผˆๅƒ็ด ๏ผ‰ | mAPbox
50-95 | mAPmask
50-95 | ่ฎญ็ปƒๆ—ถ้•ฟ
300 epochs
A100 GPU๏ผˆๅฐๆ—ถ๏ผ‰ | ๆŽจ็†้€Ÿๅบฆ
ONNX CPU
๏ผˆms๏ผ‰ | ๆŽจ็†้€Ÿๅบฆ
TRT A100
๏ผˆms๏ผ‰ | ๅ‚ๆ•ฐ้‡
(M) | FLOPs
@640 (B) | +| ------------------------------------------------------------------------------------------ | --------------- | -------------------- | --------------------- | --------------------------------------- | ----------------------------- | ----------------------------- | --------------- | ---------------------- | +| [YOLOv5n-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-seg.pt) | 640 | 27.6 | 23.4 | 80:17 | **62.7** | **1.2** | **2.0** | **7.1** | +| [YOLOv5s-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt) | 640 | 37.6 | 31.7 | 88:16 | 173.3 | 1.4 | 7.6 | 26.4 | +| [YOLOv5m-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-seg.pt) | 640 | 45.0 | 37.1 | 108:36 | 427.0 | 2.2 | 22.0 | 70.8 | +| [YOLOv5l-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-seg.pt) | 640 | 49.0 | 39.9 | 66:43 (2x) | 857.4 | 2.9 | 47.9 | 147.7 | +| [YOLOv5x-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-seg.pt) | 640 | **50.7** | **41.4** | 62:56 (3x) | 1579.2 | 4.5 | 88.8 | 265.7 | + +- ๆ‰€ๆœ‰ๆจกๅž‹ไฝฟ็”จ SGD ไผ˜ๅŒ–ๅ™จ่ฎญ็ปƒ๏ผŒ ้ƒฝไฝฟ็”จ `lr0=0.01` ๅ’Œ `weight_decay=5e-5` ๅ‚ๆ•ฐ๏ผŒ ๅ›พๅƒๅคงๅฐไธบ 640 ใ€‚
่ฎญ็ปƒ log ๅฏไปฅๆŸฅ็œ‹ https://wandb.ai/glenn-jocher/YOLOv5_v70_official +- **ๅ‡†็กฎๆ€ง**็ป“ๆžœ้ƒฝๅœจ COCO ๆ•ฐๆฎ้›†ไธŠ๏ผŒไฝฟ็”จๅ•ๆจกๅž‹ๅ•ๅฐบๅบฆๆต‹่ฏ•ๅพ—ๅˆฐใ€‚
ๅค็Žฐๅ‘ฝไปค `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt` +- **ๆŽจ็†้€Ÿๅบฆ**ๆ˜ฏไฝฟ็”จ 100 ๅผ ๅ›พๅƒๆŽจ็†ๆ—ถ้—ด่ฟ›่กŒๅนณๅ‡ๅพ—ๅˆฐ๏ผŒๆต‹่ฏ•็Žฏๅขƒไฝฟ็”จ [Colab Pro](https://colab.research.google.com/signup) ไธŠ A100 ้ซ˜ RAM ๅฎžไพ‹ใ€‚็ป“ๆžœไป…่กจ็คบๆŽจ็†้€Ÿๅบฆ๏ผˆNMS ๆฏๅผ ๅ›พๅƒๅขžๅŠ ็บฆ 1 ๆฏซ็ง’๏ผ‰ใ€‚
ๅค็Žฐๅ‘ฝไปค `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt --batch 1` +- **ๆจกๅž‹่ฝฌๆข**ๅˆฐ FP32 ็š„ ONNX ๅ’Œ FP16 ็š„ TensorRT ่„šๆœฌไธบ `export.py`.
่ฟ่กŒๅ‘ฝไปค `python export.py --weights yolov5s-seg.pt --include engine --device 0 --half` + +
+ +
+ ๅˆ†ๅ‰ฒๆจกๅž‹ไฝฟ็”จ็คบไพ‹  Open In Colab + +### ่ฎญ็ปƒ + +YOLOv5ๅˆ†ๅ‰ฒ่ฎญ็ปƒๆ”ฏๆŒ่‡ชๅŠจไธ‹่ฝฝ COCO128-seg ๅˆ†ๅ‰ฒๆ•ฐๆฎ้›†๏ผŒ็”จๆˆทไป…้œ€ๅœจๅฏๅŠจๆŒ‡ไปคไธญๅŒ…ๅซ `--data coco128-seg.yaml` ๅ‚ๆ•ฐใ€‚ ่‹ฅ่ฆๆ‰‹ๅŠจไธ‹่ฝฝ๏ผŒไฝฟ็”จๅ‘ฝไปค `bash data/scripts/get_coco.sh --train --val --segments`๏ผŒ ๅœจไธ‹่ฝฝๅฎŒๆฏ•ๅŽ๏ผŒไฝฟ็”จๅ‘ฝไปค `python train.py --data coco.yaml` ๅผ€ๅฏ่ฎญ็ปƒใ€‚ + +```bash +# ๅ• GPU +python segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 + +# ๅคš GPU๏ผŒ DDP ๆจกๅผ +python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 --device 0,1,2,3 +``` + +### ้ชŒ่ฏ + +ๅœจ COCO ๆ•ฐๆฎ้›†ไธŠ้ชŒ่ฏ YOLOv5s-seg mask mAP๏ผš -- ๆ‰€ๆœ‰ๆฃ€ๆŸฅ็‚น้ƒฝไฝฟ็”จ้ป˜่ฎค่ฎพ็ฝฎ่ฎญ็ปƒๅˆฐ 300 ไธชๆ—ถๆœŸใ€‚็บณ็ฑณๅ’Œๅฐๅž‹ๅž‹ๅทไฝฟ็”จ[hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml)hyps๏ผŒๆ‰€ๆœ‰ๅ…ถไป–ไบบ้ƒฝไฝฟ็”จ[hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml). -- **ๅœฐๅ›พๅ€ผ**ๅ€ผ้€‚็”จไบŽๅ•ๆจกๅž‹ๅ•ๅฐบๅบฆ[COCO val2017](http://cocodataset.org)ๆ•ฐๆฎ้›†ใ€‚
้‡็Žฐ่€…`python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` -- **้€Ÿๅบฆ**ไฝฟ็”จ a ๅฏน COCO val ๅ›พๅƒ่ฟ›่กŒๅนณๅ‡[็พŽๅ›ฝ้”€ๅ”ฎ.Excelerge](https://aws.amazon.com/ec2/instance-types/p3/)ๅฎžไพ‹ใ€‚ NMS ๆ—ถ้—ด (~1 ms/img) ไธๅŒ…ๆ‹ฌๅœจๅ†…ใ€‚
้‡็Žฐ่€…`python val.py --data coco.yaml --img 640 --task speed --batch 1` -- **็”ต่ฎฏๅฑ€**[ๆต‹่ฏ•ๆ—ถ้—ดๅขžๅŠ ](https://github.com/ultralytics/yolov5/issues/303)ๅŒ…ๆ‹ฌๅๅฐ„ๅ’Œๅฐบๅบฆๅขžๅผบใ€‚
้‡็Žฐ่€…`python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`
+```bash
+bash data/scripts/get_coco.sh --val --segments # ไธ‹่ฝฝ COCO val segments ๆ•ฐๆฎ้›† (780MB, 5000 images)
+python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # ้ชŒ่ฏ
+```
+
+### ้ข„ๆต‹
+
+ไฝฟ็”จ้ข„่ฎญ็ปƒ็š„ YOLOv5m-seg.pt ๆฅ้ข„ๆต‹ bus.jpg๏ผš
+
+```bash
+python segment/predict.py --weights yolov5m-seg.pt --source data/images/bus.jpg
+```
+
+```python
+model = torch.hub.load(
+ "ultralytics/yolov5", "custom", "yolov5m-seg.pt"
+) # ไปŽ PyTorch Hub ๅŠ ่ฝฝๆจกๅž‹ (WARNING: ๆŽจ็†ๆš‚ๆœชๆ”ฏๆŒ)
+```
+
+| ![zidane](https://user-images.githubusercontent.com/26833433/203113421-decef4c4-183d-4a0a-a6c2-6435b33bc5d3.jpg) | ![bus](https://user-images.githubusercontent.com/26833433/203113416-11fe0025-69f7-4874-a0a6-65d0bfe2999a.jpg) |
+| ---------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- |
+
+### ๆจกๅž‹ๅฏผๅ‡บ
+
+ๅฐ† YOLOv5s-seg ๆจกๅž‹ๅฏผๅ‡บๅˆฐ ONNX ๅ’Œ TensorRT๏ผš
+
+```bash
+python export.py --weights yolov5s-seg.pt --include onnx engine --img 640 --device 0
+```<br>
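+
+ๅฏผๅ‡บ็š„ ONNX ๆจกๅž‹ๅฏ็”จ ONNX Runtime ๅš CPU ็ซฏ็š„ๅฟซ้€Ÿ้ชŒ่ฏใ€‚ไธ‹้ขๆ˜ฏไธ€ไธช็ฎ€ๅ•็คบๆ„๏ผˆๅ‡่ฎพไฝฟ็”จไธŠ้ขๅ‘ฝไปค็”Ÿๆˆ็š„้ป˜่ฎค 640x640 FP32 ๅฏผๅ‡บ๏ผ›็œ็•ฅไบ† letterbox ้ข„ๅค„็†ๅ’Œ NMS ๅŽๅค„็†๏ผ‰๏ผš
+
+```python
+import numpy as np
+import onnxruntime as ort
+
+session = ort.InferenceSession("yolov5s-seg.onnx", providers=["CPUExecutionProvider"])
+image = np.zeros((1, 3, 640, 640), dtype=np.float32)  # ๅฝ’ไธ€ๅŒ–ๅŽ็š„ RGB ่พ“ๅ…ฅๅ ไฝ็ฌฆ
+outputs = session.run(None, {session.get_inputs()[0].name: image})
+print([o.shape for o in outputs])  # ๆฃ€ๆต‹่พ“ๅ‡บไธŽๆŽฉ็  prototype ๅผ ้‡
+```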
-##
ๅˆ†็ฑปโญๆ–ฐ
+##
ๅˆ†็ฑป็ฝ‘็ปœ โญ ๆ–ฐ
-YOLOv5[ๅ‘ๅธƒ v6.2](https://github.com/ultralytics/yolov5/releases)ๅธฆๆฅๅฏนๅˆ†็ฑปๆจกๅž‹่ฎญ็ปƒใ€้ชŒ่ฏๅ’Œ้ƒจ็ฝฒ็š„ๆ”ฏๆŒ๏ผๆŸฅ็œ‹ๆˆ‘ไปฌ็š„ๅฎŒๆ•ด่ฏฆ็ป†ไฟกๆฏ[ๅ‘่กŒ่ฏดๆ˜Ž](https://github.com/ultralytics/yolov5/releases/v6.2)ๅนถ่ฎฟ้—ฎๆˆ‘ไปฌ็š„[YOLOv5 ๅˆ†็ฑป Colab ็ฌ”่ฎฐๆœฌ](https://github.com/ultralytics/yolov5/blob/master/classify/tutorial.ipynb)ๅฟซ้€Ÿๅ…ฅ้—จๆ•™็จ‹ใ€‚ +YOLOv5 [release v6.2](https://github.com/ultralytics/yolov5/releases) ๅธฆๆฅๅฏนๅˆ†็ฑปๆจกๅž‹่ฎญ็ปƒใ€้ชŒ่ฏๅ’Œ้ƒจ็ฝฒ็š„ๆ”ฏๆŒ๏ผ่ฏฆๆƒ…่ฏทๆŸฅ็œ‹ [ๅ‘่กŒ่ฏดๆ˜Ž](https://github.com/ultralytics/yolov5/releases/v6.2) ๆˆ–่ฎฟ้—ฎๆˆ‘ไปฌ็š„ [YOLOv5 ๅˆ†็ฑป Colab ็ฌ”่ฎฐๆœฌ](https://github.com/ultralytics/yolov5/blob/master/classify/tutorial.ipynb) ไปฅๅฟซ้€Ÿๅ…ฅ้—จใ€‚
- Classification Checkpoints + ๅˆ†็ฑป็ฝ‘็ปœๆจกๅž‹
-ๆˆ‘ไปฌไฝฟ็”จ 4xA100 ๅฎžไพ‹ๅœจ ImageNet ไธŠ่ฎญ็ปƒไบ† 90 ไธชๆ—ถๆœŸ็š„ YOLOv5-cls ๅˆ†็ฑปๆจกๅž‹๏ผŒๆˆ‘ไปฌ่ฎญ็ปƒไบ† ResNet ๅ’Œ EfficientNet ๆจกๅž‹ไปฅๅŠ็›ธๅŒ็š„้ป˜่ฎค่ฎญ็ปƒ่ฎพ็ฝฎไปฅ่ฟ›่กŒๆฏ”่พƒใ€‚ๆˆ‘ไปฌๅฐ†ๆ‰€ๆœ‰ๆจกๅž‹ๅฏผๅ‡บๅˆฐ ONNX FP32 ไปฅ่ฟ›่กŒ CPU ้€Ÿๅบฆๆต‹่ฏ•๏ผŒๅนถๅฏผๅ‡บๅˆฐ TensorRT FP16 ไปฅ่ฟ›่กŒ GPU ้€Ÿๅบฆๆต‹่ฏ•ใ€‚ๆˆ‘ไปฌๅœจ Google ไธŠ่ฟ›่กŒไบ†ๆ‰€ๆœ‰้€Ÿๅบฆๆต‹่ฏ•[ๅไฝœไธด](https://colab.research.google.com/signup)ไธบไบ†ไพฟไบŽ้‡็Žฐใ€‚ - -| ๆจกๅž‹ | ๅฐบๅฏธ
๏ผˆๅƒ็ด ๏ผ‰ | acc
top1 | acc
็ƒน้ฅช | ่ฎญ็ปƒ
90ไธช็บชๅ…ƒ
4xA100๏ผˆๅฐๆ—ถ๏ผ‰ | ้€Ÿๅบฆ
ONNX ไธญๅคฎๅค„็†ๅ™จ
๏ผˆๅฐๅง๏ผ‰ | ้€Ÿๅบฆ
TensorRT V100
๏ผˆๅฐๅง๏ผ‰ | ๅ‚ๆ•ฐ
(็”ท) | ๅคฑ่ดฅ่€…
@224๏ผˆไน™๏ผ‰ | -| ------------------------------------------------------------------------------------------- | --------------- | ---------------- | -------------- | ------------------------------ | ----------------------------- | -------------------------------- | -------------- | ------------------- | -| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** | -| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 | -| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 | -| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 | -| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 | -| | | | | | | | | | -| [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 | -| [Resnetzch](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 | -| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 | -| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 | -| | | | | | | | | | -| [้ซ˜ๆ•ˆ็ฝ‘็ปœ\_b0](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 | -| [้ซ˜ๆ•ˆ็ฝ‘็ปœ b1](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 | -| [ๆˆ‘ไปฌๅฐ†้ข„ๆต‹](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 | -| [้ซ˜ๆ•ˆNetb3](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 | +ๆˆ‘ไปฌไฝฟ็”จ 4xA100 ๅฎžไพ‹ๅœจ ImageNet ไธŠ่ฎญ็ปƒไบ† 90 ไธช epochs ๅพ—ๅˆฐ YOLOv5-cls ๅˆ†็ฑปๆจกๅž‹๏ผŒๆˆ‘ไปฌ่ฎญ็ปƒไบ† ResNet ๅ’Œ EfficientNet ๆจกๅž‹ไปฅๅŠ็›ธๅŒ็š„้ป˜่ฎค่ฎญ็ปƒ่ฎพ็ฝฎไปฅ่ฟ›่กŒๆฏ”่พƒใ€‚ๆˆ‘ไปฌๅฐ†ๆ‰€ๆœ‰ๆจกๅž‹ๅฏผๅ‡บๅˆฐ ONNX FP32 ไปฅ่ฟ›่กŒ CPU ้€Ÿๅบฆๆต‹่ฏ•๏ผŒๅนถๅฏผๅ‡บๅˆฐ TensorRT FP16 ไปฅ่ฟ›่กŒ GPU ้€Ÿๅบฆๆต‹่ฏ•ใ€‚ไธบไบ†ไพฟไบŽ้‡็Žฐ๏ผŒๆˆ‘ไปฌๅœจ Google ไธŠ่ฟ›่กŒไบ†ๆ‰€ๆœ‰้€Ÿๅบฆๆต‹่ฏ• [Colab Pro](https://colab.research.google.com/signup) ใ€‚ + +| ๆจกๅž‹ | ๅฐบๅฏธ
๏ผˆๅƒ็ด ๏ผ‰ | acc
top1 | acc
top5 | ่ฎญ็ปƒๆ—ถ้•ฟ
90 epochs
4xA100๏ผˆๅฐๆ—ถ๏ผ‰ | ๆŽจ็†้€Ÿๅบฆ
ONNX CPU
๏ผˆms๏ผ‰ | ๆŽจ็†้€Ÿๅบฆ
TensorRT V100
๏ผˆms๏ผ‰ | ๅ‚ๆ•ฐ
(M) | FLOPs
@224 (B) |
+| -------------------------------------------------------------------------------------------------- | --------------- | ---------------- | ---------------- | ------------------------------------ | ----------------------------- | ---------------------------------- | -------------- | ---------------------- |
+| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** |
+| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 |
+| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 |
+| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 |
+| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 |
+| | | | | | | | | |
+| [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 |
+| [ResNet34](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 |
+| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 |
+| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 |
+| | | | | | | | | |
+| [EfficientNet_b0](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 |
+| [EfficientNet_b1](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 |
+| [EfficientNet_b2](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 |
+| [EfficientNet_b3](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 |<br>
- Table Notes (click to expand) + Table Notes (็‚นๅ‡ปไปฅๅฑ•ๅผ€) -- ไฝฟ็”จ SGD ไผ˜ๅŒ–ๅ™จๅฐ†ๆ‰€ๆœ‰ๆฃ€ๆŸฅ็‚น่ฎญ็ปƒๅˆฐ 90 ไธชๆ—ถๆœŸ`lr0=0.001`ๅ’Œ`weight_decay=5e-5`ๅœจๅ›พๅƒๅคงๅฐ 224 ๅ’Œๆ‰€ๆœ‰้ป˜่ฎค่ฎพ็ฝฎใ€‚
่ฟ่กŒ่ฎฐๅฝ•ๅˆฐ[HTTPS://็Žฉ่ฑ†็“ฃ.็ˆฑ/Glenn-ๅฐฑocher/yo lo V5-classifier-V6-2](https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2) -- **ๅ‡†็กฎๆ€ง**ๅ€ผ้€‚็”จไบŽๅ•ๆจกๅž‹ๅ•ๅฐบๅบฆ[ImageNet-1k](https://www.image-net.org/index.php)ๆ•ฐๆฎ้›†ใ€‚
้‡็Žฐ่€…`python classify/val.py --data ../datasets/imagenet --img 224` -- **้€Ÿๅบฆ**ไฝฟ็”จ่ฐทๆญŒๅนณๅ‡่ถ…่ฟ‡ 100 ไธชๆŽจ็†ๅ›พๅƒ[ๅไฝœไธด](https://colab.research.google.com/signup)V100 ้ซ˜ RAM ๅฎžไพ‹ใ€‚
้‡็Žฐ่€…`python classify/val.py --data ../datasets/imagenet --img 224 --batch 1` -- **ๅ‡บๅฃ**ๅˆฐ FP32 ็š„ ONNX ๅ’Œ FP16 ็š„ TensorRT ๅฎŒๆˆ`export.py`.
้‡็Žฐ่€…`python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224` -
-
+- ๆ‰€ๆœ‰ๆจกๅž‹้ƒฝไฝฟ็”จ SGD ไผ˜ๅŒ–ๅ™จ่ฎญ็ปƒ 90 ไธช epochs๏ผŒ้ƒฝไฝฟ็”จ `lr0=0.001` ๅ’Œ `weight_decay=5e-5` ๅ‚ๆ•ฐ๏ผŒ ๅ›พๅƒๅคงๅฐไธบ 224 ๏ผŒไธ”้ƒฝไฝฟ็”จ้ป˜่ฎค่ฎพ็ฝฎใ€‚
่ฎญ็ปƒ log ๅฏไปฅๆŸฅ็œ‹ https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2 +- **ๅ‡†็กฎๆ€ง**้ƒฝๅœจๅ•ๆจกๅž‹ๅ•ๅฐบๅบฆไธŠ่ฎก็ฎ—๏ผŒๆ•ฐๆฎ้›†ไฝฟ็”จ [ImageNet-1k](https://www.image-net.org/index.php) ใ€‚
ๅค็Žฐๅ‘ฝไปค `python classify/val.py --data ../datasets/imagenet --img 224` +- **ๆŽจ็†้€Ÿๅบฆ**ๆ˜ฏไฝฟ็”จ 100 ไธชๆŽจ็†ๅ›พๅƒ่ฟ›่กŒๅนณๅ‡ๅพ—ๅˆฐ๏ผŒๆต‹่ฏ•็Žฏๅขƒไฝฟ็”จ่ฐทๆญŒ [Colab Pro](https://colab.research.google.com/signup) V100 ้ซ˜ RAM ๅฎžไพ‹ใ€‚
ๅค็Žฐๅ‘ฝไปค `python classify/val.py --data ../datasets/imagenet --img 224 --batch 1` +- **ๆจกๅž‹ๅฏผๅ‡บ**ๅˆฐ FP32 ็š„ ONNX ๅ’Œ FP16 ็š„ TensorRT ไฝฟ็”จ `export.py` ใ€‚
ๅค็Žฐๅ‘ฝไปค `python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224` + +
- Classification Usage Examples  Open In Colab + ๅˆ†็ฑป่ฎญ็ปƒ็คบไพ‹  Open In Colab -### ็ซ่ฝฆ +### ่ฎญ็ปƒ -YOLOv5 ๅˆ†็ฑป่ฎญ็ปƒๆ”ฏๆŒ่‡ชๅŠจไธ‹่ฝฝ MNISTใ€Fashion-MNISTใ€CIFAR10ใ€CIFAR100ใ€Imagenetteใ€Imagewoof ๅ’Œ ImageNet ๆ•ฐๆฎ้›†`--data`ไบ‰่ฎบใ€‚ๅผ€ๅง‹ไฝฟ็”จ MNIST ่ฟ›่กŒ่ฎญ็ปƒ`--data mnist`. +YOLOv5 ๅˆ†็ฑป่ฎญ็ปƒๆ”ฏๆŒ่‡ชๅŠจไธ‹่ฝฝ MNISTใ€Fashion-MNISTใ€CIFAR10ใ€CIFAR100ใ€Imagenetteใ€Imagewoof ๅ’Œ ImageNet ๆ•ฐๆฎ้›†๏ผŒๅ‘ฝไปคไธญไฝฟ็”จ `--data` ๅณๅฏใ€‚ MNIST ็คบไพ‹ `--data mnist` ใ€‚ ```bash -# Single-GPU +# ๅ• GPU python classify/train.py --model yolov5s-cls.pt --data cifar100 --epochs 5 --img 224 --batch 128 -# Multi-GPU DDP +# ๅคš GPU๏ผŒ DDP ๆจกๅผ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3 ``` -### ็“ฆๅฐ” +### ้ชŒ่ฏ ๅœจ ImageNet-1k ๆ•ฐๆฎ้›†ไธŠ้ชŒ่ฏ YOLOv5m-cls ็š„ๅ‡†็กฎๆ€ง๏ผš @@ -388,14 +400,16 @@ python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --im ไฝฟ็”จ้ข„่ฎญ็ปƒ็š„ YOLOv5s-cls.pt ๆฅ้ข„ๆต‹ bus.jpg๏ผš ```bash -python classify/predict.py --weights yolov5s-cls.pt --data data/images/bus.jpg +python classify/predict.py --weights yolov5s-cls.pt --source data/images/bus.jpg ``` ```python -model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s-cls.pt') # load from PyTorch Hub +model = torch.hub.load( + "ultralytics/yolov5", "custom", "yolov5s-cls.pt" +) # load from PyTorch Hub ``` -### ๅ‡บๅฃ +### ๆจกๅž‹ๅฏผๅ‡บ ๅฐ†ไธ€็ป„็ป่ฟ‡่ฎญ็ปƒ็š„ YOLOv5s-clsใ€ResNet ๅ’Œ EfficientNet ๆจกๅž‹ๅฏผๅ‡บๅˆฐ ONNX ๅ’Œ TensorRT๏ผš @@ -407,76 +421,70 @@ python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --inclu ##
็Žฏๅขƒ
-ๅœจๅ‡ ็ง’้’Ÿๅ†…ๅผ€ๅง‹ไฝฟ็”จๆˆ‘ไปฌ็ป่ฟ‡้ชŒ่ฏ็š„็Žฏๅขƒใ€‚ๅ•ๅ‡ปไธ‹้ข็š„ๆฏไธชๅ›พๆ ‡ไบ†่งฃ่ฏฆ็ป†ไฟกๆฏใ€‚ +ไฝฟ็”จไธ‹้ขๆˆ‘ไปฌ็ป่ฟ‡้ชŒ่ฏ็š„็Žฏๅขƒ๏ผŒๅœจๅ‡ ็ง’้’Ÿๅ†…ๅผ€ๅง‹ไฝฟ็”จ YOLOv5 ใ€‚ๅ•ๅ‡ปไธ‹้ข็š„ๅ›พๆ ‡ไบ†่งฃ่ฏฆ็ป†ไฟกๆฏใ€‚
- + - + - + - - + + - - + +
-##
ๅบ”็”จ็จ‹ๅบ
- -ๅœจๆ‚จ็š„ iOS ๆˆ– Android ่ฎพๅค‡ไธŠ่ฟ่กŒ YOLOv5 ๆจกๅž‹[Ultralytics ๅบ”็”จ็จ‹ๅบ](https://ultralytics.com/app_install)! - - -Ultralytics mobile app - ##
่ดก็Œฎ
-ๆˆ‘ไปฌๅ–œๆฌขๆ‚จ็š„ๆ„่ง๏ผๆˆ‘ไปฌๅธŒๆœ›ๅฐฝๅฏ่ƒฝ็ฎ€ๅ•ๅ’Œ้€ๆ˜Žๅœฐไธบ YOLOv5 ๅšๅ‡บ่ดก็Œฎใ€‚่ฏท็œ‹ๆˆ‘ไปฌ็š„[ๆŠ•็จฟๆŒ‡ๅ—](CONTRIBUTING.md)ๅผ€ๅง‹๏ผŒๅนถๅกซๅ†™[YOLOv5่ฐƒๆŸฅ](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey)ๅ‘ๆˆ‘ไปฌๅ‘้€ๆ‚จ็š„ไฝ“้ชŒๅ้ฆˆใ€‚ๆ„Ÿ่ฐขๆˆ‘ไปฌๆ‰€ๆœ‰็š„่ดก็Œฎ่€…๏ผ +ๆˆ‘ไปฌๅ–œๆฌขๆ‚จ็š„ๆ„่งๆˆ–ๅปบ่ฎฎ๏ผๆˆ‘ไปฌๅธŒๆœ›ๅฐฝๅฏ่ƒฝ็ฎ€ๅ•ๅ’Œ้€ๆ˜Žๅœฐไธบ YOLOv5 ๅšๅ‡บ่ดก็Œฎใ€‚่ฏท็œ‹ๆˆ‘ไปฌ็š„ [ๆŠ•็จฟๆŒ‡ๅ—](https://docs.ultralytics.com/help/contributing/)๏ผŒๅนถๅกซๅ†™ [YOLOv5่ฐƒๆŸฅ](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) ๅ‘ๆˆ‘ไปฌๅ‘้€ๆ‚จ็š„ไฝ“้ชŒๅ้ฆˆใ€‚ๆ„Ÿ่ฐขๆˆ‘ไปฌๆ‰€ๆœ‰็š„่ดก็Œฎ่€…๏ผ -
+ + -##
ๆ‰ง็…ง
+##
License
-YOLOv5 ๅœจไธค็งไธๅŒ็š„่ฎธๅฏไธ‹ๅฏ็”จ๏ผš +YOLOv5 ๅœจไธค็งไธๅŒ็š„ License ไธ‹ๅฏ็”จ๏ผš -- **GPL-3.0 ่ฎธๅฏ่ฏ**๏ผš ็œ‹[ๆ‰ง็…ง](https://github.com/ultralytics/yolov5/blob/master/LICENSE)ๆ–‡ไปถ็š„่ฏฆ็ป†ไฟกๆฏใ€‚ -- **ไผไธšๆ‰ง็…ง**๏ผšๅœจๆฒกๆœ‰ GPL-3.0 ๅผ€ๆบ่ฆๆฑ‚็š„ๆƒ…ๅ†ตไธ‹ไธบๅ•†ไธšไบงๅ“ๅผ€ๅ‘ๆไพ›ๆ›ดๅคง็š„็ตๆดปๆ€งใ€‚ๅ…ธๅž‹็”จไพ‹ๆ˜ฏๅฐ† Ultralytics ่ฝฏไปถๅ’Œ AI ๆจกๅž‹ๅตŒๅ…ฅๅˆฐๅ•†ไธšไบงๅ“ๅ’Œๅบ”็”จ็จ‹ๅบไธญใ€‚ๅœจไปฅไธ‹ไฝ็ฝฎ็”ณ่ฏทไผไธš่ฎธๅฏ่ฏ[Ultralytics ่ฎธๅฏ](https://ultralytics.com/license). +- **AGPL-3.0 License**๏ผš ๆŸฅ็œ‹ [License](https://github.com/ultralytics/yolov5/blob/master/LICENSE) ๆ–‡ไปถ็š„่ฏฆ็ป†ไฟกๆฏใ€‚ +- **ไผไธšLicense**๏ผšๅœจๆฒกๆœ‰ AGPL-3.0 ๅผ€ๆบ่ฆๆฑ‚็š„ๆƒ…ๅ†ตไธ‹ไธบๅ•†ไธšไบงๅ“ๅผ€ๅ‘ๆไพ›ๆ›ดๅคง็š„็ตๆดปๆ€งใ€‚ๅ…ธๅž‹็”จไพ‹ๆ˜ฏๅฐ† Ultralytics ่ฝฏไปถๅ’Œ AI ๆจกๅž‹ๅตŒๅ…ฅๅˆฐๅ•†ไธšไบงๅ“ๅ’Œๅบ”็”จ็จ‹ๅบไธญใ€‚ๅœจไปฅไธ‹ไฝ็ฝฎ็”ณ่ฏทไผไธš่ฎธๅฏ่ฏ [Ultralytics ่ฎธๅฏ](https://ultralytics.com/license) ใ€‚ -##
ๆŽฅ่งฆ
+##
่”็ณปๆˆ‘ไปฌ
-ๅฏนไบŽ YOLOv5 ้”™่ฏฏๅ’ŒๅŠŸ่ƒฝ่ฏทๆฑ‚๏ผŒ่ฏท่ฎฟ้—ฎ[GitHub ้—ฎ้ข˜](https://github.com/ultralytics/yolov5/issues).ๅฆ‚้œ€ไธ“ไธšๆ”ฏๆŒ๏ผŒ่ฏท[่”็ณปๆˆ‘ไปฌ](https://ultralytics.com/contact). +ๅฏนไบŽ YOLOv5 ็š„้”™่ฏฏๆŠฅๅ‘Šๅ’ŒๅŠŸ่ƒฝ่ฏทๆฑ‚๏ผŒ่ฏท่ฎฟ้—ฎ [GitHub Issues](https://github.com/ultralytics/yolov5/issues)๏ผŒๅนถๅŠ ๅ…ฅๆˆ‘ไปฌ็š„ [Discord](https://discord.gg/n6cFeSPZdD) ็คพๅŒบ่ฟ›่กŒ้—ฎ้ข˜ๅ’Œ่ฎจ่ฎบ๏ผ
- - + + - - + + - - - - - + + - - - - - + + + + + - + + + +
-[tta]: https://github.com/ultralytics/yolov5/issues/303
+[tta]: https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation
diff --git a/benchmarks.py b/benchmarks.py
index 03d7d693a936..fc3073965ab3 100644
--- a/benchmarks.py
+++ b/benchmarks.py
@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
"""
Run YOLOv5 benchmarks on all supported export formats
@@ -164,6 +164,6 @@ def main(opt):
    test(**vars(opt)) if opt.test else run(**vars(opt))
-if __name__ == "__main__":
+if __name__ == '__main__':
    opt = parse_opt()
    main(opt)
diff --git a/classify/predict.py b/classify/predict.py
index 5a5edabda42c..c1b6650d4bd0 100644
--- a/classify/predict.py
+++ b/classify/predict.py
@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
"""
Run YOLOv5 classification inference on images, videos, directories, globs, YouTube, webcam, streams, etc.
@@ -179,7 +179,7 @@ def run(
                vid_writer[i].write(im0)
        # Print time (inference-only)
-        LOGGER.info(f"{s}{dt[1].dt * 1E3:.1f}ms")
+        LOGGER.info(f'{s}{dt[1].dt * 1E3:.1f}ms')
    # Print results
    t = tuple(x.t / seen * 1E3 for x in dt)  # speeds per image
@@ -221,6 +221,6 @@ def main(opt):
    run(**vars(opt))
-if __name__ == "__main__":
+if __name__ == '__main__':
    opt = parse_opt()
    main(opt)
diff --git a/classify/train.py b/classify/train.py
index a50845a4f781..8b8327f173ef 100644
--- a/classify/train.py
+++ b/classify/train.py
@@ -1,4 +1,4 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
"""
Train a YOLOv5 classifier model on a classification dataset
@@ -6,7 +6,7 @@
    $ python classify/train.py --model yolov5s-cls.pt --data imagenette160 --epochs 5 --img 224
Usage - Multi-GPU DDP training:
-    $ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3
+    $ python -m torch.distributed.run --nproc_per_node 4 --master_port 2022 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3
Datasets: --data mnist, fashion-mnist, cifar10, cifar100, imagenette, imagewoof, imagenet, or 'path/to/data'
YOLOv5-cls models: --model yolov5n-cls.pt, yolov5s-cls.pt, yolov5m-cls.pt, yolov5l-cls.pt, yolov5x-cls.pt
@@ -44,7 +44,7 @@
                           check_requirements, colorstr, download, increment_path, init_seeds, print_args, yaml_save)
from utils.loggers import GenericLogger
from utils.plots import imshow_cls
-from utils.torch_utils import (ModelEMA, model_info, reshape_classifier_output, select_device, smart_DDP,
+from utils.torch_utils import (ModelEMA, de_parallel, model_info, reshape_classifier_output, select_device, smart_DDP,
                               smart_optimizer, smartCrossEntropyLoss, torch_distributed_zero_first)
LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1))  # https://pytorch.org/docs/stable/elastic/run.html
@@ -78,7 +78,7 @@ def train(opt, device):
            LOGGER.info(f'\nDataset not found ⚠️, missing path {data_dir}, attempting download...')
            t = time.time()
            if str(data) == 'imagenet':
-                subprocess.run(f"bash {ROOT / 'data/scripts/get_imagenet.sh'}", shell=True, check=True)
+                subprocess.run(['bash', str(ROOT / 'data/scripts/get_imagenet.sh')], check=True)  # list argv without shell=True, so the script path is actually passed to bash
            else:
                url = f'https://github.com/ultralytics/yolov5/releases/download/v1.0/{data}.zip'
                download(url, dir=data_dir.parent)
@@ -220,11 +220,11 @@ def train(opt, device):
        # Log
        metrics = {
-            "train/loss": tloss,
-            f"{val}/loss": vloss,
-
"metrics/accuracy_top1": top1, - "metrics/accuracy_top5": top5, - "lr/0": optimizer.param_groups[0]['lr']} # learning rate + 'train/loss': tloss, + f'{val}/loss': vloss, + 'metrics/accuracy_top1': top1, + 'metrics/accuracy_top5': top5, + 'lr/0': optimizer.param_groups[0]['lr']} # learning rate logger.log_metrics(metrics, epoch) # Save model @@ -251,19 +251,19 @@ def train(opt, device): if RANK in {-1, 0} and final_epoch: LOGGER.info(f'\nTraining complete ({(time.time() - t0) / 3600:.3f} hours)' f"\nResults saved to {colorstr('bold', save_dir)}" - f"\nPredict: python classify/predict.py --weights {best} --source im.jpg" - f"\nValidate: python classify/val.py --weights {best} --data {data_dir}" - f"\nExport: python export.py --weights {best} --include onnx" + f'\nPredict: python classify/predict.py --weights {best} --source im.jpg' + f'\nValidate: python classify/val.py --weights {best} --data {data_dir}' + f'\nExport: python export.py --weights {best} --include onnx' f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{best}')" - f"\nVisualize: https://netron.app\n") + f'\nVisualize: https://netron.app\n') # Plot examples images, labels = (x[:25] for x in next(iter(testloader))) # first 25 images and labels pred = torch.max(ema.ema(images.to(device)), 1)[1] - file = imshow_cls(images, labels, pred, model.names, verbose=False, f=save_dir / 'test_images.jpg') + file = imshow_cls(images, labels, pred, de_parallel(model).names, verbose=False, f=save_dir / 'test_images.jpg') # Log results - meta = {"epochs": epochs, "top1_acc": best_fitness, "date": datetime.now().isoformat()} + meta = {'epochs': epochs, 'top1_acc': best_fitness, 'date': datetime.now().isoformat()} logger.log_images(file, name='Test Examples (true-predicted)', epoch=epoch) logger.log_model(best, epochs, metadata=meta) @@ -310,7 +310,7 @@ def main(opt): assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' torch.cuda.set_device(LOCAL_RANK) device = torch.device('cuda', LOCAL_RANK) - dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") + dist.init_process_group(backend='nccl' if dist.is_nccl_available() else 'gloo') # Parameters opt.save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok) # increment run @@ -328,6 +328,6 @@ def run(**kwargs): return opt -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) diff --git a/classify/tutorial.ipynb b/classify/tutorial.ipynb index c6f5d0d88a2d..ddf67c5519f5 100644 --- a/classify/tutorial.ipynb +++ b/classify/tutorial.ipynb @@ -9,7 +9,7 @@ "
\n", "\n", " \n", - " \n", + " \n", "\n", "\n", "
\n", @@ -36,7 +36,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -78,7 +78,7 @@ "source": [ "# 1. Predict\n", "\n", - "`classify/predict.py` runs YOLOv5 Classifcation inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/predict-cls`. Example inference sources are:\n", + "`classify/predict.py` runs YOLOv5 Classification inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/predict-cls`. Example inference sources are:\n", "\n", "```shell\n", "python classify/predict.py --source 0 # webcam\n", @@ -94,7 +94,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -149,7 +149,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -183,7 +183,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -1222,7 +1222,7 @@ "source": [ "# 3. Train\n", "\n", - "

\n", + "

\n", "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", "

\n", "\n", @@ -1269,7 +1269,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -1341,7 +1341,8 @@ }, "source": [ "## Comet Logging and Visualization ๐ŸŒŸ NEW\n", - "[Comet](https://bit.ly/yolov5-readme-comet) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", + "\n", + "[Comet](https://www.comet.com/site/lp/yolov5-with-comet/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://www.comet.com/docs/v2/guides/comet-dashboard/code-panels/about-panels/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes!\n", "\n", "Getting started is easy:\n", "```shell\n", @@ -1349,11 +1350,11 @@ "export COMET_API_KEY= # 2. paste API key\n", "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n", "```\n", - "\n", - "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://bit.ly/yolov5-colab-comet-docs). Get started by trying out the Comet Colab Notebook:\n", + "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://docs.ultralytics.com/yolov5/tutorials/comet_logging_integration). If you'd like to learn more about Comet, head over to our [documentation](https://www.comet.com/docs/v2/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab). Get started by trying out the Comet Colab Notebook:\n", "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", "\n", - "\"yolo-ui\"" + "\n", + "\"Comet" ] }, { @@ -1371,7 +1372,7 @@ "\n", "You'll get all the great expected features from an experiment manager: live updates, model upload, experiment comparison etc. but ClearML also tracks uncommitted changes and installed packages for example. Thanks to that ClearML Tasks (which is what we call experiments) are also reproducible on different machines! With only 1 extra line, we can schedule a YOLOv5 training task on a queue to be executed by any number of ClearML Agents (workers).\n", "\n", - "You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. This will help you keep track of your data without adding extra hassle. Explore the [ClearML Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) for details!\n", + "You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. 
This will help you keep track of your data without adding extra hassle. Explore the [ClearML Tutorial](https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration) for details!\n", "\n", "\n", "\"ClearML" @@ -1403,9 +1404,9 @@ "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n", "\n", "- **Notebooks** with free GPU: \"Run \"Open \"Open\n", - "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)\n", - "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)\n", - "- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) \"Docker\n" + "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/)\n", + "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/)\n", + "- **Docker Image**. See [Docker Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/docker_image_quickstart_tutorial/) \"Docker\n" ] }, { @@ -1476,4 +1477,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} \ No newline at end of file +} diff --git a/classify/val.py b/classify/val.py index 8657036fb2a2..643489d64d36 100644 --- a/classify/val.py +++ b/classify/val.py @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license """ Validate a trained YOLOv5 classification model on a classification dataset @@ -100,7 +100,7 @@ def run( pred, targets, loss, dt = [], [], 0, (Profile(), Profile(), Profile()) n = len(dataloader) # number of batches action = 'validating' if dataloader.dataset.root.stem == 'val' else 'testing' - desc = f"{pbar.desc[:-36]}{action:>36}" if pbar else f"{action}" + desc = f'{pbar.desc[:-36]}{action:>36}' if pbar else f'{action}' bar = tqdm(dataloader, desc, n, not training, bar_format=TQDM_BAR_FORMAT, position=0) with torch.cuda.amp.autocast(enabled=device.type != 'cpu'): for images, labels in bar: @@ -123,14 +123,14 @@ def run( top1, top5 = acc.mean(0).tolist() if pbar: - pbar.desc = f"{pbar.desc[:-36]}{loss:>12.3g}{top1:>12.3g}{top5:>12.3g}" + pbar.desc = f'{pbar.desc[:-36]}{loss:>12.3g}{top1:>12.3g}{top5:>12.3g}' if verbose: # all classes LOGGER.info(f"{'Class':>24}{'Images':>12}{'top1_acc':>12}{'top5_acc':>12}") LOGGER.info(f"{'all':>24}{targets.shape[0]:>12}{top1:>12.3g}{top5:>12.3g}") for i, c in model.names.items(): - aci = acc[targets == i] - top1i, top5i = aci.mean(0).tolist() - LOGGER.info(f"{c:>24}{aci.shape[0]:>12}{top1i:>12.3g}{top5i:>12.3g}") + acc_i = acc[targets == i] + top1i, top5i = acc_i.mean(0).tolist() + LOGGER.info(f'{c:>24}{acc_i.shape[0]:>12}{top1i:>12.3g}{top5i:>12.3g}') # Print results t = tuple(x.t / len(dataloader.dataset.samples) * 1E3 for x in dt) # speeds per image @@ -165,6 +165,6 @@ def main(opt): run(**vars(opt)) -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) diff --git a/data/Argoverse.yaml b/data/Argoverse.yaml index 558151dc849e..8a65407a6333 100644 --- a/data/Argoverse.yaml +++ b/data/Argoverse.yaml @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by 
Ultralytics, AGPL-3.0 license # Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ by Argo AI # Example usage: python train.py --data Argoverse.yaml # parent diff --git a/data/GlobalWheat2020.yaml b/data/GlobalWheat2020.yaml index 01812d031bc5..7b02ac95dd95 100644 --- a/data/GlobalWheat2020.yaml +++ b/data/GlobalWheat2020.yaml @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license # Global Wheat 2020 dataset http://www.global-wheat.com/ by University of Saskatchewan # Example usage: python train.py --data GlobalWheat2020.yaml # parent diff --git a/data/ImageNet.yaml b/data/ImageNet.yaml index 14f12950605f..5fdcb63f89a5 100644 --- a/data/ImageNet.yaml +++ b/data/ImageNet.yaml @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license # ImageNet-1k dataset https://www.image-net.org/index.php by Stanford University # Simplified class names from https://github.com/anishathalye/imagenet-simple-labels # Example usage: python classify/train.py --data imagenet diff --git a/data/Objects365.yaml b/data/Objects365.yaml index 05b26a1f4796..bb2aa34cd4a4 100644 --- a/data/Objects365.yaml +++ b/data/Objects365.yaml @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license # Objects365 dataset https://www.objects365.org/ by Megvii # Example usage: python train.py --data Objects365.yaml # parent diff --git a/data/SKU-110K.yaml b/data/SKU-110K.yaml index edae7171c660..a943eecdeee6 100644 --- a/data/SKU-110K.yaml +++ b/data/SKU-110K.yaml @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license # SKU-110K retail items dataset https://github.com/eg4000/SKU110K_CVPR19 by Trax Retail # Example usage: python train.py --data SKU-110K.yaml # parent diff --git a/data/VOC.yaml b/data/VOC.yaml index 27d38109c53a..104856f0c9c7 100644 --- a/data/VOC.yaml +++ b/data/VOC.yaml @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license # PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC by University of Oxford # Example usage: python train.py --data VOC.yaml # parent diff --git a/data/VisDrone.yaml b/data/VisDrone.yaml index a8bcf8e628ec..2a13904dc8dd 100644 --- a/data/VisDrone.yaml +++ b/data/VisDrone.yaml @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license # VisDrone2019-DET dataset https://github.com/VisDrone/VisDrone-Dataset by Tianjin University # Example usage: python train.py --data VisDrone.yaml # parent diff --git a/data/coco.yaml b/data/coco.yaml index d64dfc7fed76..ea32cb6269a3 100644 --- a/data/coco.yaml +++ b/data/coco.yaml @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license # COCO 2017 dataset http://cocodataset.org by Microsoft # Example usage: python train.py --data coco.yaml # parent diff --git a/data/coco128-seg.yaml b/data/coco128-seg.yaml index 5e81910cc456..0a2499c00a1a 100644 --- a/data/coco128-seg.yaml +++ b/data/coco128-seg.yaml @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license # COCO128-seg dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics # Example usage: python train.py --data coco128.yaml # parent diff --git a/data/coco128.yaml 
b/data/coco128.yaml index 12556736a571..0cb53120be2c 100644 --- a/data/coco128.yaml +++ b/data/coco128.yaml @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license # COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics # Example usage: python train.py --data coco128.yaml # parent diff --git a/data/hyps/hyp.Objects365.yaml b/data/hyps/hyp.Objects365.yaml index 74971740f7c7..c4b6e8051d7b 100644 --- a/data/hyps/hyp.Objects365.yaml +++ b/data/hyps/hyp.Objects365.yaml @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license # Hyperparameters for Objects365 training # python train.py --weights yolov5m.pt --data Objects365.yaml --evolve # See Hyperparameter Evolution tutorial for details https://github.com/ultralytics/yolov5#tutorials diff --git a/data/hyps/hyp.VOC.yaml b/data/hyps/hyp.VOC.yaml index 0aa4e7d9f8f5..ce20dbbddbdb 100644 --- a/data/hyps/hyp.VOC.yaml +++ b/data/hyps/hyp.VOC.yaml @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license # Hyperparameters for VOC training # python train.py --batch 128 --weights yolov5m6.pt --data VOC.yaml --epochs 50 --img 512 --hyp hyp.scratch-med.yaml --evolve # See Hyperparameter Evolution tutorial for details https://github.com/ultralytics/yolov5#tutorials diff --git a/data/hyps/hyp.no-augmentation.yaml b/data/hyps/hyp.no-augmentation.yaml new file mode 100644 index 000000000000..8da18150538b --- /dev/null +++ b/data/hyps/hyp.no-augmentation.yaml @@ -0,0 +1,35 @@ +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license +# Hyperparameters when using Albumentations frameworks +# python train.py --hyp hyp.no-augmentation.yaml +# See https://github.com/ultralytics/yolov5/pull/3882 for YOLOv5 + Albumentations Usage examples + +lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3) +lrf: 0.1 # final OneCycleLR learning rate (lr0 * lrf) +momentum: 0.937 # SGD momentum/Adam beta1 +weight_decay: 0.0005 # optimizer weight decay 5e-4 +warmup_epochs: 3.0 # warmup epochs (fractions ok) +warmup_momentum: 0.8 # warmup initial momentum +warmup_bias_lr: 0.1 # warmup initial bias lr +box: 0.05 # box loss gain +cls: 0.3 # cls loss gain +cls_pw: 1.0 # cls BCELoss positive_weight +obj: 0.7 # obj loss gain (scale with pixels) +obj_pw: 1.0 # obj BCELoss positive_weight +iou_t: 0.20 # IoU training threshold +anchor_t: 4.0 # anchor-multiple threshold +# anchors: 3 # anchors per output layer (0 to ignore) +# this parameters are all zero since we want to use albumentation framework +fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5) +hsv_h: 0 # image HSV-Hue augmentation (fraction) +hsv_s: 0 # image HSV-Saturation augmentation (fraction) +hsv_v: 0 # image HSV-Value augmentation (fraction) +degrees: 0.0 # image rotation (+/- deg) +translate: 0 # image translation (+/- fraction) +scale: 0 # image scale (+/- gain) +shear: 0 # image shear (+/- deg) +perspective: 0.0 # image perspective (+/- fraction), range 0-0.001 +flipud: 0.0 # image flip up-down (probability) +fliplr: 0.0 # image flip left-right (probability) +mosaic: 0.0 # image mosaic (probability) +mixup: 0.0 # image mixup (probability) +copy_paste: 0.0 # segment copy-paste (probability) diff --git a/data/hyps/hyp.scratch-high.yaml b/data/hyps/hyp.scratch-high.yaml index 123cc8407413..0a0f4ec21621 100644 --- a/data/hyps/hyp.scratch-high.yaml +++ b/data/hyps/hyp.scratch-high.yaml @@ -1,4 
+1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license # Hyperparameters for high-augmentation COCO training from scratch # python train.py --batch 32 --cfg yolov5m6.yaml --weights '' --data coco.yaml --img 1280 --epochs 300 # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials diff --git a/data/hyps/hyp.scratch-low.yaml b/data/hyps/hyp.scratch-low.yaml index b9ef1d55a3b6..9d722568f526 100644 --- a/data/hyps/hyp.scratch-low.yaml +++ b/data/hyps/hyp.scratch-low.yaml @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license # Hyperparameters for low-augmentation COCO training from scratch # python train.py --batch 64 --cfg yolov5n6.yaml --weights '' --data coco.yaml --img 640 --epochs 300 --linear # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials diff --git a/data/hyps/hyp.scratch-med.yaml b/data/hyps/hyp.scratch-med.yaml index d6867d7557ba..f6abb090bb04 100644 --- a/data/hyps/hyp.scratch-med.yaml +++ b/data/hyps/hyp.scratch-med.yaml @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license # Hyperparameters for medium-augmentation COCO training from scratch # python train.py --batch 32 --cfg yolov5m6.yaml --weights '' --data coco.yaml --img 1280 --epochs 300 # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials diff --git a/data/scripts/download_weights.sh b/data/scripts/download_weights.sh index 31e0a15569f2..e408959b32b2 100755 --- a/data/scripts/download_weights.sh +++ b/data/scripts/download_weights.sh @@ -1,5 +1,5 @@ #!/bin/bash -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license # Download latest models from https://github.com/ultralytics/yolov5/releases # Example usage: bash data/scripts/download_weights.sh # parent diff --git a/data/scripts/get_coco.sh b/data/scripts/get_coco.sh index 0d388b0a12a8..0bb276140b07 100755 --- a/data/scripts/get_coco.sh +++ b/data/scripts/get_coco.sh @@ -1,5 +1,5 @@ #!/bin/bash -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license # Download COCO 2017 dataset http://cocodataset.org # Example usage: bash data/scripts/get_coco.sh # parent diff --git a/data/scripts/get_coco128.sh b/data/scripts/get_coco128.sh index e7ddce89b115..2bfd6a2b32ed 100755 --- a/data/scripts/get_coco128.sh +++ b/data/scripts/get_coco128.sh @@ -1,5 +1,5 @@ #!/bin/bash -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license # Download COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) # Example usage: bash data/scripts/get_coco128.sh # parent diff --git a/data/scripts/get_imagenet.sh b/data/scripts/get_imagenet.sh index 6026d502e8f3..1df0fc7b66cc 100755 --- a/data/scripts/get_imagenet.sh +++ b/data/scripts/get_imagenet.sh @@ -1,5 +1,5 @@ #!/bin/bash -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license # Download ILSVRC2012 ImageNet dataset https://image-net.org # Example usage: bash data/scripts/get_imagenet.sh # parent diff --git a/data/xView.yaml b/data/xView.yaml index 770ab7870449..5e013ac9056d 100644 --- a/data/xView.yaml +++ b/data/xView.yaml @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license # DIUx xView 2018 Challenge 
https://challenge.xviewdataset.org by U.S. National Geospatial-Intelligence Agency (NGA) # -------- DOWNLOAD DATA MANUALLY and jar xf val_images.zip to 'datasets/xView' before running train command! -------- # Example usage: python train.py --data xView.yaml diff --git a/detect.py b/detect.py index 2d13401f78bd..64d6f149a614 100644 --- a/detect.py +++ b/detect.py @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license """ Run YOLOv5 detection inference on images, videos, directories, globs, YouTube, webcam, streams, etc. @@ -256,6 +256,6 @@ def main(opt): run(**vars(opt)) -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) diff --git a/export.py b/export.py index 928992903b0b..5f8e1c4821da 100644 --- a/export.py +++ b/export.py @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license """ Export a YOLOv5 PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit @@ -77,6 +77,25 @@ MACOS = platform.system() == 'Darwin' # macOS environment +class iOSModel(torch.nn.Module): + + def __init__(self, model, im): + super().__init__() + b, c, h, w = im.shape # batch, channel, height, width + self.model = model + self.nc = model.nc # number of classes + if w == h: + self.normalize = 1. / w + else: + self.normalize = torch.tensor([1. / w, 1. / h, 1. / w, 1. / h]) # broadcast (slower, smaller) + # np = model(im)[0].shape[1] # number of points + # self.normalize = torch.tensor([1. / w, 1. / h, 1. / w, 1. / h]).expand(np, 4) # explicit (faster, larger) + + def forward(self, x): + xywh, conf, cls = self.model(x)[0].squeeze().split((4, 1, self.nc), 1) + return cls * conf, xywh * self.normalize # confidence (3780, 80), coordinates (3780, 4) + + def export_formats(): # YOLOv5 export formats x = [ @@ -120,7 +139,7 @@ def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:' f = file.with_suffix('.torchscript') ts = torch.jit.trace(model, im, strict=False) - d = {"shape": im.shape, "stride": int(max(model.stride)), "names": model.names} + d = {'shape': im.shape, 'stride': int(max(model.stride)), 'names': model.names} extra_files = {'config.txt': json.dumps(d)} # torch._C.ExtraFilesMap() if optimize: # https://pytorch.org/tutorials/recipes/mobile_interpreter.html optimize_for_mobile(ts)._save_for_lite_interpreter(str(f), _extra_files=extra_files) @@ -132,7 +151,7 @@ def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:' @try_export def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr('ONNX:')): # YOLOv5 ONNX export - check_requirements('onnx') + check_requirements('onnx>=1.12.0') import onnx LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...') @@ -194,8 +213,15 @@ def export_openvino(file, metadata, half, prefix=colorstr('OpenVINO:')): LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...') f = str(file).replace('.pt', f'_openvino_model{os.sep}') - cmd = f"mo --input_model {file.with_suffix('.onnx')} --output_dir {f} --data_type {'FP16' if half else 'FP32'}" - subprocess.run(cmd.split(), check=True, env=os.environ) # export + args = [ + 'mo', + '--input_model', + str(file.with_suffix('.onnx')), + '--output_dir', + f, + '--data_type', + ('FP16' if half else 'FP32'),] + subprocess.run(args, check=True, env=os.environ) # export yaml_save(Path(f) / file.with_suffix('.yaml').name, metadata) # add metadata.yaml return f, None @@ 
-216,7 +242,7 @@ def export_paddle(model, im, file, metadata, prefix=colorstr('PaddlePaddle:')): @try_export -def export_coreml(model, im, file, int8, half, prefix=colorstr('CoreML:')): +def export_coreml(model, im, file, int8, half, nms, prefix=colorstr('CoreML:')): # YOLOv5 CoreML export check_requirements('coremltools') import coremltools as ct @@ -224,13 +250,15 @@ def export_coreml(model, im, file, int8, half, prefix=colorstr('CoreML:')): LOGGER.info(f'\n{prefix} starting export with coremltools {ct.__version__}...') f = file.with_suffix('.mlmodel') + if nms: + model = iOSModel(model, im) ts = torch.jit.trace(model, im, strict=False) # TorchScript model ct_model = ct.convert(ts, inputs=[ct.ImageType('image', shape=im.shape, scale=1 / 255, bias=[0, 0, 0])]) bits, mode = (8, 'kmeans_lut') if int8 else (16, 'linear') if half else (32, None) if bits < 32: if MACOS: # quantization only supported on macOS with warnings.catch_warnings(): - warnings.filterwarnings("ignore", category=DeprecationWarning) # suppress numpy==1.20 float warning + warnings.filterwarnings('ignore', category=DeprecationWarning) # suppress numpy==1.20 float warning ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode) else: print(f'{prefix} quantization only supported on macOS, skipping...') @@ -286,7 +314,7 @@ def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose if dynamic: if im.shape[0] <= 1: - LOGGER.warning(f"{prefix} WARNING โš ๏ธ --dynamic model requires maximum --batch-size argument") + LOGGER.warning(f'{prefix} WARNING โš ๏ธ --dynamic model requires maximum --batch-size argument') profile = builder.create_optimization_profile() for inp in inputs: profile.set_shape(inp.name, (1, *im.shape[1:]), (max(1, im.shape[0] // 2), *im.shape[1:]), im.shape) @@ -396,7 +424,7 @@ def export_tflite(keras_model, im, file, int8, data, nms, agnostic_nms, prefix=c converter.target_spec.supported_ops.append(tf.lite.OpsSet.SELECT_TF_OPS) tflite_model = converter.convert() - open(f, "wb").write(tflite_model) + open(f, 'wb').write(tflite_model) return f, None @@ -406,7 +434,7 @@ def export_edgetpu(file, prefix=colorstr('Edge TPU:')): cmd = 'edgetpu_compiler --version' help_url = 'https://coral.ai/docs/edgetpu/compiler/' assert platform.system() == 'Linux', f'export only supported on Linux. See {help_url}' - if subprocess.run(f'{cmd} >/dev/null', shell=True).returncode != 0: + if subprocess.run(f'{cmd} > /dev/null 2>&1', shell=True).returncode != 0: LOGGER.info(f'\n{prefix} export requires Edge TPU compiler. 
Attempting install from {help_url}') sudo = subprocess.run('sudo --version >/dev/null', shell=True).returncode == 0 # sudo installed on system for c in ( @@ -420,13 +448,20 @@ def export_edgetpu(file, prefix=colorstr('Edge TPU:')): f = str(file).replace('.pt', '-int8_edgetpu.tflite') # Edge TPU model f_tfl = str(file).replace('.pt', '-int8.tflite') # TFLite model - cmd = f"edgetpu_compiler -s -d -k 10 --out_dir {file.parent} {f_tfl}" - subprocess.run(cmd.split(), check=True) + subprocess.run([ + 'edgetpu_compiler', + '-s', + '-d', + '-k', + '10', + '--out_dir', + str(file.parent), + f_tfl,], check=True) return f, None @try_export -def export_tfjs(file, prefix=colorstr('TensorFlow.js:')): +def export_tfjs(file, int8, prefix=colorstr('TensorFlow.js:')): # YOLOv5 TensorFlow.js export check_requirements('tensorflowjs') import tensorflowjs as tfjs @@ -436,9 +471,14 @@ def export_tfjs(file, prefix=colorstr('TensorFlow.js:')): f_pb = file.with_suffix('.pb') # *.pb path f_json = f'{f}/model.json' # *.json path - cmd = f'tensorflowjs_converter --input_format=tf_frozen_model ' \ - f'--output_node_names=Identity,Identity_1,Identity_2,Identity_3 {f_pb} {f}' - subprocess.run(cmd.split()) + args = [ + 'tensorflowjs_converter', + '--input_format=tf_frozen_model', + '--quantize_uint8' if int8 else '', + '--output_node_names=Identity,Identity_1,Identity_2,Identity_3', + str(f_pb), + str(f),] + subprocess.run([arg for arg in args if arg], check=True) json = Path(f_json).read_text() with open(f_json, 'w') as j: # sort JSON Identity_* in ascending order @@ -487,6 +527,129 @@ def add_tflite_metadata(file, metadata, num_outputs): tmp_file.unlink() +def pipeline_coreml(model, im, file, names, y, prefix=colorstr('CoreML Pipeline:')): + # YOLOv5 CoreML pipeline + import coremltools as ct + from PIL import Image + + print(f'{prefix} starting pipeline with coremltools {ct.__version__}...') + batch_size, ch, h, w = list(im.shape) # BCHW + t = time.time() + + # Output shapes + spec = model.get_spec() + out0, out1 = iter(spec.description.output) + if platform.system() == 'Darwin': + img = Image.new('RGB', (w, h)) # img(192 width, 320 height) + # img = torch.zeros((*opt.img_size, 3)).numpy() # img size(320,192,3) iDetection + out = model.predict({'image': img}) + out0_shape, out1_shape = out[out0.name].shape, out[out1.name].shape + else: # linux and windows can not run model.predict(), get sizes from pytorch output y + s = tuple(y[0].shape) + out0_shape, out1_shape = (s[1], s[2] - 5), (s[1], 4) # (3780, 80), (3780, 4) + + # Checks + nx, ny = spec.description.input[0].type.imageType.width, spec.description.input[0].type.imageType.height + na, nc = out0_shape + # na, nc = out0.type.multiArrayType.shape # number anchors, classes + assert len(names) == nc, f'{len(names)} names found for nc={nc}' # check + + # Define output shapes (missing) + out0.type.multiArrayType.shape[:] = out0_shape # (3780, 80) + out1.type.multiArrayType.shape[:] = out1_shape # (3780, 4) + # spec.neuralNetwork.preprocessing[0].featureName = '0' + + # Flexible input shapes + # from coremltools.models.neural_network import flexible_shape_utils + # s = [] # shapes + # s.append(flexible_shape_utils.NeuralNetworkImageSize(320, 192)) + # s.append(flexible_shape_utils.NeuralNetworkImageSize(640, 384)) # (height, width) + # flexible_shape_utils.add_enumerated_image_sizes(spec, feature_name='image', sizes=s) + # r = flexible_shape_utils.NeuralNetworkImageSizeRange() # shape ranges + # r.add_height_range((192, 640)) + # r.add_width_range((192, 640)) + # 
flexible_shape_utils.update_image_size_range(spec, feature_name='image', size_range=r) + + # Print + print(spec.description) + + # Model from spec + model = ct.models.MLModel(spec) + + # 3. Create NMS protobuf + nms_spec = ct.proto.Model_pb2.Model() + nms_spec.specificationVersion = 5 + for i in range(2): + decoder_output = model._spec.description.output[i].SerializeToString() + nms_spec.description.input.add() + nms_spec.description.input[i].ParseFromString(decoder_output) + nms_spec.description.output.add() + nms_spec.description.output[i].ParseFromString(decoder_output) + + nms_spec.description.output[0].name = 'confidence' + nms_spec.description.output[1].name = 'coordinates' + + output_sizes = [nc, 4] + for i in range(2): + ma_type = nms_spec.description.output[i].type.multiArrayType + ma_type.shapeRange.sizeRanges.add() + ma_type.shapeRange.sizeRanges[0].lowerBound = 0 + ma_type.shapeRange.sizeRanges[0].upperBound = -1 + ma_type.shapeRange.sizeRanges.add() + ma_type.shapeRange.sizeRanges[1].lowerBound = output_sizes[i] + ma_type.shapeRange.sizeRanges[1].upperBound = output_sizes[i] + del ma_type.shape[:] + + nms = nms_spec.nonMaximumSuppression + nms.confidenceInputFeatureName = out0.name # 1x507x80 + nms.coordinatesInputFeatureName = out1.name # 1x507x4 + nms.confidenceOutputFeatureName = 'confidence' + nms.coordinatesOutputFeatureName = 'coordinates' + nms.iouThresholdInputFeatureName = 'iouThreshold' + nms.confidenceThresholdInputFeatureName = 'confidenceThreshold' + nms.iouThreshold = 0.45 + nms.confidenceThreshold = 0.25 + nms.pickTop.perClass = True + nms.stringClassLabels.vector.extend(names.values()) + nms_model = ct.models.MLModel(nms_spec) + + # 4. Pipeline models together + pipeline = ct.models.pipeline.Pipeline(input_features=[('image', ct.models.datatypes.Array(3, ny, nx)), + ('iouThreshold', ct.models.datatypes.Double()), + ('confidenceThreshold', ct.models.datatypes.Double())], + output_features=['confidence', 'coordinates']) + pipeline.add_model(model) + pipeline.add_model(nms_model) + + # Correct datatypes + pipeline.spec.description.input[0].ParseFromString(model._spec.description.input[0].SerializeToString()) + pipeline.spec.description.output[0].ParseFromString(nms_model._spec.description.output[0].SerializeToString()) + pipeline.spec.description.output[1].ParseFromString(nms_model._spec.description.output[1].SerializeToString()) + + # Update metadata + pipeline.spec.specificationVersion = 5 + pipeline.spec.description.metadata.versionString = 'https://github.com/ultralytics/yolov5' + pipeline.spec.description.metadata.shortDescription = 'https://github.com/ultralytics/yolov5' + pipeline.spec.description.metadata.author = 'glenn.jocher@ultralytics.com' + pipeline.spec.description.metadata.license = 'https://github.com/ultralytics/yolov5/blob/master/LICENSE' + pipeline.spec.description.metadata.userDefined.update({ + 'classes': ','.join(names.values()), + 'iou_threshold': str(nms.iouThreshold), + 'confidence_threshold': str(nms.confidenceThreshold)}) + + # Save the model + f = file.with_suffix('.mlmodel') # filename + model = ct.models.MLModel(pipeline.spec) + model.input_description['image'] = 'Input image' + model.input_description['iouThreshold'] = f'(optional) IOU Threshold override (default: {nms.iouThreshold})' + model.input_description['confidenceThreshold'] = \ + f'(optional) Confidence Threshold override (default: {nms.confidenceThreshold})' + model.output_description['confidence'] = 'Boxes ร— Class confidence (see user-defined metadata 
"classes")' + model.output_description['coordinates'] = 'Boxes ร— [x, y, width, height] (relative to image size)' + model.save(f) # pipelined + print(f'{prefix} pipeline success ({time.time() - t:.2f}s), saved as {f} ({file_size(f):.1f} MB)') + + @smart_inference_mode() def run( data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' @@ -565,7 +728,9 @@ def run( if xml: # OpenVINO f[3], _ = export_openvino(file, metadata, half) if coreml: # CoreML - f[4], _ = export_coreml(model, im, file, int8, half) + f[4], ct_model = export_coreml(model, im, file, int8, half, nms) + if nms: + pipeline_coreml(ct_model, im, file, model.names, y) if any((saved_model, pb, tflite, edgetpu, tfjs)): # TensorFlow formats assert not tflite or not tfjs, 'TFLite and TF.js models must be exported separately, please pass only one type.' assert not isinstance(model, ClassificationModel), 'ClassificationModel export to TF formats not yet supported.' @@ -588,7 +753,7 @@ def run( f[8], _ = export_edgetpu(file) add_tflite_metadata(f[8] or f[7], metadata, num_outputs=len(s_model.outputs)) if tfjs: - f[9], _ = export_tfjs(file) + f[9], _ = export_tfjs(file, int8) if paddle: # PaddlePaddle f[10], _ = export_paddle(model, im, file, metadata) @@ -599,18 +764,18 @@ def run( det &= not seg # segmentation models inherit from SegmentationModel(DetectionModel) dir = Path('segment' if seg else 'classify' if cls else '') h = '--half' if half else '' # --half FP16 inference arg - s = "# WARNING โš ๏ธ ClassificationModel not yet supported for PyTorch Hub AutoShape inference" if cls else \ - "# WARNING โš ๏ธ SegmentationModel not yet supported for PyTorch Hub AutoShape inference" if seg else '' + s = '# WARNING โš ๏ธ ClassificationModel not yet supported for PyTorch Hub AutoShape inference' if cls else \ + '# WARNING โš ๏ธ SegmentationModel not yet supported for PyTorch Hub AutoShape inference' if seg else '' LOGGER.info(f'\nExport complete ({time.time() - t:.1f}s)' f"\nResults saved to {colorstr('bold', file.parent.resolve())}" f"\nDetect: python {dir / ('detect.py' if det else 'predict.py')} --weights {f[-1]} {h}" f"\nValidate: python {dir / 'val.py'} --weights {f[-1]} {h}" f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{f[-1]}') {s}" - f"\nVisualize: https://netron.app") + f'\nVisualize: https://netron.app') return f # return list of exported files/dirs -def parse_opt(): +def parse_opt(known=False): parser = argparse.ArgumentParser() parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model.pt path(s)') @@ -624,7 +789,7 @@ def parse_opt(): parser.add_argument('--int8', action='store_true', help='CoreML/TF INT8 quantization') parser.add_argument('--dynamic', action='store_true', help='ONNX/TF/TensorRT: dynamic axes') parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model') - parser.add_argument('--opset', type=int, default=12, help='ONNX: opset version') + parser.add_argument('--opset', type=int, default=17, help='ONNX: opset version') parser.add_argument('--verbose', action='store_true', help='TensorRT: verbose log') parser.add_argument('--workspace', type=int, default=4, help='TensorRT: workspace size (GB)') parser.add_argument('--nms', action='store_true', help='TF: add NMS to model') @@ -638,7 +803,7 @@ def parse_opt(): nargs='+', default=['torchscript'], help='torchscript, onnx, openvino, engine, coreml, saved_model, pb, tflite, edgetpu, 
tfjs, paddle') - opt = parser.parse_args() + opt = parser.parse_known_args()[0] if known else parser.parse_args() print_args(vars(opt)) return opt @@ -648,6 +813,6 @@ def main(opt): run(**vars(opt)) -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) diff --git a/hubconf.py b/hubconf.py index 41af8e39d14d..73caf06685da 100644 --- a/hubconf.py +++ b/hubconf.py @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license """ PyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5 @@ -73,7 +73,7 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo return model.to(device) except Exception as e: - help_url = 'https://github.com/ultralytics/yolov5/issues/36' + help_url = 'https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading' s = f'{e}. Cache may be out of date, try `force_reload=True` or see {help_url} for help.' raise Exception(s) from e diff --git a/models/common.py b/models/common.py index 8b5ec1c786d8..b1c24ad378dc 100644 --- a/models/common.py +++ b/models/common.py @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license """ Common modules """ @@ -21,14 +21,13 @@ import requests import torch import torch.nn as nn -from IPython.display import display from PIL import Image from torch.cuda import amp from utils import TryExcept from utils.dataloaders import exif_transpose, letterbox from utils.general import (LOGGER, ROOT, Profile, check_requirements, check_suffix, check_version, colorstr, - increment_path, is_notebook, make_divisible, non_max_suppression, scale_boxes, xywh2xyxy, + increment_path, is_jupyter, make_divisible, non_max_suppression, scale_boxes, xywh2xyxy, xyxy2xywh, yaml_load) from utils.plots import Annotator, colors, save_one_box from utils.torch_utils import copy_attr, smart_inference_mode @@ -380,11 +379,11 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, w = next(Path(w).glob('*.xml')) # get *.xml file from *_openvino_model dir network = ie.read_model(model=w, weights=Path(w).with_suffix('.bin')) if network.get_parameters()[0].get_layout().empty: - network.get_parameters()[0].set_layout(Layout("NCHW")) + network.get_parameters()[0].set_layout(Layout('NCHW')) batch_dim = get_batch(network) if batch_dim.is_static: batch_size = batch_dim.get_length() - executable_network = ie.compile_model(network, device_name="CPU") # device_name="MYRIAD" for Intel NCS2 + executable_network = ie.compile_model(network, device_name='CPU') # device_name="MYRIAD" for Intel NCS2 stride, names = self._load_metadata(Path(w).with_suffix('.yaml')) # load metadata elif engine: # TensorRT LOGGER.info(f'Loading {w} for TensorRT inference...') @@ -431,7 +430,7 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, import tensorflow as tf def wrap_frozen_graph(gd, inputs, outputs): - x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=""), []) # wrapped + x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=''), []) # wrapped ge = x.graph.as_graph_element return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs)) @@ -445,7 +444,7 @@ def gd_outputs(gd): gd = tf.Graph().as_graph_def() # TF GraphDef with open(w, 'rb') as f: gd.ParseFromString(f.read()) - frozen_func = wrap_frozen_graph(gd, inputs="x:0", outputs=gd_outputs(gd)) + frozen_func = wrap_frozen_graph(gd, inputs='x:0', 
outputs=gd_outputs(gd)) elif tflite or edgetpu: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu from tflite_runtime.interpreter import Interpreter, load_delegate @@ -467,9 +466,9 @@ def gd_outputs(gd): output_details = interpreter.get_output_details() # outputs # load metadata with contextlib.suppress(zipfile.BadZipFile): - with zipfile.ZipFile(w, "r") as model: + with zipfile.ZipFile(w, 'r') as model: meta_file = model.namelist()[0] - meta = ast.literal_eval(model.read(meta_file).decode("utf-8")) + meta = ast.literal_eval(model.read(meta_file).decode('utf-8')) stride, names = int(meta['stride']), meta['names'] elif tfjs: # TF.js raise NotImplementedError('ERROR: YOLOv5 TF.js inference is not supported') @@ -491,7 +490,7 @@ def gd_outputs(gd): check_requirements('tritonclient[all]') from utils.triton import TritonRemoteModel model = TritonRemoteModel(url=w) - nhwc = model.runtime.startswith("tensorflow") + nhwc = model.runtime.startswith('tensorflow') else: raise NotImplementedError(f'ERROR: {w} is not a supported format') @@ -608,7 +607,7 @@ def _model_type(p='path/to/model.pt'): url = urlparse(p) # if url may be Triton inference server types = [s in Path(p).name for s in sf] types[8] &= not types[9] # tflite &= not edgetpu - triton = not any(types) and all([any(s in url.scheme for s in ["http", "grpc"]), url.netloc]) + triton = not any(types) and all([any(s in url.scheme for s in ['http', 'grpc']), url.netloc]) return types + [triton] @staticmethod @@ -767,7 +766,11 @@ def _run(self, pprint=False, show=False, save=False, crop=False, render=False, l im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np if show: - display(im) if is_notebook() else im.show(self.files[i]) + if is_jupyter(): + from IPython.display import display + display(im) + else: + im.show(self.files[i]) if save: f = self.files[i] im.save(save_dir / f) # save @@ -846,12 +849,19 @@ def forward(self, x): class Classify(nn.Module): # YOLOv5 classification head, i.e. 
x(b,c1,20,20) to x(b,c2) - def __init__(self, c1, c2, k=1, s=1, p=None, g=1): # ch_in, ch_out, kernel, stride, padding, groups + def __init__(self, + c1, + c2, + k=1, + s=1, + p=None, + g=1, + dropout_p=0.0): # ch_in, ch_out, kernel, stride, padding, groups, dropout probability super().__init__() c_ = 1280 # efficientnet_b0 size self.conv = Conv(c1, c_, k, s, autopad(k, p), g) self.pool = nn.AdaptiveAvgPool2d(1) # to x(b,c_,1,1) - self.drop = nn.Dropout(p=0.0, inplace=True) + self.drop = nn.Dropout(p=dropout_p, inplace=True) self.linear = nn.Linear(c_, c2) # to x(b,c2) def forward(self, x): diff --git a/models/experimental.py b/models/experimental.py index 02d35b9ebd11..d60d1808da11 100644 --- a/models/experimental.py +++ b/models/experimental.py @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license """ Experimental modules """ diff --git a/models/hub/anchors.yaml b/models/hub/anchors.yaml index e4d7beb06e07..df2f668b022c 100644 --- a/models/hub/anchors.yaml +++ b/models/hub/anchors.yaml @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license # Default anchors for COCO data diff --git a/models/hub/yolov3-spp.yaml b/models/hub/yolov3-spp.yaml index c66982158ce8..4a71ed405277 100644 --- a/models/hub/yolov3-spp.yaml +++ b/models/hub/yolov3-spp.yaml @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov3-tiny.yaml b/models/hub/yolov3-tiny.yaml index b28b44315248..50b47e282df4 100644 --- a/models/hub/yolov3-tiny.yaml +++ b/models/hub/yolov3-tiny.yaml @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov3.yaml b/models/hub/yolov3.yaml index d1ef91290a8d..c5e21098f893 100644 --- a/models/hub/yolov3.yaml +++ b/models/hub/yolov3.yaml @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5-bifpn.yaml b/models/hub/yolov5-bifpn.yaml index 504815f5cfa0..9dbdd4ee0580 100644 --- a/models/hub/yolov5-bifpn.yaml +++ b/models/hub/yolov5-bifpn.yaml @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5-fpn.yaml b/models/hub/yolov5-fpn.yaml index a23e9c6fbf9f..2292eb1185a0 100644 --- a/models/hub/yolov5-fpn.yaml +++ b/models/hub/yolov5-fpn.yaml @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5-p2.yaml b/models/hub/yolov5-p2.yaml index 554117dda59a..2c0ae44841cc 100644 --- a/models/hub/yolov5-p2.yaml +++ b/models/hub/yolov5-p2.yaml @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5-p34.yaml b/models/hub/yolov5-p34.yaml index dbf0f850083e..60ae3b4b6f30 100644 --- a/models/hub/yolov5-p34.yaml +++ b/models/hub/yolov5-p34.yaml @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5-p6.yaml b/models/hub/yolov5-p6.yaml index 
a17202f22044..a9e1b5f90c72 100644 --- a/models/hub/yolov5-p6.yaml +++ b/models/hub/yolov5-p6.yaml @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5-p7.yaml b/models/hub/yolov5-p7.yaml index edd7d13a34a6..a502412f0887 100644 --- a/models/hub/yolov5-p7.yaml +++ b/models/hub/yolov5-p7.yaml @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5-panet.yaml b/models/hub/yolov5-panet.yaml index ccfbf900691c..5595e2573823 100644 --- a/models/hub/yolov5-panet.yaml +++ b/models/hub/yolov5-panet.yaml @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5l6.yaml b/models/hub/yolov5l6.yaml index 632c2cb699e3..651dbb0251ae 100644 --- a/models/hub/yolov5l6.yaml +++ b/models/hub/yolov5l6.yaml @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5m6.yaml b/models/hub/yolov5m6.yaml index ecc53fd68ba6..059b12b46929 100644 --- a/models/hub/yolov5m6.yaml +++ b/models/hub/yolov5m6.yaml @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5n6.yaml b/models/hub/yolov5n6.yaml index 0c0c71d32551..5052e7cbfc8b 100644 --- a/models/hub/yolov5n6.yaml +++ b/models/hub/yolov5n6.yaml @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5s-LeakyReLU.yaml b/models/hub/yolov5s-LeakyReLU.yaml index 3a179bf3311c..0368a78dcbb4 100644 --- a/models/hub/yolov5s-LeakyReLU.yaml +++ b/models/hub/yolov5s-LeakyReLU.yaml @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5s-ghost.yaml b/models/hub/yolov5s-ghost.yaml index ff9519c3f1aa..ce5238fa5dfc 100644 --- a/models/hub/yolov5s-ghost.yaml +++ b/models/hub/yolov5s-ghost.yaml @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5s-transformer.yaml b/models/hub/yolov5s-transformer.yaml index 100d7c447527..f5267163453c 100644 --- a/models/hub/yolov5s-transformer.yaml +++ b/models/hub/yolov5s-transformer.yaml @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5s6.yaml b/models/hub/yolov5s6.yaml index a28fb559482b..2f39b0379e74 100644 --- a/models/hub/yolov5s6.yaml +++ b/models/hub/yolov5s6.yaml @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5x6.yaml b/models/hub/yolov5x6.yaml index ba795c4aad31..e1edbcb8634c 100644 --- a/models/hub/yolov5x6.yaml +++ b/models/hub/yolov5x6.yaml @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff 
--git a/models/segment/yolov5l-seg.yaml b/models/segment/yolov5l-seg.yaml index 4782de11dd2d..71f80cc08054 100644 --- a/models/segment/yolov5l-seg.yaml +++ b/models/segment/yolov5l-seg.yaml @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/segment/yolov5m-seg.yaml b/models/segment/yolov5m-seg.yaml index f73d1992ac19..2b8e1db2818a 100644 --- a/models/segment/yolov5m-seg.yaml +++ b/models/segment/yolov5m-seg.yaml @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes @@ -45,4 +45,4 @@ head: [-1, 3, C3, [1024, False]], # 23 (P5/32-large) [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Detect(P3, P4, P5) - ] \ No newline at end of file + ] diff --git a/models/segment/yolov5n-seg.yaml b/models/segment/yolov5n-seg.yaml index c28225ab4a50..1f67f8e3dfb0 100644 --- a/models/segment/yolov5n-seg.yaml +++ b/models/segment/yolov5n-seg.yaml @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/segment/yolov5s-seg.yaml b/models/segment/yolov5s-seg.yaml index 7cbdb36b425c..2ff2524ca9b5 100644 --- a/models/segment/yolov5s-seg.yaml +++ b/models/segment/yolov5s-seg.yaml @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes @@ -45,4 +45,4 @@ head: [-1, 3, C3, [1024, False]], # 23 (P5/32-large) [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Detect(P3, P4, P5) - ] \ No newline at end of file + ] diff --git a/models/segment/yolov5x-seg.yaml b/models/segment/yolov5x-seg.yaml index 5d0c4524a99c..589f65c76f95 100644 --- a/models/segment/yolov5x-seg.yaml +++ b/models/segment/yolov5x-seg.yaml @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/tf.py b/models/tf.py index 3f3dc8dbe7e7..bc0a465d7edd 100644 --- a/models/tf.py +++ b/models/tf.py @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license """ TensorFlow, Keras and TFLite versions of YOLOv5 Authored by https://github.com/zldrobit in PR https://github.com/ultralytics/yolov5/pull/1127 @@ -356,7 +356,7 @@ class TFUpsample(keras.layers.Layer): # TF version of torch.nn.Upsample() def __init__(self, size, scale_factor, mode, w=None): # warning: all arguments needed including 'w' super().__init__() - assert scale_factor % 2 == 0, "scale_factor must be multiple of 2" + assert scale_factor % 2 == 0, 'scale_factor must be multiple of 2' self.upsample = lambda x: tf.image.resize(x, (x.shape[1] * scale_factor, x.shape[2] * scale_factor), mode) # self.upsample = keras.layers.UpSampling2D(size=scale_factor, interpolation=mode) # with default arguments: align_corners=False, half_pixel_centers=False @@ -371,7 +371,7 @@ class TFConcat(keras.layers.Layer): # TF version of torch.concat() def __init__(self, dimension=1, w=None): super().__init__() - assert dimension == 1, "convert only NCHW to NHWC concat" + assert dimension == 1, 'convert only NCHW to NHWC concat' self.d = 3 def call(self, inputs): @@ -523,17 +523,17 @@ def _nms(x, topk_all=100, iou_thres=0.45, conf_thres=0.25): # agnostic NMS selected_boxes = tf.gather(boxes, selected_inds) padded_boxes = 
tf.pad(selected_boxes, paddings=[[0, topk_all - tf.shape(selected_boxes)[0]], [0, 0]], - mode="CONSTANT", + mode='CONSTANT', constant_values=0.0) selected_scores = tf.gather(scores_inp, selected_inds) padded_scores = tf.pad(selected_scores, paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]], - mode="CONSTANT", + mode='CONSTANT', constant_values=-1.0) selected_classes = tf.gather(class_inds, selected_inds) padded_classes = tf.pad(selected_classes, paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]], - mode="CONSTANT", + mode='CONSTANT', constant_values=-1.0) valid_detections = tf.shape(selected_inds)[0] return padded_boxes, padded_scores, padded_classes, valid_detections @@ -603,6 +603,6 @@ def main(opt): run(**vars(opt)) -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) diff --git a/models/yolo.py b/models/yolo.py index ed21c067ee93..18d2542bfb48 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license """ YOLO-specific modules diff --git a/models/yolov5l.yaml b/models/yolov5l.yaml index ce8a5de46a27..31362f876932 100644 --- a/models/yolov5l.yaml +++ b/models/yolov5l.yaml @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/yolov5m.yaml b/models/yolov5m.yaml index ad13ab370ff6..a76900c5a2e2 100644 --- a/models/yolov5m.yaml +++ b/models/yolov5m.yaml @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/yolov5n.yaml b/models/yolov5n.yaml index 8a28a40d6e20..aba96cfc54f4 100644 --- a/models/yolov5n.yaml +++ b/models/yolov5n.yaml @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/yolov5s.yaml b/models/yolov5s.yaml index f35beabb1e1c..5d05364c4936 100644 --- a/models/yolov5s.yaml +++ b/models/yolov5s.yaml @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/yolov5x.yaml b/models/yolov5x.yaml index f617a027d8a2..4bdd93915da5 100644 --- a/models/yolov5x.yaml +++ b/models/yolov5x.yaml @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/requirements.txt b/requirements.txt index 85eb839df8a0..65924c9feec4 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,10 +1,9 @@ -# YOLOv5 ๐Ÿš€ requirements +# YOLOv5 requirements # Usage: pip install -r requirements.txt # Base ------------------------------------------------------------------------ -gitpython -ipython # interactive notebook -matplotlib>=3.2.2 +gitpython>=3.1.30 +matplotlib>=3.3 numpy>=1.18.5 opencv-python>=4.1.1 Pillow>=7.1.2 @@ -19,7 +18,7 @@ tqdm>=4.64.0 # protobuf<=3.20.1 # https://github.com/ultralytics/yolov5/issues/8012 # Logging --------------------------------------------------------------------- -tensorboard>=2.4.1 +# tensorboard>=2.4.1 # clearml>=1.2.0 # comet @@ -29,21 +28,22 @@ seaborn>=0.11.0 # Export ---------------------------------------------------------------------- # coremltools>=6.0 # CoreML export -# onnx>=1.9.0 # ONNX export +# onnx>=1.10.0 # ONNX export # onnx-simplifier>=0.4.1 # ONNX simplifier # 
nvidia-pyindex # TensorRT export # nvidia-tensorrt # TensorRT export # scikit-learn<=1.1.2 # CoreML quantization -# tensorflow>=2.4.1 # TF exports (-cpu, -aarch64, -macos) +# tensorflow>=2.4.0 # TF exports (-cpu, -aarch64, -macos) # tensorflowjs>=3.9.0 # TF.js export # openvino-dev # OpenVINO export # Deploy ---------------------------------------------------------------------- +setuptools>=65.5.1 # Snyk vulnerability fix # tritonclient[all]~=2.24.0 # Extras ---------------------------------------------------------------------- +# ipython # interactive notebook # mss # screenshots # albumentations>=1.0.3 -# pycocotools>=2.0 # COCO mAP -# roboflow +# pycocotools>=2.0.6 # COCO mAP # ultralytics # HUB https://hub.ultralytics.com diff --git a/segment/predict.py b/segment/predict.py index e9093baa1cc7..4d4d6036358a 100644 --- a/segment/predict.py +++ b/segment/predict.py @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license """ Run YOLOv5 segmentation inference on images, videos, directories, streams, etc. @@ -279,6 +279,6 @@ def main(opt): run(**vars(opt)) -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) diff --git a/segment/train.py b/segment/train.py index 3f32d2100a75..073fc742005b 100644 --- a/segment/train.py +++ b/segment/train.py @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license """ Train a YOLOv5 segment model on a segment dataset Models and datasets download automatically from the latest YOLOv5 release. @@ -12,13 +12,14 @@ Models: https://github.com/ultralytics/yolov5/tree/master/models Datasets: https://github.com/ultralytics/yolov5/tree/master/data -Tutorial: https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data +Tutorial: https://docs.ultralytics.com/yolov5/tutorials/train_custom_data """ import argparse import math import os import random +import subprocess import sys import time from copy import deepcopy @@ -138,7 +139,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio # Batch size if RANK == -1 and batch_size == -1: # single-GPU only, estimate best batch size batch_size = check_train_batch_size(model, imgsz, amp) - logger.update_params({"batch_size": batch_size}) + logger.update_params({'batch_size': batch_size}) # loggers.on_params_update({"batch_size": batch_size}) # Optimizer @@ -166,8 +167,10 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio # DP mode if cuda and RANK == -1 and torch.cuda.device_count() > 1: - LOGGER.warning('WARNING โš ๏ธ DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n' - 'See Multi-GPU Tutorial at https://github.com/ultralytics/yolov5/issues/475 to get started.') + LOGGER.warning( + 'WARNING โš ๏ธ DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n' + 'See Multi-GPU Tutorial at https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training to get started.' 
+ ) model = torch.nn.DataParallel(model) # SyncBatchNorm @@ -298,7 +301,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio # Multi-scale if opt.multi_scale: - sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size + sz = random.randrange(int(imgsz * 0.5), int(imgsz * 1.5) + gs) // gs * gs # size sf = sz / max(imgs.shape[2:]) # scale factor if sf != 1: ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple) @@ -340,10 +343,10 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio # Mosaic plots if plots: if ni < 3: - plot_images_and_masks(imgs, targets, masks, paths, save_dir / f"train_batch{ni}.jpg") + plot_images_and_masks(imgs, targets, masks, paths, save_dir / f'train_batch{ni}.jpg') if ni == 10: files = sorted(save_dir.glob('train*.jpg')) - logger.log_images(files, "Mosaics", epoch) + logger.log_images(files, 'Mosaics', epoch) # end batch ------------------------------------------------------------------------------------------------ # Scheduler @@ -453,8 +456,8 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio files = ['results.png', 'confusion_matrix.png', *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))] files = [(save_dir / f) for f in files if (save_dir / f).exists()] # filter LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}") - logger.log_images(files, "Results", epoch + 1) - logger.log_images(sorted(save_dir.glob('val*.jpg')), "Validation", epoch + 1) + logger.log_images(files, 'Results', epoch + 1) + logger.log_images(sorted(save_dir.glob('val*.jpg')), 'Validation', epoch + 1) torch.cuda.empty_cache() return results @@ -529,8 +532,8 @@ def main(opt, callbacks=Callbacks()): check_file(opt.data), check_yaml(opt.cfg), check_yaml(opt.hyp), str(opt.weights), str(opt.project) # checks assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified' if opt.evolve: - if opt.project == str(ROOT / 'runs/train'): # if default project name, rename to runs/evolve - opt.project = str(ROOT / 'runs/evolve') + if opt.project == str(ROOT / 'runs/train-seg'): # if default project name, rename to runs/evolve-seg + opt.project = str(ROOT / 'runs/evolve-seg') opt.exist_ok, opt.resume = opt.resume, False # pass resume to exist_ok and disable resume if opt.name == 'cfg': opt.name = Path(opt.cfg).stem # use model.yaml as name @@ -547,7 +550,7 @@ def main(opt, callbacks=Callbacks()): assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' torch.cuda.set_device(LOCAL_RANK) device = torch.device('cuda', LOCAL_RANK) - dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") + dist.init_process_group(backend='nccl' if dist.is_nccl_available() else 'gloo') # Train if not opt.evolve: @@ -597,7 +600,12 @@ def main(opt, callbacks=Callbacks()): # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv' if opt.bucket: - os.system(f'gsutil cp gs://{opt.bucket}/evolve.csv {evolve_csv}') # download evolve.csv if exists + # download evolve.csv if exists + subprocess.run([ + 'gsutil', + 'cp', + f'gs://{opt.bucket}/evolve.csv', + str(evolve_csv),]) for _ in range(opt.evolve): # generations to evolve if evolve_csv.exists(): # if evolve.csv exists: select best hyps and mutate @@ -623,7 +631,7 @@ def main(opt, callbacks=Callbacks()): while all(v == 1): # mutate until a change 
occurs (prevent duplicates) v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0) for i, k in enumerate(hyp.keys()): # plt.hist(v.ravel(), 300) - hyp[k] = float(x[i + 7] * v[i]) # mutate + hyp[k] = float(x[i + 12] * v[i]) # mutate # Constrain to limits for k, v in meta.items(): @@ -635,7 +643,7 @@ def main(opt, callbacks=Callbacks()): results = train(hyp.copy(), opt, device, callbacks) callbacks = Callbacks() # Write mutation results - print_mutation(KEYS, results, hyp.copy(), save_dir, opt.bucket) + print_mutation(KEYS[4:16], results, hyp.copy(), save_dir, opt.bucket) # Plot results plot_evolve(evolve_csv) @@ -653,6 +661,6 @@ def run(**kwargs): return opt -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) diff --git a/segment/tutorial.ipynb b/segment/tutorial.ipynb index 09ca963d4b98..6e5caf53b8ff 100644 --- a/segment/tutorial.ipynb +++ b/segment/tutorial.ipynb @@ -9,7 +9,7 @@ "
\n", "\n", " \n", - " \n", + " \n", "\n", "\n", "
\n", @@ -36,7 +36,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -94,7 +94,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -149,7 +149,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -176,7 +176,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -216,7 +216,7 @@ "source": [ "# 3. Train\n", "\n", - "

\n", + "

\n", "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", "

\n", "\n", @@ -264,7 +264,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -454,7 +454,8 @@ }, "source": [ "## Comet Logging and Visualization ๐ŸŒŸ NEW\n", - "[Comet](https://bit.ly/yolov5-readme-comet) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", + "\n", + "[Comet](https://www.comet.com/site/lp/yolov5-with-comet/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://www.comet.com/docs/v2/guides/comet-dashboard/code-panels/about-panels/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes!\n", "\n", "Getting started is easy:\n", "```shell\n", @@ -462,11 +463,11 @@ "export COMET_API_KEY= # 2. paste API key\n", "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n", "```\n", - "\n", - "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://bit.ly/yolov5-colab-comet-docs). Get started by trying out the Comet Colab Notebook:\n", + "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://docs.ultralytics.com/yolov5/tutorials/comet_logging_integration). If you'd like to learn more about Comet, head over to our [documentation](https://www.comet.com/docs/v2/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab). Get started by trying out the Comet Colab Notebook:\n", "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", "\n", - "\"yolo-ui\"" + "\n", + "\"Comet" ] }, { @@ -484,7 +485,7 @@ "\n", "You'll get all the great expected features from an experiment manager: live updates, model upload, experiment comparison etc. but ClearML also tracks uncommitted changes and installed packages for example. Thanks to that ClearML Tasks (which is what we call experiments) are also reproducible on different machines! With only 1 extra line, we can schedule a YOLOv5 training task on a queue to be executed by any number of ClearML Agents (workers).\n", "\n", - "You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. This will help you keep track of your data without adding extra hassle. Explore the [ClearML Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) for details!\n", + "You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. 
This will help you keep track of your data without adding extra hassle. Explore the [ClearML Tutorial](https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration) for details!\n", "\n", "\n", "\"ClearML" @@ -516,9 +517,9 @@ "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n", "\n", "- **Notebooks** with free GPU: \"Run \"Open \"Open\n", - "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)\n", - "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)\n", - "- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) \"Docker\n" + "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/)\n", + "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/)\n", + "- **Docker Image**. See [Docker Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/docker_image_quickstart_tutorial/) \"Docker\n" ] }, { @@ -590,4 +591,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} \ No newline at end of file +} diff --git a/segment/val.py b/segment/val.py index 5cf8ae8b41c1..c0575fd59a91 100644 --- a/segment/val.py +++ b/segment/val.py @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license """ Validate a trained YOLOv5 segment model on a segment dataset @@ -23,6 +23,7 @@ import argparse import json import os +import subprocess import sys from multiprocessing.pool import ThreadPool from pathlib import Path @@ -69,8 +70,8 @@ def save_one_json(predn, jdict, path, class_map, pred_masks): from pycocotools.mask import encode def single_encode(x): - rle = encode(np.asarray(x[:, :, None], order="F", dtype="uint8"))[0] - rle["counts"] = rle["counts"].decode("utf-8") + rle = encode(np.asarray(x[:, :, None], order='F', dtype='uint8'))[0] + rle['counts'] = rle['counts'].decode('utf-8') return rle image_id = int(path.stem) if path.stem.isnumeric() else path.stem @@ -104,7 +105,7 @@ def process_batch(detections, labels, iouv, pred_masks=None, gt_masks=None, over gt_masks = gt_masks.repeat(nl, 1, 1) # shape(1,640,640) -> (n,640,640) gt_masks = torch.where(gt_masks == index, 1.0, 0.0) if gt_masks.shape[1:] != pred_masks.shape[1:]: - gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode="bilinear", align_corners=False)[0] + gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode='bilinear', align_corners=False)[0] gt_masks = gt_masks.gt_(0.5) iou = mask_iou(gt_masks.view(gt_masks.shape[0], -1), pred_masks.view(pred_masks.shape[0], -1)) else: # boxes @@ -159,7 +160,7 @@ def run( callbacks=Callbacks(), ): if save_json: - check_requirements(['pycocotools']) + check_requirements('pycocotools>=2.0.6') process = process_mask_native # more accurate else: process = process_mask # faster @@ -230,8 +231,8 @@ def run( if isinstance(names, (list, tuple)): # old format names = dict(enumerate(names)) class_map = coco80_to_coco91_class() if is_coco else list(range(1000)) - s = ('%22s' + '%11s' * 10) % ('Class', 'Images', 'Instances', 'Box(P', "R", 
"mAP50", "mAP50-95)", "Mask(P", "R", - "mAP50", "mAP50-95)") + s = ('%22s' + '%11s' * 10) % ('Class', 'Images', 'Instances', 'Box(P', 'R', 'mAP50', 'mAP50-95)', 'Mask(P', 'R', + 'mAP50', 'mAP50-95)') dt = Profile(), Profile(), Profile() metrics = Metrics() loss = torch.zeros(4, device=device) @@ -342,7 +343,7 @@ def run( # Print results pf = '%22s' + '%11i' * 2 + '%11.3g' * 8 # print format - LOGGER.info(pf % ("all", seen, nt.sum(), *metrics.mean_results())) + LOGGER.info(pf % ('all', seen, nt.sum(), *metrics.mean_results())) if nt.sum() == 0: LOGGER.warning(f'WARNING โš ๏ธ no labels found in {task} set, can not compute metrics without labels') @@ -368,7 +369,7 @@ def run( if save_json and len(jdict): w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights anno_json = str(Path('../datasets/coco/annotations/instances_val2017.json')) # annotations - pred_json = str(save_dir / f"{w}_predictions.json") # predictions + pred_json = str(save_dir / f'{w}_predictions.json') # predictions LOGGER.info(f'\nEvaluating pycocotools mAP... saving {pred_json}...') with open(pred_json, 'w') as f: json.dump(jdict, f) @@ -461,12 +462,12 @@ def main(opt): r, _, t = run(**vars(opt), plots=False) y.append(r + t) # results and times np.savetxt(f, y, fmt='%10.4g') # save - os.system('zip -r study.zip study_*.txt') + subprocess.run(['zip', '-r', 'study.zip', 'study_*.txt']) plot_val_study(x=x) # plot else: raise NotImplementedError(f'--task {opt.task} not in ("train", "val", "test", "speed", "study")') -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) diff --git a/train.py b/train.py index cebc6ad66782..227a82659d11 100644 --- a/train.py +++ b/train.py @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license """ Train a YOLOv5 model on a custom dataset. Models and datasets download automatically from the latest YOLOv5 release. @@ -12,13 +12,14 @@ Models: https://github.com/ultralytics/yolov5/tree/master/models Datasets: https://github.com/ultralytics/yolov5/tree/master/data -Tutorial: https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data +Tutorial: https://docs.ultralytics.com/yolov5/tutorials/train_custom_data """ import argparse import math import os import random +import subprocess import sys import time from copy import deepcopy @@ -147,7 +148,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio # Batch size if RANK == -1 and batch_size == -1: # single-GPU only, estimate best batch size batch_size = check_train_batch_size(model, imgsz, amp) - loggers.on_params_update({"batch_size": batch_size}) + loggers.on_params_update({'batch_size': batch_size}) # Optimizer nbs = 64 # nominal batch size @@ -174,8 +175,10 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio # DP mode if cuda and RANK == -1 and torch.cuda.device_count() > 1: - LOGGER.warning('WARNING โš ๏ธ DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n' - 'See Multi-GPU Tutorial at https://github.com/ultralytics/yolov5/issues/475 to get started.') + LOGGER.warning( + 'WARNING โš ๏ธ DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n' + 'See Multi-GPU Tutorial at https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training to get started.' 
+ ) model = torch.nn.DataParallel(model) # SyncBatchNorm @@ -200,8 +203,8 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio prefix=colorstr('train: '), shuffle=True, validation=False, - weighted_sampler=opt.weighted_sampler) - + weighted_sampler=opt.weighted_sampler, + seed=opt.seed) labels = np.concatenate(dataset.labels, 0) mlc = int(labels[:, 0].max()) # max label class assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. Possible class labels are 0-{nc - 1}' @@ -302,7 +305,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio # Multi-scale if opt.multi_scale: - sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size + sz = random.randrange(int(imgsz * 0.5), int(imgsz * 1.5) + gs) // gs * gs # size sf = sz / max(imgs.shape[2:]) # scale factor if sf != 1: ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple) @@ -528,7 +531,7 @@ def main(opt, callbacks=Callbacks()): assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' torch.cuda.set_device(LOCAL_RANK) device = torch.device('cuda', LOCAL_RANK) - dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") + dist.init_process_group(backend='nccl' if dist.is_nccl_available() else 'gloo') # Train if not opt.evolve: @@ -578,7 +581,12 @@ def main(opt, callbacks=Callbacks()): # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv' if opt.bucket: - os.system(f'gsutil cp gs://{opt.bucket}/evolve.csv {evolve_csv}') # download evolve.csv if exists + # download evolve.csv if exists + subprocess.run([ 'gsutil', 'cp', f'gs://{opt.bucket}/evolve.csv', str(evolve_csv),]) for _ in range(opt.evolve): # generations to evolve if evolve_csv.exists(): # if evolve.csv exists: select best hyps and mutate @@ -636,6 +644,6 @@ def run(**kwargs): return opt -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) diff --git a/tutorial.ipynb b/tutorial.ipynb index 6ab0a33366a5..be87068822af 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -4,360 +4,13 @@ "metadata": { "colab": { "name": "YOLOv5 Tutorial", - "provenance": [], - "toc_visible": true + "provenance": [] }, "kernelspec": { "name": "python3", "display_name": "Python 3" }, - "accelerator": "GPU", - "widgets": { - "application/vnd.jupyter.widget-state+json": { - "1f7df330663048998adcf8a45bc8f69b": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HBoxModel", - "model_module_version": "1.5.0", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HBoxModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HBoxView", - "box_style": "", - "children": [ - "IPY_MODEL_e896e6096dd244c59d7955e2035cd729", - "IPY_MODEL_a6ff238c29984b24bf6d0bd175c19430", - "IPY_MODEL_3c085ba3f3fd4c3c8a6bb41b41ce1479" - ], - "layout": "IPY_MODEL_16b0c8aa6e0f427e8a54d3791abb7504" - } - }, - "e896e6096dd244c59d7955e2035cd729": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HTMLModel", - "model_module_version": "1.5.0", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": 
"@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HTMLView", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_c7b2dd0f78384cad8e400b282996cdf5", - "placeholder": "โ€‹", - "style": "IPY_MODEL_6a27e43b0e434edd82ee63f0a91036ca", - "value": "100%" - } - }, - "a6ff238c29984b24bf6d0bd175c19430": { - "model_module": "@jupyter-widgets/controls", - "model_name": "FloatProgressModel", - "model_module_version": "1.5.0", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "FloatProgressModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "ProgressView", - "bar_style": "success", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_cce0e6c0c4ec442cb47e65c674e02e92", - "max": 818322941, - "min": 0, - "orientation": "horizontal", - "style": "IPY_MODEL_c5b9f38e2f0d4f9aa97fe87265263743", - "value": 818322941 - } - }, - "3c085ba3f3fd4c3c8a6bb41b41ce1479": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HTMLModel", - "model_module_version": "1.5.0", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HTMLView", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_df554fb955c7454696beac5a82889386", - "placeholder": "โ€‹", - "style": "IPY_MODEL_74e9112a87a242f4831b7d68c7da6333", - "value": " 780M/780M [00:05<00:00, 126MB/s]" - } - }, - "16b0c8aa6e0f427e8a54d3791abb7504": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "model_module_version": "1.2.0", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "c7b2dd0f78384cad8e400b282996cdf5": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "model_module_version": "1.2.0", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - 
"grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "6a27e43b0e434edd82ee63f0a91036ca": { - "model_module": "@jupyter-widgets/controls", - "model_name": "DescriptionStyleModel", - "model_module_version": "1.5.0", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "description_width": "" - } - }, - "cce0e6c0c4ec442cb47e65c674e02e92": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "model_module_version": "1.2.0", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "c5b9f38e2f0d4f9aa97fe87265263743": { - "model_module": "@jupyter-widgets/controls", - "model_name": "ProgressStyleModel", - "model_module_version": "1.5.0", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "ProgressStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "bar_color": null, - "description_width": "" - } - }, - "df554fb955c7454696beac5a82889386": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "model_module_version": "1.2.0", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": 
null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "74e9112a87a242f4831b7d68c7da6333": { - "model_module": "@jupyter-widgets/controls", - "model_name": "DescriptionStyleModel", - "model_module_version": "1.5.0", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "description_width": "" - } - } - } - } + "accelerator": "GPU" }, "cells": [ { @@ -369,7 +22,7 @@ "
\n", "\n", " \n", - " \n", + " \n", "\n", "\n", "
\n", @@ -378,7 +31,7 @@ " \"Open\n", "
\n", "\n", - "This YOLOv5 ๐Ÿš€ notebook by Ultralytics presents simple train, validate and predict examples to help start your AI adventure.
See GitHub for community support or contact us for professional support.\n", + "This YOLOv5 ๐Ÿš€ notebook by Ultralytics presents simple train, validate and predict examples to help start your AI adventure.
We hope that the resources in this notebook will help you get the most out of YOLOv5. Please browse the YOLOv5 Docs for details, raise an issue on GitHub for support, and join our Discord community for questions and discussions!\n", "\n", "
" ] @@ -401,7 +54,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "f9f016ad-3dcf-4bd2-e1c3-d5b79efc6f32" + "outputId": "e8225db4-e61d-4640-8b1f-8bfce3331cea" }, "source": [ "!git clone https://github.com/ultralytics/yolov5 # clone\n", @@ -418,14 +71,14 @@ "output_type": "stream", "name": "stderr", "text": [ - "YOLOv5 ๐Ÿš€ v7.0-1-gb32f67f Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n" + "YOLOv5 ๐Ÿš€ v7.0-136-g71244ae Python-3.9.16 torch-2.0.0+cu118 CUDA:0 (Tesla T4, 15102MiB)\n" ] }, { "output_type": "stream", "name": "stdout", "text": [ - "Setup complete โœ… (2 CPUs, 12.7 GB RAM, 22.6/78.2 GB disk)\n" + "Setup complete โœ… (2 CPUs, 12.7 GB RAM, 23.3/166.8 GB disk)\n" ] } ] @@ -459,29 +112,29 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "b4db5c49-f501-4505-cf0d-a1d35236c485" + "outputId": "284ef04b-1596-412f-88f6-948828dd2b49" }, "source": [ "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images\n", "# display.Image(filename='runs/detect/exp/zidane.jpg', width=600)" ], - "execution_count": 2, + "execution_count": 13, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "\u001b[34m\u001b[1mdetect: \u001b[0mweights=['yolov5s.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/detect, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False, vid_stride=1\n", - "YOLOv5 ๐Ÿš€ v7.0-1-gb32f67f Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "YOLOv5 ๐Ÿš€ v7.0-136-g71244ae Python-3.9.16 torch-2.0.0+cu118 CUDA:0 (Tesla T4, 15102MiB)\n", "\n", "Downloading https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s.pt to yolov5s.pt...\n", - "100% 14.1M/14.1M [00:00<00:00, 116MB/s] \n", + "100% 14.1M/14.1M [00:00<00:00, 24.5MB/s]\n", "\n", "Fusing layers... 
\n", "YOLOv5s summary: 213 layers, 7225885 parameters, 0 gradients\n", - "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 17.0ms\n", - "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, 14.3ms\n", - "Speed: 0.5ms pre-process, 15.7ms inference, 18.6ms NMS per image at shape (1, 3, 640, 640)\n", + "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 41.5ms\n", + "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, 60.0ms\n", + "Speed: 0.5ms pre-process, 50.8ms inference, 37.7ms NMS per image at shape (1, 3, 640, 640)\n", "Results saved to \u001b[1mruns/detect/exp\u001b[0m\n" ] } @@ -512,23 +165,9 @@ "metadata": { "id": "WQPtK1QYVaD_", "colab": { - "base_uri": "https://localhost:8080/", - "height": 49, - "referenced_widgets": [ - "1f7df330663048998adcf8a45bc8f69b", - "e896e6096dd244c59d7955e2035cd729", - "a6ff238c29984b24bf6d0bd175c19430", - "3c085ba3f3fd4c3c8a6bb41b41ce1479", - "16b0c8aa6e0f427e8a54d3791abb7504", - "c7b2dd0f78384cad8e400b282996cdf5", - "6a27e43b0e434edd82ee63f0a91036ca", - "cce0e6c0c4ec442cb47e65c674e02e92", - "c5b9f38e2f0d4f9aa97fe87265263743", - "df554fb955c7454696beac5a82889386", - "74e9112a87a242f4831b7d68c7da6333" - ] + "base_uri": "https://localhost:8080/" }, - "outputId": "c7d0a0d2-abfb-44c3-d60d-f99d0e7aabad" + "outputId": "cf7d52f0-281c-4c96-a488-79f5908f8426" }, "source": [ "# Download COCO val\n", @@ -538,18 +177,11 @@ "execution_count": 3, "outputs": [ { - "output_type": "display_data", - "data": { - "text/plain": [ - " 0%| | 0.00/780M [00:00

\n", + "

\n", "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", "

\n", "\n", @@ -632,36 +264,30 @@ "automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\n", "- **[Datasets](https://github.com/ultralytics/yolov5/tree/master/data)** available for autodownload include: [COCO](https://github.com/ultralytics/yolov5/blob/master/data/coco.yaml), [COCO128](https://github.com/ultralytics/yolov5/blob/master/data/coco128.yaml), [VOC](https://github.com/ultralytics/yolov5/blob/master/data/VOC.yaml), [Argoverse](https://github.com/ultralytics/yolov5/blob/master/data/Argoverse.yaml), [VisDrone](https://github.com/ultralytics/yolov5/blob/master/data/VisDrone.yaml), [GlobalWheat](https://github.com/ultralytics/yolov5/blob/master/data/GlobalWheat2020.yaml), [xView](https://github.com/ultralytics/yolov5/blob/master/data/xView.yaml), [Objects365](https://github.com/ultralytics/yolov5/blob/master/data/Objects365.yaml), [SKU-110K](https://github.com/ultralytics/yolov5/blob/master/data/SKU-110K.yaml).\n", "- **Training Results** are saved to `runs/train/` with incrementing run directories, i.e. `runs/train/exp2`, `runs/train/exp3` etc.\n", - "

\n", + "
\n", "\n", "A **Mosaic Dataloader** is used for training which combines 4 images into 1 mosaic.\n", "\n", - "## Train on Custom Data with Roboflow ๐ŸŒŸ NEW\n", + "## Label a dataset on Roboflow (optional)\n", "\n", - "[Roboflow](https://roboflow.com/?ref=ultralytics) enables you to easily **organize, label, and prepare** a high quality dataset with your own custom data. Roboflow also makes it easy to establish an active learning pipeline, collaborate with your team on dataset improvement, and integrate directly into your model building workflow with the `roboflow` pip package.\n", - "\n", - "- Custom Training Example: [https://blog.roboflow.com/how-to-train-yolov5-on-a-custom-dataset/](https://blog.roboflow.com/how-to-train-yolov5-on-a-custom-dataset/?ref=ultralytics)\n", - "- Custom Training Notebook: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/roboflow-ai/yolov5-custom-training-tutorial/blob/main/yolov5-custom-training.ipynb)\n", - "
\n", - "\n", - "

Label images lightning fast (including with model-assisted labeling)" + "[Roboflow](https://roboflow.com/?ref=ultralytics) enables you to easily **organize, label, and prepare** a high quality dataset with your own custom data. Roboflow also makes it easy to establish an active learning pipeline, collaborate with your team on dataset improvement, and integrate directly into your model building workflow with the `roboflow` pip package." ] }, { "cell_type": "code", "source": [ "#@title Select YOLOv5 ๐Ÿš€ logger {run: 'auto'}\n", - "logger = 'TensorBoard' #@param ['TensorBoard', 'Comet', 'ClearML']\n", + "logger = 'Comet' #@param ['Comet', 'ClearML', 'TensorBoard']\n", "\n", - "if logger == 'TensorBoard':\n", - " %load_ext tensorboard\n", - " %tensorboard --logdir runs/train\n", - "elif logger == 'Comet':\n", + "if logger == 'Comet':\n", " %pip install -q comet_ml\n", " import comet_ml; comet_ml.init()\n", "elif logger == 'ClearML':\n", " %pip install -q clearml\n", - " import clearml; clearml.browser_login()" + " import clearml; clearml.browser_login()\n", + "elif logger == 'TensorBoard':\n", + " %load_ext tensorboard\n", + " %tensorboard --logdir runs/train" ], "metadata": { "id": "i3oKtE4g-aNn" @@ -676,7 +302,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "721b9028-767f-4a05-c964-692c245f7398" + "outputId": "bbeeea2b-04fc-4185-aa64-258690495b5a" }, "source": [ "# Train YOLOv5s on COCO128 for 3 epochs\n", @@ -688,9 +314,12 @@ "output_type": "stream", "name": "stdout", "text": [ + "2023-04-09 14:11:38.063605: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n", + "To enable the following instructions: AVX2 AVX512F FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n", + "2023-04-09 14:11:39.026661: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n", "\u001b[34m\u001b[1mtrain: \u001b[0mweights=yolov5s.pt, cfg=, data=coco128.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=-1, artifact_alias=latest\n", "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 โœ…\n", - "YOLOv5 ๐Ÿš€ v7.0-1-gb32f67f Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "YOLOv5 ๐Ÿš€ v7.0-136-g71244ae Python-3.9.16 torch-2.0.0+cu118 CUDA:0 (Tesla T4, 15102MiB)\n", "\n", "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n", "\u001b[34m\u001b[1mClearML: \u001b[0mrun 'pip install clearml' to automatically track, visualize and remotely train YOLOv5 ๐Ÿš€ in ClearML\n", @@ -699,8 +328,8 @@ "\n", "Dataset not found โš ๏ธ, missing paths 
['/content/datasets/coco128/images/train2017']\n", "Downloading https://ultralytics.com/assets/coco128.zip to coco128.zip...\n", - "100% 6.66M/6.66M [00:00<00:00, 261MB/s]\n", - "Dataset download success โœ… (0.3s), saved to \u001b[1m/content/datasets\u001b[0m\n", + "100% 6.66M/6.66M [00:00<00:00, 75.6MB/s]\n", + "Dataset download success โœ… (0.6s), saved to \u001b[1m/content/datasets\u001b[0m\n", "\n", " from n params module arguments \n", " 0 -1 1 3520 models.common.Conv [3, 32, 6, 2, 2] \n", @@ -734,11 +363,11 @@ "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed โœ…\n", "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n", "\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(p=0.01, blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /content/datasets/coco128/labels/train2017... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00<00:00, 1911.57it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /content/datasets/coco128/labels/train2017... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00<00:00, 1709.36it/s]\n", "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /content/datasets/coco128/labels/train2017.cache\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 229.69it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 264.35it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco128/labels/train2017.cache... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00 # 2. paste API key\n", "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n", "```\n", - "\n", - "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://bit.ly/yolov5-colab-comet-docs). Get started by trying out the Comet Colab Notebook:\n", + "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://docs.ultralytics.com/yolov5/tutorials/comet_logging_integration). If you'd like to learn more about Comet, head over to our [documentation](https://www.comet.com/docs/v2/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab). Get started by trying out the Comet Colab Notebook:\n", "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", "\n", - "\"yolo-ui\"" + "\n", + "\"Comet" ], "metadata": { "id": "nWOsI5wJR1o3" @@ -890,7 +520,7 @@ "\n", "You'll get all the great expected features from an experiment manager: live updates, model upload, experiment comparison etc. but ClearML also tracks uncommitted changes and installed packages for example. Thanks to that ClearML Tasks (which is what we call experiments) are also reproducible on different machines! With only 1 extra line, we can schedule a YOLOv5 training task on a queue to be executed by any number of ClearML Agents (workers).\n", "\n", - "You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. 
This will help you keep track of your data without adding extra hassle. Explore the [ClearML Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) for details!\n", + "You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. This will help you keep track of your data without adding extra hassle. Explore the [ClearML Tutorial](https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration) for details!\n", "\n", "\n", "\"ClearML" @@ -925,9 +555,9 @@ "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n", "\n", "- **Notebooks** with free GPU: \"Run \"Open \"Open\n", - "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)\n", - "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)\n", - "- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) \"Docker\n" + "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/)\n", + "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/)\n", + "- **Docker Image**. See [Docker Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/docker_image_quickstart_tutorial/) \"Docker\n" ] }, { diff --git a/utils/__init__.py b/utils/__init__.py index 7bf3efe6b8c7..6c10857df079 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license """ utils/initialization """ @@ -60,17 +60,19 @@ def notebook_init(verbose=True): check_font() import psutil - from IPython import display # to display images and clear console output if is_colab(): shutil.rmtree('/content/sample_data', ignore_errors=True) # remove colab /sample_data directory # System info + display = None if verbose: gb = 1 << 30 # bytes to GiB (1024 ** 3) ram = psutil.virtual_memory().total - total, used, free = shutil.disk_usage("/") - display.clear_output() + total, used, free = shutil.disk_usage('/') + with contextlib.suppress(Exception): # clear display if ipython is installed + from IPython import display + display.clear_output() s = f'({os.cpu_count()} CPUs, {ram / gb:.1f} GB RAM, {(total - free) / gb:.1f}/{total / gb:.1f} GB disk)' else: s = '' diff --git a/utils/activations.py b/utils/activations.py index 084ce8c41230..e4d4bbde5ec8 100644 --- a/utils/activations.py +++ b/utils/activations.py @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license """ Activation functions """ diff --git a/utils/augmentations.py b/utils/augmentations.py index 1eae5db8f816..52e2e346e36e 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license """ Image augmentation functions """ @@ -201,7 +201,7 @@ def random_perspective(im, # Transform label coordinates n = len(targets) if n: - use_segments = any(x.any() for x in segments) + use_segments = any(x.any() for x in 
segments) and len(segments) == n new = np.zeros((n, 4)) if use_segments: # warp segments segments = resample_segments(segments) # upsample diff --git a/utils/autoanchor.py b/utils/autoanchor.py index bb5cf6e6965e..4c11ab3decec 100644 --- a/utils/autoanchor.py +++ b/utils/autoanchor.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ AutoAnchor utils """ diff --git a/utils/autobatch.py b/utils/autobatch.py index bdeb91c3d2bd..aa763b888462 100644 --- a/utils/autobatch.py +++ b/utils/autobatch.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Auto-batch utils """ diff --git a/utils/callbacks.py b/utils/callbacks.py index 166d8938322d..ccebba02bcaa 100644 --- a/utils/callbacks.py +++ b/utils/callbacks.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Callback utils """ diff --git a/utils/dataloaders.py b/utils/dataloaders.py index dd4bc46b0744..99b2f720ff9f 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Dataloaders and dataset utils """ @@ -37,7 +37,7 @@ from utils.torch_utils import torch_distributed_zero_first # Parameters -HELP_URL = 'See https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' +HELP_URL = 'See https://docs.ultralytics.com/yolov5/tutorials/train_custom_data' IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp', 'pfm' # include image suffixes VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html @@ -53,7 +53,7 @@ def get_hash(paths): # Returns a single hash value of a list of paths (files or dirs) size = sum(os.path.getsize(p) for p in paths if os.path.exists(p)) # sizes - h = hashlib.md5(str(size).encode()) # hash sizes + h = hashlib.sha256(str(size).encode()) # hash sizes h.update(''.join(paths).encode()) # hash paths return h.hexdigest() # return hash @@ -90,7 +90,7 @@ def exif_transpose(image): if method is not None: image = image.transpose(method) del exif[0x0112] - image.info["exif"] = exif.tobytes() + image.info['exif'] = exif.tobytes() return image @@ -159,7 +159,8 @@ def create_dataloader(path, prefix='', shuffle=False, validation=False, - weighted_sampler=False): + weighted_sampler=False, + seed=0): if rect and shuffle: LOGGER.warning('WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False') shuffle = False @@ -191,7 +192,7 @@ loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates generator = torch.Generator() - generator.manual_seed(6148914691236517205 + RANK) + generator.manual_seed(6148914691236517205 + seed + RANK) return loader(dataset, batch_size=batch_size, shuffle=shuffle and sampler is None, @@ -262,11 +263,11 @@ def __init__(self, source, img_size=640, stride=32, auto=True, transforms=None): # Parse monitor shape monitor = self.sct.monitors[self.screen] - self.top = monitor["top"] if top is None else (monitor["top"] + top) - self.left = monitor["left"] if left is None else (monitor["left"] + left) - self.width = width or monitor["width"] - self.height = height or monitor["height"] - self.monitor = {"left": self.left, "top": self.top, 
"width": self.width, "height": self.height} + self.top = monitor['top'] if top is None else (monitor['top'] + top) + self.left = monitor['left'] if left is None else (monitor['left'] + left) + self.width = width or monitor['width'] + self.height = height or monitor['height'] + self.monitor = {'left': self.left, 'top': self.top, 'width': self.width, 'height': self.height} def __iter__(self): return self @@ -274,7 +275,7 @@ def __iter__(self): def __next__(self): # mss screen capture: get raw pixels from the screen as np array im0 = np.array(self.sct.grab(self.monitor))[:, :, :3] # [:, :, :3] BGRA to BGR - s = f"screen {self.screen} (LTWH): {self.left},{self.top},{self.width},{self.height}: " + s = f'screen {self.screen} (LTWH): {self.left},{self.top},{self.width},{self.height}: ' if self.transforms: im = self.transforms(im0) # transforms @@ -289,7 +290,7 @@ def __next__(self): class LoadImages: # YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4` def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None, vid_stride=1): - if isinstance(path, str) and Path(path).suffix == ".txt": # *.txt file with img/vid/dir on each line + if isinstance(path, str) and Path(path).suffix == '.txt': # *.txt file with img/vid/dir on each line path = Path(path).read_text().rsplit() files = [] for p in sorted(path) if isinstance(path, (list, tuple)) else [path]: @@ -408,7 +409,7 @@ def __init__(self, sources='file.streams', img_size=640, stride=32, auto=True, t # YouTube format i.e. 'https://www.youtube.com/watch?v=Zgi9g1ksQHc' or 'https://youtu.be/Zgi9g1ksQHc' check_requirements(('pafy', 'youtube_dl==2020.12.2')) import pafy - s = pafy.new(s).getbest(preftype="mp4").url # YouTube URL + s = pafy.new(s).getbest(preftype='mp4').url # YouTube URL s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam if s == 0: assert not is_colab(), '--source 0 webcam unsupported on Colab. Rerun command in a local environment.' @@ -423,7 +424,7 @@ def __init__(self, sources='file.streams', img_size=640, stride=32, auto=True, t _, self.imgs[i] = cap.read() # guarantee first frame self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True) - LOGGER.info(f"{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)") + LOGGER.info(f'{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)') self.threads[i].start() LOGGER.info('') # newline @@ -545,7 +546,7 @@ def __init__(self, # Display cache nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupt, total if exists and LOCAL_RANK in {-1, 0}: - d = f"Scanning {cache_path}... {nf} images, {nm + ne} backgrounds, {nc} corrupt" + d = f'Scanning {cache_path}... 
{nf} images, {nm + ne} backgrounds, {nc} corrupt' tqdm(None, desc=prefix + d, total=n, initial=n, bar_format=TQDM_BAR_FORMAT) # display cache results if cache['msgs']: LOGGER.info('\n'.join(cache['msgs'])) # display warnings @@ -581,13 +582,14 @@ def __init__(self, # Update labels include_class = [] # filter labels to include only these classes (optional) + self.segments = list(self.segments) include_class_array = np.array(include_class).reshape(1, -1) for i, (label, segment) in enumerate(zip(self.labels, self.segments)): if include_class: j = (label[:, 0:1] == include_class_array).any(1) self.labels[i] = label[j] if segment: - self.segments[i] = segment[j] + self.segments[i] = [segment[idx] for idx, elem in enumerate(j) if elem] if single_cls: # single-class training, merge all classes into 0 self.labels[i][:, 0] = 0 @@ -648,8 +650,8 @@ def check_cache_ram(self, safety_margin=0.1, prefix=''): mem = psutil.virtual_memory() cache = mem_required * (1 + safety_margin) < mem.available # to cache or not to cache, that is the question if not cache: - LOGGER.info(f"{prefix}{mem_required / gb:.1f}GB RAM required, " - f"{mem.available / gb:.1f}/{mem.total / gb:.1f}GB available, " + LOGGER.info(f'{prefix}{mem_required / gb:.1f}GB RAM required, ' + f'{mem.available / gb:.1f}/{mem.total / gb:.1f}GB available, ' f"{'caching images โœ…' if cache else 'not caching images โš ๏ธ'}") return cache @@ -657,7 +659,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): # Cache dataset labels, check images and read shapes x = {} # dict nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupt, messages - desc = f"{prefix}Scanning {path.parent / path.stem}..." + desc = f'{prefix}Scanning {path.parent / path.stem}...' with Pool(NUM_THREADS) as pool: pbar = tqdm(pool.imap(verify_image_label, zip(self.im_files, self.label_files, repeat(prefix))), desc=desc, @@ -672,7 +674,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): x[im_file] = [lb, shape, segments] if msg: msgs.append(msg) - pbar.desc = f"{desc} {nf} images, {nm + ne} backgrounds, {nc} corrupt" + pbar.desc = f'{desc} {nf} images, {nm + ne} backgrounds, {nc} corrupt' pbar.close() if msgs: @@ -787,7 +789,7 @@ def load_image(self, i): r = self.img_size / max(h0, w0) # ratio if r != 1: # if sizes are not equal interp = cv2.INTER_LINEAR if (self.augment or r > 1) else cv2.INTER_AREA - im = cv2.resize(im, (int(w0 * r), int(h0 * r)), interpolation=interp) + im = cv2.resize(im, (math.ceil(w0 * r), math.ceil(h0 * r)), interpolation=interp) return im, (h0, w0), im.shape[:2] # im, hw_original, hw_resized return self.ims[i], self.im_hw0[i], self.im_hw[i] # im, hw_original, hw_resized @@ -1113,7 +1115,7 @@ def __init__(self, path='coco128.yaml', autodownload=False): if zipped: data['path'] = data_dir except Exception as e: - raise Exception("error/HUB/dataset_stats/yaml_load") from e + raise Exception('error/HUB/dataset_stats/yaml_load') from e check_dataset(data, autodownload) # download dataset if missing self.hub_dir = Path(data['path'] + '-hub') @@ -1238,7 +1240,7 @@ def __getitem__(self, i): else: # read image im = cv2.imread(f) # BGR if self.album_transforms: - sample = self.album_transforms(image=cv2.cvtColor(im, cv2.COLOR_BGR2RGB))["image"] + sample = self.album_transforms(image=cv2.cvtColor(im, cv2.COLOR_BGR2RGB))['image'] else: sample = self.torch_transforms(im) return sample, j diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index 1ecf4c64f75f..ff657dea2bf2 100644 --- a/utils/docker/Dockerfile 
+++ b/utils/docker/Dockerfile @@ -1,35 +1,44 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license # Builds ultralytics/yolov5:latest image on DockerHub https://hub.docker.com/r/ultralytics/yolov5 # Image is CUDA-optimized for YOLOv5 single/multi-GPU training and inference -# Start FROM NVIDIA PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch -FROM nvcr.io/nvidia/pytorch:22.11-py3 -RUN rm -rf /opt/pytorch # remove 1.2GB dir +# Start FROM PyTorch image https://hub.docker.com/r/pytorch/pytorch +FROM pytorch/pytorch:2.0.0-cuda11.7-cudnn8-runtime # Downloads to user config dir ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ # Install linux packages -RUN apt update && apt install --no-install-recommends -y zip htop screen libgl1-mesa-glx +ENV DEBIAN_FRONTEND noninteractive +RUN apt update +RUN TZ=Etc/UTC apt install -y tzdata +RUN apt install --no-install-recommends -y gcc git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg +# RUN alias python=python3 -# Install pip packages -COPY requirements.txt . -RUN python -m pip install --upgrade pip wheel -RUN pip uninstall -y Pillow torchtext # torch torchvision -RUN pip install --no-cache -r requirements.txt ultralytics albumentations comet gsutil notebook Pillow>=9.1.0 \ - 'opencv-python<4.6.0.66' \ - --extra-index-url https://download.pytorch.org/whl/cu113 +# Security updates +# https://security.snyk.io/vuln/SNYK-UBUNTU1804-OPENSSL-3314796 +RUN apt upgrade --no-install-recommends -y openssl # Create working directory -RUN mkdir -p /usr/src/app +RUN rm -rf /usr/src/app && mkdir -p /usr/src/app WORKDIR /usr/src/app # Copy contents # COPY . /usr/src/app (issues as not a .git directory) RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app +# Install pip packages +COPY requirements.txt . +RUN python3 -m pip install --upgrade pip wheel +RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook \ + coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2022.3' + # tensorflow tensorflowjs \ + # Set environment variables -ENV OMP_NUM_THREADS=8 +ENV OMP_NUM_THREADS=1 + +# Cleanup +ENV DEBIAN_FRONTEND teletype # Usage Examples ------------------------------------------------------------------------------------------------------- @@ -53,7 +62,7 @@ ENV OMP_NUM_THREADS=8 # t=ultralytics/yolov5:latest tnew=ultralytics/yolov5:v6.2 && sudo docker pull $t && sudo docker tag $t $tnew && sudo docker push $tnew # Clean up -# docker system prune -a --volumes +# sudo docker system prune -a --volumes # Update Ubuntu drivers # https://www.maketecheasier.com/install-nvidia-drivers-ubuntu/ diff --git a/utils/docker/Dockerfile-arm64 b/utils/docker/Dockerfile-arm64 index eed1410793a1..7b5c610e5071 100644 --- a/utils/docker/Dockerfile-arm64 +++ b/utils/docker/Dockerfile-arm64 @@ -1,9 +1,9 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license # Builds ultralytics/yolov5:latest-arm64 image on DockerHub https://hub.docker.com/r/ultralytics/yolov5 # Image is aarch64-compatible for Apple M1 and other ARM architectures i.e. 
Jetson Nano and Raspberry Pi # Start FROM Ubuntu image https://hub.docker.com/_/ubuntu -FROM arm64v8/ubuntu:20.04 +FROM arm64v8/ubuntu:22.10 # Downloads to user config dir ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ @@ -18,11 +18,9 @@ RUN apt install --no-install-recommends -y python3-pip git zip curl htop gcc lib # Install pip packages COPY requirements.txt . RUN python3 -m pip install --upgrade pip wheel -RUN pip install --no-cache -r requirements.txt ultralytics gsutil notebook \ - tensorflow-aarch64 - # tensorflowjs \ - # onnx onnx-simplifier onnxruntime \ - # coremltools openvino-dev \ +RUN pip install --no-cache -r requirements.txt albumentations gsutil notebook \ + coremltools onnx onnxruntime + # tensorflow-aarch64 tensorflowjs \ # Create working directory RUN mkdir -p /usr/src/app diff --git a/utils/docker/Dockerfile-cpu b/utils/docker/Dockerfile-cpu index 558f81f00584..613bdffa4768 100644 --- a/utils/docker/Dockerfile-cpu +++ b/utils/docker/Dockerfile-cpu @@ -1,9 +1,9 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license # Builds ultralytics/yolov5:latest-cpu image on DockerHub https://hub.docker.com/r/ultralytics/yolov5 # Image is CPU-optimized for ONNX, OpenVINO and PyTorch YOLOv5 deployments # Start FROM Ubuntu image https://hub.docker.com/_/ubuntu -FROM ubuntu:20.04 +FROM ubuntu:22.10 # Downloads to user config dir ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ @@ -18,9 +18,9 @@ RUN apt install --no-install-recommends -y python3-pip git zip curl htop libgl1- # Install pip packages COPY requirements.txt . RUN python3 -m pip install --upgrade pip wheel -RUN pip install --no-cache -r requirements.txt ultralytics albumentations gsutil notebook \ - coremltools onnx onnx-simplifier onnxruntime tensorflow-cpu tensorflowjs \ - # openvino-dev \ +RUN pip install --no-cache -r requirements.txt albumentations gsutil notebook \ + coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2022.3' \ + # tensorflow tensorflowjs \ --extra-index-url https://download.pytorch.org/whl/cpu # Create working directory diff --git a/utils/downloads.py b/utils/downloads.py index 72ea87340eb9..629537d5ade6 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license """ Download utils """ @@ -26,8 +26,10 @@ def is_url(url, check=True): def gsutil_getsize(url=''): # gs://bucket/file size https://cloud.google.com/storage/docs/gsutil/commands/du - s = subprocess.check_output(f'gsutil du {url}', shell=True).decode('utf-8') - return eval(s.split(' ')[0]) if len(s) else 0 # bytes + output = subprocess.check_output(['gsutil', 'du', url], shell=True, encoding='utf-8') + if output: + return int(output.split()[0]) + return 0 def url_getsize(url='https://ultralytics.com/images/bus.jpg'): @@ -36,6 +38,25 @@ def url_getsize(url='https://ultralytics.com/images/bus.jpg'): return int(response.headers.get('content-length', -1)) +def curl_download(url, filename, *, silent: bool = False) -> bool: + """ + Download a file from a url to a filename using curl. 
+ """ + silent_option = 'sS' if silent else '' # silent + proc = subprocess.run([ + 'curl', + '-#', + f'-{silent_option}L', + url, + '--output', + filename, + '--retry', + '9', + '-C', + '-',]) + return proc.returncode == 0 + + def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''): # Attempts to download file from url or url2, checks and removes incomplete downloads < min_bytes from utils.general import LOGGER @@ -50,12 +71,13 @@ def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''): if file.exists(): file.unlink() # remove partial downloads LOGGER.info(f'ERROR: {e}\nRe-attempting {url2 or url} to {file}...') - os.system(f"curl -# -L '{url2 or url}' -o '{file}' --retry 3 -C -") # curl download, retry and resume on fail + # curl download, retry and resume on fail + curl_download(url2 or url, file) finally: if not file.exists() or file.stat().st_size < min_bytes: # check if file.exists(): file.unlink() # remove partial downloads - LOGGER.info(f"ERROR: {assert_msg}\n{error_msg}") + LOGGER.info(f'ERROR: {assert_msg}\n{error_msg}') LOGGER.info('') @@ -96,13 +118,11 @@ def github_assets(repository, version='latest'): except Exception: tag = release - file.parent.mkdir(parents=True, exist_ok=True) # make parent dir (if required) if name in assets: - url3 = 'https://drive.google.com/drive/folders/1EFQTEUeXWSFww0luse2jB9M1QNZQGwNl' # backup gdrive mirror - safe_download( - file, - url=f'https://github.com/{repo}/releases/download/{tag}/{name}', - min_bytes=1E5, - error_msg=f'{file} missing, try downloading from https://github.com/{repo}/releases/{tag} or {url3}') + file.parent.mkdir(parents=True, exist_ok=True) # make parent dir (if required) + safe_download(file, + url=f'https://github.com/{repo}/releases/download/{tag}/{name}', + min_bytes=1E5, + error_msg=f'{file} missing, try downloading from https://github.com/{repo}/releases/{tag}') return str(file) diff --git a/utils/flask_rest_api/example_request.py b/utils/flask_rest_api/example_request.py index 773ad8932967..256ad1319c82 100644 --- a/utils/flask_rest_api/example_request.py +++ b/utils/flask_rest_api/example_request.py @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license """ Perform test request """ @@ -7,13 +7,13 @@ import requests -DETECTION_URL = "http://localhost:5000/v1/object-detection/yolov5s" -IMAGE = "zidane.jpg" +DETECTION_URL = 'http://localhost:5000/v1/object-detection/yolov5s' +IMAGE = 'zidane.jpg' # Read image -with open(IMAGE, "rb") as f: +with open(IMAGE, 'rb') as f: image_data = f.read() -response = requests.post(DETECTION_URL, files={"image": image_data}).json() +response = requests.post(DETECTION_URL, files={'image': image_data}).json() pprint.pprint(response) diff --git a/utils/flask_rest_api/restapi.py b/utils/flask_rest_api/restapi.py index 8482435c861e..ae4756b276e4 100644 --- a/utils/flask_rest_api/restapi.py +++ b/utils/flask_rest_api/restapi.py @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license """ Run a Flask REST API exposing one or more YOLOv5s models """ @@ -13,36 +13,36 @@ app = Flask(__name__) models = {} -DETECTION_URL = "/v1/object-detection/" +DETECTION_URL = '/v1/object-detection/' -@app.route(DETECTION_URL, methods=["POST"]) +@app.route(DETECTION_URL, methods=['POST']) def predict(model): - if request.method != "POST": + if request.method != 'POST': return - if request.files.get("image"): + if request.files.get('image'): # Method 1 # with 
request.files["image"] as f: # im = Image.open(io.BytesIO(f.read())) # Method 2 - im_file = request.files["image"] + im_file = request.files['image'] im_bytes = im_file.read() im = Image.open(io.BytesIO(im_bytes)) if model in models: results = models[model](im, size=640) # reduce size=320 for faster inference - return results.pandas().xyxy[0].to_json(orient="records") + return results.pandas().xyxy[0].to_json(orient='records') -if __name__ == "__main__": - parser = argparse.ArgumentParser(description="Flask API exposing YOLOv5 model") - parser.add_argument("--port", default=5000, type=int, help="port number") +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Flask API exposing YOLOv5 model') + parser.add_argument('--port', default=5000, type=int, help='port number') parser.add_argument('--model', nargs='+', default=['yolov5s'], help='model(s) to run, i.e. --model yolov5n yolov5s') opt = parser.parse_args() for m in opt.model: - models[m] = torch.hub.load("ultralytics/yolov5", m, force_reload=True, skip_validation=True) + models[m] = torch.hub.load('ultralytics/yolov5', m, force_reload=True, skip_validation=True) - app.run(host="0.0.0.0", port=opt.port) # debug=True causes Restarting with stat + app.run(host='0.0.0.0', port=opt.port) # debug=True causes Restarting with stat diff --git a/utils/general.py b/utils/general.py index e5a843c4a758..3d7fd20c48d1 100644 --- a/utils/general.py +++ b/utils/general.py @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license """ General utils """ @@ -14,6 +14,7 @@ import random import re import signal +import subprocess import sys import time import urllib @@ -28,7 +29,6 @@ from zipfile import ZipFile, is_zipfile import cv2 -import IPython import numpy as np import pandas as pd import pkg_resources as pkg @@ -37,7 +37,7 @@ import yaml from utils import TryExcept, emojis -from utils.downloads import gsutil_getsize +from utils.downloads import curl_download, gsutil_getsize from utils.metrics import box_iou, fitness FILE = Path(__file__).resolve() @@ -58,6 +58,7 @@ cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader) os.environ['NUMEXPR_MAX_THREADS'] = str(NUM_THREADS) # NumExpr max threads os.environ['OMP_NUM_THREADS'] = '1' if platform.system() == 'darwin' else str(NUM_THREADS) # OpenMP (PyTorch and SciPy) +os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # suppress verbose TF compiler warnings in Colab def is_ascii(s=''): @@ -76,10 +77,18 @@ def is_colab(): return 'google.colab' in sys.modules -def is_notebook(): - # Is environment a Jupyter notebook? Verified on Colab, Jupyterlab, Kaggle, Paperspace - ipython_type = str(type(IPython.get_ipython())) - return 'colab' in ipython_type or 'zmqshell' in ipython_type +def is_jupyter(): + """ + Check if the current script is running inside a Jupyter Notebook. + Verified on Colab, Jupyterlab, Kaggle, Paperspace. + + Returns: + bool: True if running inside a Jupyter Notebook, False otherwise. 
+ """ + with contextlib.suppress(Exception): + from IPython import get_ipython + return get_ipython() is not None + return False def is_kaggle(): @@ -89,11 +98,11 @@ def is_kaggle(): def is_docker() -> bool: """Check if the process runs inside a docker container.""" - if Path("/.dockerenv").exists(): + if Path('/.dockerenv').exists(): return True try: # check if docker is in control groups - with open("/proc/self/cgroup") as file: - return any("docker" in line for line in file) + with open('/proc/self/cgroup') as file: + return any('docker' in line for line in file) except OSError: return False @@ -112,7 +121,7 @@ def is_writeable(dir, test=False): return False -LOGGING_NAME = "yolov5" +LOGGING_NAME = 'yolov5' def set_logging(name=LOGGING_NAME, verbose=True): @@ -120,21 +129,21 @@ def set_logging(name=LOGGING_NAME, verbose=True): rank = int(os.getenv('RANK', -1)) # rank in world for Multi-GPU trainings level = logging.INFO if verbose and rank in {-1, 0} else logging.ERROR logging.config.dictConfig({ - "version": 1, - "disable_existing_loggers": False, - "formatters": { + 'version': 1, + 'disable_existing_loggers': False, + 'formatters': { name: { - "format": "%(message)s"}}, - "handlers": { + 'format': '%(message)s'}}, + 'handlers': { name: { - "class": "logging.StreamHandler", - "formatter": name, - "level": level,}}, - "loggers": { + 'class': 'logging.StreamHandler', + 'formatter': name, + 'level': level,}}, + 'loggers': { name: { - "level": level, - "handlers": [name], - "propagate": False,}}}) + 'level': level, + 'handlers': [name], + 'propagate': False,}}}) set_logging(LOGGING_NAME) # run before defining LOGGER @@ -217,7 +226,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): def methods(instance): # Get class/instance methods - return [f for f in dir(instance) if callable(getattr(instance, f)) and not f.startswith("__")] + return [f for f in dir(instance) if callable(getattr(instance, f)) and not f.startswith('__')] def print_args(args: Optional[dict] = None, show_file=True, show_func=False): @@ -298,7 +307,7 @@ def check_online(): def run_once(): # Check once try: - socket.create_connection(("1.1.1.1", 443), 5) # check host accessibility + socket.create_connection(('1.1.1.1', 443), 5) # check host accessibility return True except OSError: return False @@ -337,7 +346,7 @@ def check_git_status(repo='ultralytics/yolov5', branch='master'): n = int(check_output(f'git rev-list {local_branch}..{remote}/{branch} --count', shell=True)) # commits behind if n > 0: pull = 'git pull' if remote == 'origin' else f'git pull {remote} {branch}' - s += f"โš ๏ธ YOLOv5 is out of date by {n} commit{'s' * (n > 1)}. Use `{pull}` or `git clone {url}` to update." + s += f"โš ๏ธ YOLOv5 is out of date by {n} commit{'s' * (n > 1)}. Use '{pull}' or 'git clone {url}' to update." else: s += f'up to date with {url} โœ…' LOGGER.info(s) @@ -379,34 +388,50 @@ def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=Fals @TryExcept() -def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True, cmds=''): - # Check installed dependencies meet YOLOv5 requirements (pass *.txt file or list of packages or single package str) +def check_requirements(requirements=ROOT.parent / 'requirements.txt', exclude=(), install=True, cmds=''): + """ + Check if installed dependencies meet YOLOv5 requirements and attempt to auto-update if needed. 
+ + Args: + requirements (Union[Path, str, List[str]]): Path to a requirements.txt file, a single package requirement as a + string, or a list of package requirements as strings. + exclude (Tuple[str]): Tuple of package names to exclude from checking. + install (bool): If True, attempt to auto-update packages that don't meet requirements. + cmds (str): Additional commands to pass to the pip install command when auto-updating. + + Returns: + None + """ prefix = colorstr('red', 'bold', 'requirements:') check_python() # check python version + file = None if isinstance(requirements, Path): # requirements.txt file file = requirements.resolve() - assert file.exists(), f"{prefix} {file} not found, check failed." + assert file.exists(), f'{prefix} {file} not found, check failed.' with file.open() as f: requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(f) if x.name not in exclude] elif isinstance(requirements, str): requirements = [requirements] - s = '' - n = 0 + s = '' # console string + n = 0 # number of packages updates for r in requirements: try: pkg.require(r) except (pkg.VersionConflict, pkg.DistributionNotFound): # exception if requirements not met - s += f'"{r}" ' - n += 1 + try: # attempt to import (slower but more accurate) + import importlib + importlib.import_module(next(pkg.parse_requirements(r)).name) + except ImportError: + s += f'"{r}" ' + n += 1 if s and install and AUTOINSTALL: # check environment variable LOGGER.info(f"{prefix} YOLOv5 requirement{'s' * (n > 1)} {s}not found, attempting AutoUpdate...") try: - # assert check_online(), "AutoUpdate skipped (offline)" - LOGGER.info(check_output(f'pip install {s} {cmds}', shell=True).decode()) - source = file if 'file' in locals() else requirements - s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \ + assert check_online(), 'AutoUpdate skipped (offline)' + LOGGER.info(subprocess.check_output(f'pip install {s} {cmds}', shell=True).decode()) + s = f"{prefix} {n} package{'s' * (n > 1)} updated per {file or requirements}\n" \ f"{prefix} โš ๏ธ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n" LOGGER.info(s) except Exception as e: @@ -428,7 +453,7 @@ def check_img_size(imgsz, s=32, floor=0): def check_imshow(warn=False): # Check if environment supports image displays try: - assert not is_notebook() + assert not is_jupyter() assert not is_docker() cv2.imshow('test', np.zeros((1, 1, 3))) cv2.waitKey(1) @@ -449,7 +474,7 @@ def check_suffix(file='yolov5s.pt', suffix=('.pt',), msg=''): for f in file if isinstance(file, (list, tuple)) else [file]: s = Path(f).suffix.lower() # file suffix if len(s): - assert s in suffix, f"{msg}{f} acceptable suffix is {suffix}" + assert s in suffix, f'{msg}{f} acceptable suffix is {suffix}' def check_yaml(file, suffix=('.yaml', '.yml')): @@ -551,12 +576,12 @@ def check_dataset(data, autodownload=True): r = None # success elif s.startswith('bash '): # bash script LOGGER.info(f'Running {s} ...') - r = os.system(s) + r = subprocess.run(s, shell=True) else: # python script r = exec(s, {'yaml': data}) # return None dt = f'({round(time.time() - t, 1)}s)' - s = f"success โœ… {dt}, saved to {colorstr('bold', DATASETS_DIR)}" if r in (0, None) else f"failure {dt} โŒ" - LOGGER.info(f"Dataset download {s}") + s = f"success โœ… {dt}, saved to {colorstr('bold', DATASETS_DIR)}" if r in (0, None) else f'failure {dt} โŒ' + LOGGER.info(f'Dataset download {s}') check_font('Arial.ttf' if is_ascii(data['names']) else 'Arial.Unicode.ttf', progress=True) # 
download fonts return data # dictionary @@ -629,10 +654,7 @@ def download_one(url, dir): LOGGER.info(f'Downloading {url} to {f}...') for i in range(retry + 1): if curl: - s = 'sS' if threads > 1 else '' # silent - r = os.system( - f'curl -# -{s}L "{url}" -o "{f}" --retry 9 -C -') # curl download with retry, continue - success = r == 0 + success = curl_download(url, f, silent=(threads > 1)) else: torch.hub.download_url_to_file(url, f, progress=threads == 1) # torch download success = f.is_file() @@ -648,9 +670,9 @@ def download_one(url, dir): if is_zipfile(f): unzip_file(f, dir) # unzip elif is_tarfile(f): - os.system(f'tar xf {f} --directory {f.parent}') # unzip + subprocess.run(['tar', 'xf', f, '--directory', f.parent], check=True) # unzip elif f.suffix == '.gz': - os.system(f'tar xfz {f} --directory {f.parent}') # unzip + subprocess.run(['tar', 'xfz', f, '--directory', f.parent], check=True) # unzip if delete: f.unlink() # remove zip @@ -675,7 +697,7 @@ def make_divisible(x, divisor): def clean_str(s): # Cleans a string by replacing special characters with underscore _ - return re.sub(pattern="[|@#!ยกยท$โ‚ฌ%&()=?ยฟ^*;:,ยจยด><+]", repl="_", string=s) + return re.sub(pattern='[|@#!ยกยท$โ‚ฌ%&()=?ยฟ^*;:,ยจยด><+]', repl='_', string=s) def one_cycle(y1=0.0, y2=1.0, steps=100): @@ -750,30 +772,30 @@ def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper) def xyxy2xywh(x): # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center - y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center - y[:, 2] = x[:, 2] - x[:, 0] # width - y[:, 3] = x[:, 3] - x[:, 1] # height + y[..., 0] = (x[..., 0] + x[..., 2]) / 2 # x center + y[..., 1] = (x[..., 1] + x[..., 3]) / 2 # y center + y[..., 2] = x[..., 2] - x[..., 0] # width + y[..., 3] = x[..., 3] - x[..., 1] # height return y def xywh2xyxy(x): # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x - y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y - y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x - y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y + y[..., 0] = x[..., 0] - x[..., 2] / 2 # top left x + y[..., 1] = x[..., 1] - x[..., 3] / 2 # top left y + y[..., 2] = x[..., 0] + x[..., 2] / 2 # bottom right x + y[..., 3] = x[..., 1] + x[..., 3] / 2 # bottom right y return y def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0): # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw # top left x - y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh # top left y - y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw # bottom right x - y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh # bottom right y + y[..., 0] = w * (x[..., 0] - x[..., 2] / 2) + padw # top left x + y[..., 1] = h * (x[..., 1] - x[..., 3] / 2) + padh # top left y + y[..., 2] = w * (x[..., 0] + x[..., 2] / 2) + padw # bottom right x + y[..., 3] = h * (x[..., 1] + x[..., 3] / 2) + padh # bottom right y return y @@ -782,18 +804,18 @@ def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0): if clip: clip_boxes(x, (h - eps, w - eps)) # warning: inplace clip y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - y[:, 0] = ((x[:, 0] + x[:, 2]) / 2) / w # x center - y[:, 
1] = ((x[:, 1] + x[:, 3]) / 2) / h # y center - y[:, 2] = (x[:, 2] - x[:, 0]) / w # width - y[:, 3] = (x[:, 3] - x[:, 1]) / h # height + y[..., 0] = ((x[..., 0] + x[..., 2]) / 2) / w # x center + y[..., 1] = ((x[..., 1] + x[..., 3]) / 2) / h # y center + y[..., 2] = (x[..., 2] - x[..., 0]) / w # width + y[..., 3] = (x[..., 3] - x[..., 1]) / h # height return y def xyn2xy(x, w=640, h=640, padw=0, padh=0): # Convert normalized segments into pixel segments, shape (n,2) y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - y[:, 0] = w * x[:, 0] + padw # top left x - y[:, 1] = h * x[:, 1] + padh # top left y + y[..., 0] = w * x[..., 0] + padw # top left x + y[..., 1] = h * x[..., 1] + padh # top left y return y @@ -833,9 +855,9 @@ def scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None): gain = ratio_pad[0][0] pad = ratio_pad[1] - boxes[:, [0, 2]] -= pad[0] # x padding - boxes[:, [1, 3]] -= pad[1] # y padding - boxes[:, :4] /= gain + boxes[..., [0, 2]] -= pad[0] # x padding + boxes[..., [1, 3]] -= pad[1] # y padding + boxes[..., :4] /= gain clip_boxes(boxes, img0_shape) return boxes @@ -862,13 +884,13 @@ def scale_segments(img1_shape, segments, img0_shape, ratio_pad=None, normalize=F def clip_boxes(boxes, shape): # Clip boxes (xyxy) to image shape (height, width) if isinstance(boxes, torch.Tensor): # faster individually - boxes[:, 0].clamp_(0, shape[1]) # x1 - boxes[:, 1].clamp_(0, shape[0]) # y1 - boxes[:, 2].clamp_(0, shape[1]) # x2 - boxes[:, 3].clamp_(0, shape[0]) # y2 + boxes[..., 0].clamp_(0, shape[1]) # x1 + boxes[..., 1].clamp_(0, shape[0]) # y1 + boxes[..., 2].clamp_(0, shape[1]) # x2 + boxes[..., 3].clamp_(0, shape[0]) # y2 else: # np.array (faster grouped) - boxes[:, [0, 2]] = boxes[:, [0, 2]].clip(0, shape[1]) # x1, x2 - boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, shape[0]) # y1, y2 + boxes[..., [0, 2]] = boxes[..., [0, 2]].clip(0, shape[1]) # x1, x2 + boxes[..., [1, 3]] = boxes[..., [1, 3]].clip(0, shape[0]) # y1, y2 def clip_segments(segments, shape): @@ -898,6 +920,9 @@ def non_max_suppression( list of detections, on (n,6) tensor per image [xyxy, conf, cls] """ + # Checks + assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0' + assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0' if isinstance(prediction, (list, tuple)): # YOLOv5 model in validation model, output = (inference_out, loss_out) prediction = prediction[0] # select only inference output @@ -909,10 +934,6 @@ def non_max_suppression( nc = prediction.shape[2] - nm - 5 # number of classes xc = prediction[..., 4] > conf_thres # candidates - # Checks - assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0' - assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0' - # Settings # min_wh = 2 # (pixels) minimum box width and height max_wh = 7680 # (pixels) maximum box width and height @@ -970,17 +991,13 @@ def non_max_suppression( n = x.shape[0] # number of boxes if not n: # no boxes continue - elif n > max_nms: # excess boxes - x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence - else: - x = x[x[:, 4].argsort(descending=True)] # sort by confidence + x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence and remove excess boxes # Batched NMS c = x[:, 5:6] * (0 if agnostic else max_wh) # classes boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores i = 
torchvision.ops.nms(boxes, scores, iou_thres) # NMS - if i.shape[0] > max_det: # limit detections - i = i[:max_det] + i = i[:max_det] # limit detections if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean) # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4) iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix @@ -1027,7 +1044,7 @@ def print_mutation(keys, results, hyp, save_dir, bucket, prefix=colorstr('evolve if bucket: url = f'gs://{bucket}/evolve.csv' if gsutil_getsize(url) > (evolve_csv.stat().st_size if evolve_csv.exists() else 0): - os.system(f'gsutil cp {url} {save_dir}') # download evolve.csv if larger than local + subprocess.run(['gsutil', 'cp', f'{url}', f'{save_dir}']) # download evolve.csv if larger than local # Log to evolve.csv s = '' if evolve_csv.exists() else (('%20s,' * n % keys).rstrip(',') + '\n') # add header @@ -1051,7 +1068,7 @@ def print_mutation(keys, results, hyp, save_dir, bucket, prefix=colorstr('evolve for x in vals) + '\n\n') if bucket: - os.system(f'gsutil cp {evolve_csv} {evolve_yaml} gs://{bucket}') # upload + subprocess.run(['gsutil', 'cp', f'{evolve_csv}', f'{evolve_yaml}', f'gs://{bucket}']) # upload def apply_classifier(x, model, img, im0): @@ -1119,13 +1136,13 @@ def increment_path(path, exist_ok=False, sep='', mkdir=False): imshow_ = cv2.imshow # copy to avoid recursion errors -def imread(path, flags=cv2.IMREAD_COLOR): - return cv2.imdecode(np.fromfile(path, np.uint8), flags) +def imread(filename, flags=cv2.IMREAD_COLOR): + return cv2.imdecode(np.fromfile(filename, np.uint8), flags) -def imwrite(path, im): +def imwrite(filename, img): try: - cv2.imencode(Path(path).suffix, im)[1].tofile(path) + cv2.imencode(Path(filename).suffix, img)[1].tofile(filename) return True except Exception: return False @@ -1135,6 +1152,7 @@ def imshow(path, im): imshow_(path.encode('unicode_escape').decode(), im) -cv2.imread, cv2.imwrite, cv2.imshow = imread, imwrite, imshow # redefine +if Path(inspect.stack()[0].filename).parent.parent.as_posix() in inspect.stack()[-1].filename: + cv2.imread, cv2.imwrite, cv2.imshow = imread, imwrite, imshow # redefine # Variables ------------------------------------------------------------------------------------------------------------ diff --git a/utils/google_app_engine/additional_requirements.txt b/utils/google_app_engine/additional_requirements.txt index 42d7ffc0eed8..d5b76758c876 100644 --- a/utils/google_app_engine/additional_requirements.txt +++ b/utils/google_app_engine/additional_requirements.txt @@ -1,4 +1,5 @@ # add these requirements in your app on top of the existing ones pip==21.1 Flask==1.0.2 -gunicorn==19.9.0 +gunicorn==19.10.0 +werkzeug>=2.2.3 # not directly required, pinned by Snyk to avoid a vulnerability diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 22da87034f24..c7c283b728ac 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license """ Logging utils """ @@ -9,7 +9,6 @@ import pkg_resources as pkg import torch -from torch.utils.tensorboard import SummaryWriter from utils.general import LOGGER, colorstr, cv2 from utils.loggers.clearml.clearml_utils import ClearmlLogger @@ -20,6 +19,11 @@ LOGGERS = ('csv', 'tb', 'wandb', 'clearml', 'comet') # *.csv, TensorBoard, Weights & Biases, ClearML RANK = int(os.getenv('RANK', -1)) +try: + from torch.utils.tensorboard import SummaryWriter +except ImportError: + SummaryWriter = lambda *args: None # None 
= SummaryWriter(str) + try: import wandb @@ -84,10 +88,6 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, self.csv = True # always log to csv # Messages - # if not wandb: - # prefix = colorstr('Weights & Biases: ') - # s = f"{prefix}run 'pip install wandb' to automatically track and visualize YOLOv5 ๐Ÿš€ runs in Weights & Biases" - # self.logger.info(s) if not clearml: prefix = colorstr('ClearML: ') s = f"{prefix}run 'pip install clearml' to automatically track, visualize and remotely train YOLOv5 ๐Ÿš€ in ClearML" @@ -105,14 +105,8 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, # W&B if wandb and 'wandb' in self.include: - wandb_artifact_resume = isinstance(self.opt.resume, str) and self.opt.resume.startswith('wandb-artifact://') - run_id = torch.load(self.weights).get('wandb_id') if self.opt.resume and not wandb_artifact_resume else None self.opt.hyp = self.hyp # add hyperparameters - self.wandb = WandbLogger(self.opt, run_id) - # temp warn. because nested artifacts not supported after 0.12.10 - # if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.11'): - # s = "YOLOv5 temporarily requires wandb version 0.12.10 or below. Some features may not work as expected." - # self.logger.warning(s) + self.wandb = WandbLogger(self.opt) else: self.wandb = None @@ -124,15 +118,15 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, self.clearml = None prefix = colorstr('ClearML: ') LOGGER.warning(f'{prefix}WARNING โš ๏ธ ClearML is installed but not configured, skipping ClearML logging.' - f' See https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml#readme') + f' See https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration#readme') else: self.clearml = None # Comet if comet_ml and 'comet' in self.include: - if isinstance(self.opt.resume, str) and self.opt.resume.startswith("comet://"): - run_id = self.opt.resume.split("/")[-1] + if isinstance(self.opt.resume, str) and self.opt.resume.startswith('comet://'): + run_id = self.opt.resume.split('/')[-1] self.comet_logger = CometLogger(self.opt, self.hyp, run_id=run_id) else: @@ -168,14 +162,14 @@ def on_pretrain_routine_end(self, labels, names): plot_labels(labels, names, self.save_dir) paths = self.save_dir.glob('*labels*.jpg') # training labels if self.wandb: - self.wandb.log({"Labels": [wandb.Image(str(x), caption=x.name) for x in paths]}) + self.wandb.log({'Labels': [wandb.Image(str(x), caption=x.name) for x in paths]}) # if self.clearml: # pass # ClearML saves these images automatically using hooks if self.comet_logger: self.comet_logger.on_pretrain_routine_end(paths) def on_train_batch_end(self, model, ni, imgs, targets, paths, vals): - log_dict = dict(zip(self.keys[0:3], vals)) + log_dict = dict(zip(self.keys[:3], vals)) # Callback runs on train batch end # ni: number integrated batches (since train start) if self.plots: @@ -221,10 +215,10 @@ def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix) # Callback runs on val end if self.wandb or self.clearml: files = sorted(self.save_dir.glob('val*.jpg')) - if self.wandb: - self.wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in files]}) - if self.clearml: - self.clearml.log_debug_samples(files, title='Validation') + if self.wandb: + self.wandb.log({'Validation': [wandb.Image(str(f), caption=f.name) for f in files]}) + if self.clearml: + self.clearml.log_debug_samples(files, title='Validation') if 
self.comet_logger: self.comet_logger.on_val_end(nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix) @@ -253,7 +247,7 @@ def on_fit_epoch_end(self, vals, epoch, best_fitness, fi): for i, name in enumerate(self.best_keys): self.wandb.wandb_run.summary[name] = best_results[i] # log best results in the summary self.wandb.log(x) - self.wandb.end_epoch(best_result=best_fitness == fi) + self.wandb.end_epoch() if self.clearml: self.clearml.current_epoch_logged_images = set() # reset epoch image limit @@ -289,7 +283,7 @@ def on_train_end(self, last, best, epoch, results): if self.wandb: self.wandb.log(dict(zip(self.keys[3:10], results))) - self.wandb.log({"Results": [wandb.Image(str(f), caption=f.name) for f in files]}) + self.wandb.log({'Results': [wandb.Image(str(f), caption=f.name) for f in files]}) # Calling wandb.log. TODO: Refactor this into WandbLogger.log_model if not self.opt.evolve: wandb.log_artifact(str(best if best.exists() else last), @@ -339,7 +333,7 @@ def __init__(self, opt, console_logger, include=('tb', 'wandb')): if wandb and 'wandb' in self.include: self.wandb = wandb.init(project=web_project_name(str(opt.project)), - name=None if opt.name == "exp" else opt.name, + name=None if opt.name == 'exp' else opt.name, config=opt) else: self.wandb = None @@ -380,12 +374,12 @@ def log_graph(self, model, imgsz=(640, 640)): def log_model(self, model_path, epoch=0, metadata={}): # Log model to all loggers if self.wandb: - art = wandb.Artifact(name=f"run_{wandb.run.id}_model", type="model", metadata=metadata) + art = wandb.Artifact(name=f'run_{wandb.run.id}_model', type='model', metadata=metadata) art.add_file(str(model_path)) wandb.log_artifact(art) def update_params(self, params): - # Update the paramters logged + # Update the parameters logged if self.wandb: wandb.run.config.update(params, allow_val_change=True) diff --git a/utils/loggers/clearml/README.md b/utils/loggers/clearml/README.md index 3cf4c268583f..ca41c040193c 100644 --- a/utils/loggers/clearml/README.md +++ b/utils/loggers/clearml/README.md @@ -23,7 +23,6 @@ And so much more. It's up to you how many of these tools you want to use, you ca ![ClearML scalars dashboard](https://github.com/thepycoder/clearml_screenshots/raw/main/experiment_manager_with_compare.gif) -

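For reference, the ClearML setup the next hunk walks through (`pip install clearml`, then `clearml-init`) ultimately feeds into a ClearML `Task` object that captures each training run. A minimal sketch of that tracking, assuming `clearml` is installed and credentials are configured; the project, task name, and parameter values here are hypothetical:

```python
# Minimal ClearML experiment-tracking sketch (illustrative only).
# Assumes `pip install clearml` and credentials configured via `clearml-init`.
from clearml import Task

task = Task.init(project_name='YOLOv5', task_name='Training')  # register a Task (ClearML's name for an experiment)
task.connect({'img': 640, 'batch': 16, 'epochs': 3})  # record hyperparameters alongside the run
```

The YOLOv5 integration performs the equivalent of these two calls automatically when `clearml` is installed, which is why a plain `train.py` run is captured without extra code.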
@@ -35,15 +34,15 @@ Either sign up for free to the [ClearML Hosted Service](https://cutt.ly/yolov5-t 1. Install the `clearml` python package: - ```bash - pip install clearml - ``` + ```bash + pip install clearml + ``` 1. Connect the ClearML SDK to the server by [creating credentials](https://app.clear.ml/settings/workspace-configuration) (go right top to Settings -> Workspace -> Create new credentials), then execute the command below and follow the instructions: - ```bash - clearml-init - ``` + ```bash + clearml-init + ``` That's it! You're done ๐Ÿ˜Ž @@ -60,18 +59,20 @@ pip install clearml>=1.2.0 This will enable integration with the YOLOv5 training script. Every training run from now on, will be captured and stored by the ClearML experiment manager. If you want to change the `project_name` or `task_name`, use the `--project` and `--name` arguments of the `train.py` script, by default the project will be called `YOLOv5` and the task `Training`. -PLEASE NOTE: ClearML uses `/` as a delimter for subprojects, so be careful when using `/` in your project name! +PLEASE NOTE: ClearML uses `/` as a delimiter for subprojects, so be careful when using `/` in your project name! ```bash python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache ``` or with custom project and task name: + ```bash python train.py --project my_project --name my_training --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache ``` This will capture: + - Source code + uncommitted changes - Installed packages - (Hyper)parameters @@ -94,7 +95,7 @@ There even more we can do with all of this information, like hyperparameter opti ## ๐Ÿ”— Dataset Version Management -Versioning your data separately from your code is generally a good idea and makes it easy to aqcuire the latest version too. This repository supports supplying a dataset version ID and it will make sure to get the data if it's not there yet. Next to that, this workflow also saves the used dataset ID as part of the task parameters, so you will always know for sure which data was used in which experiment! +Versioning your data separately from your code is generally a good idea and makes it easy to acquire the latest version too. This repository supports supplying a dataset version ID, and it will make sure to get the data if it's not there yet. Next to that, this workflow also saves the used dataset ID as part of the task parameters, so you will always know for sure which data was used in which experiment! ![ClearML Dataset Interface](https://github.com/thepycoder/clearml_screenshots/raw/main/clearml_data.gif) @@ -112,6 +113,7 @@ The YOLOv5 repository supports a number of different datasets by using yaml file |_ LICENSE |_ README.txt ``` + But this can be any dataset you wish. Feel free to use your own, as long as you keep to this folder structure. Next, โš ๏ธ**copy the corresponding yaml file to the root of the dataset folder**โš ๏ธ. This yaml files contains the information ClearML will need to properly use the dataset. You can make this yourself too, of course, just follow the structure of the example yamls. 
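The dataset yaml that the hunk above says to copy into the dataset root must carry the keys the next hunk lists (`path`, `train`, `test`, `val`, `nc`, `names`). As a quick pre-upload sanity check, a minimal sketch; the file name and location are hypothetical:

```python
# Hypothetical check that a dataset yaml carries the keys the README names.
import yaml

REQUIRED_KEYS = {'path', 'train', 'test', 'val', 'nc', 'names'}  # keys listed in this README

with open('coco128.yaml') as f:  # the yaml copied into the dataset root folder
    data = yaml.safe_load(f)

missing = REQUIRED_KEYS - set(data)
assert not missing, f'dataset yaml is missing keys: {missing}'
```

Running this before `clearml-data sync` catches a malformed yaml locally rather than after the dataset has already been uploaded.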
@@ -132,13 +134,15 @@ Basically we need the following keys: `path`, `train`, `test`, `val`, `nc`, `nam ### Upload Your Dataset -To get this dataset into ClearML as a versionned dataset, go to the dataset root folder and run the following command: +To get this dataset into ClearML as a versioned dataset, go to the dataset root folder and run the following command: + ```bash cd coco128 clearml-data sync --project YOLOv5 --name coco128 --folder . ``` The command `clearml-data sync` is actually a shorthand command. You could also run these commands one after the other: + ```bash # Optionally add --parent <parent_dataset_id> if you want to base # this version on another dataset version, so no duplicate files are uploaded! @@ -177,7 +181,7 @@ python utils/loggers/clearml/hpo.py ## 🤯 Remote Execution (advanced) -Running HPO locally is really handy, but what if we want to run our experiments on a remote machine instead? Maybe you have access to a very powerful GPU machine on-site or you have some budget to use cloud GPUs. +Running HPO locally is really handy, but what if we want to run our experiments on a remote machine instead? Maybe you have access to a very powerful GPU machine on-site, or you have some budget to use cloud GPUs. This is where the ClearML Agent comes into play. Check out what the agent can do here: - [YouTube video](https://youtu.be/MX3BrXnaULs) @@ -186,6 +190,7 @@ This is where the ClearML Agent comes into play. In short: every experiment tracked by the experiment manager contains enough information to reproduce it on a different machine (installed packages, uncommitted changes etc.). So a ClearML agent does just that: it listens to a queue for incoming tasks and when it finds one, it recreates the environment and runs it while still reporting scalars, plots etc. to the experiment manager. You can turn any machine (a cloud VM, a local GPU machine, your own laptop ... ) into a ClearML agent by simply running: + ```bash clearml-agent daemon --queue <queues_to_listen_to> [--docker] ``` @@ -194,11 +199,11 @@ clearml-agent daemon --queue <queues_to_listen_to> [--docker] With our agent running, we can give it some work. Remember from the HPO section that we can clone a task and edit the hyperparameters? We can do that from the interface too! -🪄 Clone the experiment by right clicking it +🪄 Clone the experiment by right-clicking it 🎯 Edit the hyperparameters to what you wish them to be -⏳ Enqueue the task to any of the queues by right clicking it +⏳ Enqueue the task to any of the queues by right-clicking it ![Enqueue a task from the UI](https://github.com/thepycoder/clearml_screenshots/raw/main/enqueue.gif) @@ -206,7 +211,8 @@ With our agent running, we can give it some work. Now you can clone a task like we explained above, or simply mark your current script by adding `task.execute_remotely()` and on execution it will be put into a queue, for the agent to start working on! -To run the YOLOv5 training script remotely, all you have to do is add this line to the training.py script after the clearml logger has been instatiated: +To run the YOLOv5 training script remotely, all you have to do is add this line to the training.py script after the clearml logger has been instantiated: + ```python # ... 
# Loggers @@ -214,16 +220,17 @@ data_dict = None if RANK in {-1, 0}: loggers = Loggers(save_dir, weights, opt, hyp, LOGGER) # loggers instance if loggers.clearml: - loggers.clearml.task.execute_remotely(queue='my_queue') # <------ ADD THIS LINE + loggers.clearml.task.execute_remotely(queue="my_queue") # <------ ADD THIS LINE # Data_dict is either None is user did not choose for ClearML dataset or is filled in by ClearML data_dict = loggers.clearml.data_dict # ... ``` + When running the training script after this change, python will run the script up until that line, after which it will package the code and send it to the queue instead! ### Autoscaling workers -ClearML comes with autoscalers too! This tool will automatically spin up new remote machines in the cloud of your choice (AWS, GCP, Azure) and turn them into ClearML agents for you whenever there are experiments detected in the queue. Once the tasks are processed, the autoscaler will automatically shut down the remote machines and you stop paying! +ClearML comes with autoscalers too! This tool will automatically spin up new remote machines in the cloud of your choice (AWS, GCP, Azure) and turn them into ClearML agents for you whenever there are experiments detected in the queue. Once the tasks are processed, the autoscaler will automatically shut down the remote machines, and you stop paying! Check out the autoscalers getting started video below. diff --git a/utils/loggers/clearml/clearml_utils.py b/utils/loggers/clearml/clearml_utils.py index 3457727a96a4..2764abe90da8 100644 --- a/utils/loggers/clearml/clearml_utils.py +++ b/utils/loggers/clearml/clearml_utils.py @@ -25,7 +25,7 @@ def construct_dataset(clearml_info_string): dataset_root_path = Path(dataset.get_local_copy()) # We'll search for the yaml file definition in the dataset - yaml_filenames = list(glob.glob(str(dataset_root_path / "*.yaml")) + glob.glob(str(dataset_root_path / "*.yml"))) + yaml_filenames = list(glob.glob(str(dataset_root_path / '*.yaml')) + glob.glob(str(dataset_root_path / '*.yml'))) if len(yaml_filenames) > 1: raise ValueError('More than one yaml file was found in the dataset root, cannot determine which one contains ' 'the dataset definition this way.') @@ -100,7 +100,7 @@ def __init__(self, opt, hyp): self.task.connect(opt, name='Args') # Make sure the code is easily remotely runnable by setting the docker image to use by the remote agent - self.task.set_base_docker("ultralytics/yolov5:latest", + self.task.set_base_docker('ultralytics/yolov5:latest', docker_arguments='--ipc=host -e="CLEARML_AGENT_SKIP_PYTHON_ENV_INSTALL=1"', docker_setup_bash_script='pip install clearml') @@ -150,7 +150,7 @@ def log_image_with_boxes(self, image_path, boxes, class_names, image, conf_thres class_name = class_names[int(class_nr)] confidence_percentage = round(float(conf) * 100, 2) - label = f"{class_name}: {confidence_percentage}%" + label = f'{class_name}: {confidence_percentage}%' if conf > conf_threshold: annotator.rectangle(box.cpu().numpy(), outline=color) diff --git a/utils/loggers/comet/README.md b/utils/loggers/comet/README.md index 8f206cd9830e..aee8d16a336c 100644 --- a/utils/loggers/comet/README.md +++ b/utils/loggers/comet/README.md @@ -2,13 +2,13 @@ # YOLOv5 with Comet -This guide will cover how to use YOLOv5 with [Comet](https://bit.ly/yolov5-readme-comet) +This guide will cover how to use YOLOv5 with [Comet](https://bit.ly/yolov5-readme-comet2) # About Comet Comet builds tools that help data scientists, engineers, and team leaders accelerate and optimize 
machine learning and deep learning models. -Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! +Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://www.comet.com/docs/v2/guides/comet-dashboard/code-panels/about-panels/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! # Getting Started @@ -23,7 +23,7 @@ pip install comet_ml There are two ways to configure Comet with YOLOv5. -You can either set your credentials through enviroment variables +You can either set your credentials through environment variables **Environment Variables** @@ -49,12 +49,13 @@ project_name= # This will default to 'yolov5' python train.py --img 640 --batch 16 --epochs 5 --data coco128.yaml --weights yolov5s.pt ``` -That's it! Comet will automatically log your hyperparameters, command line arguments, training and valiation metrics. You can visualize and analyze your runs in the Comet UI +That's it! Comet will automatically log your hyperparameters, command line arguments, training and validation metrics. You can visualize and analyze your runs in the Comet UI yolo-ui # Try out an Example! -Check out an example of a [completed run here](https://www.comet.com/examples/comet-example-yolov5/a0e29e0e9b984e4a822db2a62d0cb357?experiment-tab=chart&showOutliers=true&smoothing=0&transformY=smoothing&xAxis=step&ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration) + +Check out an example of a [completed run here](https://www.comet.com/examples/comet-example-yolov5/a0e29e0e9b984e4a822db2a62d0cb357?experiment-tab=chart&showOutliers=true&smoothing=0&transformY=smoothing&xAxis=step&utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github) Or better yet, try it out yourself in this Colab Notebook @@ -65,6 +66,7 @@ Or better yet, try it out yourself in this Colab Notebook By default, Comet will log the following items ## Metrics + - Box Loss, Object Loss, Classification Loss for the training and validation data - mAP_0.5, mAP_0.5:0.95 metrics for the validation data. - Precision and Recall for the validation data @@ -119,8 +121,7 @@ You can control the frequency of logged predictions and the associated images by **Note:** The YOLOv5 validation dataloader will default to a batch size of 32, so you will have to set the logging frequency accordingly. 
-Here is an [example project using the Panel](https://www.comet.com/examples/comet-example-yolov5?shareable=YcwMiJaZSXfcEXpGOHDD12vA1&ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration) - +Here is an [example project using the Panel](https://www.comet.com/examples/comet-example-yolov5?shareable=YcwMiJaZSXfcEXpGOHDD12vA1&utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github) ```shell python train.py \ @@ -161,9 +162,9 @@ env COMET_LOG_PER_CLASS_METRICS=true python train.py \ ## Uploading a Dataset to Comet Artifacts -If you would like to store your data using [Comet Artifacts](https://www.comet.com/docs/v2/guides/data-management/using-artifacts/#learn-more?ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration), you can do so using the `upload_dataset` flag. +If you would like to store your data using [Comet Artifacts](https://www.comet.com/docs/v2/guides/data-management/using-artifacts/#learn-more?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github), you can do so using the `upload_dataset` flag. -The dataset be organized in the way described in the [YOLOv5 documentation](https://docs.ultralytics.com/tutorials/train-custom-datasets/#3-organize-directories). The dataset config `yaml` file must follow the same format as that of the `coco128.yaml` file. +The dataset must be organized in the way described in the [YOLOv5 documentation](https://docs.ultralytics.com/yolov5/tutorials/train_custom_data/). The dataset config `yaml` file must follow the same format as that of the `coco128.yaml` file. ```shell python train.py \ @@ -192,6 +193,7 @@ If you would like to use a dataset from Comet Artifacts, set the `path` variable # contents of artifact.yaml file path: "comet://<workspace name>/<artifact name>" ``` + Then pass this file to your training script in the following way ```shell @@ -221,7 +223,7 @@ python train.py \ ## Hyperparameter Search with the Comet Optimizer -YOLOv5 is also integrated with Comet's Optimizer, making is simple to visualie hyperparameter sweeps in the Comet UI. +YOLOv5 is also integrated with Comet's Optimizer, making it simple to visualize hyperparameter sweeps in the Comet UI. ### Configuring an Optimizer Sweep @@ -251,6 +253,6 @@ comet optimizer -j utils/loggers/comet/hpo.py \ ### Visualizing Results -Comet provides a number of ways to visualize the results of your sweep.
Take a look at a [project with a completed sweep here](https://www.comet.com/examples/comet-example-yolov5/view/PrlArHGuuhDTKC1UuBmTtOSXD/panels?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github) hyperparameter-yolo diff --git a/utils/loggers/comet/__init__.py b/utils/loggers/comet/__init__.py index b0318f88d6a6..d4599841c9fc 100644 --- a/utils/loggers/comet/__init__.py +++ b/utils/loggers/comet/__init__.py @@ -17,7 +17,7 @@ # Project Configuration config = comet_ml.config.get_config() - COMET_PROJECT_NAME = config.get_string(os.getenv("COMET_PROJECT_NAME"), "comet.project_name", default="yolov5") + COMET_PROJECT_NAME = config.get_string(os.getenv('COMET_PROJECT_NAME'), 'comet.project_name', default='yolov5') except (ModuleNotFoundError, ImportError): comet_ml = None COMET_PROJECT_NAME = None @@ -31,32 +31,32 @@ from utils.general import check_dataset, scale_boxes, xywh2xyxy from utils.metrics import box_iou -COMET_PREFIX = "comet://" +COMET_PREFIX = 'comet://' -COMET_MODE = os.getenv("COMET_MODE", "online") +COMET_MODE = os.getenv('COMET_MODE', 'online') # Model Saving Settings -COMET_MODEL_NAME = os.getenv("COMET_MODEL_NAME", "yolov5") +COMET_MODEL_NAME = os.getenv('COMET_MODEL_NAME', 'yolov5') # Dataset Artifact Settings -COMET_UPLOAD_DATASET = os.getenv("COMET_UPLOAD_DATASET", "false").lower() == "true" +COMET_UPLOAD_DATASET = os.getenv('COMET_UPLOAD_DATASET', 'false').lower() == 'true' # Evaluation Settings -COMET_LOG_CONFUSION_MATRIX = os.getenv("COMET_LOG_CONFUSION_MATRIX", "true").lower() == "true" -COMET_LOG_PREDICTIONS = os.getenv("COMET_LOG_PREDICTIONS", "true").lower() == "true" -COMET_MAX_IMAGE_UPLOADS = int(os.getenv("COMET_MAX_IMAGE_UPLOADS", 100)) +COMET_LOG_CONFUSION_MATRIX = os.getenv('COMET_LOG_CONFUSION_MATRIX', 'true').lower() == 'true' +COMET_LOG_PREDICTIONS = os.getenv('COMET_LOG_PREDICTIONS', 'true').lower() == 'true' +COMET_MAX_IMAGE_UPLOADS = int(os.getenv('COMET_MAX_IMAGE_UPLOADS', 100)) # Confusion Matrix Settings -CONF_THRES = float(os.getenv("CONF_THRES", 0.001)) -IOU_THRES = float(os.getenv("IOU_THRES", 0.6)) +CONF_THRES = float(os.getenv('CONF_THRES', 0.001)) +IOU_THRES = float(os.getenv('IOU_THRES', 0.6)) # Batch Logging Settings -COMET_LOG_BATCH_METRICS = os.getenv("COMET_LOG_BATCH_METRICS", "false").lower() == "true" -COMET_BATCH_LOGGING_INTERVAL = os.getenv("COMET_BATCH_LOGGING_INTERVAL", 1) -COMET_PREDICTION_LOGGING_INTERVAL = os.getenv("COMET_PREDICTION_LOGGING_INTERVAL", 1) -COMET_LOG_PER_CLASS_METRICS = os.getenv("COMET_LOG_PER_CLASS_METRICS", "false").lower() == "true" +COMET_LOG_BATCH_METRICS = os.getenv('COMET_LOG_BATCH_METRICS', 'false').lower() == 'true' +COMET_BATCH_LOGGING_INTERVAL = os.getenv('COMET_BATCH_LOGGING_INTERVAL', 1) +COMET_PREDICTION_LOGGING_INTERVAL = os.getenv('COMET_PREDICTION_LOGGING_INTERVAL', 1) +COMET_LOG_PER_CLASS_METRICS = os.getenv('COMET_LOG_PER_CLASS_METRICS', 'false').lower() == 'true' -RANK = int(os.getenv("RANK", -1)) +RANK = int(os.getenv('RANK', -1)) to_pil = T.ToPILImage() @@ -66,7 +66,7 @@ class CometLogger: with Comet """ - def __init__(self, opt, hyp, run_id=None, job_type="Training", **experiment_kwargs) -> None: + def __init__(self, opt, hyp, run_id=None, job_type='Training', **experiment_kwargs) -> None: self.job_type = job_type self.opt = opt self.hyp = hyp @@ -87,52 +87,52 @@ def __init__(self, opt, hyp, run_id=None, job_type="Training", **experiment_kwar # Default parameters to pass to Experiment objects self.default_experiment_kwargs = { - "log_code": False, - 
"log_env_gpu": True, - "log_env_cpu": True, - "project_name": COMET_PROJECT_NAME,} + 'log_code': False, + 'log_env_gpu': True, + 'log_env_cpu': True, + 'project_name': COMET_PROJECT_NAME,} self.default_experiment_kwargs.update(experiment_kwargs) self.experiment = self._get_experiment(self.comet_mode, run_id) self.data_dict = self.check_dataset(self.opt.data) - self.class_names = self.data_dict["names"] - self.num_classes = self.data_dict["nc"] + self.class_names = self.data_dict['names'] + self.num_classes = self.data_dict['nc'] self.logged_images_count = 0 self.max_images = COMET_MAX_IMAGE_UPLOADS if run_id is None: - self.experiment.log_other("Created from", "YOLOv5") + self.experiment.log_other('Created from', 'YOLOv5') if not isinstance(self.experiment, comet_ml.OfflineExperiment): - workspace, project_name, experiment_id = self.experiment.url.split("/")[-3:] + workspace, project_name, experiment_id = self.experiment.url.split('/')[-3:] self.experiment.log_other( - "Run Path", - f"{workspace}/{project_name}/{experiment_id}", + 'Run Path', + f'{workspace}/{project_name}/{experiment_id}', ) self.log_parameters(vars(opt)) self.log_parameters(self.opt.hyp) self.log_asset_data( self.opt.hyp, - name="hyperparameters.json", - metadata={"type": "hyp-config-file"}, + name='hyperparameters.json', + metadata={'type': 'hyp-config-file'}, ) self.log_asset( - f"{self.opt.save_dir}/opt.yaml", - metadata={"type": "opt-config-file"}, + f'{self.opt.save_dir}/opt.yaml', + metadata={'type': 'opt-config-file'}, ) self.comet_log_confusion_matrix = COMET_LOG_CONFUSION_MATRIX - if hasattr(self.opt, "conf_thres"): + if hasattr(self.opt, 'conf_thres'): self.conf_thres = self.opt.conf_thres else: self.conf_thres = CONF_THRES - if hasattr(self.opt, "iou_thres"): + if hasattr(self.opt, 'iou_thres'): self.iou_thres = self.opt.iou_thres else: self.iou_thres = IOU_THRES - self.log_parameters({"val_iou_threshold": self.iou_thres, "val_conf_threshold": self.conf_thres}) + self.log_parameters({'val_iou_threshold': self.iou_thres, 'val_conf_threshold': self.conf_thres}) self.comet_log_predictions = COMET_LOG_PREDICTIONS if self.opt.bbox_interval == -1: @@ -147,22 +147,22 @@ def __init__(self, opt, hyp, run_id=None, job_type="Training", **experiment_kwar self.comet_log_per_class_metrics = COMET_LOG_PER_CLASS_METRICS self.experiment.log_others({ - "comet_mode": COMET_MODE, - "comet_max_image_uploads": COMET_MAX_IMAGE_UPLOADS, - "comet_log_per_class_metrics": COMET_LOG_PER_CLASS_METRICS, - "comet_log_batch_metrics": COMET_LOG_BATCH_METRICS, - "comet_log_confusion_matrix": COMET_LOG_CONFUSION_MATRIX, - "comet_model_name": COMET_MODEL_NAME,}) + 'comet_mode': COMET_MODE, + 'comet_max_image_uploads': COMET_MAX_IMAGE_UPLOADS, + 'comet_log_per_class_metrics': COMET_LOG_PER_CLASS_METRICS, + 'comet_log_batch_metrics': COMET_LOG_BATCH_METRICS, + 'comet_log_confusion_matrix': COMET_LOG_CONFUSION_MATRIX, + 'comet_model_name': COMET_MODEL_NAME,}) # Check if running the Experiment with the Comet Optimizer - if hasattr(self.opt, "comet_optimizer_id"): - self.experiment.log_other("optimizer_id", self.opt.comet_optimizer_id) - self.experiment.log_other("optimizer_objective", self.opt.comet_optimizer_objective) - self.experiment.log_other("optimizer_metric", self.opt.comet_optimizer_metric) - self.experiment.log_other("optimizer_parameters", json.dumps(self.hyp)) + if hasattr(self.opt, 'comet_optimizer_id'): + self.experiment.log_other('optimizer_id', self.opt.comet_optimizer_id) + self.experiment.log_other('optimizer_objective', 
self.opt.comet_optimizer_objective) + self.experiment.log_other('optimizer_metric', self.opt.comet_optimizer_metric) + self.experiment.log_other('optimizer_parameters', json.dumps(self.hyp)) def _get_experiment(self, mode, experiment_id=None): - if mode == "offline": + if mode == 'offline': if experiment_id is not None: return comet_ml.ExistingOfflineExperiment( previous_experiment=experiment_id, @@ -182,11 +182,11 @@ def _get_experiment(self, mode, experiment_id=None): return comet_ml.Experiment(**self.default_experiment_kwargs) except ValueError: - logger.warning("COMET WARNING: " - "Comet credentials have not been set. " - "Comet will default to offline logging. " - "Please set your credentials to enable online logging.") - return self._get_experiment("offline", experiment_id) + logger.warning('COMET WARNING: ' + 'Comet credentials have not been set. ' + 'Comet will default to offline logging. ' + 'Please set your credentials to enable online logging.') + return self._get_experiment('offline', experiment_id) return @@ -210,12 +210,12 @@ def log_model(self, path, opt, epoch, fitness_score, best_model=False): return model_metadata = { - "fitness_score": fitness_score[-1], - "epochs_trained": epoch + 1, - "save_period": opt.save_period, - "total_epochs": opt.epochs,} + 'fitness_score': fitness_score[-1], + 'epochs_trained': epoch + 1, + 'save_period': opt.save_period, + 'total_epochs': opt.epochs,} - model_files = glob.glob(f"{path}/*.pt") + model_files = glob.glob(f'{path}/*.pt') for model_path in model_files: name = Path(model_path).name @@ -232,12 +232,12 @@ def check_dataset(self, data_file): data_config = yaml.safe_load(f) if data_config['path'].startswith(COMET_PREFIX): - path = data_config['path'].replace(COMET_PREFIX, "") + path = data_config['path'].replace(COMET_PREFIX, '') data_dict = self.download_dataset_artifact(path) return data_dict - self.log_asset(self.opt.data, metadata={"type": "data-config-file"}) + self.log_asset(self.opt.data, metadata={'type': 'data-config-file'}) return check_dataset(data_file) @@ -253,8 +253,8 @@ def log_predictions(self, image, labelsn, path, shape, predn): filtered_detections = detections[mask] filtered_labels = labelsn[mask] - image_id = path.split("/")[-1].split(".")[0] - image_name = f"{image_id}_curr_epoch_{self.experiment.curr_epoch}" + image_id = path.split('/')[-1].split('.')[0] + image_name = f'{image_id}_curr_epoch_{self.experiment.curr_epoch}' if image_name not in self.logged_image_names: native_scale_image = PIL.Image.open(path) self.log_image(native_scale_image, name=image_name) @@ -263,22 +263,22 @@ def log_predictions(self, image, labelsn, path, shape, predn): metadata = [] for cls, *xyxy in filtered_labels.tolist(): metadata.append({ - "label": f"{self.class_names[int(cls)]}-gt", - "score": 100, - "box": { - "x": xyxy[0], - "y": xyxy[1], - "x2": xyxy[2], - "y2": xyxy[3]},}) + 'label': f'{self.class_names[int(cls)]}-gt', + 'score': 100, + 'box': { + 'x': xyxy[0], + 'y': xyxy[1], + 'x2': xyxy[2], + 'y2': xyxy[3]},}) for *xyxy, conf, cls in filtered_detections.tolist(): metadata.append({ - "label": f"{self.class_names[int(cls)]}", - "score": conf * 100, - "box": { - "x": xyxy[0], - "y": xyxy[1], - "x2": xyxy[2], - "y2": xyxy[3]},}) + 'label': f'{self.class_names[int(cls)]}', + 'score': conf * 100, + 'box': { + 'x': xyxy[0], + 'y': xyxy[1], + 'x2': xyxy[2], + 'y2': xyxy[3]},}) self.metadata_dict[image_name] = metadata self.logged_images_count += 1 @@ -305,35 +305,35 @@ def preprocess_prediction(self, image, labels, shape, pred): 
return predn, labelsn def add_assets_to_artifact(self, artifact, path, asset_path, split): - img_paths = sorted(glob.glob(f"{asset_path}/*")) + img_paths = sorted(glob.glob(f'{asset_path}/*')) label_paths = img2label_paths(img_paths) for image_file, label_file in zip(img_paths, label_paths): image_logical_path, label_logical_path = map(lambda x: os.path.relpath(x, path), [image_file, label_file]) try: - artifact.add(image_file, logical_path=image_logical_path, metadata={"split": split}) - artifact.add(label_file, logical_path=label_logical_path, metadata={"split": split}) + artifact.add(image_file, logical_path=image_logical_path, metadata={'split': split}) + artifact.add(label_file, logical_path=label_logical_path, metadata={'split': split}) except ValueError as e: logger.error('COMET ERROR: Error adding file to Artifact. Skipping file.') - logger.error(f"COMET ERROR: {e}") + logger.error(f'COMET ERROR: {e}') continue return artifact def upload_dataset_artifact(self): - dataset_name = self.data_dict.get("dataset_name", "yolov5-dataset") - path = str((ROOT / Path(self.data_dict["path"])).resolve()) + dataset_name = self.data_dict.get('dataset_name', 'yolov5-dataset') + path = str((ROOT / Path(self.data_dict['path'])).resolve()) metadata = self.data_dict.copy() - for key in ["train", "val", "test"]: + for key in ['train', 'val', 'test']: split_path = metadata.get(key) if split_path is not None: - metadata[key] = split_path.replace(path, "") + metadata[key] = split_path.replace(path, '') - artifact = comet_ml.Artifact(name=dataset_name, artifact_type="dataset", metadata=metadata) + artifact = comet_ml.Artifact(name=dataset_name, artifact_type='dataset', metadata=metadata) for key in metadata.keys(): - if key in ["train", "val", "test"]: + if key in ['train', 'val', 'test']: if isinstance(self.upload_dataset, str) and (key != self.upload_dataset): continue @@ -352,13 +352,13 @@ def download_dataset_artifact(self, artifact_path): metadata = logged_artifact.metadata data_dict = metadata.copy() - data_dict["path"] = artifact_save_dir + data_dict['path'] = artifact_save_dir - metadata_names = metadata.get("names") + metadata_names = metadata.get('names') if type(metadata_names) == dict: - data_dict["names"] = {int(k): v for k, v in metadata.get("names").items()} + data_dict['names'] = {int(k): v for k, v in metadata.get('names').items()} elif type(metadata_names) == list: - data_dict["names"] = {int(k): v for k, v in zip(range(len(metadata_names)), metadata_names)} + data_dict['names'] = {int(k): v for k, v in zip(range(len(metadata_names)), metadata_names)} else: raise "Invalid 'names' field in dataset yaml file. 
Please use a list or dictionary" @@ -366,13 +366,13 @@ def download_dataset_artifact(self, artifact_path): return data_dict def update_data_paths(self, data_dict): - path = data_dict.get("path", "") + path = data_dict.get('path', '') - for split in ["train", "val", "test"]: + for split in ['train', 'val', 'test']: if data_dict.get(split): split_path = data_dict.get(split) - data_dict[split] = (f"{path}/{split_path}" if isinstance(split, str) else [ - f"{path}/{x}" for x in split_path]) + data_dict[split] = (f'{path}/{split_path}' if isinstance(split, str) else [ + f'{path}/{x}' for x in split_path]) return data_dict @@ -413,11 +413,11 @@ def on_train_batch_end(self, log_dict, step): def on_train_end(self, files, save_dir, last, best, epoch, results): if self.comet_log_predictions: curr_epoch = self.experiment.curr_epoch - self.experiment.log_asset_data(self.metadata_dict, "image-metadata.json", epoch=curr_epoch) + self.experiment.log_asset_data(self.metadata_dict, 'image-metadata.json', epoch=curr_epoch) for f in files: - self.log_asset(f, metadata={"epoch": epoch}) - self.log_asset(f"{save_dir}/results.csv", metadata={"epoch": epoch}) + self.log_asset(f, metadata={'epoch': epoch}) + self.log_asset(f'{save_dir}/results.csv', metadata={'epoch': epoch}) if not self.opt.evolve: model_path = str(best if best.exists() else last) @@ -481,7 +481,7 @@ def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix) if self.comet_log_confusion_matrix: epoch = self.experiment.curr_epoch class_names = list(self.class_names.values()) - class_names.append("background") + class_names.append('background') num_classes = len(class_names) self.experiment.log_confusion_matrix( @@ -491,7 +491,7 @@ def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix) epoch=epoch, column_label='Actual Category', row_label='Predicted Category', - file_name=f"confusion-matrix-epoch-{epoch}.json", + file_name=f'confusion-matrix-epoch-{epoch}.json', ) def on_fit_epoch_end(self, result, epoch): diff --git a/utils/loggers/comet/comet_utils.py b/utils/loggers/comet/comet_utils.py index 3cbd45156b57..27600761ad28 100644 --- a/utils/loggers/comet/comet_utils.py +++ b/utils/loggers/comet/comet_utils.py @@ -11,28 +11,28 @@ logger = logging.getLogger(__name__) -COMET_PREFIX = "comet://" -COMET_MODEL_NAME = os.getenv("COMET_MODEL_NAME", "yolov5") -COMET_DEFAULT_CHECKPOINT_FILENAME = os.getenv("COMET_DEFAULT_CHECKPOINT_FILENAME", "last.pt") +COMET_PREFIX = 'comet://' +COMET_MODEL_NAME = os.getenv('COMET_MODEL_NAME', 'yolov5') +COMET_DEFAULT_CHECKPOINT_FILENAME = os.getenv('COMET_DEFAULT_CHECKPOINT_FILENAME', 'last.pt') def download_model_checkpoint(opt, experiment): - model_dir = f"{opt.project}/{experiment.name}" + model_dir = f'{opt.project}/{experiment.name}' os.makedirs(model_dir, exist_ok=True) model_name = COMET_MODEL_NAME model_asset_list = experiment.get_model_asset_list(model_name) if len(model_asset_list) == 0: - logger.error(f"COMET ERROR: No checkpoints found for model name : {model_name}") + logger.error(f'COMET ERROR: No checkpoints found for model name : {model_name}') return model_asset_list = sorted( model_asset_list, - key=lambda x: x["step"], + key=lambda x: x['step'], reverse=True, ) - logged_checkpoint_map = {asset["fileName"]: asset["assetId"] for asset in model_asset_list} + logged_checkpoint_map = {asset['fileName']: asset['assetId'] for asset in model_asset_list} resource_url = urlparse(opt.weights) checkpoint_filename = resource_url.query @@ -44,22 +44,22 @@ def 
download_model_checkpoint(opt, experiment): checkpoint_filename = COMET_DEFAULT_CHECKPOINT_FILENAME if asset_id is None: - logger.error(f"COMET ERROR: Checkpoint {checkpoint_filename} not found in the given Experiment") + logger.error(f'COMET ERROR: Checkpoint {checkpoint_filename} not found in the given Experiment') return try: - logger.info(f"COMET INFO: Downloading checkpoint {checkpoint_filename}") + logger.info(f'COMET INFO: Downloading checkpoint {checkpoint_filename}') asset_filename = checkpoint_filename - model_binary = experiment.get_asset(asset_id, return_type="binary", stream=False) - model_download_path = f"{model_dir}/{asset_filename}" - with open(model_download_path, "wb") as f: + model_binary = experiment.get_asset(asset_id, return_type='binary', stream=False) + model_download_path = f'{model_dir}/{asset_filename}' + with open(model_download_path, 'wb') as f: f.write(model_binary) opt.weights = model_download_path except Exception as e: - logger.warning("COMET WARNING: Unable to download checkpoint from Comet") + logger.warning('COMET WARNING: Unable to download checkpoint from Comet') logger.exception(e) @@ -75,9 +75,9 @@ def set_opt_parameters(opt, experiment): resume_string = opt.resume for asset in asset_list: - if asset["fileName"] == "opt.yaml": - asset_id = asset["assetId"] - asset_binary = experiment.get_asset(asset_id, return_type="binary", stream=False) + if asset['fileName'] == 'opt.yaml': + asset_id = asset['assetId'] + asset_binary = experiment.get_asset(asset_id, return_type='binary', stream=False) opt_dict = yaml.safe_load(asset_binary) for key, value in opt_dict.items(): setattr(opt, key, value) @@ -85,11 +85,11 @@ def set_opt_parameters(opt, experiment): # Save hyperparameters to YAML file # Necessary to pass checks in training script - save_dir = f"{opt.project}/{experiment.name}" + save_dir = f'{opt.project}/{experiment.name}' os.makedirs(save_dir, exist_ok=True) - hyp_yaml_path = f"{save_dir}/hyp.yaml" - with open(hyp_yaml_path, "w") as f: + hyp_yaml_path = f'{save_dir}/hyp.yaml' + with open(hyp_yaml_path, 'w') as f: yaml.dump(opt.hyp, f) opt.hyp = hyp_yaml_path @@ -113,7 +113,7 @@ def check_comet_weights(opt): if opt.weights.startswith(COMET_PREFIX): api = comet_ml.API() resource = urlparse(opt.weights) - experiment_path = f"{resource.netloc}{resource.path}" + experiment_path = f'{resource.netloc}{resource.path}' experiment = api.get(experiment_path) download_model_checkpoint(opt, experiment) return True @@ -140,7 +140,7 @@ def check_comet_resume(opt): if opt.resume.startswith(COMET_PREFIX): api = comet_ml.API() resource = urlparse(opt.resume) - experiment_path = f"{resource.netloc}{resource.path}" + experiment_path = f'{resource.netloc}{resource.path}' experiment = api.get(experiment_path) set_opt_parameters(opt, experiment) download_model_checkpoint(opt, experiment) diff --git a/utils/loggers/comet/hpo.py b/utils/loggers/comet/hpo.py index 7dd5c92e8de1..fc49115c1358 100644 --- a/utils/loggers/comet/hpo.py +++ b/utils/loggers/comet/hpo.py @@ -21,7 +21,7 @@ # Project Configuration config = comet_ml.config.get_config() -COMET_PROJECT_NAME = config.get_string(os.getenv("COMET_PROJECT_NAME"), "comet.project_name", default="yolov5") +COMET_PROJECT_NAME = config.get_string(os.getenv('COMET_PROJECT_NAME'), 'comet.project_name', default='yolov5') def get_args(known=False): @@ -68,30 +68,30 @@ def get_args(known=False): parser.add_argument('--artifact_alias', type=str, default='latest', help='W&B: Version of dataset artifact to use') # Comet Arguments - 
parser.add_argument("--comet_optimizer_config", type=str, help="Comet: Path to a Comet Optimizer Config File.") - parser.add_argument("--comet_optimizer_id", type=str, help="Comet: ID of the Comet Optimizer sweep.") - parser.add_argument("--comet_optimizer_objective", type=str, help="Comet: Set to 'minimize' or 'maximize'.") - parser.add_argument("--comet_optimizer_metric", type=str, help="Comet: Metric to Optimize.") - parser.add_argument("--comet_optimizer_workers", + parser.add_argument('--comet_optimizer_config', type=str, help='Comet: Path to a Comet Optimizer Config File.') + parser.add_argument('--comet_optimizer_id', type=str, help='Comet: ID of the Comet Optimizer sweep.') + parser.add_argument('--comet_optimizer_objective', type=str, help="Comet: Set to 'minimize' or 'maximize'.") + parser.add_argument('--comet_optimizer_metric', type=str, help='Comet: Metric to Optimize.') + parser.add_argument('--comet_optimizer_workers', type=int, default=1, - help="Comet: Number of Parallel Workers to use with the Comet Optimizer.") + help='Comet: Number of Parallel Workers to use with the Comet Optimizer.') return parser.parse_known_args()[0] if known else parser.parse_args() def run(parameters, opt): - hyp_dict = {k: v for k, v in parameters.items() if k not in ["epochs", "batch_size"]} + hyp_dict = {k: v for k, v in parameters.items() if k not in ['epochs', 'batch_size']} opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve)) - opt.batch_size = parameters.get("batch_size") - opt.epochs = parameters.get("epochs") + opt.batch_size = parameters.get('batch_size') + opt.epochs = parameters.get('epochs') device = select_device(opt.device, batch_size=opt.batch_size) train(hyp_dict, opt, device, callbacks=Callbacks()) -if __name__ == "__main__": +if __name__ == '__main__': opt = get_args(known=True) opt.weights = str(opt.weights) @@ -99,7 +99,7 @@ def run(parameters, opt): opt.data = str(opt.data) opt.project = str(opt.project) - optimizer_id = os.getenv("COMET_OPTIMIZER_ID") + optimizer_id = os.getenv('COMET_OPTIMIZER_ID') if optimizer_id is None: with open(opt.comet_optimizer_config) as f: optimizer_config = json.load(f) @@ -110,9 +110,9 @@ def run(parameters, opt): opt.comet_optimizer_id = optimizer.id status = optimizer.status() - opt.comet_optimizer_objective = status["spec"]["objective"] - opt.comet_optimizer_metric = status["spec"]["metric"] + opt.comet_optimizer_objective = status['spec']['objective'] + opt.comet_optimizer_metric = status['spec']['metric'] - logger.info("COMET INFO: Starting Hyperparameter Sweep") + logger.info('COMET INFO: Starting Hyperparameter Sweep') for parameter in optimizer.get_parameters(): - run(parameter["parameters"], opt) + run(parameter['parameters'], opt) diff --git a/utils/loggers/wandb/README.md b/utils/loggers/wandb/README.md deleted file mode 100644 index d78324b4c8e9..000000000000 --- a/utils/loggers/wandb/README.md +++ /dev/null @@ -1,162 +0,0 @@ -๐Ÿ“š This guide explains how to use **Weights & Biases** (W&B) with YOLOv5 ๐Ÿš€. UPDATED 29 September 2021. - -- [About Weights & Biases](#about-weights-&-biases) -- [First-Time Setup](#first-time-setup) -- [Viewing runs](#viewing-runs) -- [Disabling wandb](#disabling-wandb) -- [Advanced Usage: Dataset Versioning and Evaluation](#advanced-usage) -- [Reports: Share your work with the world!](#reports) - -## About Weights & Biases - -Think of [W&B](https://wandb.ai/site?utm_campaign=repo_yolo_wandbtutorial) like GitHub for machine learning models. 
With a few lines of code, save everything you need to debug, compare and reproduce your models — architecture, hyperparameters, git commits, model weights, GPU usage, and even datasets and predictions. - -Used by top researchers including teams at OpenAI, Lyft, GitHub, and MILA, W&B is part of the new standard of best practices for machine learning. How W&B can help you optimize your machine learning workflows: - -- [Debug](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#Free-2) model performance in real time -- [GPU usage](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#System-4) visualized automatically -- [Custom charts](https://wandb.ai/wandb/customizable-charts/reports/Powerful-Custom-Charts-To-Debug-Model-Peformance--VmlldzoyNzY4ODI) for powerful, extensible visualization -- [Share insights](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#Share-8) interactively with collaborators -- [Optimize hyperparameters](https://docs.wandb.com/sweeps) efficiently -- [Track](https://docs.wandb.com/artifacts) datasets, pipelines, and production models - -## First-Time Setup - -
- Toggle Details -When you first train, W&B will prompt you to create a new account and will generate an **API key** for you. If you are an existing user you can retrieve your key from https://wandb.ai/authorize. This key is used to tell W&B where to log your data. You only need to supply your key once, and then it is remembered on the same device. - -W&B will create a cloud **project** (default is 'YOLOv5') for your training runs, and each new training run will be provided a unique run **name** within that project as project/name. You can also manually set your project and run name as: - -```shell -$ python train.py --project ... --name ... -``` - -YOLOv5 notebook example: Open In Colab Open In Kaggle -Screen Shot 2021-09-29 at 10 23 13 PM - -
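For instance, with hypothetical project and run names (shown only to make the pattern above concrete):

```shell
# Appears in the W&B UI as yolov5_tutorial/exp1
$ python train.py --project yolov5_tutorial --name exp1
```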
- -## Viewing Runs - -
- Toggle Details -Run information streams from your environment to the W&B cloud console as you train. This allows you to monitor and even cancel runs in real time. All important information is logged: - -- Training & Validation losses -- Metrics: Precision, Recall, mAP@0.5, mAP@0.5:0.95 -- Learning Rate over time -- A bounding box debugging panel, showing the training progress over time -- GPU: Type, **GPU Utilization**, power, temperature, **CUDA memory usage** -- System: Disk I/O, CPU utilization, RAM memory usage -- Your trained model as a W&B Artifact -- Environment: OS and Python types, Git repository and state, **training command** - -

Weights & Biases dashboard

-
- -## Disabling wandb - -- Training after running `wandb disabled` inside that directory creates no wandb run - ![Screenshot (84)](https://user-images.githubusercontent.com/15766192/143441777-c780bdd7-7cb4-4404-9559-b4316030a985.png) - -- To enable wandb again, run `wandb online` - ![Screenshot (85)](https://user-images.githubusercontent.com/15766192/143441866-7191b2cb-22f0-4e0f-ae64-2dc47dc13078.png) - -## Advanced Usage - -You can leverage W&B artifacts and Tables integration to easily visualize and manage your datasets, models, and training evaluations. Here are some quick examples to get you started. - -
-

1: Train and Log Evaluation simultaneously

- This is an extension of the previous section, but it'll also start training after uploading the dataset. This also logs the evaluation Table. - The evaluation Table compares your predictions and ground truths across the validation set for each epoch. It uses references to the already uploaded datasets, - so no images will be uploaded from your system more than once. -
- Usage - Code $ python train.py --upload_dataset val - -![Screenshot from 2021-11-21 17-40-06](https://user-images.githubusercontent.com/15766192/142761183-c1696d8c-3f38-45ab-991a-bb0dfd98ae7d.png) - -
- -

2: Visualize and Version Datasets

- Log, visualize, dynamically query, and understand your data with W&B Tables. You can use the following command to log your dataset as a W&B Table. This will generate a {dataset}_wandb.yaml file which can be used to train from the dataset artifact. -
- Usage - Code $ python utils/loggers/wandb/log_dataset.py --project ... --name ... --data .. - -![Screenshot (64)](https://user-images.githubusercontent.com/15766192/128486078-d8433890-98a3-4d12-8986-b6c0e3fc64b9.png) - -
- -

3: Train using dataset artifact

- When you upload a dataset as described in the first section, you get a new config file with an added `_wandb` to its name. This file contains the information that - can be used to train a model directly from the dataset artifact. This also logs the evaluation Table. -
- Usage - Code $ python train.py --data {data}_wandb.yaml - -![Screenshot (72)](https://user-images.githubusercontent.com/15766192/128979739-4cf63aeb-a76f-483f-8861-1c0100b938a5.png) - -
- -

4: Save model checkpoints as artifacts

- To enable saving and versioning checkpoints of your experiment, pass `--save_period n` with the base command, where `n` represents the checkpoint interval. If `--save_period` is not passed, only the final model will be logged. - You can also log both the dataset and model checkpoints simultaneously (a combined sketch follows this section). - -
- Usage - Code $ python train.py --save_period 1 - -![Screenshot (68)](https://user-images.githubusercontent.com/15766192/128726138-ec6c1f60-639d-437d-b4ee-3acd9de47ef3.png) - -
- -
- -
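As a combined sketch of the two ideas above (assuming the `--upload_dataset` and `--save_period` flags wired into `train.py`; the dataset and weights arguments are illustrative):

```shell
# Illustrative only: version the dataset once and log a checkpoint artifact every epoch
$ python train.py --data coco128.yaml --weights yolov5s.pt --upload_dataset --save_period 1
```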

5: Resume runs from checkpoint artifacts.

-Any run can be resumed using artifacts if the --resume argument starts with the `wandb-artifact://` prefix followed by the run path, i.e., `wandb-artifact://username/project/runid`. This doesn't require the model checkpoint to be present on the local system. - -
- Usage - Code $ python train.py --resume wandb-artifact://{run_path} - -![Screenshot (70)](https://user-images.githubusercontent.com/15766192/128728988-4e84b355-6c87-41ae-a591-14aecf45343e.png) - -
- -

6: Resume runs from dataset artifact & checkpoint artifacts.

- Local dataset or model checkpoints are not required. This can be used to resume runs directly on a different device. - The syntax is the same as the previous section, but you'll need to log both the dataset and model checkpoints as artifacts, i.e., either set --upload_dataset or - train from the _wandb.yaml file, and set --save_period - -
- Usage - Code $ python train.py --resume wandb-artifact://{run_path} - -![Screenshot (70)](https://user-images.githubusercontent.com/15766192/128728988-4e84b355-6c87-41ae-a591-14aecf45343e.png) - -
- -
- -

Reports

-W&B Reports can be created from your saved runs for sharing online. Once a report is created, you will receive a link you can use to publicly share your results. Here is an example report created from the COCO128 tutorial trainings of all four YOLOv5 models ([link](https://wandb.ai/glenn-jocher/yolov5_tutorial/reports/YOLOv5-COCO128-Tutorial-Results--VmlldzozMDI5OTY)). - -Weights & Biases Reports - -## Environments - -YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled): - -- **Google Colab and Kaggle** notebooks with free GPU: Open In Colab Open In Kaggle -- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart) -- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart) -- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) Docker Pulls - -## Status - -![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg) - -If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), validation ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit.
diff --git a/utils/loggers/wandb/log_dataset.py b/utils/loggers/wandb/log_dataset.py deleted file mode 100644 index 06e81fb69307..000000000000 --- a/utils/loggers/wandb/log_dataset.py +++ /dev/null @@ -1,27 +0,0 @@ -import argparse - -from wandb_utils import WandbLogger - -from utils.general import LOGGER - -WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' - - -def create_dataset_artifact(opt): - logger = WandbLogger(opt, None, job_type='Dataset Creation') # TODO: return value unused - if not logger.wandb: - LOGGER.info("install wandb using `pip install wandb` to log the dataset") - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path') - parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset') - parser.add_argument('--project', type=str, default='YOLOv5', help='name of W&B Project') - parser.add_argument('--entity', default=None, help='W&B entity') - parser.add_argument('--name', type=str, default='log dataset', help='name of W&B run') - - opt = parser.parse_args() - opt.resume = False # Explicitly disallow resume check for dataset upload job - - create_dataset_artifact(opt) diff --git a/utils/loggers/wandb/sweep.py b/utils/loggers/wandb/sweep.py deleted file mode 100644 index d49ea6f2778b..000000000000 --- a/utils/loggers/wandb/sweep.py +++ /dev/null @@ -1,41 +0,0 @@ -import sys -from pathlib import Path - -import wandb - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[3] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH - -from train import parse_opt, train -from utils.callbacks import Callbacks -from utils.general import increment_path -from utils.torch_utils import select_device - - -def sweep(): - wandb.init() - # Get hyp dict from sweep agent. Copy because train() modifies parameters which confused wandb. - hyp_dict = vars(wandb.config).get("_items").copy() - - # Workaround: get necessary opt args - opt = parse_opt(known=True) - opt.batch_size = hyp_dict.get("batch_size") - opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve)) - opt.epochs = hyp_dict.get("epochs") - opt.nosave = True - opt.data = hyp_dict.get("data") - opt.weights = str(opt.weights) - opt.cfg = str(opt.cfg) - opt.data = str(opt.data) - opt.hyp = str(opt.hyp) - opt.project = str(opt.project) - device = select_device(opt.device, batch_size=opt.batch_size) - - # train - train(hyp_dict, opt, device, callbacks=Callbacks()) - - -if __name__ == "__main__": - sweep() diff --git a/utils/loggers/wandb/sweep.yaml b/utils/loggers/wandb/sweep.yaml deleted file mode 100644 index 688b1ea0285f..000000000000 --- a/utils/loggers/wandb/sweep.yaml +++ /dev/null @@ -1,143 +0,0 @@ -# Hyperparameters for training -# To set range- -# Provide min and max values as: -# parameter: -# -# min: scalar -# max: scalar -# OR -# -# Set a specific list of search space- -# parameter: -# values: [scalar1, scalar2, scalar3...] 
-# -# You can use grid, bayesian and hyperopt search strategy -# For more info on configuring sweeps visit - https://docs.wandb.ai/guides/sweeps/configuration - -program: utils/loggers/wandb/sweep.py -method: random -metric: - name: metrics/mAP_0.5 - goal: maximize - -parameters: - # hyperparameters: set either min, max range or values list - data: - value: "data/coco128.yaml" - batch_size: - values: [64] - epochs: - values: [10] - - lr0: - distribution: uniform - min: 1e-5 - max: 1e-1 - lrf: - distribution: uniform - min: 0.01 - max: 1.0 - momentum: - distribution: uniform - min: 0.6 - max: 0.98 - weight_decay: - distribution: uniform - min: 0.0 - max: 0.001 - warmup_epochs: - distribution: uniform - min: 0.0 - max: 5.0 - warmup_momentum: - distribution: uniform - min: 0.0 - max: 0.95 - warmup_bias_lr: - distribution: uniform - min: 0.0 - max: 0.2 - box: - distribution: uniform - min: 0.02 - max: 0.2 - cls: - distribution: uniform - min: 0.2 - max: 4.0 - cls_pw: - distribution: uniform - min: 0.5 - max: 2.0 - obj: - distribution: uniform - min: 0.2 - max: 4.0 - obj_pw: - distribution: uniform - min: 0.5 - max: 2.0 - iou_t: - distribution: uniform - min: 0.1 - max: 0.7 - anchor_t: - distribution: uniform - min: 2.0 - max: 8.0 - fl_gamma: - distribution: uniform - min: 0.0 - max: 4.0 - hsv_h: - distribution: uniform - min: 0.0 - max: 0.1 - hsv_s: - distribution: uniform - min: 0.0 - max: 0.9 - hsv_v: - distribution: uniform - min: 0.0 - max: 0.9 - degrees: - distribution: uniform - min: 0.0 - max: 45.0 - translate: - distribution: uniform - min: 0.0 - max: 0.9 - scale: - distribution: uniform - min: 0.0 - max: 0.9 - shear: - distribution: uniform - min: 0.0 - max: 10.0 - perspective: - distribution: uniform - min: 0.0 - max: 0.001 - flipud: - distribution: uniform - min: 0.0 - max: 1.0 - fliplr: - distribution: uniform - min: 0.0 - max: 1.0 - mosaic: - distribution: uniform - min: 0.0 - max: 1.0 - mixup: - distribution: uniform - min: 0.0 - max: 1.0 - copy_paste: - distribution: uniform - min: 0.0 - max: 1.0 diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index 238f4edbf2a0..4ea32b1d4c6e 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -1,110 +1,32 @@ -"""Utilities and tools for tracking runs with Weights & Biases.""" +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license + +# WARNING โš ๏ธ wandb is deprecated and will be removed in future release. +# See supported integrations at https://github.com/ultralytics/yolov5#integrations import logging import os import sys from contextlib import contextmanager from pathlib import Path -from typing import Dict -import yaml -from tqdm import tqdm +from utils.general import LOGGER, colorstr FILE = Path(__file__).resolve() ROOT = FILE.parents[3] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH - -from utils.dataloaders import LoadImagesAndLabels, img2label_paths -from utils.general import LOGGER, check_dataset, check_file +RANK = int(os.getenv('RANK', -1)) +DEPRECATION_WARNING = f"{colorstr('wandb')}: WARNING โš ๏ธ wandb is deprecated and will be removed in a future release. " \ + f'See supported integrations at https://github.com/ultralytics/yolov5#integrations.' 
try: import wandb assert hasattr(wandb, '__version__') # verify package import not local dir + LOGGER.warning(DEPRECATION_WARNING) except (ImportError, AssertionError): wandb = None -RANK = int(os.getenv('RANK', -1)) -WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' - - -def remove_prefix(from_string, prefix=WANDB_ARTIFACT_PREFIX): - return from_string[len(prefix):] - - -def check_wandb_config_file(data_config_file): - wandb_config = '_wandb.'.join(data_config_file.rsplit('.', 1)) # updated data.yaml path - if Path(wandb_config).is_file(): - return wandb_config - return data_config_file - - -def check_wandb_dataset(data_file): - is_trainset_wandb_artifact = False - is_valset_wandb_artifact = False - if isinstance(data_file, dict): - # In that case another dataset manager has already processed it and we don't have to - return data_file - if check_file(data_file) and data_file.endswith('.yaml'): - with open(data_file, errors='ignore') as f: - data_dict = yaml.safe_load(f) - is_trainset_wandb_artifact = isinstance(data_dict['train'], - str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX) - is_valset_wandb_artifact = isinstance(data_dict['val'], - str) and data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX) - if is_trainset_wandb_artifact or is_valset_wandb_artifact: - return data_dict - else: - return check_dataset(data_file) - - -def get_run_info(run_path): - run_path = Path(remove_prefix(run_path, WANDB_ARTIFACT_PREFIX)) - run_id = run_path.stem - project = run_path.parent.stem - entity = run_path.parent.parent.stem - model_artifact_name = 'run_' + run_id + '_model' - return entity, project, run_id, model_artifact_name - - -def check_wandb_resume(opt): - process_wandb_config_ddp_mode(opt) if RANK not in [-1, 0] else None - if isinstance(opt.resume, str): - if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): - if RANK not in [-1, 0]: # For resuming DDP runs - entity, project, run_id, model_artifact_name = get_run_info(opt.resume) - api = wandb.Api() - artifact = api.artifact(entity + '/' + project + '/' + model_artifact_name + ':latest') - modeldir = artifact.download() - opt.weights = str(Path(modeldir) / "last.pt") - return True - return None - - -def process_wandb_config_ddp_mode(opt): - with open(check_file(opt.data), errors='ignore') as f: - data_dict = yaml.safe_load(f) # data dict - train_dir, val_dir = None, None - if isinstance(data_dict['train'], str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX): - api = wandb.Api() - train_artifact = api.artifact(remove_prefix(data_dict['train']) + ':' + opt.artifact_alias) - train_dir = train_artifact.download() - train_path = Path(train_dir) / 'data/images/' - data_dict['train'] = str(train_path) - - if isinstance(data_dict['val'], str) and data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX): - api = wandb.Api() - val_artifact = api.artifact(remove_prefix(data_dict['val']) + ':' + opt.artifact_alias) - val_dir = val_artifact.download() - val_path = Path(val_dir) / 'data/images/' - data_dict['val'] = str(val_path) - if train_dir or val_dir: - ddp_data_path = str(Path(val_dir) / 'wandb_local_data.yaml') - with open(ddp_data_path, 'w') as f: - yaml.safe_dump(data_dict, f) - opt.data = ddp_data_path - class WandbLogger(): """Log training runs, datasets, models, and predictions to Weights & Biases. 
@@ -132,91 +54,33 @@ def __init__(self, opt, run_id=None, job_type='Training'): job_type (str) -- To set the job_type for this run """ - # Temporary-fix - if opt.upload_dataset: - opt.upload_dataset = False - # LOGGER.info("Uploading Dataset functionality is not being supported temporarily due to a bug.") - # Pre-training routine -- self.job_type = job_type - self.wandb, self.wandb_run = wandb, None if not wandb else wandb.run + self.wandb, self.wandb_run = wandb, wandb.run if wandb else None self.val_artifact, self.train_artifact = None, None self.train_artifact_path, self.val_artifact_path = None, None self.result_artifact = None self.val_table, self.result_table = None, None - self.bbox_media_panel_images = [] - self.val_table_path_map = None self.max_imgs_to_log = 16 - self.wandb_artifact_data_dict = None self.data_dict = None - # It's more elegant to stick to 1 wandb.init call, - # but useful config data is overwritten in the WandbLogger's wandb.init call - if isinstance(opt.resume, str): # checks resume from artifact - if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): - entity, project, run_id, model_artifact_name = get_run_info(opt.resume) - model_artifact_name = WANDB_ARTIFACT_PREFIX + model_artifact_name - assert wandb, 'install wandb to resume wandb runs' - # Resume wandb-artifact:// runs here| workaround for not overwriting wandb.config - self.wandb_run = wandb.init(id=run_id, - project=project, - entity=entity, - resume='allow', - allow_val_change=True) - opt.resume = model_artifact_name - elif self.wandb: + if self.wandb: self.wandb_run = wandb.init(config=opt, - resume="allow", + resume='allow', project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem, entity=opt.entity, name=opt.name if opt.name != 'exp' else None, job_type=job_type, id=run_id, allow_val_change=True) if not wandb.run else wandb.run + if self.wandb_run: if self.job_type == 'Training': - if opt.upload_dataset: - if not opt.resume: - self.wandb_artifact_data_dict = self.check_and_upload_dataset(opt) - if isinstance(opt.data, dict): # This means another dataset manager has already processed the dataset info (e.g. ClearML) # and they will have stored the already processed dict in opt.data self.data_dict = opt.data - elif opt.resume: - # resume from artifact - if isinstance(opt.resume, str) and opt.resume.startswith(WANDB_ARTIFACT_PREFIX): - self.data_dict = dict(self.wandb_run.config.data_dict) - else: # local resume - self.data_dict = check_wandb_dataset(opt.data) - else: - self.data_dict = check_wandb_dataset(opt.data) - self.wandb_artifact_data_dict = self.wandb_artifact_data_dict or self.data_dict - - # write data_dict to config. useful for resuming from artifacts. Do this only when not resuming. - self.wandb_run.config.update({'data_dict': self.wandb_artifact_data_dict}, allow_val_change=True) self.setup_training(opt) - if self.job_type == 'Dataset Creation': - self.wandb_run.config.update({"upload_dataset": True}) - self.data_dict = self.check_and_upload_dataset(opt) - - def check_and_upload_dataset(self, opt): - """ - Check if the dataset format is compatible and upload it as W&B artifact - - arguments: - opt (namespace)-- Commandline arguments for current run - - returns: - Updated dataset info dictionary where local dataset paths are replaced by WAND_ARFACT_PREFIX links. 
- """ - assert wandb, 'Install wandb to upload dataset' - config_path = self.log_dataset_artifact(opt.data, opt.single_cls, - 'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem) - with open(config_path, errors='ignore') as f: - wandb_data_dict = yaml.safe_load(f) - return wandb_data_dict - def setup_training(self, opt): """ Setup the necessary processes for training YOLO models: @@ -231,81 +95,18 @@ def setup_training(self, opt): self.log_dict, self.current_epoch = {}, 0 self.bbox_interval = opt.bbox_interval if isinstance(opt.resume, str): - modeldir, _ = self.download_model_artifact(opt) - if modeldir: - self.weights = Path(modeldir) / "last.pt" + model_dir, _ = self.download_model_artifact(opt) + if model_dir: + self.weights = Path(model_dir) / 'last.pt' config = self.wandb_run.config opt.weights, opt.save_period, opt.batch_size, opt.bbox_interval, opt.epochs, opt.hyp, opt.imgsz = str( - self.weights), config.save_period, config.batch_size, config.bbox_interval, config.epochs,\ + self.weights), config.save_period, config.batch_size, config.bbox_interval, config.epochs, \ config.hyp, config.imgsz - data_dict = self.data_dict - if self.val_artifact is None: # If --upload_dataset is set, use the existing artifact, don't download - self.train_artifact_path, self.train_artifact = self.download_dataset_artifact( - data_dict.get('train'), opt.artifact_alias) - self.val_artifact_path, self.val_artifact = self.download_dataset_artifact( - data_dict.get('val'), opt.artifact_alias) - if self.train_artifact_path is not None: - train_path = Path(self.train_artifact_path) / 'data/images/' - data_dict['train'] = str(train_path) - if self.val_artifact_path is not None: - val_path = Path(self.val_artifact_path) / 'data/images/' - data_dict['val'] = str(val_path) - - if self.val_artifact is not None: - self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") - columns = ["epoch", "id", "ground truth", "prediction"] - columns.extend(self.data_dict['names']) - self.result_table = wandb.Table(columns) - self.val_table = self.val_artifact.get("val") - if self.val_table_path_map is None: - self.map_val_table_path() if opt.bbox_interval == -1: self.bbox_interval = opt.bbox_interval = (opt.epochs // 10) if opt.epochs > 10 else 1 if opt.evolve or opt.noplots: self.bbox_interval = opt.bbox_interval = opt.epochs + 1 # disable bbox_interval - train_from_artifact = self.train_artifact_path is not None and self.val_artifact_path is not None - # Update the the data_dict to point to local artifacts dir - if train_from_artifact: - self.data_dict = data_dict - - def download_dataset_artifact(self, path, alias): - """ - download the model checkpoint artifact if the path starts with WANDB_ARTIFACT_PREFIX - - arguments: - path -- path of the dataset to be used for training - alias (str)-- alias of the artifact to be download/used for training - - returns: - (str, wandb.Artifact) -- path of the downladed dataset and it's corresponding artifact object if dataset - is found otherwise returns (None, None) - """ - if isinstance(path, str) and path.startswith(WANDB_ARTIFACT_PREFIX): - artifact_path = Path(remove_prefix(path, WANDB_ARTIFACT_PREFIX) + ":" + alias) - dataset_artifact = wandb.use_artifact(artifact_path.as_posix().replace("\\", "/")) - assert dataset_artifact is not None, "'Error: W&B dataset artifact doesn\'t exist'" - datadir = dataset_artifact.download() - return datadir, dataset_artifact - return None, None - - def download_model_artifact(self, opt): - """ - 
download the model checkpoint artifact if the resume path starts with WANDB_ARTIFACT_PREFIX - - arguments: - opt (namespace) -- Commandline arguments for this run - """ - if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): - model_artifact = wandb.use_artifact(remove_prefix(opt.resume, WANDB_ARTIFACT_PREFIX) + ":latest") - assert model_artifact is not None, 'Error: W&B model artifact doesn\'t exist' - modeldir = model_artifact.download() - # epochs_trained = model_artifact.metadata.get('epochs_trained') - total_epochs = model_artifact.metadata.get('total_epochs') - is_finished = total_epochs is None - assert not is_finished, 'training is finished, can only resume incomplete runs.' - return modeldir, model_artifact - return None, None def log_model(self, path, opt, epoch, fitness_score, best_model=False): """ @@ -330,192 +131,10 @@ def log_model(self, path, opt, epoch, fitness_score, best_model=False): model_artifact.add_file(str(path / 'last.pt'), name='last.pt') wandb.log_artifact(model_artifact, aliases=['latest', 'last', 'epoch ' + str(self.current_epoch), 'best' if best_model else '']) - LOGGER.info(f"Saving model artifact on epoch {epoch + 1}") - - def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=False): - """ - Log the dataset as W&B artifact and return the new data file with W&B links - - arguments: - data_file (str) -- the .yaml file with information about the dataset like - path, classes etc. - single_class (boolean) -- train multi-class data as single-class - project (str) -- project name. Used to construct the artifact path - overwrite_config (boolean) -- overwrites the data.yaml file if set to true otherwise creates a new - file with _wandb postfix. Eg -> data_wandb.yaml - - returns: - the new .yaml file with artifact links. it can be used to start training directly from artifacts - """ - upload_dataset = self.wandb_run.config.upload_dataset - log_val_only = isinstance(upload_dataset, str) and upload_dataset == 'val' - self.data_dict = check_dataset(data_file) # parse and check - data = dict(self.data_dict) - nc, names = (1, ['item']) if single_cls else (int(data['nc']), data['names']) - names = {k: v for k, v in enumerate(names)} # to index dictionary - - # log train set - if not log_val_only: - self.train_artifact = self.create_dataset_table(LoadImagesAndLabels(data['train'], rect=True, batch_size=1), - names, - name='train') if data.get('train') else None - if data.get('train'): - data['train'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'train') - - self.val_artifact = self.create_dataset_table( - LoadImagesAndLabels(data['val'], rect=True, batch_size=1), names, name='val') if data.get('val') else None - if data.get('val'): - data['val'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'val') - - path = Path(data_file) - # create a _wandb.yaml file with artifacts links if both train and test set are logged - if not log_val_only: - path = (path.stem if overwrite_config else path.stem + '_wandb') + '.yaml' # updated data.yaml path - path = ROOT / 'data' / path - data.pop('download', None) - data.pop('path', None) - with open(path, 'w') as f: - yaml.safe_dump(data, f) - LOGGER.info(f"Created dataset config file {path}") - - if self.job_type == 'Training': # builds correct artifact pipeline graph - if not log_val_only: - self.wandb_run.log_artifact( - self.train_artifact) # calling use_artifact downloads the dataset. NOT NEEDED! 
- self.wandb_run.use_artifact(self.val_artifact) - self.val_artifact.wait() - self.val_table = self.val_artifact.get('val') - self.map_val_table_path() - else: - self.wandb_run.log_artifact(self.train_artifact) - self.wandb_run.log_artifact(self.val_artifact) - return path - - def map_val_table_path(self): - """ - Map the validation dataset Table like name of file -> it's id in the W&B Table. - Useful for - referencing artifacts for evaluation. - """ - self.val_table_path_map = {} - LOGGER.info("Mapping dataset") - for i, data in enumerate(tqdm(self.val_table.data)): - self.val_table_path_map[data[3]] = data[0] - - def create_dataset_table(self, dataset: LoadImagesAndLabels, class_to_id: Dict[int, str], name: str = 'dataset'): - """ - Create and return W&B artifact containing W&B Table of the dataset. - - arguments: - dataset -- instance of LoadImagesAndLabels class used to iterate over the data to build Table - class_to_id -- hash map that maps class ids to labels - name -- name of the artifact - - returns: - dataset artifact to be logged or used - """ - # TODO: Explore multiprocessing to slpit this loop parallely| This is essential for speeding up the the logging - artifact = wandb.Artifact(name=name, type="dataset") - img_files = tqdm([dataset.path]) if isinstance(dataset.path, str) and Path(dataset.path).is_dir() else None - img_files = tqdm(dataset.im_files) if not img_files else img_files - for img_file in img_files: - if Path(img_file).is_dir(): - artifact.add_dir(img_file, name='data/images') - labels_path = 'labels'.join(dataset.path.rsplit('images', 1)) - artifact.add_dir(labels_path, name='data/labels') - else: - artifact.add_file(img_file, name='data/images/' + Path(img_file).name) - label_file = Path(img2label_paths([img_file])[0]) - artifact.add_file(str(label_file), name='data/labels/' + - label_file.name) if label_file.exists() else None - table = wandb.Table(columns=["id", "train_image", "Classes", "name"]) - class_set = wandb.Classes([{'id': id, 'name': name} for id, name in class_to_id.items()]) - for si, (img, labels, paths, shapes) in enumerate(tqdm(dataset)): - box_data, img_classes = [], {} - for cls, *xywh in labels[:, 1:].tolist(): - cls = int(cls) - box_data.append({ - "position": { - "middle": [xywh[0], xywh[1]], - "width": xywh[2], - "height": xywh[3]}, - "class_id": cls, - "box_caption": "%s" % (class_to_id[cls])}) - img_classes[cls] = class_to_id[cls] - boxes = {"ground_truth": {"box_data": box_data, "class_labels": class_to_id}} # inference-space - table.add_data(si, wandb.Image(paths, classes=class_set, boxes=boxes), list(img_classes.values()), - Path(paths).name) - artifact.add(table, name) - return artifact - - def log_training_progress(self, predn, path, names): - """ - Build evaluation Table. Uses reference from validation dataset table. 
- - arguments: - predn (list): list of predictions in the native space in the format - [xmin, ymin, xmax, ymax, confidence, class] - path (str): local path of the current evaluation image - names (dict(int, str)): hash map that maps class ids to labels - """ - class_set = wandb.Classes([{'id': id, 'name': name} for id, name in names.items()]) - box_data = [] - avg_conf_per_class = [0] * len(self.data_dict['names']) - pred_class_count = {} - for *xyxy, conf, cls in predn.tolist(): - if conf >= 0.25: - cls = int(cls) - box_data.append({ - "position": { - "minX": xyxy[0], - "minY": xyxy[1], - "maxX": xyxy[2], - "maxY": xyxy[3]}, - "class_id": cls, - "box_caption": f"{names[cls]} {conf:.3f}", - "scores": { - "class_score": conf}, - "domain": "pixel"}) - avg_conf_per_class[cls] += conf - - if cls in pred_class_count: - pred_class_count[cls] += 1 - else: - pred_class_count[cls] = 1 - - for pred_class in pred_class_count.keys(): - avg_conf_per_class[pred_class] = avg_conf_per_class[pred_class] / pred_class_count[pred_class] - - boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space - id = self.val_table_path_map[Path(path).name] - self.result_table.add_data(self.current_epoch, id, self.val_table.data[id][1], - wandb.Image(self.val_table.data[id][1], boxes=boxes, classes=class_set), - *avg_conf_per_class) + LOGGER.info(f'Saving model artifact on epoch {epoch + 1}') def val_one_image(self, pred, predn, path, names, im): - """ - Log validation data for one image. updates the result Table if validation dataset is uploaded and log bbox media panel - - arguments: - pred (list): list of scaled predictions in the format - [xmin, ymin, xmax, ymax, confidence, class] - predn (list): list of predictions in the native space - [xmin, ymin, xmax, ymax, confidence, class] - path (str): local path of the current evaluation image - """ - if self.val_table and self.result_table: # Log Table if Val dataset is uploaded as artifact - self.log_training_progress(predn, path, names) - - if len(self.bbox_media_panel_images) < self.max_imgs_to_log and self.current_epoch > 0: - if self.current_epoch % self.bbox_interval == 0: - box_data = [{ - "position": { - "minX": xyxy[0], - "minY": xyxy[1], - "maxX": xyxy[2], - "maxY": xyxy[3]}, - "class_id": int(cls), - "box_caption": f"{names[int(cls)]} {conf:.3f}", - "scores": { - "class_score": conf}, - "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()] - boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space - self.bbox_media_panel_images.append(wandb.Image(im, boxes=boxes, caption=path.name)) + pass def log(self, log_dict): """ @@ -528,7 +147,7 @@ def log(self, log_dict): for key, value in log_dict.items(): self.log_dict[key] = value - def end_epoch(self, best_result=False): + def end_epoch(self): """ commit the log_dict, model artifacts and Tables to W&B and flush the log_dict. @@ -537,31 +156,15 @@ def end_epoch(self, best_result=False): """ if self.wandb_run: with all_logging_disabled(): - if self.bbox_media_panel_images: - self.log_dict["BoundingBoxDebugger"] = self.bbox_media_panel_images try: wandb.log(self.log_dict) except BaseException as e: LOGGER.info( - f"An error occurred in wandb logger. The training will proceed without interruption. More info\n{e}" + f'An error occurred in wandb logger. The training will proceed without interruption. 
More info\n{e}' ) self.wandb_run.finish() self.wandb_run = None - self.log_dict = {} - self.bbox_media_panel_images = [] - if self.result_artifact: - self.result_artifact.add(self.result_table, 'result') - wandb.log_artifact(self.result_artifact, - aliases=[ - 'latest', 'last', 'epoch ' + str(self.current_epoch), - ('best' if best_result else '')]) - - wandb.log({"evaluation": self.result_table}) - columns = ["epoch", "id", "ground truth", "prediction"] - columns.extend(self.data_dict['names']) - self.result_table = wandb.Table(columns) - self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") def finish_run(self): """ @@ -572,6 +175,7 @@ def finish_run(self): with all_logging_disabled(): wandb.log(self.log_dict) wandb.run.finish() + LOGGER.warning(DEPRECATION_WARNING) @contextmanager diff --git a/utils/loss.py b/utils/loss.py index 9b9c3d9f8018..26cca8797315 100644 --- a/utils/loss.py +++ b/utils/loss.py @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license """ Loss functions """ diff --git a/utils/metrics.py b/utils/metrics.py index c01f823a77a1..5646f40e9860 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license """ Model validation metrics """ @@ -28,7 +28,7 @@ def smooth(y, f=0.05): return np.convolve(yp, np.ones(nf) / nf, mode='valid') # y-smoothed -def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=(), eps=1e-16, prefix=""): +def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=(), eps=1e-16, prefix=''): """ Compute the average precision, given the recall and precision curves. Source: https://github.com/rafaelpadilla/Object-Detection-Metrics. 
# Arguments @@ -194,21 +194,21 @@ def plot(self, normalize=True, save_dir='', names=()): nc, nn = self.nc, len(names) # number of classes, names sn.set(font_scale=1.0 if nc < 50 else 0.8) # for label size labels = (0 < nn < 99) and (nn == nc) # apply names to ticklabels - ticklabels = (names + ['background']) if labels else "auto" + ticklabels = (names + ['background']) if labels else 'auto' with warnings.catch_warnings(): warnings.simplefilter('ignore') # suppress empty matrix RuntimeWarning: All-NaN slice encountered sn.heatmap(array, ax=ax, annot=nc < 30, annot_kws={ - "size": 8}, + 'size': 8}, cmap='Blues', fmt='.2f', square=True, vmin=0.0, xticklabels=ticklabels, yticklabels=ticklabels).set_facecolor((1, 1, 1)) - ax.set_ylabel('True') + ax.set_xlabel('True') ax.set_ylabel('Predicted') ax.set_title('Confusion Matrix') fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250) @@ -331,7 +331,7 @@ def plot_pr_curve(px, py, ap, save_dir=Path('pr_curve.png'), names=()): ax.set_ylabel('Precision') ax.set_xlim(0, 1) ax.set_ylim(0, 1) - ax.legend(bbox_to_anchor=(1.04, 1), loc="upper left") + ax.legend(bbox_to_anchor=(1.04, 1), loc='upper left') ax.set_title('Precision-Recall Curve') fig.savefig(save_dir, dpi=250) plt.close(fig) @@ -354,7 +354,7 @@ def plot_mc_curve(px, py, save_dir=Path('mc_curve.png'), names=(), xlabel='Confi ax.set_ylabel(ylabel) ax.set_xlim(0, 1) ax.set_ylim(0, 1) - ax.legend(bbox_to_anchor=(1.04, 1), loc="upper left") + ax.legend(bbox_to_anchor=(1.04, 1), loc='upper left') ax.set_title(f'{ylabel}-Confidence Curve') fig.savefig(save_dir, dpi=250) plt.close(fig) diff --git a/utils/plots.py b/utils/plots.py index d2f232de0e97..d1284b950269 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license """ Plotting utils """ @@ -88,7 +88,8 @@ def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 2 if self.pil or not is_ascii(label): self.draw.rectangle(box, width=self.lw, outline=color) # box if label: - w, h = self.font.getsize(label) # text width, height + w, h = self.font.getsize(label) # text width, height (WARNING: deprecated) in 9.2.0 + # _, _, w, h = self.font.getbbox(label) # text width, height (New) outside = box[1] - h >= 0 # label fits outside box self.draw.rectangle( (box[0], box[1] - h if outside else box[1], box[0] + w + 1, @@ -449,7 +450,7 @@ def imshow_cls(im, labels=None, pred=None, names=None, nmax=25, verbose=False, f plt.savefig(f, dpi=300, bbox_inches='tight') plt.close() if verbose: - LOGGER.info(f"Saving {f}") + LOGGER.info(f'Saving {f}') if labels is not None: LOGGER.info('True: ' + ' '.join(f'{names[i]:3s}' for i in labels[:nmax])) if pred is not None: diff --git a/utils/segment/augmentations.py b/utils/segment/augmentations.py index 169addedf0f5..f8154b834869 100644 --- a/utils/segment/augmentations.py +++ b/utils/segment/augmentations.py @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license """ Image augmentation functions """ diff --git a/utils/segment/dataloaders.py b/utils/segment/dataloaders.py index 9de6f0fbf903..3ee826dba69c 100644 --- a/utils/segment/dataloaders.py +++ b/utils/segment/dataloaders.py @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license """ Dataloaders """ @@ -37,7 +37,8 @@ def create_dataloader(path, prefix='', shuffle=False, mask_downsample_ratio=1, - overlap_mask=False): + 
overlap_mask=False, + seed=0): if rect and shuffle: LOGGER.warning('WARNING โš ๏ธ --rect is incompatible with DataLoader shuffle, setting shuffle=False') shuffle = False @@ -64,7 +65,7 @@ def create_dataloader(path, sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates generator = torch.Generator() - generator.manual_seed(6148914691236517205 + RANK) + generator.manual_seed(6148914691236517205 + seed + RANK) return loader( dataset, batch_size=batch_size, @@ -94,7 +95,7 @@ def __init__( stride=32, pad=0, min_items=0, - prefix="", + prefix='', downsample_ratio=1, overlap=False, ): @@ -115,7 +116,7 @@ def __getitem__(self, index): shapes = None # MixUp augmentation - if random.random() < hyp["mixup"]: + if random.random() < hyp['mixup']: img, labels, segments = mixup(img, labels, segments, *self.load_mosaic(random.randint(0, self.n - 1))) else: @@ -146,11 +147,11 @@ def __getitem__(self, index): img, labels, segments = random_perspective(img, labels, segments=segments, - degrees=hyp["degrees"], - translate=hyp["translate"], - scale=hyp["scale"], - shear=hyp["shear"], - perspective=hyp["perspective"]) + degrees=hyp['degrees'], + translate=hyp['translate'], + scale=hyp['scale'], + shear=hyp['shear'], + perspective=hyp['perspective']) nl = len(labels) # number of labels if nl: @@ -176,17 +177,17 @@ def __getitem__(self, index): nl = len(labels) # update after albumentations # HSV color-space - augment_hsv(img, hgain=hyp["hsv_h"], sgain=hyp["hsv_s"], vgain=hyp["hsv_v"]) + augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v']) # Flip up-down - if random.random() < hyp["flipud"]: + if random.random() < hyp['flipud']: img = np.flipud(img) if nl: labels[:, 2] = 1 - labels[:, 2] masks = torch.flip(masks, dims=[1]) # Flip left-right - if random.random() < hyp["fliplr"]: + if random.random() < hyp['fliplr']: img = np.fliplr(img) if nl: labels[:, 1] = 1 - labels[:, 1] @@ -250,15 +251,15 @@ def load_mosaic(self, index): # img4, labels4 = replicate(img4, labels4) # replicate # Augment - img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp["copy_paste"]) + img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp['copy_paste']) img4, labels4, segments4 = random_perspective(img4, labels4, segments4, - degrees=self.hyp["degrees"], - translate=self.hyp["translate"], - scale=self.hyp["scale"], - shear=self.hyp["shear"], - perspective=self.hyp["perspective"], + degrees=self.hyp['degrees'], + translate=self.hyp['translate'], + scale=self.hyp['scale'], + shear=self.hyp['shear'], + perspective=self.hyp['perspective'], border=self.mosaic_border) # border to remove return img4, labels4, segments4 diff --git a/utils/segment/general.py b/utils/segment/general.py index 9da894538665..f1b2f1dd120f 100644 --- a/utils/segment/general.py +++ b/utils/segment/general.py @@ -10,7 +10,7 @@ def crop_mask(masks, boxes): Vectorized by Chong (thanks Chong). 
Args: - - masks should be a size [h, w, n] tensor of masks + - masks should be a size [n, h, w] tensor of masks - boxes should be a size [n, 4] tensor of bbox coords in relative point form """ diff --git a/utils/segment/loss.py b/utils/segment/loss.py index b45b2c27e0a0..caeff3cad586 100644 --- a/utils/segment/loss.py +++ b/utils/segment/loss.py @@ -16,7 +16,6 @@ def __init__(self, model, autobalance=False, overlap=False): self.overlap = overlap device = next(model.parameters()).device # get model device h = model.hyp # hyperparameters - self.device = device # Define criteria BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device)) @@ -83,7 +82,7 @@ def __call__(self, preds, targets, masks): # predictions, targets, model # Mask regression if tuple(masks.shape[-2:]) != (mask_h, mask_w): # downsample - masks = F.interpolate(masks[None], (mask_h, mask_w), mode="nearest")[0] + masks = F.interpolate(masks[None], (mask_h, mask_w), mode='nearest')[0] marea = xywhn[i][:, 2:].prod(1) # mask width, height normalized mxyxy = xywh2xyxy(xywhn[i] * torch.tensor([mask_w, mask_h, mask_w, mask_h], device=self.device)) for bi in b.unique(): @@ -101,10 +100,10 @@ def __call__(self, preds, targets, masks): # predictions, targets, model if self.autobalance: self.balance = [x / self.balance[self.ssi] for x in self.balance] - lbox *= self.hyp["box"] - lobj *= self.hyp["obj"] - lcls *= self.hyp["cls"] - lseg *= self.hyp["box"] / bs + lbox *= self.hyp['box'] + lobj *= self.hyp['obj'] + lcls *= self.hyp['cls'] + lseg *= self.hyp['box'] / bs loss = lbox + lobj + lcls + lseg return loss * bs, torch.cat((lbox, lseg, lobj, lcls)).detach() @@ -112,7 +111,7 @@ def __call__(self, preds, targets, masks): # predictions, targets, model def single_mask_loss(self, gt_mask, pred, proto, xyxy, area): # Mask loss for one image pred_mask = (pred @ proto.view(self.nm, -1)).view(-1, *proto.shape[1:]) # (n,32) @ (32,80,80) -> (n,80,80) - loss = F.binary_cross_entropy_with_logits(pred_mask, gt_mask, reduction="none") + loss = F.binary_cross_entropy_with_logits(pred_mask, gt_mask, reduction='none') return (crop_mask(loss, xyxy).mean(dim=(1, 2)) / area).mean() def build_targets(self, p, targets): diff --git a/utils/segment/metrics.py b/utils/segment/metrics.py index b09ce23fb9e3..6020fa062ba5 100644 --- a/utils/segment/metrics.py +++ b/utils/segment/metrics.py @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license """ Model validation metrics """ @@ -21,7 +21,7 @@ def ap_per_class_box_and_mask( pred_cls, target_cls, plot=False, - save_dir=".", + save_dir='.', names=(), ): """ @@ -37,7 +37,7 @@ def ap_per_class_box_and_mask( plot=plot, save_dir=save_dir, names=names, - prefix="Box")[2:] + prefix='Box')[2:] results_masks = ap_per_class(tp_m, conf, pred_cls, @@ -45,21 +45,21 @@ def ap_per_class_box_and_mask( plot=plot, save_dir=save_dir, names=names, - prefix="Mask")[2:] + prefix='Mask')[2:] results = { - "boxes": { - "p": results_boxes[0], - "r": results_boxes[1], - "ap": results_boxes[3], - "f1": results_boxes[2], - "ap_class": results_boxes[4]}, - "masks": { - "p": results_masks[0], - "r": results_masks[1], - "ap": results_masks[3], - "f1": results_masks[2], - "ap_class": results_masks[4]}} + 'boxes': { + 'p': results_boxes[0], + 'r': results_boxes[1], + 'ap': results_boxes[3], + 'f1': results_boxes[2], + 'ap_class': results_boxes[4]}, + 'masks': { + 'p': results_masks[0], + 'r': results_masks[1], + 'ap': results_masks[3], + 'f1': results_masks[2], + 
'ap_class': results_masks[4]}} return results @@ -159,8 +159,8 @@ def update(self, results): Args: results: Dict{'boxes': Dict{}, 'masks': Dict{}} """ - self.metric_box.update(list(results["boxes"].values())) - self.metric_mask.update(list(results["masks"].values())) + self.metric_box.update(list(results['boxes'].values())) + self.metric_mask.update(list(results['masks'].values())) def mean_results(self): return self.metric_box.mean_results() + self.metric_mask.mean_results() @@ -178,33 +178,33 @@ def ap_class_index(self): KEYS = [ - "train/box_loss", - "train/seg_loss", # train loss - "train/obj_loss", - "train/cls_loss", - "metrics/precision(B)", - "metrics/recall(B)", - "metrics/mAP_0.5(B)", - "metrics/mAP_0.5:0.95(B)", # metrics - "metrics/precision(M)", - "metrics/recall(M)", - "metrics/mAP_0.5(M)", - "metrics/mAP_0.5:0.95(M)", # metrics - "val/box_loss", - "val/seg_loss", # val loss - "val/obj_loss", - "val/cls_loss", - "x/lr0", - "x/lr1", - "x/lr2",] + 'train/box_loss', + 'train/seg_loss', # train loss + 'train/obj_loss', + 'train/cls_loss', + 'metrics/precision(B)', + 'metrics/recall(B)', + 'metrics/mAP_0.5(B)', + 'metrics/mAP_0.5:0.95(B)', # metrics + 'metrics/precision(M)', + 'metrics/recall(M)', + 'metrics/mAP_0.5(M)', + 'metrics/mAP_0.5:0.95(M)', # metrics + 'val/box_loss', + 'val/seg_loss', # val loss + 'val/obj_loss', + 'val/cls_loss', + 'x/lr0', + 'x/lr1', + 'x/lr2',] BEST_KEYS = [ - "best/epoch", - "best/precision(B)", - "best/recall(B)", - "best/mAP_0.5(B)", - "best/mAP_0.5:0.95(B)", - "best/precision(M)", - "best/recall(M)", - "best/mAP_0.5(M)", - "best/mAP_0.5:0.95(M)",] + 'best/epoch', + 'best/precision(B)', + 'best/recall(B)', + 'best/mAP_0.5(B)', + 'best/mAP_0.5:0.95(B)', + 'best/precision(M)', + 'best/recall(M)', + 'best/mAP_0.5(M)', + 'best/mAP_0.5:0.95(M)',] diff --git a/utils/segment/plots.py b/utils/segment/plots.py index 9b90900b3772..1b22ec838ac9 100644 --- a/utils/segment/plots.py +++ b/utils/segment/plots.py @@ -54,7 +54,7 @@ def plot_images_and_masks(images, targets, masks, paths=None, fname='images.jpg' x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders if paths: - annotator.text((x + 5, y + 5 + h), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames + annotator.text((x + 5, y + 5), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames if len(targets) > 0: idx = targets[:, 0] == i ti = targets[idx] # image targets @@ -108,13 +108,13 @@ def plot_images_and_masks(images, targets, masks, paths=None, fname='images.jpg' annotator.im.save(fname) # save -def plot_results_with_masks(file="path/to/results.csv", dir="", best=True): +def plot_results_with_masks(file='path/to/results.csv', dir='', best=True): # Plot training results.csv. Usage: from utils.plots import *; plot_results('path/to/results.csv') save_dir = Path(file).parent if file else Path(dir) fig, ax = plt.subplots(2, 8, figsize=(18, 6), tight_layout=True) ax = ax.ravel() - files = list(save_dir.glob("results*.csv")) - assert len(files), f"No results.csv files found in {save_dir.resolve()}, nothing to plot." + files = list(save_dir.glob('results*.csv')) + assert len(files), f'No results.csv files found in {save_dir.resolve()}, nothing to plot.' 
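A quick usage sketch for the plot_results_with_masks helper defined above (the run directory is hypothetical; any results*.csv written by segment training works):

from utils.segment.plots import plot_results_with_masks

plot_results_with_masks(file='runs/train-seg/exp/results.csv', best=True)  # star-marks the best epoch on each subplot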
for f in files: try: data = pd.read_csv(f) @@ -125,19 +125,19 @@ def plot_results_with_masks(file="path/to/results.csv", dir="", best=True): for i, j in enumerate([1, 2, 3, 4, 5, 6, 9, 10, 13, 14, 15, 16, 7, 8, 11, 12]): y = data.values[:, j] # y[y == 0] = np.nan # don't show zero values - ax[i].plot(x, y, marker=".", label=f.stem, linewidth=2, markersize=2) + ax[i].plot(x, y, marker='.', label=f.stem, linewidth=2, markersize=2) if best: # best - ax[i].scatter(index, y[index], color="r", label=f"best:{index}", marker="*", linewidth=3) - ax[i].set_title(s[j] + f"\n{round(y[index], 5)}") + ax[i].scatter(index, y[index], color='r', label=f'best:{index}', marker='*', linewidth=3) + ax[i].set_title(s[j] + f'\n{round(y[index], 5)}') else: # last - ax[i].scatter(x[-1], y[-1], color="r", label="last", marker="*", linewidth=3) - ax[i].set_title(s[j] + f"\n{round(y[-1], 5)}") + ax[i].scatter(x[-1], y[-1], color='r', label='last', marker='*', linewidth=3) + ax[i].set_title(s[j] + f'\n{round(y[-1], 5)}') # if j in [8, 9, 10]: # share train and val loss y axes # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5]) except Exception as e: - print(f"Warning: Plotting error for {f}: {e}") + print(f'Warning: Plotting error for {f}: {e}') ax[1].legend() - fig.savefig(save_dir / "results.png", dpi=200) + fig.savefig(save_dir / 'results.png', dpi=200) plt.close() diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 77549b005ceb..d9e060ab99df 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license """ PyTorch utils """ @@ -291,7 +291,7 @@ def model_info(model, verbose=False, imgsz=640): fs = '' name = Path(model.yaml_file).stem.replace('yolov5', 'YOLOv5') if hasattr(model, 'yaml_file') else 'Model' - LOGGER.info(f"{name} summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}") + LOGGER.info(f'{name} summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}') def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416) @@ -342,7 +342,7 @@ def smart_optimizer(model, name='Adam', lr=0.001, momentum=0.9, decay=1e-5): optimizer.add_param_group({'params': g[0], 'weight_decay': decay}) # add g0 with weight_decay optimizer.add_param_group({'params': g[1], 'weight_decay': 0.0}) # add g1 (BatchNorm2d weights) LOGGER.info(f"{colorstr('optimizer:')} {type(optimizer).__name__}(lr={lr}) with parameter groups " - f"{len(g[1])} weight(decay=0.0), {len(g[0])} weight(decay={decay}), {len(g[2])} bias") + f'{len(g[1])} weight(decay=0.0), {len(g[0])} weight(decay={decay}), {len(g[2])} bias') return optimizer diff --git a/utils/triton.py b/utils/triton.py index a94ef0ad197d..b5153dad940d 100644 --- a/utils/triton.py +++ b/utils/triton.py @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license """ Utils to interact with the Triton Inference Server """ @@ -21,7 +21,7 @@ def __init__(self, url: str): """ parsed_url = urlparse(url) - if parsed_url.scheme == "grpc": + if parsed_url.scheme == 'grpc': from tritonclient.grpc import InferenceServerClient, InferInput self.client = InferenceServerClient(parsed_url.netloc) # Triton GRPC client @@ -31,7 +31,7 @@ def __init__(self, url: str): def create_input_placeholders() -> typing.List[InferInput]: return [ - InferInput(i['name'], [int(s) for s in i["shape"]], i['datatype']) for i in self.metadata['inputs']] + InferInput(i['name'], [int(s) for s in 
i['shape']], i['datatype']) for i in self.metadata['inputs']] else: from tritonclient.http import InferenceServerClient, InferInput @@ -43,14 +43,14 @@ def create_input_placeholders() -> typing.List[InferInput]: def create_input_placeholders() -> typing.List[InferInput]: return [ - InferInput(i['name'], [int(s) for s in i["shape"]], i['datatype']) for i in self.metadata['inputs']] + InferInput(i['name'], [int(s) for s in i['shape']], i['datatype']) for i in self.metadata['inputs']] self._create_input_placeholders_fn = create_input_placeholders @property def runtime(self): """Returns the model runtime""" - return self.metadata.get("backend", self.metadata.get("platform")) + return self.metadata.get('backend', self.metadata.get('platform')) def __call__(self, *args, **kwargs) -> typing.Union[torch.Tensor, typing.Tuple[torch.Tensor, ...]]: """ Invokes the model. Parameters can be provided via args or kwargs. @@ -68,14 +68,14 @@ def __call__(self, *args, **kwargs) -> typing.Union[torch.Tensor, typing.Tuple[t def _create_inputs(self, *args, **kwargs): args_len, kwargs_len = len(args), len(kwargs) if not args_len and not kwargs_len: - raise RuntimeError("No inputs provided.") + raise RuntimeError('No inputs provided.') if args_len and kwargs_len: - raise RuntimeError("Cannot specify args and kwargs at the same time") + raise RuntimeError('Cannot specify args and kwargs at the same time') placeholders = self._create_input_placeholders_fn() if args_len: if args_len != len(placeholders): - raise RuntimeError(f"Expected {len(placeholders)} inputs, got {args_len}.") + raise RuntimeError(f'Expected {len(placeholders)} inputs, got {args_len}.') for input, value in zip(placeholders, args): input.set_data_from_numpy(value.cpu().numpy()) else: diff --git a/val.py b/val.py index 719bb09560ef..bcabbdfcb4bf 100644 --- a/val.py +++ b/val.py @@ -1,4 +1,4 @@ -# YOLOv5 ๐Ÿš€ by Ultralytics, GPL-3.0 license +# YOLOv5 ๐Ÿš€ by Ultralytics, AGPL-3.0 license """ Validate a trained YOLOv5 detection model on a detection dataset @@ -22,6 +22,7 @@ import argparse import json import os +import subprocess import sys from pathlib import Path @@ -305,13 +306,13 @@ def run( if save_json and len(jdict): w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights anno_json = str(Path('../datasets/coco/annotations/instances_val2017.json')) # annotations - pred_json = str(save_dir / f"{w}_predictions.json") # predictions + pred_json = str(save_dir / f'{w}_predictions.json') # predictions LOGGER.info(f'\nEvaluating pycocotools mAP... saving {pred_json}...') with open(pred_json, 'w') as f: json.dump(jdict, f) try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb - check_requirements('pycocotools') + check_requirements('pycocotools>=2.0.6') from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval @@ -399,12 +400,12 @@ def main(opt): r, _, t = run(**vars(opt), plots=False) y.append(r + t) # results and times np.savetxt(f, y, fmt='%10.4g') # save - os.system('zip -r study.zip study_*.txt') + subprocess.run(['zip', '-r', 'study.zip', 'study_*.txt']) plot_val_study(x=x) # plot else: raise NotImplementedError(f'--task {opt.task} not in ("train", "val", "test", "speed", "study")') -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt)
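One note on the os.system -> subprocess.run change in val.py above: with a list argument and no shell, 'study_*.txt' reaches zip verbatim and relies on zip's own wildcard handling rather than shell expansion. A pure-stdlib sketch that avoids both the external binary and the wildcard question, assuming the study_*.txt files sit in the current directory:

import glob
import zipfile

# Gather the benchmark result files and archive them without shelling out
with zipfile.ZipFile('study.zip', 'w', zipfile.ZIP_DEFLATED) as zf:
    for txt in sorted(glob.glob('study_*.txt')):
        zf.write(txt)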