diff --git a/configs/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance.py b/configs/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance.py new file mode 100644 index 000000000..82e2ae6d0 --- /dev/null +++ b/configs/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance.py @@ -0,0 +1,42 @@ +_base_ = './yolov5_ins_s-v61_syncbn_fast_8xb16-300e_coco_instance.py' # noqa + +data_root = 'data/balloon/' +# Path of train annotation file +train_ann_file = 'train.json' +train_data_prefix = 'train/' # Prefix of train image path +# Path of val annotation file +val_ann_file = 'val.json' +val_data_prefix = 'val/' # Prefix of val image path +metainfo = { + 'classes': ('balloon', ), + 'palette': [ + (220, 20, 60), + ] +} +num_classes = 1 + +train_batch_size_per_gpu = 4 +train_num_workers = 2 +log_interval = 1 +##################### +train_dataloader = dict( + batch_size=train_batch_size_per_gpu, + num_workers=train_num_workers, + dataset=dict( + data_root=data_root, + metainfo=metainfo, + data_prefix=dict(img=train_data_prefix), + ann_file=train_ann_file)) +val_dataloader = dict( + dataset=dict( + data_root=data_root, + metainfo=metainfo, + data_prefix=dict(img=val_data_prefix), + ann_file=val_ann_file)) +test_dataloader = val_dataloader +val_evaluator = dict(ann_file=data_root + val_ann_file) +test_evaluator = val_evaluator +default_hooks = dict(logger=dict(interval=log_interval)) +##################### + +model = dict(bbox_head=dict(head_module=dict(num_classes=num_classes))) diff --git a/demo/15_minutes_instance_segmentation.ipynb b/demo/15_minutes_instance_segmentation.ipynb new file mode 100644 index 000000000..a09a1a105 --- /dev/null +++ b/demo/15_minutes_instance_segmentation.ipynb @@ -0,0 +1,658 @@ +{ + "cells": [ + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "x7seefPduh36" + }, + "source": [ + "
\n", + " \n", + "
 
\n", + "
\n", + " OpenMMLab website\n", + " \n", + " \n", + " HOT\n", + " \n", + " \n", + "     \n", + " OpenMMLab platform\n", + " \n", + " \n", + " TRY IT OUT\n", + " \n", + " \n", + "
\n", + "
 
\n", + "\n", + "\"Open\n", + "\n", + "[![PyPI](https://img.shields.io/pypi/v/mmyolo)](https://pypi.org/project/mmyolo)\n", + "[![docs](https://img.shields.io/badge/docs-latest-blue)](https://mmyolo.readthedocs.io/en/latest/)\n", + "[![deploy](https://github.com/open-mmlab/mmyolo/workflows/deploy/badge.svg)](https://github.com/open-mmlab/mmyolo/actions)\n", + "[![codecov](https://codecov.io/gh/open-mmlab/mmyolo/branch/main/graph/badge.svg)](https://codecov.io/gh/open-mmlab/mmyolo)\n", + "[![license](https://img.shields.io/github/license/open-mmlab/mmyolo.svg)](https://github.com/open-mmlab/mmyolo/blob/main/LICENSE)\n", + "[![open issues](https://isitmaintained.com/badge/open/open-mmlab/mmyolo.svg)](https://github.com/open-mmlab/mmyolo/issues)\n", + "[![issue resolution](https://isitmaintained.com/badge/resolution/open-mmlab/mmyolo.svg)](https://github.com/open-mmlab/mmyolo/issues)\n", + "\n", + "[📘Documentation](https://mmyolo.readthedocs.io/en/latest/) |\n", + "[🛠️Installation](https://mmyolo.readthedocs.io/en/latest/get_started/installation.html) |\n", + "[👀Model Zoo](https://mmyolo.readthedocs.io/en/latest/model_zoo.html) |\n", + "[🆕Update News](https://mmyolo.readthedocs.io/en/latest/notes/changelog.html) |\n", + "[🤔Reporting Issues](https://github.com/open-mmlab/mmyolo/issues/new/choose)\n", + "\n", + "
\n", + "\n", + "
\n", + " \n", + " \"\"\n", + " \"\"\n", + " \n", + " \"\"\n", + " \"\"\n", + " \n", + " \"\"\n", + " \"\"\n", + " \n", + " \"\"\n", + " \"\"\n", + " \n", + " \"\"\n", + " \"\"\n", + " \n", + " \"\"\n", + "
" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "V6W8P5XEJGoc" + }, + "source": [ + "# 15 minutes to get started with MMYOLO instance segmentation\n", + "\n", + "Instance segmentation is a task in computer vision that aims to segment each object in an image and assign each object a unique identifier.\n", + "\n", + "Unlike semantic segmentation, instance segmentation not only segments out different categories in an image, but also separates different instances of the same category.\n", + "\n", + "
\n", + "\"Instance\n", + "
\n", + "\n", + "Taking the downloadable balloon dataset as an example, I will guide you through a 15-minute easy introduction to MMYOLO instance segmentation. The entire process includes the following steps:\n", + "\n", + "- [Installation](#installation)\n", + "- [Dataset](#dataset)\n", + "- [Config](#config)\n", + "- [Training](#training)\n", + "- [Testing](#testing)\n", + "- [EasyDeploy](#easydeploy-deployment)\n", + "\n", + "In this tutorial, we will use YOLOv5-s as an example. For the demo configuration of the balloon dataset with other YOLO series algorithms, please refer to the corresponding algorithm configuration folder." + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "Ae5SqsA7wYGQ" + }, + "source": [ + "## Installation\n", + "\n", + "Assuming you've already installed Conda in advance, then install PyTorch using the following commands." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "cellView": "form", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "XVLRaEIzwW-6", + "outputId": "901b5db6-b1d7-4830-e746-485ee76d6648" + }, + "outputs": [], + "source": [ + "# -----------------------------------------------------------------------------------------\n", + "# If you are using colab, you can skip this cell for PyTorch is pre-installed on the colab.\n", + "# -----------------------------------------------------------------------------------------\n", + "!python -V\n", + "# Check nvcc version\n", + "!nvcc -V\n", + "# Check GCC version\n", + "!gcc --version\n", + "# Create a new Conda environment\n", + "%conda create -n mmyolo python=3.8 -y\n", + "%conda activate mmyolo\n", + "# If you have GPU\n", + "%conda install pytorch torchvision -c pytorch\n", + "# If you only have CPU\n", + "# %conda install pytorch torchvision cpuonly -c pytorch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Check PyTorch version\n", + "import torch\n", + "print(torch.__version__)\n", + "print(torch.cuda.is_available())" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Install MMYOLO and dependency libraries using the following commands.\n", + "For details about how to configure the environment, see [Installation and verification](https://mmyolo.readthedocs.io/en/latest/get_started/installation.html).\n", + "```{note}\n", + "Note: Since this repo uses OpenMMLab 2.0, it is better to create a new conda virtual environment to prevent conflicts with the repo installed in OpenMMLab 1.0.\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "-qATUuntwmfD", + "outputId": "24be577b-efce-46f2-8b2f-a65d02824467" + }, + "outputs": [], + "source": [ + "!git clone https://github.com/open-mmlab/mmyolo.git\n", + "%cd mmyolo\n", + "%pip install -U openmim\n", + "!mim install -r requirements/mminstall.txt\n", + "# Install albumentations\n", + "!mim install -r requirements/albu.txt\n", + "# Install MMYOLO\n", + "!mim install -v -e .\n", + "# \"-v\" means verbose, or more output\n", + "# \"-e\" means installing a project in editable mode,\n", + "# thus any local modifications made to the code will take effect without reinstallation." 
+ ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Dataset\n", + "\n", + "The Balloon dataset is a single-class dataset that consists of 74 images and includes annotated information required for training. Here is an example image from the dataset:\n", + "\n", + "
\n", + "\"balloon\n", + "
\n", + "\n", + "You can download and use it directly by the following command:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "cellView": "form", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "gMQXwWuIw3ef", + "outputId": "c8efeac7-5b0c-4342-b5af-d3e790e358c3" + }, + "outputs": [], + "source": [ + "!python tools/misc/download_dataset.py --dataset-name balloon --save-dir ./data/balloon --unzip --delete\n", + "!python ./tools/dataset_converters/balloon2coco.py" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "covQskXXw2ul" + }, + "source": [ + "The data for the MMYOLO project is located in the MMYOLO project directory. The `train.json` and `val.json` files store the annotations in COCO format, while the `data/balloon/train` and `data/balloon/val` directories contain all the images for the dataset.\n", + "\n", + "## Config\n", + "\n", + "Taking YOLOv5 algorithm as an example, considering the limited GPU memory of users, we need to modify some default training parameters to make them run smoothly. The key parameters to be modified are as follows:\n", + "\n", + "- YOLOv5 is an Anchor-Based algorithm, and different datasets need to calculate suitable anchors adaptively.\n", + "- The default config uses 8 GPUs with a batch size of 16 per GPU. Now change it to a single GPU with a batch size of 12.\n", + "- In principle, the learning rate should be linearly scaled accordingly when the batch size is changed, but actual measurements have found that this is not necessary.\n", + "\n", + "To perform the specific operation, create a new configuration file named `yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance.py` in the `configs/yolov5/ins_seg` folder. For convenience, we have already provided this configuration file. 
Copy the following contents into the configuration file.\n", + "\n", + "```python\n", + "_base_ = './yolov5_ins_s-v61_syncbn_fast_8xb16-300e_coco_instance.py' # noqa\n", + "\n", + "data_root = 'data/balloon/' # dataset root\n", + "# Training set annotation file of json path\n", + "train_ann_file = 'train.json'\n", + "train_data_prefix = 'train/' # Dataset prefix\n", + "# Validation set annotation file of json path\n", + "val_ann_file = 'val.json'\n", + "val_data_prefix = 'val/'\n", + "metainfo = {\n", + " 'classes': ('balloon', ), # dataset category name\n", + " 'palette': [\n", + " (220, 20, 60),\n", + " ]\n", + "}\n", + "num_classes = 1\n", + "# Set batch size to 4\n", + "train_batch_size_per_gpu = 4\n", + "# dataloader num workers\n", + "train_num_workers = 2\n", + "log_interval = 1\n", + "#####################\n", + "train_dataloader = dict(\n", + " batch_size=train_batch_size_per_gpu,\n", + " num_workers=train_num_workers,\n", + " dataset=dict(\n", + " data_root=data_root,\n", + " metainfo=metainfo,\n", + " data_prefix=dict(img=train_data_prefix),\n", + " ann_file=train_ann_file))\n", + "val_dataloader = dict(\n", + " dataset=dict(\n", + " data_root=data_root,\n", + " metainfo=metainfo,\n", + " data_prefix=dict(img=val_data_prefix),\n", + " ann_file=val_ann_file))\n", + "test_dataloader = val_dataloader\n", + "val_evaluator = dict(ann_file=data_root + val_ann_file)\n", + "test_evaluator = val_evaluator\n", + "default_hooks = dict(logger=dict(interval=log_interval))\n", + "#####################\n", + "\n", + "model = dict(bbox_head=dict(head_module=dict(num_classes=num_classes)))\n", + "```\n", + "\n", + "The above configuration inherits from `yolov5_ins_s-v61_syncbn_fast_8xb16-300e_coco_instance.py` and updates configurations such as `data_root`, `metainfo`, `train_dataloader`, `val_dataloader`, `num_classes`, etc., based on the characteristics of the balloon dataset.\n", + "\n", + "## Training" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!python tools/train.py configs/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance.py" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "TQ0h6sv_rJxq" + }, + "source": [ + "After running the training command mentioned above, the folder `work_dirs/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance` will be automatically generated. The weight files and the training configuration file for this session will be saved in this folder. On a lower-end GPU like the GTX 1660, the entire training process will take approximately 30 minutes.\n", + "\n", + "
\n", + "\"image\"/\n", + "
\n", + "\n", + "The performance on `val.json` is as follows:\n", + "\n", + "```text\n", + " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.330\n", + " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.509\n", + " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.317\n", + " Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.000\n", + " Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.103\n", + " Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.417\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.150\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.396\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.454\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.000\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.317\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.525\n", + "```\n", + "\n", + "The above performance is obtained by printing using the COCO API, where -1 indicates the absence of objects of that scale.\n", + "\n", + "### Some Notes\n", + "\n", + "Two key warnings are printed during training:\n", + "\n", + "- You are using `YOLOv5Head` with num_classes == 1. The loss_cls will be 0. This is a normal phenomenon.\n", + "\n", + "The warning is because the `num_classes` currently trained is 1, the loss of the classification branch is always 0 according to the community of the YOLOv5 algorithm, which is a normal phenomenon.\n", + "\n", + "### Training is resumed after the interruption\n", + "\n", + "If you stop training, you can add `--resume` to the end of the training command and the program will automatically resume training with the latest weights file from `work_dirs`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!python tools/train.py configs/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance.py --resume" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "3sJxvQoUrMhX" + }, + "source": [ + "### Save GPU memory strategy\n", + "\n", + "The above config requires about 3G RAM, so if you don't have enough, consider turning on mixed-precision training" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!python tools/train.py configs/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance.py --amp" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "jVJdyHTxrQ9a" + }, + "source": [ + "### Training visualization\n", + "\n", + "MMYOLO currently supports local, TensorBoard, WandB and other back-end visualization. The default is to use local visualization, and you can switch to WandB and other real-time visualization of various indicators in the training process.\n", + "\n", + "#### 1 WandB\n", + "\n", + "WandB visualization need registered in website, and in the https://wandb.ai/settings for wandb API Keys.\n", + "\n", + "
\n", + "\"image\"/\n", + "
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%pip install wandb\n", + "# After running wandb login, enter the API Keys obtained above, and the login is successful.\n", + "!wandb login" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "Yu0_4YYRrbyY" + }, + "source": [ + "Add the wandb config at the end of config file we just created: `configs/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance.py`.\n", + "\n", + "```python\n", + "visualizer = dict(vis_backends = [dict(type='LocalVisBackend'), dict(type='WandbVisBackend')])\n", + "```\n", + "\n", + "Running the training command and you will see the loss, learning rate, and coco/bbox_mAP visualizations in the link." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!python tools/train.py configs/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance.py" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "f_DyzfDIzwMa" + }, + "source": [ + "
\n", + "\"image\"/\n", + "
\n", + "
\n", + "\"image\"/\n", + "
\n", + "\n", + "#### 2 Tensorboard\n", + "\n", + "Install Tensorboard using the following command." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "cellView": "form", + "id": "gHkGlii3n29Q" + }, + "outputs": [], + "source": [ + "%pip install tensorboard" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "bE-nx9TY1P-M" + }, + "source": [ + "Add the `tensorboard` config at the end of config file we just created: `configs/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance.py`.\n", + "\n", + "```python\n", + "visualizer = dict(vis_backends=[dict(type='LocalVisBackend'),dict(type='TensorboardVisBackend')])\n", + "```\n", + "\n", + "After re-running the training command, Tensorboard file will be generated in the visualization folder `work_dirs/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance/{timestamp}/vis_data`.\n", + "We can use Tensorboard to view the loss, learning rate, and coco/bbox_mAP visualizations from a web link by running the following command:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "cellView": "form", + "id": "g8fZgokho5CE" + }, + "outputs": [], + "source": [ + "!tensorboard --logdir=work_dirs/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "GUZ7MPoaro-o" + }, + "source": [ + "## Testing" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "VYmxtE0GunTB", + "outputId": "f440807c-1931-4810-b76d-617f73fde227" + }, + "outputs": [], + "source": [ + "!python tools/test.py configs/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance.py work_dirs/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance best_coco_bbox_mAP_epoch_300.pth --show-dir show_results" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "id": "_cFocUqN0BCb" + }, + "source": [ + "Run the above test command, you can not only get the AP performance printed in the **Training** section, You can also automatically save the result images to the `work_dirs/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance/{timestamp}/show_results` folder. Below is one of the result images, the left image is the actual annotation, and the right image is the inference result of the model.\n", + "\n", + "
\n", + "\"result_img\"/\n", + "
\n", + "\n", + "You can also visualize model inference results in a browser window if you use `WandbVisBackend` or `TensorboardVisBackend`.\n", + "\n", + "## Feature map visualization\n", + "\n", + "MMYOLO provides visualization scripts for feature map to analyze the current model training. Please refer to [Feature Map Visualization](../recommended_topics/visualization.md)\n", + "\n", + "Due to the bias of direct visualization of `test_pipeline`, we need to modify the `test_pipeline` of `configs/yolov5/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance.py`\n", + "\n", + "```python\n", + "test_pipeline = [\n", + " dict(\n", + " type='LoadImageFromFile',\n", + " file_client_args=_base_.file_client_args),\n", + " dict(type='YOLOv5KeepRatioResize', scale=img_scale),\n", + " dict(\n", + " type='LetterResize',\n", + " scale=img_scale,\n", + " allow_scale_up=False,\n", + " pad_val=dict(img=114)),\n", + " dict(type='LoadAnnotations', with_bbox=True, _scope_='mmdet'),\n", + " dict(\n", + " type='mmdet.PackDetInputs',\n", + " meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',\n", + " 'scale_factor', 'pad_param'))\n", + "]\n", + "```\n", + "\n", + "to the following config:\n", + "\n", + "```python\n", + "test_pipeline = [\n", + " dict(\n", + " type='LoadImageFromFile',\n", + " file_client_args=_base_.file_client_args),\n", + " dict(type='mmdet.Resize', scale=img_scale, keep_ratio=False), # modify the LetterResize to mmdet.Resize\n", + " dict(type='LoadAnnotations', with_bbox=True, _scope_='mmdet'),\n", + " dict(\n", + " type='mmdet.PackDetInputs',\n", + " meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',\n", + " 'scale_factor'))\n", + "]\n", + "```\n", + "\n", + "Let's choose the `data/balloon/train/3927754171_9011487133_b.jpg` image as an example to visualize the output feature maps of YOLOv5 backbone and neck layers.\n", + "\n", + "**1. Visualize the three channels of YOLOv5 backbone**" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!python demo/featmap_vis_demo.py data/balloon/train/3927754171_9011487133_b.jpg onfigs/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance.py work_dirs/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance/best_coco_bbox_mAP_epoch_300.pth --target-layers backbone --channel-reduction squeeze_mean" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "
\n", + "\"image\"/\n", + "
\n", + "\n", + "The result will be saved to the output folder in current path. Three output feature maps plotted in the above figure correspond to small, medium and large output feature maps.\n", + "\n", + "**2. Visualize the three channels of YOLOv5 neck**" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!python demo/featmap_vis_demo.py data/balloon/train/3927754171_9011487133_b.jpg \\\n", + " configs/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance.py \\\n", + " work_dirs/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance/best_coco_bbox_mAP_epoch_300.pth \\\n", + " --target-layers neck \\\n", + " --channel-reduction squeeze_mean" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "
\n", + "\"image\"/\n", + "
\n", + "\n", + "**3. Grad-Based CAM visualization**\n", + "TODO" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## EasyDeploy deployment\n", + "TODO\n", + "\n", + "This completes the transformation deployment of the trained model and checks the inference results. This is the end of the tutorial.\n", + "\n", + "If you encounter problems during training or testing, please check the [common troubleshooting steps](https://mmyolo.readthedocs.io/en/dev/recommended_topics/troubleshooting_steps.html) first and feel free to open an [issue](https://github.com/open-mmlab/mmyolo/issues/new/choose) if you still can't solve it." + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "provenance": [], + "toc_visible": true + }, + "gpuClass": "standard", + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + }, + "language_info": { + "name": "python" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/demo/15_minutes_object_detection.ipynb b/demo/15_minutes_object_detection.ipynb index 7c533e335..47e0ccbd8 100644 --- a/demo/15_minutes_object_detection.ipynb +++ b/demo/15_minutes_object_detection.ipynb @@ -508,7 +508,7 @@ "visualizer = dict(vis_backends=[dict(type='LocalVisBackend'),dict(type='TensorboardVisBackend')])\n", "```\n", "\n", - "After re-running the training command, Tensorboard file will be generated in the visualization folder `work_dirs/yolov5_s-v61_fast_1xb12-40e_cat.py/{timestamp}/vis_data`.\n", + "After re-running the training command, Tensorboard file will be generated in the visualization folder `work_dirs/yolov5_s-v61_fast_1xb12-40e_cat/{timestamp}/vis_data`.\n", "We can use Tensorboard to view the loss, learning rate, and coco/bbox_mAP visualizations from a web link by running the following command:" ] }, diff --git a/docs/en/get_started/15_minutes_instance_segmentation.md b/docs/en/get_started/15_minutes_instance_segmentation.md index c66a2f283..b42e25f64 100644 --- a/docs/en/get_started/15_minutes_instance_segmentation.md +++ b/docs/en/get_started/15_minutes_instance_segmentation.md @@ -1,3 +1,332 @@ # 15 minutes to get started with MMYOLO instance segmentation +Instance segmentation is a task in computer vision that aims to segment each object in an image and assign each object a unique identifier. + +Unlike semantic segmentation, instance segmentation not only segments out different categories in an image, but also separates different instances of the same category. + +
+Instance Segmentation +
+
+Taking the downloadable balloon dataset as an example, this tutorial will guide you through MMYOLO instance segmentation in roughly 15 minutes. The entire process includes the following steps:
+
+- [Installation](#installation)
+- [Dataset](#dataset)
+- [Config](#config)
+- [Training](#training)
+- [Testing](#testing)
+- [EasyDeploy](#easydeploy-deployment)
+
+In this tutorial, we will use YOLOv5-s as an example. For the demo configurations of the balloon dataset with other YOLO series algorithms, please refer to the corresponding algorithm configuration folders.
+
+## Installation
+
+Assuming you have already installed Conda, install PyTorch using the following commands.
+
+```{note}
+Note: Since this repo uses OpenMMLab 2.0, it is better to create a new conda virtual environment to prevent conflicts with repos installed for OpenMMLab 1.0.
+```
+
+```shell
+conda create -n mmyolo python=3.8 -y
+conda activate mmyolo
+# If you have GPU
+conda install pytorch torchvision -c pytorch
+# If you only have CPU
+# conda install pytorch torchvision cpuonly -c pytorch
+```
+
+Install MMYOLO and dependency libraries using the following commands.
+
+```shell
+git clone https://github.com/open-mmlab/mmyolo.git
+cd mmyolo
+pip install -U openmim
+mim install -r requirements/mminstall.txt
+# Install albumentations
+mim install -r requirements/albu.txt
+# Install MMYOLO
+mim install -v -e .
+# "-v" means verbose, or more output
+# "-e" means installing a project in editable mode,
+# thus any local modifications made to the code will take effect without reinstallation.
+```
+
+For details about how to configure the environment, see [Installation and verification](./installation.md).
+
+## Dataset
+
+The Balloon dataset is a single-class dataset that consists of 74 images and includes the annotated information required for training. Here is an example image from the dataset:
+
+<div align=center>
+balloon dataset +
+
+You can download and use it directly by the following command:
+
+```shell
+python tools/misc/download_dataset.py --dataset-name balloon --save-dir ./data/balloon --unzip --delete
+python ./tools/dataset_converters/balloon2coco.py
+```
+
+The data is placed under the MMYOLO project directory: the `train.json` and `val.json` files store the annotations in COCO format, while the `data/balloon/train` and `data/balloon/val` directories contain all the images of the dataset.
+
+## Config
+
+Taking the YOLOv5 algorithm as an example, and considering the limited GPU memory of users, we need to modify some default training parameters to make them run smoothly. The key parameters to be modified are as follows:
+
+- YOLOv5 is an Anchor-Based algorithm, so suitable anchors need to be calculated adaptively for each dataset.
+- The default config uses 8 GPUs with a batch size of 16 per GPU. Now change it to a single GPU with a batch size of 4.
+- In principle, the learning rate should be linearly scaled accordingly when the batch size is changed, but in practice we have found this unnecessary.
+
+To perform the specific operation, create a new configuration file named `yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance.py` in the `configs/yolov5/ins_seg` folder. For convenience, we have already provided this configuration file. Copy the following contents into the configuration file.
+
+```python
+_base_ = './yolov5_ins_s-v61_syncbn_fast_8xb16-300e_coco_instance.py' # noqa
+
+data_root = 'data/balloon/' # dataset root
+# Training set annotation file of json path
+train_ann_file = 'train.json'
+train_data_prefix = 'train/' # Dataset prefix
+# Validation set annotation file of json path
+val_ann_file = 'val.json'
+val_data_prefix = 'val/'
+metainfo = {
+    'classes': ('balloon', ), # dataset category name
+    'palette': [
+        (220, 20, 60),
+    ]
+}
+num_classes = 1
+# Set batch size to 4
+train_batch_size_per_gpu = 4
+# dataloader num workers
+train_num_workers = 2
+log_interval = 1
+#####################
+train_dataloader = dict(
+    batch_size=train_batch_size_per_gpu,
+    num_workers=train_num_workers,
+    dataset=dict(
+        data_root=data_root,
+        metainfo=metainfo,
+        data_prefix=dict(img=train_data_prefix),
+        ann_file=train_ann_file))
+val_dataloader = dict(
+    dataset=dict(
+        data_root=data_root,
+        metainfo=metainfo,
+        data_prefix=dict(img=val_data_prefix),
+        ann_file=val_ann_file))
+test_dataloader = val_dataloader
+val_evaluator = dict(ann_file=data_root + val_ann_file)
+test_evaluator = val_evaluator
+default_hooks = dict(logger=dict(interval=log_interval))
+#####################
+
+model = dict(bbox_head=dict(head_module=dict(num_classes=num_classes)))
+```
+
+The above configuration inherits from `yolov5_ins_s-v61_syncbn_fast_8xb16-300e_coco_instance.py` and updates configurations such as `data_root`, `metainfo`, `train_dataloader`, `val_dataloader`, `num_classes`, etc., based on the characteristics of the balloon dataset.
+
+## Training
+
+```shell
+python tools/train.py configs/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance.py
+```
+
+After running the training command mentioned above, the folder `work_dirs/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance` will be automatically generated. The weight files and the training configuration file for this session will be saved in this folder. On a lower-end GPU like the GTX 1660, the entire training process will take approximately 30 minutes.
+
+<div align=center>
+image +
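+
+If you want to locate the saved checkpoints programmatically, a minimal Python sketch like the one below works; it only assumes the default checkpoint naming shown later in this tutorial (e.g. `best_coco_bbox_mAP_epoch_300.pth`):
+
+```python
+from pathlib import Path
+
+work_dir = Path('work_dirs/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance')
+# checkpoints named `best_coco_bbox_mAP_epoch_*.pth` track the best validation mAP so far
+best_ckpts = sorted(work_dir.glob('best_coco_bbox_mAP_epoch_*.pth'))
+print(best_ckpts[-1] if best_ckpts else 'no best checkpoint saved yet')
+```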
+
+The performance on `val.json` is as follows:
+
+```text
+ Average Precision  (AP) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ] = 0.330
+ Average Precision  (AP) @[ IoU=0.50      | area=   all | maxDets=100 ] = 0.509
+ Average Precision  (AP) @[ IoU=0.75      | area=   all | maxDets=100 ] = 0.317
+ Average Precision  (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.000
+ Average Precision  (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.103
+ Average Precision  (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.417
+ Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets=  1 ] = 0.150
+ Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets= 10 ] = 0.396
+ Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ] = 0.454
+ Average Recall     (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.000
+ Average Recall     (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.317
+ Average Recall     (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.525
+```
+
+The above performance is printed via the COCO API, where a value of -1 would indicate that no objects of that scale are present.
+
+### Some Notes
+
+The following key warning is printed during training:
+
+- You are using `YOLOv5Head` with num_classes == 1. The loss_cls will be 0. This is a normal phenomenon.
+
+The warning appears because `num_classes` is 1 in this run; by design of the YOLOv5 algorithm, the loss of the classification branch is always 0 in the single-class case, which is a normal phenomenon.
+
+### Resume training after an interruption
+
+If you stop training, you can add `--resume` to the end of the training command and the program will automatically resume training with the latest weights file from `work_dirs`.
+
+```shell
+python tools/train.py configs/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance.py --resume
+```
+
+### Save GPU memory strategy
+
+The above config requires about 3 GB of GPU memory, so if you don't have enough, consider turning on mixed-precision training:
+
+```shell
+python tools/train.py configs/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance.py --amp
+```
+
+### Training visualization
+
+MMYOLO currently supports several visualization backends, such as local, TensorBoard and WandB. The default is local visualization, and you can switch to WandB or another backend to visualize various metrics in real time during training.
+
+#### 1 WandB
+
+WandB visualization requires registration on the WandB website; you can then obtain your API key at https://wandb.ai/settings.
+
+<div align=center>
+image +
+
+```shell
+pip install wandb
+# After running wandb login, enter the API key obtained above to complete the login.
+wandb login
+```
+
+Add the wandb config at the end of the config file we just created: `configs/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance.py`.
+
+```python
+visualizer = dict(vis_backends = [dict(type='LocalVisBackend'), dict(type='WandbVisBackend')])
+```
+
+Re-run the training command and you will see the loss, learning rate, and coco/bbox_mAP visualizations at the web link printed in the console.
+
+```shell
+python tools/train.py configs/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance.py
+```
+
+#### 2 Tensorboard
+
+Install the Tensorboard package using the following command:
+
+```shell
+pip install tensorboard
+```
+
+Add the `tensorboard` config at the end of the config file we just created: `configs/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance.py`.
+
+```python
+visualizer = dict(vis_backends=[dict(type='LocalVisBackend'),dict(type='TensorboardVisBackend')])
+```
+
+After re-running the training command, the Tensorboard files will be generated in the visualization folder `work_dirs/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance/{timestamp}/vis_data`.
+We can use Tensorboard to view the loss, learning rate, and coco/bbox_mAP visualizations from a web link by running the following command:
+
+```shell
+tensorboard --logdir=work_dirs/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance
+```
+
+## Testing
+
+```shell
+python tools/test.py configs/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance.py \
+    work_dirs/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance/best_coco_bbox_mAP_epoch_300.pth \
+    --show-dir show_results
+```
+
+Running the above test command not only prints the AP performance reported in the **Training** section, but also automatically saves the result images to the `work_dirs/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance/{timestamp}/show_results` folder. Below is one of the result images: the left image is the actual annotation, and the right image is the inference result of the model.
+
+<div align=center>
+result_img +
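+
+If you prefer to inspect one of the saved comparison images directly in Python, a small sketch like the one below displays the most recent result; the `{timestamp}` folder name depends on when the test command was run, so we simply pick the latest one:
+
+```python
+from pathlib import Path
+
+import matplotlib.pyplot as plt
+import mmcv
+
+work_dir = Path('work_dirs/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance')
+# pick the newest `{timestamp}/show_results` folder produced by tools/test.py
+show_dir = sorted(work_dir.glob('*/show_results'))[-1]
+img = mmcv.imread(str(next(show_dir.iterdir())))
+plt.imshow(mmcv.bgr2rgb(img))
+plt.axis('off')
+plt.show()
+```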
+
+You can also visualize model inference results in a browser window if you use `WandbVisBackend` or `TensorboardVisBackend`.
+
+## Feature map visualization
+
+MMYOLO provides feature map visualization scripts to analyze the current model training. Please refer to [Feature Map Visualization](../recommended_topics/visualization.md).
+
+Because direct visualization with the default `test_pipeline` is biased, we need to modify the `test_pipeline` in `configs/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance.py` from
+
+```python
+test_pipeline = [
+    dict(
+        type='LoadImageFromFile',
+        backend_args=_base_.backend_args),
+    dict(type='YOLOv5KeepRatioResize', scale=img_scale),
+    dict(
+        type='LetterResize',
+        scale=img_scale,
+        allow_scale_up=False,
+        pad_val=dict(img=114)),
+    dict(type='LoadAnnotations', with_bbox=True, _scope_='mmdet'),
+    dict(
+        type='mmdet.PackDetInputs',
+        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
+                   'scale_factor', 'pad_param'))
+]
+```
+
+to the following config:
+
+```python
+test_pipeline = [
+    dict(
+        type='LoadImageFromFile',
+        backend_args=_base_.backend_args),
+    dict(type='mmdet.Resize', scale=img_scale, keep_ratio=False), # modify the LetterResize to mmdet.Resize
+    dict(type='LoadAnnotations', with_bbox=True, _scope_='mmdet'),
+    dict(
+        type='mmdet.PackDetInputs',
+        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
+                   'scale_factor'))
+]
+```
+
+Let's choose the `data/balloon/train/3927754171_9011487133_b.jpg` image as an example to visualize the output feature maps of the YOLOv5 backbone and neck layers.
+
+**1. Visualize the three channels of YOLOv5 backbone**
+
+```shell
+python demo/featmap_vis_demo.py data/balloon/train/3927754171_9011487133_b.jpg \
+    configs/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance.py \
+    work_dirs/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance/best_coco_bbox_mAP_epoch_300.pth \
+    --target-layers backbone \
+    --channel-reduction squeeze_mean
+```
+
+<div align=center>
+image +
+
+The result will be saved to the `output` folder in the current path. The three feature maps plotted in the above figure correspond to the small, medium and large output feature maps.
+
+**2. Visualize the three channels of YOLOv5 neck**
+
+```shell
+python demo/featmap_vis_demo.py data/balloon/train/3927754171_9011487133_b.jpg \
+    configs/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance.py \
+    work_dirs/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance/best_coco_bbox_mAP_epoch_300.pth \
+    --target-layers neck \
+    --channel-reduction squeeze_mean
+```
+
+<div align=center>
+image +
+
+**3. Grad-Based CAM visualization**
+
+TODO
+
+## EasyDeploy deployment
+
+TODO
+
+The full content above can be viewed in [15_minutes_instance_segmentation.ipynb](../../../demo/15_minutes_instance_segmentation.ipynb). This is the end of the tutorial. If you encounter problems during training or testing, please check the [common troubleshooting steps](../recommended_topics/troubleshooting_steps.md) first and feel free to open an [issue](https://github.com/open-mmlab/mmyolo/issues/new/choose) if you still can't solve it.
diff --git a/docs/en/get_started/15_minutes_object_detection.md b/docs/en/get_started/15_minutes_object_detection.md
index 953aa5464..354b2e708 100644
--- a/docs/en/get_started/15_minutes_object_detection.md
+++ b/docs/en/get_started/15_minutes_object_detection.md
@@ -106,7 +106,7 @@ anchors = [
 ]
 # Max training 40 epoch
 max_epochs = 40
-# bs = 12
+# Set batch size to 12
 train_batch_size_per_gpu = 12
 # dataloader num workers
 train_num_workers = 4
@@ -270,7 +270,7 @@ Add the `tensorboard` config at the end of config file we just created: `configs
 visualizer = dict(vis_backends=[dict(type='LocalVisBackend'),dict(type='TensorboardVisBackend')])
 ```
 
-After re-running the training command, Tensorboard file will be generated in the visualization folder `work_dirs/yolov5_s-v61_fast_1xb12-40e_cat.py/{timestamp}/vis_data`.
+After re-running the training command, Tensorboard file will be generated in the visualization folder `work_dirs/yolov5_s-v61_fast_1xb12-40e_cat/{timestamp}/vis_data`.
 We can use Tensorboard to view the loss, learning rate, and coco/bbox_mAP visualizations from a web link by running the following command:
 
 ```shell
diff --git a/docs/zh_cn/get_started/15_minutes_instance_segmentation.md b/docs/zh_cn/get_started/15_minutes_instance_segmentation.md
index 48fe3ca90..2b9e6aab8 100644
--- a/docs/zh_cn/get_started/15_minutes_instance_segmentation.md
+++ b/docs/zh_cn/get_started/15_minutes_instance_segmentation.md
@@ -1,3 +1,330 @@
 # 15 分钟上手 MMYOLO 实例分割
+实例分割是计算机视觉中的一个任务,旨在将图像中的每个对象都分割出来,并为每个对象分配一个唯一的标识符。与语义分割不同,实例分割不仅分割出图像中的不同类别,还将同一类别的不同实例分开。
+
+<div align=center>
+Instance Segmentation +
+
+以可供下载的气球 balloon 小数据集为例,带大家 15 分钟轻松上手 MMYOLO 实例分割。整个流程包含如下步骤:
+
+- [环境安装](#环境安装)
+- [数据集准备](#数据集准备)
+- [配置准备](#配置准备)
+- [模型训练](#模型训练)
+- [模型测试](#模型测试)
+- [EasyDeploy 模型部署](#easydeploy-模型部署)
+
+本文以 YOLOv5-s 为例,其余 YOLO 系列算法的气球 balloon 小数据集 demo 配置请查看对应的算法配置文件夹。
+
+## 环境安装
+
+假设你已经提前安装好了 Conda,接下来安装 PyTorch
+
+```shell
+conda create -n mmyolo python=3.8 -y
+conda activate mmyolo
+# 如果你有 GPU
+conda install pytorch torchvision -c pytorch
+# 如果你是 CPU
+# conda install pytorch torchvision cpuonly -c pytorch
+```
+
+安装 MMYOLO 和依赖库
+
+```shell
+git clone https://github.com/open-mmlab/mmyolo.git
+cd mmyolo
+pip install -U openmim
+mim install -r requirements/mminstall.txt
+# Install albumentations
+mim install -r requirements/albu.txt
+# Install MMYOLO
+mim install -v -e .
+# "-v" 指详细说明,或更多的输出
+# "-e" 表示在可编辑模式下安装项目,因此对代码所做的任何本地修改都会生效,从而无需重新安装。
+```
+
+```{note}
+温馨提醒:由于本仓库采用的是 OpenMMLab 2.0,请最好新建一个 conda 虚拟环境,防止和 OpenMMLab 1.0 已经安装的仓库冲突。
+```
+
+详细环境配置操作请查看 [安装和验证](./installation.md)
+
+## 数据集准备
+
+Balloon 数据集是一个包含 74 张图片的单类别数据集,带有训练所需的标注信息。样例图片如下所示:
+
+<div align=center>
+balloon dataset +
+
+你只需执行如下命令即可下载并直接使用:
+
+```shell
+python tools/misc/download_dataset.py --dataset-name balloon --save-dir ./data/balloon --unzip --delete
+python ./tools/dataset_converters/balloon2coco.py
+```
+
+data 位于 mmyolo 工程目录下,`train.json` 和 `val.json` 中存放的是 COCO 格式的标注,`data/balloon/train` 和 `data/balloon/val` 中存放的是所有图片。
+
+## 配置准备
+
+以 YOLOv5 算法为例,考虑到用户显存和内存有限,我们需要修改一些默认训练参数来让大家愉快地跑起来,核心需要修改的参数如下:
+
+- YOLOv5 是 Anchor-Based 类算法,不同的数据集需要自适应计算合适的 Anchor
+- 默认配置是 8 卡,每张卡 batch size 为 16,现将其改成单卡,每张卡 batch size 为 4
+- 原则上 batch size 改变后,学习率也需要进行线性缩放,但是实测发现不需要
+
+具体操作为在 `configs/yolov5/ins_seg` 文件夹下新建 `yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance.py` 配置文件(为了方便大家直接使用,我们已经提供了该配置),并把以下内容复制到配置文件中。
+
+```python
+_base_ = './yolov5_ins_s-v61_syncbn_fast_8xb16-300e_coco_instance.py' # noqa
+
+data_root = 'data/balloon/'
+# 训练集标注路径
+train_ann_file = 'train.json'
+train_data_prefix = 'train/' # 训练集图片路径
+# 验证集标注路径
+val_ann_file = 'val.json'
+val_data_prefix = 'val/' # 验证集图片路径
+metainfo = {
+    'classes': ('balloon', ), # 数据集类别名称
+    'palette': [
+        (220, 20, 60),
+    ]
+}
+num_classes = 1
+# 批处理大小 batch size 设置为 4
+train_batch_size_per_gpu = 4
+# dataloader 加载进程数
+train_num_workers = 2
+log_interval = 1
+#####################
+train_dataloader = dict(
+    batch_size=train_batch_size_per_gpu,
+    num_workers=train_num_workers,
+    dataset=dict(
+        data_root=data_root,
+        metainfo=metainfo,
+        data_prefix=dict(img=train_data_prefix),
+        ann_file=train_ann_file))
+val_dataloader = dict(
+    dataset=dict(
+        data_root=data_root,
+        metainfo=metainfo,
+        data_prefix=dict(img=val_data_prefix),
+        ann_file=val_ann_file))
+test_dataloader = val_dataloader
+val_evaluator = dict(ann_file=data_root + val_ann_file)
+test_evaluator = val_evaluator
+default_hooks = dict(logger=dict(interval=log_interval))
+#####################
+
+model = dict(bbox_head=dict(head_module=dict(num_classes=num_classes)))
+```
+
+以上配置从 `yolov5_ins_s-v61_syncbn_fast_8xb16-300e_coco_instance.py` 中继承,并根据 balloon 数据的特点更新了 `data_root`、`metainfo`、`train_dataloader`、`val_dataloader`、`num_classes` 等配置。
+
+## 模型训练
+
+```shell
+python tools/train.py configs/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance.py
+```
+
+运行以上训练命令后,`work_dirs/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance` 文件夹会被自动生成,权重文件以及此次的训练配置文件将会保存在此文件夹中。在 1660 低端显卡上,整个训练过程大概需要 30 分钟。
+
+<div align=center>
+image +
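+
+如果想用 Python 定位训练产生的权重文件,可以参考下面的简单示例;它仅假设使用了本教程后文出现的默认权重命名方式(例如 `best_coco_bbox_mAP_epoch_300.pth`):
+
+```python
+from pathlib import Path
+
+work_dir = Path('work_dirs/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance')
+# 名为 `best_coco_bbox_mAP_epoch_*.pth` 的权重对应验证集上迄今最优的 mAP
+best_ckpts = sorted(work_dir.glob('best_coco_bbox_mAP_epoch_*.pth'))
+print(best_ckpts[-1] if best_ckpts else '尚未保存最优权重')
+```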
+
+在 `val.json` 上性能如下所示:
+
+```text
+ Average Precision  (AP) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ] = 0.330
+ Average Precision  (AP) @[ IoU=0.50      | area=   all | maxDets=100 ] = 0.509
+ Average Precision  (AP) @[ IoU=0.75      | area=   all | maxDets=100 ] = 0.317
+ Average Precision  (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.000
+ Average Precision  (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.103
+ Average Precision  (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.417
+ Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets=  1 ] = 0.150
+ Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets= 10 ] = 0.396
+ Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ] = 0.454
+ Average Recall     (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.000
+ Average Recall     (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.317
+ Average Recall     (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.525
+```
+
+上述性能是通过 COCO API 打印的,其中 -1 表示该尺度下不存在物体。
+
+### 一些注意事项
+
+在训练过程中会打印如下关键警告:
+
+- You are using `YOLOv5Head` with num_classes == 1. The loss_cls will be 0. This is a normal phenomenon.
+
+该警告不会对性能有任何影响。出现该警告是因为当前训练的类别数是 1,按照 YOLOv5 算法社区的设计,分类分支的 loss 始终是 0,这是正常现象。
+
+### 中断后恢复训练
+
+如果训练中途停止,可以在训练命令最后加上 `--resume`,程序会自动从 `work_dirs` 中加载最新的权重文件恢复训练。
+
+```shell
+python tools/train.py configs/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance.py --resume
+```
+
+### 节省显存策略
+
+上述配置大概需要 1.0G 显存,如果你的显存不够,可以考虑开启混合精度训练:
+
+```shell
+python tools/train.py configs/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance.py --amp
+```
+
+### 训练可视化
+
+MMYOLO 目前支持本地、TensorBoard 以及 WandB 等多种后端可视化,默认是采用本地可视化方式,你可以切换为 WandB 等后端,实时可视化训练过程中的各类指标。
+
+#### 1 WandB 可视化使用
+
+在 WandB 官网注册账号,并在 https://wandb.ai/settings 获取 WandB 的 API Keys。
+
+<div align=center>
+image +
+
+```shell
+pip install wandb
+# 运行 wandb login 后输入上文中获取到的 API Keys,便可登录成功。
+wandb login
+```
+
+在 `configs/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance.py` 配置文件最后添加 WandB 配置
+
+```python
+visualizer = dict(vis_backends = [dict(type='LocalVisBackend'), dict(type='WandbVisBackend')])
+```
+
+重新运行训练命令便可以在命令行中提示的网页链接中看到 loss、学习率和 coco/bbox_mAP 等数据可视化了。
+
+```shell
+python tools/train.py configs/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance.py
+```
+
+#### 2 Tensorboard 可视化使用
+
+安装 Tensorboard 环境
+
+```shell
+pip install tensorboard
+```
+
+同上述,在配置文件 `configs/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance.py` 的最后添加 `tensorboard` 配置
+
+```python
+visualizer = dict(vis_backends=[dict(type='LocalVisBackend'), dict(type='TensorboardVisBackend')])
+```
+
+重新运行训练命令后,Tensorboard 文件会生成在可视化文件夹 `work_dirs/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance/{timestamp}/vis_data` 下,
+运行下面的命令便可以在网页链接使用 Tensorboard 查看 loss、学习率和 coco/bbox_mAP 等可视化数据了:
+
+```shell
+tensorboard --logdir=work_dirs/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance
+```
+
+## 模型测试
+
+```shell
+python tools/test.py configs/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance.py \
+    work_dirs/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance/best_coco_bbox_mAP_epoch_300.pth \
+    --show-dir show_results
+```
+
+运行以上测试命令,你不仅可以得到**模型训练**部分所打印的 AP 性能,还可以将推理结果图片自动保存至 `work_dirs/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance/{timestamp}/show_results` 文件夹中。下面为其中一张结果图片,左图为实际标注,右图为模型推理结果。
+
+<div align=center>
+result_img +
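+
+如果想直接在 Python 中查看保存的对比图,可以参考下面的简单示例;`{timestamp}` 文件夹名取决于运行测试命令的时间,这里直接选取最新的一个:
+
+```python
+from pathlib import Path
+
+import matplotlib.pyplot as plt
+import mmcv
+
+work_dir = Path('work_dirs/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance')
+# 选取 tools/test.py 生成的最新 `{timestamp}/show_results` 文件夹
+show_dir = sorted(work_dir.glob('*/show_results'))[-1]
+img = mmcv.imread(str(next(show_dir.iterdir())))
+plt.imshow(mmcv.bgr2rgb(img))
+plt.axis('off')
+plt.show()
+```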
+
+如果你使用了 `WandbVisBackend` 或者 `TensorboardVisBackend`,则还可以在浏览器窗口可视化模型推理结果。
+
+## 特征图相关可视化
+
+MMYOLO 中提供了特征图相关可视化脚本,用于分析当前模型训练效果。详细使用流程请参考 [特征图可视化](../recommended_topics/visualization.md)
+
+由于直接可视化 `test_pipeline` 会存在偏差,故需要将 `configs/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance.py` 中的 `test_pipeline`
+
+```python
+test_pipeline = [
+    dict(
+        type='LoadImageFromFile',
+        backend_args=_base_.backend_args),
+    dict(type='YOLOv5KeepRatioResize', scale=img_scale),
+    dict(
+        type='LetterResize',
+        scale=img_scale,
+        allow_scale_up=False,
+        pad_val=dict(img=114)),
+    dict(type='LoadAnnotations', with_bbox=True, _scope_='mmdet'),
+    dict(
+        type='mmdet.PackDetInputs',
+        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
+                   'scale_factor', 'pad_param'))
+]
+```
+
+修改为如下配置:
+
+```python
+test_pipeline = [
+    dict(
+        type='LoadImageFromFile',
+        backend_args=_base_.backend_args),
+    dict(type='mmdet.Resize', scale=img_scale, keep_ratio=False), # 删除 YOLOv5KeepRatioResize, 将 LetterResize 修改成 mmdet.Resize
+    dict(type='LoadAnnotations', with_bbox=True, _scope_='mmdet'),
+    dict(
+        type='mmdet.PackDetInputs',
+        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
+                   'scale_factor')) # 删除 pad_param
+]
+```
+
+我们选择 `data/balloon/train/3927754171_9011487133_b.jpg` 图片作为例子,可视化 YOLOv5 backbone 和 neck 层的输出特征图。
+
+**1. 可视化 YOLOv5 backbone 输出的 3 个通道**
+
+```shell
+python demo/featmap_vis_demo.py data/balloon/train/3927754171_9011487133_b.jpg \
+    configs/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance.py \
+    work_dirs/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance/best_coco_bbox_mAP_epoch_300.pth \
+    --target-layers backbone \
+    --channel-reduction squeeze_mean
+```
+
+<div align=center>
+image +
+
+结果会保存到当前路径的 output 文件夹下。上图中绘制的 3 个输出特征图对应大中小输出特征图。
+
+**2. 可视化 YOLOv5 neck 输出的 3 个通道**
+
+```shell
+python demo/featmap_vis_demo.py data/balloon/train/3927754171_9011487133_b.jpg \
+    configs/yolov5/ins_seg/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance.py \
+    work_dirs/yolov5_ins_s-v61_syncbn_fast_8xb16-300e_balloon_instance/best_coco_bbox_mAP_epoch_300.pth \
+    --target-layers neck \
+    --channel-reduction squeeze_mean
+```
+
+<div align=center>
+image +
+
+**3. Grad-Based CAM 可视化**
+
+TODO
+
+## EasyDeploy 模型部署
+
+TODO
+
+至此本教程结束。
+
+以上完整内容可以查看 [15_minutes_instance_segmentation.ipynb](../../../demo/15_minutes_instance_segmentation.ipynb)。如果你在训练或者测试过程中碰到问题,请先查看 [常见错误排除步骤](../recommended_topics/troubleshooting_steps.md),如果依然无法解决欢迎提 issue。
diff --git a/docs/zh_cn/get_started/15_minutes_object_detection.md b/docs/zh_cn/get_started/15_minutes_object_detection.md
index a348203f4..51022baa9 100644
--- a/docs/zh_cn/get_started/15_minutes_object_detection.md
+++ b/docs/zh_cn/get_started/15_minutes_object_detection.md
@@ -268,7 +268,7 @@ pip install tensorboard
 visualizer = dict(vis_backends=[dict(type='LocalVisBackend'), dict(type='TensorboardVisBackend')])
 ```
 
-重新运行训练命令后,Tensorboard 文件会生成在可视化文件夹 `work_dirs/yolov5_s-v61_fast_1xb12-40e_cat.py/{timestamp}/vis_data` 下,
+重新运行训练命令后,Tensorboard 文件会生成在可视化文件夹 `work_dirs/yolov5_s-v61_fast_1xb12-40e_cat/{timestamp}/vis_data` 下,
 运行下面的命令便可以在网页链接使用 Tensorboard 查看 loss、学习率和 coco/bbox_mAP 等可视化数据了:
 
 ```shell
@@ -427,10 +427,10 @@ pip install onnx-simplifier # 如果需要使用 simplify 功能需要安装
 pip install tensorrt        # 如果有 GPU 环境并且需要输出 TensorRT 模型需要继续执行
 ```
 
-完成安装后就可以用以下命令对已经训练好的针对 cat 数据集的模型一键转换部署,当前设备的 ONNX 版本为 1.13.0,TensorRT 版本为 8.5.3.1,故可保持 `--opset` 为 11,其余各项参数的具体含义和参数值需要对照使用的 config 文件进行调整。此处我们先导出 CPU 版本的 ONNX 模型,`--backend` 为 1。
+完成安装后就可以用以下命令对已经训练好的针对 cat 数据集的模型一键转换部署,当前设备的 ONNX 版本为 1.13.0,TensorRT 版本为 8.5.3.1,故可保持 `--opset` 为 11,其余各项参数的具体含义和参数值需要对照使用的 config 文件进行调整。此处我们先导出 CPU 版本的 ONNX 模型,`--backend` 为 ONNXRUNTIME。
 
 ```shell
-python projects/easydeploy/tools/export.py \
+python projects/easydeploy/tools/export_onnx.py \
 	configs/yolov5/yolov5_s-v61_fast_1xb12-40e_cat.py \
 	work_dirs/yolov5_s-v61_fast_1xb12-40e_cat/epoch_40.pth \
 	--work-dir work_dirs/yolov5_s-v61_fast_1xb12-40e_cat \
@@ -439,7 +439,7 @@ python projects/easydeploy/tools/export.py \
 	--device cpu \
 	--simplify \
 	--opset 11 \
-	--backend 1 \
+	--backend ONNXRUNTIME \
 	--pre-topk 1000 \
 	--keep-topk 100 \
 	--iou-threshold 0.65 \
 	--score-threshold 0.25
@@ -530,4 +530,4 @@ python projects/easydeploy/tools/image-demo.py \
 
 这样我们就完成了将训练完成的模型进行转换部署并且检查推理结果的工作。至此本教程结束。
 
-以上完整内容可以查看 [15_minutes_object_detection.ipynb](https://github.com/open-mmlab/mmyolo/blob/dev/demo/15_minutes_object_detection.ipynb)。 如果你在训练或者测试过程中碰到问题,请先查看 [常见错误排除步骤](../recommended_topics/troubleshooting_steps.md),如果依然无法解决欢迎提 [issue](https://github.com/open-mmlab/mmyolo/issues/new/choose)。
+以上完整内容可以查看 [15_minutes_object_detection.ipynb](../../../demo/15_minutes_object_detection.ipynb)。如果你在训练或者测试过程中碰到问题,请先查看 [常见错误排除步骤](../recommended_topics/troubleshooting_steps.md),如果依然无法解决欢迎提 [issue](https://github.com/open-mmlab/mmyolo/issues/new/choose)。