- [gui.html markup deleted here; HTML tags were lost in extraction. Recoverable text from the removed template:]
- {{data.projectName}} ({{data.projectImagesCount}} images)
- classes table: {{ scope.row.title }}, selection handler "state.selectedClasses = val.map(x => x.title);"
- split options: Random / Based on image tags (not implemented yet) / Train = Val (not implemented yet)
- split table: {{scope.row.name}}, {{state.randomSplit.count[scope.row.name]}}, {{state.randomSplit.percent[scope.row.name]}}%
- "If image does not have such tags, it will be assigned to training set"
- "All images are in both training and validation sets"
- weights options: Pretrained on COCO / From custom model, handler "state.pretrainedWeights = `${state.modelSize}.pt`"
- hyperparameters: Multi-scale, Single class; "Training hyperparameters templates:" scratch / finetune, "Restore Defaults", "Edit settings in YAML format:"
- "Start training" button with validation messages "0 training classes are selected" and "Path to model weights is not defined"
- progress: {{data.progressName}}: {{data.currentProgressLabel}} / {{data.totalProgressLabel}}
- smoothing handler: state.smoothing = val; data.mBox/.mObjectness/.mClassification options.smoothingWeight = val;
- galleries: {{annotation.name}}
- artifacts: "Link to the directory with training artifacts will be here once training is finished", {{data.outputName}}
+
+
+ #yolov5-train .el-tabs.el-tabs-cards .el-radio {
+ display: flex;
+ align-items: start;
+ /*margin-bottom: 10px;*/
+ margin-left: 0;
+ white-space: normal;
+ }
+
+ #yolov5-train .el-tabs.el-tabs-cards .el-radio__label div {
+ color: #7f858e;
+ font-size: 13px;
+ }
+
+ .beautiful-table { border-collapse: collapse; }
+ .beautiful-table tr:nth-child(2n) { background-color: #f6f8fa; }
+ .beautiful-table td, .beautiful-table th {
+ border: 1px solid #dfe2e5;
+ padding: 6px 13px;
+ text-align: center;
+ line-height: 20px;
+ }
+
+ #yolov5-train .el-tabs.el-tabs-cards { border-radius: 4px; box-shadow: none; }
+ #yolov5-train .el-tabs.el-tabs-cards .el-tabs__header { background-color: #f6fafd; }
+ #yolov5-train .el-tabs.el-tabs-cards .el-tabs__nav { float: none; display: flex; justify-content: space-between; }
+ #yolov5-train .el-tabs.el-tabs-cards .el-tabs__item { flex: 1; margin-bottom: -3px; padding: 9px 16px 13px; height: auto; line-height: normal; border-radius: 4px; }
+
+
+ {% include 'supervisely/train/src/ui/input_project.html' %}
+ {% include 'supervisely/train/src/ui/classes.html' %}
+ {% include 'supervisely/train/src/ui/splits.html' %}
+ {% include 'supervisely/train/src/ui/architectures.html' %}
+ {% include 'supervisely/train/src/ui/hyperparameters.html' %}
+ {% include 'supervisely/train/src/ui/monitoring.html' %}
+ {% include 'supervisely/train/src/ui/artifacts.html' %}
\ No newline at end of file
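
The monolithic gui.html above is replaced by one Jinja include per training step; each include has a matching module under supervisely/train/src/ui/ that fills the data/state it renders (see ui/ui.py later in this diff). A minimal sketch of that pattern, with illustrative values:

```python
# Each ui/*.py module writes into shared dicts that the templates read.
data, state = {}, {}

def init_input_project(data):              # mirrors ui/input_project.py
    data["projectName"] = "lemons"         # illustrative values
    data["projectImagesCount"] = 6

init_input_project(data)
print(data)
# input_project.html then renders:
# "{{data.projectName}} ({{data.projectImagesCount}} images)"
```
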
diff --git a/supervisely/train/src/sly_init_ui.py b/supervisely/train/src/sly_init_ui.py
deleted file mode 100644
index a36940f5a4c2..000000000000
--- a/supervisely/train/src/sly_init_ui.py
+++ /dev/null
@@ -1,153 +0,0 @@
-import os
-import supervisely_lib as sly
-
-import sly_train_globals as globals
-import sly_metrics as metrics
-
-
-empty_gallery = {
- "content": {
- "projectMeta": sly.ProjectMeta().to_json(),
- "annotations": {},
- "layout": []
- }
-}
-
-
-def init_input_project(data, project_info):
- data["projectId"] = globals.project_id
- data["projectName"] = project_info.name
- data["projectImagesCount"] = project_info.items_count
- data["projectPreviewUrl"] = globals.api.image.preview_url(project_info.reference_image_url, 100, 100)
-
-
-def init_classes_stats(data, state, project_meta):
- stats = globals.api.project.get_stats(globals.project_id)
- class_images = {}
- for item in stats["images"]["objectClasses"]:
- class_images[item["objectClass"]["name"]] = item["total"]
- class_objects = {}
- for item in stats["objects"]["items"]:
- class_objects[item["objectClass"]["name"]] = item["total"]
-
- classes_json = project_meta.obj_classes.to_json()
- for obj_class in classes_json:
- obj_class["imagesCount"] = class_images[obj_class["title"]]
- obj_class["objectsCount"] = class_objects[obj_class["title"]]
-
- data["classes"] = classes_json
- state["selectedClasses"] = []
-
- state["classes"] = len(classes_json) * [True]
-
-
-def init_random_split(PROJECT, data, state):
- data["randomSplit"] = [
- {"name": "train", "type": "success"},
- {"name": "val", "type": "primary"},
- {"name": "total", "type": "gray"},
- ]
- data["totalImagesCount"] = PROJECT.items_count
-
- train_percent = 80
- train_count = int(PROJECT.items_count / 100 * train_percent)
- state["randomSplit"] = {
- "count": {
- "total": PROJECT.items_count,
- "train": train_count,
- "val": PROJECT.items_count - train_count
- },
- "percent": {
- "total": 100,
- "train": train_percent,
- "val": 100 - train_percent
- },
- "shareImagesBetweenSplits": False,
- "sliderDisabled": False,
- }
-
- state["splitMethod"] = 1
- state["trainTagName"] = ""
- state["valTagName"] = ""
-
-
-def init_model_settings(data, state):
- data["modelSizes"] = [
- {"label": "yolov5s", "config": "yolov5s.yaml", "params": "7.3M"},
- {"label": "yolov5m", "config": "yolov5m.yaml", "params": "21.4M"},
- {"label": "yolov5l", "config": "yolov5l.yaml", "params": "47.0M"},
- {"label": "yolov5x", "config": "yolov5x.yaml", "params": "87.7M"},
- ]
- state["modelSize"] = data["modelSizes"][0]["label"]
- state["modelWeightsOptions"] = 1
- state["pretrainedWeights"] = f'{data["modelSizes"][0]["label"]}.pt'
-
- # @TODO: for debug
- #state["weightsPath"] = "/yolov5_train/coco128_002/2390/weights/best.pt"
- state["weightsPath"] = ""
-
-
-def init_training_hyperparameters(state):
- state["epochs"] = 10
- state["batchSize"] = 16
- state["imgSize"] = 640
- state["multiScale"] = False
- state["singleClass"] = False
- state["device"] = '0'
- state["workers"] = 8 # 0 - for debug
- state["activeTabName"] = "General"
- state["hyp"] = {
- "scratch": globals.scratch_str,
- "finetune": globals.finetune_str,
- }
- state["hypRadio"] = "scratch"
-
-
-def init_start_state(state):
- state["started"] = False
- state["activeNames"] = []
-
-
-def init_galleries(data):
- data["vis"] = empty_gallery
- data["labelsVis"] = empty_gallery
- data["predVis"] = empty_gallery
- data["syncBindings"] = []
-
-
-def init_progress(data):
- data["progressName"] = ""
- data["currentProgress"] = 0
- data["totalProgress"] = 0
- data["currentProgressLabel"] = ""
- data["totalProgressLabel"] = ""
-
-
-def init_output(data):
- data["outputUrl"] = ""
- data["outputName"] = ""
-
-
-def init(data, state):
- init_input_project(data, globals.project_info)
- init_classes_stats(data, state, globals.project_meta)
- init_random_split(globals.project_info, data, state)
- init_model_settings(data, state)
- init_training_hyperparameters(state)
- init_start_state(state)
- init_galleries(data)
- init_progress(data)
- init_output(data)
- metrics.init(data, state)
-
-
-def set_output():
- file_info = globals.api.file.get_info_by_path(globals.team_id,
- os.path.join(globals.remote_artifacts_dir, 'results.png'))
- fields = [
- {"field": "data.outputUrl", "payload": globals.api.file.get_url(file_info.id)},
- {"field": "data.outputName", "payload": globals.remote_artifacts_dir},
- ]
- globals.api.app.set_fields(globals.task_id, fields)
- globals.api.task.set_output_directory(globals.task_id, file_info.id, globals.remote_artifacts_dir)
-
diff --git a/supervisely/train/src/sly_metrics.py b/supervisely/train/src/sly_metrics.py
index dd6f3b7ab800..06222df4c6ee 100644
--- a/supervisely/train/src/sly_metrics.py
+++ b/supervisely/train/src/sly_metrics.py
@@ -24,19 +24,19 @@ def init_chart(title, names, xs, ys, smoothing=None):
def init(data, state):
demo_x = [[], []] #[[1, 2, 3, 4], [2, 4, 6, 8]]
demo_y = [[], []] #[[10, 15, 13, 17], [16, 5, 11, 9]]
- data["mBox"] = init_chart("Box Loss",
- names=["train", "val"],
- xs=demo_x,
- ys=demo_y,
- smoothing=0.6)
+ data["mGIoU"] = init_chart("GIoU",
+ names=["train", "val"],
+ xs=demo_x,
+ ys=demo_y,
+ smoothing=0.6)
- data["mObjectness"] = init_chart("Obj Loss",
+ data["mObjectness"] = init_chart("Objectness",
names=["train", "val"],
xs=demo_x,
ys=demo_y,
smoothing=0.6)
- data["mClassification"] = init_chart("Cls Loss",
+ data["mClassification"] = init_chart("Classification",
names=["train", "val"],
xs=demo_x,
ys=demo_y,
@@ -54,23 +54,24 @@ def init(data, state):
state["smoothing"] = 0.6
-def send_metrics(epoch, epochs, metrics):
- sly.logger.debug(f"Metrics: epoch {epoch} / {epochs}", extra={"metrics": metrics})
+def send_metrics(epoch, epochs, metrics, log_period=1):
+ sly.logger.debug(f"Metrics: epoch {epoch + 1} / {epochs}", extra={"metrics": metrics})
- fields = [
- {"field": "data.mBox.series[0].data", "payload": [[epoch, metrics["train/box_loss"]]], "append": True},
- {"field": "data.mBox.series[1].data", "payload": [[epoch, metrics["val/box_loss"]]], "append": True},
+ if epoch % log_period == 0 or epoch + 1 == epochs:
+ fields = [
+ {"field": "data.mGIoU.series[0].data", "payload": [[epoch, metrics["train/box_loss"]]], "append": True},
+ {"field": "data.mGIoU.series[1].data", "payload": [[epoch, metrics["val/box_loss"]]], "append": True},
- {"field": "data.mObjectness.series[0].data", "payload": [[epoch, metrics["train/obj_loss"]]], "append": True},
- {"field": "data.mObjectness.series[1].data", "payload": [[epoch, metrics["val/obj_loss"]]], "append": True},
+ {"field": "data.mObjectness.series[0].data", "payload": [[epoch, metrics["train/obj_loss"]]], "append": True},
+ {"field": "data.mObjectness.series[1].data", "payload": [[epoch, metrics["val/obj_loss"]]], "append": True},
- {"field": "data.mClassification.series[0].data", "payload": [[epoch, metrics["train/cls_loss"]]], "append": True},
- {"field": "data.mClassification.series[1].data", "payload": [[epoch, metrics["val/cls_loss"]]], "append": True},
+ {"field": "data.mClassification.series[0].data", "payload": [[epoch, metrics["train/cls_loss"]]], "append": True},
+ {"field": "data.mClassification.series[1].data", "payload": [[epoch, metrics["val/cls_loss"]]], "append": True},
- {"field": "data.mPR.series[0].data", "payload": [[epoch, metrics["metrics/precision"]]], "append": True},
- {"field": "data.mPR.series[1].data", "payload": [[epoch, metrics["metrics/recall"]]], "append": True},
+ {"field": "data.mPR.series[0].data", "payload": [[epoch, metrics["metrics/precision"]]], "append": True},
+ {"field": "data.mPR.series[1].data", "payload": [[epoch, metrics["metrics/recall"]]], "append": True},
- {"field": "data.mMAP.series[0].data", "payload": [[epoch, metrics["metrics/mAP_0.5"]]], "append": True},
- {"field": "data.mMAP.series[1].data", "payload": [[epoch, metrics["metrics/mAP_0.5:0.95"]]], "append": True},
- ]
- globals.api.app.set_fields(globals.task_id, fields)
+ {"field": "data.mMAP.series[0].data", "payload": [[epoch, metrics["metrics/mAP_0.5"]]], "append": True},
+ {"field": "data.mMAP.series[1].data", "payload": [[epoch, metrics["metrics/mAP_0.5:0.95"]]], "append": True},
+ ]
+ globals.api.app.set_fields(globals.task_id, fields)
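
A quick worked example of the new log_period gate in send_metrics (nothing assumed beyond the condition in the diff): with epochs=10 and log_period=3, the charts receive points for epochs 0, 3, 6 and 9.

```python
# The gate from send_metrics: log every N-th epoch and always the last one.
epochs, log_period = 10, 3
logged = [epoch for epoch in range(epochs)
          if epoch % log_period == 0 or epoch + 1 == epochs]
print(logged)  # [0, 3, 6, 9]
```
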
diff --git a/supervisely/train/src/sly_metrics_utils.py b/supervisely/train/src/sly_metrics_utils.py
deleted file mode 100644
index a5d175ae766f..000000000000
--- a/supervisely/train/src/sly_metrics_utils.py
+++ /dev/null
@@ -1,142 +0,0 @@
-# old plotly implementation
-# =================================================================
-# import supervisely_lib as sly
-# import sly_train_globals as globals
-#
-#
-# chart_train_style = {
-# "name": "train",
-# "mode": "lines+markers",
-# "line": {
-# "color": "rgb(0, 0, 255)",
-# "width": 2
-# }
-# }
-#
-# chart_val_style = {
-# "name": "val",
-# "mode": "lines+markers",
-# "line": {
-# "color": "rgb(255, 128, 0)",
-# "width": 2
-# }
-# }
-#
-# chart_layout = {
-# "xaxis": {
-# # "title": "epoch",
-# "automargin": True
-# },
-# "yaxis": {
-# # "title": "value",
-# "automargin": True
-# },
-# "legend": {
-# "orientation": "h",
-# "yanchor": "bottom",
-# "y": 0.99,
-# "xanchor": "right",
-# "x": 1
-# }
-# }
-#
-#
-# def init_chart(title, names, colors, xs, ys):
-# data = []
-# for name, color, x, y in zip(names, colors, xs, ys):
-# data.append({
-# "x": x,
-# "y": y,
-# "name": name,
-# "mode": "lines+markers",
-# #"type": "scattergl",
-# "line": {
-# "color": f"rgb({color[0]}, {color[1]}, {color[2]})",
-# "width": 2
-# }
-# })
-#
-# chart = {
-# "data": data,
-# "layout": {
-# "title": {
-# "text": f"
{title}",
-# "xanchor": "left",
-# 'y': 0.97,
-# 'x': 0.03,
-# "font": {
-# "size": 14,
-# "color": "rgb(96, 96, 96)",
-# #"color": "rgb(0, 150, 0)",
-# }
-# },
-# **chart_layout
-# }
-# }
-# return chart
-#
-#
-# def init(data):
-# demo_x = [[], []] #[[1, 2, 3, 4], [2, 4, 6, 8]]
-# demo_y = [[], []] #[[10, 15, 13, 17], [16, 5, 11, 9]]
-# data["mBox"] = init_chart("Box Loss",
-# names=["train", "val"],
-# colors=[[0, 0, 255], [255, 128, 0]],
-# xs=demo_x,
-# ys=demo_y)
-#
-# data["mObjectness"] = init_chart("Objectness Loss",
-# names=["train", "val"],
-# colors=[[0, 0, 255], [255, 128, 0]],
-# xs=demo_x,
-# ys=demo_y)
-#
-# data["mClassification"] = init_chart("Classification Loss",
-# names=["train", "val"],
-# colors=[[0, 0, 255], [255, 128, 0]],
-# xs=demo_x,
-# ys=demo_y)
-#
-# data["mPR"] = init_chart("Precision / Recall",
-# names=["precision", "recall"],
-# colors=[[255, 0, 255], [127, 0, 255]],
-# xs=demo_x,
-# ys=demo_y)
-#
-# data["mMAP"] = init_chart("mAP",
-# names=["mAP@0.5", "mAP@0.5:0.95"],
-# colors=[[255, 0, 255], [0, 255, 255]],
-# xs=demo_x,
-# ys=demo_y)
-#
-#
-# def send_metrics(epoch, epochs, metrics):
-# sly.logger.debug(f"Metrics: epoch {epoch} / {epochs}", extra={"metrics": metrics})
-#
-# fields = [
-# {"field": "data.mBox.data[0].x", "payload": epoch, "append": True},
-# {"field": "data.mBox.data[1].x", "payload": epoch, "append": True},
-# {"field": "data.mBox.data[0].y", "payload": metrics["train/box_loss"], "append": True},
-# {"field": "data.mBox.data[1].y", "payload": metrics["val/box_loss"], "append": True},
-#
-# {"field": "data.mObjectness.data[0].x", "payload": epoch, "append": True},
-# {"field": "data.mObjectness.data[1].x", "payload": epoch, "append": True},
-# {"field": "data.mObjectness.data[0].y", "payload": metrics["train/obj_loss"], "append": True},
-# {"field": "data.mObjectness.data[1].y", "payload": metrics["val/obj_loss"], "append": True},
-#
-# {"field": "data.mClassification.data[0].x", "payload": epoch, "append": True},
-# {"field": "data.mClassification.data[1].x", "payload": epoch, "append": True},
-# {"field": "data.mClassification.data[0].y", "payload": metrics["train/cls_loss"], "append": True},
-# {"field": "data.mClassification.data[1].y", "payload": metrics["val/cls_loss"], "append": True},
-#
-# {"field": "data.mPR.data[0].x", "payload": epoch, "append": True},
-# {"field": "data.mPR.data[1].x", "payload": epoch, "append": True},
-# {"field": "data.mPR.data[0].y", "payload": metrics["metrics/precision"], "append": True},
-# {"field": "data.mPR.data[1].y", "payload": metrics["metrics/recall"], "append": True},
-#
-# {"field": "data.mMAP.data[0].x", "payload": epoch, "append": True},
-# {"field": "data.mMAP.data[1].x", "payload": epoch, "append": True},
-# {"field": "data.mMAP.data[0].y", "payload": metrics["metrics/mAP_0.5"], "append": True},
-# {"field": "data.mMAP.data[1].y", "payload": metrics["metrics/mAP_0.5:0.95"], "append": True},
-# ]
-# globals.api.app.set_fields(globals.task_id, fields)
diff --git a/supervisely/train/src/sly_train.py b/supervisely/train/src/sly_train.py
index 77d77c00c691..91d0fb3d1758 100644
--- a/supervisely/train/src/sly_train.py
+++ b/supervisely/train/src/sly_train.py
@@ -1,23 +1,20 @@
import os
import supervisely_lib as sly
-from sly_train_globals import init_project_info_and_meta, \
- my_app, task_id, \
- team_id, workspace_id, project_id, \
- root_source_path, scratch_str, finetune_str
-
-# to import correct values
-# project_info, project_meta, \
-# local_artifacts_dir, remote_artifacts_dir
import sly_train_globals as g
-from sly_train_val_split import train_val_split
-import sly_init_ui as ui
-from sly_prepare_data import filter_and_transform_labels
+from sly_train_globals import \
+ my_app, task_id, \
+ team_id, workspace_id, project_id, \
+ root_source_dir, scratch_str, finetune_str
+
+import ui as ui
from sly_train_utils import init_script_arguments
from sly_utils import get_progress_cb, upload_artifacts
-
-
+from splits import get_train_val_sets, verify_train_val_sets
+import yolov5_format as yolov5_format
+from architectures import prepare_weights
+from artifacts import set_task_output
import train as train_yolov5
@@ -33,48 +30,53 @@ def restore_hyp(api: sly.Api, task_id, context, state, app_logger):
@my_app.callback("train")
@sly.timeit
def train(api: sly.Api, task_id, context, state, app_logger):
- api.app.set_field(task_id, "state.activeNames", ["labels", "train", "pred", "metrics"]) #"logs",
-
- # prepare directory for original Supervisely project
- project_dir = os.path.join(my_app.data_dir, "sly_project")
- sly.fs.mkdir(project_dir)
- sly.fs.clean_dir(project_dir) # useful for debug, has no effect in production
-
- # download Sypervisely project (using cache)
- sly.download_project(api, project_id, project_dir, cache=my_app.cache,
- progress_cb=get_progress_cb("Download data (using cache)", g.project_info.items_count * 2))
-
- # prepare directory for transformed data (nn will use it for training)
- yolov5_format_dir = os.path.join(my_app.data_dir, "train_data")
- sly.fs.mkdir(yolov5_format_dir)
- sly.fs.clean_dir(yolov5_format_dir) # useful for debug, has no effect in production
-
- # split data to train/val sets, filter objects by classes, convert Supervisely project to YOLOv5 format(COCO)
- train_split, val_split = train_val_split(project_dir, state)
- train_classes = state["selectedClasses"]
- progress_cb = get_progress_cb("Convert Supervisely to YOLOv5 format", g.project_info.items_count)
- filter_and_transform_labels(project_dir, train_classes, train_split, val_split, yolov5_format_dir, progress_cb)
-
- # download initial weights from team files
- if state["modelWeightsOptions"] == 2: # transfer learning from custom weights
- weights_path_remote = state["weightsPath"]
- weights_path_local = os.path.join(my_app.data_dir, sly.fs.get_file_name_with_ext(weights_path_remote))
- file_info = api.file.get_info_by_path(team_id, weights_path_remote)
- api.file.download(team_id, weights_path_remote, weights_path_local, my_app.cache,
- progress_cb=get_progress_cb("Download weights", file_info.sizeb, is_size=True))
-
- # init sys.argv for main training script
- init_script_arguments(state, yolov5_format_dir, g.project_info.name)
-
- # start train script
- get_progress_cb("YOLOv5: Scanning data ", 1)(1)
- train_yolov5.main()
-
- # upload artifacts directory to Team Files
- upload_artifacts(g.local_artifacts_dir, g.remote_artifacts_dir)
-
- # show path to the artifacts directory in Team Files
- ui.set_output()
+ try:
+ prepare_weights(state)
+
+ # prepare directory for original Supervisely project
+ project_dir = os.path.join(my_app.data_dir, "sly_project")
+ sly.fs.mkdir(project_dir, remove_content_if_exists=True) # clean content for debug, has no effect in prod
+
+ # download and preprocess Supervisely project (using cache)
+ download_progress = get_progress_cb("Download data (using cache)", g.project_info.items_count * 2)
+ sly.download_project(api, project_id, project_dir, cache=my_app.cache, progress_cb=download_progress)
+
+ # preprocessing: transform labels to bboxes, filter classes, ...
+ sly.Project.to_detection_task(project_dir, inplace=True)
+ train_classes = state["selectedClasses"]
+ sly.Project.remove_classes_except(project_dir, classes_to_keep=train_classes, inplace=True)
+ if state["unlabeledImages"] == "ignore":
+ sly.Project.remove_items_without_objects(project_dir, inplace=True)
+
+ # split to train / validation sets (paths to images and annotations)
+ train_set, val_set = get_train_val_sets(project_dir, state)
+ verify_train_val_sets(train_set, val_set)
+ sly.logger.info(f"Train set: {len(train_set)} images")
+ sly.logger.info(f"Val set: {len(val_set)} images")
+
+ # prepare directory for data in YOLOv5 format (nn will use it for training)
+ train_data_dir = os.path.join(my_app.data_dir, "train_data")
+ sly.fs.mkdir(train_data_dir, remove_content_if_exists=True) # clean content for debug, has no effect in prod
+
+ # convert Supervisely project to YOLOv5 format
+ progress_cb = get_progress_cb("Convert Supervisely to YOLOv5 format", len(train_set) + len(val_set))
+ yolov5_format.transform(project_dir, train_data_dir, train_set, val_set, progress_cb)
+
+ # init sys.argv for main training script
+ init_script_arguments(state, train_data_dir, g.project_info.name)
+
+ # start train script
+ api.app.set_field(task_id, "state.activeNames", ["labels", "train", "pred", "metrics"]) # "logs",
+ get_progress_cb("YOLOv5: Scanning data ", 1)(1)
+ train_yolov5.main()
+
+ # upload artifacts directory to Team Files
+ upload_artifacts(g.local_artifacts_dir, g.remote_artifacts_dir)
+ set_task_output()
+ except Exception as e:
+ my_app.show_modal_window(f"Oops! Something went wrong, please try again or contact tech support. "
+ f"Find more info in the app logs. Error: {repr(e)}", level="error")
+ api.app.set_field(task_id, "state.started", False)
# stop application
get_progress_cb("Finished, app is stopped automatically", 1)(1)
@@ -92,8 +94,7 @@ def main():
state = {}
data["taskId"] = task_id
- # read project information and meta (classes + tags)
- init_project_info_and_meta()
+ my_app.compile_template(g.root_source_dir)
# init data for UI widgets
ui.init(data, state)
@@ -101,13 +102,10 @@ def main():
my_app.run(data=data, state=state)
-# @TODO: change pip requirements to quickly skip them (already installed)
-# @TODO: handle soft stop event
-
+# @TODO: doublecheck inference
+# @TODO: add to readme - open_app.lnk
# New features:
-# @TODO: adam or SGD opt?
-# @TODO: train == val - handle case in data_config.yaml to avoid data duplication
# @TODO: resume training
-# @TODO: repeat dataset (for small lemons)
+# @TODO: save checkpoint every N-th epochs
if __name__ == "__main__":
sly.main_wrapper("main", main)
diff --git a/supervisely/train/src/sly_train_globals.py b/supervisely/train/src/sly_train_globals.py
index 549a8976c3f9..c89dc9d15535 100644
--- a/supervisely/train/src/sly_train_globals.py
+++ b/supervisely/train/src/sly_train_globals.py
@@ -14,30 +14,34 @@
local_artifacts_dir = None
remote_artifacts_dir = None
+project_info = api.project.get_info_by_id(project_id)
+project_meta = sly.ProjectMeta.from_json(api.project.get_meta(project_id))
-project_info = None
-project_meta = None
+root_source_dir = str(Path(sys.argv[0]).parents[3])
+sly.logger.info(f"Root source directory: {root_source_dir}")
+sys.path.append(root_source_dir)
-root_source_path = str(Path(sys.argv[0]).parents[3])
-sly.logger.info(f"Root source directory: {root_source_path}")
-sys.path.append(root_source_path)
+source_path = str(Path(sys.argv[0]).parents[0])
+sly.logger.info(f"Source directory: {source_path}")
+sys.path.append(source_path)
-# script_path = str(Path(sys.argv[0]).parents[3]))
-# root_app_dir = script_path.parent.parent.absolute()
-# sly.logger.info(f"Root app directory: {root_app_dir}")
-# sys.path.append(root_app_dir)
+ui_sources_dir = os.path.join(source_path, "ui")
+sys.path.append(ui_sources_dir)
+sly.logger.info(f"Added to sys.path: {ui_sources_dir}")
+with open(os.path.join(root_source_dir, "data/hyp.scratch.yaml"), 'r') as file:
+ scratch_str = file.read() # yaml.safe_load(
-def init_project_info_and_meta():
- global project_info, project_meta
- project_info = api.project.get_info_by_id(project_id)
- project_meta_json = api.project.get_meta(project_id)
- project_meta = sly.ProjectMeta.from_json(project_meta_json)
-
+with open(os.path.join(root_source_dir, "data/hyp.finetune.yaml"), 'r') as file:
+ finetune_str = file.read() # yaml.safe_load(
-with open(os.path.join(root_source_path, "data/hyp.scratch.yaml"), 'r') as file:
- scratch_str = file.read() # yaml.safe_load(
-with open(os.path.join(root_source_path, "data/hyp.finetune.yaml"), 'r') as file:
- finetune_str = file.read() # yaml.safe_load(
\ No newline at end of file
+runs_dir = os.path.join(my_app.data_dir, 'runs')
+sly.fs.mkdir(runs_dir, remove_content_if_exists=True) # for debug, does nothing in production
+experiment_name = str(task_id)
+local_artifacts_dir = os.path.join(runs_dir, experiment_name)
+sly.logger.info(f"All training artifacts will be saved to local directory {local_artifacts_dir}")
+remote_artifacts_dir = os.path.join("/yolov5_train", project_info.name, experiment_name)
+remote_artifacts_dir = api.file.get_free_dir_name(team_id, remote_artifacts_dir)
+sly.logger.info(f"After training artifacts will be uploaded to Team Files: {remote_artifacts_dir}")
\ No newline at end of file
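
The `_exp_index` retry loops removed from sly_train_utils.py are now replaced by `api.file.get_free_dir_name`. A hedged local analogue of the naming scheme, reconstructed from the deleted `"{}_{:03d}"` loop (the real API checks Team Files rather than the local filesystem):

```python
import os

def get_free_dir_name(base_dir: str) -> str:
    """Append _001, _002, ... until the directory name is unused."""
    candidate, idx = base_dir, 1
    while os.path.isdir(candidate):
        candidate = f"{base_dir}_{idx:03d}"
        idx += 1
    return candidate

print(get_free_dir_name("/yolov5_train/lemons/2390"))
```
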
diff --git a/supervisely/train/src/sly_train_utils.py b/supervisely/train/src/sly_train_utils.py
index 0d7f6a0e3570..80e7db28d4a3 100644
--- a/supervisely/train/src/sly_train_utils.py
+++ b/supervisely/train/src/sly_train_utils.py
@@ -11,28 +11,23 @@
def init_script_arguments(state, yolov5_format_dir, input_project_name):
global local_artifacts_dir, remote_artifacts_dir
+ sys.argv.append("--sly")
+
data_path = os.path.join(yolov5_format_dir, 'data_config.yaml')
sys.argv.extend(["--data", data_path])
- try:
- hyp_content = yaml.safe_load(state["hyp"][state["hypRadio"]])
- hyp = os.path.join(my_app.data_dir, 'hyp.custom.yaml')
- with open(hyp, 'w') as f:
- f.write(state["hyp"][state["hypRadio"]])
- except yaml.YAMLError as e:
- sly.logger.error(repr(e))
- api.app.set_field(task_id, "state.started", False)
- return
+ hyp_content = yaml.safe_load(state["hyp"][state["hypRadio"]])
+ hyp = os.path.join(my_app.data_dir, 'hyp.custom.yaml')
+ with open(hyp, 'w') as f:
+ f.write(state["hyp"][state["hypRadio"]])
sys.argv.extend(["--hyp", hyp])
- weights = "" # random (not tested)
- if state["modelWeightsOptions"] == 1:
- weights = state["pretrainedWeights"]
- cfg = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../../models', f"{state['modelSize']}.yaml")
+ if state["weightsInitialization"] == "coco":
+ model_name = state['selectedModel'].lower()
+ _sub_path = "models/hub" if model_name.endswith('6') else "models"
+ cfg = os.path.join(g.root_source_dir, _sub_path, f"{model_name}.yaml")
sys.argv.extend(["--cfg", cfg])
- elif state["modelWeightsOptions"] == 2:
- weights = state["weightsPath"]
- sys.argv.extend(["--weights", weights])
+ sys.argv.extend(["--weights", state["weightsPath"]])
sys.argv.extend(["--epochs", str(state["epochs"])])
sys.argv.extend(["--batch-size", str(state["batchSize"])])
@@ -45,30 +40,12 @@ def init_script_arguments(state, yolov5_format_dir, input_project_name):
if "workers" in state:
sys.argv.extend(["--workers", str(state["workers"])])
+ if state["optimizer"] == "Adam":
+ sys.argv.append("--adam")
- training_dir = os.path.join(my_app.data_dir, 'experiment', input_project_name)
- experiment_name = str(task_id)
- local_artifacts_dir = os.path.join(training_dir, experiment_name)
- _exp_index = 1
- while sly.fs.dir_exists(local_artifacts_dir):
- experiment_name = "{}_{:03d}".format(task_id, _exp_index)
- local_artifacts_dir = os.path.join(training_dir, experiment_name)
- _exp_index += 1
- g.local_artifacts_dir = local_artifacts_dir
-
- sys.argv.extend(["--project", training_dir])
- sys.argv.extend(["--name", experiment_name])
-
- sys.argv.append("--sly")
-
- remote_experiment_name = str(task_id)
- remote_artifacts_dir = os.path.join("/yolov5_train", input_project_name, remote_experiment_name)
- _exp_index = 1
- while api.file.dir_exists(team_id, remote_artifacts_dir):
- remote_experiment_name = "{}_{:03d}".format(task_id, _exp_index)
- remote_artifacts_dir = os.path.join("/yolov5_train", input_project_name, remote_experiment_name)
- _exp_index += 1
- g.remote_artifacts_dir = remote_artifacts_dir
+ sys.argv.extend(["--metrics_period", str(state["metricsPeriod"])])
+ sys.argv.extend(["--project", g.runs_dir])
+ sys.argv.extend(["--name", g.experiment_name])
def send_epoch_log(epoch, epochs, progress):
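
For context, a hedged sketch of what these sys.argv extensions amount to: the app process assembles an argv list equivalent to a shell invocation of yolov5's argparse-based train.py, then calls train_yolov5.main(). Flag names are taken from the diff above; paths and values are illustrative.

```python
import sys

# Reset argv as if train.py were launched from a shell, then append the
# flags that init_script_arguments() adds (illustrative values).
sys.argv = ["train.py"]
sys.argv.append("--sly")
sys.argv.extend(["--data", "/app_data/train_data/data_config.yaml"])
sys.argv.extend(["--hyp", "/app_data/hyp.custom.yaml"])
sys.argv.extend(["--weights", "yolov5s.pt"])
sys.argv.extend(["--epochs", "10"])
sys.argv.extend(["--batch-size", "16"])
sys.argv.extend(["--metrics_period", "1"])
print(" ".join(sys.argv))
# train_yolov5.main() would now parse these exactly like CLI arguments.
```
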
diff --git a/supervisely/train/src/sly_train_val_split.py b/supervisely/train/src/sly_train_val_split.py
deleted file mode 100644
index e9807c5c3b0a..000000000000
--- a/supervisely/train/src/sly_train_val_split.py
+++ /dev/null
@@ -1,55 +0,0 @@
-import random
-import supervisely_lib as sly
-
-
-def _list_items(project_dir):
- items = []
- project = sly.Project(project_dir, sly.OpenMode.READ)
- for dataset in project:
- for item_name in dataset:
- items.append((dataset.name, item_name))
- return items
-
-
-def _split_random(project_dir, train_count, val_count):
- items = _list_items(project_dir)
- random.shuffle(items)
- train_items = items[:train_count]
- val_items = items[train_count:]
- if len(val_items) != val_count:
- sly.logger.warn("Issue in train/val random split in GUI", extra={
- "train_count": train_count,
- "val_count": val_count,
- "items_count": len(items),
- "train_count + val_count": train_count + val_count
- })
- #raise RuntimeError("Incorrect train/val random split")
- return train_items, val_items
-
-
-def _split_same(project_dir):
- items = _list_items(project_dir)
- return items, items.copy()
-
-
-def _split_tags(project_dir, train_tag_name, val_tag_name):
- raise NotImplementedError()
-
-
-def train_val_split(project_dir, state):
- split_method = state["splitMethod"]
- train_count = state["randomSplit"]["count"]["train"]
- val_count = state["randomSplit"]["count"]["val"]
-
- train_split = None
- val_split = None
- if split_method == 1: # Random
- train_split, val_split = _split_random(project_dir, train_count, val_count)
- elif split_method == 2: # Based on image tags
- train_split, val_split = _split_tags()
- elif split_method == 3: # Train = Val
- train_split, val_split = _split_same()
- else:
- raise ValueError(f"Train/val split method: {split_method} unknown")
-
- return train_split, val_split
\ No newline at end of file
diff --git a/supervisely/train/src/sly_utils.py b/supervisely/train/src/sly_utils.py
index ba652011a4ea..5dd875647fe9 100644
--- a/supervisely/train/src/sly_utils.py
+++ b/supervisely/train/src/sly_utils.py
@@ -36,10 +36,20 @@ def update_uploading_progress(count, api: sly.Api, task_id, progress: sly.Progre
_update_progress_ui(api, task_id, progress, stdout_print=True)
+def _save_link_to_ui(local_dir, app_url):
+ # save report to file *.lnk (link to report)
+ name = "open_app.lnk"
+ local_path = os.path.join(local_dir, name)
+ sly.fs.ensure_base_path(local_path)
+ with open(local_path, "w") as text_file:
+ print(app_url, file=text_file)
+
+
def upload_artifacts(local_dir, remote_dir):
def _gen_message(current, total):
return f"Upload artifacts to Team Files [{current}/{total}] "
+ _save_link_to_ui(local_dir, globals.my_app.app_url)
local_files = sly.fs.list_files_recursively(local_dir)
total_size = sum([sly.fs.get_file_size(file_path) for file_path in local_files])
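
A standalone sketch of what `_save_link_to_ui` produces (directory and URL are illustrative): a one-line open_app.lnk file placed next to the artifacts, so the finished session can be reopened from Team Files.

```python
import os

local_dir = "/tmp/artifacts"                              # illustrative
app_url = "https://app.supervise.ly/apps/sessions/1234"   # illustrative

os.makedirs(local_dir, exist_ok=True)
with open(os.path.join(local_dir, "open_app.lnk"), "w") as text_file:
    print(app_url, file=text_file)  # single line containing the app URL
```
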
diff --git a/supervisely/train/src/ui/architectures.html b/supervisely/train/src/ui/architectures.html
new file mode 100644
index 000000000000..4b31235c18d9
--- /dev/null
+++ b/supervisely/train/src/ui/architectures.html
@@ -0,0 +1,46 @@
+ [markup lost in extraction; recoverable text from the template follows]
+ Pretrained on COCO
+ Default pretrained checkpoints provided by authors of YOLOv5
+ (models comparison table: header subtitles {{row["subtitle"]}}, cells {{model[column.key]}})
+ Custom weights
\ No newline at end of file
diff --git a/supervisely/train/src/ui/architectures.py b/supervisely/train/src/ui/architectures.py
new file mode 100644
index 000000000000..aa8b09240041
--- /dev/null
+++ b/supervisely/train/src/ui/architectures.py
@@ -0,0 +1,152 @@
+import errno
+import os
+import sly_train_globals as g
+from sly_utils import get_progress_cb
+import supervisely_lib as sly
+
+
+def get_models_list():
+ return [
+ {
+ "config": "",
+ "weightsUrl": "",
+ "Model": "YOLOv5s",
+ "Size": 640,
+ "mAP^val": 36.7,
+ "mAP^test": 36.7,
+ "mAP^val_0.5": 55.4,
+ "Speed": 2.0,
+ "Params": 7.3,
+ "FLOPS": 17.0,
+ },
+ {
+ "config": "",
+ "weightsUrl": "",
+ "Model": "YOLOv5m",
+ "Size": 640,
+ "mAP^val": 44.5,
+ "mAP^test": 44.5,
+ "mAP^val_0.5": 63.1,
+ "Speed": 2.7,
+ "Params": 21.4,
+ "FLOPS": 51.3,
+ },
+ {
+ "config": "",
+ "weightsUrl": "",
+ "Model": "YOLOv5l",
+ "Size": 640,
+ "mAP^val": 48.2,
+ "mAP^test": 48.2,
+ "mAP^val_0.5": 66.9,
+ "Speed": 3.8,
+ "Params": 47.0,
+ "FLOPS": 115.4,
+ },
+ {
+ "config": "",
+ "weightsUrl": "",
+ "Model": "YOLOv5x",
+ "Size": 640,
+ "mAP^val": 50.4,
+ "mAP^test": 50.4,
+ "mAP^val_0.5": 68.8,
+ "Speed": 6.1,
+ "Params": 87.7,
+ "FLOPS": 218.8,
+ },
+ {
+ "config": "",
+ "weightsUrl": "",
+ "Model": "YOLOv5s6",
+ "Size": 1280,
+ "mAP^val": 43.3,
+ "mAP^test": 43.3,
+ "mAP^val_0.5": 61.9,
+ "Speed": 4.3,
+ "Params": 12.7,
+ "FLOPS": 17.4,
+ },
+ {
+ "config": "",
+ "weightsUrl": "",
+ "Model": "YOLOv5m6",
+ "Size": 1280,
+ "mAP^val": 50.5,
+ "mAP^test": 50.5,
+ "mAP^val_0.5": 68.7,
+ "Speed": 8.4,
+ "Params": 35.9,
+ "FLOPS": 52.4,
+ },
+ {
+ "config": "",
+ "weightsUrl": "",
+ "Model": "YOLOv5l6",
+ "Size": 1280,
+ "mAP^val": 53.4,
+ "mAP^test": 53.4,
+ "mAP^val_0.5": 71.1,
+ "Speed": 12.3,
+ "Params": 77.2,
+ "FLOPS": 117.7,
+ },
+ {
+ "config": "",
+ "weightsUrl": "",
+ "Model": "YOLOv5x6",
+ "Size": 1280,
+ "mAP^val": 54.4,
+ "mAP^test": 54.4,
+ "mAP^val_0.5": 72.0,
+ "Speed": 22.4,
+ "Params": 141.8,
+ "FLOPS": 222.9,
+ },
+ ]
+
+
+def get_table_columns():
+ return [
+ {"key": "Model", "title": "Model", "subtitle": None},
+ {"key": "Size", "title": "Size", "subtitle": "(pixels)"},
+ {"key": "mAP^val", "title": "mAP
val", "subtitle": "0.5:0.95"},
+ {"key": "mAP^test", "title": "mAP
test", "subtitle": "0.5:0.95"},
+ {"key": "mAP^val_0.5", "title": "mAP
val", "subtitle": "0.5"},
+ {"key": "Speed", "title": "Speed", "subtitle": "V100 (ms)"},
+ {"key": "Params", "title": "Params", "subtitle": "(M)"},
+ {"key": "FLOPS", "title": "FLOPS", "subtitle": "640 (B)"},
+ ]
+
+
+def init(data, state):
+ data["models"] = get_models_list()
+ data["modelColumns"] = get_table_columns()
+ state["selectedModel"] = "YOLOv5s"
+ state["weightsInitialization"] = "coco"
+
+ # @TODO: for debug
+ #state["weightsPath"] = "/yolov5_train/coco128_002/2390/weights/best.pt"
+ state["weightsPath"] = ""
+
+
+def prepare_weights(state):
+ if state["weightsInitialization"] == "custom":
+ # download custom weights
+ weights_path_remote = state["weightsPath"]
+ if not weights_path_remote.endswith(".pt"):
+ raise ValueError(f"Weights file has unsupported extension {sly.fs.get_file_ext(weights_path_remote)}. "
+ f"Supported: '.pt'")
+ weights_path_local = os.path.join(g.my_app.data_dir, sly.fs.get_file_name_with_ext(weights_path_remote))
+ file_info = g.api.file.get_info_by_path(g.team_id, weights_path_remote)
+ if file_info is None:
+ raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), weights_path_remote)
+ progress_cb = get_progress_cb("Download weights", file_info.sizeb, is_size=True)
+ g.api.file.download(g.team_id, weights_path_remote, weights_path_local, g.my_app.cache, progress_cb)
+
+ state["_weightsPath"] = weights_path_remote
+ state["weightsPath"] = weights_path_local
+ else:
+ model_name = state['selectedModel'].lower()
+ state["weightsPath"] = f"{model_name}.pt"
+ sly.logger.info("Pretrained COCO weights will be added automatically")
diff --git a/supervisely/train/src/ui/artifacts.html b/supervisely/train/src/ui/artifacts.html
new file mode 100644
index 000000000000..437aa169e248
--- /dev/null
+++ b/supervisely/train/src/ui/artifacts.html
@@ -0,0 +1,14 @@
+ [markup lost in extraction; recoverable text from the template follows]
+ Link to the directory with training artifacts will be here once training is finished
+ {{data.outputName}}
\ No newline at end of file
diff --git a/supervisely/train/src/ui/artifacts.py b/supervisely/train/src/ui/artifacts.py
new file mode 100644
index 000000000000..cceafcabc13c
--- /dev/null
+++ b/supervisely/train/src/ui/artifacts.py
@@ -0,0 +1,17 @@
+import os
+import sly_train_globals as g
+
+
+def init(data):
+ data["outputUrl"] = None
+ data["outputName"] = None
+
+
+def set_task_output():
+ file_info = g.api.file.get_info_by_path(g.team_id, os.path.join(g.remote_artifacts_dir, 'results.png'))
+ fields = [
+ {"field": "data.outputUrl", "payload": g.api.file.get_url(file_info.id)},
+ {"field": "data.outputName", "payload": g.remote_artifacts_dir},
+ ]
+ g.api.app.set_fields(g.task_id, fields)
+ g.api.task.set_output_directory(g.task_id, file_info.id, g.remote_artifacts_dir)
\ No newline at end of file
diff --git a/supervisely/train/src/ui/classes.html b/supervisely/train/src/ui/classes.html
new file mode 100644
index 000000000000..145d4df23632
--- /dev/null
+++ b/supervisely/train/src/ui/classes.html
@@ -0,0 +1,27 @@
+ [markup lost in extraction; a selectable classes table with handler and cell template]
+ state.selectedClasses = val.map(x => x.title);
+ {{ scope.row.title }}
\ No newline at end of file
diff --git a/supervisely/train/src/ui/classes.py b/supervisely/train/src/ui/classes.py
new file mode 100644
index 000000000000..429e68d945bf
--- /dev/null
+++ b/supervisely/train/src/ui/classes.py
@@ -0,0 +1,25 @@
+import supervisely_lib as sly
+
+
+def init(api: sly.Api, data, state, project_id, project_meta: sly.ProjectMeta):
+ stats = api.project.get_stats(project_id)
+ class_images = {}
+ for item in stats["images"]["objectClasses"]:
+ class_images[item["objectClass"]["name"]] = item["total"]
+ class_objects = {}
+ for item in stats["objects"]["items"]:
+ class_objects[item["objectClass"]["name"]] = item["total"]
+
+ classes_json = project_meta.obj_classes.to_json()
+ for obj_class in classes_json:
+ obj_class["imagesCount"] = class_images[obj_class["title"]]
+ obj_class["objectsCount"] = class_objects[obj_class["title"]]
+
+ unlabeled_count = 0
+ for ds_counter in stats["images"]["datasets"]:
+ unlabeled_count += ds_counter["imagesNotMarked"]
+
+ data["classes"] = classes_json
+ state["selectedClasses"] = []
+ state["classes"] = len(classes_json) * [True]
+ data["unlabeledCount"] = unlabeled_count
\ No newline at end of file
diff --git a/supervisely/train/src/ui/hyperparameters.html b/supervisely/train/src/ui/hyperparameters.html
new file mode 100644
index 000000000000..f346fb020f35
--- /dev/null
+++ b/supervisely/train/src/ui/hyperparameters.html
@@ -0,0 +1,75 @@
+ [markup lost in extraction; recoverable text from the template follows]
+ Multi-scale
+ Single class
+ Log metrics every (number input) epochs
+ Scratch mode: Recommended hyperparameters for training from scratch ("Restore Defaults")
+ Finetune mode: Recommended hyperparameters for model finetuning ("Restore Defaults")
\ No newline at end of file
diff --git a/supervisely/train/src/ui/hyperparameters.py b/supervisely/train/src/ui/hyperparameters.py
new file mode 100644
index 000000000000..3b28480d143e
--- /dev/null
+++ b/supervisely/train/src/ui/hyperparameters.py
@@ -0,0 +1,19 @@
+import sly_train_globals as g
+
+
+def init(state):
+ state["epochs"] = 10
+ state["batchSize"] = 16
+ state["imgSize"] = 640
+ state["multiScale"] = False
+ state["singleClass"] = False
+ state["device"] = '0'
+ state["workers"] = 8 # 0 - for debug @TODO: for debug
+ state["activeTabName"] = "General"
+ state["hyp"] = {
+ "scratch": g.scratch_str,
+ "finetune": g.finetune_str,
+ }
+ state["hypRadio"] = "scratch"
+ state["optimizer"] = "SGD"
+ state["metricsPeriod"] = 1
diff --git a/supervisely/train/src/ui/input_project.html b/supervisely/train/src/ui/input_project.html
new file mode 100644
index 000000000000..ff0ef021bc63
--- /dev/null
+++ b/supervisely/train/src/ui/input_project.html
@@ -0,0 +1,8 @@
+ [markup lost in extraction; project preview card]
+ {{data.projectName}} ({{data.projectImagesCount}} images)
\ No newline at end of file
diff --git a/supervisely/train/src/ui/input_project.py b/supervisely/train/src/ui/input_project.py
new file mode 100644
index 000000000000..aeda250bd7f5
--- /dev/null
+++ b/supervisely/train/src/ui/input_project.py
@@ -0,0 +1,8 @@
+import sly_train_globals as g
+
+
+def init(data):
+ data["projectId"] = g.project_info.id
+ data["projectName"] = g.project_info.name
+ data["projectImagesCount"] = g.project_info.items_count
+ data["projectPreviewUrl"] = g.api.image.preview_url(g.project_info.reference_image_url, 100, 100)
diff --git a/supervisely/train/src/ui/monitoring.html b/supervisely/train/src/ui/monitoring.html
new file mode 100644
index 000000000000..30fcf6d3c5d8
--- /dev/null
+++ b/supervisely/train/src/ui/monitoring.html
@@ -0,0 +1,115 @@
+ [markup lost in extraction; recoverable text from the template follows]
+ Start training
+ 0 training classes are selected
+ Path to model weights is not defined
+ {{data.progressName}}: {{data.currentProgressLabel}} / {{data.totalProgressLabel}}
+ galleries with {{annotation.name}} captions
+ smoothing slider handler:
+ state.smoothing = val;
+ data.mGIoU.options.smoothingWeight = val;
+ data.mObjectness.options.smoothingWeight = val;
+ data.mClassification.options.smoothingWeight = val;
\ No newline at end of file
diff --git a/supervisely/train/src/ui/monitoring.py b/supervisely/train/src/ui/monitoring.py
new file mode 100644
index 000000000000..ec25c0562b35
--- /dev/null
+++ b/supervisely/train/src/ui/monitoring.py
@@ -0,0 +1,44 @@
+import supervisely_lib as sly
+import sly_metrics as metrics
+
+
+empty_gallery = {
+ "content": {
+ "projectMeta": sly.ProjectMeta().to_json(),
+ "annotations": {},
+ "layout": []
+ }
+}
+
+
+def init(data, state):
+ _init_start_state(state)
+ _init_galleries(data)
+ _init_progress(data)
+ _init_output(data)
+ metrics.init(data, state)
+
+
+def _init_start_state(state):
+ state["started"] = False
+ state["activeNames"] = []
+
+
+def _init_galleries(data):
+ data["vis"] = empty_gallery
+ data["labelsVis"] = empty_gallery
+ data["predVis"] = empty_gallery
+ data["syncBindings"] = []
+
+
+def _init_progress(data):
+ data["progressName"] = ""
+ data["currentProgress"] = 0
+ data["totalProgress"] = 0
+ data["currentProgressLabel"] = ""
+ data["totalProgressLabel"] = ""
+
+
+def _init_output(data):
+ data["outputUrl"] = ""
+ data["outputName"] = ""
\ No newline at end of file
diff --git a/supervisely/train/src/ui/splits.html b/supervisely/train/src/ui/splits.html
new file mode 100644
index 000000000000..81130cc43b90
--- /dev/null
+++ b/supervisely/train/src/ui/splits.html
@@ -0,0 +1,133 @@
+ [markup lost in extraction; recoverable text from the template follows]
+ Random: Shuffle data and split with defined probability
+ (count/percent table over {{scope.row.name}}: {{state.randomSplit.count[scope.row.name]}}, {{state.randomSplit.percent[scope.row.name]}}%)
+ Based on image tags: Images should have assigned train or val tag
+ Based on datasets: Select one or several datasets for every split
\ No newline at end of file
diff --git a/supervisely/train/src/ui/splits.py b/supervisely/train/src/ui/splits.py
new file mode 100644
index 000000000000..6ac50b2f4513
--- /dev/null
+++ b/supervisely/train/src/ui/splits.py
@@ -0,0 +1,72 @@
+import supervisely_lib as sly
+
+
+def init(project_info, project_meta: sly.ProjectMeta, data, state):
+ data["randomSplit"] = [
+ {"name": "train", "type": "success"},
+ {"name": "val", "type": "primary"},
+ {"name": "total", "type": "gray"},
+ ]
+ data["totalImagesCount"] = project_info.items_count
+
+ train_percent = 80
+ train_count = int(project_info.items_count / 100 * train_percent)
+ state["randomSplit"] = {
+ "count": {
+ "total": project_info.items_count,
+ "train": train_count,
+ "val": project_info.items_count - train_count
+ },
+ "percent": {
+ "total": 100,
+ "train": train_percent,
+ "val": 100 - train_percent
+ },
+ "shareImagesBetweenSplits": False,
+ "sliderDisabled": False,
+ }
+
+ state["splitMethod"] = "random"
+
+ state["trainTagName"] = ""
+ if project_meta.tag_metas.get("train") is not None:
+ state["trainTagName"] = "train"
+ state["valTagName"] = ""
+ if project_meta.tag_metas.get("val") is not None:
+ state["valTagName"] = "val"
+
+ state["trainDatasets"] = []
+ state["valDatasets"] = []
+
+ state["unlabeledImages"] = "keep"
+ state["untaggedImages"] = "train"
+
+
+def get_train_val_sets(project_dir, state):
+ split_method = state["splitMethod"]
+ sly.logger.info(f"Split method for train/val is '{split_method}'")
+ if split_method == "random":
+ train_count = state["randomSplit"]["count"]["train"]
+ val_count = state["randomSplit"]["count"]["val"]
+ train_set, val_set = sly.Project.get_train_val_splits_by_count(project_dir, train_count, val_count)
+ return train_set, val_set
+ elif split_method == "tags":
+ train_tag_name = state["trainTagName"]
+ val_tag_name = state["valTagName"]
+ add_untagged_to = state["untaggedImages"]
+ train_set, val_set = sly.Project.get_train_val_splits_by_tag(project_dir, train_tag_name, val_tag_name, add_untagged_to)
+ return train_set, val_set
+ elif split_method == "datasets":
+ train_datasets = state["trainDatasets"]
+ val_datasets = state["valDatasets"]
+ train_set, val_set = sly.Project.get_train_val_splits_by_dataset(project_dir, train_datasets, val_datasets)
+ return train_set, val_set
+ else:
+ raise ValueError(f"Unknown split method: {split_method}")
+
+
+def verify_train_val_sets(train_set, val_set):
+ if len(train_set) == 0:
+ raise ValueError("Train set is empty, check or change split configuration")
+ if len(val_set) == 0:
+ raise ValueError("Val set is empty, check or change split configuration")
\ No newline at end of file
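
A hedged usage sketch of the split API above (project directory and counts are illustrative; it requires a Supervisely project already downloaded to disk):

```python
state = {
    "splitMethod": "random",
    "randomSplit": {"count": {"train": 80, "val": 20}},
}
train_set, val_set = get_train_val_sets("/app_data/sly_project", state)
verify_train_val_sets(train_set, val_set)  # raises ValueError on empty sets
print(len(train_set), len(val_set))
```
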
diff --git a/supervisely/train/src/ui/ui.py b/supervisely/train/src/ui/ui.py
new file mode 100644
index 000000000000..05c0fe58bb01
--- /dev/null
+++ b/supervisely/train/src/ui/ui.py
@@ -0,0 +1,18 @@
+import sly_train_globals as g
+import input_project as input_project
+import classes as training_classes
+import splits as train_val_split
+import architectures as model_architectures
+import hyperparameters as hyperparameters
+import monitoring as monitoring
+import artifacts as artifacts
+
+
+def init(data, state):
+ input_project.init(data)
+ training_classes.init(g.api, data, state, g.project_id, g.project_meta)
+ train_val_split.init(g.project_info, g.project_meta, data, state)
+ model_architectures.init(data, state)
+ hyperparameters.init(state)
+ monitoring.init(data, state)
+ artifacts.init(data)
diff --git a/supervisely/train/src/sly_prepare_data.py b/supervisely/train/src/yolov5_format.py
similarity index 54%
rename from supervisely/train/src/sly_prepare_data.py
rename to supervisely/train/src/yolov5_format.py
index aab0e6671dcb..ea519213eec5 100644
--- a/supervisely/train/src/sly_prepare_data.py
+++ b/supervisely/train/src/yolov5_format.py
@@ -3,7 +3,7 @@
import supervisely_lib as sly
-def transform_label(class_names, img_size, label: sly.Label):
+def _transform_label(class_names, img_size, label: sly.Label):
class_number = class_names.index(label.obj_class.name)
rect_geometry = label.geometry.to_bbox()
center = rect_geometry.center
@@ -15,13 +15,12 @@ def transform_label(class_names, img_size, label: sly.Label):
return result
-def _create_data_config(output_dir, meta: sly.ProjectMeta, keep_classes):
+def _create_data_config(output_dir, meta: sly.ProjectMeta):
class_names = []
class_colors = []
for obj_class in meta.obj_classes:
- if obj_class.name in keep_classes:
- class_names.append(obj_class.name)
- class_colors.append(obj_class.color)
+ class_names.append(obj_class.name)
+ class_colors.append(obj_class.color)
data_yaml = {
"train": os.path.join(output_dir, "images/train"),
@@ -44,11 +43,11 @@ def _create_data_config(output_dir, meta: sly.ProjectMeta, keep_classes):
return data_yaml
-def transform_annotation(ann, class_names, save_path):
+def _transform_annotation(ann, class_names, save_path):
yolov5_ann = []
for label in ann.labels:
if label.obj_class.name in class_names:
- yolov5_ann.append(transform_label(class_names, ann.img_size, label))
+ yolov5_ann.append(_transform_label(class_names, ann.img_size, label))
with open(save_path, 'w') as file:
file.write("\n".join(yolov5_ann))
@@ -67,9 +66,9 @@ def _process_split(project, class_names, images_dir, labels_dir, split, progress
ann = sly.Annotation.from_json(ann_json, project.meta)
save_ann_path = os.path.join(labels_dir, f"{sly.fs.get_file_name(item_name)}.txt")
- empty = transform_annotation(ann, class_names, save_ann_path)
+ empty = _transform_annotation(ann, class_names, save_ann_path)
if empty:
- sly.logger.warning(f"Empty annotation dataset={dataset_name} image={item_name}")
+ sly.logger.warning(f"Empty annotation: dataset={dataset_name}, image={item_name}")
img_path = dataset.get_img_path(item_name)
save_img_path = os.path.join(images_dir, item_name)
@@ -78,12 +77,31 @@ def _process_split(project, class_names, images_dir, labels_dir, split, progress
progress_cb(len(batch))
-def filter_and_transform_labels(input_dir, train_classes,
- train_split, val_split,
- output_dir, progress_cb):
- project = sly.Project(input_dir, sly.OpenMode.READ)
- data_yaml = _create_data_config(output_dir, project.meta, train_classes)
+def _transform_set(set_name, data_yaml, project_meta, items, progress_cb):
+ res_images_dir = data_yaml[set_name]
+ res_labels_dir = data_yaml[f"labels_{set_name}"]
+ classes_names = data_yaml["names"]
- _process_split(project, data_yaml["names"], data_yaml["train"], data_yaml["labels_train"], train_split, progress_cb)
- _process_split(project, data_yaml["names"], data_yaml["val"], data_yaml["labels_val"], val_split, progress_cb)
+ used_names = set()
+ for batch in sly.batched(items, batch_size=max(int(len(items) / 50), 10)):
+ for item in batch:
+ ann = sly.Annotation.load_json_file(item.ann_path, project_meta)
+ _item_name = sly._utils.generate_free_name(used_names, sly.fs.get_file_name(item.name))
+ used_names.add(_item_name)
+ _ann_name = f"{_item_name}.txt"
+ _img_name = f"{_item_name}{sly.fs.get_file_ext(item.img_path)}"
+
+ save_ann_path = os.path.join(res_labels_dir, _ann_name)
+ _transform_annotation(ann, classes_names, save_ann_path)
+ save_img_path = os.path.join(res_images_dir, _img_name)
+ sly.fs.copy_file(item.img_path, save_img_path) # hardlink not working with yolov5 ds caches
+ progress_cb(len(batch))
+
+
+def transform(sly_project_dir, yolov5_output_dir, train_set, val_set, progress_cb):
+ project = sly.Project(sly_project_dir, sly.OpenMode.READ)
+ data_yaml = _create_data_config(yolov5_output_dir, project.meta)
+
+ _transform_set("train", data_yaml, project.meta, train_set, progress_cb)
+ _transform_set("val", data_yaml, project.meta, val_set, progress_cb)
\ No newline at end of file
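
For reference, a worked example of the label format `_transform_label` emits: one "class cx cy w h" line per object, with coordinates normalized by image width and height (values below are illustrative; `ann.img_size` is (height, width)).

```python
img_h, img_w = 480, 640                        # ann.img_size
left, top, right, bottom = 100, 120, 300, 360  # label.geometry.to_bbox()

cx = (left + right) / 2 / img_w   # 0.3125
cy = (top + bottom) / 2 / img_h   # 0.5
w = (right - left) / img_w        # 0.3125
h = (bottom - top) / img_h        # 0.5

class_number = 0                  # class_names.index(label.obj_class.name)
print(f"{class_number} {cx} {cy} {w} {h}")  # 0 0.3125 0.5 0.3125 0.5
```
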
diff --git a/test_yolov5.py b/test_yolov5.py
index 4308814b31f5..af412356a064 100644
--- a/test_yolov5.py
+++ b/test_yolov5.py
@@ -17,7 +17,7 @@
from utils.plots import plot_images, output_to_target, plot_study_txt
from utils.torch_utils import select_device, time_synchronized
-from supervisely.train.src.sly_train_utils import upload_pred_vis
+from sly_train_utils import upload_pred_vis
import supervisely_lib as sly
@@ -38,8 +38,11 @@ def test(data,
save_hybrid=False, # for hybrid auto-labelling
save_conf=False, # save auto-label confidences
plots=True,
- log_imgs=0, # number of logged images
+ wandb_logger=None,
compute_loss=None,
+ half_precision=True,
+ is_coco=False,
+ opt=None,
opt_sly=False):
# Initialize/load model and set device
@@ -52,52 +55,51 @@ def test(data,
device = select_device(opt.device, batch_size=batch_size)
# Directories
- save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run
+ save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok) # increment run
(save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
# Load model
model = attempt_load(weights, map_location=device) # load FP32 model
- imgsz = check_img_size(imgsz, s=model.stride.max()) # check img_size
+ gs = max(int(model.stride.max()), 32) # grid size (max stride)
+ imgsz = check_img_size(imgsz, s=gs) # check img_size
# Multi-GPU disabled, incompatible with .half() https://github.com/ultralytics/yolov5/issues/99
# if device.type != 'cpu' and torch.cuda.device_count() > 1:
# model = nn.DataParallel(model)
# Half
- half = device.type != 'cpu' # half precision only supported on CUDA
+ half = device.type != 'cpu' and half_precision # half precision only supported on CUDA
if half:
model.half()
# Configure
model.eval()
- is_coco = data.endswith('coco.yaml') # is COCO dataset
- with open(data) as f:
- data = yaml.load(f, Loader=yaml.FullLoader) # model dict
+ if isinstance(data, str):
+ is_coco = data.endswith('coco.yaml')
+ with open(data) as f:
+ data = yaml.safe_load(f)
check_dataset(data) # check
nc = 1 if single_cls else int(data['nc']) # number of classes
iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95
niou = iouv.numel()
# Logging
- log_imgs, wandb = min(log_imgs, 100), None # ceil
- try:
- import wandb # Weights & Biases
- except ImportError:
- log_imgs = 0
-
+ log_imgs = 0
+ if wandb_logger and wandb_logger.wandb:
+ log_imgs = min(wandb_logger.log_imgs, 100)
# Dataloader
if not training:
- img = torch.zeros((1, 3, imgsz, imgsz), device=device) # init img
- _ = model(img.half() if half else img) if device.type != 'cpu' else None # run once
- path = data['test'] if opt.task == 'test' else data['val'] # path to val/test images
- dataloader = create_dataloader(path, imgsz, batch_size, model.stride.max(), opt, pad=0.5, rect=True,
- prefix=colorstr('test: ' if opt.task == 'test' else 'val: '))[0]
+ if device.type != 'cpu':
+ model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once
+ task = opt.task if opt.task in ('train', 'val', 'test') else 'val' # path to train/val/test images
+ dataloader = create_dataloader(data[task], imgsz, batch_size, gs, opt, pad=0.5, rect=True,
+ prefix=colorstr(f'{task}: '))[0]
seen = 0
confusion_matrix = ConfusionMatrix(nc=nc)
names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)}
coco91class = coco80_to_coco91_class()
- #s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', 'mAP@.5', 'mAP@.5:.95')
+ #s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95')
p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0.
loss = torch.zeros(3, device=device)
jdict, stats, ap, ap_class, wandb_images = [], [], [], [], []
@@ -123,7 +125,7 @@ def test(data,
targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels
lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling
t = time_synchronized()
- output = non_max_suppression(inf_out, conf_thres=conf_thres, iou_thres=iou_thres, labels=lb)
+ output = non_max_suppression(inf_out, conf_thres=conf_thres, iou_thres=iou_thres, labels=lb, multi_label=True, agnostic=single_cls)
t1 += time_synchronized() - t
# Statistics per image
@@ -140,6 +142,8 @@ def test(data,
continue
# Predictions
+ if single_cls:
+ pred[:, 5] = 0
predn = pred.clone()
scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0], shapes[si][1]) # native-space pred
@@ -152,15 +156,17 @@ def test(data,
with open(save_dir / 'labels' / (path.stem + '.txt'), 'a') as f:
f.write(('%g ' * len(line)).rstrip() % line + '\n')
- # W&B logging
- if plots and len(wandb_images) < log_imgs:
- box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
- "class_id": int(cls),
- "box_caption": "%s %.3f" % (names[cls], conf),
- "scores": {"class_score": conf},
- "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()]
- boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space
- wandb_images.append(wandb.Image(img[si], boxes=boxes, caption=path.name))
+ # W&B logging - Media Panel Plots
+ if len(wandb_images) < log_imgs and wandb_logger.current_epoch > 0: # Check for test operation
+ if wandb_logger.current_epoch % wandb_logger.bbox_interval == 0:
+ box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
+ "class_id": int(cls),
+ "box_caption": "%s %.3f" % (names[cls], conf),
+ "scores": {"class_score": conf},
+ "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()]
+ boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space
+ wandb_images.append(wandb_logger.wandb.Image(img[si], boxes=boxes, caption=path.name))
+ wandb_logger.log_training_progress(predn, path, names) if wandb_logger and wandb_logger.wandb_run else None
# Append to pycocotools JSON dictionary
if save_json:
@@ -184,7 +190,7 @@ def test(data,
tbox = xywh2xyxy(labels[:, 1:5])
scale_coords(img[si].shape[1:], tbox, shapes[si][0], shapes[si][1]) # native-space labels
if plots:
- confusion_matrix.process_batch(pred, torch.cat((labels[:, 0:1], tbox), 1))
+ confusion_matrix.process_batch(predn, torch.cat((labels[:, 0:1], tbox), 1))
# Per target class
for cls in torch.unique(tcls_tensor):
@@ -223,26 +229,26 @@ def test(data,
stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy
if len(stats) and stats[0].any():
p, r, ap, f1, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names)
- p, r, ap50, ap = p[:, 0], r[:, 0], ap[:, 0], ap.mean(1) # [P, R, AP@0.5, AP@0.5:0.95]
+ ap50, ap = ap[:, 0], ap.mean(1) # AP@0.5, AP@0.5:0.95
mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
nt = np.bincount(stats[3].astype(np.int64), minlength=nc) # number of targets per class
else:
nt = torch.zeros(1)
# Print results
- pf = '%20s' + '%12.3g' * 6 # print format
+ pf = '%20s' + '%12i' * 2 + '%12.3g' * 4 # print format
#print(pf % ('all', seen, nt.sum(), mp, mr, map50, map))
- sly.logger.info("Test", extra={'Class': 'all',
- 'Images': '%.3g' % seen,
- 'Targets': '%.3g' % nt.sum(),
- 'P': '%.3g' % mp,
- 'R': '%.3g' % mr,
- 'mAP@.5': '%.3g' % map50,
- 'mAP@.5:.95': '%.3g' % map})
+ sly.logger.info("Test", extra={'Class': '%20s' % 'all',
+ 'Images': '%12i' % seen,
+ 'Targets': '%12i' % nt.sum(),
+ 'P': '%12.3g' % mp,
+ 'R': '%12.3g' % mr,
+ 'mAP@.5': '%12.3g' % map50,
+ 'mAP@.5:.95': '%12.3g' % map})
# Print results per class
- if (verbose or (nc <= 20 and not training)) and nc > 1 and len(stats):
+ if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):
for i, c in enumerate(ap_class):
print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))
@@ -254,11 +260,13 @@ def test(data,
# Plots
if plots:
confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
- if wandb and wandb.run:
- wandb.log({"Images": wandb_images})
- wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in sorted(save_dir.glob('test*.jpg'))]})
+ if wandb_logger and wandb_logger.wandb:
+ val_batches = [wandb_logger.wandb.Image(str(f), caption=f.name) for f in sorted(save_dir.glob('test*.jpg'))]
+ wandb_logger.log({"Validation": val_batches})
if opt_sly:
upload_pred_vis()
+ if wandb_images:
+ wandb_logger.log({"Bounding Box Debugger/Images": wandb_images})
# Save JSON
if save_json and len(jdict):
@@ -286,10 +294,10 @@ def test(data,
print(f'pycocotools unable to run: {e}')
# Return results
+ model.float() # for training
if not training:
s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
print(f"Results saved to {save_dir}{s}")
- model.float() # for training
maps = np.zeros(nc) + map
for i, c in enumerate(ap_class):
maps[c] = ap[i]
@@ -304,7 +312,7 @@ def test(data,
parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
parser.add_argument('--conf-thres', type=float, default=0.001, help='object confidence threshold')
parser.add_argument('--iou-thres', type=float, default=0.6, help='IOU threshold for NMS')
- parser.add_argument('--task', default='val', help="'val', 'test', 'study'")
+ parser.add_argument('--task', default='val', help='train, val, test, speed or study')
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')
parser.add_argument('--augment', action='store_true', help='augmented inference')
@@ -322,7 +330,7 @@ def test(data,
print(opt)
check_requirements()
- if opt.task in ['val', 'test']: # run normally
+ if opt.task in ('train', 'val', 'test'): # run normally
test(opt.data,
opt.weights,
opt.batch_size,
@@ -336,18 +344,24 @@ def test(data,
save_txt=opt.save_txt | opt.save_hybrid,
save_hybrid=opt.save_hybrid,
save_conf=opt.save_conf,
+ opt=opt
)
+ elif opt.task == 'speed': # speed benchmarks
+ for w in opt.weights:
+ test(opt.data, w, opt.batch_size, opt.img_size, 0.25, 0.45, save_json=False, plots=False, opt=opt)
+
elif opt.task == 'study': # run over a range of settings and save/plot
- for weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']:
- f = 'study_%s_%s.txt' % (Path(opt.data).stem, Path(weights).stem) # filename to save to
- x = list(range(320, 800, 64)) # x axis
+ # python test.py --task study --data coco.yaml --iou 0.7 --weights yolov5s.pt yolov5m.pt yolov5l.pt yolov5x.pt
+ x = list(range(256, 1536 + 128, 128)) # x axis (image sizes)
+ for w in opt.weights:
+ f = f'study_{Path(opt.data).stem}_{Path(w).stem}.txt' # filename to save to
y = [] # y axis
for i in x: # img-size
- print('\nRunning %s point %s...' % (f, i))
- r, _, t = test(opt.data, weights, opt.batch_size, i, opt.conf_thres, opt.iou_thres, opt.save_json,
- plots=False)
+ print(f'\nRunning {f} point {i}...')
+ r, _, t = test(opt.data, w, opt.batch_size, i, opt.conf_thres, opt.iou_thres, opt.save_json,
+ plots=False, opt=opt)
y.append(r + t) # results and times
np.savetxt(f, y, fmt='%10.4g') # save
os.system('zip -r study.zip study_*.txt')
- plot_study_txt(f, x) # plot
+ plot_study_txt(x=x) # plot
diff --git a/train.py b/train.py
index af0ec21c5c25..7b356327b846 100644
--- a/train.py
+++ b/train.py
@@ -4,6 +4,7 @@
import os
import random
import time
+from copy import deepcopy
from pathlib import Path
from threading import Thread
@@ -33,17 +34,20 @@
from utils.google_utils import attempt_download
from utils.loss import ComputeLoss
from utils.plots import plot_images, plot_labels, plot_results, plot_evolution
-from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first
+from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, is_parallel
+from utils.wandb_logging.wandb_utils import WandbLogger, check_wandb_resume
-from supervisely.train.src.sly_train_utils import send_epoch_log, upload_label_vis, upload_train_data_vis
-from supervisely.train.src.sly_metrics import send_metrics
+from sly_train_utils import send_epoch_log, upload_label_vis, upload_train_data_vis
+from sly_metrics import send_metrics
#logger = logging.getLogger(__name__)
import supervisely_lib as sly
from supervisely_lib import logger
-def train(hyp, opt, device, tb_writer=None, wandb=None, opt_sly=False):
+def train(hyp, opt, device, tb_writer=None):
+ train_batches_uploaded = False
+
logger.info('hyperparameters', extra=hyp)
save_dir, epochs, batch_size, total_batch_size, weights, rank = \
Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank
@@ -57,20 +61,29 @@ def train(hyp, opt, device, tb_writer=None, wandb=None, opt_sly=False):
# Save run settings
with open(save_dir / 'hyp.yaml', 'w') as f:
- yaml.dump(hyp, f, sort_keys=False)
+ yaml.safe_dump(hyp, f, sort_keys=False)
with open(save_dir / 'opt.yaml', 'w') as f:
- yaml.dump(vars(opt), f, sort_keys=False)
+ yaml.safe_dump(vars(opt), f, sort_keys=False)
# Configure
plots = not opt.evolve # create plots
cuda = device.type != 'cpu'
init_seeds(2 + rank)
with open(opt.data) as f:
- data_dict = yaml.load(f, Loader=yaml.FullLoader) # data dict
- with torch_distributed_zero_first(rank):
- check_dataset(data_dict) # check
- train_path = data_dict['train']
- test_path = data_dict['val']
+ data_dict = yaml.safe_load(f) # data dict
+ is_coco = opt.data.endswith('coco.yaml')
+
+ # Logging - do this before checking the dataset, since it might update data_dict
+ loggers = {'wandb': None} # loggers dict
+ if rank in [-1, 0]:
+ opt.hyp = hyp # add hyperparameters
+ run_id = torch.load(weights).get('wandb_id') if weights.endswith('.pt') and os.path.isfile(weights) else None
+ wandb_logger = WandbLogger(opt, save_dir.stem, run_id, data_dict)
+ loggers['wandb'] = wandb_logger.wandb
+ data_dict = wandb_logger.data_dict
+ if wandb_logger.wandb:
+ weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp # WandbLogger might update weights, epochs if resuming
+
nc = 1 if opt.single_cls else int(data_dict['nc']) # number of classes
names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names
assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data) # check
@@ -82,16 +95,18 @@ def train(hyp, opt, device, tb_writer=None, wandb=None, opt_sly=False):
with torch_distributed_zero_first(rank):
attempt_download(weights) # download if not found locally
ckpt = torch.load(weights, map_location=device) # load checkpoint
- if hyp.get('anchors'):
- ckpt['model'].yaml['anchors'] = round(hyp['anchors']) # force autoanchor
- model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc).to(device) # create
- exclude = ['anchor'] if opt.cfg or hyp.get('anchors') else [] # exclude keys
+ model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create
+ exclude = ['anchor'] if (opt.cfg or hyp.get('anchors')) and not opt.resume else [] # exclude keys
state_dict = ckpt['model'].float().state_dict() # to FP32
state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude) # intersect
model.load_state_dict(state_dict, strict=False) # load
logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights)) # report
else:
- model = Model(opt.cfg, ch=3, nc=nc).to(device) # create
+ model = Model(opt.cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create
+ with torch_distributed_zero_first(rank):
+ check_dataset(data_dict) # check
+ train_path = data_dict['train']
+ test_path = data_dict['val']
# Freeze
freeze = [] # parameter names to freeze (full or partial)
@@ -128,18 +143,15 @@ def train(hyp, opt, device, tb_writer=None, wandb=None, opt_sly=False):
# Scheduler https://arxiv.org/pdf/1812.01187.pdf
# https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
- lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf']
+ if opt.linear_lr:
+ lf = lambda x: (1 - x / (epochs - 1)) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear
+ else:
+ lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf']
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
# plot_lr_scheduler(optimizer, scheduler, epochs)
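The new --linear-lr flag only swaps the lambda handed to LambdaLR. A standalone comparison sketch, assuming one_cycle has the half-cosine form used in utils.general (my reading of it):

    import math

    def cosine_lf(y2=0.2, steps=300):
        # one_cycle-style: 1.0 at epoch 0, decaying along a half-cosine to y2
        return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - 1.0) + 1.0

    def linear_lf(y2=0.2, steps=300):
        # --linear-lr: straight line from 1.0 down to y2
        return lambda x: (1 - x / (steps - 1)) * (1.0 - y2) + y2

    for e in (0, 150, 299):
        print(e, round(cosine_lf()(e), 3), round(linear_lf()(e), 3))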
- # Logging
- if rank in [-1, 0] and wandb and wandb.run is None:
- opt.hyp = hyp # add hyperparameters
- wandb_run = wandb.init(config=opt, resume="allow",
- project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem,
- name=save_dir.stem,
- id=ckpt.get('wandb_id') if 'ckpt' in locals() else None)
- loggers = {'wandb': wandb} # loggers dict
+ # EMA
+ ema = ModelEMA(model) if rank in [-1, 0] else None
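ModelEMA is now created earlier so its state can round-trip through checkpoints (see the 'ema'/'updates' keys below). Conceptually it maintains a decayed shadow copy of the weights for evaluation; a minimal sketch of the idea, not the exact utils.torch_utils implementation (which also ramps the decay up over early updates):

    from copy import deepcopy
    import torch

    class EMASketch:
        def __init__(self, model, decay=0.9999):
            self.ema = deepcopy(model).eval()  # shadow model used for eval/checkpoints
            self.decay = decay
            for p in self.ema.parameters():
                p.requires_grad_(False)

        @torch.no_grad()
        def update(self, model):
            msd = model.state_dict()
            for k, v in self.ema.state_dict().items():
                if v.dtype.is_floating_point:
                    # v <- d*v + (1-d)*w: live weights leak into the shadow slowly
                    v.mul_(self.decay).add_(msd[k].detach(), alpha=1 - self.decay)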
# Resume
start_epoch, best_fitness = 0, 0.0
@@ -149,10 +161,14 @@ def train(hyp, opt, device, tb_writer=None, wandb=None, opt_sly=False):
optimizer.load_state_dict(ckpt['optimizer'])
best_fitness = ckpt['best_fitness']
+ # EMA
+ if ema and ckpt.get('ema'):
+ ema.ema.load_state_dict(ckpt['ema'].float().state_dict())
+ ema.updates = ckpt['updates']
+
# Results
if ckpt.get('training_results') is not None:
- with open(results_file, 'w') as file:
- file.write(ckpt['training_results']) # write results.txt
+ results_file.write_text(ckpt['training_results']) # write results.txt
# Epochs
start_epoch = ckpt['epoch'] + 1
@@ -166,7 +182,7 @@ def train(hyp, opt, device, tb_writer=None, wandb=None, opt_sly=False):
del ckpt, state_dict
# Image sizes
- gs = int(model.stride.max()) # grid size (max stride)
+ gs = max(int(model.stride.max()), 32) # grid size (max stride)
nl = model.model[-1].nl # number of detection layers (used for scaling hyp['obj'])
imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size] # verify imgsz are gs-multiples
@@ -179,13 +195,6 @@ def train(hyp, opt, device, tb_writer=None, wandb=None, opt_sly=False):
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
logger.info('Using SyncBatchNorm()')
- # EMA
- ema = ModelEMA(model) if rank in [-1, 0] else None
-
- # DDP mode
- if cuda and rank != -1:
- model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank)
-
# Trainloader
dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt,
hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=rank,
@@ -197,19 +206,17 @@ def train(hyp, opt, device, tb_writer=None, wandb=None, opt_sly=False):
# Process 0
if rank in [-1, 0]:
- ema.updates = start_epoch * nb // accumulate # set EMA updates
- testloader = create_dataloader(test_path, imgsz_test, total_batch_size, gs, opt, # testloader
+ testloader = create_dataloader(test_path, imgsz_test, batch_size * 2, gs, opt, # testloader
hyp=hyp, cache=opt.cache_images and not opt.notest, rect=True, rank=-1,
world_size=opt.world_size, workers=opt.workers,
pad=0.5, prefix=colorstr('val: '))[0]
-
if not opt.resume:
labels = np.concatenate(dataset.labels, 0)
c = torch.tensor(labels[:, 0]) # classes
# cf = torch.bincount(c.long(), minlength=nc) + 1. # frequency
# model._initialize_biases(cf.to(device))
if plots:
- plot_labels(labels, save_dir, loggers)
+ plot_labels(labels, names, save_dir, loggers)
if opt.sly:
upload_label_vis()
if tb_writer:
@@ -218,11 +225,19 @@ def train(hyp, opt, device, tb_writer=None, wandb=None, opt_sly=False):
# Anchors
if not opt.noautoanchor:
check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)
+ model.half().float() # pre-reduce anchor precision
+
+ # DDP mode
+ if cuda and rank != -1:
+ model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank,
+ # nn.MultiheadAttention incompatibility with DDP https://github.com/pytorch/pytorch/issues/26698
+ find_unused_parameters=any(isinstance(layer, nn.MultiheadAttention) for layer in model.modules()))
# Model parameters
hyp['box'] *= 3. / nl # scale to layers
hyp['cls'] *= nc / 80. * 3. / nl # scale to classes and layers
hyp['obj'] *= (imgsz / 640) ** 2 * 3. / nl # scale to image size and layers
+ hyp['label_smoothing'] = opt.label_smoothing
model.nc = nc # attach number of classes to model
model.hyp = hyp # attach hyperparameters to model
model.gr = 1.0 # iou loss ratio (obj_loss = 1.0 or iou)
@@ -270,7 +285,7 @@ def train(hyp, opt, device, tb_writer=None, wandb=None, opt_sly=False):
if rank != -1:
dataloader.sampler.set_epoch(epoch)
pbar = enumerate(dataloader)
- #logger.info(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'targets', 'img_size'))
+ #logger.info(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'labels', 'img_size'))
if rank in [-1, 0]:
#pbar = tqdm(pbar, total=nb) # progress bar
pass
@@ -325,7 +340,7 @@ def train(hyp, opt, device, tb_writer=None, wandb=None, opt_sly=False):
s = ('%10s' * 2 + '%10.4g' * 6) % (
'%g/%g' % (epoch, epochs - 1), mem, *mloss, targets.shape[0], imgs.shape[-1])
#pbar.set_description(s)
- #'Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'targets', 'img_size'
+ #'Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'labels', 'img_size'
logger.info("Training", extra={
"epoch": epoch,
"epochs_count": epochs - 1,
@@ -334,7 +349,7 @@ def train(hyp, opt, device, tb_writer=None, wandb=None, opt_sly=False):
"mobj_loss": float(mloss[1].cpu().numpy()),
"mcls_loss": float(mloss[2].cpu().numpy()),
"mtotal_loss": float(mloss[3].cpu().numpy()),
- "targets": targets.shape[0],
+ "labels": targets.shape[0],
"img_size": imgs.shape[-1]
})
@@ -342,16 +357,19 @@ def train(hyp, opt, device, tb_writer=None, wandb=None, opt_sly=False):
# Plot
if plots and ni < 3:
f = save_dir / f'train_batch{ni}.jpg' # filename
- Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start()
+ plot_images(imgs, targets, paths, f)
+ #Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start()
# if tb_writer:
# tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch)
- # tb_writer.add_graph(model, imgs) # add model to tensorboard
- elif plots and ni == 10 and wandb:
- wandb.log({"Mosaics": [wandb.Image(str(x), caption=x.name) for x in save_dir.glob('train*.jpg')
- if x.exists()]})
- elif plots and ni == 10 and opt.sly:
+ # tb_writer.add_graph(torch.jit.trace(model, imgs, strict=False), []) # add model graph
+ elif plots and ni == 10 and wandb_logger.wandb:
+ wandb_logger.log({"Mosaics": [wandb_logger.wandb.Image(str(x), caption=x.name) for x in
+ save_dir.glob('train*.jpg') if x.exists()]})
+ if plots and ni == 10 and opt.sly:
+ train_batches_uploaded = True
upload_train_data_vis()
+
# end batch ------------------------------------------------------------------------------------------------
# end epoch ----------------------------------------------------------------------------------------------------
@@ -362,25 +380,27 @@ def train(hyp, opt, device, tb_writer=None, wandb=None, opt_sly=False):
# DDP process 0 or single-GPU
if rank in [-1, 0]:
# mAP
- if ema:
- ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'colors', 'img_size', 'stride', 'class_weights'])
+ ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride', 'class_weights'])
final_epoch = epoch + 1 == epochs
if not opt.notest or final_epoch: # Calculate mAP
- results, maps, times = test.test(opt.data,
- batch_size=total_batch_size,
+ wandb_logger.current_epoch = epoch + 1
+ results, maps, times = test.test(data_dict,
+ batch_size=batch_size * 2,
imgsz=imgsz_test,
model=ema.ema,
single_cls=opt.single_cls,
dataloader=testloader,
save_dir=save_dir,
+ verbose=nc < 50 and final_epoch,
plots=plots and final_epoch,
- log_imgs=opt.log_imgs if wandb else 0,
+ wandb_logger=wandb_logger,
compute_loss=compute_loss,
+ is_coco=is_coco,
opt_sly=opt.sly)
# Write
with open(results_file, 'a') as f:
- f.write(s + '%10.4g' * 7 % results + '\n') # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
+ f.write(s + '%10.4g' * 7 % results + '\n') # append metrics, val_loss
if len(opt.name) and opt.bucket:
os.system('gsutil cp %s gs://%s/results/results%s.txt' % (results_file, opt.bucket, opt.name))
@@ -395,80 +415,89 @@ def train(hyp, opt, device, tb_writer=None, wandb=None, opt_sly=False):
for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags):
if tb_writer:
tb_writer.add_scalar(tag, x, epoch) # tensorboard
- if wandb:
- wandb.log({tag: x}) # W&B
+ if wandb_logger.wandb:
+ wandb_logger.log({tag: x}) # W&B
if opt.sly:
if torch.is_tensor(x):
x = float(x.cpu().numpy())
metrics[tag] = x
if opt.sly:
- send_metrics(epoch, epochs, metrics)
+ send_metrics(epoch, epochs, metrics, opt.metrics_period)
# Update best mAP
fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95]
if fi > best_fitness:
best_fitness = fi
+ wandb_logger.end_epoch(best_result=best_fitness == fi)
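fitness() collapses the four metrics into one scalar for checkpoint selection. A sketch with the weights I believe utils.metrics.fitness uses (mAP@.5:.95 dominates):

    import numpy as np

    def fitness_sketch(results):
        # results = [P, R, mAP@.5, mAP@.5:.95]; assumed weights (0, 0, 0.1, 0.9)
        w = np.array([0.0, 0.0, 0.1, 0.9])
        return float((np.asarray(results[:4]) * w).sum())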
# Save model
- save = (not opt.nosave) or (final_epoch and not opt.evolve)
- if save:
- with open(results_file, 'r') as f: # create checkpoint
- ckpt = {'epoch': epoch,
- 'best_fitness': best_fitness,
- 'training_results': f.read(),
- 'model': ema.ema,
- 'optimizer': None if final_epoch else optimizer.state_dict(),
- 'wandb_id': wandb_run.id if wandb else None}
+ if (not opt.nosave) or (final_epoch and not opt.evolve): # if save
+ ckpt = {'epoch': epoch,
+ 'best_fitness': best_fitness,
+ 'training_results': results_file.read_text(),
+ 'model': deepcopy(model.module if is_parallel(model) else model).half(),
+ 'ema': deepcopy(ema.ema).half(),
+ 'updates': ema.updates,
+ 'optimizer': optimizer.state_dict(),
+ 'wandb_id': wandb_logger.wandb_run.id if wandb_logger.wandb else None}
# Save last, best and delete
torch.save(ckpt, last)
if best_fitness == fi:
torch.save(ckpt, best)
+ if wandb_logger.wandb:
+ if ((epoch + 1) % opt.save_period == 0 and not final_epoch) and opt.save_period != -1:
+ wandb_logger.log_model(
+ last.parent, opt, epoch, fi, best_model=best_fitness == fi)
del ckpt
+
# end epoch ----------------------------------------------------------------------------------------------------
- # end training
- if rank in [-1, 0]:
- # Strip optimizers
- final = best if best.exists() else last # final model
- for f in [last, best]:
- if f.exists():
- strip_optimizer(f) # strip optimizers
- if opt.bucket:
- os.system(f'gsutil cp {final} gs://{opt.bucket}/weights') # upload
+ if plots and opt.sly and train_batches_uploaded is False:
+ train_batches_uploaded = True
+ upload_train_data_vis()
+ # end training
+ if rank in [-1, 0]:
# Plots
if plots:
plot_results(save_dir=save_dir) # save as results.png
- if wandb:
- files = ['results.png', 'precision_recall_curve.png', 'confusion_matrix.png']
- wandb.log({"Results": [wandb.Image(str(save_dir / f), caption=f) for f in files
- if (save_dir / f).exists()]})
- if opt.log_artifacts:
- wandb.log_artifact(artifact_or_path=str(final), type='model', name=save_dir.stem)
-
+ if wandb_logger.wandb:
+ files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]]
+ wandb_logger.log({"Results": [wandb_logger.wandb.Image(str(save_dir / f), caption=f) for f in files
+ if (save_dir / f).exists()]})
# Test best.pt
logger.info('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600))
if opt.data.endswith('coco.yaml') and nc == 80: # if COCO
- for conf, iou, save_json in ([0.25, 0.45, False], [0.001, 0.65, True]): # speed, mAP tests
+ for m in (last, best) if best.exists() else (last,): # speed, mAP tests
results, _, _ = test.test(opt.data,
- batch_size=total_batch_size,
+ batch_size=batch_size * 2,
imgsz=imgsz_test,
- conf_thres=conf,
- iou_thres=iou,
- model=attempt_load(final, device).half(),
+ conf_thres=0.001,
+ iou_thres=0.7,
+ model=attempt_load(m, device).half(),
single_cls=opt.single_cls,
dataloader=testloader,
save_dir=save_dir,
- save_json=save_json,
+ save_json=True,
plots=False,
- opt_sly=opt.sly)
+ is_coco=is_coco)
+ # Strip optimizers
+ final = best if best.exists() else last # final model
+ for f in last, best:
+ if f.exists():
+ strip_optimizer(f) # strip optimizers
+ if opt.bucket:
+ os.system(f'gsutil cp {final} gs://{opt.bucket}/weights') # upload
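strip_optimizer rewrites last.pt/best.pt for distribution. A sketch of the effect only, not the exact utils.general code (which, as I understand it, also promotes the EMA weights to 'model'):

    import torch

    def strip_sketch(f):
        ckpt = torch.load(f, map_location='cpu')
        for k in ('optimizer', 'training_results', 'wandb_id', 'ema', 'updates'):
            ckpt[k] = None                    # drop training-only state
        ckpt['epoch'] = -1
        ckpt['model'].half()                  # FP16 weights roughly halve the file
        for p in ckpt['model'].parameters():
            p.requires_grad = False
        torch.save(ckpt, f)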
+ if wandb_logger.wandb and not opt.evolve: # Log the stripped model
+ wandb_logger.wandb.log_artifact(str(final), type='model',
+ name='run_' + wandb_logger.wandb_run.id + '_model',
+ aliases=['last', 'best', 'stripped'])
+ wandb_logger.finish_run()
else:
dist.destroy_process_group()
-
- wandb.run.finish() if wandb and wandb.run else None
torch.cuda.empty_cache()
return results
@@ -497,14 +526,20 @@ def main():
parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer')
parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify')
- parser.add_argument('--log-imgs', type=int, default=16, help='number of images for W&B logging, max 100')
- parser.add_argument('--log-artifacts', action='store_true', help='log artifacts, i.e. final trained model')
parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers')
parser.add_argument('--project', default='runs/train', help='save to project/name')
+ parser.add_argument('--entity', default=None, help='W&B entity')
parser.add_argument('--name', default='exp', help='save to project/name')
parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
parser.add_argument('--quad', action='store_true', help='quad dataloader')
+ parser.add_argument('--linear-lr', action='store_true', help='linear LR')
+ parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon')
+ parser.add_argument('--upload_dataset', action='store_true', help='Upload dataset as W&B artifact table')
+ parser.add_argument('--bbox_interval', type=int, default=-1, help='Set bounding-box image logging interval for W&B')
+ parser.add_argument('--save_period', type=int, default=-1, help='Log model after every "save_period" epoch')
+ parser.add_argument('--artifact_alias', type=str, default="latest", help='version of dataset artifact to be used')
parser.add_argument('--sly', action='store_true', help='for Supervisely App integration')
+ parser.add_argument('--metrics_period', type=int, default=1, help='Log metrics to Supervisely every "metrics_period" epochs')
opt = parser.parse_args()
print("Input arguments:", opt)
@@ -515,16 +550,18 @@ def main():
set_logging(opt.global_rank)
if opt.global_rank in [-1, 0]:
check_git_status()
- #check_requirements(file=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'requirements.txt'))
+ #check_requirements()
# Resume
- if opt.resume: # resume an interrupted run
+ wandb_run = check_wandb_resume(opt)
+ if opt.resume and not wandb_run: # resume an interrupted run
ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run() # specified or most recent path
assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist'
apriori = opt.global_rank, opt.local_rank
with open(Path(ckpt).parent.parent / 'opt.yaml') as f:
- opt = argparse.Namespace(**yaml.load(f, Loader=yaml.FullLoader)) # replace
- opt.cfg, opt.weights, opt.resume, opt.batch_size, opt.global_rank, opt.local_rank = '', ckpt, True, opt.total_batch_size, *apriori # reinstate
+ opt = argparse.Namespace(**yaml.safe_load(f)) # replace
+ opt.cfg, opt.weights, opt.resume, opt.batch_size, opt.global_rank, opt.local_rank = \
+ '', ckpt, True, opt.total_batch_size, *apriori # reinstate
logger.info('Resuming training from %s' % ckpt)
else:
# opt.hyp = opt.hyp or ('hyp.finetune.yaml' if opt.weights else 'hyp.scratch.yaml')
@@ -532,7 +569,7 @@ def main():
assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size))) # extend to 2 sizes (train, test)
opt.name = 'evolve' if opt.evolve else opt.name
- opt.save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok | opt.evolve) # increment run
+ opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok | opt.evolve))
# DDP mode
opt.total_batch_size = opt.batch_size
@@ -547,22 +584,17 @@ def main():
# Hyperparameters
with open(opt.hyp) as f:
- hyp = yaml.load(f, Loader=yaml.FullLoader) # load hyps
+ hyp = yaml.safe_load(f) # load hyps
# Train
logger.info(opt)
- try:
- import wandb
- except ImportError:
- wandb = None
- #prefix = colorstr('wandb: ')
- #logger.info(f"{prefix}Install Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)")
if not opt.evolve:
tb_writer = None # init loggers
if opt.global_rank in [-1, 0]:
- #logger.info(f'Start Tensorboard with "tensorboard --logdir {opt.project}", view at http://localhost:6006/')
+ #prefix = colorstr('tensorboard: ')
+ #logger.info(f"{prefix}Start with 'tensorboard --logdir {opt.project}', view at http://localhost:6006/")
tb_writer = SummaryWriter(opt.save_dir) # Tensorboard
- train(hyp, opt, device, tb_writer, wandb)
+ train(hyp, opt, device, tb_writer)
# Evolve hyperparameters (optional)
else:
@@ -636,7 +668,7 @@ def main():
hyp[k] = round(hyp[k], 5) # significant digits
# Train mutation
- results = train(hyp.copy(), opt, device, wandb=wandb)
+ results = train(hyp.copy(), opt, device)
# Write mutation results
print_mutation(hyp.copy(), results, yaml_file, opt.bucket)
diff --git a/tutorial.ipynb b/tutorial.ipynb
index 853f42f196d8..245b46aa7d9f 100644
--- a/tutorial.ipynb
+++ b/tutorial.ipynb
@@ -16,7 +16,7 @@
"accelerator": "GPU",
"widgets": {
"application/vnd.jupyter.widget-state+json": {
- "02ac0588602847eea00a0205f87bcce2": {
+ "8815626359d84416a2f44a95500580a4": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
@@ -28,15 +28,15 @@
"_view_count": null,
"_view_module_version": "1.5.0",
"box_style": "",
- "layout": "IPY_MODEL_c472ea49806447a68b5a9221a4ddae85",
+ "layout": "IPY_MODEL_3b85609c4ce94a74823f2cfe141ce68e",
"_model_module": "@jupyter-widgets/controls",
"children": [
- "IPY_MODEL_091fdf499bd44a80af7281d16da4aa93",
- "IPY_MODEL_c79f69c959de4427ba102a87a9f46d80"
+ "IPY_MODEL_876609753c2946248890344722963d44",
+ "IPY_MODEL_8abfdd8778e44b7ca0d29881cb1ada05"
]
}
},
- "c472ea49806447a68b5a9221a4ddae85": {
+ "3b85609c4ce94a74823f2cfe141ce68e": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
@@ -87,12 +87,12 @@
"left": null
}
},
- "091fdf499bd44a80af7281d16da4aa93": {
+ "876609753c2946248890344722963d44": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_view_name": "ProgressView",
- "style": "IPY_MODEL_c42ae5af74a0491187827d0a1fc259bb",
+ "style": "IPY_MODEL_78c6c3d97c484916b8ee167c63556800",
"_dom_classes": [],
"description": "100%",
"_model_name": "FloatProgressModel",
@@ -107,30 +107,30 @@
"min": 0,
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
- "layout": "IPY_MODEL_5a90f72d3a2d46cb9ad915daa3ead8b4"
+ "layout": "IPY_MODEL_9dd0f182db5d45378ceafb855e486eb8"
}
},
- "c79f69c959de4427ba102a87a9f46d80": {
+ "8abfdd8778e44b7ca0d29881cb1ada05": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_view_name": "HTMLView",
- "style": "IPY_MODEL_2a7ed6611da34662b10e37fd4f4e4438",
+ "style": "IPY_MODEL_a3dab28b45c247089a3d1b8b09f327de",
"_dom_classes": [],
"description": "",
"_model_name": "HTMLModel",
"placeholder": "",
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
- "value": " 781M/781M [00:23<00:00, 35.1MB/s]",
+ "value": " 781M/781M [08:43<00:00, 1.56MB/s]",
"_view_count": null,
"_view_module_version": "1.5.0",
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
- "layout": "IPY_MODEL_fead0160658445bf9e966daa4481cad0"
+ "layout": "IPY_MODEL_32451332b7a94ba9aacddeaa6ac94d50"
}
},
- "c42ae5af74a0491187827d0a1fc259bb": {
+ "78c6c3d97c484916b8ee167c63556800": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
@@ -145,7 +145,7 @@
"_model_module": "@jupyter-widgets/controls"
}
},
- "5a90f72d3a2d46cb9ad915daa3ead8b4": {
+ "9dd0f182db5d45378ceafb855e486eb8": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
@@ -196,7 +196,7 @@
"left": null
}
},
- "2a7ed6611da34662b10e37fd4f4e4438": {
+ "a3dab28b45c247089a3d1b8b09f327de": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
@@ -210,7 +210,7 @@
"_model_module": "@jupyter-widgets/controls"
}
},
- "fead0160658445bf9e966daa4481cad0": {
+ "32451332b7a94ba9aacddeaa6ac94d50": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
@@ -261,7 +261,7 @@
"left": null
}
},
- "cf1ab9fde7444d3e874fcd407ba8f0f8": {
+ "0fffa335322b41658508e06aed0acbf0": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"state": {
@@ -273,15 +273,15 @@
"_view_count": null,
"_view_module_version": "1.5.0",
"box_style": "",
- "layout": "IPY_MODEL_9ee03f9c85f34155b2645e89c9211547",
+ "layout": "IPY_MODEL_a354c6f80ce347e5a3ef64af87c0eccb",
"_model_module": "@jupyter-widgets/controls",
"children": [
- "IPY_MODEL_933ebc451c09490aadf71afbbb3dff2a",
- "IPY_MODEL_8e7c55cbca624432a84fa7ad8f3a4016"
+ "IPY_MODEL_85823e71fea54c39bd11e2e972348836",
+ "IPY_MODEL_fb11acd663fa4e71b041d67310d045fd"
]
}
},
- "9ee03f9c85f34155b2645e89c9211547": {
+ "a354c6f80ce347e5a3ef64af87c0eccb": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
@@ -332,50 +332,50 @@
"left": null
}
},
- "933ebc451c09490aadf71afbbb3dff2a": {
+ "85823e71fea54c39bd11e2e972348836": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"state": {
"_view_name": "ProgressView",
- "style": "IPY_MODEL_dd62d83b35d04a178840772e82bd2f2e",
+ "style": "IPY_MODEL_8a919053b780449aae5523658ad611fa",
"_dom_classes": [],
"description": "100%",
"_model_name": "FloatProgressModel",
"bar_style": "success",
- "max": 22090455,
+ "max": 22091032,
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
- "value": 22090455,
+ "value": 22091032,
"_view_count": null,
"_view_module_version": "1.5.0",
"orientation": "horizontal",
"min": 0,
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
- "layout": "IPY_MODEL_d5c4f3d1c8b046e3a163faaa6b3a51ab"
+ "layout": "IPY_MODEL_5bae9393a58b44f7b69fb04816f94f6f"
}
},
- "8e7c55cbca624432a84fa7ad8f3a4016": {
+ "fb11acd663fa4e71b041d67310d045fd": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"state": {
"_view_name": "HTMLView",
- "style": "IPY_MODEL_78d1da8efb504b03878ca9ce5b404006",
+ "style": "IPY_MODEL_d26c6d16c7f24030ab2da5285bf198ee",
"_dom_classes": [],
"description": "",
"_model_name": "HTMLModel",
"placeholder": "",
"_view_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
- "value": " 21.1M/21.1M [00:01<00:00, 16.9MB/s]",
+ "value": " 21.1M/21.1M [00:02<00:00, 9.36MB/s]",
"_view_count": null,
"_view_module_version": "1.5.0",
"description_tooltip": null,
"_model_module": "@jupyter-widgets/controls",
- "layout": "IPY_MODEL_d28208ba1213436a93926a01d99d97ae"
+ "layout": "IPY_MODEL_f7767886b2364c8d9efdc79e175ad8eb"
}
},
- "dd62d83b35d04a178840772e82bd2f2e": {
+ "8a919053b780449aae5523658ad611fa": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"state": {
@@ -390,7 +390,7 @@
"_model_module": "@jupyter-widgets/controls"
}
},
- "d5c4f3d1c8b046e3a163faaa6b3a51ab": {
+ "5bae9393a58b44f7b69fb04816f94f6f": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
@@ -441,7 +441,7 @@
"left": null
}
},
- "78d1da8efb504b03878ca9ce5b404006": {
+ "d26c6d16c7f24030ab2da5285bf198ee": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"state": {
@@ -455,7 +455,7 @@
"_model_module": "@jupyter-widgets/controls"
}
},
- "d28208ba1213436a93926a01d99d97ae": {
+ "f7767886b2364c8d9efdc79e175ad8eb": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"state": {
@@ -528,8 +528,8 @@
"source": [
"
\n",
"\n",
- "This notebook was written by Ultralytics LLC, and is freely available for redistribution under the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/). \n",
- "For more information please visit https://github.com/ultralytics/yolov5 and https://www.ultralytics.com."
+ "This is the **official YOLOv5 🚀 notebook** authored by **Ultralytics**, and is freely available for redistribution under the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/). \n",
+ "For more information please visit https://github.com/ultralytics/yolov5 and https://www.ultralytics.com. Thank you!"
]
},
{
@@ -550,7 +550,7 @@
"colab": {
"base_uri": "https://localhost:8080/"
},
- "outputId": "888d5c41-00e9-47d8-d230-dded99325bea"
+ "outputId": "9b022435-4197-41fc-abea-81f86ce857d0"
},
"source": [
"!git clone https://github.com/ultralytics/yolov5 # clone repo\n",
@@ -561,14 +561,14 @@
"from IPython.display import Image, clear_output # to display images\n",
"\n",
"clear_output()\n",
- "print('Setup complete. Using torch %s %s' % (torch.__version__, torch.cuda.get_device_properties(0) if torch.cuda.is_available() else 'CPU'))"
+ "print(f\"Setup complete. Using torch {torch.__version__} ({torch.cuda.get_device_properties(0).name if torch.cuda.is_available() else 'CPU'})\")"
],
- "execution_count": null,
+ "execution_count": 31,
"outputs": [
{
"output_type": "stream",
"text": [
- "Setup complete. Using torch 1.7.0+cu101 _CudaDeviceProperties(name='Tesla V100-SXM2-16GB', major=7, minor=0, total_memory=16130MB, multi_processor_count=80)\n"
+ "Setup complete. Using torch 1.8.1+cu101 (Tesla V100-SXM2-16GB)\n"
],
"name": "stdout"
}
@@ -582,7 +582,9 @@
"source": [
"# 1. Inference\n",
"\n",
- "`detect.py` runs inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)."
+ "`detect.py` runs YOLOv5 inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/detect`. Example inference sources are:\n",
+ "\n",
+ "<image: list of example inference sources>"
]
},
{
@@ -604,15 +606,15 @@
{
"output_type": "stream",
"text": [
- "Namespace(agnostic_nms=False, augment=False, classes=None, conf_thres=0.25, device='', img_size=640, iou_thres=0.45, save_conf=False, save_dir='runs/detect', save_txt=False, source='data/images/', update=False, view_img=False, weights=['yolov5s.pt'])\n",
- "Using torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16130MB)\n",
+ "Namespace(agnostic_nms=False, augment=False, classes=None, conf_thres=0.25, device='', exist_ok=False, img_size=640, iou_thres=0.45, name='exp', project='runs/detect', save_conf=False, save_txt=False, source='data/images/', update=False, view_img=False, weights=['yolov5s.pt'])\n",
+ "YOLOv5 🚀 v5.0-1-g0f395b3 torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n",
"\n",
"Fusing layers... \n",
- "Model Summary: 232 layers, 7459581 parameters, 0 gradients\n",
- "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 buss, 1 skateboards, Done. (0.012s)\n",
- "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, Done. (0.012s)\n",
+ "Model Summary: 224 layers, 7266973 parameters, 0 gradients, 17.0 GFLOPS\n",
+ "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, Done. (0.008s)\n",
+ "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, Done. (0.008s)\n",
"Results saved to runs/detect/exp\n",
- "Done. (0.113s)\n"
+ "Done. (0.087)\n"
],
"name": "stdout"
},
@@ -634,16 +636,6 @@
}
]
},
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "4qbaa3iEcrcE"
- },
- "source": [
- "Results are saved to `runs/detect`. A full list of available inference sources:\n",
- "<image: inference sources>"
- ]
- },
{
"cell_type": "markdown",
"metadata": {
@@ -651,7 +643,7 @@
},
"source": [
"# 2. Test\n",
- "Test a model on [COCO](https://cocodataset.org/#home) val or test-dev dataset to evaluate trained accuracy. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag. Note that `pycocotools` metrics may be 1-2% better than the equivalent repo metrics, as is visible below, due to slight differences in mAP computation."
+ "Test a model's accuracy on [COCO](https://cocodataset.org/#home) val or test-dev datasets. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag. Note that `pycocotools` metrics may be ~1% better than the equivalent repo metrics, as is visible below, due to slight differences in mAP computation."
]
},
{
@@ -670,19 +662,19 @@
"id": "WQPtK1QYVaD_",
"colab": {
"base_uri": "https://localhost:8080/",
- "height": 66,
+ "height": 65,
"referenced_widgets": [
- "02ac0588602847eea00a0205f87bcce2",
- "c472ea49806447a68b5a9221a4ddae85",
- "091fdf499bd44a80af7281d16da4aa93",
- "c79f69c959de4427ba102a87a9f46d80",
- "c42ae5af74a0491187827d0a1fc259bb",
- "5a90f72d3a2d46cb9ad915daa3ead8b4",
- "2a7ed6611da34662b10e37fd4f4e4438",
- "fead0160658445bf9e966daa4481cad0"
+ "8815626359d84416a2f44a95500580a4",
+ "3b85609c4ce94a74823f2cfe141ce68e",
+ "876609753c2946248890344722963d44",
+ "8abfdd8778e44b7ca0d29881cb1ada05",
+ "78c6c3d97c484916b8ee167c63556800",
+ "9dd0f182db5d45378ceafb855e486eb8",
+ "a3dab28b45c247089a3d1b8b09f327de",
+ "32451332b7a94ba9aacddeaa6ac94d50"
]
},
- "outputId": "780d8f5f-766e-4b99-e370-11f9b884c27a"
+ "outputId": "81521192-cf67-4a47-a4cc-434cb0ebc363"
},
"source": [
"# Download COCO val2017\n",
@@ -695,7 +687,7 @@
"output_type": "display_data",
"data": {
"application/vnd.jupyter.widget-view+json": {
- "model_id": "02ac0588602847eea00a0205f87bcce2",
+ "model_id": "8815626359d84416a2f44a95500580a4",
"version_minor": 0,
"version_major": 2
},
@@ -723,7 +715,7 @@
"colab": {
"base_uri": "https://localhost:8080/"
},
- "outputId": "013935a5-ba81-4810-b723-0cb01cf7bc79"
+ "outputId": "2340b131-9943-4cd6-fd3a-8272aeb0774f"
},
"source": [
"# Run YOLOv5x on COCO val2017\n",
@@ -734,45 +726,46 @@
{
"output_type": "stream",
"text": [
- "Namespace(augment=False, batch_size=32, conf_thres=0.001, data='./data/coco.yaml', device='', exist_ok=False, img_size=640, iou_thres=0.65, name='exp', project='runs/test', save_conf=False, save_json=True, save_txt=False, single_cls=False, task='val', verbose=False, weights=['yolov5x.pt'])\n",
- "Using torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16130MB)\n",
+ "Namespace(augment=False, batch_size=32, conf_thres=0.001, data='./data/coco.yaml', device='', exist_ok=False, img_size=640, iou_thres=0.65, name='exp', project='runs/test', save_conf=False, save_hybrid=False, save_json=True, save_txt=False, single_cls=False, task='val', verbose=False, weights=['yolov5x.pt'])\n",
+ "YOLOv5 🚀 v5.0-1-g0f395b3 torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n",
"\n",
- "Downloading https://github.com/ultralytics/yolov5/releases/download/v3.1/yolov5x.pt to yolov5x.pt...\n",
- "100% 170M/170M [00:05<00:00, 32.6MB/s]\n",
+ "Downloading https://github.com/ultralytics/yolov5/releases/download/v5.0/yolov5x.pt to yolov5x.pt...\n",
+ "100% 168M/168M [00:05<00:00, 32.3MB/s]\n",
"\n",
"Fusing layers... \n",
- "Model Summary: 484 layers, 88922205 parameters, 0 gradients\n",
- "Scanning labels ../coco/labels/val2017.cache (4952 found, 0 missing, 48 empty, 0 duplicate, for 5000 images): 5000it [00:00, 14785.71it/s]\n",
- " Class Images Targets P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:30<00:00, 1.74it/s]\n",
- " all 5e+03 3.63e+04 0.409 0.754 0.672 0.484\n",
- "Speed: 5.9/2.1/7.9 ms inference/NMS/total per 640x640 image at batch-size 32\n",
+ "Model Summary: 476 layers, 87730285 parameters, 0 gradients, 218.8 GFLOPS\n",
+ "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco/val2017' images and labels... 4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:01<00:00, 3102.29it/s]\n",
+ "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: ../coco/val2017.cache\n",
+ " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:23<00:00, 1.87it/s]\n",
+ " all 5000 36335 0.745 0.627 0.68 0.49\n",
+ "Speed: 5.3/1.6/6.9 ms inference/NMS/total per 640x640 image at batch-size 32\n",
"\n",
"Evaluating pycocotools mAP... saving runs/test/exp/yolov5x_predictions.json...\n",
"loading annotations into memory...\n",
- "Done (t=0.43s)\n",
+ "Done (t=0.48s)\n",
"creating index...\n",
"index created!\n",
"Loading and preparing results...\n",
- "DONE (t=4.67s)\n",
+ "DONE (t=5.08s)\n",
"creating index...\n",
"index created!\n",
"Running per image evaluation...\n",
"Evaluate annotation type *bbox*\n",
- "DONE (t=92.11s).\n",
+ "DONE (t=90.51s).\n",
"Accumulating evaluation results...\n",
- "DONE (t=13.24s).\n",
- " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.492\n",
- " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.676\n",
- " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.534\n",
- " Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.318\n",
- " Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.541\n",
- " Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.633\n",
- " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.376\n",
- " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.617\n",
- " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.670\n",
- " Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.493\n",
- " Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.723\n",
- " Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.812\n",
+ "DONE (t=15.16s).\n",
+ " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.504\n",
+ " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.688\n",
+ " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.546\n",
+ " Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.351\n",
+ " Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.551\n",
+ " Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.644\n",
+ " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.382\n",
+ " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.629\n",
+ " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.681\n",
+ " Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.524\n",
+ " Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.735\n",
+ " Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.827\n",
"Results saved to runs/test/exp\n"
],
"name": "stdout"
@@ -786,7 +779,7 @@
},
"source": [
"## COCO test-dev2017\n",
- "Download [COCO test2017](https://github.com/ultralytics/yolov5/blob/74b34872fdf41941cddcf243951cdb090fbac17b/data/coco.yaml#L15) dataset (7GB - 40,000 images), to test model accuracy on test-dev set (20,000 images). Results are saved to a `*.json` file which can be submitted to the evaluation server at https://competitions.codalab.org/competitions/20794."
+ "Download [COCO test2017](https://github.com/ultralytics/yolov5/blob/74b34872fdf41941cddcf243951cdb090fbac17b/data/coco.yaml#L15) dataset (7GB - 40,000 images), to test model accuracy on test-dev set (**20,000 images, no labels**). Results are saved to a `*.json` file which should be **zipped** and submitted to the evaluation server at https://competitions.codalab.org/competitions/20794."
]
},
{
@@ -797,9 +790,9 @@
"source": [
"# Download COCO test-dev2017\n",
"torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017labels.zip', 'tmp.zip')\n",
- "!unzip -q tmp.zip -d ../ && rm tmp.zip # unzip labels\n",
+ "!unzip -q tmp.zip -d ../ && rm tmp.zip # unzip labels\n",
"!f=\"test2017.zip\" && curl http://images.cocodataset.org/zips/$f -o $f && unzip -q $f && rm $f # 7GB, 41k images\n",
- "%mv ./test2017 ./coco/images && mv ./coco ../ # move images to /coco and move /coco next to /yolov5"
+ "%mv ./test2017 ../coco/images # move to /coco"
],
"execution_count": null,
"outputs": []
@@ -833,19 +826,19 @@
"id": "Knxi2ncxWffW",
"colab": {
"base_uri": "https://localhost:8080/",
- "height": 66,
+ "height": 65,
"referenced_widgets": [
- "cf1ab9fde7444d3e874fcd407ba8f0f8",
- "9ee03f9c85f34155b2645e89c9211547",
- "933ebc451c09490aadf71afbbb3dff2a",
- "8e7c55cbca624432a84fa7ad8f3a4016",
- "dd62d83b35d04a178840772e82bd2f2e",
- "d5c4f3d1c8b046e3a163faaa6b3a51ab",
- "78d1da8efb504b03878ca9ce5b404006",
- "d28208ba1213436a93926a01d99d97ae"
+ "0fffa335322b41658508e06aed0acbf0",
+ "a354c6f80ce347e5a3ef64af87c0eccb",
+ "85823e71fea54c39bd11e2e972348836",
+ "fb11acd663fa4e71b041d67310d045fd",
+ "8a919053b780449aae5523658ad611fa",
+ "5bae9393a58b44f7b69fb04816f94f6f",
+ "d26c6d16c7f24030ab2da5285bf198ee",
+ "f7767886b2364c8d9efdc79e175ad8eb"
]
},
- "outputId": "59f9a94b-21e1-4626-f36a-a8e1b1e5c8f6"
+ "outputId": "b41ac253-9e1b-4c26-d78b-700ea0154f43"
},
"source": [
"# Download COCO128\n",
@@ -858,12 +851,12 @@
"output_type": "display_data",
"data": {
"application/vnd.jupyter.widget-view+json": {
- "model_id": "cf1ab9fde7444d3e874fcd407ba8f0f8",
+ "model_id": "0fffa335322b41658508e06aed0acbf0",
"version_minor": 0,
"version_major": 2
},
"text/plain": [
- "HBox(children=(FloatProgress(value=0.0, max=22090455.0), HTML(value='')))"
+ "HBox(children=(FloatProgress(value=0.0, max=22091032.0), HTML(value='')))"
]
},
"metadata": {
@@ -923,7 +916,7 @@
"colab": {
"base_uri": "https://localhost:8080/"
},
- "outputId": "138f2d1d-364c-405a-cf13-ea91a2aff915"
+ "outputId": "e715d09c-5d93-4912-a0df-9da0893f2014"
},
"source": [
"# Train YOLOv5s on COCO128 for 3 epochs\n",
@@ -934,76 +927,76 @@
{
"output_type": "stream",
"text": [
- "Using torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16130MB)\n",
- "\n",
- "Namespace(adam=False, batch_size=16, bucket='', cache_images=True, cfg='', data='./data/coco128.yaml', device='', epochs=3, evolve=False, exist_ok=False, global_rank=-1, hyp='data/hyp.scratch.yaml', image_weights=False, img_size=[640, 640], local_rank=-1, log_imgs=16, multi_scale=False, name='exp', noautoanchor=False, nosave=True, notest=False, project='runs/train', rect=False, resume=False, save_dir='runs/train/exp', single_cls=False, sync_bn=False, total_batch_size=16, weights='yolov5s.pt', workers=8, world_size=1)\n",
- "Start Tensorboard with \"tensorboard --logdir runs/train\", view at http://localhost:6006/\n",
- "2020-11-20 11:45:17.042357: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcudart.so.10.1\n",
- "Hyperparameters {'lr0': 0.01, 'lrf': 0.2, 'momentum': 0.937, 'weight_decay': 0.0005, 'warmup_epochs': 3.0, 'warmup_momentum': 0.8, 'warmup_bias_lr': 0.1, 'box': 0.05, 'cls': 0.5, 'cls_pw': 1.0, 'obj': 1.0, 'obj_pw': 1.0, 'iou_t': 0.2, 'anchor_t': 4.0, 'fl_gamma': 0.0, 'hsv_h': 0.015, 'hsv_s': 0.7, 'hsv_v': 0.4, 'degrees': 0.0, 'translate': 0.1, 'scale': 0.5, 'shear': 0.0, 'perspective': 0.0, 'flipud': 0.0, 'fliplr': 0.5, 'mosaic': 1.0, 'mixup': 0.0}\n",
- "Downloading https://github.com/ultralytics/yolov5/releases/download/v3.1/yolov5s.pt to yolov5s.pt...\n",
- "100% 14.5M/14.5M [00:01<00:00, 14.8MB/s]\n",
+ "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n",
+ "YOLOv5 🚀 v5.0-2-g54d6516 torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n",
"\n",
+ "Namespace(adam=False, artifact_alias='latest', batch_size=16, bbox_interval=-1, bucket='', cache_images=True, cfg='', data='./data/coco128.yaml', device='', entity=None, epochs=3, evolve=False, exist_ok=False, global_rank=-1, hyp='data/hyp.scratch.yaml', image_weights=False, img_size=[640, 640], label_smoothing=0.0, linear_lr=False, local_rank=-1, multi_scale=False, name='exp', noautoanchor=False, nosave=True, notest=False, project='runs/train', quad=False, rect=False, resume=False, save_dir='runs/train/exp', save_period=-1, single_cls=False, sync_bn=False, total_batch_size=16, upload_dataset=False, weights='yolov5s.pt', workers=8, world_size=1)\n",
+ "\u001b[34m\u001b[1mtensorboard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
+ "2021-04-12 10:29:58.539457: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\n",
+ "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.2, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0\n",
+ "\u001b[34m\u001b[1mwandb: \u001b[0mInstall Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)\n",
"\n",
" from n params module arguments \n",
" 0 -1 1 3520 models.common.Focus [3, 32, 3] \n",
" 1 -1 1 18560 models.common.Conv [32, 64, 3, 2] \n",
- " 2 -1 1 19904 models.common.BottleneckCSP [64, 64, 1] \n",
+ " 2 -1 1 18816 models.common.C3 [64, 64, 1] \n",
" 3 -1 1 73984 models.common.Conv [64, 128, 3, 2] \n",
- " 4 -1 1 161152 models.common.BottleneckCSP [128, 128, 3] \n",
+ " 4 -1 1 156928 models.common.C3 [128, 128, 3] \n",
" 5 -1 1 295424 models.common.Conv [128, 256, 3, 2] \n",
- " 6 -1 1 641792 models.common.BottleneckCSP [256, 256, 3] \n",
+ " 6 -1 1 625152 models.common.C3 [256, 256, 3] \n",
" 7 -1 1 1180672 models.common.Conv [256, 512, 3, 2] \n",
" 8 -1 1 656896 models.common.SPP [512, 512, [5, 9, 13]] \n",
- " 9 -1 1 1248768 models.common.BottleneckCSP [512, 512, 1, False] \n",
+ " 9 -1 1 1182720 models.common.C3 [512, 512, 1, False] \n",
" 10 -1 1 131584 models.common.Conv [512, 256, 1, 1] \n",
" 11 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n",
" 12 [-1, 6] 1 0 models.common.Concat [1] \n",
- " 13 -1 1 378624 models.common.BottleneckCSP [512, 256, 1, False] \n",
+ " 13 -1 1 361984 models.common.C3 [512, 256, 1, False] \n",
" 14 -1 1 33024 models.common.Conv [256, 128, 1, 1] \n",
" 15 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n",
" 16 [-1, 4] 1 0 models.common.Concat [1] \n",
- " 17 -1 1 95104 models.common.BottleneckCSP [256, 128, 1, False] \n",
+ " 17 -1 1 90880 models.common.C3 [256, 128, 1, False] \n",
" 18 -1 1 147712 models.common.Conv [128, 128, 3, 2] \n",
" 19 [-1, 14] 1 0 models.common.Concat [1] \n",
- " 20 -1 1 313088 models.common.BottleneckCSP [256, 256, 1, False] \n",
+ " 20 -1 1 296448 models.common.C3 [256, 256, 1, False] \n",
" 21 -1 1 590336 models.common.Conv [256, 256, 3, 2] \n",
" 22 [-1, 10] 1 0 models.common.Concat [1] \n",
- " 23 -1 1 1248768 models.common.BottleneckCSP [512, 512, 1, False] \n",
+ " 23 -1 1 1182720 models.common.C3 [512, 512, 1, False] \n",
" 24 [17, 20, 23] 1 229245 models.yolo.Detect [80, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
- "Model Summary: 283 layers, 7468157 parameters, 7468157 gradients\n",
+ "Model Summary: 283 layers, 7276605 parameters, 7276605 gradients, 17.1 GFLOPS\n",
"\n",
- "Transferred 370/370 items from yolov5s.pt\n",
- "Optimizer groups: 62 .bias, 70 conv.weight, 59 other\n",
- "Scanning images: 100% 128/128 [00:00<00:00, 5395.63it/s]\n",
- "Scanning labels ../coco128/labels/train2017.cache (126 found, 0 missing, 2 empty, 0 duplicate, for 128 images): 128it [00:00, 13972.28it/s]\n",
- "Caching images (0.1GB): 100% 128/128 [00:00<00:00, 173.55it/s]\n",
- "Scanning labels ../coco128/labels/train2017.cache (126 found, 0 missing, 2 empty, 0 duplicate, for 128 images): 128it [00:00, 8693.98it/s]\n",
- "Caching images (0.1GB): 100% 128/128 [00:00<00:00, 133.30it/s]\n",
- "NumExpr defaulting to 2 threads.\n",
+ "Transferred 362/362 items from yolov5s.pt\n",
+ "Scaled weight_decay = 0.0005\n",
+ "Optimizer groups: 62 .bias, 62 conv.weight, 59 other\n",
+ "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '../coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 796544.38it/s]\n",
+ "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 176.73it/s]\n",
+ "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 500812.42it/s]\n",
+ "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 134.10it/s]\n",
+ "Plotting labels... \n",
"\n",
- "Analyzing anchors... anchors/target = 4.26, Best Possible Recall (BPR) = 0.9946\n",
+ "\u001b[34m\u001b[1mautoanchor: \u001b[0mAnalyzing anchors... anchors/target = 4.26, Best Possible Recall (BPR) = 0.9946\n",
"Image sizes 640 train, 640 test\n",
"Using 2 dataloader workers\n",
"Logging results to runs/train/exp\n",
"Starting training for 3 epochs...\n",
"\n",
- " Epoch gpu_mem box obj cls total targets img_size\n",
- " 0/2 5.24G 0.04202 0.06745 0.01503 0.1245 194 640: 100% 8/8 [00:03<00:00, 2.01it/s]\n",
- " Class Images Targets P R mAP@.5 mAP@.5:.95: 100% 8/8 [00:03<00:00, 2.40it/s]\n",
- " all 128 929 0.404 0.758 0.701 0.45\n",
+ " Epoch gpu_mem box obj cls total labels img_size\n",
+ " 0/2 3.29G 0.04368 0.065 0.02127 0.1299 183 640: 100% 8/8 [00:03<00:00, 2.21it/s]\n",
+ " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:04<00:00, 1.09s/it]\n",
+ " all 128 929 0.605 0.657 0.666 0.434\n",
"\n",
- " Epoch gpu_mem box obj cls total targets img_size\n",
- " 1/2 5.12G 0.04461 0.05874 0.0169 0.1202 142 640: 100% 8/8 [00:01<00:00, 4.14it/s]\n",
- " Class Images Targets P R mAP@.5 mAP@.5:.95: 100% 8/8 [00:01<00:00, 5.75it/s]\n",
- " all 128 929 0.403 0.772 0.703 0.453\n",
+ " Epoch gpu_mem box obj cls total labels img_size\n",
+ " 1/2 6.65G 0.04556 0.0651 0.01987 0.1305 166 640: 100% 8/8 [00:01<00:00, 5.18it/s]\n",
+ " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:01<00:00, 2.72it/s]\n",
+ " all 128 929 0.61 0.66 0.669 0.438\n",
"\n",
- " Epoch gpu_mem box obj cls total targets img_size\n",
- " 2/2 5.12G 0.04445 0.06545 0.01667 0.1266 149 640: 100% 8/8 [00:01<00:00, 4.15it/s]\n",
- " Class Images Targets P R mAP@.5 mAP@.5:.95: 100% 8/8 [00:06<00:00, 1.18it/s]\n",
- " all 128 929 0.395 0.767 0.702 0.452\n",
- "Optimizer stripped from runs/train/exp/weights/last.pt, 15.2MB\n",
- "3 epochs completed in 0.006 hours.\n",
- "\n"
+ " Epoch gpu_mem box obj cls total labels img_size\n",
+ " 2/2 6.65G 0.04624 0.06923 0.0196 0.1351 182 640: 100% 8/8 [00:01<00:00, 5.19it/s]\n",
+ " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:03<00:00, 1.27it/s]\n",
+ " all 128 929 0.618 0.659 0.671 0.438\n",
+ "3 epochs completed in 0.007 hours.\n",
+ "\n",
+ "Optimizer stripped from runs/train/exp/weights/last.pt, 14.8MB\n",
+ "Optimizer stripped from runs/train/exp/weights/best.pt, 14.8MB\n"
],
"name": "stdout"
}
@@ -1026,9 +1019,9 @@
"source": [
"## Weights & Biases Logging 🌟 NEW\n",
"\n",
- "[Weights & Biases](https://www.wandb.com/) (W&B) is now integrated with YOLOv5 for real-time visualization and cloud logging of training runs. This allows for better run comparison and introspection, as well improved visibility and collaboration for teams. To enable W&B `pip install wandb`, and then train normally (you will be guided through setup on first use). \n",
+ "[Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_notebook) (W&B) is now integrated with YOLOv5 for real-time visualization and cloud logging of training runs. This allows for better run comparison and introspection, as well improved visibility and collaboration for teams. To enable W&B `pip install wandb`, and then train normally (you will be guided through setup on first use). \n",
"\n",
- "During training you will see live updates at [https://wandb.ai/home](https://wandb.ai/home), and you can create and share detailed [Reports](https://wandb.ai/glenn-jocher/yolov5_tutorial/reports/YOLOv5-COCO128-Tutorial-Results--VmlldzozMDI5OTY) of your results. For more information see the [YOLOv5 Weights & Biases Tutorial](https://github.com/ultralytics/yolov5/issues/1289). \n",
+ "During training you will see live updates at [https://wandb.ai/home](https://wandb.ai/home?utm_campaign=repo_yolo_notebook), and you can create and share detailed [Reports](https://wandb.ai/glenn-jocher/yolov5_tutorial/reports/YOLOv5-COCO128-Tutorial-Results--VmlldzozMDI5OTY) of your results. For more information see the [YOLOv5 Weights & Biases Tutorial](https://github.com/ultralytics/yolov5/issues/1289). \n",
"\n",
"
"
]
@@ -1114,10 +1107,23 @@
"\n",
"YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n",
"\n",
- "- **Google Colab Notebook** with free GPU:
\n",
- "- **Kaggle Notebook** with free GPU: [https://www.kaggle.com/ultralytics/yolov5](https://www.kaggle.com/ultralytics/yolov5)\n",
- "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart) \n",
- "- **Docker Image** https://hub.docker.com/r/ultralytics/yolov5. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) ![Docker Pulls](https://img.shields.io/docker/pulls/ultralytics/yolov5?logo=docker)\n"
+ "- **Google Colab and Kaggle** notebooks with free GPU:
\n",
+ "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)\n",
+ "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)\n",
+ "- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart)
\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "6Qu7Iesl0p54"
+ },
+ "source": [
+ "# Status\n",
+ "\n",
+ "![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg)\n",
+ "\n",
+ "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([test.py](https://github.com/ultralytics/yolov5/blob/master/test.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/models/export.py)) on MacOS, Windows, and Ubuntu every 24 hours and on every commit.\n"
]
},
{
@@ -1152,11 +1158,32 @@
},
"source": [
"# Reproduce\n",
- "%%shell\n",
- "for x in yolov5s yolov5m yolov5l yolov5x; do\n",
- " python test.py --weights $x.pt --data coco.yaml --img 640 --conf 0.25 --iou 0.45 # speed\n",
- " python test.py --weights $x.pt --data coco.yaml --img 640 --conf 0.001 --iou 0.65 # mAP\n",
- "done"
+ "for x in 'yolov5s', 'yolov5m', 'yolov5l', 'yolov5x':\n",
+ " !python test.py --weights {x}.pt --data coco.yaml --img 640 --conf 0.25 --iou 0.45 # speed\n",
+ " !python test.py --weights {x}.pt --data coco.yaml --img 640 --conf 0.001 --iou 0.65 # mAP"
+ ],
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "id": "GMusP4OAxFu6"
+ },
+ "source": [
+ "# PyTorch Hub\n",
+ "import torch\n",
+ "\n",
+ "# Model\n",
+ "model = torch.hub.load('ultralytics/yolov5', 'yolov5s')\n",
+ "\n",
+ "# Images\n",
+ "dir = 'https://github.com/ultralytics/yolov5/raw/master/data/images/'\n",
+ "imgs = [dir + f for f in ('zidane.jpg', 'bus.jpg')] # batch of images\n",
+ "\n",
+ "# Inference\n",
+ "results = model(imgs)\n",
+ "results.print() # or .show(), .save()"
],
"execution_count": null,
"outputs": []
@@ -1205,6 +1232,19 @@
"execution_count": null,
"outputs": []
},
+ {
+ "cell_type": "code",
+ "metadata": {
+ "id": "RVRSOhEvUdb5"
+ },
+ "source": [
+ "# Evolve\n",
+ "!python train.py --img 640 --batch 64 --epochs 100 --data coco128.yaml --weights yolov5s.pt --cache --noautoanchor --evolve\n",
+ "!d=runs/train/evolve && cp evolve.* $d && zip -r evolve.zip $d && gsutil mv evolve.zip gs://bucket # upload results (optional)"
+ ],
+ "execution_count": null,
+ "outputs": []
+ },
{
"cell_type": "code",
"metadata": {
diff --git a/utils/activations.py b/utils/activations.py
index aa3ddf071d28..92a3b5eaa54b 100644
--- a/utils/activations.py
+++ b/utils/activations.py
@@ -19,23 +19,6 @@ def forward(x):
return x * F.hardtanh(x + 3, 0., 6.) / 6. # for torchscript, CoreML and ONNX
-class MemoryEfficientSwish(nn.Module):
- class F(torch.autograd.Function):
- @staticmethod
- def forward(ctx, x):
- ctx.save_for_backward(x)
- return x * torch.sigmoid(x)
-
- @staticmethod
- def backward(ctx, grad_output):
- x = ctx.saved_tensors[0]
- sx = torch.sigmoid(x)
- return grad_output * (sx * (1 + x * (1 - sx)))
-
- def forward(self, x):
- return self.F.apply(x)
-
-
# Mish https://github.com/digantamisra98/Mish --------------------------------------------------------------------------
class Mish(nn.Module):
@staticmethod
@@ -70,3 +53,46 @@ def __init__(self, c1, k=3): # ch_in, kernel
def forward(self, x):
return torch.max(x, self.bn(self.conv(x)))
+
+
+# ACON https://arxiv.org/pdf/2009.04759.pdf ----------------------------------------------------------------------------
+class AconC(nn.Module):
+ r""" ACON activation (activate or not).
+ AconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is a learnable parameter
+ according to "Activate or Not: Learning Customized Activation"
.
+ """
+
+ def __init__(self, c1):
+ super().__init__()
+ self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
+ self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))
+ self.beta = nn.Parameter(torch.ones(1, c1, 1, 1))
+
+ def forward(self, x):
+ dpx = (self.p1 - self.p2) * x
+ return dpx * torch.sigmoid(self.beta * dpx) + self.p2 * x
+
+
+class MetaAconC(nn.Module):
+ r""" ACON activation (activate or not).
+ MetaAconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is generated by a small network
+ according to "Activate or Not: Learning Customized Activation" .
+ """
+
+ def __init__(self, c1, k=1, s=1, r=16): # ch_in, kernel, stride, r
+ super().__init__()
+ c2 = max(r, c1 // r)
+ self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
+ self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))
+ self.fc1 = nn.Conv2d(c1, c2, k, s, bias=True)
+ self.fc2 = nn.Conv2d(c2, c1, k, s, bias=True)
+ # self.bn1 = nn.BatchNorm2d(c2)
+ # self.bn2 = nn.BatchNorm2d(c1)
+
+ def forward(self, x):
+ y = x.mean(dim=2, keepdims=True).mean(dim=3, keepdims=True)
+ # batch-size 1 bug/instabilities https://github.com/ultralytics/yolov5/issues/2891
+ # beta = torch.sigmoid(self.bn2(self.fc2(self.bn1(self.fc1(y))))) # bug/unstable
+ beta = torch.sigmoid(self.fc2(self.fc1(y))) # bug patch BN layers removed
+ dpx = (self.p1 - self.p2) * x
+ return dpx * torch.sigmoid(beta * dpx) + self.p2 * x
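For reviewers who want to exercise the new ACON layers, here is a minimal smoke test; it assumes this patch is applied so `utils.activations` exports `AconC` and `MetaAconC`, and the tensor shapes are illustrative only:

```python
# Sketch: forward a random feature map through the ACON activations added above.
import torch
from utils.activations import AconC, MetaAconC  # assumes the patched module is importable

x = torch.randn(2, 64, 32, 32)      # (batch, channels, height, width)
act_c = AconC(c1=64)                # learnable p1, p2, beta per channel
act_meta = MetaAconC(c1=64)         # beta predicted by a small conv bottleneck
print(act_c(x).shape)               # torch.Size([2, 64, 32, 32]) -- shape is preserved
print(act_meta(x).shape)            # torch.Size([2, 64, 32, 32])
```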
diff --git a/utils/autoanchor.py b/utils/autoanchor.py
index c00f0382ff71..75b350da729c 100644
--- a/utils/autoanchor.py
+++ b/utils/autoanchor.py
@@ -37,17 +37,21 @@ def metric(k): # compute metric
bpr = (best > 1. / thr).float().mean() # best possible recall
return bpr, aat
- bpr, aat = metric(m.anchor_grid.clone().cpu().view(-1, 2))
+ anchors = m.anchor_grid.clone().cpu().view(-1, 2) # current anchors
+ bpr, aat = metric(anchors)
print(f'anchors/target = {aat:.2f}, Best Possible Recall (BPR) = {bpr:.4f}', end='')
if bpr < 0.98: # threshold to recompute
print('. Attempting to improve anchors, please wait...')
na = m.anchor_grid.numel() // 2 # number of anchors
- new_anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False)
- new_bpr = metric(new_anchors.reshape(-1, 2))[0]
+ try:
+ anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False)
+ except Exception as e:
+ print(f'{prefix}ERROR: {e}')
+ new_bpr = metric(anchors)[0]
if new_bpr > bpr: # replace anchors
- new_anchors = torch.tensor(new_anchors, device=m.anchors.device).type_as(m.anchors)
- m.anchor_grid[:] = new_anchors.clone().view_as(m.anchor_grid) # for inference
- m.anchors[:] = new_anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1) # loss
+ anchors = torch.tensor(anchors, device=m.anchors.device).type_as(m.anchors)
+ m.anchor_grid[:] = anchors.clone().view_as(m.anchor_grid) # for inference
+ m.anchors[:] = anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1) # loss
check_anchor_order(m)
print(f'{prefix}New anchors saved to model. Update model *.yaml to use these anchors in the future.')
else:
@@ -98,7 +102,7 @@ def print_results(k):
if isinstance(path, str): # *.yaml file
with open(path) as f:
- data_dict = yaml.load(f, Loader=yaml.FullLoader) # model dict
+ data_dict = yaml.safe_load(f) # model dict
from utils.datasets import LoadImagesAndLabels
dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True)
else:
@@ -119,6 +123,7 @@ def print_results(k):
print(f'{prefix}Running kmeans for {n} anchors on {len(wh)} points...')
s = wh.std(0) # sigmas for whitening
k, dist = kmeans(wh / s, n, iter=30) # points, mean distance
+ assert len(k) == n, f'{prefix}ERROR: scipy.cluster.vq.kmeans requested {n} points but returned only {len(k)}'
k *= s
wh = torch.tensor(wh, dtype=torch.float32) # filtered
wh0 = torch.tensor(wh0, dtype=torch.float32) # unfiltered
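The try/except above only guards anchor re-computation; the accept/reject decision still hinges on the BPR metric from the first hunk. A standalone sketch of that metric, with `thr=4.0` assumed to mirror the default `anchor_t` hyperparameter (not part of this diff):

```python
# Sketch: Best Possible Recall (BPR) as computed in check_anchors().
import torch

def bpr(anchors, wh, thr=4.0):
    r = wh[:, None] / anchors[None]       # (n_labels, n_anchors, 2) width/height ratios
    x = torch.min(r, 1. / r).min(2)[0]    # worst-case ratio match per label/anchor pair
    best = x.max(1)[0]                    # best anchor match per label
    return (best > 1. / thr).float().mean().item()

anchors = torch.tensor([[10., 13.], [16., 30.], [33., 23.]])  # P3 anchors from the model summary above
wh = torch.tensor([[12., 14.], [40., 80.], [300., 260.]])     # hypothetical label sizes in pixels
print(f'BPR = {bpr(anchors, wh):.4f}')                        # the 300x260 label matches no anchor
```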
diff --git a/utils/aws/__init__.py b/utils/aws/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/utils/aws/mime.sh b/utils/aws/mime.sh
new file mode 100644
index 000000000000..c319a83cfbdf
--- /dev/null
+++ b/utils/aws/mime.sh
@@ -0,0 +1,26 @@
+# AWS EC2 instance startup 'MIME' script https://aws.amazon.com/premiumsupport/knowledge-center/execute-user-data-ec2/
+# This script will run on every instance restart, not only on first start
+# --- DO NOT COPY ABOVE COMMENTS WHEN PASTING INTO USERDATA ---
+
+Content-Type: multipart/mixed; boundary="//"
+MIME-Version: 1.0
+
+--//
+Content-Type: text/cloud-config; charset="us-ascii"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Content-Disposition: attachment; filename="cloud-config.txt"
+
+#cloud-config
+cloud_final_modules:
+- [scripts-user, always]
+
+--//
+Content-Type: text/x-shellscript; charset="us-ascii"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Content-Disposition: attachment; filename="userdata.txt"
+
+#!/bin/bash
+# --- paste contents of userdata.sh here ---
+--//
diff --git a/utils/aws/resume.py b/utils/aws/resume.py
new file mode 100644
index 000000000000..4b0d4246b594
--- /dev/null
+++ b/utils/aws/resume.py
@@ -0,0 +1,37 @@
+# Resume all interrupted trainings in yolov5/ dir including DDP trainings
+# Usage: $ python utils/aws/resume.py
+
+import os
+import sys
+from pathlib import Path
+
+import torch
+import yaml
+
+sys.path.append('./') # to run '$ python *.py' files in subdirectories
+
+port = 0 # --master_port
+path = Path('').resolve()
+for last in path.rglob('*/**/last.pt'):
+ ckpt = torch.load(last)
+ if ckpt['optimizer'] is None:
+ continue
+
+ # Load opt.yaml
+ with open(last.parent.parent / 'opt.yaml') as f:
+ opt = yaml.safe_load(f)
+
+ # Get device count
+ d = opt['device'].split(',') # devices
+ nd = len(d) # number of devices
+ ddp = nd > 1 or (nd == 0 and torch.cuda.device_count() > 1) # distributed data parallel
+
+ if ddp: # multi-GPU
+ port += 1
+ cmd = f'python -m torch.distributed.launch --nproc_per_node {nd} --master_port {port} train.py --resume {last}'
+ else: # single-GPU
+ cmd = f'python train.py --resume {last}'
+
+ cmd += ' > /dev/null 2>&1 &' # redirect output to /dev/null and run in background
+ print(cmd)
+ os.system(cmd)
diff --git a/utils/aws/userdata.sh b/utils/aws/userdata.sh
new file mode 100644
index 000000000000..890606b76a06
--- /dev/null
+++ b/utils/aws/userdata.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+# AWS EC2 instance startup script https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html
+# This script will run only once on first instance start (for a re-start script see mime.sh)
+# /home/ubuntu (ubuntu) or /home/ec2-user (amazon-linux) is working dir
+# Use >300 GB SSD
+
+cd home/ubuntu
+if [ ! -d yolov5 ]; then
+ echo "Running first-time script." # install dependencies, download COCO, pull Docker
+ git clone https://github.com/ultralytics/yolov5 && sudo chmod -R 777 yolov5
+ cd yolov5
+ bash data/scripts/get_coco.sh && echo "Data done." &
+ sudo docker pull ultralytics/yolov5:latest && echo "Docker done." &
+ python -m pip install --upgrade pip && pip install -r requirements.txt && python detect.py && echo "Requirements done." &
+ wait && echo "All tasks done." # finish background tasks
+else
+ echo "Running re-start script." # resume interrupted runs
+ i=0
+ list=$(sudo docker ps -qa) # container list i.e. $'one\ntwo\nthree\nfour'
+ while IFS= read -r id; do
+ ((i++))
+ echo "restarting container $i: $id"
+ sudo docker start $id
+ # sudo docker exec -it $id python train.py --resume # single-GPU
+ sudo docker exec -d $id python utils/aws/resume.py # multi-scenario
+ done <<<"$list"
+fi
diff --git a/utils/datasets.py b/utils/datasets.py
index 6e6e3253771b..3fcdddd7c013 100755
--- a/utils/datasets.py
+++ b/utils/datasets.py
@@ -20,12 +20,13 @@
from torch.utils.data import Dataset
from tqdm import tqdm
-from utils.general import xyxy2xywh, xywh2xyxy, clean_str
+from utils.general import check_requirements, xyxy2xywh, xywh2xyxy, xywhn2xyxy, xyn2xy, segment2box, segments2boxes, \
+ resample_segments, clean_str
from utils.torch_utils import torch_distributed_zero_first
# Parameters
help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
-img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng'] # acceptable image suffixes
+img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo'] # acceptable image suffixes
vid_formats = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes
logger = logging.getLogger(__name__)
@@ -119,9 +120,8 @@ def __iter__(self):
class LoadImages: # for inference
- def __init__(self, path, img_size=640):
- p = str(Path(path)) # os-agnostic
- p = os.path.abspath(p) # absolute path
+ def __init__(self, path, img_size=640, stride=32):
+ p = str(Path(path).absolute()) # os-agnostic absolute path
if '*' in p:
files = sorted(glob.glob(p, recursive=True)) # glob
elif os.path.isdir(p):
@@ -136,6 +136,7 @@ def __init__(self, path, img_size=640):
ni, nv = len(images), len(videos)
self.img_size = img_size
+ self.stride = stride
self.files = images + videos
self.nf = ni + nv # number of files
self.video_flag = [False] * ni + [True] * nv
@@ -181,7 +182,7 @@ def __next__(self):
print(f'image {self.count}/{self.nf} {path}: ', end='')
# Padded resize
- img = letterbox(img0, new_shape=self.img_size)[0]
+ img = letterbox(img0, self.img_size, stride=self.stride)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
@@ -199,8 +200,9 @@ def __len__(self):
class LoadWebcam: # for inference
- def __init__(self, pipe='0', img_size=640):
+ def __init__(self, pipe='0', img_size=640, stride=32):
self.img_size = img_size
+ self.stride = stride
if pipe.isnumeric():
pipe = eval(pipe) # local camera
@@ -243,7 +245,7 @@ def __next__(self):
print(f'webcam {self.count}: ', end='')
# Padded resize
- img = letterbox(img0, new_shape=self.img_size)[0]
+ img = letterbox(img0, self.img_size, stride=self.stride)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
@@ -256,9 +258,10 @@ def __len__(self):
class LoadStreams: # multiple IP or RTSP cameras
- def __init__(self, sources='streams.txt', img_size=640):
+ def __init__(self, sources='streams.txt', img_size=640, stride=32):
self.mode = 'stream'
self.img_size = img_size
+ self.stride = stride
if os.path.isfile(sources):
with open(sources, 'r') as f:
@@ -269,22 +272,28 @@ def __init__(self, sources='streams.txt', img_size=640):
n = len(sources)
self.imgs = [None] * n
self.sources = [clean_str(x) for x in sources] # clean source names for later
- for i, s in enumerate(sources):
- # Start the thread to read frames from the video stream
+ for i, s in enumerate(sources): # index, source
+ # Start thread to read frames from video stream
print(f'{i + 1}/{n}: {s}... ', end='')
- cap = cv2.VideoCapture(eval(s) if s.isnumeric() else s)
+ if 'youtube.com/' in s or 'youtu.be/' in s: # if source is YouTube video
+ check_requirements(('pafy', 'youtube_dl'))
+ import pafy
+ s = pafy.new(s).getbest(preftype="mp4").url # YouTube URL
+ s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam
+ cap = cv2.VideoCapture(s)
assert cap.isOpened(), f'Failed to open {s}'
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
- fps = cap.get(cv2.CAP_PROP_FPS) % 100
+ self.fps = cap.get(cv2.CAP_PROP_FPS) % 100
+
_, self.imgs[i] = cap.read() # guarantee first frame
thread = Thread(target=self.update, args=([i, cap]), daemon=True)
- print(f' success ({w}x{h} at {fps:.2f} FPS).')
+ print(f' success ({w}x{h} at {self.fps:.2f} FPS).')
thread.start()
print('') # newline
# check for common shapes
- s = np.stack([letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0) # inference shapes
+ s = np.stack([letterbox(x, self.img_size, stride=self.stride)[0].shape for x in self.imgs], 0) # shapes
self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
if not self.rect:
print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
@@ -297,9 +306,10 @@ def update(self, index, cap):
# _, self.imgs[index] = cap.read()
cap.grab()
if n == 4: # read every 4th frame
- _, self.imgs[index] = cap.retrieve()
+ success, im = cap.retrieve()
+ self.imgs[index] = im if success else self.imgs[index] * 0
n = 0
- time.sleep(0.01) # wait time
+ time.sleep(1 / self.fps) # wait time
def __iter__(self):
self.count = -1
@@ -313,7 +323,7 @@ def __next__(self):
raise StopIteration
# Letterbox
- img = [letterbox(x, new_shape=self.img_size, auto=self.rect)[0] for x in img0]
+ img = [letterbox(x, self.img_size, auto=self.rect, stride=self.stride)[0] for x in img0]
# Stack
img = np.stack(img, 0)
@@ -331,7 +341,7 @@ def __len__(self):
def img2label_paths(img_paths):
# Define label paths as a function of image paths
sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings
- return [x.replace(sa, sb, 1).replace('.' + x.split('.')[-1], '.txt') for x in img_paths]
+ return ['txt'.join(x.replace(sa, sb, 1).rsplit(x.split('.')[-1], 1)) for x in img_paths]
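This one-liner is subtle: the old `str.replace` swapped every occurrence of the extension substring, so a directory name such as `images.jpg.backup` would be mangled, whereas `rsplit(..., 1)` touches only the trailing extension. A sketch isolating the extension handling (the real function also swaps the `/images/` path segment for `/labels/`, and the path below is hypothetical):

```python
# Sketch: old vs. new extension handling in img2label_paths().
def old_ext(x):
    return x.replace('.' + x.split('.')[-1], '.txt')   # replaces ALL '.jpg' occurrences

def new_ext(x):
    return 'txt'.join(x.rsplit(x.split('.')[-1], 1))   # replaces only the last one

p = 'data/images.jpg.backup/train/001.jpg'
print(old_ext(p))   # data/images.txt.backup/train/001.txt  <- directory name corrupted
print(new_ext(p))   # data/images.jpg.backup/train/001.txt
```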
class LoadImagesAndLabels(Dataset): # for training/testing
@@ -345,6 +355,7 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r
self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
self.mosaic_border = [-img_size // 2, -img_size // 2]
self.stride = stride
+ self.path = path
try:
f = [] # image files
@@ -352,37 +363,42 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r
p = Path(p) # os-agnostic
if p.is_dir(): # dir
f += glob.glob(str(p / '**' / '*.*'), recursive=True)
+ # f = list(p.rglob('**/*.*')) # pathlib
elif p.is_file(): # file
with open(p, 'r') as t:
t = t.read().strip().splitlines()
parent = str(p.parent) + os.sep
f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path
+ # f += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib)
else:
raise Exception(f'{prefix}{p} does not exist')
self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in img_formats])
+ # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in img_formats]) # pathlib
assert self.img_files, f'{prefix}No images found'
except Exception as e:
raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {help_url}')
# Check cache
self.label_files = img2label_paths(self.img_files) # labels
- cache_path = Path(self.label_files[0]).parent.with_suffix('.cache') # cached labels
+ cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') # cached labels
if cache_path.is_file():
- cache = torch.load(cache_path) # load
- if cache['hash'] != get_hash(self.label_files + self.img_files) or 'results' not in cache: # changed
- cache = self.cache_labels(cache_path, prefix) # re-cache
+ cache, exists = torch.load(cache_path), True # load
+ if cache['hash'] != get_hash(self.label_files + self.img_files) or 'version' not in cache: # changed
+ cache, exists = self.cache_labels(cache_path, prefix), False # re-cache
else:
- cache = self.cache_labels(cache_path, prefix) # cache
+ cache, exists = self.cache_labels(cache_path, prefix), False # cache
# Display cache
- [nf, nm, ne, nc, n] = cache.pop('results') # found, missing, empty, corrupted, total
- desc = f"Scanning '{cache_path}' for images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted"
- tqdm(None, desc=prefix + desc, total=n, initial=n)
+ nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupted, total
+ if exists:
+ d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted"
+ tqdm(None, desc=prefix + d, total=n, initial=n) # display cache results
assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {help_url}'
# Read cache
cache.pop('hash') # remove hash
- labels, shapes = zip(*cache.values())
+ cache.pop('version') # remove version
+ labels, shapes, self.segments = zip(*cache.values())
self.labels = list(labels)
self.shapes = np.array(shapes, dtype=np.float64)
self.img_files = list(cache.keys()) # update
@@ -433,6 +449,7 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r
self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # img, hw_original, hw_resized = load_image(self, i)
gb += self.imgs[i].nbytes
pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB)'
+ pbar.close()
def cache_labels(self, path=Path('./labels.cache'), prefix=''):
# Cache dataset labels, check images and read shapes
@@ -445,13 +462,20 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''):
im = Image.open(im_file)
im.verify() # PIL verify
shape = exif_size(im) # image size
- assert (shape[0] > 9) & (shape[1] > 9), 'image size <10 pixels'
+ segments = [] # instance segments
+ assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels'
+ assert im.format.lower() in img_formats, f'invalid image format {im.format}'
# verify labels
if os.path.isfile(lb_file):
nf += 1 # label found
with open(lb_file, 'r') as f:
- l = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels
+ l = [x.split() for x in f.read().strip().splitlines()]
+ if any([len(x) > 8 for x in l]): # is segment
+ classes = np.array([x[0] for x in l], dtype=np.float32)
+ segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in l] # (cls, xy1...)
+ l = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh)
+ l = np.array(l, dtype=np.float32)
if len(l):
assert l.shape[1] == 5, 'labels require 5 columns each'
assert (l >= 0).all(), 'negative labels'
@@ -463,19 +487,21 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''):
else:
nm += 1 # label missing
l = np.zeros((0, 5), dtype=np.float32)
- x[im_file] = [l, shape]
+ x[im_file] = [l, shape, segments]
except Exception as e:
nc += 1
print(f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}')
- pbar.desc = f"{prefix}Scanning '{path.parent / path.stem}' for images and labels... " \
+ pbar.desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels... " \
f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted"
+ pbar.close()
if nf == 0:
print(f'{prefix}WARNING: No labels found in {path}. See {help_url}')
x['hash'] = get_hash(self.label_files + self.img_files)
- x['results'] = [nf, nm, ne, nc, i + 1]
+ x['results'] = nf, nm, ne, nc, i + 1
+ x['version'] = 0.1 # cache version
torch.save(x, path) # save for next time
logging.info(f'{prefix}New cache created: {path}')
return x
@@ -515,16 +541,9 @@ def __getitem__(self, index):
img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
- # Load labels
- labels = []
- x = self.labels[index]
- if x.size > 0:
- # Normalized xywh to pixel xyxy format
- labels = x.copy()
- labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0] # pad width
- labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1] # pad height
- labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0]
- labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1]
+ labels = self.labels[index].copy()
+ if labels.size: # normalized xywh to pixel xyxy format
+ labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1])
if self.augment:
# Augment imagespace
@@ -615,10 +634,10 @@ def load_image(self, index):
img = cv2.imread(path) # BGR
assert img is not None, 'Image Not Found ' + path
h0, w0 = img.shape[:2] # orig hw
- r = self.img_size / max(h0, w0) # resize image to img_size
- if r != 1: # always resize down, only resize up if training with augmentation
- interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR
- img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
+ r = self.img_size / max(h0, w0) # ratio
+ if r != 1: # if sizes are not equal
+ img = cv2.resize(img, (int(w0 * r), int(h0 * r)),
+ interpolation=cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR)
return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized
else:
return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized
@@ -637,19 +656,25 @@ def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed
- # Histogram equalization
- # if random.random() < 0.2:
- # for i in range(3):
- # img[:, :, i] = cv2.equalizeHist(img[:, :, i])
+
+def hist_equalize(img, clahe=True, bgr=False):
+ # Equalize histogram on BGR image 'img' with img.shape(n,m,3) and range 0-255
+ yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV)
+ if clahe:
+ c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
+ yuv[:, :, 0] = c.apply(yuv[:, :, 0])
+ else:
+ yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0]) # equalize Y channel histogram
+ return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB) # convert YUV image to RGB
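Nothing in this diff appears to call the new `hist_equalize()` helper yet; a sketch of invoking it directly on an OpenCV frame (the file name is illustrative):

```python
# Sketch: CLAHE-equalize a BGR frame with the new hist_equalize() helper.
import cv2
from utils.datasets import hist_equalize  # assumes the patched module is importable

im = cv2.imread('data/images/bus.jpg')            # BGR uint8 image
eq = hist_equalize(im, clahe=True, bgr=True)      # equalizes only the Y (luma) channel
cv2.imwrite('bus_clahe.jpg', eq)
```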
def load_mosaic(self, index):
# loads images in a 4-mosaic
- labels4 = []
+ labels4, segments4 = [], []
s = self.img_size
yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y
- indices = [index] + [self.indices[random.randint(0, self.n - 1)] for _ in range(3)] # 3 additional image indices
+ indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
@@ -674,23 +699,21 @@ def load_mosaic(self, index):
padh = y1a - y1b
# Labels
- x = self.labels[index]
- labels = x.copy()
- if x.size > 0: # Normalized xywh to pixel xyxy format
- labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw
- labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh
- labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw
- labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh
+ labels, segments = self.labels[index].copy(), self.segments[index].copy()
+ if labels.size:
+ labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format
+ segments = [xyn2xy(x, w, h, padw, padh) for x in segments]
labels4.append(labels)
+ segments4.extend(segments)
# Concat/clip labels
- if len(labels4):
- labels4 = np.concatenate(labels4, 0)
- np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:]) # use with random_perspective
- # img4, labels4 = replicate(img4, labels4) # replicate
+ labels4 = np.concatenate(labels4, 0)
+ for x in (labels4[:, 1:], *segments4):
+ np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()
+ # img4, labels4 = replicate(img4, labels4) # replicate
# Augment
- img4, labels4 = random_perspective(img4, labels4,
+ img4, labels4 = random_perspective(img4, labels4, segments4,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
@@ -704,9 +727,9 @@ def load_mosaic(self, index):
def load_mosaic9(self, index):
# loads images in a 9-mosaic
- labels9 = []
+ labels9, segments9 = [], []
s = self.img_size
- indices = [index] + [self.indices[random.randint(0, self.n - 1)] for _ in range(8)] # 8 additional image indices
+ indices = [index] + random.choices(self.indices, k=8) # 8 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
@@ -737,34 +760,34 @@ def load_mosaic9(self, index):
x1, y1, x2, y2 = [max(x, 0) for x in c] # allocate coords
# Labels
- x = self.labels[index]
- labels = x.copy()
- if x.size > 0: # Normalized xywh to pixel xyxy format
- labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padx
- labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + pady
- labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padx
- labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + pady
+ labels, segments = self.labels[index].copy(), self.segments[index].copy()
+ if labels.size:
+ labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady) # normalized xywh to pixel xyxy format
+ segments = [xyn2xy(x, w, h, padx, pady) for x in segments]
labels9.append(labels)
+ segments9.extend(segments)
# Image
img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax]
hp, wp = h, w # height, width previous
# Offset
- yc, xc = [int(random.uniform(0, s)) for x in self.mosaic_border] # mosaic center x, y
+ yc, xc = [int(random.uniform(0, s)) for _ in self.mosaic_border] # mosaic center x, y
img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s]
# Concat/clip labels
- if len(labels9):
- labels9 = np.concatenate(labels9, 0)
- labels9[:, [1, 3]] -= xc
- labels9[:, [2, 4]] -= yc
+ labels9 = np.concatenate(labels9, 0)
+ labels9[:, [1, 3]] -= xc
+ labels9[:, [2, 4]] -= yc
+ c = np.array([xc, yc]) # centers
+ segments9 = [x - c for x in segments9]
- np.clip(labels9[:, 1:], 0, 2 * s, out=labels9[:, 1:]) # use with random_perspective
- # img9, labels9 = replicate(img9, labels9) # replicate
+ for x in (labels9[:, 1:], *segments9):
+ np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()
+ # img9, labels9 = replicate(img9, labels9) # replicate
# Augment
- img9, labels9 = random_perspective(img9, labels9,
+ img9, labels9 = random_perspective(img9, labels9, segments9,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
@@ -792,8 +815,8 @@ def replicate(img, labels):
return img, labels
-def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
- # Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
+def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):
+ # Resize and pad image while meeting stride-multiple constraints
shape = img.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
@@ -808,7 +831,7 @@ def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scale
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if auto: # minimum rectangle
- dw, dh = np.mod(dw, 32), np.mod(dh, 32) # wh padding
+ dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding
elif scaleFill: # stretch
dw, dh = 0.0, 0.0
new_unpad = (new_shape[1], new_shape[0])
@@ -825,7 +848,8 @@ def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scale
return img, ratio, (dw, dh)
-def random_perspective(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, border=(0, 0)):
+def random_perspective(img, targets=(), segments=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0,
+ border=(0, 0)):
# torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
# targets = [cls, xyxy]
@@ -877,37 +901,38 @@ def random_perspective(img, targets=(), degrees=10, translate=.1, scale=.1, shea
# Transform label coordinates
n = len(targets)
if n:
- # warp points
- xy = np.ones((n * 4, 3))
- xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
- xy = xy @ M.T # transform
- if perspective:
- xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 8) # rescale
- else: # affine
- xy = xy[:, :2].reshape(n, 8)
-
- # create new boxes
- x = xy[:, [0, 2, 4, 6]]
- y = xy[:, [1, 3, 5, 7]]
- xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
-
- # # apply angle-based reduction of bounding boxes
- # radians = a * math.pi / 180
- # reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
- # x = (xy[:, 2] + xy[:, 0]) / 2
- # y = (xy[:, 3] + xy[:, 1]) / 2
- # w = (xy[:, 2] - xy[:, 0]) * reduction
- # h = (xy[:, 3] - xy[:, 1]) * reduction
- # xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T
-
- # clip boxes
- xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
- xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
+ use_segments = any(x.any() for x in segments)
+ new = np.zeros((n, 4))
+ if use_segments: # warp segments
+ segments = resample_segments(segments) # upsample
+ for i, segment in enumerate(segments):
+ xy = np.ones((len(segment), 3))
+ xy[:, :2] = segment
+ xy = xy @ M.T # transform
+ xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2] # perspective rescale or affine
+
+ # clip
+ new[i] = segment2box(xy, width, height)
+
+ else: # warp boxes
+ xy = np.ones((n * 4, 3))
+ xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
+ xy = xy @ M.T # transform
+ xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine
+
+ # create new boxes
+ x = xy[:, [0, 2, 4, 6]]
+ y = xy[:, [1, 3, 5, 7]]
+ new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
+
+ # clip
+ new[:, [0, 2]] = new[:, [0, 2]].clip(0, width)
+ new[:, [1, 3]] = new[:, [1, 3]].clip(0, height)
# filter candidates
- i = box_candidates(box1=targets[:, 1:5].T * s, box2=xy.T)
+ i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10)
targets = targets[i]
- targets[:, 1:5] = xy[i]
+ targets[:, 1:5] = new[i]
return img, targets
@@ -1016,19 +1041,24 @@ def extract_boxes(path='../coco128/'): # from utils.datasets import *; extract_
assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'
-def autosplit(path='../coco128', weights=(0.9, 0.1, 0.0)): # from utils.datasets import *; autosplit('../coco128')
+def autosplit(path='../coco128', weights=(0.9, 0.1, 0.0), annotated_only=False):
""" Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
- # Arguments
- path: Path to images directory
- weights: Train, val, test weights (list)
+ Usage: from utils.datasets import *; autosplit('../coco128')
+ Arguments
+ path: Path to images directory
+ weights: Train, val, test weights (list)
+ annotated_only: Only use images with an annotated txt file
"""
path = Path(path) # images dir
- files = list(path.rglob('*.*'))
+ files = sum([list(path.rglob(f"*.{img_ext}")) for img_ext in img_formats], []) # image files only
n = len(files) # number of files
indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split
+
txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files
[(path / x).unlink() for x in txt if (path / x).exists()] # remove existing
+
+ print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only)
for i, img in tqdm(zip(indices, files), total=n):
- if img.suffix[1:] in img_formats:
+ if not annotated_only or Path(img2label_paths([str(img)])[0]).exists(): # check label
with open(path / txt[i], 'a') as f:
f.write(str(img) + '\n') # add image to txt file
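A usage sketch for the extended `autosplit()`; the COCO128 layout below mirrors the tutorial output earlier in this diff and is an assumption about your local paths:

```python
# Sketch: 80/20/0 train/val/test split restricted to images with a *.txt label file.
from utils.datasets import autosplit

autosplit(path='../coco128/images/train2017', weights=(0.8, 0.2, 0.0), annotated_only=True)
# autosplit_train.txt / autosplit_val.txt / autosplit_test.txt are written into the images directory
```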
diff --git a/utils/flask_rest_api/README.md b/utils/flask_rest_api/README.md
new file mode 100644
index 000000000000..0cdc51be692d
--- /dev/null
+++ b/utils/flask_rest_api/README.md
@@ -0,0 +1,51 @@
+# Flask REST API
+[REST](https://en.wikipedia.org/wiki/Representational_state_transfer) [API](https://en.wikipedia.org/wiki/API)s are commonly used to expose Machine Learning (ML) models to other services. This folder contains an example REST API created using Flask to expose the `yolov5s` model from [PyTorch Hub](https://pytorch.org/hub/ultralytics_yolov5/).
+
+## Requirements
+
+[Flask](https://palletsprojects.com/p/flask/) is required. Install with:
+```shell
+$ pip install Flask
+```
+
+## Run
+
+After Flask installation run:
+
+```shell
+$ python3 restapi.py --port 5000
+```
+
+Then use [curl](https://curl.se/) to perform a request:
+
+```shell
+$ curl -X POST -F image=@zidane.jpg 'http://localhost:5000/v1/object-detection/yolov5s'
+```
+
+The model inference results are returned:
+
+```shell
+[{'class': 0,
+ 'confidence': 0.8197850585,
+ 'name': 'person',
+ 'xmax': 1159.1403808594,
+ 'xmin': 750.912902832,
+ 'ymax': 711.2583007812,
+ 'ymin': 44.0350036621},
+ {'class': 0,
+ 'confidence': 0.5667674541,
+ 'name': 'person',
+ 'xmax': 1065.5523681641,
+ 'xmin': 116.0448303223,
+ 'ymax': 713.8904418945,
+ 'ymin': 198.4603881836},
+ {'class': 27,
+ 'confidence': 0.5661227107,
+ 'name': 'tie',
+ 'xmax': 516.7975463867,
+ 'xmin': 416.6880187988,
+ 'ymax': 717.0524902344,
+ 'ymin': 429.2020568848}]
+```
+
+An example Python script to perform inference using [requests](https://docs.python-requests.org/en/master/) is given in `example_request.py`.
diff --git a/utils/flask_rest_api/example_request.py b/utils/flask_rest_api/example_request.py
new file mode 100644
index 000000000000..ff21f30f93ca
--- /dev/null
+++ b/utils/flask_rest_api/example_request.py
@@ -0,0 +1,13 @@
+"""Perform test request"""
+import pprint
+
+import requests
+
+DETECTION_URL = "http://localhost:5000/v1/object-detection/yolov5s"
+TEST_IMAGE = "zidane.jpg"
+
+image_data = open(TEST_IMAGE, "rb").read()
+
+response = requests.post(DETECTION_URL, files={"image": image_data}).json()
+
+pprint.pprint(response)
diff --git a/utils/flask_rest_api/restapi.py b/utils/flask_rest_api/restapi.py
new file mode 100644
index 000000000000..9d88f618905d
--- /dev/null
+++ b/utils/flask_rest_api/restapi.py
@@ -0,0 +1,38 @@
+"""
+Run a rest API exposing the yolov5s object detection model
+"""
+import argparse
+import io
+
+import torch
+from PIL import Image
+from flask import Flask, request
+
+app = Flask(__name__)
+
+DETECTION_URL = "/v1/object-detection/yolov5s"
+
+
+@app.route(DETECTION_URL, methods=["POST"])
+def predict():
+ if not request.method == "POST":
+ return
+
+ if request.files.get("image"):
+ image_file = request.files["image"]
+ image_bytes = image_file.read()
+
+ img = Image.open(io.BytesIO(image_bytes))
+
+ results = model(img, size=640)
+ data = results.pandas().xyxy[0].to_json(orient="records")
+ return data
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description="Flask api exposing yolov5 model")
+ parser.add_argument("--port", default=5000, type=int, help="port number")
+ args = parser.parse_args()
+
+ model = torch.hub.load("ultralytics/yolov5", "yolov5s", force_reload=True).autoshape() # force_reload to recache
+ app.run(host="0.0.0.0", port=args.port) # debug=True causes Restarting with stat
diff --git a/utils/general.py b/utils/general.py
index 3247b66da0ce..fbb99b9e7f99 100755
--- a/utils/general.py
+++ b/utils/general.py
@@ -1,17 +1,21 @@
-# General utils
+# YOLOv5 general utils
import glob
import logging
import math
import os
+import platform
import random
import re
import subprocess
import time
+from itertools import repeat
+from multiprocessing.pool import ThreadPool
from pathlib import Path
import cv2
import numpy as np
+import pandas as pd
import torch
import torchvision
import yaml
@@ -23,14 +27,15 @@
# Settings
torch.set_printoptions(linewidth=320, precision=5, profile='long')
np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5
+pd.options.display.max_columns = 10
cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader)
os.environ['NUMEXPR_MAX_THREADS'] = str(min(os.cpu_count(), 8)) # NumExpr max threads
-def set_logging(rank=-1):
+def set_logging(rank=-1, verbose=True):
logging.basicConfig(
format="%(message)s",
- level=logging.INFO if rank in [-1, 0] else logging.WARN)
+ level=logging.INFO if (verbose and rank in [-1, 0]) else logging.WARN)
def init_seeds(seed=0):
@@ -46,11 +51,26 @@ def get_latest_run(search_dir='.'):
return max(last_list, key=os.path.getctime) if last_list else ''
+def isdocker():
+ # Is environment a Docker container
+ return Path('/workspace').exists() # or Path('/.dockerenv').exists()
+
+
+def emojis(str=''):
+ # Return platform-dependent emoji-safe version of string
+ return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str
+
+
+def file_size(file):
+ # Return file size in MB
+ return Path(file).stat().st_size / 1e6
+
+
def check_online():
# Check internet connectivity
import socket
try:
- socket.create_connection(("1.1.1.1", 53)) # check host accesability
+ socket.create_connection(("1.1.1.1", 443), 5) # check host accesability
return True
except OSError:
return False
@@ -60,26 +80,51 @@ def check_git_status():
# Recommend 'git pull' if code is out of date
print(colorstr('github: '), end='')
try:
- if Path('.git').exists() and check_online():
- url = subprocess.check_output(
- 'git fetch && git config --get remote.origin.url', shell=True).decode('utf-8')[:-1]
- n = int(subprocess.check_output(
- 'git rev-list $(git rev-parse --abbrev-ref HEAD)..origin/master --count', shell=True)) # commits behind
- if n > 0:
- print(f"⚠️ WARNING: code is out of date by {n} {'commits' if n > 1 else 'commmit'}. "
- f"Use 'git pull' to update or 'git clone {url}' to download latest.")
- else:
- print(f'up to date with {url} ✅')
+ assert Path('.git').exists(), 'skipping check (not a git repository)'
+ assert not isdocker(), 'skipping check (Docker image)'
+ assert check_online(), 'skipping check (offline)'
+
+ cmd = 'git fetch && git config --get remote.origin.url'
+ url = subprocess.check_output(cmd, shell=True).decode().strip().rstrip('.git') # github repo url
+ branch = subprocess.check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out
+ n = int(subprocess.check_output(f'git rev-list {branch}..origin/master --count', shell=True)) # commits behind
+ if n > 0:
+ s = f"⚠️ WARNING: code is out of date by {n} commit{'s' * (n > 1)}. " \
+ f"Use 'git pull' to update or 'git clone {url}' to download latest."
+ else:
+ s = f'up to date with {url} ✅'
+ print(emojis(s)) # emoji-safe
except Exception as e:
print(e)
-def check_requirements(file='requirements.txt'):
- # Check installed dependencies meet requirements
- import pkg_resources
- requirements = pkg_resources.parse_requirements(Path(file).open())
- requirements = [x.name + ''.join(*x.specs) if len(x.specs) else x.name for x in requirements]
- pkg_resources.require(requirements) # DistributionNotFound or VersionConflict exception if requirements not met
+def check_requirements(requirements='requirements.txt', exclude=()):
+ # Check installed dependencies meet requirements (pass *.txt file or list of packages)
+ import pkg_resources as pkg
+ prefix = colorstr('red', 'bold', 'requirements:')
+ if isinstance(requirements, (str, Path)): # requirements.txt file
+ file = Path(requirements)
+ if not file.exists():
+ print(f"{prefix} {file.resolve()} not found, check failed.")
+ return
+ requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(file.open()) if x.name not in exclude]
+ else: # list or tuple of packages
+ requirements = [x for x in requirements if x not in exclude]
+
+ n = 0 # number of package updates
+ for r in requirements:
+ try:
+ pkg.require(r)
+ except Exception as e: # DistributionNotFound or VersionConflict if requirements not met
+ n += 1
+ print(f"{prefix} {r} not found and is required by YOLOv5, attempting auto-update...")
+ print(subprocess.check_output(f"pip install '{r}'", shell=True).decode())
+
+ if n: # if packages updated
+ source = file.resolve() if 'file' in locals() else requirements
+ s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \
+ f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n"
+ print(emojis(s)) # emoji-safe
def check_img_size(img_size, s=32):
@@ -90,14 +135,28 @@ def check_img_size(img_size, s=32):
return new_size
+def check_imshow():
+ # Check if environment supports image displays
+ try:
+ assert not isdocker(), 'cv2.imshow() is disabled in Docker environments'
+ cv2.imshow('test', np.zeros((1, 1, 3)))
+ cv2.waitKey(1)
+ cv2.destroyAllWindows()
+ cv2.waitKey(1)
+ return True
+ except Exception as e:
+ print(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image displays\n{e}')
+ return False
+
+
def check_file(file):
# Search for file if not found
- if os.path.isfile(file) or file == '':
+ if Path(file).is_file() or file == '':
return file
else:
files = glob.glob('./**/' + file, recursive=True) # find file
- assert len(files), 'File Not Found: %s' % file # assert file was found
- assert len(files) == 1, "Multiple files match '%s', specify exact path: %s" % (file, files) # assert unique
+ assert len(files), f'File Not Found: {file}' # assert file was found
+ assert len(files) == 1, f"Multiple files match '{file}', specify exact path: {files}" # assert unique
return files[0] # return file
@@ -109,18 +168,45 @@ def check_dataset(dict):
if not all(x.exists() for x in val):
print('\nWARNING: Dataset not found, nonexistent paths: %s' % [str(x) for x in val if not x.exists()])
if s and len(s): # download script
- print('Downloading %s ...' % s)
if s.startswith('http') and s.endswith('.zip'): # URL
f = Path(s).name # filename
+ print(f'Downloading {s} ...')
torch.hub.download_url_to_file(s, f)
- r = os.system('unzip -q %s -d ../ && rm %s' % (f, f)) # unzip
- else: # bash script
+ r = os.system(f'unzip -q {f} -d ../ && rm {f}') # unzip
+ elif s.startswith('bash '): # bash script
+ print(f'Running {s} ...')
r = os.system(s)
- print('Dataset autodownload %s\n' % ('success' if r == 0 else 'failure')) # analyze return value
+ else: # python script
+ r = exec(s) # return None
+ print('Dataset autodownload %s\n' % ('success' if r in (0, None) else 'failure')) # print result
else:
raise Exception('Dataset not found.')
+def download(url, dir='.', multi_thread=False):
+ # Multi-threaded file download and unzip function
+ def download_one(url, dir):
+ # Download 1 file
+ f = dir / Path(url).name # filename
+ if not f.exists():
+ print(f'Downloading {url} to {f}...')
+ torch.hub.download_url_to_file(url, f, progress=True) # download
+ if f.suffix in ('.zip', '.gz'):
+ print(f'Unzipping {f}...')
+ if f.suffix == '.zip':
+ os.system(f'unzip -qo {f} -d {dir} && rm {f}') # unzip -quiet -overwrite
+ elif f.suffix == '.gz':
+ os.system(f'tar xfz {f} --directory {f.parent} && rm {f}') # unzip
+
+ dir = Path(dir)
+ dir.mkdir(parents=True, exist_ok=True) # make directory
+ if multi_thread:
+ ThreadPool(8).imap(lambda x: download_one(*x), zip(url, repeat(dir))) # 8 threads
+ else:
+ for u in tuple(url) if isinstance(url, str) else url:
+ download_one(u, dir)
+
+
def make_divisible(x, divisor):
# Returns x evenly divisible by divisor
return math.ceil(x / divisor) * divisor
@@ -220,6 +306,50 @@ def xywh2xyxy(x):
return y
+def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):
+ # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
+ y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
+ y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw # top left x
+ y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh # top left y
+ y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw # bottom right x
+ y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh # bottom right y
+ return y
+
+
+def xyn2xy(x, w=640, h=640, padw=0, padh=0):
+ # Convert normalized segments into pixel segments, shape (n,2)
+ y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
+ y[:, 0] = w * x[:, 0] + padw # top left x
+ y[:, 1] = h * x[:, 1] + padh # top left y
+ return y
+
+
+def segment2box(segment, width=640, height=640):
+ # Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy)
+ x, y = segment.T # segment xy
+ inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height)
+ x, y = x[inside], y[inside]
+ return np.array([x.min(), y.min(), x.max(), y.max()]) if any(x) else np.zeros((1, 4)) # xyxy
+
+
+def segments2boxes(segments):
+ # Convert segment labels to box labels, i.e. (cls, xy1, xy2, ...) to (cls, xywh)
+ boxes = []
+ for s in segments:
+ x, y = s.T # segment xy
+ boxes.append([x.min(), y.min(), x.max(), y.max()]) # cls, xyxy
+ return xyxy2xywh(np.array(boxes)) # cls, xywh
+
+
+def resample_segments(segments, n=1000):
+ # Up-sample an (n,2) segment
+ for i, s in enumerate(segments):
+ x = np.linspace(0, len(s) - 1, n)
+ xp = np.arange(len(s))
+ segments[i] = np.concatenate([np.interp(x, xp, s[:, i]) for i in range(2)]).reshape(2, -1).T # segment xy
+ return segments
+
+
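Quick numeric checks for the coordinate helpers added above, with hand-picked values (no dataset required):

```python
# Sketch: sanity-check xywhn2xyxy() and segments2boxes().
import numpy as np
from utils.general import xywhn2xyxy, segments2boxes

box = np.array([[0.5, 0.5, 0.25, 0.5]])                  # normalized (x, y, w, h)
print(xywhn2xyxy(box, w=640, h=640))                     # [[240. 160. 400. 480.]]

seg = [np.array([[10., 10.], [50., 10.], [50., 40.]])]   # one polygon, pixel xy
print(segments2boxes(seg))                               # [[30. 25. 40. 30.]] (xywh)
```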
def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
# Rescale coords (xyxy) from img1_shape to img0_shape
if ratio_pad is None: # calculate from img0_shape
@@ -244,7 +374,7 @@ def clip_coords(boxes, img_shape):
boxes[:, 3].clamp_(0, img_shape[0]) # y2
-def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-9):
+def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7):
# Returns the IoU of box1 to box2. box1 is 4, box2 is nx4
box2 = box2.T
@@ -280,7 +410,7 @@ def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=
elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
with torch.no_grad():
- alpha = v / ((1 + eps) - iou + v)
+ alpha = v / (v - iou + (1 + eps))
return iou - (rho2 / c2 + v * alpha) # CIoU
else: # GIoU https://arxiv.org/pdf/1902.09630.pdf
c_area = cw * ch + eps # convex area
@@ -322,11 +452,12 @@ def wh_iou(wh1, wh2):
return inter / (wh1.prod(2) + wh2.prod(2) - inter) # iou = inter / (area1 + area2 - inter)
-def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, labels=()):
- """Performs Non-Maximum Suppression (NMS) on inference results
+def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,
+ labels=()):
+ """Runs Non-Maximum Suppression (NMS) on inference results
Returns:
- detections with shape: nx6 (x1, y1, x2, y2, conf, cls)
+ list of detections, on (n,6) tensor per image [xyxy, conf, cls]
"""
nc = prediction.shape[2] - 5 # number of classes
@@ -338,7 +469,7 @@ def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=Non
max_nms = 30000 # maximum number of boxes into torchvision.ops.nms()
time_limit = 10.0 # seconds to quit after
redundant = True # require redundant detections
- multi_label = nc > 1 # multiple labels per box (adds 0.5ms/img)
+ multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img)
merge = False # use merge-NMS
t = time.time()
@@ -412,18 +543,20 @@ def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=Non
return output
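Because `multi_label` now defaults to `False`, callers that relied on the old multiple-labels-per-box behavior must opt in explicitly. A sketch with a fake prediction tensor (the shape mimics a Detect head output; values are random):

```python
# Sketch: call the updated NMS on a fake (batch, boxes, 5 + num_classes) prediction.
import torch
from utils.general import non_max_suppression

pred = torch.rand(1, 1000, 85)   # xywh + objectness + 80 class scores, random values
out = non_max_suppression(pred, conf_thres=0.25, iou_thres=0.45, multi_label=True)
print(out[0].shape)              # (n, 6) tensor per image: x1, y1, x2, y2, conf, cls
```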
-def strip_optimizer(f='weights/best.pt', s=''): # from utils.general import *; strip_optimizer()
+def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer()
# Strip optimizer from 'f' to finalize training, optionally save as 's'
x = torch.load(f, map_location=torch.device('cpu'))
- for key in 'optimizer', 'training_results', 'wandb_id':
- x[key] = None
+ if x.get('ema'):
+ x['model'] = x['ema'] # replace model with ema
+ for k in 'optimizer', 'training_results', 'wandb_id', 'ema', 'updates': # keys
+ x[k] = None
x['epoch'] = -1
x['model'].half() # to FP16
for p in x['model'].parameters():
p.requires_grad = False
torch.save(x, s or f)
mb = os.path.getsize(s or f) / 1E6 # filesize
- print('Optimizer stripped from %s,%s %.1fMB' % (f, (' saved as %s,' % s) if s else '', mb))
+ print(f"Optimizer stripped from {f},{(' saved as %s,' % s) if s else ''} {mb:.1f}MB")
def print_mutation(hyp, results, yaml_file='hyp_evolved.yaml', bucket=''):
@@ -451,14 +584,14 @@ def print_mutation(hyp, results, yaml_file='hyp_evolved.yaml', bucket=''):
results = tuple(x[0, :7])
c = '%10.4g' * len(results) % results # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3)
f.write('# Hyperparameter Evolution Results\n# Generations: %g\n# Metrics: ' % len(x) + c + '\n\n')
- yaml.dump(hyp, f, sort_keys=False)
+ yaml.safe_dump(hyp, f, sort_keys=False)
if bucket:
os.system('gsutil cp evolve.txt %s gs://%s' % (yaml_file, bucket)) # upload
def apply_classifier(x, model, img, im0):
- # applies a second stage classifier to yolo outputs
+ # Apply a second stage classifier to yolo outputs
im0 = [im0] if isinstance(im0, np.ndarray) else im0
for i, d in enumerate(x): # per image
if d is not None and len(d):
@@ -492,14 +625,31 @@ def apply_classifier(x, model, img, im0):
return x
-def increment_path(path, exist_ok=True, sep=''):
- # Increment path, i.e. runs/exp --> runs/exp{sep}0, runs/exp{sep}1 etc.
+def save_one_box(xyxy, im, file='image.jpg', gain=1.02, pad=10, square=False, BGR=False):
+ # Save an image crop as {file} with crop size multiplied by {gain} and padded by {pad} pixels
+ xyxy = torch.tensor(xyxy).view(-1, 4)
+ b = xyxy2xywh(xyxy) # boxes
+ if square:
+ b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # attempt rectangle to square
+ b[:, 2:] = b[:, 2:] * gain + pad # box wh * gain + pad
+ xyxy = xywh2xyxy(b).long()
+ clip_coords(xyxy, im.shape)
+ crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2])]
+ cv2.imwrite(str(increment_path(file, mkdir=True).with_suffix('.jpg')), crop if BGR else crop[..., ::-1])
+
+
+def increment_path(path, exist_ok=False, sep='', mkdir=False):
+ # Increment file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc.
path = Path(path) # os-agnostic
- if (path.exists() and exist_ok) or (not path.exists()):
- return str(path)
- else:
+ if path.exists() and not exist_ok:
+ suffix = path.suffix
+ path = path.with_suffix('')
dirs = glob.glob(f"{path}{sep}*") # similar paths
matches = [re.search(rf"%s{sep}(\d+)" % path.stem, d) for d in dirs]
i = [int(m.groups()[0]) for m in matches if m] # indices
n = max(i) + 1 if i else 2 # increment number
- return f"{path}{sep}{n}" # update path
+ path = Path(f"{path}{sep}{n}{suffix}") # update path
+ dir = path if path.suffix == '' else path.parent # directory
+ if not dir.exists() and mkdir:
+ dir.mkdir(parents=True, exist_ok=True) # make directory
+ return path
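The reworked increment_path now handles file paths as well as directories (the suffix is preserved) and can create the directory in the same call via mkdir=True, which save_one_box relies on above. A usage sketch with illustrative run names:

from pathlib import Path
from utils.general import increment_path

# First call returns runs/train/exp and creates it; later calls yield
# runs/train/exp2, runs/train/exp3, ... because exist_ok now defaults to False.
save_dir = increment_path(Path('runs/train') / 'exp', mkdir=True)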
diff --git a/utils/google_utils.py b/utils/google_utils.py
index 024dc7802f15..6a4660bad509 100644
--- a/utils/google_utils.py
+++ b/utils/google_utils.py
@@ -18,7 +18,7 @@ def gsutil_getsize(url=''):
def attempt_download(file, repo='ultralytics/yolov5'):
# Attempt file download if does not exist
- file = Path(str(file).strip().replace("'", '').lower())
+ file = Path(str(file).strip().replace("'", ''))
if not file.exists():
try:
@@ -26,8 +26,12 @@ def attempt_download(file, repo='ultralytics/yolov5'):
assets = [x['name'] for x in response['assets']] # release assets, i.e. ['yolov5s.pt', 'yolov5m.pt', ...]
tag = response['tag_name'] # i.e. 'v1.0'
except: # fallback plan
- assets = ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']
- tag = subprocess.check_output('git tag', shell=True).decode('utf-8').split('\n')[-2]
+ assets = ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt',
+ 'yolov5s6.pt', 'yolov5m6.pt', 'yolov5l6.pt', 'yolov5x6.pt']
+ try:
+ tag = subprocess.check_output('git tag', shell=True, stderr=subprocess.STDOUT).decode().split()[-1]
+ except:
+ tag = 'v5.0' # current release
name = file.name
if name in assets:
diff --git a/utils/loss.py b/utils/loss.py
index 889ddf7295da..9e78df17fdf3 100644
--- a/utils/loss.py
+++ b/utils/loss.py
@@ -97,7 +97,7 @@ def __init__(self, model, autobalance=False):
BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device))
# Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
- self.cp, self.cn = smooth_BCE(eps=0.0)
+ self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets
# Focal loss
g = h['fl_gamma'] # focal loss gamma
@@ -105,9 +105,8 @@ def __init__(self, model, autobalance=False):
BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)
det = model.module.model[-1] if is_parallel(model) else model.model[-1] # Detect() module
- self.balance = {3: [3.67, 1.0, 0.43], 4: [3.78, 1.0, 0.39, 0.22], 5: [3.88, 1.0, 0.37, 0.17, 0.10]}[det.nl]
- # self.balance = [1.0] * det.nl
- self.ssi = (det.stride == 16).nonzero(as_tuple=False).item() # stride 16 index
+ self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, .02]) # P3-P7
+ self.ssi = list(det.stride).index(16) if autobalance else 0 # stride 16 index
self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, model.gr, h, autobalance
for k in 'na', 'nc', 'nl', 'anchors':
setattr(self, k, getattr(det, k))
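The loss now reads its positive/negative BCE targets from an optional 'label_smoothing' hyperparameter instead of hard-coding eps=0.0. smooth_BCE itself (defined elsewhere in utils/loss.py) is tiny; a sketch of the idea:

def smooth_BCE(eps=0.1):
    # https://arxiv.org/pdf/1902.04103.pdf eqn 3: soften the hard 1/0 BCE targets
    return 1.0 - 0.5 * eps, 0.5 * eps

cp, cn = smooth_BCE(eps=0.1)  # cp=0.95, cn=0.05; eps=0.0 recovers hard 1/0 targets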
diff --git a/utils/metrics.py b/utils/metrics.py
index 99d5bcfaf2af..323c84b6c873 100644
--- a/utils/metrics.py
+++ b/utils/metrics.py
@@ -15,7 +15,7 @@ def fitness(x):
return (x[:, :4] * w).sum(1)
-def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='precision-recall_curve.png', names=[]):
+def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=()):
""" Compute the average precision, given the recall and precision curves.
Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.
# Arguments
@@ -35,12 +35,11 @@ def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='precision
# Find unique classes
unique_classes = np.unique(target_cls)
+ nc = unique_classes.shape[0] # number of classes
# Create Precision-Recall curve and compute AP for each class
px, py = np.linspace(0, 1, 1000), [] # for plotting
- pr_score = 0.1 # score to evaluate P and R https://github.com/ultralytics/yolov3/issues/898
- s = [unique_classes.shape[0], tp.shape[1]] # number class, number iou thresholds (i.e. 10 for mAP0.5...0.95)
- ap, p, r = np.zeros(s), np.zeros(s), np.zeros(s)
+ ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000))
for ci, c in enumerate(unique_classes):
i = pred_cls == c
n_l = (target_cls == c).sum() # number of labels
@@ -55,25 +54,28 @@ def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='precision
# Recall
recall = tpc / (n_l + 1e-16) # recall curve
- r[ci] = np.interp(-pr_score, -conf[i], recall[:, 0]) # r at pr_score, negative x, xp because xp decreases
+ r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0) # negative x, xp because xp decreases
# Precision
precision = tpc / (tpc + fpc) # precision curve
- p[ci] = np.interp(-pr_score, -conf[i], precision[:, 0]) # p at pr_score
+ p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1) # negative x, xp because xp decreases
# AP from recall-precision curve
for j in range(tp.shape[1]):
ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j])
- if plot and (j == 0):
+ if plot and j == 0:
py.append(np.interp(px, mrec, mpre)) # precision at mAP@0.5
- # Compute F1 score (harmonic mean of precision and recall)
+ # Compute F1 (harmonic mean of precision and recall)
f1 = 2 * p * r / (p + r + 1e-16)
-
if plot:
- plot_pr_curve(px, py, ap, save_dir, names)
+ plot_pr_curve(px, py, ap, Path(save_dir) / 'PR_curve.png', names)
+ plot_mc_curve(px, f1, Path(save_dir) / 'F1_curve.png', names, ylabel='F1')
+ plot_mc_curve(px, p, Path(save_dir) / 'P_curve.png', names, ylabel='Precision')
+ plot_mc_curve(px, r, Path(save_dir) / 'R_curve.png', names, ylabel='Recall')
- return p, r, ap, f1, unique_classes.astype('int32')
+ i = f1.mean(0).argmax() # max F1 index
+ return p[:, i], r[:, i], ap, f1[:, i], unique_classes.astype('int32')
def compute_ap(recall, precision):
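ap_per_class now interpolates full 1000-point P/R curves over the confidence grid px and reports a single operating point at the confidence maximizing mean F1, replacing the old fixed pr_score=0.1 evaluation. A NumPy sketch of that reduction (the curve arrays are synthetic):

import numpy as np

px = np.linspace(0, 1, 1000)                # confidence grid
p = np.random.rand(3, 1000)                 # per-class precision curves (fake)
r = np.random.rand(3, 1000)                 # per-class recall curves (fake)
f1 = 2 * p * r / (p + r + 1e-16)
i = f1.mean(0).argmax()                     # best mean-F1 confidence index
best_conf, P, R = px[i], p[:, i], r[:, i]   # P/R reported at that threshold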
@@ -143,14 +145,14 @@ def process_batch(self, detections, labels):
for i, gc in enumerate(gt_classes):
j = m0 == i
if n and sum(j) == 1:
- self.matrix[gc, detection_classes[m1[j]]] += 1 # correct
+ self.matrix[detection_classes[m1[j]], gc] += 1 # correct
else:
- self.matrix[gc, self.nc] += 1 # background FP
+ self.matrix[self.nc, gc] += 1 # background FP
if n:
for i, dc in enumerate(detection_classes):
if not any(m1 == i):
- self.matrix[self.nc, dc] += 1 # background FN
+ self.matrix[dc, self.nc] += 1 # background FN
def matrix(self):
return self.matrix
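The index swaps above transpose the ConfusionMatrix convention so rows are predicted classes and columns are true classes, matching the 'True'/'Predicted' axis labels set in plot() below. A sketch of the three update cases with illustrative indices:

import numpy as np

nc = 2
matrix = np.zeros((nc + 1, nc + 1))  # last row/col represent background
matrix[1, 0] += 1   # detection of class 1 matched to a class-0 ground truth
matrix[nc, 0] += 1  # class-0 ground truth with no matching detection
matrix[1, nc] += 1  # class-1 detection with no matching ground truth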
@@ -166,8 +168,8 @@ def plot(self, save_dir='', names=()):
sn.set(font_scale=1.0 if self.nc < 50 else 0.8) # for label size
labels = (0 < len(names) < 99) and len(names) == self.nc # apply names to ticklabels
sn.heatmap(array, annot=self.nc < 30, annot_kws={"size": 8}, cmap='Blues', fmt='.2f', square=True,
- xticklabels=names + ['background FN'] if labels else "auto",
- yticklabels=names + ['background FP'] if labels else "auto").set_facecolor((1, 1, 1))
+ xticklabels=names + ['background FP'] if labels else "auto",
+ yticklabels=names + ['background FN'] if labels else "auto").set_facecolor((1, 1, 1))
fig.axes[0].set_xlabel('True')
fig.axes[0].set_ylabel('Predicted')
fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250)
@@ -181,13 +183,14 @@ def print(self):
# Plots ----------------------------------------------------------------------------------------------------------------
-def plot_pr_curve(px, py, ap, save_dir='.', names=()):
+def plot_pr_curve(px, py, ap, save_dir='pr_curve.png', names=()):
+ # Precision-recall curve
fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)
py = np.stack(py, axis=1)
- if 0 < len(names) < 21: # show mAP in legend if < 10 classes
+ if 0 < len(names) < 21: # display per-class legend if < 21 classes
for i, y in enumerate(py.T):
- ax.plot(px, y, linewidth=1, label=f'{names[i]} %.3f' % ap[i, 0]) # plot(recall, precision)
+ ax.plot(px, y, linewidth=1, label=f'{names[i]} {ap[i, 0]:.3f}') # plot(recall, precision)
else:
ax.plot(px, py, linewidth=1, color='grey') # plot(recall, precision)
@@ -197,4 +200,24 @@ def plot_pr_curve(px, py, ap, save_dir='.', names=()):
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
- fig.savefig(Path(save_dir) / 'precision_recall_curve.png', dpi=250)
+ fig.savefig(Path(save_dir), dpi=250)
+
+
+def plot_mc_curve(px, py, save_dir='mc_curve.png', names=(), xlabel='Confidence', ylabel='Metric'):
+ # Metric-confidence curve
+ fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)
+
+ if 0 < len(names) < 21: # display per-class legend if < 21 classes
+ for i, y in enumerate(py):
+ ax.plot(px, y, linewidth=1, label=f'{names[i]}') # plot(confidence, metric)
+ else:
+ ax.plot(px, py.T, linewidth=1, color='grey') # plot(confidence, metric)
+
+ y = py.mean(0)
+ ax.plot(px, y, linewidth=3, color='blue', label=f'all classes {y.max():.2f} at {px[y.argmax()]:.3f}')
+ ax.set_xlabel(xlabel)
+ ax.set_ylabel(ylabel)
+ ax.set_xlim(0, 1)
+ ax.set_ylim(0, 1)
+ plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
+ fig.savefig(Path(save_dir), dpi=250)
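plot_mc_curve backs the F1, precision, and recall curves that ap_per_class now saves, and it can also be driven standalone. A sketch with synthetic curves; the class names and output path are illustrative:

import numpy as np
from utils.metrics import plot_mc_curve

px = np.linspace(0, 1, 1000)
f1 = np.stack([np.sin(np.pi * px), np.sin(np.pi * px) ** 2])  # 2 fake F1 curves
plot_mc_curve(px, f1, save_dir='F1_curve.png', names=('cat', 'dog'), ylabel='F1')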
diff --git a/utils/plots.py b/utils/plots.py
index 47cd70776005..f24513c6998d 100644
--- a/utils/plots.py
+++ b/utils/plots.py
@@ -15,7 +15,7 @@
import seaborn as sns
import torch
import yaml
-from PIL import Image, ImageDraw
+from PIL import Image, ImageDraw, ImageFont
from scipy.signal import butter, filtfilt
from utils.general import xywh2xyxy, xyxy2xywh
@@ -31,7 +31,7 @@ def color_list():
def hex2rgb(h):
return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))
- return [hex2rgb(h) for h in plt.rcParams['axes.prop_cycle'].by_key()['color']]
+ return [hex2rgb(h) for h in matplotlib.colors.TABLEAU_COLORS.values()] # or BASE_ (8), CSS4_ (148), XKCD_ (949)
def hist2d(x, y, n=100):
@@ -54,18 +54,34 @@ def butter_lowpass(cutoff, fs, order):
return filtfilt(b, a, data) # forward-backward filter
-def plot_one_box(x, img, color=None, label=None, line_thickness=None):
- # Plots one bounding box on image img
- tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness
+def plot_one_box(x, im, color=None, label=None, line_thickness=3):
+ # Plots one bounding box on image 'im' using OpenCV
+ assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to plot_one_box() input image.'
+ tl = line_thickness or round(0.002 * (im.shape[0] + im.shape[1]) / 2) + 1 # line/font thickness
color = color or [random.randint(0, 255) for _ in range(3)]
c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
- cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
+ cv2.rectangle(im, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
if label:
tf = max(tl - 1, 1) # font thickness
t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
- cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA) # filled
- cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
+ cv2.rectangle(im, c1, c2, color, -1, cv2.LINE_AA) # filled
+ cv2.putText(im, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
+
+
+def plot_one_box_PIL(box, im, color=None, label=None, line_thickness=None):
+ # Plots one bounding box on image 'im' using PIL
+ im = Image.fromarray(im)
+ draw = ImageDraw.Draw(im)
+ line_thickness = line_thickness or max(int(min(im.size) / 200), 2)
+ draw.rectangle(box, width=line_thickness, outline=tuple(color)) # plot
+ if label:
+ fontsize = max(round(max(im.size) / 40), 12)
+ font = ImageFont.truetype("Arial.ttf", fontsize)
+ txt_width, txt_height = font.getsize(label)
+ draw.rectangle([box[0], box[1] - txt_height + 4, box[0] + txt_width, box[1]], fill=tuple(color))
+ draw.text((box[0], box[1] - txt_height + 1), label, fill=(255, 255, 255), font=font)
+ return np.asarray(im)
def plot_wh_methods(): # from utils.plots import *; plot_wh_methods()
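plot_one_box now asserts contiguity up front, because OpenCV drawing ops do not write through non-contiguous views. A usage sketch with a synthetic image:

import numpy as np
from utils.plots import plot_one_box

im = np.zeros((480, 640, 3), dtype=np.uint8)  # contiguous by construction
plot_one_box([100, 100, 300, 260], im, color=(0, 255, 0), label='person 0.91')
# for a view such as im[:, ::-1], apply np.ascontiguousarray() first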
@@ -223,38 +239,39 @@ def plot_targets_txt(): # from utils.plots import *; plot_targets_txt()
plt.savefig('targets.jpg', dpi=200)
-def plot_study_txt(path='study/', x=None): # from utils.plots import *; plot_study_txt()
+def plot_study_txt(path='', x=None): # from utils.plots import *; plot_study_txt()
# Plot study.txt generated by test.py
fig, ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)
- ax = ax.ravel()
+ # ax = ax.ravel()
fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True)
- for f in [Path(path) / f'study_coco_{x}.txt' for x in ['yolov5s', 'yolov5m', 'yolov5l', 'yolov5x']]:
+ # for f in [Path(path) / f'study_coco_{x}.txt' for x in ['yolov5s6', 'yolov5m6', 'yolov5l6', 'yolov5x6']]:
+ for f in sorted(Path(path).glob('study*.txt')):
y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T
x = np.arange(y.shape[1]) if x is None else np.array(x)
s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_inference (ms/img)', 't_NMS (ms/img)', 't_total (ms/img)']
- for i in range(7):
- ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8)
- ax[i].set_title(s[i])
+ # for i in range(7):
+ # ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8)
+ # ax[i].set_title(s[i])
j = y[3].argmax() + 1
- ax2.plot(y[6, :j], y[3, :j] * 1E2, '.-', linewidth=2, markersize=8,
+ ax2.plot(y[6, 1:j], y[3, 1:j] * 1E2, '.-', linewidth=2, markersize=8,
label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO'))
ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5],
'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet')
- ax2.grid()
- ax2.set_yticks(np.arange(30, 60, 5))
- ax2.set_xlim(0, 30)
- ax2.set_ylim(29, 51)
+ ax2.grid(alpha=0.2)
+ ax2.set_yticks(np.arange(20, 60, 5))
+ ax2.set_xlim(0, 57)
+ ax2.set_ylim(30, 55)
ax2.set_xlabel('GPU Speed (ms/img)')
ax2.set_ylabel('COCO AP val')
ax2.legend(loc='lower right')
- plt.savefig('test_study.png', dpi=300)
+ plt.savefig(str(Path(path).name) + '.png', dpi=300)
-def plot_labels(labels, save_dir=Path(''), loggers=None):
+def plot_labels(labels, names=(), save_dir=Path(''), loggers=None):
# plot dataset labels
print('Plotting labels... ')
c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes
@@ -271,7 +288,12 @@ def plot_labels(labels, save_dir=Path(''), loggers=None):
matplotlib.use('svg') # faster
ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel()
ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8)
- ax[0].set_xlabel('classes')
+ ax[0].set_ylabel('instances')
+ if 0 < len(names) < 30:
+ ax[0].set_xticks(range(len(names)))
+ ax[0].set_xticklabels(names, rotation=90, fontsize=10)
+ else:
+ ax[0].set_xlabel('classes')
sns.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9)
sns.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9)
@@ -295,13 +317,13 @@ def plot_labels(labels, save_dir=Path(''), loggers=None):
# loggers
for k, v in loggers.items() or {}:
if k == 'wandb' and v:
- v.log({"Labels": [v.Image(str(x), caption=x.name) for x in save_dir.glob('*labels*.jpg')]})
+ v.log({"Labels": [v.Image(str(x), caption=x.name) for x in save_dir.glob('*labels*.jpg')]}, commit=False)
def plot_evolution(yaml_file='data/hyp.finetune.yaml'): # from utils.plots import *; plot_evolution()
# Plot hyperparameter evolution results in evolve.txt
with open(yaml_file) as f:
- hyp = yaml.load(f, Loader=yaml.FullLoader)
+ hyp = yaml.safe_load(f)
x = np.loadtxt('evolve.txt', ndmin=2)
f = fitness(x)
# weights = (f - f.min()) ** 2 # for weighted results
diff --git a/utils/torch_utils.py b/utils/torch_utils.py
index 2cb09e71ce71..9991e5ec87d8 100644
--- a/utils/torch_utils.py
+++ b/utils/torch_utils.py
@@ -1,8 +1,10 @@
-# PyTorch utils
+# YOLOv5 PyTorch utils
+import datetime
import logging
import math
import os
+import platform
import subprocess
import time
from contextlib import contextmanager
@@ -43,17 +45,24 @@ def init_torch_seeds(seed=0):
cudnn.benchmark, cudnn.deterministic = True, False
-def git_describe():
+def date_modified(path=__file__):
+ # return human-readable file modification date, i.e. '2021-3-26'
+ t = datetime.datetime.fromtimestamp(Path(path).stat().st_mtime)
+ return f'{t.year}-{t.month}-{t.day}'
+
+
+def git_describe(path=Path(__file__).parent): # path must be a directory
# return human-readable git description, i.e. v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe
- if Path('.git').exists():
- return subprocess.check_output('git describe --tags --long --always', shell=True).decode('utf-8')[:-1]
- else:
- return ''
+ s = f'git -C {path} describe --tags --long --always'
+ try:
+ return subprocess.check_output(s, shell=True, stderr=subprocess.STDOUT).decode()[:-1]
+ except subprocess.CalledProcessError:
+ return '' # not a git repository
def select_device(device='', batch_size=None):
# device = 'cpu' or '0' or '0,1,2,3'
- s = f'YOLOv5 {git_describe()} torch {torch.__version__} ' # string
+ s = f'YOLOv5 🚀 {git_describe() or date_modified()} torch {torch.__version__} ' # string
cpu = device.lower() == 'cpu'
if cpu:
os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False
@@ -73,7 +82,7 @@ def select_device(device='', batch_size=None):
else:
s += 'CPU\n'
- logger.info(s) # skip a line
+ logger.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe
return torch.device('cuda:0' if cuda else 'cpu')
@@ -120,7 +129,7 @@ def profile(x, ops, n=100, device=None):
s_in = tuple(x.shape) if isinstance(x, torch.Tensor) else 'list'
s_out = tuple(y.shape) if isinstance(y, torch.Tensor) else 'list'
p = sum(list(x.numel() for x in m.parameters())) if isinstance(m, nn.Module) else 0 # parameters
- print(f'{p:12.4g}{flops:12.4g}{dtf:16.4g}{dtb:16.4g}{str(s_in):>24s}{str(s_out):>24s}')
+ print(f'{p:12}{flops:12.4g}{dtf:16.4g}{dtb:16.4g}{str(s_in):>24s}{str(s_out):>24s}')
def is_parallel(model):
@@ -182,7 +191,7 @@ def fuse_conv_and_bn(conv, bn):
# prepare filters
w_conv = conv.weight.clone().view(conv.out_channels, -1)
w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
- fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.size()))
+ fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape))
# prepare spatial bias
b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias
@@ -205,7 +214,7 @@ def model_info(model, verbose=False, img_size=640):
try: # FLOPS
from thop import profile
- stride = int(model.stride.max()) if hasattr(model, 'stride') else 32
+ stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32
img = torch.zeros((1, model.yaml.get('ch', 3), stride, stride), device=next(model.parameters()).device) # input
flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2 # stride GFLOPS
img_size = img_size if isinstance(img_size, list) else [img_size, img_size] # expand if int/float
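The fusion tweak above only swaps .size() for .shape, but the fold is easy to sanity-check end to end. A hedged sketch, assuming the vendored utils.torch_utils is importable:

import torch
import torch.nn as nn
from utils.torch_utils import fuse_conv_and_bn

conv = nn.Conv2d(3, 8, 3, bias=False)
bn = nn.BatchNorm2d(8).eval()       # eval mode: use running stats
x = torch.randn(1, 3, 32, 32)
fused = fuse_conv_and_bn(conv, bn)  # returns a single Conv2d with BN folded in
print(torch.allclose(bn(conv(x)), fused(x), atol=1e-5))  # expected: True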
diff --git a/utils/wandb_logging/__init__.py b/utils/wandb_logging/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/utils/wandb_logging/log_dataset.py b/utils/wandb_logging/log_dataset.py
new file mode 100644
index 000000000000..f45a23011f15
--- /dev/null
+++ b/utils/wandb_logging/log_dataset.py
@@ -0,0 +1,24 @@
+import argparse
+
+import yaml
+
+from wandb_utils import WandbLogger
+
+WANDB_ARTIFACT_PREFIX = 'wandb-artifact://'
+
+
+def create_dataset_artifact(opt):
+ with open(opt.data) as f:
+ data = yaml.safe_load(f) # data dict
+ logger = WandbLogger(opt, '', None, data, job_type='Dataset Creation')
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path')
+ parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset')
+ parser.add_argument('--project', type=str, default='YOLOv5', help='name of W&B Project')
+ opt = parser.parse_args()
+ opt.resume = False # Explicitly disallow resume check for dataset upload job
+
+ create_dataset_artifact(opt)
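The new log_dataset.py is a thin CLI wrapper (e.g. python utils/wandb_logging/log_dataset.py --data data/coco128.yaml --project YOLOv5). The same upload can be triggered programmatically; a sketch with illustrative arguments, noting that wandb must be installed and authenticated and that the script imports wandb_utils as a top-level module:

import argparse
import sys
sys.path.append('utils/wandb_logging')  # illustrative; run from the repo root
from log_dataset import create_dataset_artifact

opt = argparse.Namespace(data='data/coco128.yaml', single_cls=False,
                         project='YOLOv5', resume=False)
create_dataset_artifact(opt)  # logs train/val images + labels as W&B artifacts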
diff --git a/utils/wandb_logging/wandb_utils.py b/utils/wandb_logging/wandb_utils.py
new file mode 100644
index 000000000000..d8fbd1ef42aa
--- /dev/null
+++ b/utils/wandb_logging/wandb_utils.py
@@ -0,0 +1,306 @@
+import json
+import sys
+from pathlib import Path
+
+import torch
+import yaml
+from tqdm import tqdm
+
+sys.path.append(str(Path(__file__).parent.parent.parent)) # add repo root to path so utils/ is importable
+from utils.datasets import LoadImagesAndLabels
+from utils.datasets import img2label_paths
+from utils.general import colorstr, xywh2xyxy, check_dataset
+
+try:
+ import wandb
+ from wandb import init, finish
+except ImportError:
+ wandb = None
+
+WANDB_ARTIFACT_PREFIX = 'wandb-artifact://'
+
+
+def remove_prefix(from_string, prefix=WANDB_ARTIFACT_PREFIX):
+ return from_string[len(prefix):]
+
+
+def check_wandb_config_file(data_config_file):
+ wandb_config = '_wandb.'.join(data_config_file.rsplit('.', 1)) # updated data.yaml path
+ if Path(wandb_config).is_file():
+ return wandb_config
+ return data_config_file
+
+
+def get_run_info(run_path):
+ run_path = Path(remove_prefix(run_path, WANDB_ARTIFACT_PREFIX))
+ run_id = run_path.stem
+ project = run_path.parent.stem
+ model_artifact_name = 'run_' + run_id + '_model'
+ return run_id, project, model_artifact_name
+
+
+def check_wandb_resume(opt):
+ if opt.global_rank not in [-1, 0]:
+ process_wandb_config_ddp_mode(opt)
+ if isinstance(opt.resume, str):
+ if opt.resume.startswith(WANDB_ARTIFACT_PREFIX):
+ if opt.global_rank not in [-1, 0]: # For resuming DDP runs
+ run_id, project, model_artifact_name = get_run_info(opt.resume)
+ api = wandb.Api()
+ artifact = api.artifact(project + '/' + model_artifact_name + ':latest')
+ modeldir = artifact.download()
+ opt.weights = str(Path(modeldir) / "last.pt")
+ return True
+ return None
+
+
+def process_wandb_config_ddp_mode(opt):
+ with open(opt.data) as f:
+ data_dict = yaml.safe_load(f) # data dict
+ train_dir, val_dir = None, None
+ if isinstance(data_dict['train'], str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX):
+ api = wandb.Api()
+ train_artifact = api.artifact(remove_prefix(data_dict['train']) + ':' + opt.artifact_alias)
+ train_dir = train_artifact.download()
+ train_path = Path(train_dir) / 'data/images/'
+ data_dict['train'] = str(train_path)
+
+ if isinstance(data_dict['val'], str) and data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX):
+ api = wandb.Api()
+ val_artifact = api.artifact(remove_prefix(data_dict['val']) + ':' + opt.artifact_alias)
+ val_dir = val_artifact.download()
+ val_path = Path(val_dir) / 'data/images/'
+ data_dict['val'] = str(val_path)
+ if train_dir or val_dir:
+ ddp_data_path = str(Path(val_dir) / 'wandb_local_data.yaml')
+ with open(ddp_data_path, 'w') as f:
+ yaml.safe_dump(data_dict, f)
+ opt.data = ddp_data_path
+
+
+class WandbLogger():
+ def __init__(self, opt, name, run_id, data_dict, job_type='Training'):
+ # Pre-training routine --
+ self.job_type = job_type
+ self.wandb, self.wandb_run, self.data_dict = wandb, None if not wandb else wandb.run, data_dict
+ # A single wandb.init call would be cleaner, but the config data needed for resuming would be overwritten by WandbLogger's own wandb.init call
+ if isinstance(opt.resume, str): # checks resume from artifact
+ if opt.resume.startswith(WANDB_ARTIFACT_PREFIX):
+ run_id, project, model_artifact_name = get_run_info(opt.resume)
+ model_artifact_name = WANDB_ARTIFACT_PREFIX + model_artifact_name
+ assert wandb, 'install wandb to resume wandb runs'
+ # Resume wandb-artifact:// runs here| workaround for not overwriting wandb.config
+ self.wandb_run = wandb.init(id=run_id, project=project, resume='allow')
+ opt.resume = model_artifact_name
+ elif self.wandb:
+ self.wandb_run = wandb.init(config=opt,
+ resume="allow",
+ project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem,
+ name=name,
+ job_type=job_type,
+ id=run_id) if not wandb.run else wandb.run
+ if self.wandb_run:
+ if self.job_type == 'Training':
+ if not opt.resume:
+ wandb_data_dict = self.check_and_upload_dataset(opt) if opt.upload_dataset else data_dict
+ # Info useful for resuming from artifacts
+ self.wandb_run.config.opt = vars(opt)
+ self.wandb_run.config.data_dict = wandb_data_dict
+ self.data_dict = self.setup_training(opt, data_dict)
+ if self.job_type == 'Dataset Creation':
+ self.data_dict = self.check_and_upload_dataset(opt)
+ else:
+ prefix = colorstr('wandb: ')
+ print(f"{prefix}Install Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)")
+
+ def check_and_upload_dataset(self, opt):
+ assert wandb, 'Install wandb to upload dataset'
+ check_dataset(self.data_dict)
+ config_path = self.log_dataset_artifact(opt.data,
+ opt.single_cls,
+ 'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem)
+ print("Created dataset config file ", config_path)
+ with open(config_path) as f:
+ wandb_data_dict = yaml.safe_load(f)
+ return wandb_data_dict
+
+ def setup_training(self, opt, data_dict):
+ self.log_dict, self.current_epoch, self.log_imgs = {}, 0, 16 # Logging Constants
+ self.bbox_interval = opt.bbox_interval
+ if isinstance(opt.resume, str):
+ modeldir, _ = self.download_model_artifact(opt)
+ if modeldir:
+ self.weights = Path(modeldir) / "last.pt"
+ config = self.wandb_run.config
+ opt.weights, opt.save_period, opt.batch_size, opt.bbox_interval, opt.epochs, opt.hyp = str(
+ self.weights), config.save_period, config.total_batch_size, config.bbox_interval, config.epochs, \
+ config.opt['hyp']
+ data_dict = dict(self.wandb_run.config.data_dict) # eliminates the need for config file to resume
+ if 'val_artifact' not in self.__dict__: # If --upload_dataset is set, use the existing artifact, don't download
+ self.train_artifact_path, self.train_artifact = self.download_dataset_artifact(data_dict.get('train'),
+ opt.artifact_alias)
+ self.val_artifact_path, self.val_artifact = self.download_dataset_artifact(data_dict.get('val'),
+ opt.artifact_alias)
+ self.result_artifact, self.result_table, self.val_table, self.weights = None, None, None, None
+ if self.train_artifact_path is not None:
+ train_path = Path(self.train_artifact_path) / 'data/images/'
+ data_dict['train'] = str(train_path)
+ if self.val_artifact_path is not None:
+ val_path = Path(self.val_artifact_path) / 'data/images/'
+ data_dict['val'] = str(val_path)
+ self.val_table = self.val_artifact.get("val")
+ self.map_val_table_path()
+ if self.val_artifact is not None:
+ self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation")
+ self.result_table = wandb.Table(["epoch", "id", "prediction", "avg_confidence"])
+ if opt.bbox_interval == -1:
+ self.bbox_interval = opt.bbox_interval = (opt.epochs // 10) if opt.epochs > 10 else 1
+ return data_dict
+
+ def download_dataset_artifact(self, path, alias):
+ if isinstance(path, str) and path.startswith(WANDB_ARTIFACT_PREFIX):
+ dataset_artifact = wandb.use_artifact(remove_prefix(path, WANDB_ARTIFACT_PREFIX) + ":" + alias)
+ assert dataset_artifact is not None, "Error: W&B dataset artifact doesn't exist"
+ datadir = dataset_artifact.download()
+ return datadir, dataset_artifact
+ return None, None
+
+ def download_model_artifact(self, opt):
+ if opt.resume.startswith(WANDB_ARTIFACT_PREFIX):
+ model_artifact = wandb.use_artifact(remove_prefix(opt.resume, WANDB_ARTIFACT_PREFIX) + ":latest")
+ assert model_artifact is not None, 'Error: W&B model artifact doesn\'t exist'
+ modeldir = model_artifact.download()
+ epochs_trained = model_artifact.metadata.get('epochs_trained')
+ total_epochs = model_artifact.metadata.get('total_epochs')
+ assert epochs_trained < total_epochs, 'training to %g epochs is finished, nothing to resume.' % (
+ total_epochs)
+ return modeldir, model_artifact
+ return None, None
+
+ def log_model(self, path, opt, epoch, fitness_score, best_model=False):
+ model_artifact = wandb.Artifact('run_' + wandb.run.id + '_model', type='model', metadata={
+ 'original_url': str(path),
+ 'epochs_trained': epoch + 1,
+ 'save period': opt.save_period,
+ 'project': opt.project,
+ 'total_epochs': opt.epochs,
+ 'fitness_score': fitness_score
+ })
+ model_artifact.add_file(str(path / 'last.pt'), name='last.pt')
+ wandb.log_artifact(model_artifact,
+ aliases=['latest', 'epoch ' + str(self.current_epoch), 'best' if best_model else ''])
+ print("Saving model artifact on epoch ", epoch + 1)
+
+ def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=False):
+ with open(data_file) as f:
+ data = yaml.safe_load(f) # data dict
+ nc, names = (1, ['item']) if single_cls else (int(data['nc']), data['names'])
+ names = {k: v for k, v in enumerate(names)} # to index dictionary
+ self.train_artifact = self.create_dataset_table(LoadImagesAndLabels(
+ data['train']), names, name='train') if data.get('train') else None
+ self.val_artifact = self.create_dataset_table(LoadImagesAndLabels(
+ data['val']), names, name='val') if data.get('val') else None
+ if data.get('train'):
+ data['train'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'train')
+ if data.get('val'):
+ data['val'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'val')
+ path = data_file if overwrite_config else '_wandb.'.join(data_file.rsplit('.', 1)) # updated data.yaml path
+ data.pop('download', None)
+ with open(path, 'w') as f:
+ yaml.safe_dump(data, f)
+
+ if self.job_type == 'Training': # builds correct artifact pipeline graph
+ self.wandb_run.use_artifact(self.val_artifact)
+ self.wandb_run.use_artifact(self.train_artifact)
+ self.val_artifact.wait()
+ self.val_table = self.val_artifact.get('val')
+ self.map_val_table_path()
+ else:
+ self.wandb_run.log_artifact(self.train_artifact)
+ self.wandb_run.log_artifact(self.val_artifact)
+ return path
+
+ def map_val_table_path(self):
+ self.val_table_map = {}
+ print("Mapping dataset")
+ for i, data in enumerate(tqdm(self.val_table.data)):
+ self.val_table_map[data[3]] = data[0]
+
+ def create_dataset_table(self, dataset, class_to_id, name='dataset'):
+ # TODO: Explore multiprocessing to split this loop across workers; this is essential for speeding up the logging
+ artifact = wandb.Artifact(name=name, type="dataset")
+ img_files = tqdm([dataset.path]) if isinstance(dataset.path, str) and Path(dataset.path).is_dir() else None
+ img_files = tqdm(dataset.img_files) if not img_files else img_files
+ for img_file in img_files:
+ if Path(img_file).is_dir():
+ artifact.add_dir(img_file, name='data/images')
+ labels_path = 'labels'.join(dataset.path.rsplit('images', 1))
+ artifact.add_dir(labels_path, name='data/labels')
+ else:
+ artifact.add_file(img_file, name='data/images/' + Path(img_file).name)
+ label_file = Path(img2label_paths([img_file])[0])
+ artifact.add_file(str(label_file),
+ name='data/labels/' + label_file.name) if label_file.exists() else None
+ table = wandb.Table(columns=["id", "train_image", "Classes", "name"])
+ class_set = wandb.Classes([{'id': id, 'name': name} for id, name in class_to_id.items()])
+ for si, (img, labels, paths, shapes) in enumerate(tqdm(dataset)):
+ height, width = shapes[0]
+ labels[:, 2:] = (xywh2xyxy(labels[:, 2:].view(-1, 4))) * torch.Tensor([width, height, width, height])
+ box_data, img_classes = [], {}
+ for cls, *xyxy in labels[:, 1:].tolist():
+ cls = int(cls)
+ box_data.append({"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
+ "class_id": cls,
+ "box_caption": "%s" % (class_to_id[cls]),
+ "scores": {"acc": 1},
+ "domain": "pixel"})
+ img_classes[cls] = class_to_id[cls]
+ boxes = {"ground_truth": {"box_data": box_data, "class_labels": class_to_id}} # inference-space
+ table.add_data(si, wandb.Image(paths, classes=class_set, boxes=boxes), json.dumps(img_classes),
+ Path(paths).name)
+ artifact.add(table, name)
+ return artifact
+
+ def log_training_progress(self, predn, path, names):
+ if self.val_table and self.result_table:
+ class_set = wandb.Classes([{'id': id, 'name': name} for id, name in names.items()])
+ box_data = []
+ total_conf = 0
+ for *xyxy, conf, cls in predn.tolist():
+ if conf >= 0.25:
+ box_data.append(
+ {"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
+ "class_id": int(cls),
+ "box_caption": "%s %.3f" % (names[cls], conf),
+ "scores": {"class_score": conf},
+ "domain": "pixel"})
+ total_conf = total_conf + conf
+ boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space
+ id = self.val_table_map[Path(path).name]
+ self.result_table.add_data(self.current_epoch,
+ id,
+ wandb.Image(self.val_table.data[id][1], boxes=boxes, classes=class_set),
+ total_conf / max(1, len(box_data))
+ )
+
+ def log(self, log_dict):
+ if self.wandb_run:
+ for key, value in log_dict.items():
+ self.log_dict[key] = value
+
+ def end_epoch(self, best_result=False):
+ if self.wandb_run:
+ wandb.log(self.log_dict)
+ self.log_dict = {}
+ if self.result_artifact:
+ train_results = wandb.JoinedTable(self.val_table, self.result_table, "id")
+ self.result_artifact.add(train_results, 'result')
+ wandb.log_artifact(self.result_artifact, aliases=['latest', 'epoch ' + str(self.current_epoch),
+ ('best' if best_result else '')])
+ self.result_table = wandb.Table(["epoch", "id", "prediction", "avg_confidence"])
+ self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation")
+
+ def finish_run(self):
+ if self.wandb_run:
+ if self.log_dict:
+ wandb.log(self.log_dict)
+ wandb.run.finish()
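Taken together, train.py drives WandbLogger roughly as follows. A condensed, hedged sketch with illustrative opt fields, run from the repo root; it degrades to a no-op (plus an install hint) when wandb is not installed:

import argparse
from utils.wandb_logging.wandb_utils import WandbLogger

opt = argparse.Namespace(resume=False, project='runs/train', upload_dataset=False,
                         bbox_interval=-1, epochs=3, artifact_alias='latest')
data_dict = {'train': 'images/train', 'val': 'images/val', 'nc': 1, 'names': ['item']}

logger = WandbLogger(opt, 'exp', None, data_dict)  # starts (or resumes) a run
for epoch in range(opt.epochs):
    logger.current_epoch = epoch + 1
    logger.log({'train/box_loss': 0.05})           # buffer metrics for this epoch
    logger.end_epoch(best_result=(epoch == opt.epochs - 1))
logger.finish_run()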