From c7ff3017470ed34251e434510414fe620ab05c02 Mon Sep 17 00:00:00 2001
From: igeni
Date: Sat, 23 Mar 2024 08:21:58 +0300
Subject: [PATCH 1/3] Removed manual accounting of field's count and added
 option to change text justification

---
 segment/train.py |  5 +----
 segment/val.py   | 14 +-------------
 train.py         |  2 +-
 val.py           |  2 +-
 4 files changed, 4 insertions(+), 19 deletions(-)

diff --git a/segment/train.py b/segment/train.py
index 5a6e9afb8ec0..b494e6bda843 100644
--- a/segment/train.py
+++ b/segment/train.py
@@ -344,10 +344,7 @@ def train(hyp, opt, device, callbacks):
         if RANK != -1:
             train_loader.sampler.set_epoch(epoch)
         pbar = enumerate(train_loader)
-        LOGGER.info(
-            ("\n" + "%11s" * 8)
-            % ("Epoch", "GPU_mem", "box_loss", "seg_loss", "obj_loss", "cls_loss", "Instances", "Size")
-        )
+        LOGGER.info('\n' + ''.join([f"{el:>11}" for el in ["Epoch", "GPU_mem", "box_loss", "seg_loss", "obj_loss", "cls_loss", "Instances", "Size"]]))
         if RANK in {-1, 0}:
             pbar = tqdm(pbar, total=nb, bar_format=TQDM_BAR_FORMAT)  # progress bar
         optimizer.zero_grad()
diff --git a/segment/val.py b/segment/val.py
index bafdb5dcec07..61cad9e02b9d 100644
--- a/segment/val.py
+++ b/segment/val.py
@@ -259,19 +259,7 @@ def run(
     if isinstance(names, (list, tuple)):  # old format
         names = dict(enumerate(names))
     class_map = coco80_to_coco91_class() if is_coco else list(range(1000))
-    s = ("%22s" + "%11s" * 10) % (
-        "Class",
-        "Images",
-        "Instances",
-        "Box(P",
-        "R",
-        "mAP50",
-        "mAP50-95)",
-        "Mask(P",
-        "R",
-        "mAP50",
-        "mAP50-95)",
-    )
+    s = ''.join([f"{el:>11}" if idx>0 else f"{el:>22}" for idx, el in enumerate(["Class", "Images", "Instances", "Box(P", "R", "mAP50", "mAP50-95)", "Mask(P", "R", "mAP50", "mAP50-95)"])])
     dt = Profile(device=device), Profile(device=device), Profile(device=device)
     metrics = Metrics()
     loss = torch.zeros(4, device=device)
diff --git a/train.py b/train.py
index df0972a67c70..a4d6b9f55495 100644
--- a/train.py
+++ b/train.py
@@ -349,7 +349,7 @@ def train(hyp, opt, device, callbacks):
         if RANK != -1:
             train_loader.sampler.set_epoch(epoch)
         pbar = enumerate(train_loader)
-        LOGGER.info(("\n" + "%11s" * 7) % ("Epoch", "GPU_mem", "box_loss", "obj_loss", "cls_loss", "Instances", "Size"))
+        LOGGER.info('\n' + ''.join([f"{el:>11}" for el in ["Epoch", "GPU_mem", "box_loss", "obj_loss", "cls_loss", "Instances", "Size"]]))
         if RANK in {-1, 0}:
             pbar = tqdm(pbar, total=nb, bar_format=TQDM_BAR_FORMAT)  # progress bar
         optimizer.zero_grad()
diff --git a/val.py b/val.py
index 1c8c65ba89aa..41c4cb9c6203 100644
--- a/val.py
+++ b/val.py
@@ -214,7 +214,7 @@ def run(
     if isinstance(names, (list, tuple)):  # old format
         names = dict(enumerate(names))
     class_map = coco80_to_coco91_class() if is_coco else list(range(1000))
-    s = ("%22s" + "%11s" * 6) % ("Class", "Images", "Instances", "P", "R", "mAP50", "mAP50-95")
+    s = ''.join([f"{el:>11}" if idx>0 else f"{el:>22}" for idx, el in enumerate(["Class", "Images", "Instances", "P", "R", "mAP50", "mAP50-95"])])
     tp, fp, p, r, f1, mp, mr, map50, ap50, map = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
     dt = Profile(device=device), Profile(device=device), Profile(device=device)  # profiling times
     loss = torch.zeros(3, device=device)
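
A quick standalone check of the equivalence this first patch relies on (illustrative only, not part of any patch): the printf-style "%11s" and the f-string spec ":>11" both right-justify in an 11-character field, so the join produces identical headers while dropping the hard-coded field count ("%11s" * 8) and turning justification into a one-character change:

    headers = ["Epoch", "GPU_mem", "box_loss", "obj_loss", "cls_loss", "Instances", "Size"]
    old = ("%11s" * len(headers)) % tuple(headers)  # printf-style: width spec repeated once per field
    new = "".join(f"{el:>11}" for el in headers)  # f-string join: no field count to maintain
    assert old == new  # byte-identical output
    centered = "".join(f"{el:^11}" for el in headers)  # '^' (or '<') changes justification in place

The design win is that adding or removing a column now touches only the list of header names, not a separately maintained multiplier.
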
From 70dae61391d6674655a9c4dd765ec5659f66375b Mon Sep 17 00:00:00 2001
From: UltralyticsAssistant
Date: Sun, 28 Apr 2024 15:19:48 +0000
Subject: [PATCH 2/3] Auto-format by https://ultralytics.com/actions

---
 segment/train.py | 10 +++++++++-
 segment/val.py   | 21 ++++++++++++++++++++-
 train.py         |  7 ++++++-
 val.py           |  7 ++++++-
 4 files changed, 41 insertions(+), 4 deletions(-)

diff --git a/segment/train.py b/segment/train.py
index b494e6bda843..046e8d5330b2 100644
--- a/segment/train.py
+++ b/segment/train.py
@@ -344,7 +344,15 @@ def train(hyp, opt, device, callbacks):
         if RANK != -1:
             train_loader.sampler.set_epoch(epoch)
         pbar = enumerate(train_loader)
-        LOGGER.info('\n' + ''.join([f"{el:>11}" for el in ["Epoch", "GPU_mem", "box_loss", "seg_loss", "obj_loss", "cls_loss", "Instances", "Size"]]))
+        LOGGER.info(
+            "\n"
+            + "".join(
+                [
+                    f"{el:>11}"
+                    for el in ["Epoch", "GPU_mem", "box_loss", "seg_loss", "obj_loss", "cls_loss", "Instances", "Size"]
+                ]
+            )
+        )
         if RANK in {-1, 0}:
             pbar = tqdm(pbar, total=nb, bar_format=TQDM_BAR_FORMAT)  # progress bar
         optimizer.zero_grad()
diff --git a/segment/val.py b/segment/val.py
index 61cad9e02b9d..68a8980f2413 100644
--- a/segment/val.py
+++ b/segment/val.py
@@ -259,7 +259,26 @@ def run(
     if isinstance(names, (list, tuple)):  # old format
         names = dict(enumerate(names))
     class_map = coco80_to_coco91_class() if is_coco else list(range(1000))
-    s = ''.join([f"{el:>11}" if idx>0 else f"{el:>22}" for idx, el in enumerate(["Class", "Images", "Instances", "Box(P", "R", "mAP50", "mAP50-95)", "Mask(P", "R", "mAP50", "mAP50-95)"])])
+    s = "".join(
+        [
+            f"{el:>11}" if idx > 0 else f"{el:>22}"
+            for idx, el in enumerate(
+                [
+                    "Class",
+                    "Images",
+                    "Instances",
+                    "Box(P",
+                    "R",
+                    "mAP50",
+                    "mAP50-95)",
+                    "Mask(P",
+                    "R",
+                    "mAP50",
+                    "mAP50-95)",
+                ]
+            )
+        ]
+    )
     dt = Profile(device=device), Profile(device=device), Profile(device=device)
     metrics = Metrics()
     loss = torch.zeros(4, device=device)
diff --git a/train.py b/train.py
index a4d6b9f55495..b51addec21ac 100644
--- a/train.py
+++ b/train.py
@@ -349,7 +349,12 @@ def train(hyp, opt, device, callbacks):
         if RANK != -1:
             train_loader.sampler.set_epoch(epoch)
         pbar = enumerate(train_loader)
-        LOGGER.info('\n' + ''.join([f"{el:>11}" for el in ["Epoch", "GPU_mem", "box_loss", "obj_loss", "cls_loss", "Instances", "Size"]]))
+        LOGGER.info(
+            "\n"
+            + "".join(
+                [f"{el:>11}" for el in ["Epoch", "GPU_mem", "box_loss", "obj_loss", "cls_loss", "Instances", "Size"]]
+            )
+        )
         if RANK in {-1, 0}:
             pbar = tqdm(pbar, total=nb, bar_format=TQDM_BAR_FORMAT)  # progress bar
         optimizer.zero_grad()
diff --git a/val.py b/val.py
index 41c4cb9c6203..40af4141a05c 100644
--- a/val.py
+++ b/val.py
@@ -214,7 +214,12 @@ def run(
     if isinstance(names, (list, tuple)):  # old format
         names = dict(enumerate(names))
     class_map = coco80_to_coco91_class() if is_coco else list(range(1000))
-    s = ''.join([f"{el:>11}" if idx>0 else f"{el:>22}" for idx, el in enumerate(["Class", "Images", "Instances", "P", "R", "mAP50", "mAP50-95"])])
+    s = "".join(
+        [
+            f"{el:>11}" if idx > 0 else f"{el:>22}"
+            for idx, el in enumerate(["Class", "Images", "Instances", "P", "R", "mAP50", "mAP50-95"])
+        ]
+    )
     tp, fp, p, r, f1, mp, mr, map50, ap50, map = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
     dt = Profile(device=device), Profile(device=device), Profile(device=device)  # profiling times
     loss = torch.zeros(3, device=device)
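
The validation scripts keep a wider 22-character first column for class names. A minimal sketch (again illustrative, not part of the patches) confirming that the conditional f-string the auto-formatter rewraps above reproduces the old "%22s" + "%11s" * 6 template exactly:

    headers = ["Class", "Images", "Instances", "P", "R", "mAP50", "mAP50-95"]
    s = "".join(f"{el:>11}" if idx > 0 else f"{el:>22}" for idx, el in enumerate(headers))
    assert s == ("%22s" + "%11s" * 6) % tuple(headers)  # identical to the printf template

Patch 2 changes no behavior; it only rewraps the one-liners from patch 1 to satisfy the repo's line-length and formatting rules.
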
From fa8c344e7f9ada02dacca912473e95a476019b05 Mon Sep 17 00:00:00 2001
From: UltralyticsAssistant
Date: Sat, 24 Aug 2024 21:41:45 +0000
Subject: [PATCH 3/3] Auto-format by https://ultralytics.com/actions

---
 export.py                              |  1 +
 utils/augmentations.py                 |  1 -
 utils/callbacks.py                     |  1 -
 utils/dataloaders.py                   |  7 ++++---
 utils/general.py                       |  2 --
 utils/loggers/__init__.py              |  3 ++-
 utils/loggers/clearml/clearml_utils.py | 14 +++++++-------
 utils/loggers/wandb/wandb_utils.py     | 12 ++++++------
 utils/metrics.py                       |  8 +++-----
 utils/segment/augmentations.py         |  1 -
 utils/segment/general.py               |  3 ---
 utils/triton.py                        |  3 +--
 12 files changed, 24 insertions(+), 32 deletions(-)

diff --git a/export.py b/export.py
index dfb1c06fb5e2..f3216a564290 100644
--- a/export.py
+++ b/export.py
@@ -449,6 +449,7 @@ def transform_fn(data_item):
             Quantization transform function.
 
             Extracts and preprocess input data from dataloader item for quantization.
+
             Parameters:
                 data_item: Tuple with data item produced by DataLoader during iteration
             Returns:
diff --git a/utils/augmentations.py b/utils/augmentations.py
index 4a6e441d7c45..bdbe07712716 100644
--- a/utils/augmentations.py
+++ b/utils/augmentations.py
@@ -156,7 +156,6 @@ def random_perspective(
 ):
     # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(0.1, 0.1), scale=(0.9, 1.1), shear=(-10, 10))
     # targets = [cls, xyxy]
-
     """Applies random perspective transformation to an image, modifying the image and corresponding labels."""
     height = im.shape[0] + border[0] * 2  # shape(h,w,c)
     width = im.shape[1] + border[1] * 2
diff --git a/utils/callbacks.py b/utils/callbacks.py
index 0a0bcbdb2b96..21c587bd74c6 100644
--- a/utils/callbacks.py
+++ b/utils/callbacks.py
@@ -64,7 +64,6 @@ def run(self, hook, *args, thread=False, **kwargs):
             thread: (boolean) Run callbacks in daemon thread
             kwargs: Keyword Arguments to receive from YOLOv5
         """
-
         assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}"
         for logger in self._callbacks[hook]:
             if thread:
diff --git a/utils/dataloaders.py b/utils/dataloaders.py
index 21308f0cedbd..bdeffec465e7 100644
--- a/utils/dataloaders.py
+++ b/utils/dataloaders.py
@@ -1104,7 +1104,8 @@ def extract_boxes(path=DATASETS_DIR / "coco128"):
 def autosplit(path=DATASETS_DIR / "coco128/images", weights=(0.9, 0.1, 0.0), annotated_only=False):
     """Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
     Usage: from utils.dataloaders import *; autosplit()
-    Arguments
+
+    Arguments:
         path: Path to images directory
         weights: Train, val, test weights (list, tuple)
         annotated_only: Only use images with an annotated txt file
@@ -1183,7 +1184,7 @@ class HUBDatasetStats:
     """
     Class for generating HUB dataset JSON and `-hub` dataset directory.
 
-    Arguments
+    Arguments:
         path: Path to data.yaml or data.zip (with data.yaml inside data.zip)
         autodownload: Attempt to download dataset if not found locally
 
@@ -1314,7 +1315,7 @@ class ClassificationDataset(torchvision.datasets.ImageFolder):
     """
     YOLOv5 Classification Dataset.
 
-    Arguments
+    Arguments:
         root: Dataset path
         transform: torchvision transforms, used by default
         album_transform: Albumentations transforms, used if installed
diff --git a/utils/general.py b/utils/general.py
index e311504b3031..57db68a7ac76 100644
--- a/utils/general.py
+++ b/utils/general.py
@@ -518,7 +518,6 @@ def check_font(font=FONT, progress=False):
 
 def check_dataset(data, autodownload=True):
     """Validates and/or auto-downloads a dataset, returning its configuration as a dictionary."""
-
     # Download (optional)
     extract_dir = ""
     if isinstance(data, (str, Path)) and (is_zipfile(data) or is_tarfile(data)):
@@ -1023,7 +1022,6 @@ def non_max_suppression(
     Returns:
         list of detections, on (n,6) tensor per image [xyxy, conf, cls]
     """
-
     # Checks
     assert 0 <= conf_thres <= 1, f"Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0"
     assert 0 <= iou_thres <= 1, f"Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0"
diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py
index 2bd8583d2ade..7051e8da0a29 100644
--- a/utils/loggers/__init__.py
+++ b/utils/loggers/__init__.py
@@ -350,7 +350,8 @@ class GenericLogger:
     """
     YOLOv5 General purpose logger for non-task specific logging
     Usage: from utils.loggers import GenericLogger; logger = GenericLogger(...)
-    Arguments
+
+    Arguments:
         opt: Run arguments
         console_logger: Console logger
         include: loggers to include
diff --git a/utils/loggers/clearml/clearml_utils.py b/utils/loggers/clearml/clearml_utils.py
index 2b5351ef8533..de4129e08a16 100644
--- a/utils/loggers/clearml/clearml_utils.py
+++ b/utils/loggers/clearml/clearml_utils.py
@@ -80,7 +80,7 @@ def __init__(self, opt, hyp):
         - Initialize ClearML Task, this object will capture the experiment
         - Upload dataset version to ClearML Data if opt.upload_dataset is True
 
-        arguments:
+        Arguments:
         opt (namespace) -- Commandline arguments for this run
         hyp (dict) -- Hyperparameters for this run
 
@@ -133,7 +133,7 @@ def log_scalars(self, metrics, epoch):
         """
         Log scalars/metrics to ClearML.
 
-        arguments:
+        Arguments:
         metrics (dict) Metrics in dict format: {"metrics/mAP": 0.8, ...}
         epoch (int) iteration number for the current set of metrics
         """
@@ -145,7 +145,7 @@ def log_model(self, model_path, model_name, epoch=0):
         """
         Log model weights to ClearML.
 
-        arguments:
+        Arguments:
         model_path (PosixPath or str) Path to the model weights
         model_name (str) Name of the model visible in ClearML
         epoch (int) Iteration / epoch of the model weights
@@ -158,7 +158,7 @@ def log_summary(self, metrics):
         """
         Log final metrics to a summary table.
 
-        arguments:
+        Arguments:
         metrics (dict) Metrics in dict format: {"metrics/mAP": 0.8, ...}
         """
         for k, v in metrics.items():
@@ -168,7 +168,7 @@ def log_plot(self, title, plot_path):
         """
         Log image as plot in the plot section of ClearML.
 
-        arguments:
+        Arguments:
         title (str) Title of the plot
         plot_path (PosixPath or str) Path to the saved image file
         """
@@ -183,7 +183,7 @@ def log_debug_samples(self, files, title="Debug Samples"):
         """
         Log files (images) as debug samples in the ClearML task.
 
-        arguments:
+        Arguments:
         files (List(PosixPath)) a list of file paths in PosixPath format
         title (str) A title that groups together images with the same values
         """
@@ -199,7 +199,7 @@ def log_image_with_boxes(self, image_path, boxes, class_names, image, conf_thres
         """
         Draw the bounding boxes on a single image and report the result as a ClearML debug sample.
 
-        arguments:
+        Arguments:
         image_path (PosixPath) the path the original image file
         boxes (list): list of scaled predictions in the format - [xmin, ymin, xmax, ymax, confidence, class]
         class_names (dict): dict containing mapping of class int to class name
diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py
index 930f2c7543af..6a32c8cc7b03 100644
--- a/utils/loggers/wandb/wandb_utils.py
+++ b/utils/loggers/wandb/wandb_utils.py
@@ -49,7 +49,7 @@ def __init__(self, opt, run_id=None, job_type="Training"):
         - Upload dataset if opt.upload_dataset is True
         - Setup training processes if job_type is 'Training'
 
-        arguments:
+        Arguments:
         opt (namespace) -- Commandline arguments for this run
         run_id (str) -- Run ID of W&B run to be resumed
         job_type (str) -- To set the job_type for this run
@@ -90,7 +90,7 @@ def setup_training(self, opt):
         - Update data_dict, to contain info of previous run if resumed and the paths of dataset artifact if downloaded
         - Setup log_dict, initialize bbox_interval
 
-        arguments:
+        Arguments:
         opt (namespace) -- commandline arguments for this run
 
         """
@@ -120,7 +120,7 @@ def log_model(self, path, opt, epoch, fitness_score, best_model=False):
         """
         Log the model checkpoint as W&B artifact.
 
-        arguments:
+        Arguments:
         path (Path) -- Path of directory containing the checkpoints
         opt (namespace) -- Command line arguments for this run
         epoch (int) -- Current epoch number
@@ -159,7 +159,7 @@ def log(self, log_dict):
         """
         Save the metrics to the logging dictionary.
 
-        arguments:
+        Arguments:
         log_dict (Dict) -- metrics/media to be logged in current step
         """
         if self.wandb_run:
@@ -170,7 +170,7 @@ def end_epoch(self):
         """
         Commit the log_dict, model artifacts and Tables to W&B and flush the log_dict.
 
-        arguments:
+        Arguments:
         best_result (boolean): Boolean representing if the result of this evaluation is best or not
         """
         if self.wandb_run:
@@ -197,7 +197,7 @@ def finish_run(self):
 
 @contextmanager
 def all_logging_disabled(highest_level=logging.CRITICAL):
-    """source - https://gist.github.com/simon-weber/7853144
+    """Source - https://gist.github.com/simon-weber/7853144
     A context manager that will prevent any logging messages triggered during the body from being processed.
     :param highest_level: the maximum logging level in use.
       This would only need to be changed if a custom level greater than CRITICAL is defined.
diff --git a/utils/metrics.py b/utils/metrics.py
index 385fdc471748..9acc38591f96 100644
--- a/utils/metrics.py
+++ b/utils/metrics.py
@@ -41,7 +41,6 @@ def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir=".", names
     # Returns
         The average precision as computed in py-faster-rcnn.
     """
-
     # Sort by objectness
     i = np.argsort(-conf)
     tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]
@@ -103,7 +102,6 @@ def compute_ap(recall, precision):
     # Returns
         Average precision, precision curve, recall curve
     """
-
     # Append sentinel values to beginning and end
     mrec = np.concatenate(([0.0], recall, [1.0]))
     mpre = np.concatenate(([1.0], precision, [0.0]))
@@ -137,6 +135,7 @@ def process_batch(self, detections, labels):
         Return intersection-over-union (Jaccard index) of boxes.
 
         Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
+
         Arguments:
             detections (Array[N, 6]), x1, y1, x2, y2, conf, class
             labels (Array[M, 5]), class, x1, y1, x2, y2
@@ -233,7 +232,6 @@ def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7
 
     Input shapes are box1(1,4) to box2(n,4).
""" - # Get the coordinates of bounding boxes if xywh: # transform from xywh to xyxy (x1, y1, w1, h1), (x2, y2, w2, h2) = box1.chunk(4, -1), box2.chunk(4, -1) @@ -279,14 +277,15 @@ def box_iou(box1, box2, eps=1e-7): Return intersection-over-union (Jaccard index) of boxes. Both sets of boxes are expected to be in (x1, y1, x2, y2) format. + Arguments: box1 (Tensor[N, 4]) box2 (Tensor[M, 4]) + Returns: iou (Tensor[N, M]): the NxM matrix containing the pairwise IoU values for every element in boxes1 and boxes2 """ - # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2) (a1, a2), (b1, b2) = box1.unsqueeze(1).chunk(2, 2), box2.unsqueeze(0).chunk(2, 2) inter = (torch.min(a2, b2) - torch.max(a1, b1)).clamp(0).prod(2) @@ -304,7 +303,6 @@ def bbox_ioa(box1, box2, eps=1e-7): box2: np.array of shape(nx4) returns: np.array of shape(n) """ - # Get the coordinates of bounding boxes b1_x1, b1_y1, b1_x2, b1_y2 = box1 b2_x1, b2_y1, b2_x2, b2_y2 = box2.T diff --git a/utils/segment/augmentations.py b/utils/segment/augmentations.py index d7dd8aec6691..2e1dca1198b0 100644 --- a/utils/segment/augmentations.py +++ b/utils/segment/augmentations.py @@ -29,7 +29,6 @@ def random_perspective( ): # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10)) # targets = [cls, xyxy] - """Applies random perspective, rotation, scale, shear, and translation augmentations to an image and targets.""" height = im.shape[0] + border[0] * 2 # shape(h,w,c) width = im.shape[1] + border[1] * 2 diff --git a/utils/segment/general.py b/utils/segment/general.py index 2f65d60238dd..0793470a95e4 100644 --- a/utils/segment/general.py +++ b/utils/segment/general.py @@ -14,7 +14,6 @@ def crop_mask(masks, boxes): - masks should be a size [n, h, w] tensor of masks - boxes should be a size [n, 4] tensor of bbox coords in relative point form """ - n, h, w = masks.shape x1, y1, x2, y2 = torch.chunk(boxes[:, :, None], 4, 1) # x1 shape(1,1,n) r = torch.arange(w, device=masks.device, dtype=x1.dtype)[None, None, :] # rows shape(1,w,1) @@ -33,7 +32,6 @@ def process_mask_upsample(protos, masks_in, bboxes, shape): return: h, w, n """ - c, mh, mw = protos.shape # CHW masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) masks = F.interpolate(masks[None], shape, mode="bilinear", align_corners=False)[0] # CHW @@ -51,7 +49,6 @@ def process_mask(protos, masks_in, bboxes, shape, upsample=False): return: h, w, n """ - c, mh, mw = protos.shape # CHW ih, iw = shape masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) # CHW diff --git a/utils/triton.py b/utils/triton.py index 3d529ec88a07..2fee42815517 100644 --- a/utils/triton.py +++ b/utils/triton.py @@ -17,10 +17,9 @@ class TritonRemoteModel: def __init__(self, url: str): """ - Keyword arguments: + Keyword Arguments: url: Fully qualified address of the Triton server - for e.g. grpc://localhost:8000 """ - parsed_url = urlparse(url) if parsed_url.scheme == "grpc": from tritonclient.grpc import InferenceServerClient, InferInput