diff --git a/classify/train.py b/classify/train.py
index 5ae6980716a..8ff9d1582d2 100644
--- a/classify/train.py
+++ b/classify/train.py
@@ -180,6 +180,7 @@ def train(opt, device):
     # lf = lambda x: ((1 + math.cos(x * math.pi / epochs)) / 2) * (1 - lrf) + lrf  # cosine
     def lf(x):
+        """Linear learning rate scheduler function, scaling learning rate from initial value to `lrf` over `epochs`."""
         return (1 - x / epochs) * (1 - lrf) + lrf  # linear
 
     scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
diff --git a/export.py b/export.py
index 32f6d303acc..7d79e8bf6a4 100644
--- a/export.py
+++ b/export.py
@@ -134,6 +134,7 @@ def try_export(inner_func):
     inner_args = get_default_args(inner_func)
 
     def outer_func(*args, **kwargs):
+        """Logs success/failure and execution details of model export functions wrapped with @try_export decorator."""
         prefix = inner_args["prefix"]
         try:
             with Profile() as dt:
@@ -224,7 +225,7 @@ def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr("ONNX
 
 @try_export
 def export_openvino(file, metadata, half, int8, data, prefix=colorstr("OpenVINO:")):
-    # YOLOv5 OpenVINO export
+    """Exports a YOLOv5 model to OpenVINO format with optional FP16 and INT8 quantization; see https://pypi.org/project/openvino-dev/."""
     check_requirements("openvino-dev>=2023.0")  # requires openvino-dev: https://pypi.org/project/openvino-dev/
     import openvino.runtime as ov  # noqa
     from openvino.tools import mo  # noqa
@@ -244,6 +245,7 @@ def export_openvino(file, metadata, half, int8, data, prefix=colorstr("OpenVINO:
         from utils.dataloaders import create_dataloader
 
         def gen_dataloader(yaml_path, task="train", imgsz=640, workers=4):
+            """Generates a DataLoader for model training or validation based on the given YAML dataset configuration."""
            data_yaml = check_yaml(yaml_path)
            data = check_dataset(data_yaml)
            dataloader = create_dataloader(
diff --git a/segment/train.py b/segment/train.py
index ffd1746ade0..379fed0b2f1 100644
--- a/segment/train.py
+++ b/segment/train.py
@@ -216,6 +216,7 @@ def train(hyp, opt, device, callbacks):
     else:
 
         def lf(x):
+            """Linear learning rate scheduler decreasing from 1 to hyp['lrf'] over `epochs`."""
             return (1 - x / epochs) * (1.0 - hyp["lrf"]) + hyp["lrf"]  # linear
 
     scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)  # plot_lr_scheduler(optimizer, scheduler, epochs)
diff --git a/segment/val.py b/segment/val.py
index b0a941faa15..b5e9f7557ec 100644
--- a/segment/val.py
+++ b/segment/val.py
@@ -91,6 +91,7 @@ def save_one_json(predn, jdict, path, class_map, pred_masks):
     from pycocotools.mask import encode
 
     def single_encode(x):
+        """Encodes binary mask arrays into RLE (Run-Length Encoding) format for JSON serialization."""
         rle = encode(np.asarray(x[:, :, None], order="F", dtype="uint8"))[0]
         rle["counts"] = rle["counts"].decode("utf-8")
         return rle
diff --git a/train.py b/train.py
index 44cbd1ac062..472c1d39598 100644
--- a/train.py
+++ b/train.py
@@ -226,6 +226,7 @@ def train(hyp, opt, device, callbacks):
     else:
 
         def lf(x):
+            """Linear learning rate scheduler function with decay calculated by epoch proportion."""
             return (1 - x / epochs) * (1.0 - hyp["lrf"]) + hyp["lrf"]  # linear
 
     scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)  # plot_lr_scheduler(optimizer, scheduler, epochs)
diff --git a/utils/__init__.py b/utils/__init__.py
index 91fc7694676..c7ece49fae1 100644
--- a/utils/__init__.py
+++ b/utils/__init__.py
@@ -34,6 +34,7 @@ def threaded(func):
     """Decorator @threaded to run a function in a separate thread, returning the thread instance."""
 
     def wrapper(*args, **kwargs):
+        """Runs the decorated function in a separate daemon thread and returns the thread instance."""
         thread = threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True)
         thread.start()
         return thread
diff --git a/utils/autoanchor.py b/utils/autoanchor.py
index 79b79db0fc1..00eee2eb776 100644
--- a/utils/autoanchor.py
+++ b/utils/autoanchor.py
@@ -33,6 +33,7 @@ def check_anchors(dataset, model, thr=4.0, imgsz=640):
     wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float()  # wh
 
     def metric(k):  # compute metric
+        """Computes ratio metric, anchors above threshold, and best possible recall for YOLOv5 anchor evaluation."""
         r = wh[:, None] / k[None]
         x = torch.min(r, 1 / r).min(2)[0]  # ratio metric
         best = x.max(1)[0]  # best_x
@@ -86,16 +87,19 @@ def kmean_anchors(dataset="./data/coco128.yaml", n=9, img_size=640, thr=4.0, gen
     thr = 1 / thr
 
     def metric(k, wh):  # compute metrics
+        """Computes the wh-ratio metric between anchors `k` and boxes `wh`, returning all ratios and per-box best ratios."""
         r = wh[:, None] / k[None]
         x = torch.min(r, 1 / r).min(2)[0]  # ratio metric
         # x = wh_iou(wh, torch.tensor(k))  # iou metric
         return x, x.max(1)[0]  # x, best_x
 
     def anchor_fitness(k):  # mutation fitness
+        """Evaluates fitness of YOLOv5 anchors by computing recall and ratio metrics for an anchor evolution process."""
         _, best = metric(torch.tensor(k, dtype=torch.float32), wh)
         return (best * (best > thr).float()).mean()  # fitness
 
     def print_results(k, verbose=True):
+        """Sorts and logs kmeans-evolved anchor metrics and best possible recall values for YOLOv5 anchor evaluation."""
         k = k[np.argsort(k.prod(1))]  # sort small to large
         x, best = metric(k, wh0)
         bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n  # best possible recall, anch > thr
diff --git a/utils/downloads.py b/utils/downloads.py
index a7b599efad2..c7e2273c794 100644
--- a/utils/downloads.py
+++ b/utils/downloads.py
@@ -92,7 +92,7 @@ def attempt_download(file, repo="ultralytics/yolov5", release="v7.0"):
     from utils.general import LOGGER
 
     def github_assets(repository, version="latest"):
-        # Return GitHub repo tag (i.e. 'v7.0') and assets (i.e. ['yolov5s.pt', 'yolov5m.pt', ...])
+        """Fetches GitHub repository release tag and asset names using the GitHub API."""
         if version != "latest":
             version = f"tags/{version}"  # i.e. tags/v7.0
         response = requests.get(f"https://github.com/gitapi/repos/{repository}/releases/{version}").json()  # github api
diff --git a/utils/general.py b/utils/general.py
index ed38cc0d60c..95a76644776 100644
--- a/utils/general.py
+++ b/utils/general.py
@@ -343,7 +343,7 @@ def check_online():
     import socket
 
     def run_once():
-        # Check once
+        """Checks internet connectivity by attempting to create a connection to "1.1.1.1" on port 443."""
         try:
             socket.create_connection(("1.1.1.1", 443), 5)  # check host accessibility
             return True
@@ -587,7 +587,7 @@ def check_amp(model):
     from models.common import AutoShape, DetectMultiBackend
 
     def amp_allclose(model, im):
-        # All close FP32 vs AMP results
+        """Compares FP32 and AMP model inference outputs, ensuring they are close within a 10% absolute tolerance."""
         m = AutoShape(model, verbose=False)  # model
         a = m(im).xywhn[0]  # FP32 inference
         m.amp = True
@@ -652,7 +652,7 @@ def download(url, dir=".", unzip=True, delete=True, curl=False, threads=1, retry
     """Downloads and optionally unzips files concurrently, supporting retries and curl fallback."""
 
     def download_one(url, dir):
-        # Download 1 file
+        """Downloads a single file from `url` to `dir`, with retry support and optional curl fallback."""
         success = True
         if os.path.isfile(url):
             f = Path(url)  # filename
diff --git a/utils/loggers/comet/__init__.py b/utils/loggers/comet/__init__.py
index 3a91c49258a..846dcb42a22 100644
--- a/utils/loggers/comet/__init__.py
+++ b/utils/loggers/comet/__init__.py
@@ -67,6 +67,9 @@ class CometLogger:
     """Log metrics, parameters, source code, models and much more with Comet."""
 
     def __init__(self, opt, hyp, run_id=None, job_type="Training", **experiment_kwargs) -> None:
+        """Initializes CometLogger with given options, hyperparameters, run ID, job type, and additional experiment
+        arguments.
+        """
         self.job_type = job_type
         self.opt = opt
         self.hyp = hyp
diff --git a/utils/plots.py b/utils/plots.py
index 062658cda97..9bec34a159f 100644
--- a/utils/plots.py
+++ b/utils/plots.py
@@ -124,6 +124,9 @@ def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):
     # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy
     def butter_lowpass(cutoff, fs, order):
+        """Returns the low-pass Butterworth filter coefficients for the specified cutoff frequency, sample rate, and
+        filter order.
+        """
         nyq = 0.5 * fs
         normal_cutoff = cutoff / nyq
         return butter(order, normal_cutoff, btype="low", analog=False)
diff --git a/utils/segment/metrics.py b/utils/segment/metrics.py
index 7bdf3258abb..6f57dec132e 100644
--- a/utils/segment/metrics.py
+++ b/utils/segment/metrics.py
@@ -55,6 +55,9 @@ def ap_per_class_box_and_mask(
 
 class Metric:
     def __init__(self) -> None:
+        """Initializes performance metric attributes for precision, recall, F1 score, average precision, and class
+        indices.
+        """
         self.p = []  # (nc, )
         self.r = []  # (nc, )
         self.f1 = []  # (nc, )
@@ -151,6 +154,9 @@ class Metrics:
     """Metric for boxes and masks."""
 
     def __init__(self) -> None:
+        """Initializes Metric objects for bounding boxes and masks to compute performance metrics in the Metrics
+        class.
+        """
         self.metric_box = Metric()
         self.metric_mask = Metric()
diff --git a/utils/torch_utils.py b/utils/torch_utils.py
index 0b006d80562..d15f1f73f6c 100644
--- a/utils/torch_utils.py
+++ b/utils/torch_utils.py
@@ -37,6 +37,7 @@ def smart_inference_mode(torch_1_9=check_version(torch.__version__, "1.9.0")):
     """Applies torch.inference_mode() if torch>=1.9.0, else torch.no_grad() as a decorator for functions."""
 
     def decorate(fn):
+        """Applies torch.inference_mode() if torch>=1.9.0, else torch.no_grad() to the decorated function."""
         return (torch.inference_mode if torch_1_9 else torch.no_grad)()(fn)
 
     return decorate