diff --git a/models/common.py b/models/common.py
index 8035ef11a791..f9e4fc69f006 100644
--- a/models/common.py
+++ b/models/common.py
@@ -3,7 +3,6 @@
 Common modules
 """
 
-import logging
 import math
 import warnings
 from copy import copy
@@ -18,12 +17,10 @@
 from torch.cuda import amp
 
 from utils.datasets import exif_transpose, letterbox
-from utils.general import colorstr, increment_path, make_divisible, non_max_suppression, scale_coords, xyxy2xywh
+from utils.general import LOGGER, colorstr, increment_path, make_divisible, non_max_suppression, scale_coords, xyxy2xywh
 from utils.plots import Annotator, colors, save_one_box
 from utils.torch_utils import time_sync
 
-LOGGER = logging.getLogger(__name__)
-
 
 def autopad(k, p=None):  # kernel, padding
     # Pad to 'same'
diff --git a/train.py b/train.py
index 90abdc59db88..fedc55d8be5c 100644
--- a/train.py
+++ b/train.py
@@ -7,7 +7,6 @@
 """
 
 import argparse
-import logging
 import math
 import os
 import random
@@ -201,8 +200,8 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
 
     # DP mode
     if cuda and RANK == -1 and torch.cuda.device_count() > 1:
-        logging.warning('DP not recommended, instead use torch.distributed.run for best DDP Multi-GPU results.\n'
-                        'See Multi-GPU Tutorial at https://github.com/ultralytics/yolov5/issues/475 to get started.')
+        LOGGER.warning('WARNING: DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n'
+                       'See Multi-GPU Tutorial at https://github.com/ultralytics/yolov5/issues/475 to get started.')
         model = torch.nn.DataParallel(model)
 
     # SyncBatchNorm
diff --git a/utils/augmentations.py b/utils/augmentations.py
index 1c3e66fb87ab..5dcfd49fdd05 100644
--- a/utils/augmentations.py
+++ b/utils/augmentations.py
@@ -3,14 +3,13 @@
 Image augmentation functions
 """
 
-import logging
 import math
 import random
 
 import cv2
 import numpy as np
 
-from utils.general import check_version, colorstr, resample_segments, segment2box
+from utils.general import LOGGER, check_version, colorstr, resample_segments, segment2box
 from utils.metrics import bbox_ioa
 
 
@@ -32,11 +31,11 @@ def __init__(self):
                 A.ImageCompression(quality_lower=75, p=0.0)],
                 bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels']))
 
-            logging.info(colorstr('albumentations: ') + ', '.join(f'{x}' for x in self.transform.transforms if x.p))
+            LOGGER.info(colorstr('albumentations: ') + ', '.join(f'{x}' for x in self.transform.transforms if x.p))
         except ImportError:  # package not installed, skip
             pass
         except Exception as e:
-            logging.info(colorstr('albumentations: ') + f'{e}')
+            LOGGER.info(colorstr('albumentations: ') + f'{e}')
 
     def __call__(self, im, labels, p=1.0):
         if self.transform and random.random() < p:
diff --git a/utils/datasets.py b/utils/datasets.py
index 15fca1775849..94acaaa92cd7 100755
--- a/utils/datasets.py
+++ b/utils/datasets.py
@@ -6,7 +6,6 @@
 import glob
 import hashlib
 import json
-import logging
 import os
 import random
 import shutil
@@ -335,7 +334,7 @@ def update(self, i, cap, stream):
                 if success:
                     self.imgs[i] = im
                 else:
-                    LOGGER.warn('WARNING: Video stream unresponsive, please check your IP camera connection.')
+                    LOGGER.warning('WARNING: Video stream unresponsive, please check your IP camera connection.')
                     self.imgs[i] *= 0
                     cap.open(stream)  # re-open stream if signal was lost
             time.sleep(1 / self.fps[i])  # wait time
@@ -427,7 +426,7 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r
             d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted"
             tqdm(None, desc=prefix + d, total=n, initial=n)  # display cache results
             if cache['msgs']:
-                logging.info('\n'.join(cache['msgs']))  # display warnings
+                LOGGER.info('\n'.join(cache['msgs']))  # display warnings
         assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {HELP_URL}'
 
         # Read cache
@@ -525,9 +524,9 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''):
 
         pbar.close()
         if msgs:
-            logging.info('\n'.join(msgs))
+            LOGGER.info('\n'.join(msgs))
         if nf == 0:
-            logging.info(f'{prefix}WARNING: No labels found in {path}. See {HELP_URL}')
+            LOGGER.warning(f'{prefix}WARNING: No labels found in {path}. See {HELP_URL}')
         x['hash'] = get_hash(self.label_files + self.img_files)
         x['results'] = nf, nm, ne, nc, len(self.img_files)
         x['msgs'] = msgs  # warnings
@@ -535,9 +534,9 @@
         try:
            np.save(path, x)  # save cache for next time
            path.with_suffix('.cache.npy').rename(path)  # remove .npy suffix
-            logging.info(f'{prefix}New cache created: {path}')
+            LOGGER.info(f'{prefix}New cache created: {path}')
         except Exception as e:
-            logging.info(f'{prefix}WARNING: Cache directory {path.parent} is not writeable: {e}')  # path not writeable
+            LOGGER.warning(f'{prefix}WARNING: Cache directory {path.parent} is not writeable: {e}')  # not writeable
         return x
 
     def __len__(self):
diff --git a/utils/general.py b/utils/general.py
index 0f45d72498fe..b0ea1527129a 100755
--- a/utils/general.py
+++ b/utils/general.py
@@ -45,7 +45,7 @@
 def set_logging(name=None, verbose=True):
     # Sets level and returns logger
     rank = int(os.getenv('RANK', -1))  # rank in world for Multi-GPU trainings
-    logging.basicConfig(format="%(message)s", level=logging.INFO if (verbose and rank in (-1, 0)) else logging.WARN)
+    logging.basicConfig(format="%(message)s", level=logging.INFO if (verbose and rank in (-1, 0)) else logging.WARNING)
     return logging.getLogger(name)
 
 
diff --git a/utils/torch_utils.py b/utils/torch_utils.py
index b36e98d0b656..73acec8e819c 100644
--- a/utils/torch_utils.py
+++ b/utils/torch_utils.py
@@ -4,7 +4,6 @@
 """
 
 import datetime
-import logging
 import math
 import os
 import platform
@@ -100,7 +99,6 @@ def profile(input, ops, n=10, device=None):
     #     profile(input, [m1, m2], n=100)  # profile over 100 iterations
 
     results = []
-    logging.basicConfig(format="%(message)s", level=logging.INFO)
     device = device or select_device()
     print(f"{'Params':>12s}{'GFLOPs':>12s}{'GPU_mem (GB)':>14s}{'forward (ms)':>14s}{'backward (ms)':>14s}"
           f"{'input':>24s}{'output':>24s}")