emoji-safe default logging #8888

Merged · 2 commits · Aug 6, 2022
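In short, this change makes the global YOLOv5 LOGGER handle emoji safety itself, so individual call sites across the repo no longer wrap their messages in emojis(). A typical call site before and after (taken from the utils/general.py diff below):

    # before: emoji handling at every call site
    LOGGER.info(emojis(f'{prefix}checks passed ✅'))

    # after: LOGGER.info / LOGGER.warning are emoji-safe by default
    LOGGER.info(f'{prefix}checks passed ✅')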
8 changes: 4 additions & 4 deletions utils/autoanchor.py
@@ -10,7 +10,7 @@
import yaml
from tqdm import tqdm

-from utils.general import LOGGER, colorstr, emojis
+from utils.general import LOGGER, colorstr

PREFIX = colorstr('AutoAnchor: ')

@@ -45,9 +45,9 @@ def metric(k): # compute metric
bpr, aat = metric(anchors.cpu().view(-1, 2))
s = f'\n{PREFIX}{aat:.2f} anchors/target, {bpr:.3f} Best Possible Recall (BPR). '
if bpr > 0.98: # threshold to recompute
-LOGGER.info(emojis(f'{s}Current anchors are a good fit to dataset ✅'))
+LOGGER.info(f'{s}Current anchors are a good fit to dataset ✅')
else:
-LOGGER.info(emojis(f'{s}Anchors are a poor fit to dataset ⚠️, attempting to improve...'))
+LOGGER.info(f'{s}Anchors are a poor fit to dataset ⚠️, attempting to improve...')
na = m.anchors.numel() // 2 # number of anchors
try:
anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False)
@@ -62,7 +62,7 @@ def metric(k): # compute metric
s = f'{PREFIX}Done ✅ (optional: update model *.yaml to use these anchors in the future)'
else:
s = f'{PREFIX}Done ⚠️ (original anchors better than new anchors, proceeding with original anchors)'
-LOGGER.info(emojis(s))
+LOGGER.info(s)


def kmean_anchors(dataset='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True):
4 changes: 2 additions & 2 deletions utils/autobatch.py
@@ -8,7 +8,7 @@
import numpy as np
import torch

-from utils.general import LOGGER, colorstr, emojis
+from utils.general import LOGGER, colorstr
from utils.torch_utils import profile


@@ -62,5 +62,5 @@ def autobatch(model, imgsz=640, fraction=0.9, batch_size=16):
b = batch_sizes[max(i - 1, 0)] # select prior safe point

fraction = np.polyval(p, b) / t # actual fraction predicted
-LOGGER.info(emojis(f'{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%) ✅'))
+LOGGER.info(f'{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%) ✅')
return b
20 changes: 11 additions & 9 deletions utils/general.py
@@ -97,6 +97,8 @@ def set_logging(name=None, verbose=VERBOSE):

set_logging() # run before defining LOGGER
LOGGER = logging.getLogger("yolov5") # define globally (used in train.py, val.py, detect.py, etc.)
+for fn in LOGGER.info, LOGGER.warning:
+    _fn, fn = fn, lambda x: _fn(emojis(x))  # emoji safe logging


def user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'):
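The two added lines above are the core of the change: LOGGER.info and LOGGER.warning are meant to be wrapped so every message passes through emojis(), whose existing implementation in utils/general.py strips non-ASCII characters on Windows consoles that cannot print emoji. One caveat: rebinding the loop variable fn does not by itself replace the methods on the logger object, so a self-contained sketch of the intended behavior looks more like the following (the setattr form and the default-argument capture fn=fn are illustrative additions, not code from this PR):

    import logging
    import platform

    def emojis(s=''):
        # Emoji-safe string: drop non-ASCII characters on Windows, pass through unchanged elsewhere
        return s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s

    logging.basicConfig(format='%(message)s', level=logging.INFO)  # minimal handler for this sketch
    LOGGER = logging.getLogger('yolov5')

    for fn in LOGGER.info, LOGGER.warning:
        # fn=fn captures each original bound method; setattr installs the wrapper on the logger instance
        setattr(LOGGER, fn.__name__, lambda x, fn=fn: fn(emojis(x)))  # emoji-safe logging

    LOGGER.info('AutoAnchor: anchors are a good fit ✅')  # emoji stripped automatically on Windows

With emoji handling centralized on the logger, callers can pass emoji-bearing strings directly, which is why the rest of this PR simply deletes the emojis() wrappers at each call site.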
@@ -334,7 +336,7 @@ def check_git_status(repo='ultralytics/yolov5'):
s += f"⚠️ YOLOv5 is out of date by {n} commit{'s' * (n > 1)}. Use `{pull}` or `git clone {url}` to update."
else:
s += f'up to date with {url} ✅'
-LOGGER.info(emojis(s)) # emoji-safe
+LOGGER.info(s)


def check_python(minimum='3.7.0'):
@@ -388,7 +390,7 @@ def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), insta
source = file.resolve() if 'file' in locals() else requirements
s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \
f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n"
-LOGGER.info(emojis(s))
+LOGGER.info(s)


def check_img_size(imgsz, s=32, floor=0):
@@ -489,9 +491,9 @@ def check_dataset(data, autodownload=True):

# Checks
for k in 'train', 'val', 'nc':
-assert k in data, emojis(f"data.yaml '{k}:' field missing ❌")
+assert k in data, f"data.yaml '{k}:' field missing ❌"
if 'names' not in data:
-LOGGER.warning(emojis("data.yaml 'names:' field missing ⚠️, assigning default names 'class0', 'class1', etc."))
+LOGGER.warning("data.yaml 'names:' field missing ⚠️, assigning default names 'class0', 'class1', etc.")
data['names'] = [f'class{i}' for i in range(data['nc'])] # default names

# Resolve paths
@@ -507,9 +509,9 @@ def check_dataset(data, autodownload=True):
if val:
val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path
if not all(x.exists() for x in val):
-LOGGER.info(emojis('\nDataset not found ⚠️, missing paths %s' % [str(x) for x in val if not x.exists()]))
+LOGGER.info('\nDataset not found ⚠️, missing paths %s' % [str(x) for x in val if not x.exists()])
if not s or not autodownload:
-raise Exception(emojis('Dataset not found ❌'))
+raise Exception('Dataset not found ❌')
t = time.time()
root = path.parent if 'path' in data else '..' # unzip directory i.e. '../'
if s.startswith('http') and s.endswith('.zip'): # URL
@@ -527,7 +529,7 @@ def check_dataset(data, autodownload=True):
r = exec(s, {'yaml': data}) # return None
dt = f'({round(time.time() - t, 1)}s)'
s = f"success ✅ {dt}, saved to {colorstr('bold', root)}" if r in (0, None) else f"failure {dt} ❌"
LOGGER.info(emojis(f"Dataset download {s}"))
LOGGER.info(f"Dataset download {s}")
check_font('Arial.ttf' if is_ascii(data['names']) else 'Arial.Unicode.ttf', progress=True) # download fonts
return data # dictionary

@@ -552,11 +554,11 @@ def amp_allclose(model, im):
im = f if f.exists() else 'https://ultralytics.com/images/bus.jpg' if check_online() else np.ones((640, 640, 3))
try:
assert amp_allclose(model, im) or amp_allclose(DetectMultiBackend('yolov5n.pt', device), im)
-LOGGER.info(emojis(f'{prefix}checks passed ✅'))
+LOGGER.info(f'{prefix}checks passed ✅')
return True
except Exception:
help_url = 'https://github.com/ultralytics/yolov5/issues/7908'
-LOGGER.warning(emojis(f'{prefix}checks failed ❌, disabling Automatic Mixed Precision. See {help_url}'))
+LOGGER.warning(f'{prefix}checks failed ❌, disabling Automatic Mixed Precision. See {help_url}')
return False


6 changes: 3 additions & 3 deletions utils/loggers/__init__.py
@@ -10,7 +10,7 @@
import torch
from torch.utils.tensorboard import SummaryWriter

-from utils.general import colorstr, cv2, emojis
+from utils.general import colorstr, cv2
from utils.loggers.clearml.clearml_utils import ClearmlLogger
from utils.loggers.wandb.wandb_utils import WandbLogger
from utils.plots import plot_images, plot_results
@@ -73,11 +73,11 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None,
if not wandb:
prefix = colorstr('Weights & Biases: ')
s = f"{prefix}run 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs in Weights & Biases"
-self.logger.info(emojis(s))
+self.logger.info(s)
if not clearml:
prefix = colorstr('ClearML: ')
s = f"{prefix}run 'pip install clearml' to automatically track, visualize and remotely train YOLOv5 🚀 runs in ClearML"
-self.logger.info(emojis(s))
+self.logger.info(s)

# TensorBoard
s = self.save_dir
2 changes: 1 addition & 1 deletion utils/torch_utils.py
@@ -97,7 +97,7 @@ def select_device(device='', batch_size=0, newline=True):

if not newline:
s = s.rstrip()
-LOGGER.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe
+LOGGER.info(s)
return torch.device(arg)


6 changes: 3 additions & 3 deletions val.py
@@ -38,7 +38,7 @@
from utils.callbacks import Callbacks
from utils.dataloaders import create_dataloader
from utils.general import (LOGGER, check_dataset, check_img_size, check_requirements, check_yaml,
-coco80_to_coco91_class, colorstr, emojis, increment_path, non_max_suppression, print_args,
+coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, print_args,
scale_coords, xywh2xyxy, xyxy2xywh)
from utils.metrics import ConfusionMatrix, ap_per_class, box_iou
from utils.plots import output_to_target, plot_images, plot_val_study
@@ -274,7 +274,7 @@ def run(
pf = '%20s' + '%11i' * 2 + '%11.3g' * 4 # print format
LOGGER.info(pf % ('all', seen, nt.sum(), mp, mr, map50, map))
if nt.sum() == 0:
-LOGGER.warning(emojis(f'WARNING: no labels found in {task} set, can not compute metrics without labels ⚠️'))
+LOGGER.warning(f'WARNING: no labels found in {task} set, can not compute metrics without labels ⚠️')

# Print results per class
if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):
@@ -365,7 +365,7 @@ def main(opt):

if opt.task in ('train', 'val', 'test'): # run normally
if opt.conf_thres > 0.001: # https://github.com/ultralytics/yolov5/issues/1466
-LOGGER.info(emojis(f'WARNING: confidence threshold {opt.conf_thres} > 0.001 produces invalid results ⚠️'))
+LOGGER.info(f'WARNING: confidence threshold {opt.conf_thres} > 0.001 produces invalid results ⚠️')
run(**vars(opt))

else: