diff --git a/train.py b/train.py
index f6e66cb0ef09..876e1097e8e8 100644
--- a/train.py
+++ b/train.py
@@ -100,7 +100,7 @@ def train(hyp, opt, device, callbacks):  # hyp is path/to/hyp.yaml or hyp dictio
             callbacks.register_action(k, callback=getattr(loggers, k))
 
     # Config
-    plots = not evolve  # create plots
+    plots = not evolve and not opt.noplots  # create plots
     cuda = device.type != 'cpu'
     init_seeds(1 + RANK)
     with torch_distributed_zero_first(LOCAL_RANK):
@@ -373,7 +373,7 @@ def train(hyp, opt, device, callbacks):  # hyp is path/to/hyp.yaml or hyp dictio
                 mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G'  # (GB)
                 pbar.set_description(('%10s' * 2 + '%10.4g' * 5) %
                                      (f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1]))
-                callbacks.run('on_train_batch_end', ni, model, imgs, targets, paths, plots, opt.sync_bn)
+                callbacks.run('on_train_batch_end', ni, model, imgs, targets, paths, plots)
                 if callbacks.stop_training:
                     return
             # end batch ------------------------------------------------------------------------------------------------
@@ -488,6 +488,7 @@ def parse_opt(known=False):
     parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
     parser.add_argument('--noval', action='store_true', help='only validate final epoch')
     parser.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor')
+    parser.add_argument('--noplots', action='store_true', help='save no plot files')
     parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations')
     parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
     parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"')
diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py
index 3a3ec1ee455b..b33b04e46752 100644
--- a/utils/loggers/__init__.py
+++ b/utils/loggers/__init__.py
@@ -99,11 +99,11 @@ def on_pretrain_routine_end(self):
         if self.wandb:
             self.wandb.log({"Labels": [wandb.Image(str(x), caption=x.name) for x in paths]})
 
-    def on_train_batch_end(self, ni, model, imgs, targets, paths, plots, sync_bn):
+    def on_train_batch_end(self, ni, model, imgs, targets, paths, plots):
         # Callback runs on train batch end
         if plots:
             if ni == 0:
-                if not sync_bn:  # tb.add_graph() --sync known issue https://github.com/ultralytics/yolov5/issues/3754
+                if not self.opt.sync_bn:  # --sync known issue https://github.com/ultralytics/yolov5/issues/3754
                     with warnings.catch_warnings():
                         warnings.simplefilter('ignore')  # suppress jit trace warning
                         self.tb.add_graph(torch.jit.trace(de_parallel(model), imgs[0:1], strict=False), [])
diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py
index e65d028f28db..713b25c7e8d8 100644
--- a/utils/loggers/wandb/wandb_utils.py
+++ b/utils/loggers/wandb/wandb_utils.py
@@ -250,8 +250,8 @@ def setup_training(self, opt):
         self.map_val_table_path()
         if opt.bbox_interval == -1:
             self.bbox_interval = opt.bbox_interval = (opt.epochs // 10) if opt.epochs > 10 else 1
-            if opt.evolve:
-                self.bbox_interval = opt.bbox_interval = opt.epochs + 1
+            if opt.evolve or opt.noplots:
+                self.bbox_interval = opt.bbox_interval = opt.epochs + 1  # disable bbox_interval
         train_from_artifact = self.train_artifact_path is not None and self.val_artifact_path is not None
         # Update the the data_dict to point to local artifacts dir
         if train_from_artifact:
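
Usage sketch (not part of the patch): with this change applied, plot-file creation can be switched off from the command line via the new argparse flag above. The dataset and weights arguments shown are only the repo's usual example values, not something this patch requires:

    python train.py --data coco128.yaml --weights yolov5s.pt --epochs 3 --noplots   # train without saving plot files

Design note: `--noplots` only gates plotting (`plots = not evolve and not opt.noplots`), so checkpoints and metrics are still saved; on the W&B side, `bbox_interval` is set to `opt.epochs + 1` so no bounding-box debug images are logged either.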