From 28eb90f62326725b0d13c5b5754b6547aa1407fd Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Thu, 24 Feb 2022 21:18:17 +0100
Subject: [PATCH 1/9] Update EMA

---
 train.py             |  2 +-
 utils/torch_utils.py | 20 +++++++++-----------
 2 files changed, 10 insertions(+), 12 deletions(-)

diff --git a/train.py b/train.py
index d8df31b72282..8c63c41b3e22 100644
--- a/train.py
+++ b/train.py
@@ -183,7 +183,7 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
     scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)  # plot_lr_scheduler(optimizer, scheduler, epochs)
 
     # EMA
-    ema = ModelEMA(model) if RANK in [-1, 0] else None
+    ema = ModelEMA(model, k=0.001 * (nbs / batch_size)) if RANK in [-1, 0] else None
 
     # Resume
     start_epoch, best_fitness = 0, 0.0
diff --git a/utils/torch_utils.py b/utils/torch_utils.py
index c5257c6ebfeb..77be8579dabb 100644
--- a/utils/torch_utils.py
+++ b/utils/torch_utils.py
@@ -32,9 +32,7 @@
 
 @contextmanager
 def torch_distributed_zero_first(local_rank: int):
-    """
-    Decorator to make all processes in distributed training wait for each local_master to do something.
-    """
+    # Decorator to make all processes in distributed training wait for each local_master to do something
     if local_rank not in [-1, 0]:
         dist.barrier(device_ids=[local_rank])
     yield
@@ -43,13 +41,13 @@ def torch_distributed_zero_first(local_rank: int):
 
 
 def date_modified(path=__file__):
-    # return human-readable file modification date, i.e. '2021-3-26'
+    # Return human-readable file modification date, i.e. '2021-3-26'
     t = datetime.datetime.fromtimestamp(Path(path).stat().st_mtime)
     return f'{t.year}-{t.month}-{t.day}'
 
 
 def git_describe(path=Path(__file__).parent):  # path must be a directory
-    # return human-readable git description, i.e. v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe
+    # Return human-readable git description, i.e. v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe
     s = f'git -C {path} describe --tags --long --always'
     try:
         return subprocess.check_output(s, shell=True, stderr=subprocess.STDOUT).decode()[:-1]
@@ -99,7 +97,7 @@ def select_device(device='', batch_size=0, newline=True):
 
 
 def time_sync():
-    # pytorch-accurate time
+    # PyTorch-accurate time
     if torch.cuda.is_available():
         torch.cuda.synchronize()
     return time.time()
@@ -205,7 +203,7 @@ def prune(model, amount=0.3):
 
 
 def fuse_conv_and_bn(conv, bn):
-    # Fuse convolution and batchnorm layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/
+    # Fuse Conv2d() and BatchNorm2d() layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/
     fusedconv = nn.Conv2d(conv.in_channels,
                           conv.out_channels,
                           kernel_size=conv.kernel_size,
@@ -214,12 +212,12 @@ def fuse_conv_and_bn(conv, bn):
                           groups=conv.groups,
                           bias=True).requires_grad_(False).to(conv.weight.device)
 
-    # prepare filters
+    # Prepare filters
     w_conv = conv.weight.clone().view(conv.out_channels, -1)
     w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
     fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape))
 
-    # prepare spatial bias
+    # Prepare spatial bias
     b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias
     b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
     fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)
@@ -252,7 +250,7 @@ def model_info(model, verbose=False, img_size=640):
 
 
 def scale_img(img, ratio=1.0, same_shape=False, gs=32):  # img(16,3,256,416)
-    # scales img(bs,3,y,x) by ratio constrained to gs-multiple
+    # Scales img(bs,3,y,x) by ratio constrained to gs-multiple
     if ratio == 1.0:
         return img
     else:
@@ -308,7 +306,7 @@ def __init__(self, model, decay=0.9999, updates=0):
         # if next(model.parameters()).device.type != 'cpu':
         #     self.ema.half()  # FP16 EMA
         self.updates = updates  # number of EMA updates
-        self.decay = lambda x: decay * (1 - math.exp(-x / 2000))  # decay exponential ramp (to help early epochs)
+        self.decay = lambda x: decay * (1 - math.exp(-x * k))  # decay exponential ramp (to help early epochs)
         for p in self.ema.parameters():
             p.requires_grad_(False)
 

From 791e795f92cc5f5886d4aaca7fa39705094e35f2 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Thu, 24 Feb 2022 21:18:29 +0100
Subject: [PATCH 2/9] Update EMA

---
 utils/torch_utils.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/utils/torch_utils.py b/utils/torch_utils.py
index 77be8579dabb..ae82a51074e7 100644
--- a/utils/torch_utils.py
+++ b/utils/torch_utils.py
@@ -300,7 +300,7 @@ class ModelEMA:
     For EMA details see https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
     """
 
-    def __init__(self, model, decay=0.9999, updates=0):
+    def __init__(self, model, decay=0.9999, k=0.001, updates=0):
         # Create EMA
         self.ema = deepcopy(de_parallel(model)).eval()  # FP32 EMA
         # if next(model.parameters()).device.type != 'cpu':

From ab080d6a233c9f0f18e9ad86bb0aa9ad6e2a6b82 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Thu, 24 Feb 2022 21:41:08 +0100
Subject: [PATCH 3/9] ratio invert

---
 train.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/train.py b/train.py
index 8c63c41b3e22..4792418e578d 100644
--- a/train.py
+++ b/train.py
@@ -183,7 +183,7 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
     scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)  # plot_lr_scheduler(optimizer, scheduler, epochs)
 
     # EMA
-    ema = ModelEMA(model, k=0.001 * (nbs / batch_size)) if RANK in [-1, 0] else None
+    ema = ModelEMA(model, k=0.001 * (batch_size / nbs)) if RANK in [-1, 0] else None
 
     # Resume
     start_epoch, best_fitness = 0, 0.0

From 0dca44ee2a409d154f80c7061fac7a3efcc2a44b Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Thu, 24 Feb 2022 22:01:30 +0100
Subject: [PATCH 4/9] fix ratio invert

---
 train.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/train.py b/train.py
index 4792418e578d..8c63c41b3e22 100644
--- a/train.py
+++ b/train.py
@@ -183,7 +183,7 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
     scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)  # plot_lr_scheduler(optimizer, scheduler, epochs)
 
     # EMA
-    ema = ModelEMA(model, k=0.001 * (batch_size / nbs)) if RANK in [-1, 0] else None
+    ema = ModelEMA(model, k=0.001 * (nbs / batch_size)) if RANK in [-1, 0] else None
 
     # Resume
     start_epoch, best_fitness = 0, 0.0

From dc1ace07fe794d5f1a23ffb0bf91c85216546b00 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Thu, 24 Feb 2022 22:09:27 +0100
Subject: [PATCH 5/9] fix2 ratio invert

---
 train.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/train.py b/train.py
index 8c63c41b3e22..4792418e578d 100644
--- a/train.py
+++ b/train.py
@@ -183,7 +183,7 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
     scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)  # plot_lr_scheduler(optimizer, scheduler, epochs)
 
     # EMA
-    ema = ModelEMA(model, k=0.001 * (nbs / batch_size)) if RANK in [-1, 0] else None
+    ema = ModelEMA(model, k=0.001 * (batch_size / nbs)) if RANK in [-1, 0] else None
 
     # Resume
     start_epoch, best_fitness = 0, 0.0

From 8d968789e2f35ca47c99272eff5c748760a9d2da Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Thu, 24 Feb 2022 23:22:21 +0100
Subject: [PATCH 6/9] warmup iterations to 100

---
 train.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/train.py b/train.py
index 4792418e578d..6d2c7ce43403 100644
--- a/train.py
+++ b/train.py
@@ -268,7 +268,7 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
 
     # Start training
     t0 = time.time()
-    nw = max(round(hyp['warmup_epochs'] * nb), 1000)  # number of warmup iterations, max(3 epochs, 1k iterations)
+    nw = max(round(hyp['warmup_epochs'] * nb), 100)  # number of warmup iterations, max(3 epochs, 100 iterations)
     # nw = min(nw, (epochs - start_epoch) / 2 * nb)  # limit warmup to < 1/2 of training
     last_opt_step = -1
     maps = np.zeros(nc)  # mAP per class

From c4cd18dd53d4f2d5d043ee6e9a1c4570376d6056 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Fri, 25 Feb 2022 09:48:37 +0100
Subject: [PATCH 7/9] ema_k

---
 train.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/train.py b/train.py
index 6d2c7ce43403..3c27bae973d0 100644
--- a/train.py
+++ b/train.py
@@ -183,7 +183,7 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
     scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)  # plot_lr_scheduler(optimizer, scheduler, epochs)
 
     # EMA
-    ema = ModelEMA(model, k=0.001 * (batch_size / nbs)) if RANK in [-1, 0] else None
+    ema = ModelEMA(model, k=opt.ema_k) if RANK in [-1, 0] else None
 
     # Resume
     start_epoch, best_fitness = 0, 0.0
@@ -481,6 +481,7 @@ def parse_opt(known=False):
     parser.add_argument('--quad', action='store_true', help='quad dataloader')
     parser.add_argument('--cos-lr', action='store_true', help='cosine LR scheduler')
     parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon')
+    parser.add_argument('--ema-k', type=float, default=0.001, help='EMA ramp constant')
     parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)')
     parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2')
     parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)')

From 41adeaa519ee01fe4286ed61f3ff0a0e2f4433cc Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Fri, 25 Feb 2022 12:00:14 +0100
Subject: [PATCH 8/9] implement tau

---
 train.py             | 3 +--
 utils/torch_utils.py | 2 +-
 2 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/train.py b/train.py
index 3c27bae973d0..60be962d447f 100644
--- a/train.py
+++ b/train.py
@@ -183,7 +183,7 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
     scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)  # plot_lr_scheduler(optimizer, scheduler, epochs)
 
     # EMA
-    ema = ModelEMA(model, k=opt.ema_k) if RANK in [-1, 0] else None
+    ema = ModelEMA(model) if RANK in [-1, 0] else None
 
     # Resume
     start_epoch, best_fitness = 0, 0.0
@@ -481,7 +481,6 @@ def parse_opt(known=False):
     parser.add_argument('--quad', action='store_true', help='quad dataloader')
     parser.add_argument('--cos-lr', action='store_true', help='cosine LR scheduler')
     parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon')
-    parser.add_argument('--ema-k', type=float, default=0.001, help='EMA ramp constant')
     parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)')
     parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2')
     parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)')
diff --git a/utils/torch_utils.py b/utils/torch_utils.py
index ae82a51074e7..bde28c1a15c5 100644
--- a/utils/torch_utils.py
+++ b/utils/torch_utils.py
@@ -306,7 +306,7 @@ def __init__(self, model, decay=0.9999, k=0.001, updates=0):
         # if next(model.parameters()).device.type != 'cpu':
         #     self.ema.half()  # FP16 EMA
         self.updates = updates  # number of EMA updates
-        self.decay = lambda x: decay * (1 - math.exp(-x * k))  # decay exponential ramp (to help early epochs)
+        self.decay = lambda x: decay * (1 - math.exp(-x / tau))  # decay exponential ramp (to help early epochs)
         for p in self.ema.parameters():
             p.requires_grad_(False)
 

From da3673d98c822bb5e6923e975c14302825d87139 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Fri, 25 Feb 2022 12:00:24 +0100
Subject: [PATCH 9/9] implement tau

---
 utils/torch_utils.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/utils/torch_utils.py b/utils/torch_utils.py
index bde28c1a15c5..c11d2a4269ef 100644
--- a/utils/torch_utils.py
+++ b/utils/torch_utils.py
@@ -300,7 +300,7 @@ class ModelEMA:
     For EMA details see https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
     """
 
-    def __init__(self, model, decay=0.9999, k=0.001, updates=0):
+    def __init__(self, model, decay=0.9999, tau=2000, updates=0):
         # Create EMA
         self.ema = deepcopy(de_parallel(model)).eval()  # FP32 EMA
         # if next(model.parameters()).device.type != 'cpu':
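
For reference, the decay ramp the series settles on in patches 8-9 is decay * (1 - exp(-updates / tau)) with tau=2000, replacing the per-batch-size constant k tried in patches 1-7 and matching the pre-series hard-coded x / 2000 constant, now exposed as a parameter. A minimal standalone sketch of how that ramp behaves (the helper name ema_decay is illustrative only, not part of the patch):

import math

def ema_decay(updates, decay=0.9999, tau=2000):
    # Effective EMA decay after a given number of updates:
    # ramps from ~0 toward `decay` with time constant `tau`,
    # so early updates follow the raw model closely and later
    # updates average over a long history.
    return decay * (1 - math.exp(-updates / tau))

# The ramp reaches ~63% of the final decay after tau updates
# and ~99% after roughly 5 * tau updates.
for n in (1, 100, 2000, 10000):
    print(n, round(ema_decay(n), 6))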