
Commit cb9ba0d

Fix batch-size on resume for multi-gpu (ultralytics#1942)
NanoCode012 committed Jan 14, 2021
1 parent 91bca3c commit cb9ba0d
Showing 1 changed file with 1 addition and 1 deletion.
train.py (2 changes: 1 addition & 1 deletion)
@@ -477,7 +477,7 @@ def train(hyp, opt, device, tb_writer=None, wandb=None):
         apriori = opt.global_rank, opt.local_rank
         with open(Path(ckpt).parent.parent / 'opt.yaml') as f:
             opt = argparse.Namespace(**yaml.load(f, Loader=yaml.FullLoader))  # replace
-        opt.cfg, opt.weights, opt.resume, opt.global_rank, opt.local_rank = '', ckpt, True, *apriori  # reinstate
+        opt.cfg, opt.weights, opt.resume, opt.batch_size, opt.global_rank, opt.local_rank = '', ckpt, True, opt.total_batch_size, *apriori  # reinstate
         logger.info('Resuming training from %s' % ckpt)
     else:
         # opt.hyp = opt.hyp or ('hyp.finetune.yaml' if opt.weights else 'hyp.scratch.yaml')
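For context, a sketch of why the reinstated value matters: in that era's train.py, --batch-size is the total across all GPUs, and the DDP setup divides each process's opt.batch_size by the world size before opt.yaml is written. Reloading opt.yaml verbatim on --resume therefore restores the already-divided per-GPU value. The reconstruction below is simplified and paraphrased from the diff, not the exact file; resume_opt is a hypothetical wrapper name.

# Simplified reconstruction of the resume path fixed by this commit; names
# mirror the diff, while the surrounding DDP plumbing is paraphrased.
import argparse
from pathlib import Path

import yaml


def resume_opt(ckpt, opt):
    """Reload the options saved next to a checkpoint, as on --resume.

    opt.yaml was dumped after DDP setup divided --batch-size by the world
    size, so the batch_size stored in that file is the per-GPU value.
    """
    apriori = opt.global_rank, opt.local_rank  # ranks come from the new launch
    with open(Path(ckpt).parent.parent / 'opt.yaml') as f:
        opt = argparse.Namespace(**yaml.load(f, Loader=yaml.FullLoader))  # replace
    # The fix: reinstate the TOTAL batch size, not the saved per-GPU value,
    # so the DDP setup downstream divides by the world size exactly once.
    opt.cfg, opt.weights, opt.resume, opt.batch_size, opt.global_rank, opt.local_rank = \
        '', ckpt, True, opt.total_batch_size, *apriori  # reinstate
    return opt

Assuming the usual multi-GPU workflow of that release, a resume re-runs the same launcher with --resume, e.g. python -m torch.distributed.launch --nproc_per_node 2 train.py --resume, and each rank now recovers the total batch size before dividing it exactly once.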
