From 787582f97251834f955ef05a77072b8c673a8397 Mon Sep 17 00:00:00 2001 From: NanoCode012 Date: Tue, 14 Jul 2020 20:38:58 +0700 Subject: [PATCH] Fixed issue with single GPU not having world_size --- train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/train.py b/train.py index 7494af2754b5..18c8b531e6f0 100644 --- a/train.py +++ b/train.py @@ -449,9 +449,9 @@ def train(hyp, tb_writer, opt, device): opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size))) # extend to 2 sizes (train, test) device = torch_utils.select_device(opt.device, apex=mixed_precision, batch_size=opt.batch_size) opt.total_batch_size = opt.batch_size + opt.world_size = 1 if device.type == 'cpu': mixed_precision = False - opt.world_size = 1 elif opt.local_rank != -1: # DDP mode assert torch.cuda.device_count() > opt.local_rank