DDP after autoanchor reorder (ultralytics#2421)
glenn-jocher committed Mar 10, 2021
1 parent d36f9a2 commit 712e540
Showing 1 changed file with 5 additions and 5 deletions.
train.py: 10 changes (5 additions & 5 deletions)
@@ -181,10 +181,6 @@ def train(hyp, opt, device, tb_writer=None, wandb=None):
         model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
         logger.info('Using SyncBatchNorm()')
 
-    # DDP mode
-    if cuda and rank != -1:
-        model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank)
-
     # Trainloader
     dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt,
                                             hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=rank,
@@ -214,7 +210,11 @@ def train(hyp, opt, device, tb_writer=None, wandb=None):
         # Anchors
         if not opt.noautoanchor:
             check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)
-        model.half().float()  # pre-reduce anchor precision
+        model.half().float()  # pre-reduce anchor precision
+
+    # DDP mode
+    if cuda and rank != -1:
+        model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank)
 
     # Model parameters
     hyp['box'] *= 3. / nl  # scale to layers
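The diff moves the DDP wrap from before the Trainloader to after the autoanchor check and the half()/float() precision round-trip. One reading of the ordering is that DistributedDataParallel broadcasts the module's parameters and buffers from rank 0 when it is constructed, so anchor updates applied to the raw model beforehand are the state every rank starts from. The sketch below illustrates that ordering with a stand-in module; build_model_for_rank, the anchors buffer, and the anchor adjustment are illustrative names and not part of the ultralytics code.

    import torch
    import torch.nn as nn
    from torch.nn.parallel import DistributedDataParallel as DDP

    def build_model_for_rank(device, local_rank, rank, cuda):
        model = nn.Conv2d(3, 16, 3).to(device)                             # stand-in for the detection model
        model.register_buffer('anchors', torch.ones(3, 2, device=device))  # stand-in anchor buffer

        # Autoanchor analogue: rewrite the anchor buffer on the unwrapped model
        model.anchors *= 1.5
        model.half().float()  # pre-reduce anchor precision, as in the commit

        # DDP mode comes last: DDP broadcasts the current parameters/buffers from
        # rank 0 at construction, so the updated anchors are what every rank sees
        if cuda and rank != -1:
            model = DDP(model, device_ids=[local_rank], output_device=local_rank)
        return model

As in train.py, the wrap is skipped when rank == -1, i.e. for single-GPU or CPU runs.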
