Remove apex.parallel. Use torch.nn.parallel
For future compatibility
NanoCode012 committed Jul 7, 2020
1 parent 77c8e27 commit 2aa3301
Showing 1 changed file with 2 additions and 6 deletions.
train.py (2 additions, 6 deletions)
@@ -7,6 +7,7 @@
 import torch.optim.lr_scheduler as lr_scheduler
 import torch.utils.data
 from torch.utils.tensorboard import SummaryWriter
+from torch.nn.parallel import DistributedDataParallel as DDP

 import test  # import test.py to get mAP after each epoch
 from models.yolo import Model
@@ -17,9 +18,7 @@
 mixed_precision = True
 try:  # Mixed precision training https://github.com/NVIDIA/apex
     from apex import amp
-    from apex.parallel import DistributedDataParallel as DDP
 except:
-    from torch.nn.parallel import DistributedDataParallel as DDP
     print('Apex recommended for faster mixed precision training: https://github.com/NVIDIA/apex')
     mixed_precision = False  # not installed

@@ -195,10 +194,7 @@ def train(hyp, tb_writer, opt, device):
     if device.type != 'cpu' and opt.local_rank != -1:
         # pip install torch==1.4.0+cku100 torchvision==0.5.0+cu100 -f https://download.pytorch.org/whl/torch_stable.html
         model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
-        if mixed_precision:
-            model = DDP(model, delay_allreduce=True)
-        else:
-            model = DDP(model, device_ids=[opt.local_rank])
+        model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank)

     # Model parameters
     hyp['cls'] *= nc / 80.  # scale coco-tuned hyp['cls'] to current dataset
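For reference, a minimal, self-contained sketch of the pattern the new code path relies on (native torch.nn.parallel.DistributedDataParallel with SyncBatchNorm conversion, keyed on the local rank) is shown below. The toy model and argument parsing here are illustrative assumptions, not code taken from train.py:

import argparse
import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP

parser = argparse.ArgumentParser()
parser.add_argument('--local_rank', type=int, default=-1)  # set by torch.distributed.launch
opt = parser.parse_args()

# Hypothetical stand-in for the YOLO model
model = torch.nn.Sequential(torch.nn.Conv2d(3, 16, 3), torch.nn.BatchNorm2d(16))

if torch.cuda.is_available() and opt.local_rank != -1:
    torch.cuda.set_device(opt.local_rank)
    dist.init_process_group(backend='nccl', init_method='env://')  # rank/world size read from env vars
    model = model.to(opt.local_rank)
    # Convert BatchNorm layers to SyncBatchNorm, then wrap in native DDP
    model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank)

At the time of this commit, such a script would be launched per node with something like python -m torch.distributed.launch --nproc_per_node=<num_gpus> train.py; later PyTorch releases replace that launcher with torchrun.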
