New val.py cuda variable (#6957)
* New val.py `cuda` variable

Fix for ONNX GPU val.

* Update val.py
glenn-jocher committed Mar 12, 2022
1 parent c6b4f84 commit c84dd27
Showing 1 changed file with 3 additions and 2 deletions.
val.py: 5 changes (3 additions & 2 deletions)
@@ -143,7 +143,7 @@ def run(data,
             batch_size = model.batch_size
         else:
             device = model.device
-            if not pt or jit:
+            if not (pt or jit):
                 batch_size = 1  # export.py models default to batch-size 1
                 LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models')

@@ -152,6 +152,7 @@ def run(data,
     # Configure
     model.eval()
+    cuda = device.type != 'cpu'
     is_coco = isinstance(data.get('val'), str) and data['val'].endswith('coco/val2017.txt')  # COCO dataset
     nc = 1 if single_cls else int(data['nc'])  # number of classes
     iouv = torch.linspace(0.5, 0.95, 10).to(device)  # iou vector for mAP@0.5:0.95

@@ -177,7 +178,7 @@ def run(data,
         pbar = tqdm(dataloader, desc=s, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')  # progress bar
         for batch_i, (im, targets, paths, shapes) in enumerate(pbar):
             t1 = time_sync()
-            if pt or jit or engine:
+            if cuda:
                 im = im.to(device, non_blocking=True)
                 targets = targets.to(device)
             im = im.half() if half else im.float()  # uint8 to fp16/32
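The change swaps a backend-based check (`pt or jit or engine`) for a device-based one, so validation inputs are moved to the GPU whenever the selected device is CUDA, regardless of which backend loaded the model. Below is a minimal, self-contained sketch of that idea outside val.py; the `device`, `half`, and dummy `im` values are illustrative stand-ins, not part of the commit.

```python
import torch

# Sketch of the backend-agnostic check introduced by this commit.
# torch.device('cuda:0').type is 'cuda' and torch.device('cpu').type is 'cpu',
# so `cuda` is True for any CUDA device no matter which backend runs the model.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
cuda = device.type != 'cpu'

half = False                                          # stand-in for the --half flag
im = torch.zeros(1, 3, 640, 640, dtype=torch.uint8)   # stand-in for a dataloader batch
if cuda:                                              # previously: if pt or jit or engine
    im = im.to(device, non_blocking=True)             # move inputs to the GPU for any backend
    targets = torch.zeros(0, 6).to(device)            # labels follow the images to the device
im = im.half() if half else im.float()                # uint8 to fp16/32, as in val.py
```

Tying the transfer to the device rather than to the backend list is what the commit message describes as the fix for ONNX GPU validation: with the old condition, an ONNX model was not in the `pt or jit or engine` set, so its inputs were never moved to the CUDA device.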
