diff --git a/val.py b/val.py
index 64c4d4ff9dae..8f2119531949 100644
--- a/val.py
+++ b/val.py
@@ -143,7 +143,7 @@ def run(data,
             batch_size = model.batch_size
         else:
             device = model.device
-            if not pt or jit:
+            if not (pt or jit):
                 batch_size = 1  # export.py models default to batch-size 1
                 LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models')
 
@@ -152,6 +152,7 @@ def run(data,
 
     # Configure
     model.eval()
+    cuda = device.type != 'cpu'
     is_coco = isinstance(data.get('val'), str) and data['val'].endswith('coco/val2017.txt')  # COCO dataset
     nc = 1 if single_cls else int(data['nc'])  # number of classes
     iouv = torch.linspace(0.5, 0.95, 10).to(device)  # iou vector for mAP@0.5:0.95
@@ -177,7 +178,7 @@ def run(data,
     pbar = tqdm(dataloader, desc=s, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')  # progress bar
     for batch_i, (im, targets, paths, shapes) in enumerate(pbar):
         t1 = time_sync()
-        if pt or jit or engine:
+        if cuda:
             im = im.to(device, non_blocking=True)
             targets = targets.to(device)
         im = im.half() if half else im.float()  # uint8 to fp16/32