diff --git a/models/common.py b/models/common.py
index ec5fbfaec4ca..c269cfef9a6c 100644
--- a/models/common.py
+++ b/models/common.py
@@ -320,9 +320,11 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=False):
             net = cv2.dnn.readNetFromONNX(w)
         elif onnx:  # ONNX Runtime
             LOGGER.info(f'Loading {w} for ONNX Runtime inference...')
-            check_requirements(('onnx', 'onnxruntime-gpu' if torch.cuda.is_available() else 'onnxruntime'))
+            cuda = torch.cuda.is_available()
+            check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime'))
             import onnxruntime
-            session = onnxruntime.InferenceSession(w, None)
+            providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']
+            session = onnxruntime.InferenceSession(w, providers=providers)
         elif engine:  # TensorRT
             LOGGER.info(f'Loading {w} for TensorRT inference...')
             import tensorrt as trt  # https://developer.nvidia.com/nvidia-tensorrt-download
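
For reference, a minimal standalone sketch of the same provider-selection pattern outside `DetectMultiBackend`. The model path `yolov5s.onnx`, the 640x640 input shape, and the dummy-input check are placeholder assumptions for illustration, not part of the patch:

```python
# Standalone sketch (assumption: an exported yolov5s.onnx exists next to this script).
# Picks ONNX Runtime execution providers based on CUDA availability, as in the diff above.
import numpy as np
import onnxruntime
import torch

cuda = torch.cuda.is_available()
providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']
session = onnxruntime.InferenceSession('yolov5s.onnx', providers=providers)

# Run a dummy float32 NCHW tensor through the session and report what was actually used.
input_name = session.get_inputs()[0].name
im = np.zeros((1, 3, 640, 640), dtype=np.float32)  # assumed 640x640 export size
outputs = session.run(None, {input_name: im})
print(session.get_providers(), [o.shape for o in outputs])
```

Without an explicit `providers` list, recent onnxruntime-gpu builds raise an error (or silently fall back to CPU in older versions), so passing the list keyword-style makes GPU inference explicit while keeping `CPUExecutionProvider` as the fallback.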