From d80d80a8287df7e5d884c482e6d38297043bae57 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Wed, 8 Dec 2021 13:19:43 +0100
Subject: [PATCH 1/2] Add ONNX inference providers

Fix for https://github.com/ultralytics/yolov5/issues/5916
---
 models/common.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/models/common.py b/models/common.py
index ec5fbfaec4ca..c18fe10d4089 100644
--- a/models/common.py
+++ b/models/common.py
@@ -322,7 +322,7 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=False):
             LOGGER.info(f'Loading {w} for ONNX Runtime inference...')
             check_requirements(('onnx', 'onnxruntime-gpu' if torch.cuda.is_available() else 'onnxruntime'))
             import onnxruntime
-            session = onnxruntime.InferenceSession(w, None)
+            session = onnxruntime.InferenceSession(w, providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
         elif engine:  # TensorRT
             LOGGER.info(f'Loading {w} for TensorRT inference...')
             import tensorrt as trt  # https://developer.nvidia.com/nvidia-tensorrt-download

From e90192e38283e42bf1dc9cdc81b977ceb267e92d Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Wed, 8 Dec 2021 13:34:42 +0100
Subject: [PATCH 2/2] Update common.py

---
 models/common.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/models/common.py b/models/common.py
index c18fe10d4089..c269cfef9a6c 100644
--- a/models/common.py
+++ b/models/common.py
@@ -320,9 +320,11 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=False):
             net = cv2.dnn.readNetFromONNX(w)
         elif onnx:  # ONNX Runtime
             LOGGER.info(f'Loading {w} for ONNX Runtime inference...')
-            check_requirements(('onnx', 'onnxruntime-gpu' if torch.cuda.is_available() else 'onnxruntime'))
+            cuda = torch.cuda.is_available()
+            check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime'))
             import onnxruntime
-            session = onnxruntime.InferenceSession(w, providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
+            providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']
+            session = onnxruntime.InferenceSession(w, providers=providers)
         elif engine:  # TensorRT
             LOGGER.info(f'Loading {w} for TensorRT inference...')
             import tensorrt as trt  # https://developer.nvidia.com/nvidia-tensorrt-download
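
For reference, below is a minimal standalone sketch of the provider-selection
logic these two patches converge on. The 'model.onnx' path and the dummy-input
smoke test are placeholders for illustration, not part of the patch; the
onnxruntime calls used (InferenceSession, get_inputs, get_providers, run) are
standard API. Background: onnxruntime 1.9+ requires an explicit providers list
when a session is created with a GPU-enabled build, and requesting
CUDAExecutionProvider on a CPU-only machine produces a fallback warning, which
is what the second patch avoids by gating on torch.cuda.is_available():

    import numpy as np
    import onnxruntime
    import torch

    # Prefer CUDA only when a GPU is actually present; ONNX Runtime tries the
    # providers in order, so CPUExecutionProvider remains the fallback.
    cuda = torch.cuda.is_available()
    providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']
    session = onnxruntime.InferenceSession('model.onnx', providers=providers)  # placeholder model path

    # Smoke-test the session with a zero tensor shaped like the first input,
    # substituting 1 for any dynamic (non-integer) dimensions.
    inp = session.get_inputs()[0]
    shape = [d if isinstance(d, int) else 1 for d in inp.shape]
    y = session.run(None, {inp.name: np.zeros(shape, dtype=np.float32)})
    print(f'ran on {session.get_providers()[0]}, got {len(y)} output(s)')

Listing both providers (as in the first patch) already lets ONNX Runtime fall
back to CPU at runtime; the second patch simply avoids ever asking for the
CUDA provider on machines without a GPU, keeping logs clean and matching the
onnxruntime vs. onnxruntime-gpu requirement check above it.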