From ec2b8538cdf1ec3a37f7f2ffff4d591f322cc410 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?F=C3=A1bio=20Franco=20Uechi?= <308613+fabito@users.noreply.github.com>
Date: Tue, 16 May 2023 21:19:16 +1200
Subject: [PATCH] Fix fp16 (`--half`) support for `TritonRemoteModel` model type (#10787)

* Fix fp16 (--half) support for TritonRemoteModel

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Glenn Jocher
---
 models/common.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/models/common.py b/models/common.py
index 4ef03760c65d..16537703e730 100644
--- a/models/common.py
+++ b/models/common.py
@@ -333,7 +333,7 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False,
         super().__init__()
         w = str(weights[0] if isinstance(weights, list) else weights)
         pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, triton = self._model_type(w)
-        fp16 &= pt or jit or onnx or engine  # FP16
+        fp16 &= pt or jit or onnx or engine or triton  # FP16
         nhwc = coreml or saved_model or pb or tflite or edgetpu  # BHWC formats (vs torch BCWH)
         stride = 32  # default stride
         cuda = torch.cuda.is_available() and device.type != 'cpu'  # use CUDA
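
For context, the one-line change above adds `triton` to the set of backends for which the `fp16` flag is honored, so a `--half` / `fp16=True` request is no longer silently dropped when the weights argument points at a Triton Inference Server. A minimal sketch of how the fixed path might be exercised, assuming a CUDA device and a Triton server reachable at localhost:8000 serving a YOLOv5 model (the URL and input shape are illustrative assumptions, not part of the patch):

    import torch
    from models.common import DetectMultiBackend  # backend dispatcher patched above

    device = torch.device('cuda:0')
    # 'http://localhost:8000' is an assumed Triton endpoint; DetectMultiBackend
    # detects it as a triton weight source, and with this patch fp16 stays True
    model = DetectMultiBackend('http://localhost:8000', device=device, fp16=True)

    im = torch.zeros(1, 3, 640, 640, device=device).half()  # FP16 input to match model.fp16
    pred = model(im)  # inference routed through TritonRemoteModel with half-precision tensors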