From a9f895d304aea5920e694606927fa9208aa7f0ed Mon Sep 17 00:00:00 2001 From: janus-zheng <106574221+janus-zheng@users.noreply.github.com> Date: Thu, 17 Nov 2022 20:42:26 +0800 Subject: [PATCH] Apply make_divisible for ONNX models in Autoshape (#10172) * Apply make_divisible for onnx models in Autoshape At line 697 we have this `make_divisible` function for pytorch models. * Context: we want to run inference on varied input sizes instead of a fixed image size. * When I test an image of size [720, 720] with a pytorch model (e.g., yolov5n.pt), we can see that it is reshaped to [736, 736] by the function. This is as expected. * When I test the same image with the onnx model (e.g., yolov5n.onnx, exported with `--dynamic`), I get an error because the input shape is not divisible by the model stride: ``` onnxruntime.capi.onnxruntime_pybind11_state.Fail: [ONNXRuntimeError] : 1 : FAIL : Non-zero status code returned while running Concat node. Name:'Concat_143' Status Message: concat.cc:156 PrepareForCompute Non concat axis dimensions must match: Axis 3 has mismatched dimensions of 45 and 46 ``` The simple solution is to apply the `make_divisible` function to onnx models too. 
Signed-off-by: janus-zheng <106574221+janus-zheng@users.noreply.github.com> * revise indent Signed-off-by: janus-zheng <106574221+janus-zheng@users.noreply.github.com> * Apply make_divisible to all formats All formats from DetectMultiBackend should have default stride=32 Signed-off-by: Glenn Jocher Signed-off-by: janus-zheng <106574221+janus-zheng@users.noreply.github.com> Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- models/common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/common.py b/models/common.py index 64f1b9354225..8b5ec1c786d8 100644 --- a/models/common.py +++ b/models/common.py @@ -694,7 +694,7 @@ def forward(self, ims, size=640, augment=False, profile=False): g = max(size) / max(s) # gain shape1.append([int(y * g) for y in s]) ims[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update - shape1 = [make_divisible(x, self.stride) for x in np.array(shape1).max(0)] if self.pt else size # inf shape + shape1 = [make_divisible(x, self.stride) for x in np.array(shape1).max(0)] # inf shape x = [letterbox(im, shape1, auto=False)[0] for im in ims] # pad x = np.ascontiguousarray(np.array(x).transpose((0, 3, 1, 2))) # stack and BHWC to BCHW x = torch.from_numpy(x).to(p.device).type_as(p) / 255 # uint8 to fp16/32