From edbfcf355bce57a8b2cf6b77fc693ec8fea699c1 Mon Sep 17 00:00:00 2001
From: Ella Charlaix <80481427+echarlaix@users.noreply.github.com>
Date: Wed, 18 Jan 2023 10:08:37 +0100
Subject: [PATCH] Fix the ORTQuantizer model path of the model to quantize
 (#701)

Fix ORTQuantizer loading from a specific ONNX file
---
 optimum/onnxruntime/quantization.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/optimum/onnxruntime/quantization.py b/optimum/onnxruntime/quantization.py
index 962fa2096a..4bbb978e6a 100644
--- a/optimum/onnxruntime/quantization.py
+++ b/optimum/onnxruntime/quantization.py
@@ -134,7 +134,7 @@ def from_pretrained(
         if isinstance(model_or_path, ORTModelForConditionalGeneration):
             raise ValueError(ort_quantizer_error_message)
-        elif isinstance(model_or_path, Path):
+        elif isinstance(model_or_path, Path) and file_name is None:
             onnx_files = list(model_or_path.glob("*.onnx"))
             if len(onnx_files) == 0:
                 raise FileNotFoundError(f"Could not find any ONNX model file in {model_or_path}")
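
Note (not part of the patch): with this change, ORTQuantizer.from_pretrained only globs for *.onnx files
when no explicit file_name is passed. Below is a minimal usage sketch of the behaviour the fix enables;
the directory and file names are illustrative assumptions, not taken from the patch.

from pathlib import Path
from optimum.onnxruntime import ORTQuantizer

# Hypothetical local directory that contains several exported *.onnx files.
model_dir = Path("onnx_model_dir")

# Before the fix, passing a Path always triggered the *.onnx glob, which fails
# (or resolves ambiguously) when the directory holds more than one model file.
# With the fix, an explicit file_name skips the glob and is used directly.
quantizer = ORTQuantizer.from_pretrained(model_dir, file_name="decoder_model.onnx")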