From e145517f777cef54996bf7405127d15993c32ef6 Mon Sep 17 00:00:00 2001 From: Nanohana <56324869+GabrielDornelles@users.noreply.github.com> Date: Mon, 16 May 2022 16:03:02 -0300 Subject: [PATCH] Replace `openvino-dev` with OpenVINO Runtime inference (#7843) * Uses OpenVINO runtime instead of openvino-dev * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * export with openvino package * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Revert export.py * Update common.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- models/common.py | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/models/common.py b/models/common.py index 00641eaa8d15..0c028352abac 100644 --- a/models/common.py +++ b/models/common.py @@ -354,13 +354,14 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, stride, names = int(meta['stride']), eval(meta['names']) elif xml: # OpenVINO LOGGER.info(f'Loading {w} for OpenVINO inference...') - check_requirements(('openvino-dev',)) # requires openvino-dev: https://pypi.org/project/openvino-dev/ - import openvino.inference_engine as ie - core = ie.IECore() + check_requirements(('openvino',)) # requires openvino: https://pypi.org/project/openvino/ + from openvino.runtime import Core + ie = Core() if not Path(w).is_file(): # if not *.xml w = next(Path(w).glob('*.xml')) # get *.xml file from *_openvino_model dir - network = core.read_network(model=w, weights=Path(w).with_suffix('.bin')) # *.xml, *.bin paths - executable_network = core.load_network(network, device_name='CPU', num_requests=1) + network = ie.read_model(model=w, weights=Path(w).with_suffix('.bin')) + executable_network = ie.compile_model(model=network, device_name="CPU") + self.output_layer = next(iter(executable_network.outputs)) elif engine: # 
TensorRT LOGGER.info(f'Loading {w} for TensorRT inference...') import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download @@ -444,11 +445,7 @@ def forward(self, im, augment=False, visualize=False, val=False): y = self.session.run([self.session.get_outputs()[0].name], {self.session.get_inputs()[0].name: im})[0] elif self.xml: # OpenVINO im = im.cpu().numpy() # FP32 - desc = self.ie.TensorDesc(precision='FP32', dims=im.shape, layout='NCHW') # Tensor Description - request = self.executable_network.requests[0] # inference request - request.set_blob(blob_name='images', blob=self.ie.Blob(desc, im)) # name=next(iter(request.input_blobs)) - request.infer() - y = request.output_blobs['output'].buffer # name=next(iter(request.output_blobs)) + y = self.executable_network([im])[self.output_layer] elif self.engine: # TensorRT assert im.shape == self.bindings['images'].shape, (im.shape, self.bindings['images'].shape) self.binding_addrs['images'] = int(im.data_ptr())