Skip to content

Commit e145517

Browse files
GabrielDornelles, pre-commit-ci[bot], and glenn-jocher
authored and committed
Replace openvino-dev with OpenVINO Runtime inference (ultralytics#7843)
* Uses OpenVINO Runtime instead of openvino-dev
* [pre-commit.ci] auto fixes from pre-commit.com hooks; for more information, see https://pre-commit.ci
* Export with openvino package
* [pre-commit.ci] auto fixes from pre-commit.com hooks; for more information, see https://pre-commit.ci
* Revert export.py
* Update common.py

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Glenn Jocher <[email protected]>
1 parent 62ab9e9 commit e145517

File tree

1 file changed

+7
-10
lines changed

1 file changed

+7
-10
lines changed

models/common.py

Lines changed: 7 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -354,13 +354,14 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False,
354354
stride, names = int(meta['stride']), eval(meta['names'])
355355
elif xml: # OpenVINO
356356
LOGGER.info(f'Loading {w} for OpenVINO inference...')
357-
check_requirements(('openvino-dev',)) # requires openvino-dev: https://pypi.org/project/openvino-dev/
358-
import openvino.inference_engine as ie
359-
core = ie.IECore()
357+
check_requirements(('openvino',)) # requires openvino-dev: https://pypi.org/project/openvino-dev/
358+
from openvino.runtime import Core
359+
ie = Core()
360360
if not Path(w).is_file(): # if not *.xml
361361
w = next(Path(w).glob('*.xml')) # get *.xml file from *_openvino_model dir
362-
network = core.read_network(model=w, weights=Path(w).with_suffix('.bin')) # *.xml, *.bin paths
363-
executable_network = core.load_network(network, device_name='CPU', num_requests=1)
362+
network = ie.read_model(model=w, weights=Path(w).with_suffix('.bin'))
363+
executable_network = ie.compile_model(model=network, device_name="CPU")
364+
self.output_layer = next(iter(executable_network.outputs))
364365
elif engine: # TensorRT
365366
LOGGER.info(f'Loading {w} for TensorRT inference...')
366367
import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download
@@ -444,11 +445,7 @@ def forward(self, im, augment=False, visualize=False, val=False):
444445
y = self.session.run([self.session.get_outputs()[0].name], {self.session.get_inputs()[0].name: im})[0]
445446
elif self.xml: # OpenVINO
446447
im = im.cpu().numpy() # FP32
447-
desc = self.ie.TensorDesc(precision='FP32', dims=im.shape, layout='NCHW') # Tensor Description
448-
request = self.executable_network.requests[0] # inference request
449-
request.set_blob(blob_name='images', blob=self.ie.Blob(desc, im)) # name=next(iter(request.input_blobs))
450-
request.infer()
451-
y = request.output_blobs['output'].buffer # name=next(iter(request.output_blobs))
448+
y = self.executable_network([im])[self.output_layer]
452449
elif self.engine: # TensorRT
453450
assert im.shape == self.bindings['images'].shape, (im.shape, self.bindings['images'].shape)
454451
self.binding_addrs['images'] = int(im.data_ptr())

0 commit comments

Comments (0)