module onnxrt.backend#

Inheritance diagram of mlprodict.onnxrt.backend

Short summary#

module mlprodict.onnxrt.backend

ONNX Backend for OnnxInference.

import unittest
from onnx.backend.test import BackendTest
import mlprodict.onnxrt.backend_py as backend

backend_test = BackendTest(backend, __name__)
backend_test.include('.*add.*')
globals().update(backend_test.enable_report().test_cases)
unittest.main()

source on GitHub

Classes#

_CombineModels

Combines an ONNX inference session with a shape inference session (see run).

OnnxInferenceBackend

ONNX backend following the pattern from onnx/backend/base.py.

OnnxInferenceBackendMicro

Same backend as OnnxInferenceBackend but the runtime is OnnxMicroRuntime.

OnnxInferenceBackendOrt

Same backend as OnnxInferenceBackend but the runtime is onnxruntime1.

OnnxInferenceBackendPyC

Same backend as OnnxInferenceBackend but the runtime is python_compiled.

OnnxInferenceBackendPyEval

Same backend as OnnxInferenceBackend but the runtime is OnnxShapeInference.

OnnxInferenceBackendRep

Computes the prediction for an ONNX graph loaded with OnnxInference.

OnnxInferenceBackendShape

Same backend as OnnxInferenceBackend but the runtime is OnnxShapeInference.

Properties#


input_names

Returns the input names.

output_names

Returns the output names.

Static Methods#

All backend classes expose the following classmethods; they are defined on OnnxInferenceBackend and inherited, or overridden, by each subclass.

create_inference_session

Instantiates an instance of class OnnxInference. This method should be overridden to change the runtime or any other runtime options. (Overridden in every subclass to select the runtime.)

is_compatible

Returns whether the model is compatible with the backend.

is_opset_supported

Returns whether the opset for the model is supported by the backend.

prepare

Loads the model and creates OnnxInference.

run_model

Computes the prediction.

run_node

This method is not implemented as it is much more efficient to run a whole model than every node independently.

supports_device

Checks whether the backend is compiled with particular device support.

Methods#

__init__

Constructors of OnnxInferenceBackendRep and _CombineModels.

run (OnnxInferenceBackendRep)

Computes the prediction. See OnnxInference.run.

run (_CombineModels)

Runs shape inference and ONNX inference.

Documentation#

ONNX Backend for OnnxInference.

import unittest
from onnx.backend.test import BackendTest
import mlprodict.onnxrt.backend_py as backend

backend_test = BackendTest(backend, __name__)
backend_test.include('.*add.*')
globals().update(backend_test.enable_report().test_cases)
unittest.main()
class mlprodict.onnxrt.backend.OnnxInferenceBackend#

Bases: Backend

ONNX backend following the pattern from onnx/backend/base.py. This backend can be run with the following code:

import unittest
from contextlib import redirect_stdout, redirect_stderr
from io import StringIO
from onnx.backend.test import BackendTest
import mlprodict.onnxrt.backend_py as backend

back_test = BackendTest(backend, __name__)
back_test.exclude('.*_blvc_.*')
back_test.exclude('.*_densenet_.*')
back_test.exclude('.*_densenet121_.*')
back_test.exclude('.*_inception_.*')
back_test.exclude('.*_resnet50_.*')
back_test.exclude('.*_shufflenet_.*')
back_test.exclude('.*_squeezenet_.*')
back_test.exclude('.*_vgg19_.*')
back_test.exclude('.*_zfnet512_.*')
globals().update(back_test.enable_report().test_cases)
buffer = StringIO()
print('---------------------------------')

# Capture the verbose test output so it can be printed after the summary.
with redirect_stdout(buffer):
    with redirect_stderr(buffer):
        res = unittest.main(verbosity=2, exit=False)

testsRun = res.result.testsRun
errors = len(res.result.errors)
skipped = len(res.result.skipped)
unexpectedSuccesses = len(res.result.unexpectedSuccesses)
expectedFailures = len(res.result.expectedFailures)
print('---------------------------------')
print("testsRun=%d errors=%d skipped=%d unexpectedSuccesses=%d "
      "expectedFailures=%d" % (
    testsRun, errors, skipped, unexpectedSuccesses,
    expectedFailures))
ran = testsRun - skipped
print("ratio=%f" % (1 - errors * 1.0 / ran))
print('---------------------------------')
print(buffer.getvalue())
classmethod create_inference_session(model)#

Instantiates an instance of class OnnxInference. This method should be overridden to change the runtime or any other runtime options.
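
The other backends in this module switch runtimes exactly this way. A minimal sketch of such an override, assuming OnnxInference accepts a runtime keyword (the runtime names onnxruntime1 and python_compiled above suggest it does):

from mlprodict.onnxrt import OnnxInference
from mlprodict.onnxrt.backend import OnnxInferenceBackend

class OnnxInferenceBackendCustom(OnnxInferenceBackend):
    # Hypothetical subclass; only create_inference_session changes.
    @classmethod
    def create_inference_session(cls, model):
        # Assumption: OnnxInference exposes a ``runtime`` keyword.
        return OnnxInference(model, runtime='python_compiled')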

classmethod is_compatible(model, device=None, **kwargs)#

Returns whether the model is compatible with the backend.

Parameters:
  • model – unused

  • device – None to use the default device or a string (ex: ‘CPU’)

Returns:

boolean

classmethod is_opset_supported(model)#

Returns whether the opset for the model is supported by the backend.

Parameters:

model – Model whose opsets need to be verified.

Returns:

boolean and error message if opset is not supported.
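
A hedged usage sketch, assuming the return value unpacks as a (boolean, message) pair as the description above suggests; model_proto stands for any loaded ModelProto:

supported, error_message = OnnxInferenceBackend.is_opset_supported(model_proto)
if not supported:
    print('opset not supported:', error_message)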

classmethod prepare(model, device=None, **kwargs)#

Loads the model and creates OnnxInference.

Parameters:
  • model – ModelProto (returned by onnx.load), string for a filename or bytes for a serialized model

  • device – requested device for the computation, None means the default one which depends on the compilation settings

  • kwargs – see OnnxInference

Returns:

see OnnxInference
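
A short usage sketch; the file name and the input name 'X' are illustrative, and the input dictionary follows OnnxInference.run:

import numpy
from mlprodict.onnxrt.backend import OnnxInferenceBackend

with open('model.onnx', 'rb') as f:  # illustrative path
    rep = OnnxInferenceBackend.prepare(f.read(), device='CPU')

# rep is an OnnxInferenceBackendRep; its run method forwards to
# OnnxInference.run with a dictionary of named inputs.
outputs = rep.run({'X': numpy.random.randn(1, 4).astype(numpy.float32)})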

classmethod run_model(model, inputs, device=None, **kwargs)#

Computes the prediction.

Parameters:
• model – see OnnxInference, as returned by prepare

  • inputs – inputs

  • device – requested device for the computation, None means the default one which depends on the compilation settings

• kwargs – see OnnxInference

Returns:

predictions
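
A one-call sketch, assuming run_model follows the onnx.backend.base.Backend pattern of preparing the model and running it in a single call; onnx_model, x and the input name 'X' are illustrative:

# Assumption: the model argument accepts the same formats as prepare.
outputs = OnnxInferenceBackend.run_model(onnx_model, {'X': x}, device='CPU')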

classmethod run_node(node, inputs, device=None, outputs_info=None, **kwargs)#

This method is not implemented as it is much more efficient to run a whole model than every node independently.

classmethod supports_device(device)#

Checks whether the backend is compiled with particular device support.
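
A quick guard, assuming 'CPU' is a recognized device string as in is_compatible above; onnx_model is illustrative:

if OnnxInferenceBackend.supports_device('CPU'):
    rep = OnnxInferenceBackend.prepare(onnx_model, device='CPU')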

class mlprodict.onnxrt.backend.OnnxInferenceBackendMicro#

Bases: OnnxInferenceBackend

Same backend as OnnxInferenceBackend but the runtime is OnnxMicroRuntime.

classmethod create_inference_session(model)#

Instantiates an instance of class OnnxInference. This method should be overridden to change the runtime or any other runtime options.

class mlprodict.onnxrt.backend.OnnxInferenceBackendOrt#

Bases: OnnxInferenceBackend

Same backend as OnnxInferenceBackend but the runtime is onnxruntime1.

classmethod create_inference_session(model)#

Instantiates an instance of class OnnxInference. This method should be overridden to change the runtime or any other runtime options.

class mlprodict.onnxrt.backend.OnnxInferenceBackendPyC#

Bases: OnnxInferenceBackend

Same backend as OnnxInferenceBackend but the runtime is python_compiled.

classmethod create_inference_session(model)#

Instantiates an instance of class OnnxInference. This method should be overridden to change the runtime or any other runtime options.

class mlprodict.onnxrt.backend.OnnxInferenceBackendPyEval#

Bases: OnnxInferenceBackend

Same backend as OnnxInferenceBackend but the runtime is OnnxShapeInference.

classmethod create_inference_session(model)#

Instantiates an instance of class OnnxInference. This method should be overridden to change the runtime or any other runtime options.

classmethod run_model(model, inputs, device=None, **kwargs)#

Computes the prediction.

Parameters:
• model – see OnnxShapeInference, as returned by prepare

  • inputs – inputs

  • device – requested device for the computation, None means the default one which depends on the compilation settings

• kwargs – see OnnxInference

Returns:

predictions

class mlprodict.onnxrt.backend.OnnxInferenceBackendRep(session)#

Bases: BackendRep

Computes the prediction for an ONNX graph loaded with OnnxInference.

Parameters:

session – an OnnxInference instance

__init__(session)#
run(inputs: Any, **kwargs: Any) → Tuple[Any, ...]#

Computes the prediction. See OnnxInference.run.

class mlprodict.onnxrt.backend.OnnxInferenceBackendShape#

Bases: OnnxInferenceBackend

Same backend as OnnxInferenceBackend but the runtime is OnnxShapeInference.

classmethod create_inference_session(model)#

Instantiates an instance of class OnnxInference. This method should be overridden to change the runtime or any other runtime options.

classmethod run_model(model, inputs, device=None, **kwargs)#

Computes the prediction.

Parameters:
• model – see OnnxShapeInference, as returned by prepare

  • inputs – inputs

  • device – requested device for the computation, None means the default one which depends on the compilation settings

• kwargs – see OnnxInference

Returns:

predictions

class mlprodict.onnxrt.backend._CombineModels(onnx_inference, shape_inference)#

Bases: object

__init__(onnx_inference, shape_inference)#
property input_names#

Returns the input names.

property output_names#

Returns the output names.

run(inputs, **kwargs)#

Runs shape inference and ONNX inference.
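
A minimal sketch of assembling _CombineModels, assuming its two constructor arguments are an OnnxInference session and an OnnxShapeInference session as the parameter names suggest; the OnnxShapeInference import path, model_bytes, x and the input name 'X' are assumptions:

from mlprodict.onnxrt import OnnxInference
from mlprodict.onnxrt.onnx_shape_inference import OnnxShapeInference  # path assumed
from mlprodict.onnxrt.backend import _CombineModels

# Both sessions are built from the same serialized model.
combined = _CombineModels(OnnxInference(model_bytes),
                          OnnxShapeInference(model_bytes))
print(combined.input_names, combined.output_names)
results = combined.run({'X': x})  # runs shape inference and ONNX inference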