.. DO NOT EDIT.
.. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY.
.. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE:
.. "gyexamples/plot_investigate_bench.py"
.. LINE NUMBERS ARE GIVEN BELOW.

.. only:: html

    .. note::
        :class: sphx-glr-download-link-note

        Click :ref:`here <sphx_glr_download_gyexamples_plot_investigate_bench.py>`
        to download the full example code

.. rst-class:: sphx-glr-example-title

.. _sphx_glr_gyexamples_plot_investigate_bench.py:


Investigate a failure from a benchmark
======================================

The method ``validate`` may raise an exception and in that case,
the class :class:`BenchPerfTest`. The following script shows how
to investigate.

.. contents::
    :local:

.. GENERATED FROM PYTHON SOURCE LINES 14-29

.. code-block:: default


    from onnxruntime import InferenceSession
    from pickle import load
    from time import time
    import numpy
    from numpy.testing import assert_almost_equal
    import matplotlib.pyplot as plt
    import pandas
    from scipy.special import expit
    import sklearn
    from sklearn.utils._testing import ignore_warnings
    from sklearn.linear_model import LogisticRegression
    from pymlbenchmark.benchmark import BenchPerf
    from pymlbenchmark.external import OnnxRuntimeBenchPerfTestBinaryClassification

.. GENERATED FROM PYTHON SOURCE LINES 30-32

Defines the benchmark and runs it
+++++++++++++++++++++++++++++++++

.. GENERATED FROM PYTHON SOURCE LINES 32-103

.. code-block:: default


    class OnnxRuntimeBenchPerfTestBinaryClassification3(
            OnnxRuntimeBenchPerfTestBinaryClassification):
        """
        Overwrites the class to add a pure python implementation
        of the logistic regression.
        """

        def fcts(self, dim=None, **kwargs):

            def predict_py_predict(X, model=self.skl):
                coef = model.coef_
                intercept = model.intercept_
                pred = numpy.dot(X, coef.T) + intercept
                return (pred >= 0).astype(numpy.int32)

            def predict_py_predict_proba(X, model=self.skl):
                coef = model.coef_
                intercept = model.intercept_
                pred = numpy.dot(X, coef.T) + intercept
                decision_2d = numpy.c_[-pred, pred]
                return expit(decision_2d)

            res = OnnxRuntimeBenchPerfTestBinaryClassification.fcts(
                self, dim=dim, **kwargs)
            res.extend([
                {'method': 'predict', 'lib': 'py',
                 'fct': predict_py_predict},
                {'method': 'predict_proba', 'lib': 'py',
                 'fct': predict_py_predict_proba},
            ])
            return res

        def validate(self, results, **kwargs):
            """
            Raises an exception and locally dump everything
            we need to investigate.
            """
            # Checks that methods *predict* and *predict_proba* returns
            # the same results for both scikit-learn and onnxruntime.
            OnnxRuntimeBenchPerfTestBinaryClassification.validate(
                self, results, **kwargs)

            # Let's dump anything we need for later.
            # kwargs contains the input data.
            self.dump_error("Just for fun", skl=self.skl,
                            ort_onnx=self.ort_onnx,
                            results=results, **kwargs)
            raise AssertionError("Just for fun")


    @ignore_warnings(category=FutureWarning)
    def run_bench(repeat=10, verbose=False):

        pbefore = dict(dim=[1, 5], fit_intercept=[True])
        pafter = dict(N=[1, 10, 100])
        test = lambda dim=None, **opts: (
            OnnxRuntimeBenchPerfTestBinaryClassification3(
                LogisticRegression, dim=dim, **opts))
        bp = BenchPerf(pbefore, pafter, test)

        with sklearn.config_context(assume_finite=True):
            start = time()
            results = list(bp.enumerate_run_benchs(repeat=repeat,
                                                   verbose=verbose))
            end = time()

        results_df = pandas.DataFrame(results)
        print("Total time = %0.3f sec\n" % (end - start))
        return results_df

.. GENERATED FROM PYTHON SOURCE LINES 104-105

Runs the benchmark.

.. GENERATED FROM PYTHON SOURCE LINES 105-110

.. code-block:: default


    try:
        run_bench(verbose=True)
    except AssertionError as e:
        print(e)

.. rst-class:: sphx-glr-script-out

..
 code-block:: none

    0%|          | 0/6 [00:00<?, ?it/s]

.. NOTE(review): the remaining benchmark progress/timing output was lost when
   this file was mangled during extraction (everything between the first
   ``<`` and the closing download link was stripped). Regenerate the example
   with sphinx-gallery to restore the full script output and the
   "Download Python source code" link.

.. container:: sphx-glr-download sphx-glr-download-jupyter

   :download:`Download Jupyter notebook: plot_investigate_bench.ipynb <plot_investigate_bench.ipynb>`

.. only:: html

 .. rst-class:: sphx-glr-signature

    `Gallery generated by Sphinx-Gallery <https://sphinx-gallery.github.io>`_