Using C implementation of operator Conv#

onnx-extended includes an implementation of operator Conv written in C++ that is much faster than the python implementation available in package onnx. These implementations are automatically available through class onnx_extended.reference.CReferenceEvaluator. The following example compares the processing time for three runtimes.

Creation of a simple model#

import numpy as np
import matplotlib.pyplot as plt
from pandas import DataFrame
from tqdm import tqdm
from onnx import TensorProto
from onnx.helper import (
    make_graph,
    make_model,
    make_node,
    make_opsetid,
    make_tensor_value_info,
)
from onnx.reference import ReferenceEvaluator
from onnxruntime import InferenceSession
from onnx_extended.ext_test_case import measure_time, unit_test_going
from onnx_extended.reference import CReferenceEvaluator


# Declare the graph's tensors with a fully dynamic 4D shape
# (batch, channels, height, width all left unspecified).
dynamic_4d = [None, None, None, None]
X = make_tensor_value_info("X", TensorProto.FLOAT, dynamic_4d)
W = make_tensor_value_info("W", TensorProto.FLOAT, dynamic_4d)
B = make_tensor_value_info("B", TensorProto.FLOAT, dynamic_4d)
Y = make_tensor_value_info("Y", TensorProto.FLOAT, dynamic_4d)

# A single Conv node taking data, weights and bias: one pixel of padding
# on every side, no dilation, and a stride of 2 in both spatial axes.
node = make_node(
    "Conv",
    ["X", "W", "B"],
    ["Y"],
    pads=[1, 1, 1, 1],
    dilations=[1, 1],
    strides=[2, 2],
)
graph = make_graph([node], "g", [X, W, B], [Y])
onnx_model = make_model(graph, opset_imports=[make_opsetid("", 16)])

ReferenceEvaluator and CReferenceEvaluator#

Let’s first check that both runtimes produce the same outputs.

# Build a small test case: a 64x64 input image, a 3x3 all-ones kernel
# and a zero bias, then verify both evaluators agree on the Conv output.
height, width = 64, 64
X = np.arange(height * width, dtype=np.float32).reshape((1, 1, height, width))
W = np.ones((1, 1, 3, 3), dtype=np.float32)
B = np.array([[[[0]]]], dtype=np.float32)

# sess1: pure-python reference; sess2: C++ accelerated reference.
sess1 = ReferenceEvaluator(onnx_model)
sess2 = CReferenceEvaluator(onnx_model)

inputs = {"X": X, "W": W, "B": B}
expected = sess1.run(None, inputs)[0]
got = sess2.run(None, inputs)[0]
# Maximum absolute discrepancy between the two implementations.
diff = np.abs(expected - got).max()
print(f"difference: {diff}")
difference: 0.0

Everything works fine.

Time measurement#

# Time both evaluators on the same inputs and report the speedup of the
# C++ implementation over the pure-python one.
feeds = {"X": X, "W": W, "B": B}

t1 = measure_time(lambda: sess1.run(None, feeds))
t2 = measure_time(lambda: sess2.run(None, feeds))
print(f"ReferenceEvaluator: {t1['average']}s")
print(f"CReferenceEvaluator: {t2['average']}s")
print(f"speedup is {t1['average'] / t2['average']}")
ReferenceEvaluator: 0.07763761757779866s
CReferenceEvaluator: 0.0002711154464632273s
speedup is 286.36368230066535

Let’s add onnxruntime as well.

# Add onnxruntime to the comparison. The keyword is ``providers``
# (plural): the previous ``provider=`` was absorbed by **kwargs and
# silently ignored, letting the session pick its default provider list.
sess3 = InferenceSession(
    onnx_model.SerializeToString(), providers=["CPUExecutionProvider"]
)

t3 = measure_time(lambda: sess3.run(None, feeds))
print(f"InferenceSession: {t3['average']}s")
print(f"speedup is {t1['average'] / t3['average']}")
InferenceSession: 0.00019881358463317154s
speedup is 390.5045911276478

Plotting#

# Benchmark both evaluators over several square input sizes and collect
# the average timings into a table.
data = []

for side in tqdm([16, 32, 48, 64]):
    feeds = {
        "X": np.arange(side * side, dtype=np.float32).reshape((1, 1, side, side)),
        "W": np.ones((1, 1, 3, 3), dtype=np.float32),
        "B": np.array([[[[0]]]], dtype=np.float32),
    }
    data.append(
        dict(
            size=side,
            onnx=measure_time(lambda: sess1.run(None, feeds))["average"],
            onnx_extended=measure_time(lambda: sess2.run(None, feeds))["average"],
        )
    )
    # Keep unit tests short: stop early once enough points were gathered.
    if unit_test_going() and len(data) >= 3:
        break

df = DataFrame(data)
df

print(df)
  0%|          | 0/4 [00:00<?, ?it/s]
 25%|##5       | 1/4 [00:02<00:08,  2.70s/it]
 50%|#####     | 2/4 [00:12<00:13,  6.91s/it]
 75%|#######5  | 3/4 [00:34<00:13, 13.73s/it]
100%|##########| 4/4 [01:12<00:00, 23.46s/it]
100%|##########| 4/4 [01:12<00:00, 18.20s/it]
   size      onnx  onnx_extended
0    16  0.005208       0.000180
1    32  0.019504       0.000194
2    48  0.043463       0.000220
3    64  0.076512       0.000265

Finally.

# Plot both timings on a log-log scale, overlay the speedup curve on a
# secondary y-axis, and save the figure to disk.
df = df.set_index("size")
fig, ax = plt.subplots(1, 1, figsize=(10, 4))
df[["onnx", "onnx_extended"]].plot(
    ax=ax, logx=True, logy=True, title="Comparison python / C implementation for Conv"
)
df["speedup"] = df["onnx"] / df["onnx_extended"]
secondary = ax.twinx()
df[["speedup"]].plot(ax=secondary, color="green")

fig.savefig("plot_conv.png")
# plt.show()
Comparison python / C implementation for Conv

Total running time of the script: ( 1 minutes 55.197 seconds)

Gallery generated by Sphinx-Gallery