Compares implementations of ReduceSum#

This example compares numpy's numpy.sum with the onnxruntime implementation. If available, tensorflow and pytorch are included as well.

Available optimisation#

The code shows which parallelisation optimisation could be used, AVX or SSE and the number of available processors.

import numpy
import pandas
import matplotlib.pyplot as plt
from onnxruntime import InferenceSession
from skl2onnx.common.data_types import FloatTensorType
from skl2onnx.algebra.onnx_ops import OnnxReduceSumApi11
from cpyquickhelper.numbers import measure_time
from tqdm import tqdm
from mlprodict.testing.experimental_c_impl.experimental_c import (
    code_optimisation, custom_reducesum_rk_float)
print(code_optimisation())
AVX-omp=8

ReduceSum implementations#

# Optional backends: tensorflow and pytorch are benchmarked only when they
# are installed; a missing package simply disables that measurement.
try:
    from tensorflow.math import reduce_sum as tf_reduce_sum
    from tensorflow import convert_to_tensor
except ImportError:
    # Sentinel checked by benchmark_op to skip the tensorflow measurements.
    tf_reduce_sum = None
try:
    from torch import sum as torch_sum, from_numpy
except ImportError:
    # Sentinel checked by benchmark_op to skip the torch measurements.
    torch_sum = None


def build_ort_reducesum(axes, op_version=14):  # opset=13, 14, ...
    """Build an onnxruntime callable computing ReduceSum over *axes*.

    The returned function has signature ``f(x, y)`` so it can be swapped
    with the other implementations measured by :func:`benchmark_op`.
    """
    reduce_node = OnnxReduceSumApi11(
        'x', axes=axes, op_version=op_version, output_names=['z'])
    model_proto = reduce_node.to_onnx(
        inputs=[('x', FloatTensorType())], target_opset=op_version)
    sess = InferenceSession(model_proto.SerializeToString())

    def run(x, y):
        # *y* is ignored here: the axes are baked into the ONNX graph.
        return sess.run(None, {'x': x})

    return run


def loop_fct(fct, xs, ys):
    """Apply *fct* to each pair drawn from *xs* and *ys*, discarding results.

    Used as the measured statement so that one timing covers several arrays.
    """
    for pair in zip(xs, ys):
        fct(*pair)


def benchmark_op(axes, repeat=5, number=5, name="ReduceSum", shape_fct=None,
                 custom_impl=False):
    """Benchmark several ReduceSum implementations over *axes*.

    :param axes: tuple of axes to reduce
    :param repeat: number of timing repetitions (see ``measure_time``)
    :param number: number of executions per repetition
    :param name: label used in the plot titles
    :param shape_fct: callable ``dim -> shape``; defaults to
        ``(3, dim, 1, 128, 64)``
    :param custom_impl: also measure ``custom_reducesum_rk_float``
        (only valid for ``axes == (0,)``)
    :return: tuple ``(df, rs, ax)`` — raw timings, speedups vs numpy,
        and the matplotlib axes
    """
    if shape_fct is None:
        def shape_fct(dim):
            return (3, dim, 1, 128, 64)
    ort_fct = build_ort_reducesum(axes)
    res = []
    for dim in tqdm([8, 16, 32, 64, 100, 128, 200,
                     256, 400, 512, 1024]):
        shape = shape_fct(dim)
        # Fewer arrays for the big shapes to keep the benchmark duration down.
        n_arrays = 10 if dim < 512 else 4
        xs = [numpy.random.rand(*shape).astype(numpy.float32)
              for _ in range(n_arrays)]
        ys = [numpy.array(axes, dtype=numpy.int64)
              for _ in range(n_arrays)]
        info = dict(axes=axes, shape=shape)

        # numpy
        ctx = dict(
            xs=xs, ys=ys,
            # Pass the axes through the *axis* keyword. The previous form,
            # ``numpy.sum(x, *y)``, unpacked the axes positionally, so for a
            # multi-axis reduction the second axis was fed into numpy.sum's
            # *dtype* parameter, which raises TypeError on modern numpy.
            fct=lambda x, y: numpy.sum(x, axis=tuple(y)),
            loop_fct=loop_fct)
        obs = measure_time(
            "loop_fct(fct, xs, ys)",
            div_by_number=True, context=ctx, repeat=repeat, number=number)
        obs['dim'] = dim
        obs['fct'] = 'numpy'
        obs.update(info)
        res.append(obs)

        # onnxruntime
        ctx['fct'] = ort_fct
        obs = measure_time(
            "loop_fct(fct, xs, ys)",
            div_by_number=True, context=ctx, repeat=repeat, number=number)
        obs['dim'] = dim
        obs['fct'] = 'ort'
        obs.update(info)
        res.append(obs)

        if custom_impl:
            # The custom C implementation only handles the RK layout,
            # i.e. a reduction over the first axis of a 2D array.
            if axes != (0, ):
                raise RuntimeError(
                    f"Unexpected axes={axes!r}.")
            ctx['fct'] = lambda x, y: custom_reducesum_rk_float(x)
            ctx['xs'] = [x.reshape((x.shape[0], -1)).copy() for x in xs]
            obs = measure_time(
                "loop_fct(fct, xs, ys)",
                div_by_number=True, context=ctx, repeat=repeat, number=number)
            obs['dim'] = dim
            obs['fct'] = 'custom'
            obs.update(info)
            res.append(obs)

        if tf_reduce_sum is not None:
            # tensorflow (conversion to tensors happens outside the timing)
            ctx['fct'] = tf_reduce_sum
            ctx['xs'] = [convert_to_tensor(x) for x in xs]
            ctx['ys'] = ys
            obs = measure_time(
                "loop_fct(fct, xs, ys)",
                div_by_number=True, context=ctx, repeat=repeat, number=number)
            obs['dim'] = dim
            obs['fct'] = 'tf'
            obs.update(info)
            res.append(obs)

        if torch_sum is not None:
            # torch.sum reduces one axis at a time; chain two calls for
            # two-axis reductions.
            def torch_sum1(x, y):
                return torch_sum(x, y[0])

            def torch_sum2(x, y):
                return torch_sum(torch_sum(x, y[1]), y[0])

            # torch
            ctx['fct'] = torch_sum1 if len(axes) == 1 else torch_sum2
            ctx['xs'] = [from_numpy(x) for x in xs]
            ctx['ys'] = ys  # [from_numpy(y) for y in ys]
            obs = measure_time(
                "loop_fct(fct, xs, ys)",
                div_by_number=True, context=ctx, repeat=repeat, number=number)
            obs['dim'] = dim
            obs['fct'] = 'torch'
            obs.update(info)
            res.append(obs)

    # Dataframes. *shape* and *dim* still hold the last loop values, so the
    # varying dimension is replaced by "N" in the displayed shape.
    shape_name = str(shape).replace(str(dim), "N")
    df = pandas.DataFrame(res)
    df.columns = [_.replace('dim', 'N') for _ in df.columns]
    # Keyword arguments: positional DataFrame.pivot arguments are deprecated
    # (see the FutureWarning in earlier runs) and removed in pandas 2.0.
    piv = df.pivot(index='N', columns='fct', values='average')

    # Speedups relative to numpy (numpy itself is normalised to 1).
    rs = piv.copy()
    for c in ['ort', 'torch', 'tf', 'tf_copy']:
        if c in rs.columns:
            rs[c] = rs['numpy'] / rs[c]
    rs['numpy'] = 1.

    # Graphs.
    fig, ax = plt.subplots(1, 2, figsize=(12, 4))
    piv.plot(logx=True, logy=True, ax=ax[0],
             title=f"{name} benchmark\n{shape_name!r} - {axes!r} lower better")
    ax[0].legend(prop={"size": 9})
    rs.plot(logx=True, logy=True, ax=ax[1],
            title="%s Speedup, baseline=numpy\n%r - %r"
                  " higher better" % (name, shape_name, axes))
    # Reference lines at 0.5x and 2x speedup.
    ax[1].plot([min(rs.index), max(rs.index)], [0.5, 0.5], 'g--')
    ax[1].plot([min(rs.index), max(rs.index)], [2., 2.], 'g--')
    ax[1].legend(prop={"size": 9})
    return df, rs, ax


# Collects the raw-timing dataframe of every benchmark run below.
dfs = []

Reduction on a particular case KR#

Consecutive axis not reduced and consecutive reduced axis are merged. KR means kept axis - reduced axis

(8, 24, 48, N), axis=(3, )#

axes = (3, )
df, piv, ax = benchmark_op(axes, shape_fct=lambda dim: (8, 24, 48, dim))
dfs.append(df)
# Keyword arguments: positional DataFrame.pivot arguments are deprecated
# and removed in pandas 2.0.
df.pivot(index="fct", columns="N", values="average")
ReduceSum benchmark '(8, 24, 48, N)' - (3,) lower better, ReduceSum Speedup, baseline=numpy '(8, 24, 48, N)' - (3,) higher better
  0%|          | 0/11 [00:00<?, ?it/s]
  9%|9         | 1/11 [00:00<00:03,  3.23it/s]
 18%|#8        | 2/11 [00:00<00:02,  3.26it/s]
 27%|##7       | 3/11 [00:01<00:02,  2.85it/s]
 36%|###6      | 4/11 [00:01<00:03,  2.30it/s]
 45%|####5     | 5/11 [00:02<00:03,  1.82it/s]
 55%|#####4    | 6/11 [00:03<00:03,  1.49it/s]
 64%|######3   | 7/11 [00:04<00:03,  1.13it/s]
 73%|#######2  | 8/11 [00:06<00:03,  1.11s/it]
 82%|########1 | 9/11 [00:08<00:03,  1.50s/it]
 91%|######### | 10/11 [00:09<00:01,  1.41s/it]
100%|##########| 11/11 [00:12<00:00,  1.68s/it]
100%|##########| 11/11 [00:12<00:00,  1.09s/it]
somewhere/workspace/mlprodict/mlprodict_UT_39_std/_doc/examples/plot_op_reducesum.py:151: FutureWarning: In a future version of pandas all arguments of DataFrame.pivot will be keyword-only.
  piv = df.pivot('N', 'fct', 'average')
somewhere/workspace/mlprodict/mlprodict_UT_39_std/_doc/examples/plot_op_reducesum.py:189: FutureWarning: In a future version of pandas all arguments of DataFrame.pivot will be keyword-only.
  df.pivot("fct", "N", "average")
N 8 16 32 64 100 128 200 256 400 512 1024
fct
numpy 0.004024 0.004814 0.006326 0.007844 0.010115 0.012040 0.018186 0.022168 0.031708 0.015660 0.031980
ort 0.001632 0.001233 0.001795 0.003184 0.004429 0.005217 0.008500 0.009085 0.015010 0.006456 0.011757
torch 0.005897 0.004501 0.004947 0.005478 0.006469 0.007164 0.008706 0.009246 0.011793 0.007036 0.010962


Reduction on a particular case RK#

Consecutive axis not reduced and consecutive reduced axis are merged. RK means reduced axis - kept axis

(8, 24, 48, N), axis=(0, )#

axes = (0, )
df, piv, ax = benchmark_op(axes, shape_fct=lambda dim: (8, 24, 48, dim),
                           custom_impl=True)
dfs.append(df)
# Keyword arguments: positional DataFrame.pivot arguments are deprecated
# and removed in pandas 2.0.
df.pivot(index="fct", columns="N", values="average")
ReduceSum benchmark '(8, 24, 48, N)' - (0,) lower better, ReduceSum Speedup, baseline=numpy '(8, 24, 48, N)' - (0,) higher better
  0%|          | 0/11 [00:00<?, ?it/s]
  9%|9         | 1/11 [00:00<00:01,  5.22it/s]
 18%|#8        | 2/11 [00:00<00:02,  3.89it/s]
 27%|##7       | 3/11 [00:00<00:02,  2.94it/s]
 36%|###6      | 4/11 [00:01<00:03,  1.94it/s]
 45%|####5     | 5/11 [00:02<00:04,  1.35it/s]
 55%|#####4    | 6/11 [00:04<00:04,  1.00it/s]
 64%|######3   | 7/11 [00:06<00:05,  1.39s/it]
 73%|#######2  | 8/11 [00:09<00:05,  1.83s/it]
 82%|########1 | 9/11 [00:13<00:05,  2.56s/it]
 91%|######### | 10/11 [00:15<00:02,  2.43s/it]
100%|##########| 11/11 [00:19<00:00,  2.96s/it]
100%|##########| 11/11 [00:19<00:00,  1.80s/it]
somewhere/workspace/mlprodict/mlprodict_UT_39_std/_doc/examples/plot_op_reducesum.py:151: FutureWarning: In a future version of pandas all arguments of DataFrame.pivot will be keyword-only.
  piv = df.pivot('N', 'fct', 'average')
somewhere/workspace/mlprodict/mlprodict_UT_39_std/_doc/examples/plot_op_reducesum.py:206: FutureWarning: In a future version of pandas all arguments of DataFrame.pivot will be keyword-only.
  df.pivot("fct", "N", "average")
N 8 16 32 64 100 128 200 256 400 512 1024
fct
custom 0.003196 0.004435 0.004800 0.007050 0.009676 0.011843 0.015121 0.017506 0.025969 0.013598 0.024493
numpy 0.001161 0.002294 0.003906 0.007069 0.011530 0.017175 0.028913 0.037940 0.056859 0.028923 0.056224
ort 0.001135 0.001771 0.003254 0.006536 0.008988 0.011828 0.016492 0.020352 0.031042 0.015441 0.030472
torch 0.001222 0.001819 0.002389 0.004399 0.005675 0.006681 0.008345 0.010634 0.015793 0.008301 0.016806


Reduction on a particular case KRK#

Consecutive axis not reduced and consecutive reduced axis are merged. KRK means kept axis - reduced axis - kept axis.

(8, 24, 48, N), axis=(1, 2)#

axes = (1, 2)
df, piv, ax = benchmark_op(axes, shape_fct=lambda dim: (8, 24, 48, dim))
dfs.append(df)
# Keyword arguments: positional DataFrame.pivot arguments are deprecated
# and removed in pandas 2.0.
df.pivot(index="fct", columns="N", values="average")
ReduceSum benchmark '(8, 24, 48, N)' - (1, 2) lower better, ReduceSum Speedup, baseline=numpy '(8, 24, 48, N)' - (1, 2) higher better
  0%|          | 0/11 [00:00<?, ?it/s]
  9%|9         | 1/11 [00:00<00:03,  3.10it/s]
 18%|#8        | 2/11 [00:00<00:02,  3.34it/s]
 27%|##7       | 3/11 [00:01<00:03,  2.61it/s]
 36%|###6      | 4/11 [00:01<00:04,  1.74it/s]
 45%|####5     | 5/11 [00:03<00:04,  1.21it/s]
 55%|#####4    | 6/11 [00:04<00:05,  1.11s/it]
 64%|######3   | 7/11 [00:07<00:06,  1.55s/it]
 73%|#######2  | 8/11 [00:10<00:06,  2.04s/it]
 82%|########1 | 9/11 [00:15<00:05,  2.89s/it]
 91%|######### | 10/11 [00:17<00:02,  2.77s/it]
100%|##########| 11/11 [00:22<00:00,  3.50s/it]
100%|##########| 11/11 [00:22<00:00,  2.08s/it]
somewhere/workspace/mlprodict/mlprodict_UT_39_std/_doc/examples/plot_op_reducesum.py:151: FutureWarning: In a future version of pandas all arguments of DataFrame.pivot will be keyword-only.
  piv = df.pivot('N', 'fct', 'average')
somewhere/workspace/mlprodict/mlprodict_UT_39_std/_doc/examples/plot_op_reducesum.py:222: FutureWarning: In a future version of pandas all arguments of DataFrame.pivot will be keyword-only.
  df.pivot("fct", "N", "average")
N 8 16 32 64 100 128 200 256 400 512 1024
fct
numpy 0.002673 0.005068 0.009394 0.018971 0.028363 0.035920 0.055446 0.069780 0.109836 0.056985 0.118645
ort 0.005741 0.001495 0.003054 0.005058 0.006783 0.008875 0.012738 0.015796 0.024761 0.012611 0.026435
torch 0.003499 0.003111 0.003830 0.004735 0.006935 0.009564 0.012038 0.014270 0.020574 0.011918 0.023718


(8, 24 * 48, N), axis=1#

axes = (1, )
df, piv, ax = benchmark_op(axes, shape_fct=lambda dim: (8, 24 * 48, dim))
dfs.append(df)
# Keyword arguments: positional DataFrame.pivot arguments are deprecated
# and removed in pandas 2.0.
df.pivot(index="fct", columns="N", values="average")
ReduceSum benchmark '(8, 1152, N)' - (1,) lower better, ReduceSum Speedup, baseline=numpy '(8, 1152, N)' - (1,) higher better
  0%|          | 0/11 [00:00<?, ?it/s]
  9%|9         | 1/11 [00:00<00:03,  2.95it/s]
 18%|#8        | 2/11 [00:00<00:02,  3.35it/s]
 27%|##7       | 3/11 [00:00<00:02,  2.98it/s]
 36%|###6      | 4/11 [00:01<00:03,  2.22it/s]
 45%|####5     | 5/11 [00:02<00:03,  1.67it/s]
 55%|#####4    | 6/11 [00:03<00:03,  1.31it/s]
 64%|######3   | 7/11 [00:05<00:04,  1.02s/it]
 73%|#######2  | 8/11 [00:07<00:04,  1.34s/it]
 82%|########1 | 9/11 [00:10<00:03,  1.83s/it]
 91%|######### | 10/11 [00:11<00:01,  1.77s/it]
100%|##########| 11/11 [00:14<00:00,  2.18s/it]
100%|##########| 11/11 [00:14<00:00,  1.34s/it]
somewhere/workspace/mlprodict/mlprodict_UT_39_std/_doc/examples/plot_op_reducesum.py:151: FutureWarning: In a future version of pandas all arguments of DataFrame.pivot will be keyword-only.
  piv = df.pivot('N', 'fct', 'average')
somewhere/workspace/mlprodict/mlprodict_UT_39_std/_doc/examples/plot_op_reducesum.py:231: FutureWarning: In a future version of pandas all arguments of DataFrame.pivot will be keyword-only.
  df.pivot("fct", "N", "average")
N 8 16 32 64 100 128 200 256 400 512 1024
fct
numpy 0.004319 0.005039 0.006714 0.010040 0.011653 0.013207 0.017595 0.020922 0.031354 0.014958 0.031167
ort 0.005708 0.001328 0.002432 0.004527 0.006691 0.008156 0.012828 0.015658 0.023941 0.013589 0.026374
torch 0.002543 0.002798 0.002953 0.004555 0.006878 0.010299 0.013837 0.021142 0.025380 0.017785 0.029619


(2, 8, 12, 24, 2, N), axis=(2, 3)#

axes = (2, 3)
df, piv, ax = benchmark_op(axes, shape_fct=lambda dim: (2, 8, 12, 24, 2, dim))
dfs.append(df)
# Keyword arguments: positional DataFrame.pivot arguments are deprecated
# and removed in pandas 2.0.
df.pivot(index="fct", columns="N", values="average")
ReduceSum benchmark '(2, 8, 12, 24, 2, N)' - (2, 3) lower better, ReduceSum Speedup, baseline=numpy '(2, 8, 12, 24, 2, N)' - (2, 3) higher better
  0%|          | 0/11 [00:00<?, ?it/s]
  9%|9         | 1/11 [00:00<00:02,  4.86it/s]
 18%|#8        | 2/11 [00:00<00:02,  3.87it/s]
 27%|##7       | 3/11 [00:01<00:02,  2.70it/s]
 36%|###6      | 4/11 [00:01<00:04,  1.70it/s]
 45%|####5     | 5/11 [00:03<00:05,  1.16it/s]
 55%|#####4    | 6/11 [00:04<00:05,  1.14s/it]
 64%|######3   | 7/11 [00:07<00:06,  1.60s/it]
 73%|#######2  | 8/11 [00:10<00:06,  2.12s/it]
 82%|########1 | 9/11 [00:15<00:05,  3.00s/it]
 91%|######### | 10/11 [00:18<00:02,  2.88s/it]
100%|##########| 11/11 [00:23<00:00,  3.63s/it]
100%|##########| 11/11 [00:23<00:00,  2.15s/it]
somewhere/workspace/mlprodict/mlprodict_UT_39_std/_doc/examples/plot_op_reducesum.py:151: FutureWarning: In a future version of pandas all arguments of DataFrame.pivot will be keyword-only.
  piv = df.pivot('N', 'fct', 'average')
somewhere/workspace/mlprodict/mlprodict_UT_39_std/_doc/examples/plot_op_reducesum.py:240: FutureWarning: In a future version of pandas all arguments of DataFrame.pivot will be keyword-only.
  df.pivot("fct", "N", "average")
N 8 16 32 64 100 128 200 256 400 512 1024
fct
numpy 0.002727 0.005218 0.009830 0.019297 0.029423 0.037310 0.057374 0.074560 0.114667 0.057383 0.121640
ort 0.001637 0.001422 0.002734 0.005088 0.006873 0.009042 0.012207 0.016188 0.022555 0.015311 0.031242
torch 0.003024 0.003536 0.004540 0.006494 0.008301 0.009279 0.013562 0.016119 0.023344 0.013090 0.023768


Reduction on a particular case RKRK#

(8, 24, 48, N), axis=(0, 2)#

axes = (0, 2)
df, piv, ax = benchmark_op(axes, shape_fct=lambda dim: (8, 24, 48, dim))
dfs.append(df)
# Keyword arguments: positional DataFrame.pivot arguments are deprecated
# and removed in pandas 2.0.
df.pivot(index="fct", columns="N", values="average")
ReduceSum benchmark '(8, 24, 48, N)' - (0, 2) lower better, ReduceSum Speedup, baseline=numpy '(8, 24, 48, N)' - (0, 2) higher better
  0%|          | 0/11 [00:00<?, ?it/s]
  9%|9         | 1/11 [00:00<00:01,  5.14it/s]
 18%|#8        | 2/11 [00:00<00:02,  3.79it/s]
 27%|##7       | 3/11 [00:01<00:03,  2.67it/s]
 36%|###6      | 4/11 [00:02<00:04,  1.60it/s]
 45%|####5     | 5/11 [00:03<00:05,  1.07it/s]
 55%|#####4    | 6/11 [00:05<00:06,  1.27s/it]
 64%|######3   | 7/11 [00:08<00:07,  1.80s/it]
 73%|#######2  | 8/11 [00:12<00:07,  2.46s/it]
 82%|########1 | 9/11 [00:17<00:06,  3.49s/it]
 91%|######### | 10/11 [00:22<00:03,  3.69s/it]
100%|##########| 11/11 [00:33<00:00,  6.05s/it]
100%|##########| 11/11 [00:33<00:00,  3.04s/it]
somewhere/workspace/mlprodict/mlprodict_UT_39_std/_doc/examples/plot_op_reducesum.py:151: FutureWarning: In a future version of pandas all arguments of DataFrame.pivot will be keyword-only.
  piv = df.pivot('N', 'fct', 'average')
somewhere/workspace/mlprodict/mlprodict_UT_39_std/_doc/examples/plot_op_reducesum.py:252: FutureWarning: In a future version of pandas all arguments of DataFrame.pivot will be keyword-only.
  df.pivot("fct", "N", "average")
N 8 16 32 64 100 128 200 256 400 512 1024
fct
numpy 0.002921 0.005470 0.009918 0.021992 0.034231 0.043360 0.067025 0.086299 0.134165 0.067976 0.134674
ort 0.001041 0.001831 0.003339 0.007167 0.008859 0.013138 0.019462 0.031652 0.039470 0.067336 0.260028
torch 0.002970 0.003533 0.003994 0.005258 0.006908 0.008267 0.011630 0.014117 0.020758 0.011562 0.023695


Conclusion#

Some of the configurations should be investigated (see the section referenced as l-reducesum-problem1). The reduction on tensorflow in one dimension seems to be lazy.

# Concatenate every benchmark run and persist the raw timings for later
# inspection, then save the last generated figure.
merged = pandas.concat(dfs)
name = "reducesum"
merged.to_csv(f"plot_{name}.csv", index=False)
merged.to_excel(f"plot_{name}.xlsx", index=False)
# plt.savefig saves the current (last created) figure.
plt.savefig(f"plot_{name}.png")

plt.show()
plot op reducesum

Total running time of the script: ( 2 minutes 18.630 seconds)

Gallery generated by Sphinx-Gallery