"""Benchmark n_jobs=1 on high number of fast tasks
The goal of this script is to study the overhead incurred when calling small
tasks with `n_jobs=1` compared to just running a simple list comprehension.
"""
# Author: Thomas Moreau
# License: BSD 3 clause
import time
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from matplotlib.cm import ScalarMappable
from joblib import Parallel, delayed
# Plotting style configuration shared by the figures below.
LINE_STYLES = {'iter': '--', 'parallel': '-', 'loop': ':'}
COLORS = {'none': 'indianred'}
CMAP = plt.colormaps['viridis']

# Build benchmark tasks of increasing complexity: a no-op baseline plus
# square-matrix products of growing size, so the per-call overhead can be
# compared against tasks of different costs.
funcs = [("none", lambda x: None, None)]
n_size = 3
sizes = np.logspace(0, 2, n_size, dtype=int)
for idx, size in enumerate(sizes):
    size = max(1, size)  # guard against a degenerate 0-sized matrix
    label = f'mat({size:3d}, {size:3d})'
    mat = np.random.randn(size, size)
    # Each entry is (label, callable, argument); the callable squares the
    # matrix it receives.
    funcs.append((label, lambda m: m @ m, mat))
    # Map task complexity onto the colormap, from 0 to 1.
    COLORS[label] = CMAP(idx / (n_size - 1))
# For each function and for different numbers of tasks, time both a plain
# list comprehension and the equivalent Parallel(n_jobs=1) call; store the
# per-task runtime of each measurement in `results`.
results = []
for f_name, func, arg in funcs:
    print('Benchmarking:', f_name)
    # Wrap once, outside the timing loops, so the cost of `delayed` itself
    # is not included in the measurement.
    f_delayed = delayed(func)
    for N in np.logspace(1, 4, 4, dtype=int):
        print('# tasks:', N)
        # Repeat each measurement 10 times so the median (computed later)
        # smooths out timing noise.
        for _ in range(10):
            t_start = time.perf_counter()
            # Baseline: run the tasks with a plain list comprehension
            # (idiomatic form of the previous `list(genexpr)`).
            [func(arg) for _ in range(N)]
            runtime = time.perf_counter() - t_start
            results.append(dict(
                method="iter", N=N, func=f_name, runtime=runtime / N
            ))
            t_start = time.perf_counter()
            # Same tasks dispatched through joblib with n_jobs=1.
            Parallel(n_jobs=1)(f_delayed(arg) for _ in range(N))
            runtime = time.perf_counter() - t_start
            results.append(dict(
                method="parallel", N=N, func=f_name, runtime=runtime / N
            ))
# Use a DataFrame to manipulate the results.
df = pd.DataFrame(results)
# Compute the median runtime for each set of parameters to reduce noise.
curve = df.groupby(["method", "N", "func"])["runtime"].median().reset_index()
# Print the overhead incurred for each task. `runtime` was already
# normalized per task when recorded (divided by N), so the per-task
# overhead is simply the difference between the parallel and iter values.
for k, grp in curve.groupby("func"):
    c_iter = grp.query('method == "iter"').set_index("N")
    c_parallel = grp.query('method == "parallel"').set_index("N")
    overhead_percent = (c_parallel["runtime"] / c_iter["runtime"]).median() - 1
    # Bug fix: the previous version divided the per-task difference by N a
    # second time, under-reporting the overhead by a factor of N.
    overhead_time = c_parallel["runtime"] - c_iter["runtime"]
    print(
        f"For func {k}, overhead_time is {overhead_time.median()/1e-6:.2f}us, "
        f"increasing runtime by {overhead_percent * 100:.2f}%"
    )
# Plot the scaling curves: one line per (method, func) pair on log-log axes.
fig, ax = plt.subplots()
for key, grp in curve.groupby(["method", "func"]):
    ax.loglog(
        grp["N"],
        grp["runtime"],
        label=key,
        ls=LINE_STYLES[key[0]],   # line style encodes the method
        color=COLORS[key[1]],     # color encodes the task complexity
    )
ax.set_xlabel("# Tasks")
ax.set_ylabel("Runtime per task")
# Legend only distinguishes the methods (line styles); task complexity is
# conveyed by the colorbar below.
ax.legend(
    (plt.Line2D([], [], ls=ls, c="k") for ls in LINE_STYLES.values()),
    LINE_STYLES,
    bbox_to_anchor=(0.3, 1, 0.3, 0.1),
    loc='center',
    ncol=3,
)
# Consistency: reuse the CMAP constant defined at the top of the file
# instead of the deprecated `plt.cm.viridis` attribute access (removed in
# matplotlib >= 3.9).
plt.colorbar(
    ScalarMappable(norm=LogNorm(1, 200), cmap=CMAP),
    label="Matrix size"
)
plt.show()