{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "\n# Prediction Latency\n\nThis is an example showing the prediction latency of various scikit-learn\nestimators.\n\nThe goal is to measure the latency one can expect when doing predictions\neither in bulk or atomic (i.e. one by one) mode.\n\nThe plots represent the distribution of the prediction latency as a boxplot.\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# Authors: The scikit-learn developers\n# SPDX-License-Identifier: BSD-3-Clause\n\nimport gc\nimport time\nfrom collections import defaultdict\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom sklearn.datasets import make_regression\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.linear_model import Ridge, SGDRegressor\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.svm import SVR\nfrom sklearn.utils import shuffle\n\n\ndef _not_in_sphinx():\n # Hack to detect whether we are running by the sphinx builder\n return \"__file__\" in globals()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Benchmark and plot helper functions\n\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "def atomic_benchmark_estimator(estimator, X_test, verbose=False):\n \"\"\"Measure runtime prediction of each instance.\"\"\"\n n_instances = X_test.shape[0]\n runtimes = np.zeros(n_instances, dtype=float)\n for i in range(n_instances):\n instance = X_test[[i], :]\n start = time.time()\n estimator.predict(instance)\n runtimes[i] = time.time() - start\n if verbose:\n print(\n \"atomic_benchmark runtimes:\",\n min(runtimes),\n np.percentile(runtimes, 50),\n max(runtimes),\n )\n return runtimes\n\n\ndef bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats, verbose):\n \"\"\"Measure runtime prediction of the whole input.\"\"\"\n n_instances = X_test.shape[0]\n runtimes = np.zeros(n_bulk_repeats, dtype=float)\n for i in range(n_bulk_repeats):\n start = time.time()\n estimator.predict(X_test)\n runtimes[i] = time.time() - start\n runtimes = np.array(list(map(lambda x: x / float(n_instances), runtimes)))\n if verbose:\n print(\n \"bulk_benchmark runtimes:\",\n min(runtimes),\n np.percentile(runtimes, 50),\n max(runtimes),\n )\n return runtimes\n\n\ndef benchmark_estimator(estimator, X_test, n_bulk_repeats=30, verbose=False):\n \"\"\"\n Measure runtimes of prediction in both atomic and bulk mode.\n\n Parameters\n ----------\n estimator : already trained estimator supporting `predict()`\n X_test : test input\n n_bulk_repeats : how many times to repeat when evaluating bulk mode\n\n Returns\n -------\n atomic_runtimes, bulk_runtimes : a pair of `np.array` which contain the\n runtimes in seconds.\n\n \"\"\"\n atomic_runtimes = atomic_benchmark_estimator(estimator, X_test, verbose)\n bulk_runtimes = bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats, verbose)\n return atomic_runtimes, bulk_runtimes\n\n\ndef generate_dataset(n_train, n_test, n_features, noise=0.1, verbose=False):\n \"\"\"Generate a regression dataset with the given parameters.\"\"\"\n if verbose:\n print(\"generating dataset...\")\n\n X, y, coef = make_regression(\n n_samples=n_train + n_test, n_features=n_features, noise=noise, coef=True\n )\n\n random_seed = 13\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, train_size=n_train, test_size=n_test, 
{ "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "def atomic_benchmark_estimator(estimator, X_test, verbose=False):\n    \"\"\"Measure runtime prediction of each instance.\"\"\"\n    n_instances = X_test.shape[0]\n    runtimes = np.zeros(n_instances, dtype=float)\n    for i in range(n_instances):\n        instance = X_test[[i], :]\n        start = time.time()\n        estimator.predict(instance)\n        runtimes[i] = time.time() - start\n    if verbose:\n        print(\n            \"atomic_benchmark runtimes:\",\n            min(runtimes),\n            np.percentile(runtimes, 50),\n            max(runtimes),\n        )\n    return runtimes\n\n\ndef bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats, verbose):\n    \"\"\"Measure runtime prediction of the whole input.\"\"\"\n    n_instances = X_test.shape[0]\n    runtimes = np.zeros(n_bulk_repeats, dtype=float)\n    for i in range(n_bulk_repeats):\n        start = time.time()\n        estimator.predict(X_test)\n        runtimes[i] = time.time() - start\n    runtimes = runtimes / n_instances\n    if verbose:\n        print(\n            \"bulk_benchmark runtimes:\",\n            min(runtimes),\n            np.percentile(runtimes, 50),\n            max(runtimes),\n        )\n    return runtimes\n\n\ndef benchmark_estimator(estimator, X_test, n_bulk_repeats=30, verbose=False):\n    \"\"\"\n    Measure runtimes of prediction in both atomic and bulk mode.\n\n    Parameters\n    ----------\n    estimator : already trained estimator supporting `predict()`\n    X_test : test input\n    n_bulk_repeats : how many times to repeat when evaluating bulk mode\n    verbose : if True, print timing information\n\n    Returns\n    -------\n    atomic_runtimes, bulk_runtimes : a pair of `np.array` which contain the\n        runtimes in seconds.\n\n    \"\"\"\n    atomic_runtimes = atomic_benchmark_estimator(estimator, X_test, verbose)\n    bulk_runtimes = bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats, verbose)\n    return atomic_runtimes, bulk_runtimes\n\n\ndef generate_dataset(n_train, n_test, n_features, noise=0.1, verbose=False):\n    \"\"\"Generate a regression dataset with the given parameters.\"\"\"\n    if verbose:\n        print(\"generating dataset...\")\n\n    X, y, coef = make_regression(\n        n_samples=n_train + n_test, n_features=n_features, noise=noise, coef=True\n    )\n\n    random_seed = 13\n    X_train, X_test, y_train, y_test = train_test_split(\n        X, y, train_size=n_train, test_size=n_test, random_state=random_seed\n    )\n    X_train, y_train = shuffle(X_train, y_train, random_state=random_seed)\n\n    X_scaler = StandardScaler()\n    X_train = X_scaler.fit_transform(X_train)\n    X_test = X_scaler.transform(X_test)\n\n    y_scaler = StandardScaler()\n    y_train = y_scaler.fit_transform(y_train[:, None])[:, 0]\n    y_test = y_scaler.transform(y_test[:, None])[:, 0]\n\n    gc.collect()\n    if verbose:\n        print(\"ok\")\n    return X_train, y_train, X_test, y_test\n\n\ndef boxplot_runtimes(runtimes, pred_type, configuration):\n    \"\"\"\n    Plot a new `Figure` with boxplots of prediction runtimes.\n\n    Parameters\n    ----------\n    runtimes : list of `np.array` of latencies in microseconds\n    pred_type : 'bulk' or 'atomic'\n    configuration : benchmark configuration dict holding the estimator entries\n\n    \"\"\"\n\n    fig, ax1 = plt.subplots(figsize=(10, 6))\n    bp = plt.boxplot(runtimes)\n\n    cls_infos = [\n        \"%s\\n(%d %s)\"\n        % (\n            estimator_conf[\"name\"],\n            estimator_conf[\"complexity_computer\"](estimator_conf[\"instance\"]),\n            estimator_conf[\"complexity_label\"],\n        )\n        for estimator_conf in configuration[\"estimators\"]\n    ]\n    plt.setp(ax1, xticklabels=cls_infos)\n    plt.setp(bp[\"boxes\"], color=\"black\")\n    plt.setp(bp[\"whiskers\"], color=\"black\")\n    plt.setp(bp[\"fliers\"], color=\"red\", marker=\"+\")\n\n    ax1.yaxis.grid(True, linestyle=\"-\", which=\"major\", color=\"lightgrey\", alpha=0.5)\n\n    ax1.set_axisbelow(True)\n    ax1.set_title(\n        \"Prediction Time per Instance - %s, %d feats.\"\n        % (pred_type.capitalize(), configuration[\"n_features\"])\n    )\n    ax1.set_ylabel(\"Prediction Time (us)\")\n\n    plt.show()\n\n\ndef benchmark(configuration):\n    \"\"\"Run the whole benchmark.\"\"\"\n    X_train, y_train, X_test, y_test = generate_dataset(\n        configuration[\"n_train\"], configuration[\"n_test\"], configuration[\"n_features\"]\n    )\n\n    stats = {}\n    for estimator_conf in configuration[\"estimators\"]:\n        print(\"Benchmarking\", estimator_conf[\"instance\"])\n        estimator_conf[\"instance\"].fit(X_train, y_train)\n        gc.collect()\n        a, b = benchmark_estimator(estimator_conf[\"instance\"], X_test)\n        stats[estimator_conf[\"name\"]] = {\"atomic\": a, \"bulk\": b}\n\n    cls_names = [\n        estimator_conf[\"name\"] for estimator_conf in configuration[\"estimators\"]\n    ]\n    runtimes = [1e6 * stats[clf_name][\"atomic\"] for clf_name in cls_names]\n    boxplot_runtimes(runtimes, \"atomic\", configuration)\n    runtimes = [1e6 * stats[clf_name][\"bulk\"] for clf_name in cls_names]\n    boxplot_runtimes(runtimes, \"bulk (%d)\" % configuration[\"n_test\"], configuration)\n\n\ndef n_feature_influence(estimators, n_train, n_test, n_features, percentile):\n    \"\"\"\n    Estimate influence of the number of features on prediction time.\n\n    Parameters\n    ----------\n\n    estimators : dict of (name (str), estimator) to benchmark\n    n_train : number of training instances (int)\n    n_test : number of testing instances (int)\n    n_features : list of feature-space dimensionalities to test (int)\n    percentile : percentile at which to measure the speed (int [0-100])\n\n    Returns\n    -------\n\n    percentiles : dict(estimator_name,\n                       dict(n_features, percentile_perf_in_us))\n\n    \"\"\"\n    percentiles = defaultdict(defaultdict)\n    for n in n_features:\n        print(\"benchmarking with %d features\" % n)\n        X_train, y_train, X_test, y_test = generate_dataset(n_train, n_test, n)\n        for cls_name, estimator in estimators.items():\n            estimator.fit(X_train, y_train)\n            gc.collect()\n            runtimes = bulk_benchmark_estimator(estimator, X_test, 30, False)\n            percentiles[cls_name][n] = 1e6 * np.percentile(runtimes, percentile)\n    return percentiles\n\n\ndef plot_n_features_influence(percentiles, percentile):\n    fig, ax1 = plt.subplots(figsize=(10, 6))\n    colors = [\"r\", \"g\", \"b\"]\n    for i, cls_name in enumerate(percentiles.keys()):\n        x = np.array(sorted(percentiles[cls_name].keys()))\n        y = np.array([percentiles[cls_name][n] for n in x])\n        plt.plot(\n            x,\n            y,\n            color=colors[i],\n        )\n    ax1.yaxis.grid(True, linestyle=\"-\", which=\"major\", color=\"lightgrey\", alpha=0.5)\n    ax1.set_axisbelow(True)\n    ax1.set_title(\"Evolution of Prediction Time with #Features\")\n    ax1.set_xlabel(\"#Features\")\n    ax1.set_ylabel(\"Prediction Time at %d%%-ile (us)\" % percentile)\n    plt.show()\n\n\ndef benchmark_throughputs(configuration, duration_secs=0.1):\n    \"\"\"Benchmark throughput for different estimators.\"\"\"\n    X_train, y_train, X_test, y_test = generate_dataset(\n        configuration[\"n_train\"], configuration[\"n_test\"], configuration[\"n_features\"]\n    )\n    throughputs = dict()\n    for estimator_config in configuration[\"estimators\"]:\n        estimator_config[\"instance\"].fit(X_train, y_train)\n        start_time = time.time()\n        n_predictions = 0\n        while (time.time() - start_time) < duration_secs:\n            estimator_config[\"instance\"].predict(X_test[[0]])\n            n_predictions += 1\n        throughputs[estimator_config[\"name\"]] = n_predictions / duration_secs\n    return throughputs\n\n\ndef plot_benchmark_throughput(throughputs, configuration):\n    fig, ax = plt.subplots(figsize=(10, 6))\n    colors = [\"r\", \"g\", \"b\"]\n    cls_infos = [\n        \"%s\\n(%d %s)\"\n        % (\n            estimator_conf[\"name\"],\n            estimator_conf[\"complexity_computer\"](estimator_conf[\"instance\"]),\n            estimator_conf[\"complexity_label\"],\n        )\n        for estimator_conf in configuration[\"estimators\"]\n    ]\n    cls_values = [\n        throughputs[estimator_conf[\"name\"]]\n        for estimator_conf in configuration[\"estimators\"]\n    ]\n    plt.bar(range(len(throughputs)), cls_values, width=0.5, color=colors)\n    ax.set_xticks(np.linspace(0.25, len(throughputs) - 0.75, len(throughputs)))\n    ax.set_xticklabels(cls_infos, fontsize=10)\n    ymax = max(cls_values) * 1.2\n    ax.set_ylim((0, ymax))\n    ax.set_ylabel(\"Throughput (predictions/sec)\")\n    ax.set_title(\n        \"Prediction Throughput for different estimators (%d features)\"\n        % configuration[\"n_features\"]\n    )\n    plt.show()" ] },
{ "cell_type": "markdown", "metadata": {}, "source": [ "## Benchmark bulk/atomic prediction speed for various regressors\n\n" ] },
{ "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "configuration = {\n    \"n_train\": int(1e3),\n    \"n_test\": int(1e2),\n    \"n_features\": int(1e2),\n    \"estimators\": [\n        {\n            \"name\": \"Linear Model\",\n            \"instance\": SGDRegressor(\n                penalty=\"elasticnet\", alpha=0.01, l1_ratio=0.25, tol=1e-4\n            ),\n            \"complexity_label\": \"non-zero coefficients\",\n            \"complexity_computer\": lambda clf: np.count_nonzero(clf.coef_),\n        },\n        {\n            \"name\": \"RandomForest\",\n            \"instance\": RandomForestRegressor(),\n            \"complexity_label\": \"estimators\",\n            \"complexity_computer\": lambda clf: clf.n_estimators,\n        },\n        {\n            \"name\": \"SVR\",\n            \"instance\": SVR(kernel=\"rbf\"),\n            \"complexity_label\": \"support vectors\",\n            \"complexity_computer\": lambda clf: len(clf.support_vectors_),\n        },\n    ],\n}\nbenchmark(configuration)" ] },
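{ "cell_type": "markdown", "metadata": {}, "source": [ "The same helpers can also give a quick numeric summary of the boxplots above.\nThe short sketch below is just one possible way to use them: it fits a plain\n`Ridge` model (an arbitrary choice) on a freshly generated dataset and reports\nthe median per-instance latency in atomic and bulk mode.\n\n" ] },
{ "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# Minimal usage sketch: numeric summary of atomic vs. bulk latency for a\n# single, arbitrarily chosen Ridge model.\nX_train_demo, y_train_demo, X_test_demo, y_test_demo = generate_dataset(\n    configuration[\"n_train\"], configuration[\"n_test\"], configuration[\"n_features\"]\n)\nridge_model = Ridge().fit(X_train_demo, y_train_demo)\natomic_runtimes, bulk_runtimes = benchmark_estimator(ridge_model, X_test_demo)\nprint(\n    \"Ridge median latency: %.2f us/instance (atomic) vs %.2f us/instance (bulk)\"\n    % (1e6 * np.median(atomic_runtimes), 1e6 * np.median(bulk_runtimes))\n)" ] },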
{ "cell_type": "markdown", "metadata": {}, "source": [ "## Benchmark n_features influence on prediction speed\n\n" ] },
{ "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "percentile = 90\npercentiles = n_feature_influence(\n    {\"ridge\": Ridge()},\n    configuration[\"n_train\"],\n    configuration[\"n_test\"],\n    [100, 250, 500],\n    percentile,\n)\nplot_n_features_influence(percentiles, percentile)" ] },
{ "cell_type": "markdown", "metadata": {}, "source": [ "## Benchmark throughput\n\n" ] },
{ "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "throughputs = benchmark_throughputs(configuration)\nplot_benchmark_throughput(throughputs, configuration)" ] }
], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.21" } }, "nbformat": 4, "nbformat_minor": 0 }