{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "\n# Feature discretization\n\nA demonstration of feature discretization on synthetic classification datasets.\nFeature discretization decomposes each feature into a set of bins, here equally\ndistributed in width. The discrete values are then one-hot encoded, and given\nto a linear classifier. This preprocessing enables a non-linear behavior even\nthough the classifier is linear.\n\nOn this example, the first two rows represent linearly non-separable datasets\n(moons and concentric circles) while the third is approximately linearly\nseparable. On the two linearly non-separable datasets, feature discretization\nlargely increases the performance of linear classifiers. On the linearly\nseparable dataset, feature discretization decreases the performance of linear\nclassifiers. Two non-linear classifiers are also shown for comparison.\n\nThis example should be taken with a grain of salt, as the intuition conveyed\ndoes not necessarily carry over to real datasets. Particularly in\nhigh-dimensional spaces, data can more easily be separated linearly. Moreover,\nusing feature discretization and one-hot encoding increases the number of\nfeatures, which easily lead to overfitting when the number of samples is small.\n\nThe plots show training points in solid colors and testing points\nsemi-transparent. The lower right shows the classification accuracy on the test\nset.\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# Authors: The scikit-learn developers\n# SPDX-License-Identifier: BSD-3-Clause\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib.colors import ListedColormap\n\nfrom sklearn.datasets import make_circles, make_classification, make_moons\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.exceptions import ConvergenceWarning\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import GridSearchCV, train_test_split\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import KBinsDiscretizer, StandardScaler\nfrom sklearn.svm import SVC, LinearSVC\nfrom sklearn.utils._testing import ignore_warnings\n\nh = 0.02 # step size in the mesh\n\n\ndef get_name(estimator):\n name = estimator.__class__.__name__\n if name == \"Pipeline\":\n name = [get_name(est[1]) for est in estimator.steps]\n name = \" + \".join(name)\n return name\n\n\n# list of (estimator, param_grid), where param_grid is used in GridSearchCV\n# The parameter spaces in this example are limited to a narrow band to reduce\n# its runtime. 
In a real use case, a broader search space for the algorithms\n# should be used.\nclassifiers = [\n (\n make_pipeline(StandardScaler(), LogisticRegression(random_state=0)),\n {\"logisticregression__C\": np.logspace(-1, 1, 3)},\n ),\n (\n make_pipeline(StandardScaler(), LinearSVC(random_state=0)),\n {\"linearsvc__C\": np.logspace(-1, 1, 3)},\n ),\n (\n make_pipeline(\n StandardScaler(),\n KBinsDiscretizer(encode=\"onehot\", random_state=0),\n LogisticRegression(random_state=0),\n ),\n {\n \"kbinsdiscretizer__n_bins\": np.arange(5, 8),\n \"logisticregression__C\": np.logspace(-1, 1, 3),\n },\n ),\n (\n make_pipeline(\n StandardScaler(),\n KBinsDiscretizer(encode=\"onehot\", random_state=0),\n LinearSVC(random_state=0),\n ),\n {\n \"kbinsdiscretizer__n_bins\": np.arange(5, 8),\n \"linearsvc__C\": np.logspace(-1, 1, 3),\n },\n ),\n (\n make_pipeline(\n StandardScaler(), GradientBoostingClassifier(n_estimators=5, random_state=0)\n ),\n {\"gradientboostingclassifier__learning_rate\": np.logspace(-2, 0, 5)},\n ),\n (\n make_pipeline(StandardScaler(), SVC(random_state=0)),\n {\"svc__C\": np.logspace(-1, 1, 3)},\n ),\n]\n\nnames = [get_name(e).replace(\"StandardScaler + \", \"\") for e, _ in classifiers]\n\nn_samples = 100\ndatasets = [\n make_moons(n_samples=n_samples, noise=0.2, random_state=0),\n make_circles(n_samples=n_samples, noise=0.2, factor=0.5, random_state=1),\n make_classification(\n n_samples=n_samples,\n n_features=2,\n n_redundant=0,\n n_informative=2,\n random_state=2,\n n_clusters_per_class=1,\n ),\n]\n\nfig, axes = plt.subplots(\n nrows=len(datasets), ncols=len(classifiers) + 1, figsize=(21, 9)\n)\n\ncm_piyg = plt.cm.PiYG\ncm_bright = ListedColormap([\"#b30065\", \"#178000\"])\n\n# iterate over datasets\nfor ds_cnt, (X, y) in enumerate(datasets):\n print(f\"\\ndataset {ds_cnt}\\n---------\")\n\n # split into training and test part\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.5, random_state=42\n )\n\n # create the grid for background colors\n x_min, x_max = X[:, 0].min() - 0.5, X[:, 0].max() + 0.5\n y_min, y_max = X[:, 1].min() - 0.5, X[:, 1].max() + 0.5\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n\n # plot the dataset first\n ax = axes[ds_cnt, 0]\n if ds_cnt == 0:\n ax.set_title(\"Input data\")\n # plot the training points\n ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright, edgecolors=\"k\")\n # and testing points\n ax.scatter(\n X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6, edgecolors=\"k\"\n )\n ax.set_xlim(xx.min(), xx.max())\n ax.set_ylim(yy.min(), yy.max())\n ax.set_xticks(())\n ax.set_yticks(())\n\n # iterate over classifiers\n for est_idx, (name, (estimator, param_grid)) in enumerate(zip(names, classifiers)):\n ax = axes[ds_cnt, est_idx + 1]\n\n clf = GridSearchCV(estimator=estimator, param_grid=param_grid)\n with ignore_warnings(category=ConvergenceWarning):\n clf.fit(X_train, y_train)\n score = clf.score(X_test, y_test)\n print(f\"{name}: {score:.2f}\")\n\n # plot the decision boundary. 
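The boundary of the best estimator,\n        # refit by the grid search, is drawn as a filled contour.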
For that, we will assign a color to each\n # point in the mesh [x_min, x_max]*[y_min, y_max].\n if hasattr(clf, \"decision_function\"):\n Z = clf.decision_function(np.column_stack([xx.ravel(), yy.ravel()]))\n else:\n Z = clf.predict_proba(np.column_stack([xx.ravel(), yy.ravel()]))[:, 1]\n\n # put the result into a color plot\n Z = Z.reshape(xx.shape)\n ax.contourf(xx, yy, Z, cmap=cm_piyg, alpha=0.8)\n\n # plot the training points\n ax.scatter(\n X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright, edgecolors=\"k\"\n )\n # and testing points\n ax.scatter(\n X_test[:, 0],\n X_test[:, 1],\n c=y_test,\n cmap=cm_bright,\n edgecolors=\"k\",\n alpha=0.6,\n )\n ax.set_xlim(xx.min(), xx.max())\n ax.set_ylim(yy.min(), yy.max())\n ax.set_xticks(())\n ax.set_yticks(())\n\n if ds_cnt == 0:\n ax.set_title(name.replace(\" + \", \"\\n\"))\n ax.text(\n 0.95,\n 0.06,\n (f\"{score:.2f}\").lstrip(\"0\"),\n size=15,\n bbox=dict(boxstyle=\"round\", alpha=0.8, facecolor=\"white\"),\n transform=ax.transAxes,\n horizontalalignment=\"right\",\n )\n\n\nplt.tight_layout()\n\n# Add suptitles above the figure\nplt.subplots_adjust(top=0.90)\nsuptitles = [\n \"Linear classifiers\",\n \"Feature discretization and linear classifiers\",\n \"Non-linear classifiers\",\n]\nfor i, suptitle in zip([1, 3, 5], suptitles):\n ax = axes[0, i]\n ax.text(\n 1.05,\n 1.25,\n suptitle,\n transform=ax.transAxes,\n horizontalalignment=\"center\",\n size=\"x-large\",\n )\nplt.show()" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.21" } }, "nbformat": 4, "nbformat_minor": 0 }