{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "\n# Multiclass sparse logistic regression on 20newgroups\n\nComparison of multinomial logistic L1 vs one-versus-rest L1 logistic regression\nto classify documents from the newgroups20 dataset. Multinomial logistic\nregression yields more accurate results and is faster to train on the larger\nscale dataset.\n\nHere we use the l1 sparsity that trims the weights of not informative\nfeatures to zero. This is good if the goal is to extract the strongly\ndiscriminative vocabulary of each class. If the goal is to get the best\npredictive accuracy, it is better to use the non sparsity-inducing l2 penalty\ninstead.\n\nA more traditional (and possibly better) way to predict on a sparse subset of\ninput features would be to use univariate feature selection followed by a\ntraditional (l2-penalised) logistic regression model.\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# Authors: The scikit-learn developers\n# SPDX-License-Identifier: BSD-3-Clause\n\nimport timeit\nimport warnings\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom sklearn.datasets import fetch_20newsgroups_vectorized\nfrom sklearn.exceptions import ConvergenceWarning\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.multiclass import OneVsRestClassifier\n\nwarnings.filterwarnings(\"ignore\", category=ConvergenceWarning, module=\"sklearn\")\nt0 = timeit.default_timer()\n\n# We use SAGA solver\nsolver = \"saga\"\n\n# Turn down for faster run time\nn_samples = 5000\n\nX, y = fetch_20newsgroups_vectorized(subset=\"all\", return_X_y=True)\nX = X[:n_samples]\ny = y[:n_samples]\n\nX_train, X_test, y_train, y_test = train_test_split(\n X, y, random_state=42, stratify=y, test_size=0.1\n)\ntrain_samples, n_features = X_train.shape\nn_classes = np.unique(y).shape[0]\n\nprint(\n \"Dataset 20newsgroup, train_samples=%i, n_features=%i, n_classes=%i\"\n % (train_samples, n_features, n_classes)\n)\n\nmodels = {\n \"ovr\": {\"name\": \"One versus Rest\", \"iters\": [1, 2, 3]},\n \"multinomial\": {\"name\": \"Multinomial\", \"iters\": [1, 2, 5]},\n}\n\nfor model in models:\n # Add initial chance-level values for plotting purpose\n accuracies = [1 / n_classes]\n times = [0]\n densities = [1]\n\n model_params = models[model]\n\n # Small number of epochs for fast runtime\n for this_max_iter in model_params[\"iters\"]:\n print(\n \"[model=%s, solver=%s] Number of epochs: %s\"\n % (model_params[\"name\"], solver, this_max_iter)\n )\n clf = LogisticRegression(\n solver=solver,\n penalty=\"l1\",\n max_iter=this_max_iter,\n random_state=42,\n )\n if model == \"ovr\":\n clf = OneVsRestClassifier(clf)\n t1 = timeit.default_timer()\n clf.fit(X_train, y_train)\n train_time = timeit.default_timer() - t1\n\n y_pred = clf.predict(X_test)\n accuracy = np.sum(y_pred == y_test) / y_test.shape[0]\n if model == \"ovr\":\n coef = np.concatenate([est.coef_ for est in clf.estimators_])\n else:\n coef = clf.coef_\n density = np.mean(coef != 0, axis=1) * 100\n accuracies.append(accuracy)\n densities.append(density)\n times.append(train_time)\n models[model][\"times\"] = times\n models[model][\"densities\"] = densities\n models[model][\"accuracies\"] = accuracies\n print(\"Test accuracy for model %s: %.4f\" % (model, accuracies[-1]))\n print(\n \"%% non-zero coefficients for model %s, per class:\\n %s\"\n % (model, densities[-1])\n )\n print(\n \"Run time (%i epochs) for model %s:%.2f\"\n % (model_params[\"iters\"][-1], model, times[-1])\n )\n\nfig = plt.figure()\nax = fig.add_subplot(111)\n\nfor model in models:\n name = models[model][\"name\"]\n times = models[model][\"times\"]\n accuracies = models[model][\"accuracies\"]\n ax.plot(times, accuracies, marker=\"o\", label=\"Model: %s\" % name)\n ax.set_xlabel(\"Train time (s)\")\n ax.set_ylabel(\"Test accuracy\")\nax.legend()\nfig.suptitle(\"Multinomial vs One-vs-Rest Logistic L1\\nDataset %s\" % \"20newsgroups\")\nfig.tight_layout()\nfig.subplots_adjust(top=0.85)\nrun_time = timeit.default_timer() - t0\nprint(\"Example run in %.3f s\" % run_time)\nplt.show()" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.21" } }, "nbformat": 4, "nbformat_minor": 0 }