{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "from __future__ import print_function\n",
    "\n",
    "from sklearn.datasets import fetch_20newsgroups\n",
    "from sklearn.decomposition import TruncatedSVD\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer\n",
    "from sklearn.feature_extraction.text import HashingVectorizer\n",
    "from sklearn.feature_extraction.text import TfidfTransformer\n",
    "from sklearn.pipeline import make_pipeline\n",
    "from sklearn.preprocessing import Normalizer\n",
    "from sklearn import metrics\n",
    "\n",
    "from sklearn.cluster import KMeans, MiniBatchKMeans\n",
    "\n",
    "from time import time\n",
    "\n",
    "import numpy as np"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "categories = [\n",
    "    'alt.atheism',\n",
    "    'talk.religion.misc',\n",
    "    'comp.graphics',\n",
    "    'sci.space',\n",
    "]\n",
    "# Uncomment the following to do the analysis on all the categories\n",
    "#categories = None\n",
    "\n",
    "print(\"Loading 20 newsgroups dataset for categories:\")\n",
    "print(categories)\n",
    "\n",
    "dataset = fetch_20newsgroups(subset='all', categories=categories,\n",
    "                             shuffle=True, random_state=42)\n",
    "\n",
    "print(\"%d documents\" % len(dataset.data))\n",
    "print(\"%d categories\" % len(dataset.target_names))\n",
    "print()\n",
    "\n",
    "labels = dataset.target\n",
    "true_k = np.unique(labels).shape[0]\n",
    "\n",
    "# The original script reads these settings from optparse command-line\n",
    "# options (`opts`), which never exist inside a notebook and raised a\n",
    "# NameError here. Define them as plain variables instead; the values\n",
    "# mirror the script's defaults.\n",
    "n_features = 10000   # cap on the vocabulary size\n",
    "n_components = None  # e.g. 100 to reduce the TF-IDF space with LSA\n",
    "use_idf = True       # weight terms by inverse document frequency\n",
    "use_hashing = False  # this notebook sticks to TfidfVectorizer\n",
    "minibatch = True     # MiniBatchKMeans scales better than full KMeans\n",
    "verbose = False\n",
    "\n",
    "print(\"Extracting features from the training dataset using a sparse vectorizer\")\n",
    "t0 = time()\n",
    "vectorizer = TfidfVectorizer(max_df=0.5, max_features=n_features,\n",
    "                             min_df=2, stop_words='english',\n",
    "                             use_idf=use_idf)\n",
    "X = vectorizer.fit_transform(dataset.data)\n",
    "\n",
    "print(\"done in %fs\" % (time() - t0))\n",
    "print(\"n_samples: %d, n_features: %d\" % X.shape)\n",
    "print()\n",
    "\n",
    "if n_components:\n",
    "    print(\"Performing dimensionality reduction using LSA\")\n",
    "    t0 = time()\n",
    "    # Vectorizer results are normalized, which makes KMeans behave as\n",
    "    # spherical k-means for better results. Since LSA/SVD results are\n",
    "    # not normalized, we have to redo the normalization.\n",
    "    svd = TruncatedSVD(n_components)\n",
    "    lsa = make_pipeline(svd, Normalizer(copy=False))\n",
    "\n",
    "    X = lsa.fit_transform(X)\n",
    "\n",
    "    print(\"done in %fs\" % (time() - t0))\n",
    "\n",
    "    explained_variance = svd.explained_variance_ratio_.sum()\n",
    "    print(\"Explained variance of the SVD step: {}%\".format(\n",
    "        int(explained_variance * 100)))\n",
    "\n",
    "    print()\n",
    "\n",
    "\n",
    "###############################################################################\n",
    "# Do the actual clustering\n",
    "\n",
    "if minibatch:\n",
    "    km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,\n",
    "                         init_size=1000, batch_size=1000, verbose=verbose)\n",
    "else:\n",
    "    km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,\n",
    "                verbose=verbose)\n",
    "\n",
    "print(\"Clustering sparse data with %s\" % km)\n",
    "t0 = time()\n",
    "km.fit(X)\n",
    "print(\"done in %0.3fs\" % (time() - t0))\n",
    "print()\n",
    "\n",
    "print(\"Homogeneity: %0.3f\" % metrics.homogeneity_score(labels, km.labels_))\n",
    "print(\"Completeness: %0.3f\" % metrics.completeness_score(labels, km.labels_))\n",
    "print(\"V-measure: %0.3f\" % metrics.v_measure_score(labels, km.labels_))\n",
    "print(\"Adjusted Rand-Index: %.3f\"\n",
    "      % metrics.adjusted_rand_score(labels, km.labels_))\n",
    "# The silhouette coefficient judges the clustering itself, so compute it\n",
    "# on the predicted cluster labels rather than the ground-truth labels.\n",
    "print(\"Silhouette Coefficient: %0.3f\"\n",
    "      % metrics.silhouette_score(X, km.labels_, sample_size=1000))\n",
    "\n",
    "print()\n",
    "\n",
    "# Cluster centers map back to terms only when neither LSA nor hashing\n",
    "# changed the feature space.\n",
    "if not (n_components or use_hashing):\n",
    "    print(\"Top terms per cluster:\")\n",
    "    order_centroids = km.cluster_centers_.argsort()[:, ::-1]\n",
    "    terms = vectorizer.get_feature_names()\n",
    "    for i in range(true_k):\n",
    "        print(\"Cluster %d:\" % i, end='')\n",
    "        for ind in order_centroids[i, :10]:\n",
    "            print(' %s' % terms[ind], end='')\n",
    "        print()"
   ]
  }
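,
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The imports above also pull in `HashingVectorizer` and `TfidfTransformer`, which the original script only uses when its `--use-hashing` option is set. The cell below is a minimal sketch of that alternative, assuming the same `n_features` and stop-word settings as the TF-IDF path above; it is not wired into the clustering by default. Hashed features cannot be mapped back to terms, which is why the \"Top terms per cluster\" report is skipped when hashing is used."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "# Hashing-based alternative to TfidfVectorizer (sketch, not used above).\n",
    "# HashingVectorizer is stateless and memory-friendly, at the cost of\n",
    "# losing the index -> term mapping.\n",
    "if use_idf:\n",
    "    # A stateless hasher cannot collect the document frequencies that\n",
    "    # IDF weighting needs, so chain a TfidfTransformer after it.\n",
    "    # (`non_negative=True` is the old scikit-learn spelling matching\n",
    "    # this Python 2 kernel; newer releases use `alternate_sign=False`.)\n",
    "    hasher = HashingVectorizer(n_features=n_features,\n",
    "                               stop_words='english', non_negative=True,\n",
    "                               norm=None, binary=False)\n",
    "    hashing_vectorizer = make_pipeline(hasher, TfidfTransformer())\n",
    "else:\n",
    "    hashing_vectorizer = HashingVectorizer(n_features=n_features,\n",
    "                                           stop_words='english',\n",
    "                                           non_negative=True, norm='l2',\n",
    "                                           binary=False)\n",
    "\n",
    "X_hashed = hashing_vectorizer.fit_transform(dataset.data)\n",
    "print(\"n_samples: %d, n_features: %d\" % X_hashed.shape)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "A small usage sketch: once `km` is fitted, new documents can be assigned to the learned clusters by transforming them with the same fitted `vectorizer` and calling `km.predict`. The two example documents are made up for illustration. If the LSA step were enabled above, the new documents would also have to pass through `lsa.transform` first."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "# Assign unseen documents to the learned clusters. The texts below are\n",
    "# invented examples, not part of the dataset.\n",
    "new_docs = [\n",
    "    \"NASA launched a new probe to study the outer solar system.\",\n",
    "    \"OpenGL shaders make real-time 3D rendering possible.\",\n",
    "]\n",
    "X_new = vectorizer.transform(new_docs)\n",
    "print(km.predict(X_new))"
   ]
  }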
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 2",
   "language": "python",
   "name": "python2"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 0
}