{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Machine Learning with Python"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Data Loading and Visualization"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "from sklearn import datasets\n",
    "\n",
    "# Load the classic iris dataset: 150 samples, 4 features, 3 classes.\n",
    "iris = datasets.load_iris()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<class 'numpy.ndarray'>\n",
      "(422, 10)\n",
      "[[ 0.03807591  0.05068012  0.06169621  0.02187235 -0.0442235  -0.03482076\n",
      "  -0.04340085 -0.00259226  0.01990842 -0.01764613]\n",
      " [-0.00188202 -0.04464164 -0.05147406 -0.02632783 -0.00844872 -0.01916334\n",
      "   0.07441156 -0.03949338 -0.06832974 -0.09220405]\n",
      " [ 0.08529891  0.05068012  0.04445121 -0.00567061 -0.04559945 -0.03419447\n",
      "  -0.03235593 -0.00259226  0.00286377 -0.02593034]\n",
      " [-0.08906294 -0.04464164 -0.01159501 -0.03665645  0.01219057  0.02499059\n",
      "  -0.03603757  0.03430886  0.02269202 -0.00936191]\n",
      " [ 0.00538306 -0.04464164 -0.03638469  0.02187235  0.00393485  0.01559614\n",
      "   0.00814208 -0.00259226 -0.03199144 -0.04664087]]\n"
     ]
    }
   ],
   "source": [
    "# The diabetes dataset contains, for each of 442 patients, 10 physiological\n",
    "# features (age, sex, weight, blood pressure, ...) and an indicator of\n",
    "# disease progression measured one year later as the regression target.\n",
    "\n",
    "diabetes = datasets.load_diabetes()\n",
    "\n",
    "# Hold out the last 20 samples for testing.\n",
    "diabetes_X_train = diabetes.data[:-20]\n",
    "diabetes_X_test = diabetes.data[-20:]\n",
    "\n",
    "diabetes_y_train = diabetes.target[:-20]\n",
    "diabetes_y_test = diabetes.target[-20:]\n",
    "\n",
    "print(type(diabetes_X_train))\n",
    "print(diabetes_X_train.shape)\n",
    "\n",
    "print(diabetes_X_train[:5])"
   ]
  },
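  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "A quick sanity check on the hold-out split above: the training and test parts should add up to all 442 samples. A minimal sketch (the `feature_names` attribute is assumed to be present on the installed scikit-learn version):"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 422 training samples and 20 test samples, 10 features each.\n",
    "print(diabetes_X_train.shape, diabetes_X_test.shape)\n",
    "print(diabetes_y_train.shape, diabetes_y_test.shape)\n",
    "\n",
    "# Assumed attribute: recent scikit-learn versions expose the column names.\n",
    "print(diabetes.feature_names)"
   ]
  },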
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Training and Test Sets: train_test_split"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([[5.1, 3.5, 1.4, 0.2],\n",
       "       [4.9, 3. , 1.4, 0.2],\n",
       "       [4.7, 3.2, 1.3, 0.2],\n",
       "       ...,\n",
       "       [6.5, 3. , 5.2, 2. ],\n",
       "       [6.2, 3.4, 5.4, 2.3],\n",
       "       [5.9, 3. , 5.1, 1.8]])"
      ]
     },
     "execution_count": 36,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from sklearn import datasets\n",
    "import numpy as np\n",
    "\n",
    "iris = datasets.load_iris()\n",
    "data = iris.data\n",
    "target = iris.target\n",
    "\n",
    "# sklearn.cross_validation was removed; train_test_split now lives in\n",
    "# sklearn.model_selection.\n",
    "from sklearn.model_selection import train_test_split\n",
    "\n",
    "train, test, t_train, t_test = train_test_split(data, target, test_size=0.2, random_state=0)\n",
    "\n",
    "data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Container object for datasets\n",
      "\n",
      "    Dictionary-like object that exposes its keys as attributes.\n",
      "\n",
      "    >>> b = Bunch(a=1, b=2)\n",
      "    >>> b['b']\n",
      "    2\n",
      "    >>> b.b\n",
      "    2\n",
      "    >>> b.a = 3\n",
      "    >>> b['a']\n",
      "    3\n",
      "    >>> b.c = 6\n",
      "    >>> b['c']\n",
      "    6\n",
      "\n",
      "    \n"
     ]
    }
   ],
   "source": [
    "# load_iris returns a Bunch; its docstring explains the dict/attribute duality.\n",
    "print(iris.__doc__)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "`train_test_split` is the standard utility for hold-out evaluation: it randomly splits a sample set into training and test data in a given proportion. The typical call is:\n",
    "\n",
    "`X_train, X_test, y_train, y_test = train_test_split(train_data, train_target, test_size=0.4, random_state=0)`\n",
    "\n",
    "(Older tutorials write `cross_validation.train_test_split`; since scikit-learn 0.20 the function must be imported from `sklearn.model_selection`.)\n",
    "\n",
    "Parameters:\n",
    "\n",
    "* `train_data`: the feature matrix to split\n",
    "* `train_target`: the corresponding labels\n",
    "* `test_size`: the proportion of samples to hold out for the test set (an integer is interpreted as an absolute number of samples)\n",
    "* `random_state`: the seed for the random number generator\n",
    "\n",
    "The seed identifies one particular pseudo-random sequence, which is what makes repeated experiments reproducible: with the same seed (any fixed integer, e.g. 1) and otherwise identical arguments you get exactly the same split every time, whereas leaving it unset (`None`, the default) gives a different split on every run. In short, the random numbers are determined by the seed: different seeds produce different random numbers, and the same seed produces the same random numbers even across different generator instances. An example follows below."
   ]
  },
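  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "To see the effect of `random_state` directly, the sketch below splits the iris data twice with the same seed and once with the default. The variable names are arbitrary and used only for this illustration:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "from sklearn.model_selection import train_test_split\n",
    "\n",
    "# Same seed, same arguments -> identical split on every call.\n",
    "a_train, a_test, _, _ = train_test_split(data, target, test_size=0.2, random_state=1)\n",
    "b_train, b_test, _, _ = train_test_split(data, target, test_size=0.2, random_state=1)\n",
    "print(np.array_equal(a_train, b_train))  # True\n",
    "\n",
    "# random_state=None (the default) reshuffles differently on each call.\n",
    "c_train, c_test, _, _ = train_test_split(data, target, test_size=0.2)\n",
    "print(np.array_equal(a_train, c_train))  # almost surely False"
   ]
  },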
  {
   "cell_type": "code",
   "execution_count": 38,
   "metadata": {},
   "outputs": [],
   "source": [
    "## The dataset used by the classifiers below"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[5.1 3.5 1.4 0.2]\n",
      " [4.9 3.  1.4 0.2]\n",
      " [4.7 3.2 1.3 0.2]\n",
      " ...\n",
      " [6.5 3.  5.2 2. ]\n",
      " [6.2 3.4 5.4 2.3]\n",
      " [5.9 3.  5.1 1.8]]\n",
      "[0 0 0 ... 2 2 2]\n"
     ]
    }
   ],
   "source": [
    "from sklearn import datasets\n",
    "import numpy as np\n",
    "\n",
    "iris = datasets.load_iris()\n",
    "data = iris.data\n",
    "target = iris.target\n",
    "print(data)\n",
    "print(target)\n",
    "\n",
    "# 'from sklearn import cross_validation' raises ImportError on current\n",
    "# scikit-learn; use model_selection instead. Distinct variable names keep\n",
    "# the 80/20 split from above intact for the classifiers below.\n",
    "from sklearn.model_selection import train_test_split\n",
    "train2, test2, t_train2, t_test2 = train_test_split(data, target, test_size=0.4, random_state=0)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Naive Bayes Classification"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 53,
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[2 1 0 2 0 2 0 1 1 1 1 1 1 1 1 0 1 1 0 0 2 1 0 0 2 0 0 1 1 0]\n",
      "0.9666666666666667\n",
      "0.95\n",
      "              precision    recall  f1-score   support\n",
      "\n",
      "           0       1.00      1.00      1.00        11\n",
      "           1       0.93      1.00      0.96        13\n",
      "           2       1.00      0.83      0.91         6\n",
      "\n",
      "   micro avg       0.97      0.97      0.97        30\n",
      "   macro avg       0.98      0.94      0.96        30\n",
      "weighted avg       0.97      0.97      0.97        30\n",
      "\n",
      "[[11  0  0]\n",
      " [ 0 13  0]\n",
      " [ 0  1  5]]\n"
     ]
    }
   ],
   "source": [
    "from sklearn import metrics\n",
    "from sklearn.naive_bayes import GaussianNB\n",
    "\n",
    "model = GaussianNB()\n",
    "\n",
    "model.fit(train, t_train)  # fit on the 80% training split\n",
    "\n",
    "expected = t_test\n",
    "predicted = model.predict(test)\n",
    "\n",
    "print(model.predict(test))\n",
    "\n",
    "print(model.score(test, t_test))    # test accuracy\n",
    "print(model.score(train, t_train))  # training accuracy\n",
    "\n",
    "# summarize the fit of the model\n",
    "print(metrics.classification_report(expected, predicted))\n",
    "print(metrics.confusion_matrix(expected, predicted))"
   ]
  },
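  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Beyond hard labels, `GaussianNB` also exposes per-class posterior probabilities. A minimal sketch reusing `model` and `test` from the cell above:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "\n",
    "# Posterior probability of each class for the first five test samples.\n",
    "# Columns follow model.classes_; each row sums to 1 and the hard\n",
    "# prediction is the argmax of the row.\n",
    "print(model.classes_)\n",
    "print(np.round(model.predict_proba(test[:5]), 3))"
   ]
  },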
  {
   "cell_type": "code",
   "execution_count": 54,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Returns the mean accuracy on the given test data and labels.\n",
      "\n",
      "        In multi-label classification, this is the subset accuracy\n",
      "        which is a harsh metric since you require for each sample that\n",
      "        each label set be correctly predicted.\n",
      "\n",
      "        Parameters\n",
      "        ----------\n",
      "        X : array-like, shape = (n_samples, n_features)\n",
      "            Test samples.\n",
      "\n",
      "        y : array-like, shape = (n_samples) or (n_samples, n_outputs)\n",
      "            True labels for X.\n",
      "\n",
      "        sample_weight : array-like, shape = [n_samples], optional\n",
      "            Sample weights.\n",
      "\n",
      "        Returns\n",
      "        -------\n",
      "        score : float\n",
      "            Mean accuracy of self.predict(X) wrt. y.\n",
      "        \n"
     ]
    }
   ],
   "source": [
    "print(model.score.__doc__)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Linear Regression"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 41,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[ 3.03499549e-01 -2.37639315e+02  5.10530605e+02  3.27736980e+02\n",
      " -8.14131709e+02  4.92814588e+02  1.02848452e+02  1.84606489e+02\n",
      "  7.43519617e+02  7.60951722e+01]\n",
      "{'intercept': 152.76430691633442, 'coefficient': array([ 3.03499549e-01, -2.37639315e+02,  5.10530605e+02,  3.27736980e+02,\n",
      "       -8.14131709e+02,  4.92814588e+02,  1.02848452e+02,  1.84606489e+02,\n",
      "        7.43519617e+02,  7.60951722e+01])}\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "2004.5676026898211"
      ]
     },
     "execution_count": 41,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from sklearn.linear_model import LinearRegression\n",
    "\n",
    "regr = LinearRegression()\n",
    "regr.fit(diabetes_X_train, diabetes_y_train)\n",
    "\n",
    "print(regr.coef_)\n",
    "\n",
    "predictions = {}\n",
    "predictions['intercept'] = regr.intercept_\n",
    "predictions['coefficient'] = regr.coef_\n",
    "\n",
    "print(predictions)\n",
    "\n",
    "# The mean squared error on the 20 held-out samples.\n",
    "# (Explained variance score: 1 is perfect prediction and 0 means there\n",
    "# is no linear relationship between X and y; see the next cell.)\n",
    "np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2)"
   ]
  },
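  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The explained-variance idea in the comment above is what `LinearRegression.score` computes: the coefficient of determination R², where 1.0 is perfect prediction and values near 0 mean no linear relationship. A one-line check on the same held-out data:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# R^2 of the fitted model on the 20 held-out diabetes samples.\n",
    "print(regr.score(diabetes_X_test, diabetes_y_test))"
   ]
  },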
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Logistic Regression"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 42,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "              precision    recall  f1-score   support\n",
      "\n",
      "           0       1.00      1.00      1.00        11\n",
      "           1       1.00      0.92      0.96        13\n",
      "           2       0.86      1.00      0.92         6\n",
      "\n",
      "   micro avg       0.97      0.97      0.97        30\n",
      "   macro avg       0.95      0.97      0.96        30\n",
      "weighted avg       0.97      0.97      0.97        30\n",
      "\n",
      "[[11  0  0]\n",
      " [ 0 12  1]\n",
      " [ 0  0  6]]\n"
     ]
    }
   ],
   "source": [
    "from sklearn import metrics\n",
    "from sklearn.linear_model import LogisticRegression\n",
    "\n",
    "# Pin the 0.20-era defaults explicitly; this silences the FutureWarnings\n",
    "# about the solver and multi_class defaults changing in 0.22.\n",
    "model = LogisticRegression(solver='liblinear', multi_class='ovr')\n",
    "\n",
    "model.fit(train, t_train)\n",
    "\n",
    "expected = t_test\n",
    "predicted = model.predict(test)\n",
    "\n",
    "# summarize the fit of the model\n",
    "print(metrics.classification_report(expected, predicted))\n",
    "print(metrics.confusion_matrix(expected, predicted))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 56,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'C:\\\\Users\\\\yuli\\\\AppData\\\\Local\\\\Continuum\\\\anaconda3\\\\lib\\\\site-packages\\\\sklearn\\\\linear_model\\\\__init__.py'"
      ]
     },
     "execution_count": 56,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Where the installed linear_model package lives on disk.\n",
    "import sklearn\n",
    "sklearn.linear_model.__file__"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## K-Nearest Neighbors"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "              precision    recall  f1-score   support\n",
      "\n",
      "           0       1.00      1.00      1.00        11\n",
      "           1       1.00      0.92      0.96        13\n",
      "           2       0.86      1.00      0.92         6\n",
      "\n",
      "   micro avg       0.97      0.97      0.97        30\n",
      "   macro avg       0.95      0.97      0.96        30\n",
      "weighted avg       0.97      0.97      0.97        30\n",
      "\n",
      "[[11  0  0]\n",
      " [ 0 12  1]\n",
      " [ 0  0  6]]\n"
     ]
    }
   ],
   "source": [
    "from sklearn import metrics\n",
    "from sklearn.neighbors import KNeighborsClassifier\n",
    "\n",
    "# fit a k-nearest neighbor model to the data\n",
    "model = KNeighborsClassifier()\n",
    "\n",
    "model.fit(train, t_train)\n",
    "\n",
    "expected = t_test\n",
    "predicted = model.predict(test)\n",
    "\n",
    "# summarize the fit of the model\n",
    "print(metrics.classification_report(expected, predicted))\n",
    "print(metrics.confusion_matrix(expected, predicted))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 57,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn import datasets, neighbors, linear_model\n",
    "\n",
    "# The digits dataset: 8x8 grayscale images flattened to 64 features.\n",
    "digits = datasets.load_digits()\n",
    "X_digits = digits.data\n",
    "y_digits = digits.target\n",
    "n_samples = len(X_digits)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 59,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "1797"
      ]
     },
     "execution_count": 59,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "n_samples"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "X_train: \n",
      " [[ 0.  0.  5. ...  0.  0.  0.]\n",
      " [ 0.  0.  0. ... 10.  0.  0.]\n",
      " [ 0.  0.  0. ... 16.  9.  0.]\n",
      " ...\n",
      " [ 0.  0. 12. ...  0.  0.  0.]\n",
      " [ 0.  0.  0. ...  9.  0.  0.]\n",
      " [ 0.  0.  1. ... 16.  5.  0.]]\n",
      "y_train: \n",
      "  [0 1 2 ... 5 0 9]\n",
      "KNN score: 0.961111\n",
      "LogisticRegression score: 0.938889\n"
     ]
    }
   ],
   "source": [
    "from sklearn import datasets, neighbors, linear_model\n",
    "\n",
    "digits = datasets.load_digits()\n",
    "X_digits = digits.data\n",
    "y_digits = digits.target\n",
    "n_samples = len(X_digits)\n",
    "\n",
    "# Use the first 90% of the samples for training and the rest for testing.\n",
    "train_n = int(0.9 * n_samples)\n",
    "X_train = X_digits[:train_n]\n",
    "y_train = y_digits[:train_n]\n",
    "X_test = X_digits[train_n:]\n",
    "y_test = y_digits[train_n:]\n",
    "print('X_train: \\n', X_train)\n",
    "print('y_train: \\n ', y_train)\n",
    "\n",
    "knn = neighbors.KNeighborsClassifier()\n",
    "print('KNN score: %f' % knn.fit(X_train, y_train).score(X_test, y_test))\n",
    "\n",
    "# Pin the 0.20-era defaults, as above, to silence the FutureWarnings.\n",
    "logistic = linear_model.LogisticRegression(solver='liblinear', multi_class='ovr')\n",
    "print('LogisticRegression score: %f' % logistic.fit(X_train, y_train).score(X_test, y_test))"
   ]
  },
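  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "`KNeighborsClassifier` uses `n_neighbors=5` by default. A small sketch, reusing the digits split above, to see how sensitive the hold-out score is to the neighborhood size:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn import neighbors\n",
    "\n",
    "# Hold-out accuracy of k-NN on digits for a few neighborhood sizes.\n",
    "for k in (1, 3, 5, 10):\n",
    "    clf = neighbors.KNeighborsClassifier(n_neighbors=k)\n",
    "    print(k, clf.fit(X_train, y_train).score(X_test, y_test))"
   ]
  },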
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Decision Trees"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "DecisionTreeClassifier(class_weight=None, criterion='gini', max_depth=None,\n",
      "            max_features=None, max_leaf_nodes=None,\n",
      "            min_impurity_decrease=0.0, min_impurity_split=None,\n",
      "            min_samples_leaf=1, min_samples_split=2,\n",
      "            min_weight_fraction_leaf=0.0, presort=False, random_state=None,\n",
      "            splitter='best')\n",
      "              precision    recall  f1-score   support\n",
      "\n",
      "           0       1.00      1.00      1.00        11\n",
      "           1       1.00      1.00      1.00        13\n",
      "           2       1.00      1.00      1.00         6\n",
      "\n",
      "   micro avg       1.00      1.00      1.00        30\n",
      "   macro avg       1.00      1.00      1.00        30\n",
      "weighted avg       1.00      1.00      1.00        30\n",
      "\n",
      "[[11  0  0]\n",
      " [ 0 13  0]\n",
      " [ 0  0  6]]\n"
     ]
    }
   ],
   "source": [
    "from sklearn import metrics\n",
    "from sklearn.tree import DecisionTreeClassifier\n",
    "\n",
    "model = DecisionTreeClassifier()\n",
    "\n",
    "print(model)\n",
    "\n",
    "model.fit(train, t_train)\n",
    "\n",
    "expected = t_test\n",
    "predicted = model.predict(test)\n",
    "\n",
    "# summarize the fit of the model\n",
    "print(metrics.classification_report(expected, predicted))\n",
    "print(metrics.confusion_matrix(expected, predicted))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Support Vector Machines"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 46,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,\n",
      "  decision_function_shape='ovr', degree=3, gamma='auto',\n",
      "  kernel='rbf', max_iter=-1, probability=False, random_state=None,\n",
      "  shrinking=True, tol=0.001, verbose=False)\n",
      "              precision    recall  f1-score   support\n",
      "\n",
      "           0       1.00      1.00      1.00        11\n",
      "           1       1.00      1.00      1.00        13\n",
      "           2       1.00      1.00      1.00         6\n",
      "\n",
      "   micro avg       1.00      1.00      1.00        30\n",
      "   macro avg       1.00      1.00      1.00        30\n",
      "weighted avg       1.00      1.00      1.00        30\n",
      "\n",
      "[[11  0  0]\n",
      " [ 0 13  0]\n",
      " [ 0  0  6]]\n"
     ]
    }
   ],
   "source": [
    "from sklearn import metrics\n",
    "from sklearn.svm import SVC\n",
    "\n",
    "# gamma='auto' pins the pre-0.22 default and silences the FutureWarning\n",
    "# about the default changing to 'scale'.\n",
    "model = SVC(gamma='auto')\n",
    "model.fit(train, t_train)\n",
    "\n",
    "print(model)\n",
    "\n",
    "expected = t_test\n",
    "predicted = model.predict(test)\n",
    "\n",
    "# summarize the fit of the model\n",
    "print(metrics.classification_report(expected, predicted))\n",
    "print(metrics.confusion_matrix(expected, predicted))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "C-Support Vector Classification.\n",
      "\n",
      "    The implementation is based on libsvm. The fit time complexity\n",
      "    is more than quadratic with the number of samples which makes it hard\n",
      "    to scale to dataset with more than a couple of 10000 samples.\n",
      "\n",
      "    The multiclass support is handled according to a one-vs-one scheme.\n",
      "\n",
      "    For details on the precise mathematical formulation of the provided\n",
      "    kernel functions and how `gamma`, `coef0` and `degree` affect each\n",
      "    other, see the corresponding section in the narrative documentation:\n",
      "    :ref:`svm_kernels`.\n",
      "\n",
      "    ...\n"
     ]
    }
   ],
   "source": [
    "# Print the SVC docstring (long; output truncated here).\n",
    "from sklearn.svm import SVC\n",
    "print(SVC.__doc__)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 48,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,\n",
       "  decision_function_shape='ovr', degree=3, gamma='auto_deprecated',\n",
       "  kernel='linear', max_iter=-1, probability=False, random_state=None,\n",
       "  shrinking=True, tol=0.001, verbose=False)"
      ]
     },
     "execution_count": 48,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from sklearn import datasets\n",
    "\n",
    "iris = datasets.load_iris()\n",
    "iris_X = iris.data\n",
    "iris_y = iris.target\n",
    "\n",
    "# Shuffle the samples and hold out the last 10 for testing.\n",
    "indices = np.random.permutation(len(iris_X))\n",
    "iris_X_train = iris_X[indices[:-10]]\n",
    "iris_y_train = iris_y[indices[:-10]]\n",
    "iris_X_test = iris_X[indices[-10:]]\n",
    "iris_y_test = iris_y[indices[-10:]]\n",
    "\n",
    "from sklearn import svm\n",
    "\n",
    "svc = svm.SVC(kernel='linear')  # try kernel='rbf' for comparison\n",
    "svc.fit(iris_X_train, iris_y_train)"
   ]
  },
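  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "With 10 held-out samples available, the fitted linear-kernel SVC can be scored like the other classifiers. A minimal check reusing `svc` and the arrays from the cell above:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Mean accuracy on the 10 shuffled hold-out samples,\n",
    "# plus the predictions next to the true labels.\n",
    "print(svc.score(iris_X_test, iris_y_test))\n",
    "print(svc.predict(iris_X_test))\n",
    "print(iris_y_test)"
   ]
  },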
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Clustering"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 49,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[1 1 1 1 1 0 0 0 0 0 2 2 2 2 2]\n",
      "[0 0 0 0 0 1 1 1 1 1 2 2 2 2 2]\n",
      "[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      " 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n",
      " 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 2\n",
      " 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2\n",
      " 2 2]\n"
     ]
    }
   ],
   "source": [
    "from sklearn import cluster, datasets\n",
    "\n",
    "iris = datasets.load_iris()\n",
    "X_iris = iris.data\n",
    "y_iris = iris.target\n",
    "\n",
    "# Fit k-means with one cluster per iris species.\n",
    "k_means = cluster.KMeans(n_clusters=3)\n",
    "k_means.fit(X_iris)\n",
    "\n",
    "# Cluster indices are arbitrary: compare every 10th label with the truth.\n",
    "print(k_means.labels_[::10])\n",
    "print(y_iris[::10])\n",
    "\n",
    "print(y_iris)"
   ]
  },
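  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Because the cluster indices above are an arbitrary permutation of the true class labels, raw accuracy is meaningless here; a permutation-invariant index such as the adjusted Rand score is the usual way to compare the two labelings. A minimal sketch:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn import metrics\n",
    "\n",
    "# 1.0 means identical partitions, ~0.0 a random labelling; the score is\n",
    "# invariant to permutations of the cluster indices.\n",
    "print(metrics.adjusted_rand_score(y_iris, k_means.labels_))"
   ]
  },
  {
   "cell_type":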
"code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.7.3" }, "toc": { "base_numbering": 1, "nav_menu": { "height": "12px", "width": "274px" }, "number_sections": true, "sideBar": true, "skip_h1_title": false, "title_cell": "Table of Contents", "title_sidebar": "Contents", "toc_cell": false, "toc_position": { "height": "533px", "left": "2px", "right": "auto", "top": "107px", "width": "165px" }, "toc_section_display": true, "toc_window_display": true } }, "nbformat": 4, "nbformat_minor": 2 }