{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Classification of Iris" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Package imports" ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "Using TensorFlow backend.\n" ] } ], "source": [ "# For building neural networks.\n", "import keras as kr\n", "\n", "# For interacting with data sets.\n", "import pandas as pd\n", "\n", "# For encoding categorical variables.\n", "import sklearn.preprocessing as pre\n", "\n", "# For splitting into training and test sets.\n", "import sklearn.model_selection as mod" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Load data" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "# Load the iris data set from a URL.\n", "df = pd.read_csv(\"https://raw.githubusercontent.com/ianmcloughlin/datasets/master/iris.csv\")" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
" ], "text/plain": [ " sepal_length sepal_width petal_length petal_width class\n", "0 5.1 3.5 1.4 0.2 setosa\n", "1 4.9 3.0 1.4 0.2 setosa\n", "2 4.7 3.2 1.3 0.2 setosa\n", "3 4.6 3.1 1.5 0.2 setosa\n", "4 5.0 3.6 1.4 0.2 setosa\n", ".. ... ... ... ... ...\n", "145 6.7 3.0 5.2 2.3 virginica\n", "146 6.3 2.5 5.0 1.9 virginica\n", "147 6.5 3.0 5.2 2.0 virginica\n", "148 6.2 3.4 5.4 2.3 virginica\n", "149 5.9 3.0 5.1 1.8 virginica\n", "\n", "[150 rows x 5 columns]" ] }, "execution_count": 3, "metadata": {}, "output_type": "execute_result" } ], "source": [ "df" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Inputs" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [], "source": [ "# Separate the inputs from the rest of the variables.\n", "inputs = df[['petal_length', 'petal_width', 'sepal_length', 'sepal_width']]" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
" ], "text/plain": [ " petal_length petal_width sepal_length sepal_width\n", "0 1.4 0.2 5.1 3.5\n", "1 1.4 0.2 4.9 3.0\n", "2 1.3 0.2 4.7 3.2\n", "3 1.5 0.2 4.6 3.1\n", "4 1.4 0.2 5.0 3.6\n", ".. ... ... ... ...\n", "145 5.2 2.3 6.7 3.0\n", "146 5.0 1.9 6.3 2.5\n", "147 5.2 2.0 6.5 3.0\n", "148 5.4 2.3 6.2 3.4\n", "149 5.1 1.8 5.9 3.0\n", "\n", "[150 rows x 4 columns]" ] }, "execution_count": 5, "metadata": {}, "output_type": "execute_result" } ], "source": [ "inputs" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Encoded outputs" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "$$\n", "\\begin{align*}\n", " setosa & \\rightarrow [1,0,0] \\\\\n", " versicolor & \\rightarrow [0,1,0] \\\\\n", " virginica & \\rightarrow [0,0,1]\n", "\\end{align*}\n", "$$" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array([[1, 0, 0],\n", " [1, 0, 0],\n", " [1, 0, 0],\n", " [1, 0, 0],\n", " [1, 0, 0],\n", " [1, 0, 0],\n", " [1, 0, 0],\n", " [1, 0, 0],\n", " [1, 0, 0],\n", " [1, 0, 0],\n", " [1, 0, 0],\n", " [1, 0, 0],\n", " [1, 0, 0],\n", " [1, 0, 0],\n", " [1, 0, 0],\n", " [1, 0, 0],\n", " [1, 0, 0],\n", " [1, 0, 0],\n", " [1, 0, 0],\n", " [1, 0, 0],\n", " [1, 0, 0],\n", " [1, 0, 0],\n", " [1, 0, 0],\n", " [1, 0, 0],\n", " [1, 0, 0],\n", " [1, 0, 0],\n", " [1, 0, 0],\n", " [1, 0, 0],\n", " [1, 0, 0],\n", " [1, 0, 0],\n", " [1, 0, 0],\n", " [1, 0, 0],\n", " [1, 0, 0],\n", " [1, 0, 0],\n", " [1, 0, 0],\n", " [1, 0, 0],\n", " [1, 0, 0],\n", " [1, 0, 0],\n", " [1, 0, 0],\n", " [1, 0, 0],\n", " [1, 0, 0],\n", " [1, 0, 0],\n", " [1, 0, 0],\n", " [1, 0, 0],\n", " [1, 0, 0],\n", " [1, 0, 0],\n", " [1, 0, 0],\n", " [1, 0, 0],\n", " [1, 0, 0],\n", " [1, 0, 0],\n", " [0, 1, 0],\n", " [0, 1, 0],\n", " [0, 1, 0],\n", " [0, 1, 0],\n", " [0, 1, 0],\n", " [0, 1, 0],\n", " [0, 1, 0],\n", " [0, 1, 0],\n", " [0, 1, 0],\n", " [0, 1, 0],\n", " [0, 1, 0],\n", " [0, 1, 0],\n", " [0, 1, 0],\n", " [0, 1, 0],\n", " [0, 1, 0],\n", " [0, 1, 0],\n", " [0, 1, 0],\n", " [0, 1, 0],\n", " [0, 1, 0],\n", " [0, 1, 0],\n", " [0, 1, 0],\n", " [0, 1, 0],\n", " [0, 1, 0],\n", " [0, 1, 0],\n", " [0, 1, 0],\n", " [0, 1, 0],\n", " [0, 1, 0],\n", " [0, 1, 0],\n", " [0, 1, 0],\n", " [0, 1, 0],\n", " [0, 1, 0],\n", " [0, 1, 0],\n", " [0, 1, 0],\n", " [0, 1, 0],\n", " [0, 1, 0],\n", " [0, 1, 0],\n", " [0, 1, 0],\n", " [0, 1, 0],\n", " [0, 1, 0],\n", " [0, 1, 0],\n", " [0, 1, 0],\n", " [0, 1, 0],\n", " [0, 1, 0],\n", " [0, 1, 0],\n", " [0, 1, 0],\n", " [0, 1, 0],\n", " [0, 1, 0],\n", " [0, 1, 0],\n", " [0, 1, 0],\n", " [0, 1, 0],\n", " [0, 0, 1],\n", " [0, 0, 1],\n", " [0, 0, 1],\n", " [0, 0, 1],\n", " [0, 0, 1],\n", " [0, 0, 1],\n", " [0, 0, 1],\n", " [0, 0, 1],\n", " [0, 0, 1],\n", " [0, 0, 1],\n", " [0, 0, 1],\n", " [0, 0, 1],\n", " [0, 0, 1],\n", " [0, 0, 1],\n", " [0, 0, 1],\n", " [0, 0, 1],\n", " [0, 0, 1],\n", " [0, 0, 1],\n", " [0, 0, 1],\n", " [0, 0, 1],\n", " [0, 0, 1],\n", " [0, 0, 1],\n", " [0, 0, 1],\n", " [0, 0, 1],\n", " [0, 0, 1],\n", " [0, 0, 1],\n", " [0, 0, 1],\n", " [0, 0, 1],\n", " [0, 0, 1],\n", " [0, 0, 1],\n", " [0, 0, 1],\n", " [0, 0, 1],\n", " [0, 0, 1],\n", " [0, 0, 1],\n", " [0, 0, 1],\n", " [0, 0, 1],\n", " [0, 0, 1],\n", " [0, 0, 1],\n", " [0, 0, 1],\n", " [0, 0, 1],\n", " [0, 0, 1],\n", " [0, 0, 1],\n", " [0, 0, 1],\n", " [0, 0, 1],\n", " [0, 0, 1],\n", " [0, 0, 1],\n", " [0, 0, 1],\n", " [0, 0, 1],\n", " [0, 0, 1],\n", " [0, 0, 1]])" ] }, "execution_count": 6, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# Encode 
the classes as above.\n", "encoder = pre.LabelBinarizer()\n", "encoder.fit(df['class'])\n", "outputs = encoder.transform(df['class'])\n", "\n", "outputs" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Idea\n", "\n", "The neural network will turn four floating point inputs into three \"floating point\" outputs.\n", "\n", "$$ [5.1, 3.5, 1.4, 0.2] \\rightarrow [0.8, 0.19, 0.01] $$\n", "\n", "$$ [5.1, 3.5, 1.4, 0.2] \\rightarrow [1, 0, 0] $$" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Build model" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:From C:\\Users\\mclou\\Anaconda3\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py:74: The name tf.get_default_graph is deprecated. Please use tf.compat.v1.get_default_graph instead.\n", "\n", "WARNING:tensorflow:From C:\\Users\\mclou\\Anaconda3\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py:517: The name tf.placeholder is deprecated. Please use tf.compat.v1.placeholder instead.\n", "\n", "WARNING:tensorflow:From C:\\Users\\mclou\\Anaconda3\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py:4138: The name tf.random_uniform is deprecated. Please use tf.random.uniform instead.\n", "\n", "WARNING:tensorflow:From C:\\Users\\mclou\\Anaconda3\\lib\\site-packages\\keras\\optimizers.py:790: The name tf.train.Optimizer is deprecated. Please use tf.compat.v1.train.Optimizer instead.\n", "\n", "WARNING:tensorflow:From C:\\Users\\mclou\\Anaconda3\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py:3295: The name tf.log is deprecated. Please use tf.math.log instead.\n", "\n" ] } ], "source": [ "# Start a neural network, building it by layers.\n", "model = kr.models.Sequential()\n", "\n", "# Add a hidden layer with x neurons and an input layer with 4.\n", "model.add(kr.layers.Dense(units=30, activation='relu', input_dim=4))\n", "# Add a three neuron output layer.\n", "model.add(kr.layers.Dense(units=3, activation='softmax'))\n", "\n", "# Build the graph.\n", "model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Split" ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [], "source": [ "# Split the inputs and outputs into training and test sets.\n", "inputs_train, inputs_test, outputs_train, outputs_test = mod.train_test_split(inputs, outputs, test_size=0.2)" ] }, { "cell_type": "code", "execution_count": 9, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "petal_length 4.8\n", "petal_width 1.4\n", "sepal_length 6.8\n", "sepal_width 2.8\n", "Name: 76, dtype: float64" ] }, "execution_count": 9, "metadata": {}, "output_type": "execute_result" } ], "source": [ "inputs_test.iloc[0]" ] }, { "cell_type": "code", "execution_count": 10, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:From C:\\Users\\mclou\\Anaconda3\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py:2741: The name tf.Session is deprecated. Please use tf.compat.v1.Session instead.\n", "\n", "WARNING:tensorflow:From C:\\Users\\mclou\\Anaconda3\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py:174: The name tf.get_default_session is deprecated. 
Please use tf.compat.v1.get_default_session instead.\n", "\n", "WARNING:tensorflow:From C:\\Users\\mclou\\Anaconda3\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py:181: The name tf.ConfigProto is deprecated. Please use tf.compat.v1.ConfigProto instead.\n", "\n", "WARNING:tensorflow:From C:\\Users\\mclou\\Anaconda3\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py:190: The name tf.global_variables is deprecated. Please use tf.compat.v1.global_variables instead.\n", "\n", "WARNING:tensorflow:From C:\\Users\\mclou\\Anaconda3\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py:199: The name tf.is_variable_initialized is deprecated. Please use tf.compat.v1.is_variable_initialized instead.\n", "\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "C:\\Users\\mclou\\Anaconda3\\lib\\site-packages\\ipykernel_launcher.py:1: FutureWarning: Method .as_matrix will be removed in a future version. Use .values instead.\n", " \"\"\"Entry point for launching an IPython kernel.\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:From C:\\Users\\mclou\\Anaconda3\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py:206: The name tf.variables_initializer is deprecated. Please use tf.compat.v1.variables_initializer instead.\n", "\n" ] }, { "data": { "text/plain": [ "array([[0.02864362, 0.96718645, 0.00416994]], dtype=float32)" ] }, "execution_count": 10, "metadata": {}, "output_type": "execute_result" } ], "source": [ "model.predict(inputs_test.as_matrix()[0:1])" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Train" ] }, { "cell_type": "code", "execution_count": 11, "metadata": { "scrolled": true }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "WARNING:tensorflow:From C:\\Users\\mclou\\Anaconda3\\lib\\site-packages\\tensorflow_core\\python\\ops\\math_grad.py:1424: where (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.\n", "Instructions for updating:\n", "Use tf.where in 2.0, which has the same broadcast rule as np.where\n", "WARNING:tensorflow:From C:\\Users\\mclou\\Anaconda3\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py:986: The name tf.assign_add is deprecated. Please use tf.compat.v1.assign_add instead.\n", "\n", "WARNING:tensorflow:From C:\\Users\\mclou\\Anaconda3\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py:973: The name tf.assign is deprecated. 
Please use tf.compat.v1.assign instead.\n", "\n", "Epoch 1/15\n", "120/120 [==============================] - 1s 10ms/step - loss: 1.6282 - acc: 0.3417\n", "Epoch 2/15\n", "120/120 [==============================] - 0s 671us/step - loss: 1.0749 - acc: 0.4250\n", "Epoch 3/15\n", "120/120 [==============================] - 0s 767us/step - loss: 0.8976 - acc: 0.5167\n", "Epoch 4/15\n", "120/120 [==============================] - 0s 650us/step - loss: 0.8089 - acc: 0.7500\n", "Epoch 5/15\n", "120/120 [==============================] - 0s 850us/step - loss: 0.7383 - acc: 0.7750\n", "Epoch 6/15\n", "120/120 [==============================] - 0s 813us/step - loss: 0.6916 - acc: 0.8500\n", "Epoch 7/15\n", "120/120 [==============================] - 0s 700us/step - loss: 0.6547 - acc: 0.8583\n", "Epoch 8/15\n", "120/120 [==============================] - 0s 333us/step - loss: 0.6256 - acc: 0.8167\n", "Epoch 9/15\n", "120/120 [==============================] - 0s 342us/step - loss: 0.5870 - acc: 0.7667\n", "Epoch 10/15\n", "120/120 [==============================] - 0s 550us/step - loss: 0.5789 - acc: 0.8667\n", "Epoch 11/15\n", "120/120 [==============================] - 0s 817us/step - loss: 0.5514 - acc: 0.8500\n", "Epoch 12/15\n", "120/120 [==============================] - 0s 900us/step - loss: 0.5314 - acc: 0.8917\n", "Epoch 13/15\n", "120/120 [==============================] - 0s 746us/step - loss: 0.5162 - acc: 0.8417\n", "Epoch 14/15\n", "120/120 [==============================] - 0s 392us/step - loss: 0.5029 - acc: 0.8583\n", "Epoch 15/15\n", "120/120 [==============================] - 0s 292us/step - loss: 0.4830 - acc: 0.8417\n" ] }, { "data": { "text/plain": [ "" ] }, "execution_count": 11, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# Train the neural network.\n", "model.fit(inputs_train, outputs_train, epochs=15, batch_size=10)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Predict" ] }, { "cell_type": "code", "execution_count": 12, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "C:\\Users\\mclou\\Anaconda3\\lib\\site-packages\\ipykernel_launcher.py:1: FutureWarning: Method .as_matrix will be removed in a future version. 
Use .values instead.\n", " \"\"\"Entry point for launching an IPython kernel.\n" ] }, { "data": { "text/plain": [ "array([[0.07707678, 0.5562058 , 0.36671743]], dtype=float32)" ] }, "execution_count": 12, "metadata": {}, "output_type": "execute_result" } ], "source": [ "model.predict(inputs_test.as_matrix()[0:1])" ] }, { "cell_type": "code", "execution_count": 13, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array(['versicolor', 'virginica', 'setosa', 'setosa', 'setosa',\n", " 'virginica', 'setosa', 'virginica', 'versicolor', 'versicolor',\n", " 'virginica', 'versicolor', 'virginica', 'setosa', 'setosa',\n", " 'versicolor', 'setosa', 'setosa', 'versicolor', 'virginica',\n", " 'setosa', 'versicolor', 'versicolor', 'setosa', 'setosa',\n", " 'virginica', 'virginica', 'versicolor', 'virginica', 'virginica'],\n", " dtype='
\n", "" ], "text/plain": [ " petal_length petal_width sepal_length sepal_width\n", "0 0.387221 0.484604 1.353335 0.761078\n", "1 0.394136 -0.687376 -2.114471 -0.958366\n", "2 0.925573 0.351229 2.526484 -0.225238\n", "3 0.675388 -0.325813 -0.203693 -0.997615\n", "4 0.221818 -1.285400 -0.049826 1.718668\n", ".. ... ... ... ...\n", "115 0.015610 -1.118584 -0.788006 0.266962\n", "116 -1.387309 0.580464 0.304557 1.042104\n", "117 1.109262 0.787130 0.996607 0.446290\n", "118 0.611141 -0.274729 0.178736 -0.899852\n", "119 1.388267 0.856468 -1.735097 0.208429\n", "\n", "[120 rows x 4 columns]" ] }, "execution_count": 17, "metadata": {}, "output_type": "execute_result" } ], "source": [ "pca = dec.PCA(n_components=4, whiten=True)\n", "pca.fit(inputs_train)\n", "inputs_train_white = pd.DataFrame(pca.transform(inputs_train), columns=inputs_train.columns)\n", "inputs_train_white" ] }, { "cell_type": "code", "execution_count": 18, "metadata": {}, "outputs": [], "source": [ "# Start a neural network, building it by layers.\n", "model = kr.models.Sequential()\n", "\n", "# Add a hidden layer with x neurons and an input layer with 4.\n", "model.add(kr.layers.Dense(units=30, activation='relu', input_dim=4))\n", "# Add a three neuron output layer.\n", "model.add(kr.layers.Dense(units=3, activation='softmax'))\n", "\n", "# Build the graph.\n", "model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])" ] }, { "cell_type": "code", "execution_count": 19, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Epoch 1/15\n", "120/120 [==============================] - 1s 8ms/step - loss: 1.0737 - acc: 0.4500\n", "Epoch 2/15\n", "120/120 [==============================] - 0s 283us/step - loss: 1.0342 - acc: 0.5667\n", "Epoch 3/15\n", "120/120 [==============================] - 0s 392us/step - loss: 0.9963 - acc: 0.6083\n", "Epoch 4/15\n", "120/120 [==============================] - 0s 291us/step - loss: 0.9618 - acc: 0.6333\n", "Epoch 5/15\n", "120/120 [==============================] - 0s 417us/step - loss: 0.9288 - acc: 0.6500\n", "Epoch 6/15\n", "120/120 [==============================] - 0s 325us/step - loss: 0.8977 - acc: 0.6667\n", "Epoch 7/15\n", "120/120 [==============================] - 0s 496us/step - loss: 0.8687 - acc: 0.6833\n", "Epoch 8/15\n", "120/120 [==============================] - 0s 881us/step - loss: 0.8403 - acc: 0.6917\n", "Epoch 9/15\n", "120/120 [==============================] - 0s 792us/step - loss: 0.8140 - acc: 0.7333\n", "Epoch 10/15\n", "120/120 [==============================] - 0s 933us/step - loss: 0.7887 - acc: 0.7333\n", "Epoch 11/15\n", "120/120 [==============================] - 0s 325us/step - loss: 0.7646 - acc: 0.7583\n", "Epoch 12/15\n", "120/120 [==============================] - 0s 342us/step - loss: 0.7417 - acc: 0.7500\n", "Epoch 13/15\n", "120/120 [==============================] - 0s 267us/step - loss: 0.7200 - acc: 0.7917\n", "Epoch 14/15\n", "120/120 [==============================] - 0s 375us/step - loss: 0.6999 - acc: 0.8000\n", "Epoch 15/15\n", "120/120 [==============================] - 0s 292us/step - loss: 0.6800 - acc: 0.8000\n" ] }, { "data": { "text/plain": [ "" ] }, "execution_count": 19, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# Train the neural network.\n", "model.fit(inputs_train_white, outputs_train, epochs=15, batch_size=10)" ] }, { "cell_type": "code", "execution_count": 20, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array(['versicolor', 'virginica', 
'setosa', 'setosa', 'setosa',\n", " 'virginica', 'setosa', 'virginica', 'versicolor', 'versicolor',\n", " 'virginica', 'virginica', 'virginica', 'setosa', 'setosa',\n", " 'versicolor', 'setosa', 'setosa', 'virginica', 'virginica',\n", " 'setosa', 'virginica', 'versicolor', 'setosa', 'setosa',\n", " 'virginica', 'virginica', 'versicolor', 'virginica', 'virginica'],\n", " dtype='