{ "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# These are all the modules we'll be using later. Make sure you can import them\n", "# before proceeding further.\n", "from __future__ import print_function\n", "import numpy as np\n", "import tensorflow as tf\n", "from six.moves import cPickle as pickle\n", "from six.moves import range" ] }, { "cell_type": "code", "execution_count": 2, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Training set (200000, 28, 28) (200000,)\n", "Validation set (10000, 28, 28) (10000,)\n", "Test set (10000, 28, 28) (10000,)\n" ] } ], "source": [ "# The folder when dumped big 3D array has been stored from previous excercise\n", "data_root = 'D:\\\\1_Workspaces\\\\UNDER_VCS\\\\github\\\\1_ML_NN\\\\python_with_math\\\\data'\n", "#a big 3D array to a big file. \n", "pickle_file = 'notMNIST.pickle'\n", "\n", "with open(data_root + '\\\\' + pickle_file, 'rb') as f:\n", " save = pickle.load(f)\n", " train_dataset = save['train_dataset']\n", " train_labels = save['train_labels']\n", " valid_dataset = save['valid_dataset']\n", " valid_labels = save['valid_labels']\n", " test_dataset = save['test_dataset']\n", " test_labels = save['test_labels']\n", " del save # hint to help gc free up memory\n", " print('Training set', train_dataset.shape, train_labels.shape)\n", " print('Validation set', valid_dataset.shape, valid_labels.shape)\n", " print('Test set', test_dataset.shape, test_labels.shape)" ] }, { "cell_type": "code", "execution_count": 3, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Training set (200000, 784) (200000, 10)\n", "Validation set (10000, 784) (10000, 10)\n", "Test set (10000, 784) (10000, 10)\n" ] } ], "source": [ "image_size = 28\n", "num_labels = 10\n", "\n", "def reformat(dataset, labels):\n", " dataset = dataset.reshape((-1, image_size * image_size)).astype(np.float32)\n", " # Map 0 to [1.0, 0.0, 0.0 ...], 1 to [0.0, 1.0, 0.0 ...]\n", " labels = (np.arange(num_labels) == labels[:,None]).astype(np.float32)\n", " return dataset, labels\n", "train_dataset, train_labels = reformat(train_dataset, train_labels)\n", "valid_dataset, valid_labels = reformat(valid_dataset, valid_labels)\n", "test_dataset, test_labels = reformat(test_dataset, test_labels)\n", "print('Training set', train_dataset.shape, train_labels.shape)\n", "print('Validation set', valid_dataset.shape, valid_labels.shape)\n", "print('Test set', test_dataset.shape, test_labels.shape)" ] }, { "cell_type": "code", "execution_count": 4, "metadata": { "collapsed": true }, "outputs": [], "source": [ "# It loads all the data into TensorFlow and build the computation graph corresponding to our training:\n", "\n", "# With gradient descent training, even this much data is prohibitive.\n", "# Subset the training data for faster turnaround.\n", "train_subset = 10000\n", "\n", "graph = tf.Graph()\n", "with graph.as_default():\n", "\n", " # Input data.\n", " # Load the training, validation and test data into constants that are\n", " # attached to the graph.\n", " tf_train_dataset = tf.constant(train_dataset[:train_subset, :])\n", " tf_train_labels = tf.constant(train_labels[:train_subset])\n", " tf_valid_dataset = tf.constant(valid_dataset)\n", " tf_test_dataset = tf.constant(test_dataset)\n", " \n", " # Variables.\n", " # These are the parameters that we are going to be training. 
The weight\n", " # matrix will be initialized using random values following a (truncated)\n", " # normal distribution. The biases get initialized to zero.\n", " weights = tf.Variable(\n", " tf.truncated_normal([image_size * image_size, num_labels]))\n", " biases = tf.Variable(tf.zeros([num_labels]))\n", " \n", " # Training computation.\n", " # We multiply the inputs with the weight matrix, and add biases. We compute\n", " # the softmax and cross-entropy (it's one operation in TensorFlow, because\n", " # it's very common, and it can be optimized). We take the average of this\n", " # cross-entropy across all training examples: that's our loss.\n", " logits = tf.matmul(tf_train_dataset, weights) + biases\n", " loss = tf.reduce_mean(\n", " tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=logits))\n", " \n", " # Optimizer.\n", " # We are going to find the minimum of this loss using gradient descent.\n", " optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)\n", " \n", " # Predictions for the training, validation, and test data.\n", " # These are not part of training, but merely here so that we can report\n", " # accuracy figures as we train.\n", " train_prediction = tf.nn.softmax(logits)\n", " valid_prediction = tf.nn.softmax(\n", " tf.matmul(tf_valid_dataset, weights) + biases)\n", " test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases)" ] }, { "cell_type": "code", "execution_count": 5, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Tensorflow graph initialized\n", "Loss at step 0: 19.005764\n", "Training accuracy: 8.6%\n", "Validation accuracy: 9.5%\n", "Loss at step 100: 2.306310\n", "Training accuracy: 71.9%\n", "Validation accuracy: 70.7%\n", "Loss at step 200: 1.862624\n", "Training accuracy: 75.1%\n", "Validation accuracy: 72.8%\n", "Loss at step 300: 1.619365\n", "Training accuracy: 76.3%\n", "Validation accuracy: 73.7%\n", "Loss at step 400: 1.452627\n", "Training accuracy: 77.3%\n", "Validation accuracy: 74.1%\n", "Loss at step 500: 1.328036\n", "Training accuracy: 77.8%\n", "Validation accuracy: 74.4%\n", "Loss at step 600: 1.230201\n", "Training accuracy: 78.4%\n", "Validation accuracy: 74.8%\n", "Loss at step 700: 1.151069\n", "Training accuracy: 79.0%\n", "Validation accuracy: 74.9%\n", "Loss at step 800: 1.085528\n", "Training accuracy: 79.5%\n", "Validation accuracy: 75.0%\n", "Loss at step 900: 1.030110\n", "Training accuracy: 79.9%\n", "Validation accuracy: 75.1%\n", "Loss at step 1000: 0.982457\n", "Training accuracy: 80.3%\n", "Validation accuracy: 75.3%\n", "Loss at step 1100: 0.940917\n", "Training accuracy: 80.7%\n", "Validation accuracy: 75.3%\n", "Loss at step 1200: 0.904299\n", "Training accuracy: 81.0%\n", "Validation accuracy: 75.5%\n", "Loss at step 1300: 0.871726\n", "Training accuracy: 81.6%\n", "Validation accuracy: 75.5%\n", "Loss at step 1400: 0.842531\n", "Training accuracy: 81.8%\n", "Validation accuracy: 75.6%\n", "Loss at step 1500: 0.816180\n", "Training accuracy: 82.2%\n", "Validation accuracy: 75.7%\n", "Loss at step 1600: 0.792249\n", "Training accuracy: 82.4%\n", "Validation accuracy: 75.9%\n", "Loss at step 1700: 0.770396\n", "Training accuracy: 82.6%\n", "Validation accuracy: 76.0%\n", "Loss at step 1800: 0.750347\n", "Training accuracy: 82.8%\n", "Validation accuracy: 76.2%\n", "Loss at step 1900: 0.731872\n", "Training accuracy: 83.0%\n", "Validation accuracy: 76.2%\n", "Loss at step 2000: 0.714789\n", "Training accuracy: 83.3%\n", 
"Validation accuracy: 76.2%\n", "Loss at step 2100: 0.698939\n", "Training accuracy: 83.5%\n", "Validation accuracy: 76.3%\n", "Loss at step 2200: 0.684192\n", "Training accuracy: 83.7%\n", "Validation accuracy: 76.4%\n", "Loss at step 2300: 0.670434\n", "Training accuracy: 84.0%\n", "Validation accuracy: 76.5%\n", "Loss at step 2400: 0.657569\n", "Training accuracy: 84.1%\n", "Validation accuracy: 76.4%\n", "Loss at step 2500: 0.645511\n", "Training accuracy: 84.3%\n", "Validation accuracy: 76.4%\n", "Loss at step 2600: 0.634187\n", "Training accuracy: 84.5%\n", "Validation accuracy: 76.5%\n", "Loss at step 2700: 0.623530\n", "Training accuracy: 84.7%\n", "Validation accuracy: 76.4%\n", "Loss at step 2800: 0.613483\n", "Training accuracy: 84.9%\n", "Validation accuracy: 76.4%\n", "Loss at step 2900: 0.603993\n", "Training accuracy: 85.0%\n", "Validation accuracy: 76.5%\n", "Loss at step 3000: 0.595014\n", "Training accuracy: 85.2%\n", "Validation accuracy: 76.5%\n", "Loss at step 3100: 0.586504\n", "Training accuracy: 85.4%\n", "Validation accuracy: 76.5%\n", "Loss at step 3200: 0.578426\n", "Training accuracy: 85.6%\n", "Validation accuracy: 76.6%\n", "Loss at step 3300: 0.570746\n", "Training accuracy: 85.8%\n", "Validation accuracy: 76.6%\n", "Loss at step 3400: 0.563433\n", "Training accuracy: 85.8%\n", "Validation accuracy: 76.6%\n", "Loss at step 3500: 0.556461\n", "Training accuracy: 86.0%\n", "Validation accuracy: 76.7%\n", "Loss at step 3600: 0.549805\n", "Training accuracy: 86.1%\n", "Validation accuracy: 76.7%\n", "Loss at step 3700: 0.543443\n", "Training accuracy: 86.2%\n", "Validation accuracy: 76.6%\n", "Loss at step 3800: 0.537355\n", "Training accuracy: 86.4%\n", "Validation accuracy: 76.7%\n", "Loss at step 3900: 0.531522\n", "Training accuracy: 86.5%\n", "Validation accuracy: 76.7%\n", "Loss at step 4000: 0.525927\n", "Training accuracy: 86.5%\n", "Validation accuracy: 76.7%\n", "Loss at step 4100: 0.520556\n", "Training accuracy: 86.7%\n", "Validation accuracy: 76.8%\n", "Loss at step 4200: 0.515394\n", "Training accuracy: 86.8%\n", "Validation accuracy: 76.8%\n", "Loss at step 4300: 0.510427\n", "Training accuracy: 86.9%\n", "Validation accuracy: 76.8%\n", "Loss at step 4400: 0.505645\n", "Training accuracy: 87.0%\n", "Validation accuracy: 76.9%\n", "Loss at step 4500: 0.501034\n", "Training accuracy: 87.1%\n", "Validation accuracy: 76.9%\n", "Loss at step 4600: 0.496588\n", "Training accuracy: 87.3%\n", "Validation accuracy: 77.0%\n", "Loss at step 4700: 0.492295\n", "Training accuracy: 87.5%\n", "Validation accuracy: 77.0%\n", "Loss at step 4800: 0.488147\n", "Training accuracy: 87.6%\n", "Validation accuracy: 77.0%\n", "Loss at step 4900: 0.484136\n", "Training accuracy: 87.7%\n", "Validation accuracy: 77.0%\n", "Loss at step 5000: 0.480255\n", "Training accuracy: 87.8%\n", "Validation accuracy: 77.0%\n", "Loss at step 5100: 0.476498\n", "Training accuracy: 87.9%\n", "Validation accuracy: 76.9%\n", "Loss at step 5200: 0.472857\n", "Training accuracy: 88.0%\n", "Validation accuracy: 77.0%\n", "Loss at step 5300: 0.469328\n", "Training accuracy: 88.0%\n", "Validation accuracy: 76.9%\n", "Loss at step 5400: 0.465905\n", "Training accuracy: 88.1%\n", "Validation accuracy: 77.0%\n", "Loss at step 5500: 0.462581\n", "Training accuracy: 88.2%\n", "Validation accuracy: 77.0%\n", "Loss at step 5600: 0.459354\n", "Training accuracy: 88.2%\n", "Validation accuracy: 77.0%\n", "Loss at step 5700: 0.456219\n", "Training accuracy: 88.2%\n", "Validation accuracy: 77.0%\n", "Loss 
at step 5800: 0.453170\n", "Training accuracy: 88.3%\n", "Validation accuracy: 77.0%\n", "Loss at step 5900: 0.450206\n", "Training accuracy: 88.3%\n", "Validation accuracy: 77.1%\n", "Loss at step 6000: 0.447320\n", "Training accuracy: 88.4%\n", "Validation accuracy: 77.1%\n", "Loss at step 6100: 0.444511\n", "Training accuracy: 88.4%\n", "Validation accuracy: 77.2%\n", "Loss at step 6200: 0.441775\n", "Training accuracy: 88.5%\n", "Validation accuracy: 77.2%\n", "Loss at step 6300: 0.439109\n", "Training accuracy: 88.5%\n", "Validation accuracy: 77.2%\n", "Loss at step 6400: 0.436509\n", "Training accuracy: 88.6%\n", "Validation accuracy: 77.2%\n", "Loss at step 6500: 0.433974\n", "Training accuracy: 88.6%\n", "Validation accuracy: 77.2%\n", "Loss at step 6600: 0.431500\n", "Training accuracy: 88.7%\n", "Validation accuracy: 77.2%\n", "Loss at step 6700: 0.429086\n", "Training accuracy: 88.8%\n", "Validation accuracy: 77.2%\n", "Loss at step 6800: 0.426728\n", "Training accuracy: 88.8%\n", "Validation accuracy: 77.2%\n", "Loss at step 6900: 0.424425\n", "Training accuracy: 88.9%\n", "Validation accuracy: 77.2%\n", "Loss at step 7000: 0.422174\n", "Training accuracy: 89.0%\n", "Validation accuracy: 77.2%\n", "Loss at step 7100: 0.419974\n", "Training accuracy: 89.1%\n", "Validation accuracy: 77.1%\n", "Loss at step 7200: 0.417823\n", "Training accuracy: 89.1%\n", "Validation accuracy: 77.2%\n", "Loss at step 7300: 0.415718\n", "Training accuracy: 89.2%\n", "Validation accuracy: 77.2%\n", "Loss at step 7400: 0.413659\n", "Training accuracy: 89.3%\n", "Validation accuracy: 77.2%\n", "Loss at step 7500: 0.411643\n", "Training accuracy: 89.3%\n", "Validation accuracy: 77.2%\n", "Loss at step 7600: 0.409670\n", "Training accuracy: 89.4%\n", "Validation accuracy: 77.2%\n", "Loss at step 7700: 0.407737\n", "Training accuracy: 89.5%\n", "Validation accuracy: 77.2%\n", "Loss at step 7800: 0.405843\n", "Training accuracy: 89.5%\n", "Validation accuracy: 77.2%\n", "Loss at step 7900: 0.403987\n", "Training accuracy: 89.6%\n", "Validation accuracy: 77.2%\n", "Loss at step 8000: 0.402168\n", "Training accuracy: 89.7%\n", "Validation accuracy: 77.2%\n", "Loss at step 8100: 0.400384\n", "Training accuracy: 89.7%\n", "Validation accuracy: 77.2%\n", "Loss at step 8200: 0.398635\n", "Training accuracy: 89.8%\n", "Validation accuracy: 77.2%\n", "Loss at step 8300: 0.396918\n", "Training accuracy: 89.8%\n", "Validation accuracy: 77.1%\n", "Loss at step 8400: 0.395234\n", "Training accuracy: 89.8%\n", "Validation accuracy: 77.1%\n", "Loss at step 8500: 0.393582\n", "Training accuracy: 89.8%\n", "Validation accuracy: 77.2%\n", "Loss at step 8600: 0.391959\n", "Training accuracy: 89.9%\n", "Validation accuracy: 77.2%\n", "Loss at step 8700: 0.390365\n", "Training accuracy: 89.9%\n", "Validation accuracy: 77.2%\n", "Loss at step 8800: 0.388800\n", "Training accuracy: 90.0%\n", "Validation accuracy: 77.2%\n", "Loss at step 8900: 0.387263\n", "Training accuracy: 90.0%\n", "Validation accuracy: 77.2%\n", "Loss at step 9000: 0.385752\n", "Training accuracy: 90.1%\n", "Validation accuracy: 77.3%\n", "Loss at step 9100: 0.384267\n", "Training accuracy: 90.1%\n", "Validation accuracy: 77.2%\n", "Loss at step 9200: 0.382808\n", "Training accuracy: 90.2%\n", "Validation accuracy: 77.2%\n", "Loss at step 9300: 0.381373\n", "Training accuracy: 90.2%\n", "Validation accuracy: 77.2%\n", "Loss at step 9400: 0.379962\n", "Training accuracy: 90.3%\n", "Validation accuracy: 77.2%\n", "Loss at step 9500: 0.378574\n", "Training 
accuracy: 90.3%\n", "Validation accuracy: 77.2%\n", "Loss at step 9600: 0.377209\n", "Training accuracy: 90.3%\n", "Validation accuracy: 77.2%\n", "Loss at step 9700: 0.375866\n", "Training accuracy: 90.3%\n", "Validation accuracy: 77.2%\n", "Loss at step 9800: 0.374544\n", "Training accuracy: 90.4%\n", "Validation accuracy: 77.2%\n", "Loss at step 9900: 0.373243\n", "Training accuracy: 90.4%\n", "Validation accuracy: 77.2%\n", "Test accuracy: 84.2%\n" ] } ], "source": [ "num_steps = 10000\n", "\n", "def accuracy(predictions, labels):\n", " return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))\n", " / predictions.shape[0])\n", "\n", "# Run the training.\n", "with tf.Session(graph=graph) as session:\n", " # This is a one-time operation which ensures the parameters get initialized as\n", " # we described in the graph: random weights for the matrix, zeros for the\n", " # biases. \n", " tf.global_variables_initializer().run()\n", " print('Tensorflow graph initialized')\n", " for step in range(num_steps):\n", " # Run the computations. We tell .run() that we want to run the optimizer,\n", " # and get the loss value and the training predictions returned as numpy\n", " # arrays.\n", " _, l, predictions = session.run([optimizer, loss, train_prediction])\n", " if (step % 100 == 0):\n", " print('Loss at step %d: %f' % (step, l))\n", " print('Training accuracy: %.1f%%' % accuracy(\n", " predictions, train_labels[:train_subset, :]))\n", " # Calling .eval() on valid_prediction is basically like calling run(), but\n", " # just to get that one numpy array. Note that it recomputes all its graph\n", " # dependencies.\n", " print('Validation accuracy: %.1f%%' % accuracy(\n", " valid_prediction.eval(), valid_labels))\n", " print('Test accuracy: %.1f%%' % accuracy(test_prediction.eval(), test_labels))\n", "# TODO: plot a graph from the accuracy data" ] }, { "cell_type": "code", "execution_count": 6, "metadata": { "collapsed": true }, "outputs": [], "source": [ "# Let's now switch to stochastic gradient descent training instead, which is much faster.\n", "\n", "batch_size = 128\n", "\n", "graph = tf.Graph()\n", "with graph.as_default():\n", "\n", " # Input data. 
For the training data, we use a placeholder that will be fed\n", " # at run time with a training minibatch.\n", " tf_train_dataset = tf.placeholder(tf.float32,\n", " shape=(batch_size, image_size * image_size))\n", " tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))\n", " tf_valid_dataset = tf.constant(valid_dataset)\n", " tf_test_dataset = tf.constant(test_dataset)\n", " \n", " # Variables.\n", " weights = tf.Variable(\n", " tf.truncated_normal([image_size * image_size, num_labels]))\n", " biases = tf.Variable(tf.zeros([num_labels]))\n", " \n", " # Training computation.\n", " logits = tf.matmul(tf_train_dataset, weights) + biases\n", " loss = tf.reduce_mean(\n", " tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=logits))\n", " \n", " # Optimizer.\n", " optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)\n", " \n", " # Predictions for the training, validation, and test data.\n", " train_prediction = tf.nn.softmax(logits)\n", " valid_prediction = tf.nn.softmax(\n", " tf.matmul(tf_valid_dataset, weights) + biases)\n", " test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases)" ] }, { "cell_type": "code", "execution_count": 7, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Initialized\n", "Minibatch loss at step 0: 17.899033\n", "Minibatch accuracy: 7.8%\n", "Validation accuracy: 10.0%\n", "Minibatch loss at step 500: 1.193777\n", "Minibatch accuracy: 79.7%\n", "Validation accuracy: 75.0%\n", "Minibatch loss at step 1000: 1.287950\n", "Minibatch accuracy: 82.0%\n", "Validation accuracy: 76.5%\n", "Minibatch loss at step 1500: 0.656950\n", "Minibatch accuracy: 81.2%\n", "Validation accuracy: 77.4%\n", "Minibatch loss at step 2000: 0.924349\n", "Minibatch accuracy: 78.9%\n", "Validation accuracy: 77.3%\n", "Minibatch loss at step 2500: 1.161976\n", "Minibatch accuracy: 74.2%\n", "Validation accuracy: 78.2%\n", "Minibatch loss at step 3000: 0.851813\n", "Minibatch accuracy: 80.5%\n", "Validation accuracy: 78.8%\n", "Minibatch loss at step 3500: 0.921817\n", "Minibatch accuracy: 81.2%\n", "Validation accuracy: 78.6%\n", "Minibatch loss at step 4000: 0.835667\n", "Minibatch accuracy: 80.5%\n", "Validation accuracy: 79.0%\n", "Minibatch loss at step 4500: 0.808454\n", "Minibatch accuracy: 79.7%\n", "Validation accuracy: 79.4%\n", "Minibatch loss at step 5000: 0.658319\n", "Minibatch accuracy: 81.2%\n", "Validation accuracy: 79.5%\n", "Minibatch loss at step 5500: 0.857714\n", "Minibatch accuracy: 74.2%\n", "Validation accuracy: 79.5%\n", "Minibatch loss at step 6000: 0.938224\n", "Minibatch accuracy: 76.6%\n", "Validation accuracy: 79.7%\n", "Minibatch loss at step 6500: 0.562490\n", "Minibatch accuracy: 81.2%\n", "Validation accuracy: 80.0%\n", "Minibatch loss at step 7000: 0.773726\n", "Minibatch accuracy: 76.6%\n", "Validation accuracy: 80.2%\n", "Minibatch loss at step 7500: 0.964890\n", "Minibatch accuracy: 78.9%\n", "Validation accuracy: 80.0%\n", "Minibatch loss at step 8000: 1.113154\n", "Minibatch accuracy: 72.7%\n", "Validation accuracy: 80.1%\n", "Minibatch loss at step 8500: 0.644020\n", "Minibatch accuracy: 83.6%\n", "Validation accuracy: 80.6%\n", "Minibatch loss at step 9000: 0.761561\n", "Minibatch accuracy: 82.0%\n", "Validation accuracy: 80.0%\n", "Minibatch loss at step 9500: 0.601618\n", "Minibatch accuracy: 85.2%\n", "Validation accuracy: 80.9%\n", "Test accuracy: 86.6%\n" ] } ], "source": [ "num_steps = 10000\n", "\n", "with 
tf.Session(graph=graph) as session:\n", " tf.global_variables_initializer().run()\n", " print(\"Initialized\")\n", " for step in range(num_steps):\n", " # Pick an offset within the training data, which has been randomized.\n", " # Note: we could use better randomization across epochs.\n", " offset = (step * batch_size) % (train_labels.shape[0] - batch_size)\n", " # Generate a minibatch.\n", " batch_data = train_dataset[offset:(offset + batch_size), :]\n", " batch_labels = train_labels[offset:(offset + batch_size), :]\n", " # Prepare a dictionary telling the session where to feed the minibatch.\n", " # The key of the dictionary is the placeholder node of the graph to be fed,\n", " # and the value is the numpy array to feed to it.\n", " feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}\n", " _, l, predictions = session.run(\n", " [optimizer, loss, train_prediction], feed_dict=feed_dict)\n", " if (step % 500 == 0):\n", " print(\"Minibatch loss at step %d: %f\" % (step, l))\n", " print(\"Minibatch accuracy: %.1f%%\" % accuracy(predictions, batch_labels))\n", " print(\"Validation accuracy: %.1f%%\" % accuracy(\n", " valid_prediction.eval(), valid_labels))\n", " print(\"Test accuracy: %.1f%%\" % accuracy(test_prediction.eval(), test_labels))\n", "\n", "# TODO: measure the training time" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "# Turn the logistic regression example with SGD into a 1-hidden-layer neural network with rectified linear units (tf.nn.relu)\n", "# and 1024 hidden nodes. This model should improve your validation / test accuracy.\n", "\n",
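"# A minimal, untuned sketch of such a network: the hidden width comes from the\n", "# prompt above, while the 0.5 learning rate and the initialization simply mirror\n", "# the earlier cells and are assumptions, not tuned values.\n", "num_hidden = 1024\n", "\n", "hidden_graph = tf.Graph()\n", "with hidden_graph.as_default():\n", "\n", "    # Input data: minibatch placeholders for training, constants for validation/test.\n", "    tf_train_dataset = tf.placeholder(tf.float32,\n", "                                      shape=(batch_size, image_size * image_size))\n", "    tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))\n", "    tf_valid_dataset = tf.constant(valid_dataset)\n", "    tf_test_dataset = tf.constant(test_dataset)\n", "\n", "    # Variables: one hidden layer plus the output layer.\n", "    weights_1 = tf.Variable(\n", "        tf.truncated_normal([image_size * image_size, num_hidden]))\n", "    biases_1 = tf.Variable(tf.zeros([num_hidden]))\n", "    weights_2 = tf.Variable(tf.truncated_normal([num_hidden, num_labels]))\n", "    biases_2 = tf.Variable(tf.zeros([num_labels]))\n", "\n", "    # Training computation: affine -> ReLU -> affine, then softmax cross-entropy.\n", "    hidden = tf.nn.relu(tf.matmul(tf_train_dataset, weights_1) + biases_1)\n", "    logits = tf.matmul(hidden, weights_2) + biases_2\n", "    loss = tf.reduce_mean(\n", "        tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=logits))\n", "\n", "    # Optimizer.\n", "    optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)\n", "\n", "    # Predictions, reusing the same weights for the validation and test data.\n", "    train_prediction = tf.nn.softmax(logits)\n", "    valid_prediction = tf.nn.softmax(\n", "        tf.matmul(tf.nn.relu(tf.matmul(tf_valid_dataset, weights_1) + biases_1),\n", "                  weights_2) + biases_2)\n", "    test_prediction = tf.nn.softmax(\n", "        tf.matmul(tf.nn.relu(tf.matmul(tf_test_dataset, weights_1) + biases_1),\n", "                  weights_2) + biases_2)\n", "\n", "# Train it with the same minibatch feed_dict loop as in the SGD cell above,\n", "# opening the session with tf.Session(graph=hidden_graph)." ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.6.0" } }, "nbformat": 4, "nbformat_minor": 2 }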