{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "Deep Learning Models -- A collection of various deep learning architectures, models, and tips for TensorFlow and PyTorch in Jupyter Notebooks.\n", "- Author: Sebastian Raschka\n", "- GitHub Repository: https://github.com/rasbt/deeplearning-models" ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Sebastian Raschka \n", "\n", "CPython 3.6.1\n", "IPython 6.0.0\n", "\n", "tensorflow 1.2.0\n" ] } ], "source": [ "%load_ext watermark\n", "%watermark -a 'Sebastian Raschka' -v -p tensorflow" ] }, { "cell_type": "code", "execution_count": 2, "metadata": { "collapsed": true }, "outputs": [], "source": [ "import tensorflow as tf\n", "import numpy as np\n", "from tensorflow.examples.tutorials.mnist import input_data" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Model Zoo -- Multilayer Perceptron with Backpropagation from Scratch" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "This notebook contains three different approaches for training a simple 1-hidden layer multilayer perceptron using TensorFlow:\n", " \n", "- Gradient descent via the \"high-level\" `tf.train.GradientDescentOptimizer`\n", "- A lower-level implementation to perform backpropagation via `tf.gradients`\n", "- An implementation of backpropagation and gradient descent learning based on basic linear algebra operations" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 1. Gradient Descent Using `tf.train.GradientDescentOptimizer`" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Extracting ./train-images-idx3-ubyte.gz\n", "Extracting ./train-labels-idx1-ubyte.gz\n", "Extracting ./t10k-images-idx3-ubyte.gz\n", "Extracting ./t10k-labels-idx1-ubyte.gz\n" ] } ], "source": [ "# Dataset\n", "np.random.seed(123) # set seed for mnist shuffling\n", "mnist = input_data.read_data_sets(\"./\", one_hot=True)" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Epoch: 001 | AvgCost: 0.785 | Train/Valid ACC: 0.885/0.891\n", "Epoch: 002 | AvgCost: 0.370 | Train/Valid ACC: 0.906/0.915\n", "Epoch: 003 | AvgCost: 0.317 | Train/Valid ACC: 0.914/0.921\n", "Epoch: 004 | AvgCost: 0.289 | Train/Valid ACC: 0.922/0.925\n", "Epoch: 005 | AvgCost: 0.268 | Train/Valid ACC: 0.926/0.929\n", "Epoch: 006 | AvgCost: 0.250 | Train/Valid ACC: 0.931/0.933\n", "Epoch: 007 | AvgCost: 0.235 | Train/Valid ACC: 0.936/0.937\n", "Epoch: 008 | AvgCost: 0.221 | Train/Valid ACC: 0.939/0.941\n", "Epoch: 009 | AvgCost: 0.209 | Train/Valid ACC: 0.943/0.943\n", "Epoch: 010 | AvgCost: 0.198 | Train/Valid ACC: 0.947/0.948\n", "Test ACC: 0.945\n" ] } ], "source": [ "##########################\n", "### SETTINGS\n", "##########################\n", "\n", "# Hyperparameters\n", "learning_rate = 0.1\n", "training_epochs = 10\n", "batch_size = 64\n", "\n", "# Architecture\n", "n_hidden_1 = 128\n", "n_input = 784\n", "n_classes = 10\n", "\n", "\n", "##########################\n", "### GRAPH DEFINITION\n", "##########################\n", "\n", "g = tf.Graph()\n", "with g.as_default():\n", " \n", " tf.set_random_seed(123)\n", "\n", " # Input data\n", " tf_x = tf.placeholder(tf.float32, [None, n_input], name='features')\n", " tf_y = tf.placeholder(tf.float32, [None, n_classes], name='targets')\n", "\n", " # Model parameters\n", " weights = 
{\n", " 'h1': tf.Variable(tf.truncated_normal([n_input, n_hidden_1], stddev=0.1)),\n", " 'out': tf.Variable(tf.truncated_normal([n_hidden_1, n_classes], stddev=0.1))\n", " }\n", " biases = {\n", " 'b1': tf.Variable(tf.zeros([n_hidden_1])),\n", " 'out': tf.Variable(tf.zeros([n_classes]))\n", " }\n", "\n", " # Forward Propagation\n", " h1_z = tf.add(tf.matmul(tf_x, weights['h1']), biases['b1'])\n", " h1_act = tf.nn.sigmoid(h1_z)\n", " out_z = tf.matmul(h1_act, weights['out']) + biases['out']\n", " out_act = tf.nn.softmax(out_z, name='predicted_probabilities')\n", " out_labels = tf.argmax(out_z, axis=1, name='predicted_labels')\n", " \n", " ######################\n", " # Forward Propagation\n", " ######################\n", "\n", " loss = tf.nn.softmax_cross_entropy_with_logits(logits=out_z, labels=tf_y)\n", " cost = tf.reduce_mean(loss, name='cost')\n", " \n", " ##################\n", " # Backpropagation\n", " ##################\n", "\n", " optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\n", " train = optimizer.minimize(cost, name='train')\n", "\n", " ##############\n", " # Prediction\n", " ##############\n", "\n", " correct_prediction = tf.equal(tf.argmax(tf_y, 1), out_labels)\n", " accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name='accuracy')\n", "\n", " \n", "##########################\n", "### TRAINING & EVALUATION\n", "##########################\n", "\n", "with tf.Session(graph=g) as sess:\n", " sess.run(tf.global_variables_initializer())\n", "\n", " for epoch in range(training_epochs):\n", " avg_cost = 0.\n", " total_batch = mnist.train.num_examples // batch_size\n", "\n", " for i in range(total_batch):\n", " batch_x, batch_y = mnist.train.next_batch(batch_size)\n", " _, c = sess.run(['train', 'cost:0'], feed_dict={'features:0': batch_x,\n", " 'targets:0': batch_y})\n", " avg_cost += c\n", " \n", " train_acc = sess.run('accuracy:0', feed_dict={'features:0': mnist.train.images,\n", " 'targets:0': mnist.train.labels})\n", " valid_acc = sess.run('accuracy:0', feed_dict={'features:0': mnist.validation.images,\n", " 'targets:0': mnist.validation.labels}) \n", " \n", " print(\"Epoch: %03d | AvgCost: %.3f\" % (epoch + 1, avg_cost / (i + 1)), end=\"\")\n", " print(\" | Train/Valid ACC: %.3f/%.3f\" % (train_acc, valid_acc))\n", " \n", " test_acc = sess.run(accuracy, feed_dict={'features:0': mnist.test.images,\n", " 'targets:0': mnist.test.labels})\n", " print('Test ACC: %.3f' % test_acc)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 2. 
{ "cell_type": "markdown", "metadata": {}, "source": [ "## 2. Gradient Descent Using `tf.gradients` (low level)" ] },
{ "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Extracting ./train-images-idx3-ubyte.gz\n", "Extracting ./train-labels-idx1-ubyte.gz\n", "Extracting ./t10k-images-idx3-ubyte.gz\n", "Extracting ./t10k-labels-idx1-ubyte.gz\n" ] } ], "source": [ "# Dataset\n", "np.random.seed(123) # set seed for mnist shuffling\n", "mnist = input_data.read_data_sets(\"./\", one_hot=True)" ] },
{ "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Epoch: 001 | AvgCost: 0.785 | Train/Valid ACC: 0.890/0.894\n", "Epoch: 002 | AvgCost: 0.370 | Train/Valid ACC: 0.906/0.912\n", "Epoch: 003 | AvgCost: 0.317 | Train/Valid ACC: 0.915/0.918\n", "Epoch: 004 | AvgCost: 0.289 | Train/Valid ACC: 0.922/0.926\n", "Epoch: 005 | AvgCost: 0.268 | Train/Valid ACC: 0.927/0.930\n", "Epoch: 006 | AvgCost: 0.250 | Train/Valid ACC: 0.932/0.934\n", "Epoch: 007 | AvgCost: 0.235 | Train/Valid ACC: 0.936/0.938\n", "Epoch: 008 | AvgCost: 0.221 | Train/Valid ACC: 0.940/0.941\n", "Epoch: 009 | AvgCost: 0.210 | Train/Valid ACC: 0.942/0.944\n", "Epoch: 010 | AvgCost: 0.198 | Train/Valid ACC: 0.946/0.947\n", "Test ACC: 0.945\n" ] } ],
"source": [ "##########################\n", "### SETTINGS\n", "##########################\n", "\n", "# Hyperparameters\n", "learning_rate = 0.1\n", "training_epochs = 10\n", "batch_size = 64\n", "\n", "# Architecture\n", "n_hidden_1 = 128\n", "n_input = 784\n", "n_classes = 10\n", "\n", "\n", "##########################\n", "### GRAPH DEFINITION\n", "##########################\n", "\n", "g = tf.Graph()\n", "with g.as_default():\n", " \n", " tf.set_random_seed(123)\n", "\n", " # Input data\n", " tf_x = tf.placeholder(tf.float32, [None, n_input], name='features')\n", " tf_y = tf.placeholder(tf.float32, [None, n_classes], name='targets')\n", "\n", " # Model parameters\n", " weights = {\n", " 'h1': tf.Variable(tf.truncated_normal([n_input, n_hidden_1], stddev=0.1)),\n", " 'out': tf.Variable(tf.truncated_normal([n_hidden_1, n_classes], stddev=0.1))\n", " }\n", " biases = {\n", " 'b1': tf.Variable(tf.zeros([n_hidden_1])),\n", " 'out': tf.Variable(tf.zeros([n_classes]))\n", " }\n", "\n", " ######################\n", " # Forward Propagation\n", " ######################\n", "\n", " h1_z = tf.add(tf.matmul(tf_x, weights['h1']), biases['b1'])\n", " h1_act = tf.nn.sigmoid(h1_z)\n", " out_z = tf.matmul(h1_act, weights['out']) + biases['out']\n", " out_act = tf.nn.softmax(out_z, name='predicted_probabilities')\n", " out_labels = tf.argmax(out_z, axis=1, name='predicted_labels')\n", " \n", " # Loss\n", " loss = tf.nn.softmax_cross_entropy_with_logits(logits=out_z, labels=tf_y)\n", " cost = tf.reduce_mean(loss, name='cost')\n", " \n", " ##################\n", " # Backpropagation\n", " ##################\n", "\n", " # Get gradients of the cost w.r.t. the model parameters\n", " # (a single tf.gradients call over all four variables would be equivalent)\n", " dc_dw_out, dc_db_out = tf.gradients(cost, [weights['out'], biases['out']])\n", " dc_dw_1, dc_db_1 = tf.gradients(cost, [weights['h1'], biases['b1']])\n", " \n", " # Update weights via manual gradient descent steps\n", " upd_w_1 = tf.assign(weights['h1'], weights['h1'] - learning_rate * dc_dw_1)\n", " upd_b_1 = tf.assign(biases['b1'], biases['b1'] - learning_rate * dc_db_1)\n", " upd_w_out = tf.assign(weights['out'], weights['out'] - learning_rate * dc_dw_out)\n", " upd_b_out = tf.assign(biases['out'], biases['out'] - learning_rate * dc_db_out)\n", " \n", " train = tf.group(upd_w_1, upd_b_1, upd_w_out, upd_b_out, name='train')\n", "\n", " ##############\n", " # Prediction\n", " ##############\n", "\n", " correct_prediction = tf.equal(tf.argmax(tf_y, 1), out_labels)\n", " accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name='accuracy')\n", "\n", " \n", "##########################\n", "### TRAINING & EVALUATION\n", "##########################\n", "\n", "with tf.Session(graph=g) as sess:\n", " sess.run(tf.global_variables_initializer())\n", "\n", " for epoch in range(training_epochs):\n", " avg_cost = 0.\n", " total_batch = mnist.train.num_examples // batch_size\n", "\n", " for i in range(total_batch):\n", " batch_x, batch_y = mnist.train.next_batch(batch_size)\n", " _, c = sess.run(['train', 'cost:0'], feed_dict={'features:0': batch_x,\n", " 'targets:0': batch_y})\n", " avg_cost += c\n", " \n", " train_acc = sess.run('accuracy:0', feed_dict={'features:0': mnist.train.images,\n", " 'targets:0': mnist.train.labels})\n", " valid_acc = sess.run('accuracy:0', feed_dict={'features:0': mnist.validation.images,\n", " 'targets:0': mnist.validation.labels})\n", " \n", " print(\"Epoch: %03d | AvgCost: %.3f\" % (epoch + 1, avg_cost / (i + 1)), end=\"\")\n", " print(\" | Train/Valid ACC: %.3f/%.3f\" % (train_acc, valid_acc))\n", " \n", " test_acc = sess.run(accuracy, feed_dict={'features:0': mnist.test.images,\n", " 'targets:0': mnist.test.labels})\n", " print('Test ACC: %.3f' % test_acc)" ] },
{ "cell_type": "markdown", "metadata": {}, "source": [ "## 3. Gradient Descent from Scratch (very low level)" ] },
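{ "cell_type": "markdown", "metadata": {}, "source": [ "The cell below computes all gradients by hand instead of calling `tf.gradients`. With the notation from the forward-pass summary at the top of the notebook, the combination of softmax output and cross-entropy cost yields the output-layer error term\n", "\n", "$$\\delta^{(out)} = \\frac{1}{n} \\big(A^{(out)} - Y\\big),$$\n", "\n", "which is propagated back through the output weights and the sigmoid derivative $\\sigma'(z) = \\sigma(z)\\big(1 - \\sigma(z)\\big)$:\n", "\n", "$$\\delta^{(h)} = \\delta^{(out)} {W^{(out)}}^\\top \\odot A^{(h)} \\odot \\big(1 - A^{(h)}\\big).$$\n", "\n", "The parameter gradients are then\n", "\n", "$$\\nabla_{W^{(out)}} J = {A^{(h)}}^\\top \\delta^{(out)}, \\qquad \\nabla_{b^{(out)}} J = \\sum_i \\delta^{(out)}_i,$$\n", "\n", "$$\\nabla_{W^{(h)}} J = X^\\top \\delta^{(h)}, \\qquad \\nabla_{b^{(h)}} J = \\sum_i \\delta^{(h)}_i,$$\n", "\n", "and each parameter $\\theta$ is updated via $\\theta \\leftarrow \\theta - \\eta \\nabla_\\theta J$ with learning rate $\\eta$. In the code, `sigma_out` and `sigma_h` correspond to $\\delta^{(out)}$ and $\\delta^{(h)}$, and the `grad_*` tensors to the gradients above." ] },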
{ "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Extracting ./train-images-idx3-ubyte.gz\n", "Extracting ./train-labels-idx1-ubyte.gz\n", "Extracting ./t10k-images-idx3-ubyte.gz\n", "Extracting ./t10k-labels-idx1-ubyte.gz\n" ] } ], "source": [ "# Dataset\n", "np.random.seed(123) # set seed for mnist shuffling\n", "mnist = input_data.read_data_sets(\"./\", one_hot=True)" ] },
{ "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Epoch: 001 | AvgCost: 0.785 | Train/Valid ACC: 0.884/0.892\n", "Epoch: 002 | AvgCost: 0.370 | Train/Valid ACC: 0.905/0.909\n", "Epoch: 003 | AvgCost: 0.317 | Train/Valid ACC: 0.914/0.916\n", "Epoch: 004 | AvgCost: 0.288 | Train/Valid ACC: 0.921/0.926\n", "Epoch: 005 | AvgCost: 0.268 | Train/Valid ACC: 0.927/0.931\n", "Epoch: 006 | AvgCost: 0.251 | Train/Valid ACC: 0.931/0.934\n", "Epoch: 007 | AvgCost: 0.235 | Train/Valid ACC: 0.936/0.937\n", "Epoch: 008 | AvgCost: 0.222 | Train/Valid ACC: 0.940/0.942\n", "Epoch: 009 | AvgCost: 0.209 | Train/Valid ACC: 0.944/0.944\n", "Epoch: 010 | AvgCost: 0.199 | Train/Valid ACC: 0.946/0.948\n", "Test ACC: 0.945\n" ] } ],
"source": [ "##########################\n", "### SETTINGS\n", "##########################\n", "\n", "# Hyperparameters\n", "learning_rate = 0.1\n", "training_epochs = 10\n", "batch_size = 64\n", "\n", "# Architecture\n", "n_hidden_1 = 128\n", "n_input = 784\n", "n_classes = 10\n", "\n", "\n", "##########################\n", "### GRAPH DEFINITION\n", "##########################\n", "\n", "g = tf.Graph()\n", "with g.as_default():\n", " \n", " tf.set_random_seed(123)\n", "\n", " # Input data\n", " tf_x = tf.placeholder(tf.float32, [None, n_input], name='features')\n", " tf_y = tf.placeholder(tf.float32, [None, n_classes], name='targets')\n", "\n", " # Model parameters\n", " weights = {\n", " 'h1': tf.Variable(tf.truncated_normal([n_input, n_hidden_1], stddev=0.1)),\n", " 'out': tf.Variable(tf.truncated_normal([n_hidden_1, n_classes], stddev=0.1))\n", " }\n", " biases = {\n", " 'b1': tf.Variable(tf.zeros([n_hidden_1])),\n", " 'out': tf.Variable(tf.zeros([n_classes]))\n", " }\n", "\n", " ######################\n", " # Forward Propagation\n", " ######################\n", " \n", " h1_z = tf.add(tf.matmul(tf_x, weights['h1']), biases['b1'])\n", " h1_act = tf.nn.sigmoid(h1_z)\n", " out_z = tf.matmul(h1_act, weights['out']) + biases['out']\n", " out_act = tf.nn.softmax(out_z, name='predicted_probabilities')\n", " out_labels = tf.argmax(out_z, axis=1, name='predicted_labels')\n", " \n", " # Loss\n", " loss = tf.nn.softmax_cross_entropy_with_logits(logits=out_z, labels=tf_y)\n", " cost = tf.reduce_mean(loss, name='cost')\n", " \n", " ##################\n", " # Backpropagation\n", " ##################\n", " \n", " # Get gradients\n", " \n", " # output-layer error term; dividing by the batch size matches\n", " # the mean taken in the cost\n", " # input/output dim: [n_samples, n_classlabels]\n", " sigma_out = (out_act - tf_y) / batch_size\n", " \n", " # derivative of the sigmoid activation: sigma'(z) = sigma(z) * (1 - sigma(z))\n", " # input/output dim: [n_samples, n_hidden_1]\n", " sigmoid_derivative_h1 = h1_act * (1. - h1_act)\n", " \n", " # input dim: [n_samples, n_classlabels] dot [n_classlabels, n_hidden]\n", " # output dim: [n_samples, n_hidden]\n", " sigma_h = (tf.matmul(sigma_out, tf.transpose(weights['out'])) *\n", " sigmoid_derivative_h1)\n", " \n", " # input dim: [n_features, n_samples] dot [n_samples, n_hidden]\n", " # output dim: [n_features, n_hidden]\n", " grad_w_h1 = tf.matmul(tf.transpose(tf_x), sigma_h)\n", " grad_b_h1 = tf.reduce_sum(sigma_h, axis=0)\n", "\n", " # input dim: [n_hidden, n_samples] dot [n_samples, n_classlabels]\n", " # output dim: [n_hidden, n_classlabels]\n", " grad_w_out = tf.matmul(tf.transpose(h1_act), sigma_out)\n", " grad_b_out = tf.reduce_sum(sigma_out, axis=0)\n", " \n", " # Update weights\n", " upd_w_1 = tf.assign(weights['h1'], weights['h1'] - learning_rate * grad_w_h1)\n", " upd_b_1 = tf.assign(biases['b1'], biases['b1'] - learning_rate * grad_b_h1)\n", " upd_w_out = tf.assign(weights['out'], weights['out'] - learning_rate * grad_w_out)\n", " upd_b_out = tf.assign(biases['out'], biases['out'] - learning_rate * grad_b_out)\n", " \n", " train = tf.group(upd_w_1, upd_b_1, upd_w_out, upd_b_out, name='train')\n", " \n", " ##############\n", " # Prediction\n", " ##############\n", "\n", " correct_prediction = tf.equal(tf.argmax(tf_y, 1), out_labels)\n", " accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name='accuracy')\n", "\n", " \n", "##########################\n", "### TRAINING & EVALUATION\n", "##########################\n", "\n", "with tf.Session(graph=g) as sess:\n", " sess.run(tf.global_variables_initializer())\n", "\n", " for epoch in range(training_epochs):\n", " avg_cost = 0.\n", " total_batch = mnist.train.num_examples // batch_size\n", "\n", " for i in range(total_batch):\n", " batch_x, batch_y = mnist.train.next_batch(batch_size)\n", " _, c = sess.run(['train', 'cost:0'], feed_dict={'features:0': batch_x,\n", " 'targets:0': batch_y})\n", " avg_cost += c\n", " \n", " train_acc = sess.run('accuracy:0', feed_dict={'features:0': mnist.train.images,\n", " 'targets:0': mnist.train.labels})\n", " valid_acc = sess.run('accuracy:0', feed_dict={'features:0': mnist.validation.images,\n", " 'targets:0': mnist.validation.labels})\n", " \n", " print(\"Epoch: %03d | AvgCost: %.3f\" % (epoch + 1, avg_cost / (i + 1)), end=\"\")\n", " print(\" | Train/Valid ACC: %.3f/%.3f\" % (train_acc, valid_acc))\n", " \n", " test_acc = sess.run(accuracy, feed_dict={'features:0': mnist.test.images,\n", " 'targets:0': mnist.test.labels})\n", " print('Test ACC: %.3f' % test_acc)" ] }
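, { "cell_type": "markdown", "metadata": {}, "source": [ "As a sanity check, the manual gradients can be compared against TensorFlow's automatic differentiation. The cell below is a minimal sketch; it assumes `g`, `cost`, `weights`, `grad_w_out`, `mnist`, and `batch_size` from the previous cells, and the names `autograd_w_out` and `max_abs_diff` are introduced here only for illustration. It recomputes the output-layer weight gradient via `tf.gradients` and prints the maximum absolute difference to the manual `grad_w_out` on a single batch, which should be zero up to floating-point error." ] },
{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# A minimal gradient check (assumes g, cost, weights, grad_w_out, mnist,\n", "# and batch_size from the previous cells): compare the manual gradient\n", "# against tf.gradients on one batch.\n", "with g.as_default():\n", " autograd_w_out = tf.gradients(cost, [weights['out']])[0]\n", " max_abs_diff = tf.reduce_max(tf.abs(autograd_w_out - grad_w_out))\n", "\n", "with tf.Session(graph=g) as sess:\n", " sess.run(tf.global_variables_initializer())\n", " batch_x, batch_y = mnist.train.next_batch(batch_size)\n", " diff = sess.run(max_abs_diff, feed_dict={'features:0': batch_x,\n", " 'targets:0': batch_y})\n", " print('Max. absolute difference: %g' % diff)" ] }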
], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.7.1" } }, "nbformat": 4, "nbformat_minor": 2 }