{ "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": { "collapsed": true }, "outputs": [], "source": [ "import tensorflow as tf\n", "from tensorflow.examples.tutorials.mnist import input_data" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Extracting MNIST_data\\train-images-idx3-ubyte.gz\n", "Extracting MNIST_data\\train-labels-idx1-ubyte.gz\n", "Extracting MNIST_data\\t10k-images-idx3-ubyte.gz\n", "Extracting MNIST_data\\t10k-labels-idx1-ubyte.gz\n" ] } ], "source": [ "mnist = input_data.read_data_sets('MNIST_data', one_hot=True)" ] }, { "cell_type": "code", "execution_count": 5, "metadata": { "collapsed": true }, "outputs": [], "source": [ "# 权值\n", "def weight_variable(shape):\n", " initial = tf.truncated_normal(shape=shape, stddev=0.1)\n", " return tf.Variable(initial_value=initial)\n", "\n", "# 偏置值\n", "def bias_variable(shape):\n", " initial = tf.constant(0.1, shape=shape)\n", " return tf.Variable(initial_value=initial)\n", "\n", "# 卷积层\n", "def conv2d(x, W):\n", " # x: ` [batch, in_height, in_width, in_channels]`\n", " # [批次大小, 输入图片的长和宽, 通道数 (黑白:2; 彩色: 3)]\n", " # W: `[filter_height, filter_width, in_channels, out_channels]`\n", " # [滤波器长, 宽,输入通道数, 输出通道数]\n", " # strides: `[1, stride, stride, 1]`\n", " # [固定为1, x/y方向的步长, 固定为1]\n", " # padding: 是否在外部补零\n", " return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n", "\n", "# 池化层\n", "def max_pool_2x2(x):\n", " # x: ` [batch, in_height, in_width, in_channels]`\n", " # [批次大小, 输入图片的长和宽, 通道数 (黑白:2; 彩色: 3)]\n", " # ksize: [固定为1, 窗口大小, 固定为1]\n", " # strides: `[1, stride, stride, 1]`\n", " # [固定为1, x/y方向的步长, 固定为1]\n", " # padding: 是否在外部补零\n", " return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [], "source": [ "# Place Holder\n", "x = tf.placeholder(tf.float32, [None, 784])\n", "y = tf.placeholder(tf.float32, [None, 10])\n", "# Learn Rate学习率\n", "lr = tf.Variable(0.001, dtype = tf.float32)\n", "\n", "# 将 x的转化为 4D向量\n", "# [batch, in_height, in_width, in_channels]\n", "x_image = tf.reshape(x, [-1, 28, 28, 1])\n", "\n", "# 初始化第一个卷积层 权值和偏置值\n", "# 5*5 的采样窗口,32个卷积核(输出channels数) 从1个平面(输入channels数) 抽取特征,获得 32个特征平面\n", "W_conv1 = weight_variable([5, 5, 1, 32])\n", "# 32个卷积核,每个卷积核对应一个偏置值\n", "b_conv1 = bias_variable([32])\n", "\n", "# 执行卷积采样操作,并加上偏置值\n", "conv2d_1 = conv2d(x_image, W_conv1) + b_conv1\n", "# ReLU激活函数,获得第一个卷积层,计算得到的结果\n", "h_conv1 = tf.nn.relu(conv2d_1)\n", "# 执行 pooling池化操作\n", "h_pool1 = max_pool_2x2(h_conv1)\n", "\n", "# 第二个卷积层 + 激活函数 + 池化层\n", "W_conv2 = weight_variable([5, 5, 32, 64])\n", "b_conv2 = bias_variable([64])\n", "conv2d_2 = conv2d(h_pool1, W_conv2) + b_conv2\n", "h_conv2 = tf.nn.relu(conv2d_2)\n", "h_pool2 = max_pool_2x2(h_conv2)\n", "\n", "# 第一次卷积操作后,28 * 28图片仍然是 28 * 28\n", "# 第一次池化之后,因为 2 * 2的窗口,所以变成了 14 * 14\n", "# 第二次卷积之后,仍然保持 14 * 14的平面大小\n", "# 第二次池化之后,因为 2 * 2的窗口,所以变成了 7 * 7" ] }, { "cell_type": "code", "execution_count": 5, "metadata": { "collapsed": true, "scrolled": true }, "outputs": [], "source": [ "# 全连接层一共有 1000个神经元,连接上一层的 7 * 7* 64 = 3136个神经元\n", "W_fc1 = weight_variable([7 * 7 * 64, 1000])\n", "b_fc1 = bias_variable([1000])\n", "\n", "# 把上一层的池化层,转化为 1维 (-1代表任意值)\n", "h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])\n", "# 矩阵相乘,并加上偏置值\n", "wx_plus_b1 = tf.matmul(h_pool2_flat, W_fc1) + b_fc1\n", "# ReLU激活函数\n", "h_fc1 = tf.nn.relu(wx_plus_b1)\n", "\n", "# dropout正则化\n", 
"keep_prob = tf.placeholder(tf.float32)\n", "h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n", "\n", "# 第二个全连接层\n", "W_fc2 = weight_variable([1000, 10])\n", "b_fc2 = bias_variable([10])\n", "\n", "# 计算输出\n", "wx_plus_b2 = tf.matmul(h_fc1_drop, W_fc2) + b_fc2\n", "prediction = tf.nn.softmax(wx_plus_b2)" ] }, { "cell_type": "code", "execution_count": 6, "metadata": { "collapsed": true }, "outputs": [], "source": [ "# 交叉熵 Loss Function\n", "cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=prediction)) \n", "# Adam优化器,配合一个不断下降的学习率\n", "train_step = tf.train.AdamOptimizer(lr).minimize(cross_entropy)\n", "\n", "# argmax方法,会返回一维张量中最大值所在的位置\n", "# 计算正确率\n", "correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))\n", "accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Iterator: 0 Accuracy: 0.954\n", "Iterator: 20 Accuracy: 0.9912\n", "Iterator: 40 Accuracy: 0.9916\n", "Iterator: 60 Accuracy: 0.9924\n", "Iterator: 80 Accuracy: 0.9924\n", "Iterator: 100 Accuracy: 0.9926\n" ] } ], "source": [ "with tf.device('/gpu:0'):\n", " with tf.Session() as sess:\n", " tf.global_variables_initializer().run()\n", "\n", " batch_size = 100\n", " batch = (int) (60000 / batch_size)\n", "\n", " for _ in range(101):\n", " sess.run(tf.assign(lr, 0.0001 * (0.95 ** _)))\n", " for batch_step in range(batch):\n", " batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n", " sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 0.68})\n", "\n", " if _ % 20 == 0:\n", " test_acc = sess.run(accuracy, feed_dict = {x: mnist.test.images, y: mnist.test.labels, keep_prob: 1.0})\n", " print(\"Iterator: \", _, \"Accuracy:\", test_acc)" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.5.3" } }, "nbformat": 4, "nbformat_minor": 2 }