{ "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Extracting MNIST_data/train-images-idx3-ubyte.gz\n", "Extracting MNIST_data/train-labels-idx1-ubyte.gz\n", "Extracting MNIST_data/t10k-images-idx3-ubyte.gz\n", "Extracting MNIST_data/t10k-labels-idx1-ubyte.gz\n" ] } ], "source": [ "import tensorflow as tf\n", "# Download MNIST datasource\n", "# 6w个 28 * 28个像素的手写数字图片集\n", "# 用 [60000, 784]的张量表示 [图片索引, 图片像素点索引]\n", "from tensorflow.examples.tutorials.mnist import input_data\n", "from tensorflow.contrib.tensorboard.plugins import projector\n", "\n", "# `one-hot vectors`:向量中只有一个数据为 1,其余维度只能为 0\n", "# 转化为 [60000, 10]的张量表示 [图片索引, 图片表示的数值]\n", "mnist = input_data.read_data_sets(\"MNIST_data/\", one_hot = True)\n", "\n", "image_num = 10000\n", "embedding = tf.Variable(tf.stack(mnist.test.images[:image_num]), trainable = False, name = 'embedding')" ] }, { "cell_type": "code", "execution_count": 2, "metadata": { "collapsed": true }, "outputs": [], "source": [ "# 定义求统计指标的方法\n", "def summaries(var):\n", " # 申明一个命名空间\n", " with tf.name_scope('summaries'):\n", " tf.summary.scalar('max', tf.reduce_max(var)) #最大值\n", " tf.summary.scalar('min', tf.reduce_min(var)) #最小值\n", " mean = tf.reduce_mean(var)\n", " tf.summary.scalar('mean', mean) #平均值\n", " with tf.name_scope('stddev'):\n", " stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n", " tf.summary.scalar('stddev', stddev) #标准差\n", " tf.summary.histogram('histogram', var) #直方图" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Batch: 0 Accuracy: [ 0.9549 , 0.958182 ]\n", "Batch: 1 Accuracy: [ 0.9655 , 0.971745 ]\n", "Batch: 2 Accuracy: [ 0.9703 , 0.980455 ]\n", "Batch: 3 Accuracy: [ 0.9708 , 0.983745 ]\n", "Batch: 4 Accuracy: [ 0.9746 , 0.986818 ]\n" ] } ], "source": [ "with tf.name_scope('input'):\n", " # 28 * 28 = 784的占位符\n", " # None表示可能是任何数值\n", " x = tf.placeholder(tf.float32, [None, 784], name = 'x_input')\n", " y = tf.placeholder(tf.float32, [None, 10], name = 'y_input')\n", " # 用于 drop_out操作时的依据 (0.8: 80%的神经元在工作)\n", " z = tf.placeholder(tf.float32, name = 'drop_output_input')\n", " lr = tf.Variable(0.001, dtype = tf.float32) # 用于不断递减的学习率,使得梯度下降到最低点时,能更好地命中\n", "\n", "with tf.name_scope('layer'):\n", " with tf.name_scope('layer_1'):\n", " # 权重值(截断的随机正太分布) 和 偏置量 (0.1)\n", " W1 = tf.Variable(tf.truncated_normal([784, 600], stddev = 0.1), name = 'W1')\n", " b1 = tf.Variable(tf.zeros([600]) + 0.1, name = 'b1')\n", " # 调用函数求权重、偏置值的统计指标\n", " summaries(W1)\n", " summaries(b1)\n", " L1 = tf.nn.tanh(tf.matmul(x, W1) + b1)\n", " L1_drop = tf.nn.dropout(L1, z)\n", "\n", " with tf.name_scope('layer_2'):\n", " # 隐藏层\n", " W2 = tf.Variable(tf.truncated_normal([600, 400], stddev = 0.1), name = 'W2')\n", " b2 = tf.Variable(tf.zeros([400]) + 0.1, name = 'b2')\n", " summaries(W2)\n", " summaries(b2)\n", " L2 = tf.nn.tanh(tf.matmul(L1_drop, W2) + b2)\n", " L2_drop = tf.nn.dropout(L2, z)\n", "\n", " with tf.name_scope('layer_output'):\n", " W3 = tf.Variable(tf.truncated_normal([400, 10], stddev = 0.1), name = 'W3')\n", " b3 = tf.Variable(tf.zeros([10]) + 0.1, name = 'b3')\n", " summaries(W3)\n", " summaries(b3)\n", "\n", " with tf.name_scope('softmax'):\n", " # softmax回归模型\n", " prediction = tf.nn.softmax(tf.matmul(L2_drop, W3) + b3)\n", "\n", "with tf.name_scope('loss'):\n", " # 二次 Loss Func\n", " # loss = tf.reduce_mean(tf.square(y - prediction))\n", " # 交叉熵 Loss Func\n", " # loss = tf.reduce_mean(-tf.reduce_sum(y * tf.log(prediction), reduction_indices=[1]))\n", " loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels = y, logits = prediction))\n", " tf.summary.scalar('loss', loss)\n", "\n", "with tf.name_scope('train'):\n", " # 梯度下降\n", " # train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)\n", " train_step = tf.train.AdamOptimizer(lr).minimize(loss)\n", "\n", "with tf.name_scope('accuracy'):\n", " with tf.name_scope('correct_prediction'):\n", " # 评估模型\n", " # 判断 一维张量 y、prediction中最大值的位置是否相等\n", " correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction,1))\n", " with tf.name_scope('accuracy'):\n", " # 准确率\n", " # 将 布尔型列表 corrent_prediction转化为 float32类型\n", " # [true, false, false, ...] => [1.0, 0., 0., ...]\n", " accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n", " tf.summary.scalar('accuracy', accuracy)\n", "\n", "# 获得所有定义的 Summary\n", "summary_all = tf.summary.merge_all()\n", "\n", "# 配置运行资源\n", "session_config = tf.ConfigProto(device_count={\"CPU\": 8}, inter_op_parallelism_threads = 32, intra_op_parallelism_threads = 48)\n", "\n", "with tf.Session(config = session_config) as sess:\n", " tf.global_variables_initializer().run()\n", "\n", " # 产生 MetaData文件\n", " base_path = 'E:/Jupyter/_drafts/ipython/TensorFlow/tensorboard/'\n", " metadata_path = base_path + 'metadata.tsv'\n", " if tf.gfile.Exists(metadata_path):\n", " tf.gfile.DeleteRecursively(metadata_path)\n", " with open(metadata_path, 'w') as f:\n", " labels = sess.run(tf.argmax(mnist.test.labels[:], 1))\n", " for i in range(image_num):\n", " f.write(str(labels[i]) + '\\n') \n", "\n", " writer = tf.summary.FileWriter(base_path, sess.graph)\n", "\n", " saver = tf.train.Saver()\n", " config = projector.ProjectorConfig()\n", " embed = config.embeddings.add()\n", " embed.tensor_name = embedding.name\n", " embed.metadata_path = metadata_path\n", " embed.sprite.image_path = base_path + 'data/mnist_10k_sprite.png'\n", " embed.sprite.single_image_dim.extend([28, 28])\n", " projector.visualize_embeddings(writer, config)\n", "\n", " batch_size = 100\n", " batch = (int) (60000 / batch_size)\n", " # batch = mnist.train.num_examples\n", "\n", " # 这里主要是为了测试 TensorBoard,所以只训练 5次\n", " summary_count = 0\n", " for _ in range(5):\n", " sess.run(tf.assign(lr, 0.001 * (0.95 ** _)))\n", " for batch_step in range(batch):\n", " batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n", " # 真正开始生成 metadata\n", " run_options = tf.RunOptions(trace_level = tf.RunOptions.FULL_TRACE)\n", " run_metadata = tf.RunMetadata()\n", " summary_, result = sess.run([summary_all, train_step], feed_dict = {x: batch_xs, y: batch_ys, z: 0.997}, options = run_options, run_metadata = run_metadata)\n", " summary_count = summary_count + 1\n", " writer.add_run_metadata(run_metadata, 'step%03d' % summary_count)\n", " writer.add_summary(summary_, summary_count)\n", "\n", " test_accuracy = sess.run(accuracy, feed_dict = {x: mnist.test.images, y: mnist.test.labels, z: 1.0})\n", " train_accuracy = sess.run(accuracy, feed_dict = {x: mnist.train.images, y: mnist.train.labels, z: 1.0})\n", " print(\"Batch: \", _, \"Accuracy: [\", test_accuracy, \",\", train_accuracy, \"]\")\n", "\n", " saver.save(sess, base_path + 'minst_model.ckpt', global_step = summary_count)" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.5.3" } }, "nbformat": 4, "nbformat_minor": 2 }