{ "cells": [ { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "Using TensorFlow backend.\n" ] } ], "source": [ "import numpy as np\n", "import keras\n", "from keras import backend\n", "import tensorflow as tf\n", "\n", "from cleverhans.utils_mnist import data_mnist\n", "from cleverhans.utils_tf import model_train, model_eval\n", "from cleverhans.attacks import FastGradientMethod\n", "from cleverhans.utils import AccuracyReport\n", "from cleverhans.utils_keras import cnn_model\n", "from cleverhans.utils_keras import KerasModelWrapper" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from tensorflow.python.platform import flags\n", "\n", "FLAGS = flags.FLAGS\n", "flags.DEFINE_integer('nb_epochs', 6, 'Number of epochs to train model')\n", "flags.DEFINE_integer('batch_size', 128, 'Size of training batches')\n", "flags.DEFINE_float('learning_rate', 0.001, 'Learning rate for training')\n", "flags.DEFINE_string('train_dir', '/tmp', 'Directory where to save model.')\n", "flags.DEFINE_string('filename', 'mnist.ckpt', 'Checkpoint filename.')\n", "flags.DEFINE_boolean('load_model', True, 'Load saved model or train.')" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "FLAGS" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [ { "ename": "NameError", "evalue": "name 'mnist_tutorial' is not defined", "output_type": "error", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)", "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m mnist_tutorial(nb_epochs=FLAGS.nb_epochs,\n\u001b[0m\u001b[1;32m 2\u001b[0m \u001b[0mbatch_size\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mFLAGS\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbatch_size\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0mlearning_rate\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mFLAGS\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlearning_rate\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0mtrain_dir\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mFLAGS\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtrain_dir\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0mfilename\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mFLAGS\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfilename\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;31mNameError\u001b[0m: name 'mnist_tutorial' is not defined" ] } ], "source": [ "mnist_tutorial(nb_epochs=FLAGS.nb_epochs,\n", " batch_size=FLAGS.batch_size,\n", " learning_rate=FLAGS.learning_rate,\n", " train_dir=FLAGS.train_dir,\n", " filename=FLAGS.filename,\n", " load_model=FLAGS.load_model)" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [], "source": [ "def mnist_tutorial(train_start=0, train_end=60000, test_start=0,\n", " test_end=10000, nb_epochs=6, batch_size=128,\n", " learning_rate=0.001, train_dir=\"/tmp\",\n", " filename=\"mnist.ckpt\", load_model=False,\n", " testing=False):\n", " \"\"\"\n", " MNIST CleverHans tutorial\n", " :param train_start: index of first training set example\n", " :param train_end: index of last training set example\n", " :param test_start: index of first test set example\n", " :param test_end: 
index of last test set example\n", " :param nb_epochs: number of epochs to train model\n", " :param batch_size: size of training batches\n", " :param learning_rate: learning rate for training\n", " :param train_dir: Directory storing the saved model\n", " :param filename: Filename to save model under\n", " :param load_model: True for load, False for not load\n", " :param testing: if true, test error is calculated\n", " :return: an AccuracyReport object\n", " \"\"\"\n", " keras.layers.core.K.set_learning_phase(0)\n", "\n", " # Object used to keep track of (and return) key accuracies\n", " report = AccuracyReport()\n", "\n", " # Set TF random seed to improve reproducibility\n", " tf.set_random_seed(1234)\n", "\n", " if not hasattr(backend, \"tf\"):\n", " raise RuntimeError(\"This tutorial requires keras to be configured\"\n", " \" to use the TensorFlow backend.\")\n", "\n", " if keras.backend.image_dim_ordering() != 'tf':\n", " keras.backend.set_image_dim_ordering('tf')\n", " print(\"INFO: '~/.keras/keras.json' sets 'image_dim_ordering' to \"\n", " \"'th', temporarily setting to 'tf'\")\n", "\n", " # Create TF session and set as Keras backend session\n", " sess = tf.Session()\n", " keras.backend.set_session(sess)\n", "\n", " # Get MNIST test data\n", " X_train, Y_train, X_test, Y_test = data_mnist(train_start=train_start,\n", " train_end=train_end,\n", " test_start=test_start,\n", " test_end=test_end)\n", "\n", " # Use label smoothing\n", " assert Y_train.shape[1] == 10\n", " label_smooth = .1\n", " Y_train = Y_train.clip(label_smooth / 9., 1. - label_smooth)\n", "\n", " # Define input TF placeholder\n", " x = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))\n", " y = tf.placeholder(tf.float32, shape=(None, 10))\n", "\n", " # Define TF model graph\n", " model = cnn_model()\n", " preds = model(x)\n", " print(\"Defined TensorFlow model graph.\")\n", "\n", " def evaluate():\n", " # Evaluate the accuracy of the MNIST model on legitimate test examples\n", " eval_params = {'batch_size': batch_size}\n", " acc = model_eval(sess, x, y, preds, X_test, Y_test, args=eval_params)\n", " report.clean_train_clean_eval = acc\n", " assert X_test.shape[0] == test_end - test_start, X_test.shape\n", " print('Test accuracy on legitimate examples: %0.4f' % acc)\n", "\n", " # Train an MNIST model\n", " train_params = {\n", " 'nb_epochs': nb_epochs,\n", " 'batch_size': batch_size,\n", " 'learning_rate': learning_rate,\n", " 'train_dir': train_dir,\n", " 'filename': filename\n", " }\n", " ckpt = tf.train.get_checkpoint_state(train_dir)\n", " ckpt_path = False if ckpt is None else ckpt.model_checkpoint_path\n", "\n", " rng = np.random.RandomState([2017, 8, 30])\n", " if load_model and ckpt_path:\n", " saver = tf.train.Saver()\n", " saver.restore(sess, ckpt_path)\n", " print(\"Model loaded from: {}\".format(ckpt_path))\n", " evaluate()\n", " else:\n", " print(\"Model was not loaded, training from scratch.\")\n", " model_train(sess, x, y, preds, X_train, Y_train, evaluate=evaluate,\n", " args=train_params, save=True, rng=rng)\n", "\n", " # Calculate training error\n", " if testing:\n", " eval_params = {'batch_size': batch_size}\n", " acc = model_eval(sess, x, y, preds, X_train, Y_train, args=eval_params)\n", " report.train_clean_train_clean_eval = acc\n", "\n", " # Initialize the Fast Gradient Sign Method (FGSM) attack object and graph\n", " wrap = KerasModelWrapper(model)\n", " fgsm = FastGradientMethod(wrap, sess=sess)\n", " fgsm_params = {'eps': 0.3,\n", " 'clip_min': 0.,\n", " 'clip_max': 1.}\n", " adv_x = 
fgsm.generate(x, **fgsm_params)\n", " # Consider the attack to be constant\n", " adv_x = tf.stop_gradient(adv_x)\n", " preds_adv = model(adv_x)\n", "\n", " # Evaluate the accuracy of the MNIST model on adversarial examples\n", " eval_par = {'batch_size': batch_size}\n", " acc = model_eval(sess, x, y, preds_adv, X_test, Y_test, args=eval_par)\n", " print('Test accuracy on adversarial examples: %0.4f\\n' % acc)\n", " report.clean_train_adv_eval = acc\n", "\n", " # Calculating train error\n", " if testing:\n", " eval_par = {'batch_size': batch_size}\n", " acc = model_eval(sess, x, y, preds_adv, X_train,\n", " Y_train, args=eval_par)\n", " report.train_clean_train_adv_eval = acc\n", "\n", " print(\"Repeating the process, using adversarial training\")\n", " # Redefine TF model graph\n", " model_2 = cnn_model()\n", " preds_2 = model_2(x)\n", " wrap_2 = KerasModelWrapper(model_2)\n", " fgsm2 = FastGradientMethod(wrap_2, sess=sess)\n", " preds_2_adv = model_2(fgsm2.generate(x, **fgsm_params))\n", "\n", " def evaluate_2():\n", " # Accuracy of adversarially trained model on legitimate test inputs\n", " eval_params = {'batch_size': batch_size}\n", " accuracy = model_eval(sess, x, y, preds_2, X_test, Y_test,\n", " args=eval_params)\n", " print('Test accuracy on legitimate examples: %0.4f' % accuracy)\n", " report.adv_train_clean_eval = accuracy\n", "\n", " # Accuracy of the adversarially trained model on adversarial examples\n", " accuracy = model_eval(sess, x, y, preds_2_adv, X_test,\n", " Y_test, args=eval_params)\n", " print('Test accuracy on adversarial examples: %0.4f' % accuracy)\n", " report.adv_train_adv_eval = accuracy\n", "\n", " # Perform and evaluate adversarial training\n", " model_train(sess, x, y, preds_2, X_train, Y_train,\n", " predictions_adv=preds_2_adv, evaluate=evaluate_2,\n", " args=train_params, save=False, rng=rng)\n", "\n", " # Calculate training errors\n", " if testing:\n", " eval_params = {'batch_size': batch_size}\n", " accuracy = model_eval(sess, x, y, preds_2, X_train, Y_train,\n", " args=eval_params)\n", " report.train_adv_train_clean_eval = accuracy\n", " accuracy = model_eval(sess, x, y, preds_2_adv, X_train,\n", " Y_train, args=eval_params)\n", " report.train_adv_train_adv_eval = accuracy\n", "\n", " return report" ] }, { "cell_type": "code", "execution_count": 10, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Extracting /tmp/train-images-idx3-ubyte.gz\n", "Extracting /tmp/train-labels-idx1-ubyte.gz\n", "Extracting /tmp/t10k-images-idx3-ubyte.gz\n", "Extracting /tmp/t10k-labels-idx1-ubyte.gz\n", "X_train shape: (5000, 28, 28, 1)\n", "X_test shape: (1000, 28, 28, 1)\n", "Defined TensorFlow model graph.\n", "Model was not loaded, training from scratch.\n", "Test accuracy on legitimate examples: 0.9090\n", "Test accuracy on legitimate examples: 0.9480\n", "Test accuracy on adversarial examples: 0.0330\n", "\n", "Repeating the process, using adversarial training\n", "Test accuracy on legitimate examples: 0.8900\n", "Test accuracy on adversarial examples: 0.3240\n", "Test accuracy on legitimate examples: 0.9270\n", "Test accuracy on adversarial examples: 0.5400\n" ] }, { "data": { "text/plain": [ "" ] }, "execution_count": 10, "metadata": {}, "output_type": "execute_result" } ], "source": [ "mnist_tutorial(train_start=0, train_end=5000, test_start=0, test_end=1000, nb_epochs=2)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, 
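"metadata": {}, "outputs": [], "source": [ "# A minimal, self-contained sketch (not part of the original tutorial): it shows how to\n", "# get concrete adversarial images as NumPy arrays with the attack's generate_np method,\n", "# rather than the symbolic generate() graph used inside mnist_tutorial(). The quick\n", "# 1-epoch Keras training below is only for illustration.\n", "import keras\n", "import tensorflow as tf\n", "\n", "from cleverhans.attacks import FastGradientMethod\n", "from cleverhans.utils_keras import cnn_model, KerasModelWrapper\n", "from cleverhans.utils_mnist import data_mnist\n", "\n", "sess = tf.Session()\n", "keras.backend.set_session(sess)\n", "\n", "# Small data slices keep this cell fast\n", "X_train, Y_train, X_test, Y_test = data_mnist(train_start=0, train_end=1000,\n", " test_start=0, test_end=100)\n", "\n", "model = cnn_model()\n", "model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\n", "model.fit(X_train, Y_train, epochs=1, batch_size=128, verbose=0)\n", "\n", "# Wrap the Keras model for cleverhans and craft adversarial examples as arrays\n", "fgsm = FastGradientMethod(KerasModelWrapper(model), sess=sess)\n", "X_adv = fgsm.generate_np(X_test, eps=0.3, clip_min=0., clip_max=1.)\n", "\n", "print('Clean test accuracy: %0.4f' % model.evaluate(X_test, Y_test, verbose=0)[1])\n", "print('Adversarial test accuracy: %0.4f' % model.evaluate(X_adv, Y_test, verbose=0)[1])" ] }, { "cell_type": "code", "execution_count": null,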
"metadata": {}, "outputs": [], "source": [ "FastGradientMethod()" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.6.1" }, "toc": { "colors": { "hover_highlight": "#DAA520", "navigate_num": "#000000", "navigate_text": "#333333", "running_highlight": "#FF0000", "selected_highlight": "#FFD700", "sidebar_border": "#EEEEEE", "wrapper_background": "#FFFFFF" }, "moveMenuLeft": true, "nav_menu": { "height": "12px", "width": "252px" }, "navigate_menu": true, "number_sections": true, "sideBar": true, "threshold": 4, "toc_cell": false, "toc_section_display": "block", "toc_window_display": false, "widenNotebook": false } }, "nbformat": 4, "nbformat_minor": 2 }