{"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"name":"activation-atlas-collect.ipynb","version":"0.3.2","provenance":[{"file_id":"1gJpwGX2reyM64OWkXAJuiy47X8Ngo-dR","timestamp":1530677728050},{"file_id":"1-BsF7t50HZHD8eXrWCmS419dCr2hGzl4","timestamp":1530043360039}],"collapsed_sections":[],"last_runtime":{"build_target":"//learning/brain/python/client:colab_notebook","kind":"private"}}},"cells":[{"metadata":{"id":"tPa1rDPCFyXB","colab_type":"text"},"cell_type":"markdown","source":["##### Copyright 2018 Google LLC.\n","\n","Licensed under the Apache License, Version 2.0 (the \"License\");"]},{"metadata":{"id":"sY7TB--AF1Iz","colab_type":"code","colab":{}},"cell_type":"code","source":["# Licensed under the Apache License, Version 2.0 (the \"License\");\n","# you may not use this file except in compliance with the License.\n","# You may obtain a copy of the License at\n","#\n","# https://www.apache.org/licenses/LICENSE-2.0\n","#\n","# Unless required by applicable law or agreed to in writing, software\n","# distributed under the License is distributed on an \"AS IS\" BASIS,\n","# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n","# See the License for the specific language governing permissions and\n","# limitations under the License."],"execution_count":0,"outputs":[]},{"metadata":{"id":"_yKMurVkF4A0","colab_type":"text"},"cell_type":"markdown","source":["# Activation Atlas — Collect Activations\n","\n","This notebook uses [**Lucid**](https://github.com/tensorflow/lucid) to reproduce the results in [Activation Atlas](https://distill.pub/2019/activation-atlas/).\n","\n","This notebook doesn't introduce the abstractions behind lucid; you may wish to also read the [Lucid tutorial](https://colab.research.google.com/github/tensorflow/lucid/blob/master/notebooks/tutorial.ipynb).\n","\n","**Note**: The easiest way to use this tutorial is as a [colab notebook](https://research.google.com/colaboratory/faq.html), which allows you to dive in with no setup."]},{"metadata":{"id":"h-O3WcjaF_Oo","colab_type":"text"},"cell_type":"markdown","source":["## Install and imports"]},{"metadata":{"id":"tPctnk8OGDJr","colab_type":"code","colab":{}},"cell_type":"code","source":["# Installations\n","!pip -q install lucid>=0.3.6"],"execution_count":0,"outputs":[]},{"metadata":{"id":"gmCa_sdGQLJF","colab_type":"code","colab":{}},"cell_type":"code","source":["# General support\n","import math\n","import tensorflow as tf\n","import numpy as np\n","import json\n","\n","# General lucid code\n","import lucid.modelzoo.vision_models as models"],"execution_count":0,"outputs":[]},{"metadata":{"id":"z6Phq6UyQuW0","colab_type":"code","colab":{}},"cell_type":"code","source":["options = {\n"," 'model': 'inceptionv1',\n"," 'split': 'train'\n","}"],"execution_count":0,"outputs":[]},{"metadata":{"id":"yP_HOS_DQcbd","colab_type":"code","colab":{}},"cell_type":"code","source":["# Let's import a model from the modelzoo\n","model = models.InceptionV1()\n","model.load_graphdef()"],"execution_count":0,"outputs":[]},{"metadata":{"id":"M-kcrWDcRfP4","colab_type":"code","colab":{}},"cell_type":"code","source":["# Write the classification labels once\n","model.labels"],"execution_count":0,"outputs":[]},{"metadata":{"id":"zHlc6SO0MNmm","colab_type":"code","colab":{}},"cell_type":"code","source":["# Setup the data provider for imagenet\n","# Note you will need to download imagenet data yourself and setup a data provider\n","# http://image-net.org/download\n","\n","# data_split = 
{"metadata":{"id":"rMM08Uajg6WY","colab_type":"code","colab":{}},"cell_type":"code","source":["# Start a session (plus the queue runners the slim provider relies on)\n","sess = tf.InteractiveSession()\n","coord = tf.train.Coordinator()\n","threads = tf.train.start_queue_runners(sess=sess, coord=coord)"],"execution_count":0,"outputs":[]},
{"metadata":{"id":"FiC8nZ9ghaNz","colab_type":"code","colab":{}},"cell_type":"code","source":["# Add a batch dimension, resize to the model's 224x224 input, scale to [0, 1]\n","image_tensor_ = tf.image.resize_images(image_tensor[None], [224, 224])/255"],"execution_count":0,"outputs":[]},
{"metadata":{"id":"R9SwsKWniS9X","colab_type":"code","colab":{}},"cell_type":"code","source":["# Import the graph; T(layer_name) then gives that layer's activation tensor\n","T = render.import_model(model, image_tensor_, image_tensor_)"],"execution_count":0,"outputs":[]},
{"metadata":{"id":"wHXrqNQ-r18y","colab_type":"code","colab":{}},"cell_type":"code","source":["def save_data(base_dir, options, activations, attributions):\n","  # optionsToURL (sketched in a later cell) builds a file name from options\n","\n","  # spatial activations\n","  activations_path = optionsToURL(base_dir + \"activations\", \"npy\", options)\n","  with gfile.GFile(activations_path, \"wb\") as f:\n","    np.save(f, activations)\n","\n","  # spatial attributions to final classes\n","  attributions_path = optionsToURL(base_dir + \"attribution\", \"npy\", options)\n","  with gfile.GFile(attributions_path, \"wb\") as f:\n","    np.save(f, attributions)"],"execution_count":0,"outputs":[]},
{"metadata":{"id":"8IthlhMA8Ubp","colab_type":"code","colab":{}},"cell_type":"code","source":["def fwd_gradients(ys, xs, d_xs):\n","  \"\"\"Forward-mode pushforward analogous to the pullback defined by tf.gradients.\n","\n","  With tf.gradients, grad_ys is the vector being pulled back, and here d_xs is\n","  the vector being pushed forward.\n","\n","  By mattjj@google.com from\n","  https://github.com/renmengye/tensorflow-forward-ad/issues/2\n","  \"\"\"\n","  v = tf.zeros_like(ys)\n","  g = tf.gradients(ys, xs, grad_ys=v)\n","  return tf.gradients(g, v, grad_ys=d_xs)"],"execution_count":0,"outputs":[]},
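{"metadata":{"colab_type":"text"},"cell_type":"markdown","source":["The collection loop below calls `optionsToURL` and `base_dir_gcs`, which the notebook never defines. The next cell is a hedged sketch of both (the bucket path and the filename scheme are assumptions, not the originals) so that `save_data` can run end to end."]},
{"metadata":{"colab_type":"code","colab":{}},"cell_type":"code","source":["# Hypothetical stand-ins for the undefined helpers used by save_data.\n","base_dir_gcs = 'gs://your-bucket/activation-atlas/'  # replace with your bucket\n","\n","def optionsToURL(prefix, extension, options):\n","  # Serialize the options dict into a deterministic file name, e.g.\n","  # '<prefix>-layer=mixed4d--model=inceptionv1--page=1_of_500-....npy'\n","  parts = ['{}={}'.format(k, options[k]) for k in sorted(options)]\n","  return '{}-{}.{}'.format(prefix, '--'.join(parts), extension)"],"execution_count":0,"outputs":[]},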
{"metadata":{"id":"iWilDZnzxRPX","colab_type":"code","colab":{}},"cell_type":"code","source":["number_of_images = int(1e6)\n","options['sample_images'] = number_of_images\n","number_of_pages = 500\n","# Integer division, so the result can be passed to range() below\n","number_of_images_per_page = number_of_images // number_of_pages\n","\n","layers = [\n","    \"conv2d2\",\n","    \"mixed3a\",\n","    \"mixed3b\",\n","    \"mixed4a\",\n","    \"mixed4b\",\n","    \"mixed4c\",\n","    \"mixed4d\",\n","    \"mixed4e\",\n","    \"mixed5a\",\n","    \"mixed5b\",\n","]\n","\n","for layer_name in reversed(layers):\n","  print()\n","  print(layer_name)\n","  options['layer'] = layer_name\n","\n","  d_previous = tf.placeholder(\"float32\")\n","  d_logit = fwd_gradients(T(\"softmax2_pre_activation\"), T(layer_name), d_previous)[0]\n","\n","  zeros = None\n","  print(number_of_pages)\n","\n","  for p in range(number_of_pages):\n","    activations = []\n","    attributions = []\n","\n","    for n in range(number_of_images_per_page):\n","\n","      # evaluate\n","      vec, label_index, record_key, label_text, image = sess.run([T(layer_name), t_label, t_record_key, t_label_text, image_tensor_])\n","\n","      # sample one random position in the image, excluding the edges\n","      options['sample_type'] = 'random'\n","      n_x = np.random.randint(1, vec.shape[1] - 1)\n","      n_y = np.random.randint(1, vec.shape[2] - 1)\n","\n","      # Compute the logit attribution of this single spatial activation by\n","      # pushing it forward to the pre-softmax logits\n","      if zeros is None:\n","        zeros = np.zeros(vec.shape)\n","      else:\n","        zeros[:] = 0\n","      zeros[0, n_x, n_y] = vec[0, n_x, n_y]\n","      logit_attr = d_logit.eval({T(layer_name): vec, d_previous: zeros})\n","\n","      # top attribution class for this spatial activation (handy for spot checks)\n","      top_attribution_class_index = int(np.argsort(-logit_attr[0])[0])\n","      top_attribution_class_label = model.labels[top_attribution_class_index]\n","\n","      activations.append(vec[0, n_x, n_y])\n","      attributions.append(logit_attr[0])\n","\n","    # progress indicator\n","    print(p + 1, end=' ')\n","\n","    # save this page of activations and attributions\n","    options['page'] = '{}_of_{}'.format(p + 1, number_of_pages)\n","    save_data(base_dir_gcs + layer_name + \"/\", options, activations, attributions)"],"execution_count":0,"outputs":[]},
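{"metadata":{"colab_type":"text"},"cell_type":"markdown","source":["As a quick sanity check (a hedged sketch, assuming the hypothetical `optionsToURL` and `base_dir_gcs` above), load one saved page back and inspect its shape."]},
{"metadata":{"colab_type":"code","colab":{}},"cell_type":"code","source":["# Load one page of saved activations back to verify the round trip\n","check_options = dict(options, page='1_of_{}'.format(number_of_pages))\n","path = optionsToURL(base_dir_gcs + check_options['layer'] + '/activations', 'npy', check_options)\n","with gfile.GFile(path, 'rb') as f:\n","  acts = np.load(f)\n","print(acts.shape)  # expected: (number_of_images_per_page, channels)"],"execution_count":0,"outputs":[]}]}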