{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# CS 20 : TensorFlow for Deep Learning Research\n",
    "## Lecture 04 : Eager execution\n",
    "### Custon training subclassing\n",
    "Classifying mnist by using Tensorflow's eager execution.\n",
    "\n",
    "This guide uses these high-level TensorFlow concepts:\n",
    "\n",
    "* Enable an [eager execution](https://www.tensorflow.org/guide/eager?hl=ko) development environment,\n",
    "* Import data with the [Datasets API](https://www.tensorflow.org/guide/datasets?hl=ko)\n",
    "* Build model class by inheriting `tf.keras.Model` with TensorFlow's [Keras API](https://keras.io/getting-started/sequential-model-guide/)\n",
    "  \n",
    "  \n",
    "* Reference\n",
    "    + https://www.tensorflow.org/tutorials/eager/custom_training_walkthrough?hl=ko"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Setup"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1.12.0\n"
     ]
    }
   ],
   "source": [
    "from __future__ import absolute_import, division, print_function\n",
    "import numpy as np\n",
    "import tensorflow as tf\n",
    "from tensorflow import keras\n",
    "\n",
    "tf.enable_eager_execution()\n",
    "\n",
    "print(tf.__version__)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Define `SoftmaxClassifier` class and `loss_fn` function"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "class SoftmaxClassifier(keras.Model):\n",
    "    def __init__(self, num_classes):\n",
    "        super(SoftmaxClassifier, self).__init__()\n",
    "        self.__dense = tf.keras.layers.Dense(units = num_classes)\n",
    "        \n",
    "    def call(self, inputs):\n",
    "        score = self.__dense(inputs)\n",
    "        return score\n",
    "\n",
    "def loss_fn(model, x, y):\n",
    "    ce_loss = tf.losses.sparse_softmax_cross_entropy(labels = y, logits = model(x))\n",
    "    return ce_loss"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### Instantiate `model` instance"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "model = SoftmaxClassifier(num_classes = 10)"
   ]
  },
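  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Because eager execution is enabled, the model can be called directly on a batch of tensors and the result is available immediately. As a quick sanity check (a minimal sketch added here, not part of the original walkthrough; `dummy_batch` and `dummy_score` are illustrative names), the cell below runs a forward pass on a dummy batch of two flattened images and prints the shape of the returned logits."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# illustrative sanity check: forward pass on a dummy batch of two 784-dim inputs\n",
    "dummy_batch = tf.zeros(shape = (2, 784))\n",
    "dummy_score = model(dummy_batch)\n",
    "print(dummy_score.shape)  # one row of 10 logits per example, i.e. (2, 10)"
   ]
  },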
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Import dataset and split dataset into train, validation, test"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "(x_train, y_train), (x_tst, y_tst) = tf.keras.datasets.mnist.load_data()\n",
    "x_train = (x_train / 255).astype(np.float32).reshape(-1, 784)\n",
    "x_tst = (x_tst / 255).astype(np.float32).reshape(-1, 784)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(55000, 784) (55000,)\n",
      "(5000, 784) (5000,)\n"
     ]
    }
   ],
   "source": [
    "tr_indices = np.random.choice(range(x_train.shape[0]), size = 55000, replace = False)\n",
    "\n",
    "x_tr = x_train[tr_indices]\n",
    "y_tr = y_train[tr_indices].astype(np.int32)\n",
    "\n",
    "x_val = np.delete(arr = x_train, obj = tr_indices, axis = 0)\n",
    "y_val = np.delete(arr = y_train, obj = tr_indices, axis = 0).astype(np.int32)\n",
    "\n",
    "print(x_tr.shape, y_tr.shape)\n",
    "print(x_val.shape, y_val.shape)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Train the model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "epochs = 30\n",
    "batch_size = 32\n",
    "learning_rate = .01\n",
    "\n",
    "tr_dataset = tf.data.Dataset.from_tensor_slices((x_tr, y_tr))\n",
    "tr_dataset = tr_dataset.shuffle(buffer_size = 10000)\n",
    "tr_dataset = tr_dataset.batch(batch_size = batch_size)\n",
    "\n",
    "val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))\n",
    "val_dataset = val_dataset.batch(batch_size = batch_size)\n",
    "\n",
    "opt = tf.train.AdamOptimizer(learning_rate = learning_rate)"
   ]
  },
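  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "With eager execution, a `tf.data.Dataset` can be iterated with a plain Python `for` loop, which is exactly what the training loop below does. As a quick illustration (a minimal sketch added here, not part of the original code), the cell below pulls a single mini-batch from `tr_dataset` with `take(1)` and prints its shapes."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# illustrative: peek at one mini-batch from the training pipeline\n",
    "for x_mb, y_mb in tr_dataset.take(1):\n",
    "    print(x_mb.shape, y_mb.shape)  # (32, 784) (32,) with batch_size = 32"
   ]
  },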
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epochs :  5, tr_loss : 0.321, val_loss : 0.372\n",
      "epochs : 10, tr_loss : 0.316, val_loss : 0.390\n",
      "epochs : 15, tr_loss : 0.309, val_loss : 0.430\n",
      "epochs : 20, tr_loss : 0.307, val_loss : 0.445\n",
      "epochs : 25, tr_loss : 0.302, val_loss : 0.433\n",
      "epochs : 30, tr_loss : 0.303, val_loss : 0.453\n"
     ]
    }
   ],
   "source": [
    "for epoch in range(epochs):\n",
    "    avg_tr_loss = 0\n",
    "    avg_val_loss = 0\n",
    "    tr_step = 0\n",
    "    val_step = 0\n",
    "    \n",
    "    # train\n",
    "    for x_mb, y_mb in tr_dataset:\n",
    "        with tf.GradientTape() as tape:\n",
    "            tr_loss = loss_fn(model = model, x = x_mb, y = y_mb)\n",
    "        grads = tape.gradient(target = tr_loss, sources = model.variables)\n",
    "        opt.apply_gradients(zip(grads, model.variables))\n",
    "        \n",
    "        avg_tr_loss += tr_loss\n",
    "        tr_step += 1\n",
    "    else:\n",
    "        avg_tr_loss /= tr_step\n",
    "        \n",
    "    # validation\n",
    "    for x_mb, y_mb in val_dataset:\n",
    "        val_loss = loss_fn(model = model, x = x_mb, y = y_mb)\n",
    "        avg_val_loss += val_loss\n",
    "        val_step += 1\n",
    "    else:\n",
    "        avg_val_loss /= val_step\n",
    "        \n",
    "    if (epoch + 1) % 5 == 0:\n",
    "        print('epochs : {:2}, tr_loss : {:.3f}, val_loss : {:.3f}'.format(epoch + 1, avg_tr_loss, avg_val_loss))"
   ]
  }
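  ,
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Evaluate on the test set\n",
    "The test split loaded above (`x_tst`, `y_tst`) is not used during training. As a closing sketch (added here, not part of the original notebook; `yhat` is an illustrative name), the cell below computes the model's test accuracy by taking the argmax over the logits and comparing against the labels."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# illustrative: test-set accuracy of the trained model\n",
    "yhat = np.argmax(model(tf.convert_to_tensor(x_tst)).numpy(), axis = 1)\n",
    "print('tst_acc : {:.2%}'.format(np.mean(yhat == y_tst)))"
   ]
  }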
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}