{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Neural Networks in PyTorch"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "from torch.autograd import Variable\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F"
   ]
  },
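  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**Note on `Variable`:** this notebook uses the pre-0.4 autograd API, where tensors are wrapped in `torch.autograd.Variable` to take part in autograd. From PyTorch 0.4 onwards `Variable` is merged into `Tensor` and the wrapper is no longer needed. A minimal sketch of the newer spelling (assuming a PyTorch >= 0.4 install; not executed in this kernel):"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sketch for PyTorch >= 0.4 (not run here): plain tensors carry autograd state.\n",
    "x = torch.randn(1, 1, 32, 32, requires_grad=True)  # replaces Variable(torch.randn(...))\n",
    "y = (x * 2).sum()\n",
    "y.backward()          # gradients accumulate in x.grad, just as with Variable\n",
    "print(x.grad.size())"
   ]
  },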
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Net(\n",
      "  (conv1): Conv2d (1, 6, kernel_size=(5, 5), stride=(1, 1))\n",
      "  (conv2): Conv2d (6, 16, kernel_size=(5, 5), stride=(1, 1))\n",
      "  (fc1): Linear(in_features=400, out_features=120)\n",
      "  (fc2): Linear(in_features=120, out_features=84)\n",
      "  (fc3): Linear(in_features=84, out_features=10)\n",
      ")\n"
     ]
    }
   ],
   "source": [
    "class Net(nn.Module):\n",
    "    def __init__(self):\n",
    "        super(Net, self).__init__()\n",
    "        self.conv1 = nn.Conv2d(1, 6, 5)\n",
    "        self.conv2 = nn.Conv2d(6, 16, 5)\n",
    "        self.fc1 = nn.Linear(16*5*5, 120)\n",
    "        self.fc2 = nn.Linear(120, 84)\n",
    "        self.fc3 = nn.Linear(84, 10)\n",
    "        \n",
    "    def forward(self, x):\n",
    "        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))\n",
    "        x = F.max_pool2d(F.relu(self.conv2(x)), 2)\n",
    "        x = x.view(-1, self.num_flat_features(x))\n",
    "        x = F.relu(self.fc1(x))\n",
    "        x = F.relu(self.fc2(x))\n",
    "        x = self.fc3(x)\n",
    "        return x\n",
    "    def num_flat_features(self, x):\n",
    "        size = x.size()[1:]\n",
    "        num_features = 1\n",
    "        for s in size:\n",
    "            num_features *= s\n",
    "        return num_features\n",
    "    \n",
    "net = Net()\n",
    "print(net)"
   ]
  },
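  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The network is a small LeNet-style CNN. The `16*5*5` input size of `fc1` follows from the spatial arithmetic for a 1x32x32 input: conv1 (5x5 kernel) gives 6x28x28, 2x2 max-pooling gives 6x14x14, conv2 gives 16x10x10, and the second 2x2 pooling gives 16x5x5, i.e. 400 flattened features. A quick shape check, reusing the layers defined above:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Trace the spatial sizes through the conv/pool stack defined above.\n",
    "x = Variable(torch.randn(1, 1, 32, 32))\n",
    "x = F.max_pool2d(F.relu(net.conv1(x)), 2)\n",
    "print(x.size())  # expected: (1, 6, 14, 14)\n",
    "x = F.max_pool2d(F.relu(net.conv2(x)), 2)\n",
    "print(x.size())  # expected: (1, 16, 5, 5) -> 16*5*5 = 400 features"
   ]
  },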
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "10\n",
      "torch.Size([6, 1, 5, 5])\n"
     ]
    }
   ],
   "source": [
    "params = list(net.parameters())\n",
    "print(len(params))\n",
    "print(params[0].size())"
   ]
  },
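  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "There are 10 parameter tensors because each of the five layers contributes a weight and a bias; `params[0]` is `conv1`'s weight with shape `(6, 1, 5, 5)`. To see which tensor belongs to which layer (a sketch, assuming `named_parameters` is available in this PyTorch version):"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# List every learnable tensor together with the layer it belongs to.\n",
    "for name, p in net.named_parameters():\n",
    "    print(name, p.size())"
   ]
  },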
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Variable containing:\n",
      " 0.0520 -0.0491  0.0239  0.1516 -0.0555  0.0332  0.0873 -0.0206 -0.0700  0.0891\n",
      "[torch.FloatTensor of size 1x10]\n",
      "\n"
     ]
    }
   ],
   "source": [
    "input = Variable(torch.randn(1, 1, 32, 32))\n",
    "out = net(input)\n",
    "print(out)"
   ]
  },
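  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Calling `net(input)` runs `Net.forward` and returns a `(1, 10)` output. Gradients can also be propagated from `out` directly by supplying an upstream gradient of the same shape, e.g. a random one. A sketch (not executed here, since running it would populate `.grad` before the cells below):"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Backprop from `out` with a random upstream gradient of matching shape (1, 10).\n",
    "net.zero_grad()  # clear any existing gradients first\n",
    "out.backward(torch.randn(1, 10), retain_graph=True)"
   ]
  },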
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Variable containing:\n",
      " 38.2532\n",
      "[torch.FloatTensor of size 1]\n",
      "\n"
     ]
    }
   ],
   "source": [
    "target = Variable(torch.arange(1, 11))\n",
    "target = target.view(1, -1)\n",
    "criterion = nn.MSELoss()\n",
    "\n",
    "loss = criterion(out, target)\n",
    "print(loss)\n"
   ]
  },
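  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "`nn.MSELoss` computes the mean of the squared element-wise differences between the output and the target. The same value can be checked by hand:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Recompute the MSE by hand: mean of squared differences over all 10 elements.\n",
    "manual_mse = ((out - target) ** 2).mean()\n",
    "print(manual_mse)"
   ]
  },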
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<MseLossBackward object at 0x7f0d3d0da050>\n",
      "<AddmmBackward object at 0x7f0d3d0da150>\n",
      "<ExpandBackward object at 0x7f0d3d0da110>\n",
      "<AccumulateGrad object at 0x7f0d3d0da190>\n"
     ]
    }
   ],
   "source": [
    "print(loss.grad_fn)\n",
    "print(loss.grad_fn.next_functions[0][0])\n",
    "print(loss.grad_fn.next_functions[0][0].next_functions[0][0])\n",
    "print(loss.grad_fn.next_functions[0][0].next_functions[0][0].next_functions[0][0])"
   ]
  },
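  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Each `grad_fn` node keeps references to the functions that produced its inputs in `next_functions`, so the whole backward graph can be traversed. A small recursive walker (a hypothetical helper, not part of the PyTorch API) makes the chain above easier to see; the depth limit keeps the output short:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Walk the autograd graph depth-first, printing one node type per line.\n",
    "def walk_graph(fn, depth=0, max_depth=3):\n",
    "    if fn is None or depth > max_depth:\n",
    "        return\n",
    "    print('  ' * depth + type(fn).__name__)\n",
    "    for next_fn, _ in getattr(fn, 'next_functions', ()):\n",
    "        walk_graph(next_fn, depth + 1, max_depth)\n",
    "\n",
    "walk_graph(loss.grad_fn)"
   ]
  },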
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "conv1.bias.grad before backward\n",
      "None\n",
      "conv1.bias.grad after backward\n",
      "Variable containing:\n",
      "-0.0810\n",
      "-0.0638\n",
      "-0.0474\n",
      "-0.1109\n",
      " 0.0235\n",
      " 0.0689\n",
      "[torch.FloatTensor of size 6]\n",
      "\n"
     ]
    }
   ],
   "source": [
    "print('conv1.bias.grad before backward')\n",
    "print(net.conv1.bias.grad)\n",
    "\n",
    "loss.backward(retain_graph=True)\n",
    "\n",
    "print('conv1.bias.grad after backward')\n",
    "print(net.conv1.bias.grad)"
   ]
  },
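  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Once the gradients are populated, the simplest update rule is plain stochastic gradient descent, `weight = weight - learning_rate * gradient`. Written by hand it is just a loop over the parameters (a sketch, not executed here, since `torch.optim` does the same job in the next cell):"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hand-written SGD step: subtract lr * grad from every parameter, in place.\n",
    "learning_rate = 0.01\n",
    "for f in net.parameters():\n",
    "    f.data.sub_(f.grad.data * learning_rate)"
   ]
  },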
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(Variable containing:\n",
      "\n",
      "Columns 0 to 7 \n",
      "  4.7988  -1.8145   9.6046   5.1899   2.7905  11.5400  19.0878  12.0102\n",
      "\n",
      "Columns 8 to 9 \n",
      " 19.2533  18.5875\n",
      "[torch.FloatTensor of size 1x10]\n",
      ", Variable containing:\n",
      "    1     2     3     4     5     6     7     8     9    10\n",
      "[torch.FloatTensor of size 1x10]\n",
      ")\n",
      "(Variable containing:\n",
      "-0.0659  0.2023  0.1443  0.1505  0.2695  0.1457  0.1217  0.1000  0.2713  0.2817\n",
      "[torch.FloatTensor of size 1x10]\n",
      ", Variable containing:\n",
      "    1     2     3     4     5     6     7     8     9    10\n",
      "[torch.FloatTensor of size 1x10]\n",
      ")\n",
      "(Variable containing:\n",
      "-0.0519  0.2295  0.1683  0.2111  0.3358  0.2225  0.1956  0.2225  0.3733  0.4116\n",
      "[torch.FloatTensor of size 1x10]\n",
      ", Variable containing:\n",
      "    1     2     3     4     5     6     7     8     9    10\n",
      "[torch.FloatTensor of size 1x10]\n",
      ")\n",
      "(Variable containing:\n",
      "-0.0323  0.2573  0.1696  0.2499  0.3687  0.2751  0.2512  0.3136  0.4476  0.4794\n",
      "[torch.FloatTensor of size 1x10]\n",
      ", Variable containing:\n",
      "    1     2     3     4     5     6     7     8     9    10\n",
      "[torch.FloatTensor of size 1x10]\n",
      ")\n",
      "(Variable containing:\n",
      "-0.0463  0.2677  0.1846  0.2679  0.3976  0.2892  0.2572  0.3281  0.4646  0.5145\n",
      "[torch.FloatTensor of size 1x10]\n",
      ", Variable containing:\n",
      "    1     2     3     4     5     6     7     8     9    10\n",
      "[torch.FloatTensor of size 1x10]\n",
      ")\n",
      "(Variable containing:\n",
      "-0.0291  0.2927  0.2018  0.3052  0.4399  0.3470  0.3272  0.4194  0.5561  0.6071\n",
      "[torch.FloatTensor of size 1x10]\n",
      ", Variable containing:\n",
      "    1     2     3     4     5     6     7     8     9    10\n",
      "[torch.FloatTensor of size 1x10]\n",
      ")\n",
      "(Variable containing:\n",
      "-0.0247  0.3149  0.2164  0.3453  0.4927  0.3971  0.3772  0.4971  0.6338  0.6973\n",
      "[torch.FloatTensor of size 1x10]\n",
      ", Variable containing:\n",
      "    1     2     3     4     5     6     7     8     9    10\n",
      "[torch.FloatTensor of size 1x10]\n",
      ")\n",
      "(Variable containing:\n",
      "-0.0192  0.3426  0.2362  0.3930  0.5573  0.4562  0.4400  0.5915  0.7289  0.8067\n",
      "[torch.FloatTensor of size 1x10]\n",
      ", Variable containing:\n",
      "    1     2     3     4     5     6     7     8     9    10\n",
      "[torch.FloatTensor of size 1x10]\n",
      ")\n",
      "(Variable containing:\n",
      "-0.0102  0.3746  0.2632  0.4507  0.6344  0.5283  0.5213  0.7050  0.8480  0.9410\n",
      "[torch.FloatTensor of size 1x10]\n",
      ", Variable containing:\n",
      "    1     2     3     4     5     6     7     8     9    10\n",
      "[torch.FloatTensor of size 1x10]\n",
      ")\n",
      "(Variable containing:\n",
      " 0.0019  0.4116  0.2986  0.5196  0.7248  0.6148  0.6235  0.8407  0.9949  1.1035\n",
      "[torch.FloatTensor of size 1x10]\n",
      ", Variable containing:\n",
      "    1     2     3     4     5     6     7     8     9    10\n",
      "[torch.FloatTensor of size 1x10]\n",
      ")\n",
      "CPU times: user 208 ms, sys: 0 ns, total: 208 ms\n",
      "Wall time: 50.1 ms\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "import torch.optim as optim\n",
    "\n",
    "net.zero_grad()\n",
    "optimizer = optim.SGD(net.parameters(), lr = 0.01)\n",
    "optimizer.zero_grad()\n",
    "for i in range(10):\n",
    "    output = net(input)\n",
    "    loss = criterion(output, target)\n",
    "    loss.backward()\n",
    "    optimizer.step()\n",
    "    print(output, target)"
   ]
  }
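  ,
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "For monitoring convergence it is usually enough to look at the scalar loss rather than the full output tensor; on this PyTorch version the Python number can be read with `loss.data[0]` (`loss.item()` from 0.4 onwards). A quick check of the loss after the ten SGD steps above:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Re-evaluate the loss after training and print only the scalar value.\n",
    "final_loss = criterion(net(input), target)\n",
    "print(final_loss.data[0])"
   ]
  }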
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "deeplearning",
   "language": "python",
   "name": "deeplearning"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}