{ "cells": [ { "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "# **2장 사전 훈련된 신경망**\n", "## 3 뉴럴토크2\n", "- [ImageCaptioning.pytorch - Guthub](https://github.com/deep-learning-with-pytorch/ImageCaptioning.pytorch)\n", "\n", "## 4 Torch Hub\n", "- [Torch Hub Document](https://pytorch.org/docs/stable/hub.html)\n", "- [Torch Hub Model List](https://pytorch.org/hub/research-models)\n", "- [Torch Vision Github](https://github.com/pytorch/vision)\n", "\n", "[다음과 같은 Warning 을 출력하는 경우](https://github.com/pytorch/vision/issues/2291#issuecomment-639552166)\n", "```\n", "Did you have `libjpeg` or `libpng` installed before building `torchvision` \n", "! pip3 install auditwheel\n", "```" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "Using cache found in /home/buffet/.cache/torch/hub/pytorch_vision_main\n" ] }, { "data": { "text/plain": [ "(99, ['alexnet', 'efficientnet_v2_l', 'mvit_v2_s', 'resnet101', 'swin_t'])" ] }, "execution_count": 7, "metadata": {}, "output_type": "execute_result" } ], "source": [ "import torch\n", "entrypoints = torch.hub.list('pytorch/vision') # , force_reload=True)\n", "len(entrypoints), entrypoints[::20]" ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "Using cache found in /home/buffet/.cache/torch/hub/pytorch_vision_main\n", "Downloading: \"https://github.com/pytorch/vision/zipball/main\" to /home/buffet/.cache/torch/hub/main.zip\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "ResNet-18 from `Deep Residual Learning for Image Recognition `__.\n", "\n", " Args:\n", " weights (:class:`~torchvision.models.ResNet18_Weights`, optional): The\n", " pretrained weights to use. See\n", " :class:`~torchvision.models.ResNet18_Weights` below for\n", " more details, and possible values. By default, no pre-trained\n", " weights are used.\n", " progress (bool, optional): If True, displays a progress bar of the\n", " download to stderr. Default is True.\n", " **kwargs: parameters passed to the ``torchvision.models.resnet.ResNet``\n", " base class. Please refer to the `source code\n", " `_\n", " for more details about this class.\n", "\n", " .. autoclass:: torchvision.models.ResNet18_Weights\n", " :members:\n", " \n" ] } ], "source": [ "# Resnet18 모델 호출 및 파라미터 확인\n", "resnet18 = torch.hub.load('pytorch/vision', 'resnet18', weights='DEFAULT')\n", "print(torch.hub.help('pytorch/vision', 'resnet18', force_reload=True))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 3 MNIST\n", "- Convolution Model" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "" ] }, "execution_count": 3, "metadata": {}, "output_type": "execute_result" } ], "source": [ "import torch\n", "import torch.nn as nn\n", "import torch.nn.functional as F\n", "import torch.optim as optim\n", "from torchvision import datasets, transforms\n", "torch.manual_seed(4242)\n", "\n", "train_loader = torch.utils.data.DataLoader(\n", " datasets.MNIST('data/ch02/mnist', train=True, download=True,\n", " transform=transforms.Compose([\n", " transforms.ToTensor(),\n", " transforms.Normalize((0.1307,), (0.3081,))\n", " ])),\n", " batch_size=64, shuffle=True)\n", "train_loader" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "Net(\n", " (conv1): Conv2d(1, 10, kernel_size=(5, 5), stride=(1, 1))\n", " (conv2): Conv2d(10, 20, kernel_size=(5, 5), stride=(1, 1))\n", " (conv2_drop): Dropout2d(p=0.5, inplace=False)\n", " (fc1): Linear(in_features=320, out_features=50, bias=True)\n", " (fc2): Linear(in_features=50, out_features=10, bias=True)\n", ")" ] }, "execution_count": 4, "metadata": {}, "output_type": "execute_result" } ], "source": [ "class Net(nn.Module):\n", " def __init__(self):\n", " super(Net, self).__init__()\n", " self.conv1 = nn.Conv2d(1, 10, kernel_size=5)\n", " self.conv2 = nn.Conv2d(10, 20, kernel_size=5)\n", " self.conv2_drop = nn.Dropout2d()\n", " self.fc1 = nn.Linear(320, 50)\n", " self.fc2 = nn.Linear(50, 10)\n", "\n", " def forward(self, x):\n", " x = F.relu(F.max_pool2d(self.conv1(x), 2))\n", " x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))\n", " x = x.view(-1, 320)\n", " x = F.relu(self.fc1(x))\n", " x = F.dropout(x, training=self.training)\n", " x = self.fc2(x)\n", " return F.log_softmax(x, dim=1)\n", "\n", "model = Net()\n", "model" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Epoch : 0 >>> Current loss 0.48367545008659363\n", "Epoch : 1 >>> Current loss 0.13018010556697845\n", "Epoch : 2 >>> Current loss 0.4380616843700409\n", "Epoch : 3 >>> Current loss 0.44207993149757385\n", "Epoch : 4 >>> Current loss 0.21662652492523193\n", "Epoch : 5 >>> Current loss 0.3018296957015991\n", "Epoch : 6 >>> Current loss 0.23590700328350067\n", "Epoch : 7 >>> Current loss 0.2751865088939667\n", "Epoch : 8 >>> Current loss 0.0252096988260746\n", "Epoch : 9 >>> Current loss 0.06518728286027908\n" ] }, { "data": { "text/plain": [ "" ] }, "execution_count": 5, "metadata": {}, "output_type": "execute_result" } ], "source": [ "optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)\n", "for epoch in range(10):\n", " for batch_idx, (data, target) in enumerate(train_loader):\n", " optimizer.zero_grad()\n", " output = model(data)\n", " loss = F.nll_loss(output, target)\n", " loss.backward()\n", " optimizer.step()\n", " print(f'Epoch : {epoch} >>> Current loss', float(loss))\n", "\n", "torch.save(model.state_dict(), 'data/ch02/mnist/mnist.pth')\n", "pretrained_model = Net()\n", "pretrained_model.load_state_dict(torch.load('data/ch02/mnist/mnist.pth'))" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.11" } }, "nbformat": 4, "nbformat_minor": 4 }