{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "Deep Learning Models -- A collection of various deep learning architectures, models, and tips for TensorFlow and PyTorch in Jupyter Notebooks.\n", "- Author: Sebastian Raschka\n", "- GitHub Repository: https://github.com/rasbt/deeplearning-models" ] }, { "cell_type": "markdown", "metadata": { "colab_type": "text", "id": "vY4SK0xKAJgm" }, "source": [ "# Model Zoo -- RNN with GRU" ] }, { "cell_type": "markdown", "metadata": { "colab_type": "text", "id": "sc6xejhY-NzZ" }, "source": [ "Demo of a simple RNN for sentiment classification (here: a binary classification problem with two labels, positive and negative) using GRU (Gated Rectified Unit) cells." ] }, { "cell_type": "code", "execution_count": 1, "metadata": { "colab": {}, "colab_type": "code", "id": "moNmVfuvnImW" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Sebastian Raschka \n", "\n", "CPython 3.7.1\n", "IPython 7.4.0\n", "\n", "torch 1.0.1.post2\n" ] } ], "source": [ "%load_ext watermark\n", "%watermark -a 'Sebastian Raschka' -v -p torch\n", "\n", "import torch\n", "import torch.nn.functional as F\n", "from torchtext import data\n", "from torchtext import datasets\n", "import time\n", "import random\n", "\n", "torch.backends.cudnn.deterministic = True" ] }, { "cell_type": "markdown", "metadata": { "colab_type": "text", "id": "GSRL42Qgy8I8" }, "source": [ "## General Settings" ] }, { "cell_type": "code", "execution_count": 2, "metadata": { "colab": {}, "colab_type": "code", "id": "OvW1RgfepCBq" }, "outputs": [], "source": [ "RANDOM_SEED = 123\n", "torch.manual_seed(RANDOM_SEED)\n", "\n", "VOCABULARY_SIZE = 20000\n", "LEARNING_RATE = 1e-4\n", "BATCH_SIZE = 128\n", "NUM_EPOCHS = 15\n", "DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n", "\n", "EMBEDDING_DIM = 128\n", "HIDDEN_DIM = 256\n", "OUTPUT_DIM = 1" ] }, { "cell_type": "markdown", "metadata": { "colab_type": "text", "id": "mQMmKUEisW4W" }, "source": [ "## Dataset" ] }, { "cell_type": "markdown", "metadata": { "colab_type": "text", "id": "4GnH64XvsV8n" }, "source": [ "Load the IMDB Movie Review dataset:" ] }, { "cell_type": "code", "execution_count": 3, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 102 }, "colab_type": "code", "id": "WZ_4jiHVnMxN", "outputId": "6653638a-24d2-49a9-988a-9d9165cec548" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "downloading aclImdb_v1.tar.gz\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "aclImdb_v1.tar.gz: 100%|██████████| 84.1M/84.1M [00:08<00:00, 9.50MB/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Num Train: 20000\n", "Num Valid: 5000\n", "Num Test: 25000\n" ] } ], "source": [ "TEXT = data.Field(tokenize='spacy',\n", " include_lengths=True) # necessary for packed_padded_sequence\n", "LABEL = data.LabelField(dtype=torch.float)\n", "train_data, test_data = datasets.IMDB.splits(TEXT, LABEL)\n", "train_data, valid_data = train_data.split(random_state=random.seed(RANDOM_SEED),\n", " split_ratio=0.8)\n", "\n", "print(f'Num Train: {len(train_data)}')\n", "print(f'Num Valid: {len(valid_data)}')\n", "print(f'Num Test: {len(test_data)}')" ] }, { "cell_type": "markdown", "metadata": { "colab_type": "text", "id": "L-TBwKWPslPa" }, "source": [ "Build the vocabulary based on the top \"VOCABULARY_SIZE\" words:" ] }, { "cell_type": "code", "execution_count": 4, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 51 }, "colab_type": 
"code", "id": "e8uNrjdtn4A8", "outputId": "7eb14563-3cdc-4386-8a0c-6066161faeb1" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Vocabulary size: 20002\n", "Number of classes: 2\n" ] } ], "source": [ "TEXT.build_vocab(train_data, max_size=VOCABULARY_SIZE)\n", "LABEL.build_vocab(train_data)\n", "\n", "print(f'Vocabulary size: {len(TEXT.vocab)}')\n", "print(f'Number of classes: {len(LABEL.vocab)}')" ] }, { "cell_type": "markdown", "metadata": { "colab_type": "text", "id": "JpEMNInXtZsb" }, "source": [ "The TEXT.vocab dictionary will contain the word counts and indices. The reason why the number of words is VOCABULARY_SIZE + 2 is that it contains to special tokens for padding and unknown words: `` and ``." ] }, { "cell_type": "markdown", "metadata": { "colab_type": "text", "id": "eIQ_zfKLwjKm" }, "source": [ "Make dataset iterators:" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": {}, "colab_type": "code", "id": "i7JiHR1stHNF" }, "outputs": [], "source": [ "train_loader, valid_loader, test_loader = data.BucketIterator.splits(\n", " (train_data, valid_data, test_data), \n", " batch_size=BATCH_SIZE,\n", " sort_within_batch=True, # necessary for packed_padded_sequence\n", " device=DEVICE)" ] }, { "cell_type": "markdown", "metadata": { "colab_type": "text", "id": "R0pT_dMRvicQ" }, "source": [ "Testing the iterators (note that the number of rows depends on the longest document in the respective batch):" ] }, { "cell_type": "code", "execution_count": 6, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 204 }, "colab_type": "code", "id": "y8SP_FccutT0", "outputId": "13c0ea99-41fe-416c-e809-c950c2f0737f" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Train\n", "Text matrix size: torch.Size([132, 128])\n", "Target vector size: torch.Size([128])\n", "\n", "Valid:\n", "Text matrix size: torch.Size([61, 128])\n", "Target vector size: torch.Size([128])\n", "\n", "Test:\n", "Text matrix size: torch.Size([42, 128])\n", "Target vector size: torch.Size([128])\n" ] } ], "source": [ "print('Train')\n", "for batch in train_loader:\n", " print(f'Text matrix size: {batch.text[0].size()}')\n", " print(f'Target vector size: {batch.label.size()}')\n", " break\n", " \n", "print('\\nValid:')\n", "for batch in valid_loader:\n", " print(f'Text matrix size: {batch.text[0].size()}')\n", " print(f'Target vector size: {batch.label.size()}')\n", " break\n", " \n", "print('\\nTest:')\n", "for batch in test_loader:\n", " print(f'Text matrix size: {batch.text[0].size()}')\n", " print(f'Target vector size: {batch.label.size()}')\n", " break" ] }, { "cell_type": "markdown", "metadata": { "colab_type": "text", "id": "G_grdW3pxCzz" }, "source": [ "## Model" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": {}, "colab_type": "code", "id": "nQIUm5EjxFNa" }, "outputs": [], "source": [ "import torch.nn as nn\n", "\n", "class RNN(nn.Module):\n", " def __init__(self, input_dim, embedding_dim, hidden_dim, output_dim):\n", " \n", " super().__init__()\n", " \n", " self.embedding = nn.Embedding(input_dim, embedding_dim)\n", " self.rnn = nn.GRU(embedding_dim, hidden_dim)\n", " self.fc = nn.Linear(hidden_dim, output_dim)\n", " \n", " def forward(self, text, text_length):\n", "\n", " #[sentence len, batch size] => [sentence len, batch size, embedding size]\n", " embedded = self.embedding(text)\n", " \n", " packed = torch.nn.utils.rnn.pack_padded_sequence(embedded, text_length)\n", " \n", " #[sentence len, batch size, 
embedding size] => \n", " # output: [sentence len, batch size, hidden size]\n", " # hidden: [1, batch size, hidden size]\n", " packed_output, hidden = self.rnn(packed)\n", " \n", " return self.fc(hidden.squeeze(0)).view(-1)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": {}, "colab_type": "code", "id": "Ik3NF3faxFmZ" }, "outputs": [], "source": [ "INPUT_DIM = len(TEXT.vocab)\n", "\n", "torch.manual_seed(RANDOM_SEED)\n", "model = RNN(INPUT_DIM, EMBEDDING_DIM, HIDDEN_DIM, OUTPUT_DIM)\n", "model = model.to(DEVICE)\n", "optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)" ] }, { "cell_type": "markdown", "metadata": { "colab_type": "text", "id": "Lv9Ny9di6VcI" }, "source": [ "## Training" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": {}, "colab_type": "code", "id": "T5t1Afn4xO11" }, "outputs": [], "source": [ "def compute_binary_accuracy(model, data_loader, device):\n", " model.eval()\n", " correct_pred, num_examples = 0, 0\n", " with torch.no_grad():\n", " for batch_idx, batch_data in enumerate(data_loader):\n", " text, text_lengths = batch_data.text\n", " logits = model(text, text_lengths)\n", " predicted_labels = (torch.sigmoid(logits) > 0.5).long()\n", " num_examples += batch_data.label.size(0)\n", " correct_pred += (predicted_labels == batch_data.label.long()).sum()\n", " return correct_pred.float()/num_examples * 100" ] }, { "cell_type": "code", "execution_count": 14, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 1836 }, "colab_type": "code", "id": "EABZM8Vo0ilB", "outputId": "929868ae-bad3-43c0-a37e-d18cfb6fa9af" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Epoch: 001/015 | Batch 000/157 | Cost: 0.6869\n", "Epoch: 001/015 | Batch 050/157 | Cost: 0.6909\n", "Epoch: 001/015 | Batch 100/157 | Cost: 0.6929\n", "Epoch: 001/015 | Batch 150/157 | Cost: 0.6667\n", "training accuracy: 58.70%\n", "valid accuracy: 57.30%\n", "Time elapsed: 0.18 min\n", "Epoch: 002/015 | Batch 000/157 | Cost: 0.6831\n", "Epoch: 002/015 | Batch 050/157 | Cost: 0.6460\n", "Epoch: 002/015 | Batch 100/157 | Cost: 0.6443\n", "Epoch: 002/015 | Batch 150/157 | Cost: 0.6239\n", "training accuracy: 68.70%\n", "valid accuracy: 67.54%\n", "Time elapsed: 0.36 min\n", "Epoch: 003/015 | Batch 000/157 | Cost: 0.4887\n", "Epoch: 003/015 | Batch 050/157 | Cost: 0.5954\n", "Epoch: 003/015 | Batch 100/157 | Cost: 0.6105\n", "Epoch: 003/015 | Batch 150/157 | Cost: 0.5285\n", "training accuracy: 75.35%\n", "valid accuracy: 73.62%\n", "Time elapsed: 0.55 min\n", "Epoch: 004/015 | Batch 000/157 | Cost: 0.4711\n", "Epoch: 004/015 | Batch 050/157 | Cost: 0.5610\n", "Epoch: 004/015 | Batch 100/157 | Cost: 0.4648\n", "Epoch: 004/015 | Batch 150/157 | Cost: 0.4983\n", "training accuracy: 79.68%\n", "valid accuracy: 77.00%\n", "Time elapsed: 0.73 min\n", "Epoch: 005/015 | Batch 000/157 | Cost: 0.4718\n", "Epoch: 005/015 | Batch 050/157 | Cost: 0.4375\n", "Epoch: 005/015 | Batch 100/157 | Cost: 0.4393\n", "Epoch: 005/015 | Batch 150/157 | Cost: 0.4138\n", "training accuracy: 81.71%\n", "valid accuracy: 78.18%\n", "Time elapsed: 0.92 min\n", "Epoch: 006/015 | Batch 000/157 | Cost: 0.3452\n", "Epoch: 006/015 | Batch 050/157 | Cost: 0.3552\n", "Epoch: 006/015 | Batch 100/157 | Cost: 0.4116\n", "Epoch: 006/015 | Batch 150/157 | Cost: 0.4030\n", "training accuracy: 82.78%\n", "valid accuracy: 79.52%\n", "Time elapsed: 1.11 min\n", "Epoch: 007/015 | Batch 000/157 | Cost: 0.3604\n", "Epoch: 007/015 | Batch 050/157 | Cost: 
0.3680\n", "Epoch: 007/015 | Batch 100/157 | Cost: 0.3132\n", "Epoch: 007/015 | Batch 150/157 | Cost: 0.3442\n", "training accuracy: 85.72%\n", "valid accuracy: 81.90%\n", "Time elapsed: 1.30 min\n", "Epoch: 008/015 | Batch 000/157 | Cost: 0.3696\n", "Epoch: 008/015 | Batch 050/157 | Cost: 0.2850\n", "Epoch: 008/015 | Batch 100/157 | Cost: 0.3538\n", "Epoch: 008/015 | Batch 150/157 | Cost: 0.4393\n", "training accuracy: 86.21%\n", "valid accuracy: 81.56%\n", "Time elapsed: 1.48 min\n", "Epoch: 009/015 | Batch 000/157 | Cost: 0.3638\n", "Epoch: 009/015 | Batch 050/157 | Cost: 0.2887\n", "Epoch: 009/015 | Batch 100/157 | Cost: 0.3294\n", "Epoch: 009/015 | Batch 150/157 | Cost: 0.2515\n", "training accuracy: 86.36%\n", "valid accuracy: 82.18%\n", "Time elapsed: 1.67 min\n", "Epoch: 010/015 | Batch 000/157 | Cost: 0.2781\n", "Epoch: 010/015 | Batch 050/157 | Cost: 0.3547\n", "Epoch: 010/015 | Batch 100/157 | Cost: 0.2762\n", "Epoch: 010/015 | Batch 150/157 | Cost: 0.3104\n", "training accuracy: 87.39%\n", "valid accuracy: 82.92%\n", "Time elapsed: 1.86 min\n", "Epoch: 011/015 | Batch 000/157 | Cost: 0.3024\n", "Epoch: 011/015 | Batch 050/157 | Cost: 0.2901\n", "Epoch: 011/015 | Batch 100/157 | Cost: 0.1955\n", "Epoch: 011/015 | Batch 150/157 | Cost: 0.2581\n", "training accuracy: 89.20%\n", "valid accuracy: 83.66%\n", "Time elapsed: 2.05 min\n", "Epoch: 012/015 | Batch 000/157 | Cost: 0.1964\n", "Epoch: 012/015 | Batch 050/157 | Cost: 0.3578\n", "Epoch: 012/015 | Batch 100/157 | Cost: 0.2177\n", "Epoch: 012/015 | Batch 150/157 | Cost: 0.3732\n", "training accuracy: 88.12%\n", "valid accuracy: 82.82%\n", "Time elapsed: 2.24 min\n", "Epoch: 013/015 | Batch 000/157 | Cost: 0.2964\n", "Epoch: 013/015 | Batch 050/157 | Cost: 0.2757\n", "Epoch: 013/015 | Batch 100/157 | Cost: 0.4130\n", "Epoch: 013/015 | Batch 150/157 | Cost: 0.2817\n", "training accuracy: 90.71%\n", "valid accuracy: 84.54%\n", "Time elapsed: 2.43 min\n", "Epoch: 014/015 | Batch 000/157 | Cost: 0.2700\n", "Epoch: 014/015 | Batch 050/157 | Cost: 0.2832\n", "Epoch: 014/015 | Batch 100/157 | Cost: 0.3164\n", "Epoch: 014/015 | Batch 150/157 | Cost: 0.2610\n", "training accuracy: 90.95%\n", "valid accuracy: 84.68%\n", "Time elapsed: 2.61 min\n", "Epoch: 015/015 | Batch 000/157 | Cost: 0.2593\n", "Epoch: 015/015 | Batch 050/157 | Cost: 0.2185\n", "Epoch: 015/015 | Batch 100/157 | Cost: 0.3066\n", "Epoch: 015/015 | Batch 150/157 | Cost: 0.3229\n", "training accuracy: 90.88%\n", "valid accuracy: 84.88%\n", "Time elapsed: 2.79 min\n", "Total Training Time: 2.79 min\n", "Test accuracy: 85.09%\n" ] } ], "source": [ "start_time = time.time()\n", "\n", "for epoch in range(NUM_EPOCHS):\n", " model.train()\n", " for batch_idx, batch_data in enumerate(train_loader):\n", " \n", " text, text_lengths = batch_data.text\n", " \n", " ### FORWARD AND BACK PROP\n", " logits = model(text, text_lengths)\n", " cost = F.binary_cross_entropy_with_logits(logits, batch_data.label)\n", " optimizer.zero_grad()\n", " \n", " cost.backward()\n", " \n", " ### UPDATE MODEL PARAMETERS\n", " optimizer.step()\n", " \n", " ### LOGGING\n", " if not batch_idx % 50:\n", " print (f'Epoch: {epoch+1:03d}/{NUM_EPOCHS:03d} | '\n", " f'Batch {batch_idx:03d}/{len(train_loader):03d} | '\n", " f'Cost: {cost:.4f}')\n", "\n", " with torch.set_grad_enabled(False):\n", " print(f'training accuracy: '\n", " f'{compute_binary_accuracy(model, train_loader, DEVICE):.2f}%'\n", " f'\\nvalid accuracy: '\n", " f'{compute_binary_accuracy(model, valid_loader, DEVICE):.2f}%')\n", " \n", " print(f'Time 
{ "cell_type": "code", "execution_count": null, "metadata": { "colab": {}, "colab_type": "code", "id": "jt55pscgFdKZ" }, "outputs": [], "source": [ "import spacy\n", "nlp = spacy.load('en')\n", "\n", "def predict_sentiment(model, sentence):\n", "    # based on:\n", "    # https://github.com/bentrevett/pytorch-sentiment-analysis/blob/\n", "    # master/2%20-%20Upgraded%20Sentiment%20Analysis.ipynb\n", "    model.eval()\n", "    tokenized = [tok.text for tok in nlp.tokenizer(sentence)]\n", "    indexed = [TEXT.vocab.stoi[t] for t in tokenized]\n", "    length = [len(indexed)]\n", "    tensor = torch.LongTensor(indexed).to(DEVICE)\n", "    tensor = tensor.unsqueeze(1)  # add a batch dimension: [sentence len, 1]\n", "    length_tensor = torch.LongTensor(length)\n", "    prediction = torch.sigmoid(model(tensor, length_tensor))\n", "    return prediction.item()" ] },
{ "cell_type": "code", "execution_count": 16, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 51 }, "colab_type": "code", "id": "O4__q0coFJyw", "outputId": "602fe8db-6b8e-4286-b5d5-f25381afaccd" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Probability positive:\n" ] }, { "data": { "text/plain": [ "0.8322937488555908" ] }, "execution_count": 16, "metadata": { "tags": [] }, "output_type": "execute_result" } ], "source": [ "print('Probability positive:')\n", "predict_sentiment(model, \"I really love this movie. This movie is so great!\")" ] },
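{ "cell_type": "markdown", "metadata": {}, "source": [ "As a counterpart, a clearly negative sentence should yield a probability close to 0 (the example sentence is made up for illustration; run the cell to verify):" ] },
{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "print('Probability positive:')\n", "predict_sentiment(model, \"I really hate this movie. It is so boring!\")" ] },
{ "cell_type": "code", "execution_count": null, "metadata": { "colab": {}, "colab_type": "code", "id": "7lRusB3dF80X" }, "outputs": [], "source": [] } ], "metadata": { "accelerator": "GPU", "colab": { "collapsed_sections": [], "name": "rnn_gru_packed_imdb.ipynb", "provenance": [], "version": "0.3.2" }, "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.7.1" } }, "nbformat": 4, "nbformat_minor": 2 }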