{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "Deep Learning Models -- A collection of various deep learning architectures, models, and tips for TensorFlow and PyTorch in Jupyter Notebooks.\n", "- Author: Sebastian Raschka\n", "- GitHub Repository: https://github.com/rasbt/deeplearning-models" ] }, { "cell_type": "markdown", "metadata": { "colab_type": "text", "id": "vY4SK0xKAJgm" }, "source": [ "# Model Zoo -- Multilayer bidirectional RNN with LSTM" ] }, { "cell_type": "markdown", "metadata": { "colab_type": "text", "id": "sc6xejhY-NzZ" }, "source": [ "Demo of a bidirectional RNN for sentiment classification (here: a binary classification problem with two labels, positive and negative) using LSTM (Long Short Term Memory) cells." ] }, { "cell_type": "code", "execution_count": 1, "metadata": { "colab": {}, "colab_type": "code", "id": "moNmVfuvnImW" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Sebastian Raschka \n", "\n", "CPython 3.7.1\n", "IPython 7.4.0\n", "\n", "torch 1.0.1.post2\n" ] } ], "source": [ "%load_ext watermark\n", "%watermark -a 'Sebastian Raschka' -v -p torch\n", "\n", "import torch\n", "import torch.nn.functional as F\n", "from torchtext import data\n", "from torchtext import datasets\n", "import time\n", "import random\n", "\n", "torch.backends.cudnn.deterministic = True" ] }, { "cell_type": "markdown", "metadata": { "colab_type": "text", "id": "GSRL42Qgy8I8" }, "source": [ "## General Settings" ] }, { "cell_type": "code", "execution_count": 2, "metadata": { "colab": {}, "colab_type": "code", "id": "OvW1RgfepCBq" }, "outputs": [], "source": [ "RANDOM_SEED = 123\n", "torch.manual_seed(RANDOM_SEED)\n", "\n", "VOCABULARY_SIZE = 20000\n", "LEARNING_RATE = 1e-4\n", "BATCH_SIZE = 128\n", "NUM_EPOCHS = 15\n", "DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n", "BIDIRECTIONAL = True\n", "\n", "EMBEDDING_DIM = 128\n", "NUM_LAYERS = 2\n", "HIDDEN_DIM = 128\n", "OUTPUT_DIM = 1" ] }, { "cell_type": "markdown", "metadata": { "colab_type": "text", "id": "mQMmKUEisW4W" }, "source": [ "## Dataset" ] }, { "cell_type": "markdown", "metadata": { "colab_type": "text", "id": "4GnH64XvsV8n" }, "source": [ "Load the IMDB Movie Review dataset:" ] }, { "cell_type": "code", "execution_count": 3, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 68 }, "colab_type": "code", "id": "WZ_4jiHVnMxN", "outputId": "003776d6-ac38-4de6-f8e0-ee48a696bd27" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Num Train: 20000\n", "Num Valid: 5000\n", "Num Test: 25000\n" ] } ], "source": [ "TEXT = data.Field(tokenize='spacy',\n", " include_lengths=True) # necessary for packed_padded_sequence\n", "LABEL = data.LabelField(dtype=torch.float)\n", "train_data, test_data = datasets.IMDB.splits(TEXT, LABEL)\n", "train_data, valid_data = train_data.split(random_state=random.seed(RANDOM_SEED),\n", " split_ratio=0.8)\n", "\n", "print(f'Num Train: {len(train_data)}')\n", "print(f'Num Valid: {len(valid_data)}')\n", "print(f'Num Test: {len(test_data)}')" ] }, { "cell_type": "markdown", "metadata": { "colab_type": "text", "id": "L-TBwKWPslPa" }, "source": [ "Build the vocabulary based on the top \"VOCABULARY_SIZE\" words:" ] }, { "cell_type": "code", "execution_count": 4, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 51 }, "colab_type": "code", "id": "e8uNrjdtn4A8", "outputId": "155fec65-7cee-4c1e-c928-da415a962be1" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ 
"Vocabulary size: 20002\n", "Number of classes: 2\n" ] } ], "source": [ "TEXT.build_vocab(train_data, max_size=VOCABULARY_SIZE)\n", "LABEL.build_vocab(train_data)\n", "\n", "print(f'Vocabulary size: {len(TEXT.vocab)}')\n", "print(f'Number of classes: {len(LABEL.vocab)}')" ] }, { "cell_type": "markdown", "metadata": { "colab_type": "text", "id": "JpEMNInXtZsb" }, "source": [ "The TEXT.vocab dictionary will contain the word counts and indices. The reason why the number of words is VOCABULARY_SIZE + 2 is that it contains to special tokens for padding and unknown words: `` and ``." ] }, { "cell_type": "markdown", "metadata": { "colab_type": "text", "id": "eIQ_zfKLwjKm" }, "source": [ "Make dataset iterators:" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": {}, "colab_type": "code", "id": "i7JiHR1stHNF" }, "outputs": [], "source": [ "train_loader, valid_loader, test_loader = data.BucketIterator.splits(\n", " (train_data, valid_data, test_data), \n", " batch_size=BATCH_SIZE,\n", " sort_within_batch=True, # necessary for packed_padded_sequence\n", " device=DEVICE)" ] }, { "cell_type": "markdown", "metadata": { "colab_type": "text", "id": "R0pT_dMRvicQ" }, "source": [ "Testing the iterators (note that the number of rows depends on the longest document in the respective batch):" ] }, { "cell_type": "code", "execution_count": 6, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 204 }, "colab_type": "code", "id": "y8SP_FccutT0", "outputId": "c7dfa012-d06f-4702-9caa-7cde4eee1ebd" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Train\n", "Text matrix size: torch.Size([132, 128])\n", "Target vector size: torch.Size([128])\n", "\n", "Valid:\n", "Text matrix size: torch.Size([61, 128])\n", "Target vector size: torch.Size([128])\n", "\n", "Test:\n", "Text matrix size: torch.Size([42, 128])\n", "Target vector size: torch.Size([128])\n" ] } ], "source": [ "print('Train')\n", "for batch in train_loader:\n", " print(f'Text matrix size: {batch.text[0].size()}')\n", " print(f'Target vector size: {batch.label.size()}')\n", " break\n", " \n", "print('\\nValid:')\n", "for batch in valid_loader:\n", " print(f'Text matrix size: {batch.text[0].size()}')\n", " print(f'Target vector size: {batch.label.size()}')\n", " break\n", " \n", "print('\\nTest:')\n", "for batch in test_loader:\n", " print(f'Text matrix size: {batch.text[0].size()}')\n", " print(f'Target vector size: {batch.label.size()}')\n", " break" ] }, { "cell_type": "markdown", "metadata": { "colab_type": "text", "id": "G_grdW3pxCzz" }, "source": [ "## Model" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": {}, "colab_type": "code", "id": "nQIUm5EjxFNa" }, "outputs": [], "source": [ "import torch.nn as nn\n", "\n", "class RNN(nn.Module):\n", " def __init__(self, input_dim, embedding_dim, hidden_dim, output_dim):\n", " \n", " super().__init__()\n", " \n", " self.embedding = nn.Embedding(input_dim, embedding_dim)\n", " self.rnn = nn.LSTM(embedding_dim,\n", " hidden_dim,\n", " num_layers=NUM_LAYERS,\n", " bidirectional=BIDIRECTIONAL)\n", " self.fc = nn.Linear(hidden_dim*2, output_dim)\n", " \n", " def forward(self, text, text_length):\n", "\n", " #[sentence len, batch size] => [sentence len, batch size, embedding size]\n", " embedded = self.embedding(text)\n", " \n", " packed = torch.nn.utils.rnn.pack_padded_sequence(embedded, text_length)\n", " \n", " packed_output, (hidden, cell) = self.rnn(packed)\n", " \n", " # combine both directions\n", " combined = 
{ "cell_type": "code", "execution_count": null, "metadata": { "colab": {}, "colab_type": "code", "id": "Ik3NF3faxFmZ" }, "outputs": [], "source": [ "INPUT_DIM = len(TEXT.vocab)\n", "\n", "torch.manual_seed(RANDOM_SEED)\n", "model = RNN(INPUT_DIM, EMBEDDING_DIM, HIDDEN_DIM, OUTPUT_DIM)\n", "model = model.to(DEVICE)\n", "optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)" ] },
{ "cell_type": "markdown", "metadata": { "colab_type": "text", "id": "Lv9Ny9di6VcI" }, "source": [ "## Training" ] },
{ "cell_type": "code", "execution_count": null, "metadata": { "colab": {}, "colab_type": "code", "id": "T5t1Afn4xO11" }, "outputs": [], "source": [ "def compute_binary_accuracy(model, data_loader, device):\n", "    model.eval()\n", "    correct_pred, num_examples = 0, 0\n", "    with torch.no_grad():\n", "        for batch_idx, batch_data in enumerate(data_loader):\n", "            text, text_lengths = batch_data.text\n", "            logits = model(text, text_lengths)\n", "            # a probability > 0.5 (i.e., a logit > 0) counts as the positive class\n", "            predicted_labels = (torch.sigmoid(logits) > 0.5).long()\n", "            num_examples += batch_data.label.size(0)\n", "            correct_pred += (predicted_labels == batch_data.label.long()).sum()\n", "        return correct_pred.float()/num_examples * 100" ] },
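{ "cell_type": "markdown", "metadata": {}, "source": [ "The training loop below uses `F.binary_cross_entropy_with_logits`, which fuses the sigmoid and the binary cross-entropy into one numerically more stable call, so the model can return raw logits. A quick equivalence check on illustrative toy values:" ] },
{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import torch\n", "import torch.nn.functional as F\n", "\n", "# Illustrative toy logits and labels (not taken from the model)\n", "toy_logits = torch.tensor([1.5, -0.3, 0.0])\n", "toy_labels = torch.tensor([1., 0., 1.])\n", "\n", "fused = F.binary_cross_entropy_with_logits(toy_logits, toy_labels)\n", "manual = F.binary_cross_entropy(torch.sigmoid(toy_logits), toy_labels)\n", "print(fused.item(), manual.item())  # both approx. 0.483" ] },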
"training accuracy: 85.21%\n", "valid accuracy: 80.54%\n", "Time elapsed: 2.35 min\n", "Epoch: 008/015 | Batch 000/157 | Cost: 0.3074\n", "Epoch: 008/015 | Batch 050/157 | Cost: 0.2903\n", "Epoch: 008/015 | Batch 100/157 | Cost: 0.3141\n", "Epoch: 008/015 | Batch 150/157 | Cost: 0.2595\n", "training accuracy: 87.69%\n", "valid accuracy: 82.68%\n", "Time elapsed: 2.69 min\n", "Epoch: 009/015 | Batch 000/157 | Cost: 0.2799\n", "Epoch: 009/015 | Batch 050/157 | Cost: 0.2448\n", "Epoch: 009/015 | Batch 100/157 | Cost: 0.2151\n", "Epoch: 009/015 | Batch 150/157 | Cost: 0.2847\n", "training accuracy: 88.93%\n", "valid accuracy: 83.70%\n", "Time elapsed: 3.03 min\n", "Epoch: 010/015 | Batch 000/157 | Cost: 0.2497\n", "Epoch: 010/015 | Batch 050/157 | Cost: 0.2540\n", "Epoch: 010/015 | Batch 100/157 | Cost: 0.3835\n", "Epoch: 010/015 | Batch 150/157 | Cost: 0.2845\n", "training accuracy: 90.00%\n", "valid accuracy: 84.10%\n", "Time elapsed: 3.38 min\n", "Epoch: 011/015 | Batch 000/157 | Cost: 0.2449\n", "Epoch: 011/015 | Batch 050/157 | Cost: 0.1758\n", "Epoch: 011/015 | Batch 100/157 | Cost: 0.1718\n", "Epoch: 011/015 | Batch 150/157 | Cost: 0.2826\n", "training accuracy: 91.14%\n", "valid accuracy: 85.00%\n", "Time elapsed: 3.71 min\n", "Epoch: 012/015 | Batch 000/157 | Cost: 0.1856\n", "Epoch: 012/015 | Batch 050/157 | Cost: 0.2359\n", "Epoch: 012/015 | Batch 100/157 | Cost: 0.2082\n", "Epoch: 012/015 | Batch 150/157 | Cost: 0.2608\n", "training accuracy: 91.82%\n", "valid accuracy: 85.04%\n", "Time elapsed: 4.05 min\n", "Epoch: 013/015 | Batch 000/157 | Cost: 0.2708\n", "Epoch: 013/015 | Batch 050/157 | Cost: 0.2684\n", "Epoch: 013/015 | Batch 100/157 | Cost: 0.1688\n", "Epoch: 013/015 | Batch 150/157 | Cost: 0.2290\n", "training accuracy: 92.83%\n", "valid accuracy: 85.64%\n", "Time elapsed: 4.39 min\n", "Epoch: 014/015 | Batch 000/157 | Cost: 0.2259\n", "Epoch: 014/015 | Batch 050/157 | Cost: 0.1845\n", "Epoch: 014/015 | Batch 100/157 | Cost: 0.1361\n", "Epoch: 014/015 | Batch 150/157 | Cost: 0.2182\n", "training accuracy: 93.69%\n", "valid accuracy: 85.96%\n", "Time elapsed: 4.73 min\n", "Epoch: 015/015 | Batch 000/157 | Cost: 0.2125\n", "Epoch: 015/015 | Batch 050/157 | Cost: 0.1962\n", "Epoch: 015/015 | Batch 100/157 | Cost: 0.2078\n", "Epoch: 015/015 | Batch 150/157 | Cost: 0.5364\n", "training accuracy: 91.02%\n", "valid accuracy: 83.28%\n", "Time elapsed: 5.06 min\n", "Total Training Time: 5.06 min\n", "Test accuracy: 83.21%\n" ] } ], "source": [ "start_time = time.time()\n", "\n", "for epoch in range(NUM_EPOCHS):\n", " model.train()\n", " for batch_idx, batch_data in enumerate(train_loader):\n", " \n", " text, text_lengths = batch_data.text\n", " \n", " ### FORWARD AND BACK PROP\n", " logits = model(text, text_lengths)\n", " cost = F.binary_cross_entropy_with_logits(logits, batch_data.label)\n", " optimizer.zero_grad()\n", " \n", " cost.backward()\n", " \n", " ### UPDATE MODEL PARAMETERS\n", " optimizer.step()\n", " \n", " ### LOGGING\n", " if not batch_idx % 50:\n", " print (f'Epoch: {epoch+1:03d}/{NUM_EPOCHS:03d} | '\n", " f'Batch {batch_idx:03d}/{len(train_loader):03d} | '\n", " f'Cost: {cost:.4f}')\n", "\n", " with torch.set_grad_enabled(False):\n", " print(f'training accuracy: '\n", " f'{compute_binary_accuracy(model, train_loader, DEVICE):.2f}%'\n", " f'\\nvalid accuracy: '\n", " f'{compute_binary_accuracy(model, valid_loader, DEVICE):.2f}%')\n", " \n", " print(f'Time elapsed: {(time.time() - start_time)/60:.2f} min')\n", " \n", "print(f'Total Training Time: {(time.time() - 
{ "cell_type": "code", "execution_count": null, "metadata": { "colab": {}, "colab_type": "code", "id": "jt55pscgFdKZ" }, "outputs": [], "source": [ "import spacy\n", "nlp = spacy.load('en')\n", "\n", "def predict_sentiment(model, sentence):\n", "    # based on:\n", "    # https://github.com/bentrevett/pytorch-sentiment-analysis/blob/\n", "    # master/2%20-%20Upgraded%20Sentiment%20Analysis.ipynb\n", "    model.eval()\n", "    tokenized = [tok.text for tok in nlp.tokenizer(sentence)]\n", "    indexed = [TEXT.vocab.stoi[t] for t in tokenized]\n", "    length = [len(indexed)]\n", "    tensor = torch.LongTensor(indexed).to(DEVICE)\n", "    tensor = tensor.unsqueeze(1)  # add a batch dimension of size 1\n", "    length_tensor = torch.LongTensor(length)\n", "    prediction = torch.sigmoid(model(tensor, length_tensor))\n", "    return prediction.item()" ] },
{ "cell_type": "code", "execution_count": 12, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 51 }, "colab_type": "code", "id": "O4__q0coFJyw", "outputId": "647a03b4-ff7e-4892-b279-f33fee690f81" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Probability positive:\n" ] }, { "data": { "text/plain": [ "0.9388696551322937" ] }, "execution_count": 12, "metadata": { "tags": [] }, "output_type": "execute_result" } ], "source": [ "print('Probability positive:')\n", "predict_sentiment(model, \"I really love this movie. This movie is so great!\")" ] } ], "metadata": { "accelerator": "GPU", "colab": { "collapsed_sections": [], "name": "rnn_lstm_bi_imdb.ipynb", "provenance": [], "version": "0.3.2" }, "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.7.1" } }, "nbformat": 4, "nbformat_minor": 2 }