{ "cells": [ { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from pytorch_pretrained_bert.tokenization import BertTokenizer, WordpieceTokenizer\n", "from pytorch_pretrained_bert.modeling import BertForPreTraining, BertPreTrainedModel, BertModel, BertConfig, BertForMaskedLM, BertForSequenceClassification\n", "from pathlib import Path\n", "import torch\n", "import re\n", "from torch import Tensor\n", "from torch.nn import BCEWithLogitsLoss\n", "from fastai.text import Tokenizer, Vocab\n", "import pandas as pd\n", "import collections\n", "import os\n", "import pdb\n", "from tqdm import tqdm, trange\n", "import sys\n", "import random\n", "import numpy as np\n", "import apex\n", "from sklearn.model_selection import train_test_split\n", "module_path = os.path.abspath(os.path.join('..'))\n", "if module_path not in sys.path:\n", " sys.path.append(module_path)\n", "\n", "from sklearn.metrics import roc_curve, auc\n", "\n", "\n", "from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n", "from torch.utils.data.distributed import DistributedSampler\n", "from pytorch_pretrained_bert.optimization import BertAdam" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import logging\n", "logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n", " datefmt='%m/%d/%Y %H:%M:%S',\n", " level=logging.INFO)\n", "logger = logging.getLogger(__name__)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "DATA_PATH=Path('../data/toxic_comments/')\n", "DATA_PATH.mkdir(exist_ok=True)\n", "\n", "PATH=Path('../data/toxic_comments/tmp')\n", "PATH.mkdir(exist_ok=True)\n", "\n", "CLAS_DATA_PATH=PATH/'class'\n", "CLAS_DATA_PATH.mkdir(exist_ok=True)\n", "\n", "model_state_dict = None\n", "\n", "# BERT_PRETRAINED_PATH = Path('../trained_model/')\n", "BERT_PRETRAINED_PATH = Path('../../complaints/bert/pretrained-weights/uncased_L-12_H-768_A-12/')\n", "# BERT_PRETRAINED_PATH = Path('../../complaints/bert/pretrained-weights/cased_L-12_H-768_A-12/')\n", "# BERT_PRETRAINED_PATH = Path('../../complaints/bert/pretrained-weights/uncased_L-24_H-1024_A-16/')\n", "\n", "\n", "# BERT_FINETUNED_WEIGHTS = Path('../trained_model/toxic_comments')\n", "\n", "PYTORCH_PRETRAINED_BERT_CACHE = BERT_PRETRAINED_PATH/'cache/'\n", "PYTORCH_PRETRAINED_BERT_CACHE.mkdir(exist_ok=True)\n", "\n", "# output_model_file = os.path.join(BERT_FINETUNED_WEIGHTS, \"pytorch_model.bin\")\n", "\n", "# Load a trained model that you have fine-tuned\n", "# model_state_dict = torch.load(output_model_file)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Model Parameters" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "args = {\n", " \"train_size\": -1,\n", " \"val_size\": -1,\n", " \"full_data_dir\": DATA_PATH,\n", " \"data_dir\": PATH,\n", " \"task_name\": \"toxic_multilabel\",\n", " \"no_cuda\": False,\n", " \"bert_model\": BERT_PRETRAINED_PATH,\n", " \"output_dir\": CLAS_DATA_PATH/'output',\n", " \"max_seq_length\": 512,\n", " \"do_train\": True,\n", " \"do_eval\": True,\n", " \"do_lower_case\": True,\n", " \"train_batch_size\": 32,\n", " \"eval_batch_size\": 32,\n", " \"learning_rate\": 3e-5,\n", " \"num_train_epochs\": 4.0,\n", " \"warmup_proportion\": 0.1,\n", " \"no_cuda\": False,\n", " \"local_rank\": -1,\n", " \"seed\": 42,\n", " \"gradient_accumulation_steps\": 1,\n", " \"optimize_on_cpu\": False,\n", " 
\"fp16\": False,\n", " \"loss_scale\": 128\n", "}" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Model Class" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "class BertForMultiLabelSequenceClassification(BertPreTrainedModel):\n", " \"\"\"BERT model for classification.\n", " This module is composed of the BERT model with a linear layer on top of\n", " the pooled output.\n", " Params:\n", " `config`: a BertConfig class instance with the configuration to build a new model.\n", " `num_labels`: the number of classes for the classifier. Default = 2.\n", " Inputs:\n", " `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]\n", " with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts\n", " `extract_features.py`, `run_classifier.py` and `run_squad.py`)\n", " `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token\n", " types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to\n", " a `sentence B` token (see BERT paper for more details).\n", " `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices\n", " selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max\n", " input sequence length in the current batch. It's the mask that we typically use for attention when\n", " a batch has varying length sentences.\n", " `labels`: labels for the classification output: torch.LongTensor of shape [batch_size]\n", " with indices selected in [0, ..., num_labels].\n", " Outputs:\n", " if `labels` is not `None`:\n", " Outputs the CrossEntropy classification loss of the output with the labels.\n", " if `labels` is `None`:\n", " Outputs the classification logits of shape [batch_size, num_labels].\n", " Example usage:\n", " ```python\n", " # Already been converted into WordPiece token ids\n", " input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])\n", " input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])\n", " token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])\n", " config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,\n", " num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)\n", " num_labels = 2\n", " model = BertForSequenceClassification(config, num_labels)\n", " logits = model(input_ids, token_type_ids, input_mask)\n", " ```\n", " \"\"\"\n", " def __init__(self, config, num_labels=2):\n", " super(BertForMultiLabelSequenceClassification, self).__init__(config)\n", " self.num_labels = num_labels\n", " self.bert = BertModel(config)\n", " self.dropout = torch.nn.Dropout(config.hidden_dropout_prob)\n", " self.classifier = torch.nn.Linear(config.hidden_size, num_labels)\n", " self.apply(self.init_bert_weights)\n", "\n", " def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):\n", " _, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)\n", " pooled_output = self.dropout(pooled_output)\n", " logits = self.classifier(pooled_output)\n", "\n", " if labels is not None:\n", " loss_fct = BCEWithLogitsLoss()\n", " loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1, self.num_labels))\n", " return loss\n", " else:\n", " return logits\n", " \n", " def freeze_bert_encoder(self):\n", " for param in self.bert.parameters():\n", " param.requires_grad = False\n", " \n", " def unfreeze_bert_encoder(self):\n", 
" for param in self.bert.parameters():\n", " param.requires_grad = True" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Data representation class" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "class InputExample(object):\n", " \"\"\"A single training/test example for simple sequence classification.\"\"\"\n", "\n", " def __init__(self, guid, text_a, text_b=None, labels=None):\n", " \"\"\"Constructs a InputExample.\n", "\n", " Args:\n", " guid: Unique id for the example.\n", " text_a: string. The untokenized text of the first sequence. For single\n", " sequence tasks, only this sequence must be specified.\n", " text_b: (Optional) string. The untokenized text of the second sequence.\n", " Only must be specified for sequence pair tasks.\n", " labels: (Optional) [string]. The label of the example. This should be\n", " specified for train and dev examples, but not for test examples.\n", " \"\"\"\n", " self.guid = guid\n", " self.text_a = text_a\n", " self.text_b = text_b\n", " self.labels = labels\n", "\n", "\n", "class InputFeatures(object):\n", " \"\"\"A single set of features of data.\"\"\"\n", "\n", " def __init__(self, input_ids, input_mask, segment_ids, label_ids):\n", " self.input_ids = input_ids\n", " self.input_mask = input_mask\n", " self.segment_ids = segment_ids\n", " self.label_ids = label_ids" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "class DataProcessor(object):\n", " \"\"\"Base class for data converters for sequence classification data sets.\"\"\"\n", "\n", " def get_train_examples(self, data_dir):\n", " \"\"\"Gets a collection of `InputExample`s for the train set.\"\"\"\n", " raise NotImplementedError()\n", "\n", " def get_dev_examples(self, data_dir):\n", " \"\"\"Gets a collection of `InputExample`s for the dev set.\"\"\"\n", " raise NotImplementedError()\n", " \n", " def get_test_examples(self, data_dir, data_file_name, size=-1):\n", " \"\"\"Gets a collection of `InputExample`s for the dev set.\"\"\"\n", " raise NotImplementedError() \n", "\n", " def get_labels(self):\n", " \"\"\"Gets the list of labels for this data set.\"\"\"\n", " raise NotImplementedError()" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "class MultiLabelTextProcessor(DataProcessor):\n", " \n", " def __init__(self, data_dir):\n", " self.data_dir = data_dir\n", " self.labels = None\n", " \n", " \n", " def get_train_examples(self, data_dir, size=-1):\n", " filename = 'train.csv'\n", " logger.info(\"LOOKING AT {}\".format(os.path.join(data_dir, filename)))\n", " if size == -1:\n", " data_df = pd.read_csv(os.path.join(data_dir, filename))\n", "# data_df['comment_text'] = data_df['comment_text'].apply(cleanHtml)\n", " return self._create_examples(data_df, \"train\")\n", " else:\n", " data_df = pd.read_csv(os.path.join(data_dir, filename))\n", "# data_df['comment_text'] = data_df['comment_text'].apply(cleanHtml)\n", " return self._create_examples(data_df.sample(size), \"train\")\n", " \n", " def get_dev_examples(self, data_dir, size=-1):\n", " \"\"\"See base class.\"\"\"\n", " filename = 'val.csv'\n", " if size == -1:\n", " data_df = pd.read_csv(os.path.join(data_dir, filename))\n", "# data_df['comment_text'] = data_df['comment_text'].apply(cleanHtml)\n", " return self._create_examples(data_df, \"dev\")\n", " else:\n", " data_df = pd.read_csv(os.path.join(data_dir, filename))\n", "# data_df['comment_text'] = 
data_df['comment_text'].apply(cleanHtml)\n", "            return self._create_examples(data_df.sample(size), \"dev\")\n", "    \n", "    def get_test_examples(self, data_dir, data_file_name, size=-1):\n", "        data_df = pd.read_csv(os.path.join(data_dir, data_file_name))\n", "# data_df['comment_text'] = data_df['comment_text'].apply(cleanHtml)\n", "        if size == -1:\n", "            return self._create_examples(data_df, \"test\")\n", "        else:\n", "            return self._create_examples(data_df.sample(size), \"test\")\n", "\n", "    def get_labels(self):\n", "        \"\"\"See base class.\"\"\"\n", "        if self.labels is None:\n", "            self.labels = list(pd.read_csv(os.path.join(self.data_dir, \"classes.txt\"),header=None)[0].values)\n", "        return self.labels\n", "\n", "    def _create_examples(self, df, set_type, labels_available=True):\n", "        \"\"\"Creates examples for the training and dev sets.\"\"\"\n", "        examples = []\n", "        for (i, row) in enumerate(df.values):\n", "            guid = row[0]\n", "            text_a = row[1]\n", "            if labels_available:\n", "                labels = row[2:]\n", "            else:\n", "                labels = []\n", "            examples.append(\n", "                InputExample(guid=guid, text_a=text_a, labels=labels))\n", "        return examples\n", "    " ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):\n", "    \"\"\"Loads a data file into a list of `InputBatch`s.\"\"\"\n", "\n", "    label_map = {label : i for i, label in enumerate(label_list)}\n", "\n", "    features = []\n", "    for (ex_index, example) in enumerate(examples):\n", "        tokens_a = tokenizer.tokenize(example.text_a)\n", "\n", "        tokens_b = None\n", "        if example.text_b:\n", "            tokens_b = tokenizer.tokenize(example.text_b)\n", "            # Modifies `tokens_a` and `tokens_b` in place so that the total\n", "            # length is less than the specified length.\n", "            # Account for [CLS], [SEP], [SEP] with \"- 3\"\n", "            _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n", "        else:\n", "            # Account for [CLS] and [SEP] with \"- 2\"\n", "            if len(tokens_a) > max_seq_length - 2:\n", "                tokens_a = tokens_a[:(max_seq_length - 2)]\n", "\n", "        # The convention in BERT is:\n", "        # (a) For sequence pairs:\n", "        #  tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n", "        #  type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n", "        # (b) For single sequences:\n", "        #  tokens: [CLS] the dog is hairy . [SEP]\n", "        #  type_ids: 0 0 0 0 0 0 0\n", "        #\n", "        # Where \"type_ids\" are used to indicate whether this is the first\n", "        # sequence or the second sequence. The embedding vectors for `type=0` and\n", "        # `type=1` were learned during pre-training and are added to the wordpiece\n", "        # embedding vector (and position vector). This is not *strictly* necessary\n", "        # since the [SEP] token unambiguously separates the sequences, but it makes\n", "        # it easier for the model to learn the concept of sequences.\n", "        #\n", "        # For classification tasks, the first vector (corresponding to [CLS]) is\n", "        # used as the \"sentence vector\". Note that this only makes sense because\n", "        # the entire model is fine-tuned.\n", "        tokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"]\n", "        segment_ids = [0] * len(tokens)\n", "\n", "        if tokens_b:\n", "            tokens += tokens_b + [\"[SEP]\"]\n", "            segment_ids += [1] * (len(tokens_b) + 1)\n", "\n", "        input_ids = tokenizer.convert_tokens_to_ids(tokens)\n", "\n", "        # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n", " # tokens are attended to.\n", " input_mask = [1] * len(input_ids)\n", "\n", " # Zero-pad up to the sequence length.\n", " padding = [0] * (max_seq_length - len(input_ids))\n", " input_ids += padding\n", " input_mask += padding\n", " segment_ids += padding\n", "\n", " assert len(input_ids) == max_seq_length\n", " assert len(input_mask) == max_seq_length\n", " assert len(segment_ids) == max_seq_length\n", " \n", " labels_ids = []\n", " for label in example.labels:\n", " labels_ids.append(float(label))\n", "\n", "# label_id = label_map[example.label]\n", " if ex_index < 0:\n", " logger.info(\"*** Example ***\")\n", " logger.info(\"guid: %s\" % (example.guid))\n", " logger.info(\"tokens: %s\" % \" \".join(\n", " [str(x) for x in tokens]))\n", " logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n", " logger.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n", " logger.info(\n", " \"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n", " logger.info(\"label: %s (id = %s)\" % (example.labels, labels_ids))\n", "\n", " features.append(\n", " InputFeatures(input_ids=input_ids,\n", " input_mask=input_mask,\n", " segment_ids=segment_ids,\n", " label_ids=labels_ids))\n", " return features" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def _truncate_seq_pair(tokens_a, tokens_b, max_length):\n", " \"\"\"Truncates a sequence pair in place to the maximum length.\"\"\"\n", "\n", " # This is a simple heuristic which will always truncate the longer sequence\n", " # one token at a time. This makes more sense than truncating an equal percent\n", " # of tokens from each, since if one sequence is very short then each token\n", " # that's truncated likely contains more information than a longer sequence.\n", " while True:\n", " total_length = len(tokens_a) + len(tokens_b)\n", " if total_length <= max_length:\n", " break\n", " if len(tokens_a) > len(tokens_b):\n", " tokens_a.pop()\n", " else:\n", " tokens_b.pop()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Metric functions" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def accuracy(out, labels):\n", " outputs = np.argmax(out, axis=1)\n", " return np.sum(outputs == labels)\n", "\n", "def accuracy_thresh(y_pred:Tensor, y_true:Tensor, thresh:float=0.5, sigmoid:bool=True):\n", " \"Compute accuracy when `y_pred` and `y_true` are the same size.\"\n", " if sigmoid: y_pred = y_pred.sigmoid()\n", "# return ((y_pred>thresh)==y_true.byte()).float().mean().item()\n", " return np.mean(((y_pred>thresh)==y_true.byte()).float().cpu().numpy(), axis=1).sum()\n", "\n", "\n", "def fbeta(y_pred:Tensor, y_true:Tensor, thresh:float=0.2, beta:float=2, eps:float=1e-9, sigmoid:bool=True):\n", " \"Computes the f_beta between `preds` and `targets`\"\n", " beta2 = beta ** 2\n", " if sigmoid: y_pred = y_pred.sigmoid()\n", " y_pred = (y_pred>thresh).float()\n", " y_true = y_true.float()\n", " TP = (y_pred*y_true).sum(dim=1)\n", " prec = TP/(y_pred.sum(dim=1)+eps)\n", " rec = TP/(y_true.sum(dim=1)+eps)\n", " res = (prec*rec)/(prec*beta2+rec+eps)*(1+beta2)\n", " return res.mean().item()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Training warmup " ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def warmup_linear(x, warmup=0.002):\n", " if x < warmup:\n", " return x/warmup\n", " return 1.0 - x" ] }, { "cell_type": "code", 
"execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "processors = {\n", " \"toxic_multilabel\": MultiLabelTextProcessor\n", "}\n", "\n", "# Setup GPU parameters\n", "\n", "if args[\"local_rank\"] == -1 or args[\"no_cuda\"]:\n", " device = torch.device(\"cuda\" if torch.cuda.is_available() and not args[\"no_cuda\"] else \"cpu\")\n", " n_gpu = torch.cuda.device_count()\n", "# n_gpu = 1\n", "else:\n", " torch.cuda.set_device(args['local_rank'])\n", " device = torch.device(\"cuda\", args['local_rank'])\n", " n_gpu = 1\n", " # Initializes the distributed backend which will take care of sychronizing nodes/GPUs\n", " torch.distributed.init_process_group(backend='nccl')\n", "logger.info(\"device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}\".format(\n", " device, n_gpu, bool(args['local_rank'] != -1), args['fp16']))" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "args['train_batch_size'] = int(args['train_batch_size'] / args['gradient_accumulation_steps'])" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "random.seed(args['seed'])\n", "np.random.seed(args['seed'])\n", "torch.manual_seed(args['seed'])\n", "if n_gpu > 0:\n", " torch.cuda.manual_seed_all(args['seed'])" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "task_name = args['task_name'].lower()\n", "\n", "if task_name not in processors:\n", " raise ValueError(\"Task not found: %s\" % (task_name))" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "processor = processors[task_name](args['data_dir'])\n", "label_list = processor.get_labels()\n", "num_labels = len(label_list)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "label_list" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "tokenizer = BertTokenizer.from_pretrained(args['bert_model'], do_lower_case=args['do_lower_case'])" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "train_examples = None\n", "num_train_steps = None\n", "if args['do_train']:\n", " train_examples = processor.get_train_examples(args['full_data_dir'], size=args['train_size'])\n", "# train_examples = processor.get_train_examples(args['data_dir'], size=args['train_size'])\n", " num_train_steps = int(\n", " len(train_examples) / args['train_batch_size'] / args['gradient_accumulation_steps'] * args['num_train_epochs'])\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Prepare model\n", "def get_model():\n", "# pdb.set_trace()\n", " if model_state_dict:\n", " model = BertForMultiLabelSequenceClassification.from_pretrained(args['bert_model'], num_labels = num_labels, state_dict=model_state_dict)\n", " else:\n", " model = BertForMultiLabelSequenceClassification.from_pretrained(args['bert_model'], num_labels = num_labels)\n", " return model\n", "\n", "model = get_model()\n", "\n", "if args['fp16']:\n", " model.half()\n", "model.to(device)\n", "if args['local_rank'] != -1:\n", " try:\n", " from apex.parallel import DistributedDataParallel as DDP\n", " except ImportError:\n", " raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.\")\n", "\n", " model = 
DDP(model)\n", "elif n_gpu > 1:\n", " model = torch.nn.DataParallel(model)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from torch.optim.lr_scheduler import _LRScheduler, Optimizer\n", "\n", "class CyclicLR(object):\n", " \"\"\"Sets the learning rate of each parameter group according to\n", " cyclical learning rate policy (CLR). The policy cycles the learning\n", " rate between two boundaries with a constant frequency, as detailed in\n", " the paper `Cyclical Learning Rates for Training Neural Networks`_.\n", " The distance between the two boundaries can be scaled on a per-iteration\n", " or per-cycle basis.\n", " Cyclical learning rate policy changes the learning rate after every batch.\n", " `batch_step` should be called after a batch has been used for training.\n", " To resume training, save `last_batch_iteration` and use it to instantiate `CycleLR`.\n", " This class has three built-in policies, as put forth in the paper:\n", " \"triangular\":\n", " A basic triangular cycle w/ no amplitude scaling.\n", " \"triangular2\":\n", " A basic triangular cycle that scales initial amplitude by half each cycle.\n", " \"exp_range\":\n", " A cycle that scales initial amplitude by gamma**(cycle iterations) at each\n", " cycle iteration.\n", " This implementation was adapted from the github repo: `bckenstler/CLR`_\n", " Args:\n", " optimizer (Optimizer): Wrapped optimizer.\n", " base_lr (float or list): Initial learning rate which is the\n", " lower boundary in the cycle for eachparam groups.\n", " Default: 0.001\n", " max_lr (float or list): Upper boundaries in the cycle for\n", " each parameter group. Functionally,\n", " it defines the cycle amplitude (max_lr - base_lr).\n", " The lr at any cycle is the sum of base_lr\n", " and some scaling of the amplitude; therefore\n", " max_lr may not actually be reached depending on\n", " scaling function. Default: 0.006\n", " step_size (int): Number of training iterations per\n", " half cycle. Authors suggest setting step_size\n", " 2-8 x training iterations in epoch. Default: 2000\n", " mode (str): One of {triangular, triangular2, exp_range}.\n", " Values correspond to policies detailed above.\n", " If scale_fn is not None, this argument is ignored.\n", " Default: 'triangular'\n", " gamma (float): Constant in 'exp_range' scaling function:\n", " gamma**(cycle iterations)\n", " Default: 1.0\n", " scale_fn (function): Custom scaling policy defined by a single\n", " argument lambda function, where\n", " 0 <= scale_fn(x) <= 1 for all x >= 0.\n", " mode paramater is ignored\n", " Default: None\n", " scale_mode (str): {'cycle', 'iterations'}.\n", " Defines whether scale_fn is evaluated on\n", " cycle number or cycle iterations (training\n", " iterations since start of cycle).\n", " Default: 'cycle'\n", " last_batch_iteration (int): The index of the last batch. Default: -1\n", " Example:\n", " >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)\n", " >>> scheduler = torch.optim.CyclicLR(optimizer)\n", " >>> data_loader = torch.utils.data.DataLoader(...)\n", " >>> for epoch in range(10):\n", " >>> for batch in data_loader:\n", " >>> scheduler.batch_step()\n", " >>> train_batch(...)\n", " .. _Cyclical Learning Rates for Training Neural Networks: https://arxiv.org/abs/1506.01186\n", " .. 
_bckenstler/CLR: https://github.com/bckenstler/CLR\n", " \"\"\"\n", "\n", " def __init__(self, optimizer, base_lr=1e-3, max_lr=6e-3,\n", " step_size=2000, mode='triangular', gamma=1.,\n", " scale_fn=None, scale_mode='cycle', last_batch_iteration=-1):\n", "\n", "# if not isinstance(optimizer, Optimizer):\n", "# raise TypeError('{} is not an Optimizer'.format(\n", "# type(optimizer).__name__))\n", " self.optimizer = optimizer\n", "\n", " if isinstance(base_lr, list) or isinstance(base_lr, tuple):\n", " if len(base_lr) != len(optimizer.param_groups):\n", " raise ValueError(\"expected {} base_lr, got {}\".format(\n", " len(optimizer.param_groups), len(base_lr)))\n", " self.base_lrs = list(base_lr)\n", " else:\n", " self.base_lrs = [base_lr] * len(optimizer.param_groups)\n", "\n", " if isinstance(max_lr, list) or isinstance(max_lr, tuple):\n", " if len(max_lr) != len(optimizer.param_groups):\n", " raise ValueError(\"expected {} max_lr, got {}\".format(\n", " len(optimizer.param_groups), len(max_lr)))\n", " self.max_lrs = list(max_lr)\n", " else:\n", " self.max_lrs = [max_lr] * len(optimizer.param_groups)\n", "\n", " self.step_size = step_size\n", "\n", " if mode not in ['triangular', 'triangular2', 'exp_range'] \\\n", " and scale_fn is None:\n", " raise ValueError('mode is invalid and scale_fn is None')\n", "\n", " self.mode = mode\n", " self.gamma = gamma\n", "\n", " if scale_fn is None:\n", " if self.mode == 'triangular':\n", " self.scale_fn = self._triangular_scale_fn\n", " self.scale_mode = 'cycle'\n", " elif self.mode == 'triangular2':\n", " self.scale_fn = self._triangular2_scale_fn\n", " self.scale_mode = 'cycle'\n", " elif self.mode == 'exp_range':\n", " self.scale_fn = self._exp_range_scale_fn\n", " self.scale_mode = 'iterations'\n", " else:\n", " self.scale_fn = scale_fn\n", " self.scale_mode = scale_mode\n", "\n", " self.batch_step(last_batch_iteration + 1)\n", " self.last_batch_iteration = last_batch_iteration\n", "\n", " def batch_step(self, batch_iteration=None):\n", " if batch_iteration is None:\n", " batch_iteration = self.last_batch_iteration + 1\n", " self.last_batch_iteration = batch_iteration\n", " for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):\n", " param_group['lr'] = lr\n", "\n", " def _triangular_scale_fn(self, x):\n", " return 1.\n", "\n", " def _triangular2_scale_fn(self, x):\n", " return 1 / (2. 
** (x - 1))\n", "\n", " def _exp_range_scale_fn(self, x):\n", " return self.gamma**(x)\n", "\n", " def get_lr(self):\n", " step_size = float(self.step_size)\n", " cycle = np.floor(1 + self.last_batch_iteration / (2 * step_size))\n", " x = np.abs(self.last_batch_iteration / step_size - 2 * cycle + 1)\n", "\n", " lrs = []\n", " param_lrs = zip(self.optimizer.param_groups, self.base_lrs, self.max_lrs)\n", " for param_group, base_lr, max_lr in param_lrs:\n", " base_height = (max_lr - base_lr) * np.maximum(0, (1 - x))\n", " if self.scale_mode == 'cycle':\n", " lr = base_lr + base_height * self.scale_fn(cycle)\n", " else:\n", " lr = base_lr + base_height * self.scale_fn(self.last_batch_iteration)\n", " lrs.append(lr)\n", " return lrs" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Prepare optimizer\n", "param_optimizer = list(model.named_parameters())\n", "no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\n", "optimizer_grouped_parameters = [\n", " {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},\n", " {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n", " ]\n", "t_total = num_train_steps\n", "if args['local_rank'] != -1:\n", " t_total = t_total // torch.distributed.get_world_size()\n", "if args['fp16']:\n", " try:\n", " from apex.optimizers import FP16_Optimizer\n", " from apex.optimizers import FusedAdam\n", " except ImportError:\n", " raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.\")\n", "\n", " optimizer = FusedAdam(optimizer_grouped_parameters,\n", " lr=args['learning_rate'],\n", " bias_correction=False,\n", " max_grad_norm=1.0)\n", " if args['loss_scale'] == 0:\n", " optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)\n", " else:\n", " optimizer = FP16_Optimizer(optimizer, static_loss_scale=args['loss_scale'])\n", "\n", "else:\n", " optimizer = BertAdam(optimizer_grouped_parameters,\n", " lr=args['learning_rate'],\n", " warmup=args['warmup_proportion'],\n", " t_total=t_total)\n", "\n", "scheduler = CyclicLR(optimizer, base_lr=2e-5, max_lr=5e-5, step_size=2500, last_batch_iteration=0)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Eval Fn\n", "eval_examples = processor.get_dev_examples(args['data_dir'], size=args['val_size'])\n", "def eval():\n", " args['output_dir'].mkdir(exist_ok=True)\n", "\n", " \n", " eval_features = convert_examples_to_features(\n", " eval_examples, label_list, args['max_seq_length'], tokenizer)\n", " logger.info(\"***** Running evaluation *****\")\n", " logger.info(\" Num examples = %d\", len(eval_examples))\n", " logger.info(\" Batch size = %d\", args['eval_batch_size'])\n", " all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)\n", " all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)\n", " all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)\n", " all_label_ids = torch.tensor([f.label_ids for f in eval_features], dtype=torch.float)\n", " eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)\n", " # Run prediction for full data\n", " eval_sampler = SequentialSampler(eval_data)\n", " eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args['eval_batch_size'])\n", " \n", " all_logits = None\n", " all_labels = 
None\n", " \n", " model.eval()\n", " eval_loss, eval_accuracy = 0, 0\n", " nb_eval_steps, nb_eval_examples = 0, 0\n", " for input_ids, input_mask, segment_ids, label_ids in eval_dataloader:\n", " input_ids = input_ids.to(device)\n", " input_mask = input_mask.to(device)\n", " segment_ids = segment_ids.to(device)\n", " label_ids = label_ids.to(device)\n", "\n", " with torch.no_grad():\n", " tmp_eval_loss = model(input_ids, segment_ids, input_mask, label_ids)\n", " logits = model(input_ids, segment_ids, input_mask)\n", "\n", "# logits = logits.detach().cpu().numpy()\n", "# label_ids = label_ids.to('cpu').numpy()\n", "# tmp_eval_accuracy = accuracy(logits, label_ids)\n", " tmp_eval_accuracy = accuracy_thresh(logits, label_ids)\n", " if all_logits is None:\n", " all_logits = logits.detach().cpu().numpy()\n", " else:\n", " all_logits = np.concatenate((all_logits, logits.detach().cpu().numpy()), axis=0)\n", " \n", " if all_labels is None:\n", " all_labels = label_ids.detach().cpu().numpy()\n", " else: \n", " all_labels = np.concatenate((all_labels, label_ids.detach().cpu().numpy()), axis=0)\n", " \n", "\n", " eval_loss += tmp_eval_loss.mean().item()\n", " eval_accuracy += tmp_eval_accuracy\n", "\n", " nb_eval_examples += input_ids.size(0)\n", " nb_eval_steps += 1\n", "\n", " eval_loss = eval_loss / nb_eval_steps\n", " eval_accuracy = eval_accuracy / nb_eval_examples\n", " \n", "# ROC-AUC calcualation\n", " # Compute ROC curve and ROC area for each class\n", " fpr = dict()\n", " tpr = dict()\n", " roc_auc = dict()\n", " \n", " for i in range(num_labels):\n", " fpr[i], tpr[i], _ = roc_curve(all_labels[:, i], all_logits[:, i])\n", " roc_auc[i] = auc(fpr[i], tpr[i])\n", " \n", " # Compute micro-average ROC curve and ROC area\n", " fpr[\"micro\"], tpr[\"micro\"], _ = roc_curve(all_labels.ravel(), all_logits.ravel())\n", " roc_auc[\"micro\"] = auc(fpr[\"micro\"], tpr[\"micro\"])\n", "\n", " result = {'eval_loss': eval_loss,\n", " 'eval_accuracy': eval_accuracy,\n", "# 'loss': tr_loss/nb_tr_steps,\n", " 'roc_auc': roc_auc }\n", "\n", " output_eval_file = os.path.join(args['output_dir'], \"eval_results.txt\")\n", " with open(output_eval_file, \"w\") as writer:\n", " logger.info(\"***** Eval results *****\")\n", " for key in sorted(result.keys()):\n", " logger.info(\" %s = %s\", key, str(result[key]))\n", "# writer.write(\"%s = %s\\n\" % (key, str(result[key])))\n", " return result" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Load training data" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "train_features = convert_examples_to_features(\n", " train_examples, label_list, args['max_seq_length'], tokenizer)\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "logger.info(\"***** Running training *****\")\n", "logger.info(\" Num examples = %d\", len(train_examples))\n", "logger.info(\" Batch size = %d\", args['train_batch_size'])\n", "logger.info(\" Num steps = %d\", num_train_steps)\n", "all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)\n", "all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)\n", "all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)\n", "all_label_ids = torch.tensor([f.label_ids for f in train_features], dtype=torch.float)\n", "train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)\n", "if args['local_rank'] == -1:\n", " 
train_sampler = RandomSampler(train_data)\n", "else:\n", "    train_sampler = DistributedSampler(train_data)\n", "train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args['train_batch_size'])" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from tqdm import tqdm_notebook as tqdm" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Train Model" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def fit(num_epochs=args['num_train_epochs']):\n", "    global_step = 0\n", "    model.train()\n", "    for i_ in tqdm(range(int(num_epochs)), desc=\"Epoch\"):\n", "\n", "        tr_loss = 0\n", "        nb_tr_examples, nb_tr_steps = 0, 0\n", "        for step, batch in enumerate(tqdm(train_dataloader, desc=\"Iteration\")):\n", "\n", "            batch = tuple(t.to(device) for t in batch)\n", "            input_ids, input_mask, segment_ids, label_ids = batch\n", "            loss = model(input_ids, segment_ids, input_mask, label_ids)\n", "            if n_gpu > 1:\n", "                loss = loss.mean() # mean() to average on multi-gpu.\n", "            if args['gradient_accumulation_steps'] > 1:\n", "                loss = loss / args['gradient_accumulation_steps']\n", "\n", "            if args['fp16']:\n", "                optimizer.backward(loss)\n", "            else:\n", "                loss.backward()\n", "\n", "            tr_loss += loss.item()\n", "            nb_tr_examples += input_ids.size(0)\n", "            nb_tr_steps += 1\n", "            if (step + 1) % args['gradient_accumulation_steps'] == 0:\n", "                # scheduler.batch_step()\n", "                # modify learning rate with special warm up BERT uses\n", "                lr_this_step = args['learning_rate'] * warmup_linear(global_step/t_total, args['warmup_proportion'])\n", "                for param_group in optimizer.param_groups:\n", "                    param_group['lr'] = lr_this_step\n", "                optimizer.step()\n", "                optimizer.zero_grad()\n", "                global_step += 1\n", "\n", "        logger.info('Loss after epoch {}'.format(tr_loss / nb_tr_steps))\n", "        logger.info('Eval after epoch {}'.format(i_+1))\n", "        eval()" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Freeze BERT layers for 1 epoch\n", "# model.module.freeze_bert_encoder()" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# fit(1)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "(model.module if hasattr(model, 'module') else model).unfreeze_bert_encoder()" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "fit()" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Save a trained model\n", "model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model itself\n", "output_model_file = os.path.join(PYTORCH_PRETRAINED_BERT_CACHE, \"finetuned_pytorch_model.bin\")\n", "torch.save(model_to_save.state_dict(), output_model_file)\n", "\n", "# Load a trained model that you have fine-tuned\n", "model_state_dict = torch.load(output_model_file)\n", "model = BertForMultiLabelSequenceClassification.from_pretrained(args['bert_model'], num_labels = num_labels, state_dict=model_state_dict)\n", "model.to(device)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "model" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Model Evaluation" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "eval()" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def predict(model, path, 
test_filename='test.csv'):\n", " predict_processor = MultiLabelTextProcessor(path)\n", " test_examples = predict_processor.get_test_examples(path, test_filename, size=-1)\n", " \n", " # Hold input data for returning it \n", " input_data = [{ 'id': input_example.guid, 'comment_text': input_example.text_a } for input_example in test_examples]\n", "\n", " test_features = convert_examples_to_features(\n", " test_examples, label_list, args['max_seq_length'], tokenizer)\n", " \n", " logger.info(\"***** Running prediction *****\")\n", " logger.info(\" Num examples = %d\", len(test_examples))\n", " logger.info(\" Batch size = %d\", args['eval_batch_size'])\n", " \n", " all_input_ids = torch.tensor([f.input_ids for f in test_features], dtype=torch.long)\n", " all_input_mask = torch.tensor([f.input_mask for f in test_features], dtype=torch.long)\n", " all_segment_ids = torch.tensor([f.segment_ids for f in test_features], dtype=torch.long)\n", "\n", " test_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids)\n", " \n", " # Run prediction for full data\n", " test_sampler = SequentialSampler(test_data)\n", " test_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=args['eval_batch_size'])\n", " \n", " all_logits = None\n", " \n", " model.eval()\n", " eval_loss, eval_accuracy = 0, 0\n", " nb_eval_steps, nb_eval_examples = 0, 0\n", " for step, batch in enumerate(tqdm(test_dataloader, desc=\"Prediction Iteration\")):\n", " input_ids, input_mask, segment_ids = batch\n", " input_ids = input_ids.to(device)\n", " input_mask = input_mask.to(device)\n", " segment_ids = segment_ids.to(device)\n", "\n", " with torch.no_grad():\n", " logits = model(input_ids, segment_ids, input_mask)\n", " logits = logits.sigmoid()\n", "\n", " if all_logits is None:\n", " all_logits = logits.detach().cpu().numpy()\n", " else:\n", " all_logits = np.concatenate((all_logits, logits.detach().cpu().numpy()), axis=0)\n", " \n", " nb_eval_examples += input_ids.size(0)\n", " nb_eval_steps += 1\n", "\n", " return pd.merge(pd.DataFrame(input_data), pd.DataFrame(all_logits, columns=label_list), left_index=True, right_index=True)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "result = predict(model, DATA_PATH)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "result.shape" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "cols = ['id', 'toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "result[cols].to_csv(DATA_PATH/'toxic_kaggle_submission_14_single.csv', index=None)" ] } ], "metadata": { "kernelspec": { "display_name": "Environment (conda_pytorch_p36)", "language": "python", "name": "conda_pytorch_p36" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.6.5" } }, "nbformat": 4, "nbformat_minor": 2 }