{ "cells": [ { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from fastai.text import * # Quick access to NLP functionality" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Text example" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "An example of creating a language model and then transfering to a classifier." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "PosixPath('/home/ubuntu/.fastai/data/imdb_sample')" ] }, "execution_count": null, "metadata": {}, "output_type": "execute_result" } ], "source": [ "path = untar_data(URLs.IMDB_SAMPLE)\n", "path" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Open and view the independent and dependent variables:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
labeltextis_valid
0negativeUn-bleeping-believable! Meg Ryan doesn't even ...False
1positiveThis is a extremely well-made film. The acting...False
2negativeEvery once in a long while a movie will come a...False
3positiveName just says it all. I watched this movie wi...False
4negativeThis movie succeeds at being one of the most u...False
\n", "
" ], "text/plain": [ " label text is_valid\n", "0 negative Un-bleeping-believable! Meg Ryan doesn't even ... False\n", "1 positive This is a extremely well-made film. The acting... False\n", "2 negative Every once in a long while a movie will come a... False\n", "3 positive Name just says it all. I watched this movie wi... False\n", "4 negative This movie succeeds at being one of the most u... False" ] }, "execution_count": null, "metadata": {}, "output_type": "execute_result" } ], "source": [ "df = pd.read_csv(path/'texts.csv')\n", "df.head()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Create a `DataBunch` for each of the language model and the classifier:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "data_lm = TextLMDataBunch.from_csv(path, 'texts.csv')\n", "data_clas = TextClasDataBunch.from_csv(path, 'texts.csv', vocab=data_lm.train_ds.vocab, bs=42)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "We'll fine-tune the language model. [fast.ai](http://www.fast.ai/) has a pre-trained English model available that we can download, we just have to specify it like this:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "moms = (0.8,0.7)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [ { "data": { "text/html": [ "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
epochtrain_lossvalid_lossaccuracytime
14.7528413.9363850.28694900:17
24.4509843.8393460.29248500:17
34.1704273.8034780.29486600:17
43.9465963.7985830.29534200:17
" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "learn = language_model_learner(data_lm, AWD_LSTM)\n", "learn.unfreeze()\n", "learn.fit_one_cycle(4, slice(1e-2), moms=moms)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Save our language model's encoder:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "learn.save_encoder('enc')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Fine tune it to create a classifier:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [ { "data": { "text/html": [ "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
epochtrain_lossvalid_lossaccuracytime
10.7065970.6357870.67164200:33
20.6874970.6276760.65174100:33
30.6684370.6037360.68159200:30
40.6503790.5972910.67661700:30
" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "learn = text_classifier_learner(data_clas, AWD_LSTM)\n", "learn.load_encoder('enc')\n", "learn.fit_one_cycle(4, moms=moms)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [ { "data": { "text/html": [ "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
epochtrain_lossvalid_lossaccuracytime
10.6354530.5890160.69651700:51
20.6443970.6044810.67661700:55
30.6463750.5797780.69651700:55
40.6325350.5520550.73134300:53
50.6386720.5442920.73631800:51
60.6214850.5492860.73631800:55
70.6124890.5436370.74129400:53
80.6114540.5427030.73631800:54
" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "learn.unfreeze()\n", "learn.fit_one_cycle(8, slice(1e-5,1e-3), moms=moms)" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" } }, "nbformat": 4, "nbformat_minor": 2 }