{ "cells": [ { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "#default_exp deployment" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Server Implementation\n", "\n", "Let's now take what we had before and run inference based on a list of filenames. We'll make a quick script to get the ball rolling for how we want everything to do using `nbdev` again" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "#hide\n", "#Run once per session\n", "!pip install fastai" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "#hide\n", "from nbdev.showdoc import *" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "We'll want the libraries we've used" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "#export\n", "from fastai.vision.all import *" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Including our new `style_transfer.py` file" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "#export\n", "from style_transfer import *" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Let's grab our original style image" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "learn = load_learner('myModel', cpu=False)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "And now we can make and prepare our dataloader with a filename!" 
] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "dset = Datasets('cat.jpg', tfms=[PILImage.create])\n", "dl = dset.dataloaders(after_item=[ToTensor()], after_batch=[IntToFloatTensor(), Normalize.from_stats(*imagenet_stats)], bs=1)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "t_im = dl.one_batch()[0]" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "And get our raw output. " ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "with torch.no_grad():\n", " res = learn.model(t_im)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Let's wrap this into a function" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "#export\n", "def get_learner(fn, cpu=False):\n", " return load_learner(fn, cpu=cpu)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "#export\n", "def make_datasets(learn, fns, bs=1):\n", " cuda = next(learn.model.parameters()).is_cuda\n", " dset = Datasets(fns, tfms=[PILImage.create])\n", " if cuda: \n", " after_batch = [IntToFloatTensor(), Normalize.from_stats(*imagenet_stats)] \n", " dl = dset.dataloaders(after_item=[ToTensor()], after_batch=after_batch, bs=1)\n", " else: \n", " after_batch = [Normalize.from_stats(*imagenet_stats, cuda=False)]\n", " dl = dset.dataloaders(after_item=[ToTensor()], after_batch=after_batch, bs=1, device='cpu')\n", " return dl" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "#export\n", "from torchvision.utils import save_image" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "We can write a quick `save_im` function to save all our outputed tensors to images" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "#export\n", "def save_im(imgs:list, path):\n", " \"Save a 
n*c*w*h `Tensor` into seperate images\"\n", " [save_image(im, f'{path}/{i}.png') for i, im in enumerate(imgs)]" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Now let's put it all together" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "#export\n", "def inference(pkl_name, fnames:list, path:Path, cpu:bool=True):\n", " \"Grab inference on a model, filenames, and a path to save it to\"\n", " path = path/'results'\n", " path.mkdir(parents=True, exist_ok=True)\n", " learn = get_learner(pkl_name, cpu)\n", " if len(fnames) > 1:\n", " dls = []\n", " for fname in fnames:\n", " dls.append(make_datasets(learn, fnames, 1))\n", " else:\n", " dls = [make_datasets(learn, fnames, 1)]\n", " res = []\n", " for b in dls:\n", " t_im = b.one_batch()[0]\n", " with torch.no_grad():\n", " out = learn.model(t_im)\n", " res.append(out)\n", " save_im(res, path)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "And try it out!" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "fnames = ['cat.jpg'] * 5" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "inference('myModel', fnames, path=Path(''))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Lastly let's make a `.py` file again to run it off of" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "#hide\n", "from nbdev.imports import *\n", "from nbdev.export import reset_nbdev_module, notebook2script\n", "\n", "create_config('myLib', user='muellerzr', path='.', cfg_name='settings.ini')\n", "cfg = Config(cfg_name='settings.ini')" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "#hide\n", "reset_nbdev_module()" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Converted 
05_Inference_Server.ipynb.\n" ] } ], "source": [ "#hide\n", "notebook2script('05_Inference_Server.ipynb')" ] },
{ "cell_type": "markdown", "metadata": {}, "source": [ "And we're done!" ] } ],
"metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" } }, "nbformat": 4, "nbformat_minor": 1 }