{ "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": { "_cell_guid": "b1076dfc-b9ad-4769-8c92-a6c4dae69d19", "_uuid": "8f2839f25d086af736a60e9eeb907d3b93b6e0e5", "execution": { "iopub.execute_input": "2022-06-15T17:06:13.379402Z", "iopub.status.busy": "2022-06-15T17:06:13.379005Z", "iopub.status.idle": "2022-06-15T17:06:17.577056Z", "shell.execute_reply": "2022-06-15T17:06:17.576168Z", "shell.execute_reply.started": "2022-06-15T17:06:13.379330Z" } }, "outputs": [], "source": [ "# This Python 3 environment comes with many helpful analytics libraries installed\n", "# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python\n", "# For example, here's several helpful packages to load\n", "\n", "import numpy as np # linear algebra\n", "import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n", "\n", "# Input data files are available in the read-only \"../input/\" directory\n", "# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory\n", "\n", "import os\n", "for dirname, _, filenames in os.walk('/kaggle/input'):\n", " print(dirname)\n", "\n", "# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using \"Save & Run All\" \n", "# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session" ] }, { "cell_type": "code", "execution_count": 2, "metadata": { "execution": { "iopub.execute_input": "2022-06-15T17:06:17.584479Z", "iopub.status.busy": "2022-06-15T17:06:17.582362Z", "iopub.status.idle": "2022-06-15T17:06:19.903433Z", "shell.execute_reply": "2022-06-15T17:06:19.902652Z", "shell.execute_reply.started": "2022-06-15T17:06:17.584439Z" } }, "outputs": [], "source": [ "import torch\n", "import torch.nn as nn\n", "import torchvision.transforms.functional as F\n", "from torch.utils.data import Dataset,DataLoader" ] }, { "cell_type": "code", "execution_count": 3, "metadata": { "execution": { "iopub.execute_input": "2022-06-15T17:06:19.905430Z", "iopub.status.busy": "2022-06-15T17:06:19.904894Z", "iopub.status.idle": "2022-06-15T17:06:19.922108Z", "shell.execute_reply": "2022-06-15T17:06:19.921428Z", "shell.execute_reply.started": "2022-06-15T17:06:19.905393Z" } }, "outputs": [], "source": [ "class DoubleConv(nn.Module):\n", " def __init__(self, in_channels, out_channels):\n", " super(DoubleConv, self).__init__()\n", " self.conv = nn.Sequential(\n", " nn.Conv2d(in_channels, out_channels, 3, 1, 1, bias=False),\n", " nn.BatchNorm2d(out_channels),\n", " nn.ReLU(inplace=True),\n", " nn.Conv2d(out_channels, out_channels, 3, 1, 1, bias=False),\n", " nn.BatchNorm2d(out_channels),\n", " nn.ReLU(inplace=True),\n", " )\n", "\n", " def forward(self, x):\n", " return self.conv(x)\n", "\n", "class UNET(nn.Module):\n", " def __init__(\n", " self, in_channels=3, out_channels=1, features=[64, 128, 256, 512],\n", " ):\n", " super(UNET, self).__init__()\n", " self.ups = nn.ModuleList()\n", " self.downs = nn.ModuleList()\n", " self.pool = nn.MaxPool2d(kernel_size=2, stride=2)\n", "\n", " # Down part of UNET\n", " for feature in features:\n", " self.downs.append(DoubleConv(in_channels, feature))\n", " in_channels = feature\n", "\n", " # Up part of UNET\n", " for feature in reversed(features):\n", " self.ups.append(\n", " nn.ConvTranspose2d(\n", " feature*2, feature, kernel_size=2, stride=2,)\n", " )\n", " self.ups.append(DoubleConv(feature*2, feature))\n", "\n", " self.bottleneck 
= DoubleConv(features[-1], features[-1]*2)\n", " self.final_conv = nn.Conv2d(features[0], out_channels, kernel_size=1)\n", "\n", " def forward(self, x):\n", " skip_connections = []\n", "\n", " for down in self.downs:\n", " x = down(x)\n", " skip_connections.append(x)\n", " x = self.pool(x)\n", "\n", " x = self.bottleneck(x)\n", " skip_connections = skip_connections[::-1]\n", "\n", " for idx in range(0, len(self.ups), 2):\n", " x = self.ups[idx](x)\n", " skip_connection = skip_connections[idx//2]\n", " \n", " # In case we have given image with odd dimensions\n", " if x.shape != skip_connection.shape:\n", " x = F.resize(x, size=skip_connection.shape[2:])\n", "\n", " concat_skip = torch.cat((skip_connection, x), dim=1)\n", " x = self.ups[idx+1](concat_skip)\n", "\n", " return self.final_conv(x)" ] }, { "cell_type": "code", "execution_count": 4, "metadata": { "execution": { "iopub.execute_input": "2022-06-15T17:06:19.926153Z", "iopub.status.busy": "2022-06-15T17:06:19.925514Z", "iopub.status.idle": "2022-06-15T17:06:21.801560Z", "shell.execute_reply": "2022-06-15T17:06:21.800735Z", "shell.execute_reply.started": "2022-06-15T17:06:19.926126Z" } }, "outputs": [], "source": [ "def test():\n", " x = torch.randn((3,1,161,161))\n", " model = UNET(in_channels=1, out_channels=1)\n", " preds = model(x)\n", " print(preds.shape)\n", " print(x.shape)\n", " assert preds.shape == x.shape\n", " \n", "\n", "test()" ] }, { "cell_type": "code", "execution_count": 5, "metadata": { "execution": { "iopub.execute_input": "2022-06-15T17:06:21.803489Z", "iopub.status.busy": "2022-06-15T17:06:21.802969Z", "iopub.status.idle": "2022-06-15T17:06:21.807932Z", "shell.execute_reply": "2022-06-15T17:06:21.807050Z", "shell.execute_reply.started": "2022-06-15T17:06:21.803451Z" } }, "outputs": [], "source": [ "import os\n", "import PIL\n", "from PIL import Image\n", "from torch.utils.data import Dataset\n", "import numpy as np" ] }, { "cell_type": "code", "execution_count": 6, "metadata": { "execution": { "iopub.execute_input": "2022-06-15T17:06:21.809998Z", "iopub.status.busy": "2022-06-15T17:06:21.809444Z", "iopub.status.idle": "2022-06-15T17:06:21.820457Z", "shell.execute_reply": "2022-06-15T17:06:21.819642Z", "shell.execute_reply.started": "2022-06-15T17:06:21.809960Z" } }, "outputs": [], "source": [ "class CarvanaDataset(Dataset):\n", " def __init__(self,images,image_dir,mask_dir,transform=None,train=True):\n", " self.image_dir = image_dir\n", " self.mask_dir = mask_dir\n", " self.transform = transform\n", " self.isTrain = train\n", " self.images = images\n", " def __len__(self):\n", " return len(self.images)\n", " def __getitem__(self,index):\n", " img_path = os.path.join(self.image_dir,self.images[index])\n", " mask_path = os.path.join(self.mask_dir,self.images[index].replace(\".jpg\",\"_mask.gif\"))\n", " image = np.array(Image.open(img_path).convert(\"RGB\"))\n", " mask = np.array(Image.open(mask_path).convert(\"L\"),dtype=np.float32)\n", " mask[mask == 255.0] = 1.0\n", " \n", " if self.transform is not None:\n", " augmentations = self.transform(image=image,mask=mask)\n", " image = augmentations['image']\n", " mask = augmentations['mask']\n", " \n", " return {\"image\":image,\"mask\":mask}\n" ] }, { "cell_type": "markdown", "metadata": { "execution": { "iopub.execute_input": "2022-06-06T15:37:33.998798Z", "iopub.status.busy": "2022-06-06T15:37:33.998088Z", "iopub.status.idle": "2022-06-06T15:37:34.28288Z", "shell.execute_reply": "2022-06-06T15:37:34.281567Z", "shell.execute_reply.started": 
"2022-06-06T15:37:33.998758Z" } }, "source": [ "train_dirs = \"./train/\"\n", "train_mask_dirs = \"./train_masks/\"\n", "train_transform=None\n", "\n", "\n", "loader= get_loaders(\n", " train_img_dir,\n", " train_mask_dir,\n", " batch_size,\n", " num_workers,\n", " pin_memory,\n", " )\n", "\n", " \n", "for batch_idx, (data, targets) in enumerate(loader):\n", " data = data.to(device)\n", " targets = targets.float().unsqueeze(1).to(device)" ] }, { "cell_type": "code", "execution_count": 7, "metadata": { "execution": { "iopub.execute_input": "2022-06-15T17:06:21.822574Z", "iopub.status.busy": "2022-06-15T17:06:21.821924Z", "iopub.status.idle": "2022-06-15T17:06:31.472338Z", "shell.execute_reply": "2022-06-15T17:06:31.471522Z", "shell.execute_reply.started": "2022-06-15T17:06:21.822531Z" } }, "outputs": [], "source": [ "import zipfile\n", "# 'test.zip'\n", "dirs = ['train.zip','train_masks.zip']\n", "for x in dirs:\n", " with zipfile.ZipFile(\"../input/carvana-image-masking-challenge/\"+ x,'r') as z:\n", " z.extractall(\".\")" ] }, { "cell_type": "code", "execution_count": 8, "metadata": { "execution": { "iopub.execute_input": "2022-06-15T17:06:31.473986Z", "iopub.status.busy": "2022-06-15T17:06:31.473613Z", "iopub.status.idle": "2022-06-15T17:06:32.153351Z", "shell.execute_reply": "2022-06-15T17:06:32.152341Z", "shell.execute_reply.started": "2022-06-15T17:06:31.473948Z" } }, "outputs": [], "source": [ "!ls" ] }, { "cell_type": "code", "execution_count": 9, "metadata": { "execution": { "iopub.execute_input": "2022-06-15T17:06:32.155681Z", "iopub.status.busy": "2022-06-15T17:06:32.155008Z", "iopub.status.idle": "2022-06-15T17:06:33.802577Z", "shell.execute_reply": "2022-06-15T17:06:33.801816Z", "shell.execute_reply.started": "2022-06-15T17:06:32.155634Z" } }, "outputs": [], "source": [ "import albumentations as A\n", "from albumentations.pytorch import ToTensorV2\n", "from tqdm import tqdm\n", "import torch.optim as optim" ] }, { "cell_type": "code", "execution_count": 10, "metadata": { "execution": { "iopub.execute_input": "2022-06-15T17:06:33.805747Z", "iopub.status.busy": "2022-06-15T17:06:33.805342Z", "iopub.status.idle": "2022-06-15T17:06:33.871206Z", "shell.execute_reply": "2022-06-15T17:06:33.867872Z", "shell.execute_reply.started": "2022-06-15T17:06:33.805678Z" } }, "outputs": [], "source": [ "LEARNING_RATE = 1e-4\n", "SPLIT=0.2\n", "DEVICE = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n", "BATCH_SIZE = 4\n", "EPOCHS = 4\n", "NUM_WORKERS = 4\n", "IMAGE_HEIGHT = 572\n", "IMAGE_WIDTH = 572\n", "PIN_MEMORY = True\n", "DATAPATH = \"../input/carvana-image-masking-challenge/\"\n", "TRAIN_IMG_DIR = '../input/unet-practice/train/'\n", "TRAIN_MASK_DIR = '../input/unet-practice/train_masks/'" ] }, { "cell_type": "code", "execution_count": 11, "metadata": { "execution": { "iopub.execute_input": "2022-06-15T17:06:33.873349Z", "iopub.status.busy": "2022-06-15T17:06:33.872918Z", "iopub.status.idle": "2022-06-15T17:06:33.885956Z", "shell.execute_reply": "2022-06-15T17:06:33.885088Z", "shell.execute_reply.started": "2022-06-15T17:06:33.873308Z" } }, "outputs": [], "source": [ "images = os.listdir(TRAIN_IMG_DIR)\n", "masks = os.listdir(TRAIN_MASK_DIR)" ] }, { "cell_type": "code", "execution_count": 12, "metadata": { "execution": { "iopub.execute_input": "2022-06-15T17:06:33.888229Z", "iopub.status.busy": "2022-06-15T17:06:33.887869Z", "iopub.status.idle": "2022-06-15T17:06:34.369637Z", "shell.execute_reply": "2022-06-15T17:06:34.368858Z", "shell.execute_reply.started": 
"2022-06-15T17:06:33.888196Z" } }, "outputs": [], "source": [ "import matplotlib.pyplot as plt\n", "img = np.array(Image.open(TRAIN_IMG_DIR+\"/\"+images[0]).convert(\"RGB\"))\n", "plt.imshow(img,cmap=\"gray\")\n", "print(img.shape)" ] }, { "cell_type": "code", "execution_count": 13, "metadata": { "execution": { "iopub.execute_input": "2022-06-15T17:06:34.371007Z", "iopub.status.busy": "2022-06-15T17:06:34.370624Z", "iopub.status.idle": "2022-06-15T17:06:34.748262Z", "shell.execute_reply": "2022-06-15T17:06:34.747481Z", "shell.execute_reply.started": "2022-06-15T17:06:34.370971Z" } }, "outputs": [], "source": [ "msk = np.array(Image.open(TRAIN_MASK_DIR+\"/\"+images[0].replace(\".jpg\",\"_mask.gif\")).convert(\"L\"))\n", "plt.imshow(msk,cmap=\"gray\")\n", "print(msk.shape)" ] }, { "cell_type": "code", "execution_count": 14, "metadata": { "execution": { "iopub.execute_input": "2022-06-15T17:06:34.751212Z", "iopub.status.busy": "2022-06-15T17:06:34.749414Z", "iopub.status.idle": "2022-06-15T17:06:34.761391Z", "shell.execute_reply": "2022-06-15T17:06:34.760461Z", "shell.execute_reply.started": "2022-06-15T17:06:34.751182Z" } }, "outputs": [], "source": [ "def fit(model,dataloader,data,optimizer,criterion):\n", " print('-------------Training---------------')\n", " model.train()\n", " train_running_loss = 0.0\n", " counter=0\n", " \n", " # num of batches\n", " num_batches = int(len(data)/dataloader.batch_size)\n", " for i,data in tqdm(enumerate(dataloader),total=num_batches):\n", " counter+=1\n", " image,mask = data[\"image\"].to(DEVICE),data[\"mask\"].to(DEVICE)\n", " optimizer.zero_grad()\n", " outputs = model(image)\n", " outputs =outputs.squeeze(1)\n", " loss = criterion(outputs,mask)\n", " train_running_loss += loss.item()\n", " loss.backward()\n", " optimizer.step()\n", " train_loss = train_running_loss/counter\n", " return train_loss\n", "def validate(model,dataloader,data,criterion):\n", " print(\"\\n--------Validating---------\\n\")\n", " model.eval()\n", " valid_running_loss = 0.0\n", " counter = 0\n", " # number of batches\n", " num_batches = int(len(data)/dataloader.batch_size)\n", " with torch.no_grad():\n", " for i,data in tqdm(enumerate(dataloader),total=num_batches):\n", " counter+=1\n", " image,mask = data[\"image\"].to(DEVICE),data[\"mask\"].to(DEVICE)\n", " outputs = model(image)\n", " outputs =outputs.squeeze(1)\n", " loss = criterion(outputs,mask)\n", " valid_running_loss += loss.item()\n", " valid_loss = valid_running_loss/counter\n", " return valid_loss" ] }, { "cell_type": "code", "execution_count": 15, "metadata": { "execution": { "iopub.execute_input": "2022-06-15T17:06:34.763142Z", "iopub.status.busy": "2022-06-15T17:06:34.762773Z", "iopub.status.idle": "2022-06-15T17:06:34.775340Z", "shell.execute_reply": "2022-06-15T17:06:34.774485Z", "shell.execute_reply.started": "2022-06-15T17:06:34.763092Z" } }, "outputs": [], "source": [ "train_transform = A.Compose([\n", " A.Resize(IMAGE_HEIGHT,IMAGE_WIDTH),\n", " A.Rotate(limit=35,p=1.0),\n", " A.HorizontalFlip(p=0.5),\n", " A.VerticalFlip(p=0.1),\n", " A.Normalize(\n", " mean=[0.0,0.0,0.0],\n", " std = [1.0,1.0,1.0],\n", " max_pixel_value=255.0\n", " ),\n", " ToTensorV2() \n", "])\n", "validation_transform = A.Compose([\n", " A.Resize(IMAGE_HEIGHT,IMAGE_WIDTH),\n", " A.Normalize(\n", " mean = [0.0,0.0,0.0],\n", " std = [1.0,1.0,1.0],\n", " max_pixel_value=255.0,\n", " ),\n", " ToTensorV2()\n", "])" ] }, { "cell_type": "code", "execution_count": 16, "metadata": { "execution": { "iopub.execute_input": 
"2022-06-15T17:06:34.778303Z", "iopub.status.busy": "2022-06-15T17:06:34.777577Z", "iopub.status.idle": "2022-06-15T17:06:34.786300Z", "shell.execute_reply": "2022-06-15T17:06:34.785546Z", "shell.execute_reply.started": "2022-06-15T17:06:34.778268Z" } }, "outputs": [], "source": [ "def train_test_split(images,splitSize):\n", " imageLen = len(images)\n", " val_len = int(splitSize*imageLen)\n", " train_len = imageLen - val_len\n", " train_images,val_images = images[:train_len],images[train_len:]\n", " return train_images,val_images\n", "\n", "train_images_path,val_images_path = train_test_split(images,SPLIT)\n", "train_data = CarvanaDataset(train_images_path,TRAIN_IMG_DIR,TRAIN_MASK_DIR,train_transform,True)\n", "valid_data = CarvanaDataset(val_images_path,TRAIN_IMG_DIR,TRAIN_MASK_DIR,validation_transform,True)\n", "train_dataloader = DataLoader(train_data,batch_size=BATCH_SIZE,shuffle=True)\n", "valid_dataloader = DataLoader(valid_data,batch_size=BATCH_SIZE,shuffle=False)" ] }, { "cell_type": "code", "execution_count": 17, "metadata": { "execution": { "iopub.execute_input": "2022-06-15T17:06:34.788075Z", "iopub.status.busy": "2022-06-15T17:06:34.787820Z", "iopub.status.idle": "2022-06-15T17:06:43.863700Z", "shell.execute_reply": "2022-06-15T17:06:43.860891Z", "shell.execute_reply.started": "2022-06-15T17:06:34.788044Z" } }, "outputs": [], "source": [ "train_loss = []\n", "val_loss =[]\n", "model = UNET().to(DEVICE)\n", "optimizer = optim.Adam(model.parameters(),lr=LEARNING_RATE)\n", "criterion = nn.BCEWithLogitsLoss()\n", "for epoch in range(EPOCHS):\n", " print(f\"Epoch {epoch+1} of {EPOCHS}\")\n", " train_epoch_loss = fit(model, train_dataloader, train_data,optimizer,criterion)\n", " val_epoch_loss = validate(model, valid_dataloader, valid_data, criterion)\n", " train_loss.append(train_epoch_loss)\n", " val_loss.append(val_epoch_loss)\n", " print(f\"Train Loss: {train_epoch_loss:.4f}\")\n", " print(f'Val Loss: {val_epoch_loss:.4f}')\n", "\n", "# loss plots\n", "plt.figure(figsize=(10, 7))\n", "plt.plot(train_loss, color=\"orange\", label='train loss')\n", "plt.plot(val_loss, color=\"red\", label='validation loss')\n", "plt.xlabel(\"Epochs\")\n", "plt.ylabel(\"Loss\")\n", "plt.legend()\n", "# plt.savefig(f\"../input/loss.png\")\n", "plt.show()\n", "torch.save({\n", " 'epoch': EPOCHS,\n", " 'model_state_dict': model.state_dict(),\n", " 'optimizer_state_dict': optimizer.state_dict(),\n", " 'loss': criterion,\n", "}, \"./model.pth\")\n", "\n", "print(\"\\n---------DONE TRAINING----------\\n\")\n" ] }, { "cell_type": "code", "execution_count": 18, "metadata": { "execution": { "iopub.execute_input": "2022-06-15T17:06:48.776421Z", "iopub.status.busy": "2022-06-15T17:06:48.775633Z", "iopub.status.idle": "2022-06-15T17:06:51.878793Z", "shell.execute_reply": "2022-06-15T17:06:51.877931Z", "shell.execute_reply.started": "2022-06-15T17:06:48.776377Z" } }, "outputs": [], "source": [ "checkpoint = torch.load('../input/unet-practice/model.pth')" ] }, { "cell_type": "code", "execution_count": 19, "metadata": { "execution": { "iopub.execute_input": "2022-06-15T17:06:51.881490Z", "iopub.status.busy": "2022-06-15T17:06:51.880881Z", "iopub.status.idle": "2022-06-15T17:06:52.237345Z", "shell.execute_reply": "2022-06-15T17:06:52.235528Z", "shell.execute_reply.started": "2022-06-15T17:06:51.881451Z" } }, "outputs": [], "source": [ "model = UNET() \n", "model.load_state_dict(checkpoint[\"model_state_dict\"])\n", "model.eval()\n", "model.cuda()" ] }, { "cell_type": "code", "execution_count": 20, "metadata": { 
"execution": { "iopub.execute_input": "2022-06-15T17:06:52.240263Z", "iopub.status.busy": "2022-06-15T17:06:52.238729Z", "iopub.status.idle": "2022-06-15T17:06:52.682818Z", "shell.execute_reply": "2022-06-15T17:06:52.682053Z", "shell.execute_reply.started": "2022-06-15T17:06:52.240186Z" } }, "outputs": [], "source": [ "data = train_data.__getitem__(101)\n", "plt.imshow(data['mask'],cmap=\"gray\")\n", "print(train_data.__getitem__(0)['mask'].shape)" ] }, { "cell_type": "code", "execution_count": 22, "metadata": { "execution": { "iopub.execute_input": "2022-06-15T17:06:58.481707Z", "iopub.status.busy": "2022-06-15T17:06:58.481338Z", "iopub.status.idle": "2022-06-15T17:06:58.527077Z", "shell.execute_reply": "2022-06-15T17:06:58.525840Z", "shell.execute_reply.started": "2022-06-15T17:06:58.481661Z" } }, "outputs": [], "source": [ "# for Testing on Single datapoint after training\n", "# plt.imshow(np.transpose(np.array(data['image']),(1,2,0)),cmap=\"gray\")\n", "# print(data['image'].shape)\n", "# img = data['image'].unsqueeze(0).to(device=\"cuda\")\n", "# model = UNet()\n", "output = model(img)\n", "output = torch.squeeze(output)\n", "output[output>0.0] = 1.0\n", "output[output<=0.0]=0\n", "# print(torch.max(output))\n", "# print(output.shape)\n", "disp = output.detach().cpu()\n", "plt.imshow(disp,cmap=\"gray\")" ] }, { "cell_type": "code", "execution_count": 23, "metadata": { "execution": { "iopub.execute_input": "2022-06-15T17:06:59.399400Z", "iopub.status.busy": "2022-06-15T17:06:59.398661Z", "iopub.status.idle": "2022-06-15T17:06:59.631231Z", "shell.execute_reply": "2022-06-15T17:06:59.630391Z", "shell.execute_reply.started": "2022-06-15T17:06:59.399363Z" } }, "outputs": [], "source": [ "torch.save({\n", " 'epoch': EPOCHS,\n", " 'model_state_dict': model.state_dict(),\n", " 'optimizer_state_dict': optimizer.state_dict(),\n", " 'loss': criterion,\n", "}, \"./model.pth\")" ] }, { "cell_type": "code", "execution_count": 24, "metadata": { "execution": { "iopub.execute_input": "2022-06-15T17:07:01.145317Z", "iopub.status.busy": "2022-06-15T17:07:01.144583Z", "iopub.status.idle": "2022-06-15T17:07:19.198270Z", "shell.execute_reply": "2022-06-15T17:07:19.197247Z", "shell.execute_reply.started": "2022-06-15T17:07:01.145281Z" } }, "outputs": [], "source": [ "!pip install gradio" ] }, { "cell_type": "code", "execution_count": 25, "metadata": { "execution": { "iopub.execute_input": "2022-06-15T17:07:19.201021Z", "iopub.status.busy": "2022-06-15T17:07:19.200583Z", "iopub.status.idle": "2022-06-15T17:07:28.970362Z", "shell.execute_reply": "2022-06-15T17:07:28.969560Z", "shell.execute_reply.started": "2022-06-15T17:07:19.200980Z" } }, "outputs": [], "source": [ "import numpy as np\n", "\n", "import gradio as gr\n", "\n", "def sepia(input_img):\n", " sepia_filter = np.array(\n", " [[0.393, 0.769, 0.189], [0.349, 0.686, 0.168], [0.272, 0.534, 0.131]]\n", " )\n", " sepia_img = input_img.dot(sepia_filter.T)\n", " sepia_img /= sepia_img.max()\n", " return sepia_img\n", "\n", "demo = gr.Interface(sepia, gr.Image(shape=(200, 200)), \"image\")\n", "\n", "demo.launch(share=True)" ] }, { "cell_type": "code", "execution_count": 29, "metadata": { "execution": { "iopub.execute_input": "2022-06-15T17:08:29.102655Z", "iopub.status.busy": "2022-06-15T17:08:29.102240Z", "iopub.status.idle": "2022-06-15T17:08:29.110063Z", "shell.execute_reply": "2022-06-15T17:08:29.108901Z", "shell.execute_reply.started": "2022-06-15T17:08:29.102603Z" } }, "outputs": [], "source": [ "import cv2\n", "import numpy as np\n", "import 
albumentations as A\n", "\n", "def preprocess(img_filepath):\n", " image = cv2.imread(img_filepath)\n", " # cv2.imread returns BGR; convert to RGB to match the training data loaded via PIL\n", " image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n", " \n", " test_transform = A.Compose([\n", " A.Resize(IMAGE_HEIGHT,IMAGE_WIDTH),\n", " # same normalization as validation: just scales pixels to [0,1]\n", " A.Normalize(\n", " mean = [0.0,0.0,0.0],\n", " std = [1.0,1.0,1.0],\n", " max_pixel_value=255.0,\n", " ),\n", " ToTensorV2()\n", " ])\n", " \n", " aug = test_transform(image=image)\n", " image = aug['image']\n", " \n", " return image" ] }, { "cell_type": "code", "execution_count": 30, "metadata": { "execution": { "iopub.execute_input": "2022-06-15T17:08:29.597686Z", "iopub.status.busy": "2022-06-15T17:08:29.596933Z", "iopub.status.idle": "2022-06-15T17:08:29.603303Z", "shell.execute_reply": "2022-06-15T17:08:29.602449Z", "shell.execute_reply.started": "2022-06-15T17:08:29.597651Z" } }, "outputs": [], "source": [ "import torch\n", "\n", "def predict(input_image):\n", " # input_image is the CHW tensor returned by preprocess(); add a batch dimension and move it to the model's device\n", " with torch.no_grad():\n", " output = model(input_image.unsqueeze(0).to(DEVICE))\n", " output = torch.squeeze(output)\n", " # raw logits: threshold at 0.0 (= probability 0.5 after sigmoid)\n", " output[output > 0.0] = 1.0\n", " output[output <= 0.0] = 0.0\n", " return output.detach().cpu()" ] }, { "cell_type": "code", "execution_count": 31, "metadata": { "execution": { "iopub.execute_input": "2022-06-15T17:08:30.053673Z", "iopub.status.busy": "2022-06-15T17:08:30.053095Z", "iopub.status.idle": "2022-06-15T17:08:37.406057Z", "shell.execute_reply": "2022-06-15T17:08:37.405064Z", "shell.execute_reply.started": "2022-06-15T17:08:30.053637Z" } }, "outputs": [], "source": [ "import gradio as gr\n", "import numpy as np\n", "import matplotlib.pyplot as plt\n", "\n", "def inference(filepath):\n", " input_batch = preprocess(filepath)\n", " result = predict(input_batch)\n", " plt.imshow(result,cmap=\"gray\")\n", " plt.title(\"Segmented Image\")\n", " \n", " return plt\n", "\n", "\n", "title = \"Carvana Image Segmentation using PyTorch\"\n", "description = \"Segmentation of cars from Carvana Dataset\"\n", "article = \"
Kaggle Notebook: Brain MRI-UNET-PyTorch | Github Repo
\"\n", "examples = [['../input/unet-practice/train/00087a6bd4dc_01.jpg'], \n", " ['../input/unet-practice/train/00087a6bd4dc_04.jpg'], \n", " ['../input/unet-practice/train/00087a6bd4dc_11.jpg'], \n", " ['../input/unet-practice/train/0d1a9caf4350_02.jpg']] \n", "\n", "gr.Interface(inference, inputs=gr.inputs.Image(type=\"filepath\"), outputs=gr.outputs.Image('plot'), title=title,\n", " description=description,\n", " article=article,\n", " examples=examples).launch(share=True,debug=False, enable_queue=True)" ] }, { "cell_type": "code", "execution_count": 32, "metadata": { "execution": { "iopub.execute_input": "2022-06-15T17:12:50.940143Z", "iopub.status.busy": "2022-06-15T17:12:50.939411Z", "iopub.status.idle": "2022-06-15T17:12:50.945490Z", "shell.execute_reply": "2022-06-15T17:12:50.944608Z", "shell.execute_reply.started": "2022-06-15T17:12:50.940106Z" } }, "outputs": [], "source": [ "plt" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8.8" } }, "nbformat": 4, "nbformat_minor": 4 }