{ "cells": [ { "cell_type": "markdown", "metadata": { "id": "gf_Csp__upFO" }, "source": [ "### Using KDiffusion model, with t removed as baseline for future experiments" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "oFEJQaBsh-U9", "outputId": "158e8522-eb78-4522-cb53-280f3d017899" }, "outputs": [], "source": [ "!pip install -q diffusers datasets wandb lpips timm" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 69 }, "id": "Ddkidb9Fk6ZY", "outputId": "8c1ec8ab-26aa-4676-f1d6-d072d613cfb1" }, "outputs": [], "source": [ "import wandb\n", "wandb.login()" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "-pTR-1A-h7ks", "outputId": "5ce14582-76bf-4ec7-b03d-6c449ac786e3" }, "outputs": [], "source": [ "#@title imports\n", "import wandb\n", "import torch\n", "import torchvision\n", "from torch import nn\n", "from torch import multiprocessing as mp\n", "from torch.utils import data\n", "from torchvision import datasets, transforms\n", "from torchvision import transforms as T\n", "from torchvision.transforms import functional as TF\n", "from fastai.data.all import *\n", "from fastai.vision.all import *\n", "from fastai.callback.wandb import *\n", "from timm.optim.rmsprop_tf import RMSpropTF\n", "from timm.optim.lookahead import Lookahead\n", "import accelerate\n", "from einops import rearrange\n", "from functools import partial\n", "import math\n", "from copy import deepcopy\n", "from pathlib import Path\n", "from tqdm.auto import trange, tqdm\n", "import k_diffusion as K\n", "\n", "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n", "print(f'Using device: {device}')" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "#Training Config\n", "bs = 256 # the batch size\n", "grad_accum_steps = 1 # the number of gradient accumulation steps\n", "lr_max = 2e-4 # the max learning rate\n", "num_workers = 8 # the number of data loader workers\n", "resume = None # the checkpoint to resume from\n", "save_every = 10000 # save every this many steps\n", "training_seed = None # the random seed for training\n", "start_method = 'spawn' # the multiprocessing start method. Options: 'fork', 'forkserver', 'spawn'\n", "opt_func = partial(torch.optim.AdamW, lr=lr_max, betas=(0.95, 0.999),\n", " eps=1e-6, weight_decay=1e-3) \n", "\n", "#Logging Config\n", "sample_n = 64 # the number of images to sample for demo grids\n", "demo_every = 500 # save a demo grid every this many steps\n", "evaluate_every = 10000 # save a demo grid every this many steps\n", "evaluate_n = 2000 # the number of samples to draw to evaluate\n", "name = 'KDiff_FashionMnist_Baseline' # the name of the run\n", "wandb_project = 'FastDiffusion_KDiff_Fmnist' # the wandb project name (specify this to enable wandb)\n", "wandb_save_model = False # save model to wandb\n", "dataset_name = 'FashionMNIST' # wandb name for dataset used\n", "comments = 'Initial baseline run of K-diffusion model on FashionMnist.' 
# comments logged in wandb\n", "demo_imgs_dir = './demo_images'\n", "metrics_dir = './metrics'\n", "\n", "#Model Config\n", "sz = 28\n", "size = [sz, sz]\n", "input_channels = 1\n", "patch_size = 1\n", "mapping_out = 256\n", "depths = [2, 4, 4]\n", "channels = [128, 128, 256]\n", "self_attn_depths = [False, False, True]\n", "cross_attn_depths = None\n", "has_variance = True\n", "dropout_rate = 0.05\n", "augment_wrapper = True\n", "augment_prob = 0.12\n", "sigma_data = 0.6162\n", "sigma_min = 1e-2\n", "sigma_max = 80\n", "skip_stages = 0\n", "\n", "#Model Save/Load\n", "checkpoints_dir = './checkpoints'\n", "model_path = Path(checkpoints_dir + '/kdiff_baseline_fmnist.pt')\n", "model_ema_path = Path(checkpoints_dir + '/kdiff_baseline_fmnist_ema.pt')\n", "model_path.parent.mkdir(exist_ok=True)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "mp.set_start_method(start_method)\n", "torch.backends.cuda.matmul.allow_tf32 = True" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "ddp_kwargs = accelerate.DistributedDataParallelKwargs(find_unused_parameters=skip_stages > 0)\n", "accelerator = accelerate.Accelerator(kwargs_handlers=[ddp_kwargs], gradient_accumulation_steps=grad_accum_steps)\n", "device = accelerator.device\n", "print(f'Process {accelerator.process_index} using device: {device}', flush=True)" ] }, { "cell_type": "markdown", "metadata": { "id": "ypA8A7pjuwI5" }, "source": [ "# Model and Training Setup" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def make_sample_density(mean=-1.2, std=1.2):\n", " # lognormal sigma (noise level) sampling density\n", " return partial(K.utils.rand_log_normal, loc=mean, scale=std)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def make_model():\n", " model = K.models.ImageDenoiserModelV1(\n", " c_in=input_channels,\n", " feats_in=mapping_out,\n", " depths=depths,\n", " channels=channels,\n", " self_attn_depths=self_attn_depths,\n", " cross_attn_depths=cross_attn_depths,\n", " patch_size=patch_size,\n", " dropout_rate=dropout_rate,\n", " mapping_cond_dim=9 if augment_wrapper else 0,\n", " unet_cond_dim=0,\n", " cross_cond_dim=0,\n", " skip_stages=skip_stages,\n", " has_variance=has_variance,\n", " )\n", " if augment_wrapper:\n", " model = K.augmentation.KarrasAugmentWrapper(model)\n", " return model" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def make_denoiser_wrapper():\n", " if not has_variance:\n", " return partial(K.layers.Denoiser, sigma_data=sigma_data)\n", " return partial(K.layers.DenoiserWithVariance, sigma_data=sigma_data)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "tf = transforms.Compose([\n", " transforms.Resize(sz, interpolation=transforms.InterpolationMode.LANCZOS),\n", " transforms.CenterCrop(sz),\n", " K.augmentation.KarrasAugmentationPipeline(augment_prob),\n", "])" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "train_set = datasets.FashionMNIST('data', train=True, download=True, transform=tf)\n", "\n", "if accelerator.is_main_process:\n", " try:\n", " print('Number of items in dataset:', len(train_set))\n", " except TypeError:\n", " pass\n", "\n", "train_dl = data.DataLoader(train_set, bs, shuffle=True, drop_last=True, num_workers=num_workers, 
persistent_workers=True)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "inner_model = make_model()\n", "\n", "if accelerator.is_main_process:\n", " print('Parameters:', K.utils.n_params(inner_model))\n", "\n", "model = make_denoiser_wrapper()(inner_model)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def init_wandb():\n", " import wandb\n", " log_config = {}\n", " log_config['num_epochs'] = 'N/A'\n", " log_config['lr_max'] = lr_max\n", " log_config['comments'] = comments\n", " log_config['dataset'] = dataset_name\n", " log_config['parameters'] = K.utils.n_params(inner_model)\n", " wandb.init(project=wandb_project, config=log_config, save_code=False) " ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def init_training_manual_seed(accelerator):\n", " if training_seed is not None:\n", " seeds = torch.randint(-2 ** 63, 2 ** 63 - 1, [accelerator.num_processes], generator=torch.Generator().manual_seed(training_seed))\n", " torch.manual_seed(seeds[accelerator.process_index]) " ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def log_step_to_wandb(epoch, loss, step, sched, ema_decay):\n", " log_dict = {\n", " 'epoch': epoch,\n", " 'loss': loss.item(),\n", " 'lr': sched.get_last_lr()[0],\n", " 'ema_decay': ema_decay,\n", " }\n", " wandb.log(log_dict, step=step)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def write_progress_to_tdqm(epoch, step, loss):\n", " tqdm.write(f'Epoch: {epoch}, step: {step}, loss: {loss.item():g}')" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "opt = opt_func(inner_model.parameters())\n", "init_training_manual_seed(accelerator)\n", "use_wandb = accelerator.is_main_process and wandb_project\n", "if use_wandb: init_wandb()\n", "sched = K.utils.InverseLR(opt, inv_gamma=20000.0, power=1.0, warmup=0.99)\n", "ema_sched = K.utils.EMAWarmup(power=0.6667, max_value=0.9999)\n", "image_key = 0\n", "\n", "inner_model, opt, train_dl = accelerator.prepare(inner_model, opt, train_dl)\n", "if use_wandb:\n", " wandb.watch(inner_model)\n", "\n", "sample_density = make_sample_density()\n", "model_ema = deepcopy(model)\n", "\n", "epoch = 0\n", "step = 0" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "evaluate_enabled = evaluate_every > 0 and evaluate_n > 0\n", "extractor = None\n", "\n", "if evaluate_enabled:\n", " extractor = K.evaluation.InceptionV3FeatureExtractor(device=device)\n", " train_iter = iter(train_dl)\n", " if accelerator.is_main_process:\n", " print('Computing features for reals...')\n", " reals_features = K.evaluation.compute_features(accelerator, lambda x: next(train_iter)[image_key][1], extractor, evaluate_n, bs)\n", " if accelerator.is_main_process:\n", " Path(metrics_dir).mkdir(exist_ok=True)\n", " metrics_log = K.utils.CSVLogger(f'{name}_metrics.csv', ['step', 'fid', 'kid'])\n", " del train_iter" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "@torch.no_grad()\n", "def demo(model_ema, step, size):\n", " with K.utils.eval_mode(model_ema):\n", " if accelerator.is_main_process:\n", " tqdm.write('Sampling...')\n", " filename = f'{demo_imgs_dir}/{name}_demo_{step:08}.png'\n", " path = Path(filename)\n", " path.parent.mkdir(exist_ok=True)\n", " n_per_proc = math.ceil(sample_n / 
accelerator.num_processes)\n", " x = torch.randn([n_per_proc, input_channels, size[0], size[1]], device=device) * sigma_max\n", " sigmas = K.sampling.get_sigmas_karras(50, sigma_min, sigma_max, rho=7., device=device)\n", " x_0 = K.sampling.sample_lms(model_ema, x, sigmas, disable=not accelerator.is_main_process)\n", " x_0 = accelerator.gather(x_0)[:sample_n]\n", " # For some reason the images are inverting...\n", " x_0 = -x_0\n", "\n", " if accelerator.is_main_process:\n", " grid = torchvision.utils.make_grid(x_0, nrow=math.ceil(sample_n ** 0.5), padding=0)\n", " K.utils.to_pil_image(grid).save(filename)\n", " if use_wandb:\n", " wandb.log({'demo_grid': wandb.Image(filename)}, step=step)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "@torch.no_grad()\n", "def evaluate(model_ema, step, size):\n", " with K.utils.eval_mode(model_ema):\n", " if not evaluate_enabled:\n", " return\n", " if accelerator.is_main_process:\n", " tqdm.write('Evaluating...')\n", " sigmas = K.sampling.get_sigmas_karras(50, sigma_min, sigma_max, rho=7., device=device)\n", " def sample_fn(n):\n", " x = torch.randn([n, input_channels, size[0], size[1]], device=device) * sigma_max\n", " x_0 = K.sampling.sample_lms(model_ema, x, sigmas, disable=True)\n", " return x_0\n", " fakes_features = K.evaluation.compute_features(accelerator, sample_fn, extractor, evaluate_n, bs)\n", " if accelerator.is_main_process:\n", " fid = K.evaluation.fid(fakes_features, reals_features)\n", " kid = K.evaluation.kid(fakes_features, reals_features)\n", " print(f'FID: {fid.item():g}, KID: {kid.item():g}')\n", " if accelerator.is_main_process:\n", " metrics_log.write(step, fid.item(), kid.item())\n", " if use_wandb:\n", " wandb.log({'FID': fid.item(), 'KID': kid.item()}, step=step)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def save(step, epoch, opt, sched):\n", " accelerator.wait_for_everyone()\n", " filename = f'{checkpoints_dir}/{name}_{step:08}.pth'\n", " if accelerator.is_main_process:\n", " tqdm.write(f'Saving to {filename}...')\n", " obj = {\n", " 'model': accelerator.unwrap_model(model.inner_model).state_dict(),\n", " 'model_ema': accelerator.unwrap_model(model_ema.inner_model).state_dict(),\n", " 'opt': opt.state_dict(),\n", " 'sched': sched.state_dict(),\n", " 'ema_sched': ema_sched.state_dict(),\n", " 'epoch': epoch,\n", " 'step': step\n", " }\n", " accelerator.save(obj, filename)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Training Loop" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 1000, "referenced_widgets": [ "dee95d2f12434f91b42f903656cc1ae8", "a4e5d6ce3657424eada1a68d0ff884b1", "8f31a65e904f4bde91cc6fea6636c0a8", "c76eba57c2af4a40b73731dc10b74c57", "d99f9f7346f34b1cbaaa865b0106a952", "4979c4212a1b46ceb5664a9a8680b665", "60553a0615334106a03d52877861569a", "13c327d321444b32aa4349ec81e98cde" ] }, "id": "X9oh08qYkpRH", "outputId": "0f3e9a25-ad0f-44ef-a087-b172c921347f" }, "outputs": [], "source": [ "try:\n", " while True:\n", " for batch in tqdm(train_dl, disable=not accelerator.is_main_process):\n", " with accelerator.accumulate(model):\n", " reals, _, aug_cond = batch[image_key]\n", " noise = torch.randn_like(reals)\n", " sigma = sample_density([reals.shape[0]], device=device)\n", " losses = model.loss(reals, noise, sigma, aug_cond=aug_cond)\n", " losses_all = accelerator.gather(losses)\n", " loss = losses_all.mean()\n", " 
accelerator.backward(losses.mean())\n", " opt.step()\n", " sched.step()\n", " opt.zero_grad()\n", " if accelerator.sync_gradients:\n", " ema_decay = ema_sched.get_value()\n", " K.utils.ema_update(model, model_ema, ema_decay)\n", " ema_sched.step()\n", "\n", " if accelerator.is_main_process and step % 25 == 0:\n", " write_progress_to_tdqm(epoch, step, loss)\n", "\n", " if use_wandb: \n", " log_step_to_wandb(epoch, loss, step, sched, ema_decay)\n", "\n", " if step % demo_every == 0:\n", " demo(model_ema, step, size)\n", "\n", " if evaluate_enabled and step > 0 and step % evaluate_every == 0:\n", " evaluate(model_ema, step, size)\n", "\n", " if step > 0 and step % save_every == 0:\n", " save(step, epoch, opt, sched)\n", "\n", " step += 1\n", " epoch += 1\n", "except KeyboardInterrupt:\n", " pass" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Finished: Epoch: 78, step: 18450, loss: -0.411248" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "RtbFHKZqz1q7" }, "outputs": [], "source": [ "torch.save(model.state_dict(), str(model_path))\n", "torch.save(model_ema.state_dict(), str(model_ema_path))" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "markdown", "metadata": { "id": "_vP7D_a4u0d9" }, "source": [ "# Sampling" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "inner_model = make_model().to(device)\n", "model_ema = make_denoiser_wrapper()(inner_model)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "model_ema.load_state_dict(torch.load(str(model_ema_path)))" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "@torch.no_grad()\n", "def sample_lms(model_ema, size):\n", " with K.utils.eval_mode(model_ema):\n", " n_per_proc = math.ceil(sample_n / accelerator.num_processes)\n", " x = torch.randn([n_per_proc, input_channels, size[0], size[1]], device=device) * sigma_max\n", " sigmas = K.sampling.get_sigmas_karras(50, sigma_min, sigma_max, rho=7., device=device)\n", " x_0 = K.sampling.sample_lms(model_ema, x, sigmas, disable=not accelerator.is_main_process)\n", " x_0 = accelerator.gather(x_0)[:sample_n]\n", " # For some reason the images are inverting...\n", " x_0 = -x_0\n", "\n", " grid = torchvision.utils.make_grid(x_0, nrow=math.ceil(sample_n ** 0.5), padding=0)\n", " return K.utils.to_pil_image(grid)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "grid = sample_lms(model_ema, size)\n", "fig, ax = plt.subplots(1, 1, figsize=(16, 16))\n", "ax.imshow(grid)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "accelerator": "GPU", "colab": { "collapsed_sections": [], "provenance": [] }, "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.6" }, "widgets": { "application/vnd.jupyter.widget-state+json": { "0abad3eee1904c2bb8a2f963fed5fba6": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "DescriptionStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": 
"DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "13c327d321444b32aa4349ec81e98cde": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "ProgressStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "184b1cfa56be41c7845062a4e7c5fa59": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "DescriptionStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "250c148b80734613a53fec26ab1b3db8": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "33691922e1a0401890529b929d0169b7": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "ProgressStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "3b7a1d8560004241b9b06700bcdb5b1c": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "FloatProgressModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_63c702b2fa6a4270b88479f5319a6ae2", "max": 1, "min": 0, "orientation": "horizontal", "style": "IPY_MODEL_33691922e1a0401890529b929d0169b7", "value": 1 } }, "4979c4212a1b46ceb5664a9a8680b665": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "DescriptionStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", 
"_model_module_version": "1.5.0", "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "StyleView", "description_width": "" } }, "56f6e898da4248ea9a64658f6b284a3b": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "60553a0615334106a03d52877861569a": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "6172b7637810408ebe9e2118c5d02c04": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HTMLModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_250c148b80734613a53fec26ab1b3db8", "placeholder": "​", "style": "IPY_MODEL_0abad3eee1904c2bb8a2f963fed5fba6", "value": "100%" } }, "63c702b2fa6a4270b88479f5319a6ae2": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": 
null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "6a7fe0a8c7d844c9b92ab9dab247ec79": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "72f566c251ba4cf6a0282ed4340e1f08": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HTMLModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_56f6e898da4248ea9a64658f6b284a3b", "placeholder": "​", "style": "IPY_MODEL_184b1cfa56be41c7845062a4e7c5fa59", "value": " 1/1 [00:00<00:00, 27.33it/s]" } }, "8f31a65e904f4bde91cc6fea6636c0a8": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "FloatProgressModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "", "description": "", "description_tooltip": null, "layout": "IPY_MODEL_60553a0615334106a03d52877861569a", "max": 1, "min": 0, "orientation": "horizontal", "style": "IPY_MODEL_13c327d321444b32aa4349ec81e98cde", "value": 1 } }, "a4e5d6ce3657424eada1a68d0ff884b1": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "LabelModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "LabelModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "LabelView", 
"description": "", "description_tooltip": null, "layout": "IPY_MODEL_d99f9f7346f34b1cbaaa865b0106a952", "placeholder": "​", "style": "IPY_MODEL_4979c4212a1b46ceb5664a9a8680b665", "value": "139.511 MB of 139.511 MB uploaded (0.000 MB deduped)\r" } }, "aa9daced7eee485a918d1e398d228f51": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "HBoxModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", "_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_6172b7637810408ebe9e2118c5d02c04", "IPY_MODEL_3b7a1d8560004241b9b06700bcdb5b1c", "IPY_MODEL_72f566c251ba4cf6a0282ed4340e1f08" ], "layout": "IPY_MODEL_6a7fe0a8c7d844c9b92ab9dab247ec79" } }, "c76eba57c2af4a40b73731dc10b74c57": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "d99f9f7346f34b1cbaaa865b0106a952": { "model_module": "@jupyter-widgets/base", "model_module_version": "1.2.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "overflow_x": null, "overflow_y": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "dee95d2f12434f91b42f903656cc1ae8": { "model_module": "@jupyter-widgets/controls", "model_module_version": "1.5.0", "model_name": "VBoxModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", "_model_name": "VBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "1.5.0", 
"_view_name": "VBoxView", "box_style": "", "children": [ "IPY_MODEL_a4e5d6ce3657424eada1a68d0ff884b1", "IPY_MODEL_8f31a65e904f4bde91cc6fea6636c0a8" ], "layout": "IPY_MODEL_c76eba57c2af4a40b73731dc10b74c57" } } } } }, "nbformat": 4, "nbformat_minor": 4 }