{ "cells": [ { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from __future__ import absolute_import, division, print_function, unicode_literals\n", "\n", "import sys\n", "import tensorflow as tf\n", "\n", "import os\n", "import numpy as np\n", "import matplotlib.pyplot as plt\n", "import PIL\n", "\n", "print(\"Python Version: Need: 3.6.5 Current: \" + sys.version)\n", "print(\"TensorFlow Version: Need: 2.0.0-beta1 Current: \" + tf.__version__)\n", "\n", "_URL = \"https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz\"\n", "\n", "zip_file = tf.keras.utils.get_file(origin=_URL, fname=\"flower_photos.tgz\", extract=True)\n", "\n", "base_dir = os.path.join(os.path.dirname(zip_file), 'flower_photos')\n", "\n", "print(\"Base Directory: \" + base_dir)\n", "\n", "IMAGE_SIZE = 224\n", "BATCH_SIZE = 64\n", "\n", "datagen = tf.keras.preprocessing.image.ImageDataGenerator(\n", " rescale=1./255, \n", " validation_split=0.2)\n", "\n", "train_generator = datagen.flow_from_directory(\n", " base_dir,\n", " target_size=(IMAGE_SIZE, IMAGE_SIZE),\n", " batch_size=BATCH_SIZE, \n", " subset='training')\n", "\n", "val_generator = datagen.flow_from_directory(\n", " base_dir,\n", " target_size=(IMAGE_SIZE, IMAGE_SIZE),\n", " batch_size=BATCH_SIZE, \n", " subset='validation')\n", "\n", "for image_batch, label_batch in train_generator:\n", " break\n", "\n", "image_batch.shape, label_batch.shape\n", "\n", "print (train_generator.class_indices)\n", "\n", "labels = '\\n'.join(sorted(train_generator.class_indices.keys()))\n", "\n", "with open('labels.txt', 'w') as f:\n", " f.write(labels)\n", "\n", "!cat labels.txt\n", "\n", "IMG_SHAPE = (IMAGE_SIZE, IMAGE_SIZE, 3)\n", "\n", "# Create the base model from the pre-trained model MobileNet V2\n", "base_model = tf.keras.applications.MobileNetV2(input_shape=IMG_SHAPE, include_top=False, weights='imagenet')\n", "\n", "base_model.trainable = False\n", "\n", "model = tf.keras.Sequential([\n", " base_model,\n", " tf.keras.layers.Conv2D(32, 3, activation='relu'),\n", " tf.keras.layers.Dropout(0.2),\n", " tf.keras.layers.GlobalAveragePooling2D(),\n", " tf.keras.layers.Dense(5, activation='softmax')\n", "])\n", "\n", "model.compile(optimizer=tf.keras.optimizers.Adam(), \n", " loss='categorical_crossentropy', \n", " metrics=['accuracy'])\n", "\n", "model.summary()\n", "\n", "print('Number of trainable variables = {}'.format(len(model.trainable_variables)))\n", "\n", "epochs = 10\n", "\n", "history = model.fit(train_generator, epochs=epochs, validation_data=val_generator)\n", "\n", "acc = history.history['accuracy']\n", "val_acc = history.history['val_accuracy']\n", "\n", "loss = history.history['loss']\n", "val_loss = history.history['val_loss']\n", "\n", "plt.figure(figsize=(8, 8))\n", "plt.subplot(2, 1, 1)\n", "plt.plot(acc, label='Training Accuracy')\n", "plt.plot(val_acc, label='Validation Accuracy')\n", "plt.legend(loc='lower right')\n", "plt.ylabel('Accuracy')\n", "plt.ylim([min(plt.ylim()),1])\n", "plt.title('Training and Validation Accuracy')\n", "\n", "plt.subplot(2, 1, 2)\n", "plt.plot(loss, label='Training Loss')\n", "plt.plot(val_loss, label='Validation Loss')\n", "plt.legend(loc='upper right')\n", "plt.ylabel('Cross Entropy')\n", "plt.ylim([0,1.0])\n", "plt.title('Training and Validation Loss')\n", "plt.xlabel('epoch')\n", "plt.show()\n", "\n", "base_model.trainable = True\n", "\n", "# Let's take a look to see how many layers are in the base model\n", "print(\"Number of layers in the base 
model: \", len(base_model.layers))\n", "\n", "# Fine tune from this layer onwards\n", "fine_tune_at = 100\n", "\n", "# Freeze all the layers before the `fine_tune_at` layer\n", "for layer in base_model.layers[:fine_tune_at]:\n", " layer.trainable = False\n", "\n", "\n", "model.compile(loss='categorical_crossentropy',\n", " optimizer = tf.keras.optimizers.Adam(1e-5),\n", " metrics=['accuracy'])\n", "\n", "model.summary()\n", "\n", "print('Number of trainable variables = {}'.format(len(model.trainable_variables)))\n", "\n", "history_fine = model.fit(train_generator, epochs=5, validation_data=val_generator)\n", "\n", "saved_model_dir = 'save/fine_tuning'\n", "tf.saved_model.save(model, saved_model_dir)\n", "\n", "converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)\n", "tflite_model = converter.convert()\n", "\n", "with open('model.tflite', 'wb') as f:\n", " f.write(tflite_model)\n", "\n", "acc = history_fine.history['accuracy']\n", "val_acc = history_fine.history['val_accuracy']\n", "\n", "loss = history_fine.history['loss']\n", "val_loss = history_fine.history['val_loss']\n", "\n", "plt.figure(figsize=(8, 8))\n", "plt.subplot(2, 1, 1)\n", "plt.plot(acc, label='Training Accuracy')\n", "plt.plot(val_acc, label='Validation Accuracy')\n", "plt.legend(loc='lower right')\n", "plt.ylabel('Accuracy')\n", "plt.ylim([min(plt.ylim()),1])\n", "plt.title('Training and Validation Accuracy')\n", "\n", "plt.subplot(2, 1, 2)\n", "plt.plot(loss, label='Training Loss')\n", "plt.plot(val_loss, label='Validation Loss')\n", "plt.legend(loc='upper right')\n", "plt.ylabel('Cross Entropy')\n", "plt.ylim([0,1.0])\n", "plt.title('Training and Validation Loss')\n", "plt.xlabel('epoch')\n", "plt.show()\n", "\n", "\n", "\n" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.6.5" } }, "nbformat": 4, "nbformat_minor": 2 }