{ "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": { "collapsed": true }, "outputs": [], "source": [ "# These are all the modules we'll be using later. Make sure you can import them\n", "# before proceeding further.\n", "from __future__ import print_function\n", "import imageio\n", "import matplotlib.pyplot as plt\n", "import numpy as np\n", "import os\n", "import sys\n", "import tarfile\n", "from IPython.display import display, Image\n", "from sklearn.linear_model import LogisticRegression\n", "from six.moves.urllib.request import urlretrieve\n", "from six.moves import cPickle as pickle\n", "\n", "# Config the matplotlib backend as plotting inline in IPython\n", "%matplotlib inline" ] }, { "cell_type": "code", "execution_count": 2, "metadata": { "collapsed": true }, "outputs": [], "source": [ "data_root = 'D:\\\\1_Workspaces\\\\UNDER_VCS\\\\github\\\\1_ML_NN\\\\python_with_math\\\\data' # Change me to store data elsewhere\n", "array_letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']\n", "filename_for_log_regr = data_root + '\\\\finalized_model_log_regr_500K_samples.sav'\n", "url = 'https://commondatastorage.googleapis.com/books1000/'\n", "last_percent_reported = None" ] }, { "cell_type": "code", "execution_count": 3, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Found and verified D:\\1_Workspaces\\UNDER_VCS\\github\\1_ML_NN\\python_with_math\\data\\notMNIST_large.tar.gz\n", "Found and verified D:\\1_Workspaces\\UNDER_VCS\\github\\1_ML_NN\\python_with_math\\data\\notMNIST_small.tar.gz\n" ] } ], "source": [ "def download_progress_hook(count, blockSize, totalSize):\n", " \"\"\"A hook to report the progress of a download. This is mostly intended for users with\n", " slow internet connections. Reports every 5% change in download progress.\n", " \"\"\"\n", " global last_percent_reported\n", " percent = int(count * blockSize * 100 / totalSize)\n", "\n", " if last_percent_reported != percent:\n", " if percent % 5 == 0:\n", " sys.stdout.write(\"%s%%\" % percent)\n", " sys.stdout.flush()\n", " else:\n", " sys.stdout.write(\".\")\n", " sys.stdout.flush()\n", " \n", " last_percent_reported = percent\n", " \n", "def maybe_download(filename, expected_bytes, force=False):\n", " \"\"\"Download a file if not present, and make sure it's the right size.\"\"\"\n", " dest_filename = os.path.join(data_root, filename)\n", " if force or not os.path.exists(dest_filename):\n", " print('Attempting to download:', filename) \n", " filename, _ = urlretrieve(url + filename, dest_filename, reporthook=download_progress_hook)\n", " print('\\nDownload Complete!')\n", " statinfo = os.stat(dest_filename)\n", " if statinfo.st_size == expected_bytes:\n", " print('Found and verified', dest_filename)\n", " else:\n", " raise Exception(\n", " 'Failed to verify ' + dest_filename + '. 
{ "cell_type": "code", "execution_count": 3, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Found and verified D:\1_Workspaces\UNDER_VCS\github\1_ML_NN\python_with_math\data\notMNIST_large.tar.gz\n", "Found and verified D:\1_Workspaces\UNDER_VCS\github\1_ML_NN\python_with_math\data\notMNIST_small.tar.gz\n" ] } ], "source": [ "def download_progress_hook(count, blockSize, totalSize):\n", "  \"\"\"A hook to report the progress of a download. This is mostly intended for users with\n", "  slow internet connections. Reports every 5% change in download progress.\n", "  \"\"\"\n", "  global last_percent_reported\n", "  percent = int(count * blockSize * 100 / totalSize)\n", "\n", "  if last_percent_reported != percent:\n", "    if percent % 5 == 0:\n", "      sys.stdout.write(\"%s%%\" % percent)\n", "      sys.stdout.flush()\n", "    else:\n", "      sys.stdout.write(\".\")\n", "      sys.stdout.flush()\n", "\n", "    last_percent_reported = percent\n", "\n", "def maybe_download(filename, expected_bytes, force=False):\n", "  \"\"\"Download a file if not present, and make sure it's the right size.\"\"\"\n", "  dest_filename = os.path.join(data_root, filename)\n", "  if force or not os.path.exists(dest_filename):\n", "    print('Attempting to download:', filename)\n", "    filename, _ = urlretrieve(url + filename, dest_filename, reporthook=download_progress_hook)\n", "    print('\\nDownload Complete!')\n", "  statinfo = os.stat(dest_filename)\n", "  if statinfo.st_size == expected_bytes:\n", "    print('Found and verified', dest_filename)\n", "  else:\n", "    raise Exception(\n", "      'Failed to verify ' + dest_filename + '. Can you get to it with a browser?')\n", "  return dest_filename\n", "\n", "train_filename = maybe_download('notMNIST_large.tar.gz', 247336696)\n", "test_filename = maybe_download('notMNIST_small.tar.gz', 8458043)" ] }, { "cell_type": "code", "execution_count": 4, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "D:\1_Workspaces\UNDER_VCS\github\1_ML_NN\python_with_math\data\notMNIST_large already present - Skipping extraction of D:\1_Workspaces\UNDER_VCS\github\1_ML_NN\python_with_math\data\notMNIST_large.tar.gz.\n", "['D:\\1_Workspaces\\UNDER_VCS\\github\\1_ML_NN\\python_with_math\\data\\notMNIST_large\\A', 'D:\\1_Workspaces\\UNDER_VCS\\github\\1_ML_NN\\python_with_math\\data\\notMNIST_large\\B', 'D:\\1_Workspaces\\UNDER_VCS\\github\\1_ML_NN\\python_with_math\\data\\notMNIST_large\\C', 'D:\\1_Workspaces\\UNDER_VCS\\github\\1_ML_NN\\python_with_math\\data\\notMNIST_large\\D', 'D:\\1_Workspaces\\UNDER_VCS\\github\\1_ML_NN\\python_with_math\\data\\notMNIST_large\\E', 'D:\\1_Workspaces\\UNDER_VCS\\github\\1_ML_NN\\python_with_math\\data\\notMNIST_large\\F', 'D:\\1_Workspaces\\UNDER_VCS\\github\\1_ML_NN\\python_with_math\\data\\notMNIST_large\\G', 'D:\\1_Workspaces\\UNDER_VCS\\github\\1_ML_NN\\python_with_math\\data\\notMNIST_large\\H', 'D:\\1_Workspaces\\UNDER_VCS\\github\\1_ML_NN\\python_with_math\\data\\notMNIST_large\\I', 'D:\\1_Workspaces\\UNDER_VCS\\github\\1_ML_NN\\python_with_math\\data\\notMNIST_large\\J']\n", "D:\1_Workspaces\UNDER_VCS\github\1_ML_NN\python_with_math\data\notMNIST_small already present - Skipping extraction of D:\1_Workspaces\UNDER_VCS\github\1_ML_NN\python_with_math\data\notMNIST_small.tar.gz.\n", "['D:\\1_Workspaces\\UNDER_VCS\\github\\1_ML_NN\\python_with_math\\data\\notMNIST_small\\A', 'D:\\1_Workspaces\\UNDER_VCS\\github\\1_ML_NN\\python_with_math\\data\\notMNIST_small\\B', 'D:\\1_Workspaces\\UNDER_VCS\\github\\1_ML_NN\\python_with_math\\data\\notMNIST_small\\C', 'D:\\1_Workspaces\\UNDER_VCS\\github\\1_ML_NN\\python_with_math\\data\\notMNIST_small\\D', 'D:\\1_Workspaces\\UNDER_VCS\\github\\1_ML_NN\\python_with_math\\data\\notMNIST_small\\E', 'D:\\1_Workspaces\\UNDER_VCS\\github\\1_ML_NN\\python_with_math\\data\\notMNIST_small\\F', 'D:\\1_Workspaces\\UNDER_VCS\\github\\1_ML_NN\\python_with_math\\data\\notMNIST_small\\G', 'D:\\1_Workspaces\\UNDER_VCS\\github\\1_ML_NN\\python_with_math\\data\\notMNIST_small\\H', 'D:\\1_Workspaces\\UNDER_VCS\\github\\1_ML_NN\\python_with_math\\data\\notMNIST_small\\I', 'D:\\1_Workspaces\\UNDER_VCS\\github\\1_ML_NN\\python_with_math\\data\\notMNIST_small\\J']\n" ] } ], "source": [ "num_classes = 10\n", "np.random.seed(133)\n", "\n", "def maybe_extract(filename, force=False):\n", "  root = os.path.splitext(os.path.splitext(filename)[0])[0]  # remove .tar.gz\n", "  if os.path.isdir(root) and not force:\n", "    # You may override by setting force=True.\n", "    print('%s already present - Skipping extraction of %s.' % (root, filename))\n", "  else:\n", "    print('Extracting data for %s. This may take a while. Please wait.' % root)\n", "    tar = tarfile.open(filename)\n", "    sys.stdout.flush()\n", "    tar.extractall(data_root)\n", "    tar.close()\n", "  data_folders = [\n", "    os.path.join(root, d) for d in sorted(os.listdir(root))\n", "    if os.path.isdir(os.path.join(root, d))]\n", "  if len(data_folders) != num_classes:\n", "    raise Exception(\n", "      'Expected %d folders, one per class. Found %d instead.' % (\n", "        num_classes, len(data_folders)))\n", "  print(data_folders)\n", "  return data_folders\n", "\n", "train_folders = maybe_extract(train_filename)\n", "test_folders = maybe_extract(test_filename)" ] },
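{ "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# Added sanity-check sketch: render one extracted PNG with the display/Image helpers\n", "# imported above (they are otherwise unused). The file picked here is arbitrary -\n", "# any image from any letter folder will do.\n", "sample_folder = train_folders[0]\n", "sample_file = os.path.join(sample_folder, os.listdir(sample_folder)[0])\n", "display(Image(filename=sample_file))" ] },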
{ "cell_type": "code", "execution_count": 5, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "D:\1_Workspaces\UNDER_VCS\github\1_ML_NN\python_with_math\data\notMNIST_large\A.pickle already present - Skipping pickling.\n", "D:\1_Workspaces\UNDER_VCS\github\1_ML_NN\python_with_math\data\notMNIST_large\B.pickle already present - Skipping pickling.\n", "D:\1_Workspaces\UNDER_VCS\github\1_ML_NN\python_with_math\data\notMNIST_large\C.pickle already present - Skipping pickling.\n", "D:\1_Workspaces\UNDER_VCS\github\1_ML_NN\python_with_math\data\notMNIST_large\D.pickle already present - Skipping pickling.\n", "D:\1_Workspaces\UNDER_VCS\github\1_ML_NN\python_with_math\data\notMNIST_large\E.pickle already present - Skipping pickling.\n", "D:\1_Workspaces\UNDER_VCS\github\1_ML_NN\python_with_math\data\notMNIST_large\F.pickle already present - Skipping pickling.\n", "D:\1_Workspaces\UNDER_VCS\github\1_ML_NN\python_with_math\data\notMNIST_large\G.pickle already present - Skipping pickling.\n", "D:\1_Workspaces\UNDER_VCS\github\1_ML_NN\python_with_math\data\notMNIST_large\H.pickle already present - Skipping pickling.\n", "D:\1_Workspaces\UNDER_VCS\github\1_ML_NN\python_with_math\data\notMNIST_large\I.pickle already present - Skipping pickling.\n", "D:\1_Workspaces\UNDER_VCS\github\1_ML_NN\python_with_math\data\notMNIST_large\J.pickle already present - Skipping pickling.\n", "D:\1_Workspaces\UNDER_VCS\github\1_ML_NN\python_with_math\data\notMNIST_small\A.pickle already present - Skipping pickling.\n", "D:\1_Workspaces\UNDER_VCS\github\1_ML_NN\python_with_math\data\notMNIST_small\B.pickle already present - Skipping pickling.\n", "D:\1_Workspaces\UNDER_VCS\github\1_ML_NN\python_with_math\data\notMNIST_small\C.pickle already present - Skipping pickling.\n", "D:\1_Workspaces\UNDER_VCS\github\1_ML_NN\python_with_math\data\notMNIST_small\D.pickle already present - Skipping pickling.\n", "D:\1_Workspaces\UNDER_VCS\github\1_ML_NN\python_with_math\data\notMNIST_small\E.pickle already present - Skipping pickling.\n", "D:\1_Workspaces\UNDER_VCS\github\1_ML_NN\python_with_math\data\notMNIST_small\F.pickle already present - Skipping pickling.\n", "D:\1_Workspaces\UNDER_VCS\github\1_ML_NN\python_with_math\data\notMNIST_small\G.pickle already present - Skipping pickling.\n", "D:\1_Workspaces\UNDER_VCS\github\1_ML_NN\python_with_math\data\notMNIST_small\H.pickle already present - Skipping pickling.\n", "D:\1_Workspaces\UNDER_VCS\github\1_ML_NN\python_with_math\data\notMNIST_small\I.pickle already present - Skipping pickling.\n", "D:\1_Workspaces\UNDER_VCS\github\1_ML_NN\python_with_math\data\notMNIST_small\J.pickle already present - Skipping pickling.\n" ] } ], "source": [ "image_size = 28  # Pixel width and height.\n", "pixel_depth = 255.0  # Number of levels per pixel.\n", "\n", "def load_letter(folder, min_num_images):\n", "  \"\"\"Load the data for a single letter label.\"\"\"\n", "  image_files = os.listdir(folder)  # list every image file in this letter's folder\n", "  dataset = np.ndarray(shape=(len(image_files), image_size, image_size),\n", "                       dtype=np.float32)  # one 3-D array: (num_images, height, width)\n", "  print(folder)\n", "  num_images = 0\n", "  for image in image_files:\n", "    image_file = os.path.join(folder, image)\n", "    try:\n", "      # scale pixel values from [0, 255] to [-0.5, 0.5]\n", "      image_data = (imageio.imread(image_file).astype(float) -\n", "                    pixel_depth / 2) / pixel_depth\n", "      if image_data.shape != (image_size, image_size):\n", "        raise Exception('Unexpected image shape: %s' % str(image_data.shape))\n", "      dataset[num_images, :, :] = image_data\n", "      num_images = num_images + 1\n", "    except (IOError, ValueError) as e:\n", "      print('Could not read:', image_file, ':', e, '- it\\'s ok, skipping.')\n", "\n", "  dataset = dataset[0:num_images, :, :]\n", "  if num_images < min_num_images:\n", "    raise Exception('Many fewer images than expected: %d < %d' %\n", "                    (num_images, min_num_images))\n", "\n", "  print('Full dataset tensor:', dataset.shape)\n", "  print('Mean:', np.mean(dataset))\n", "  print('Standard deviation:', np.std(dataset))\n", "  return dataset\n", "\n", "def maybe_pickle(data_folders, min_num_images_per_class, force=False):\n", "  dataset_names = []\n", "  for folder in data_folders:  # for each letter's folder\n", "    set_filename = folder + '.pickle'  # one pickle file per letter, named after its folder\n", "    dataset_names.append(set_filename)  # collect the pickle file names to return\n", "    if os.path.exists(set_filename) and not force:\n", "      # You may override by setting force=True.\n", "      print('%s already present - Skipping pickling.' % set_filename)\n", "    else:\n", "      print('Pickling %s.' % set_filename)\n", "      dataset = load_letter(folder, min_num_images_per_class)  # load this letter's images into a 3-D array\n", "      try:\n", "        with open(set_filename, 'wb') as f:\n", "          pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL)  # serialize the 3-D array to disk\n", "      except Exception as e:\n", "        print('Unable to save data to', set_filename, ':', e)\n", "\n", "  return dataset_names\n", "\n", "train_datasets = maybe_pickle(train_folders, 45000)  # pickle every letter of the training set\n", "test_datasets = maybe_pickle(test_folders, 1800)  # pickle every letter of the test set" ] },
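{ "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# Added verification sketch: reload one letter pickle and eyeball a sample, to confirm\n", "# the normalized data still looks like a letter.\n", "with open(train_datasets[0], 'rb') as f:\n", "    letter_a = pickle.load(f)\n", "print('shape:', letter_a.shape)\n", "plt.imshow(letter_a[0], cmap='gray')" ] },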
{ "cell_type": "code", "execution_count": 6, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "====================== merging =======================================\n", "valid_size 10000 \n", "\n", "train_size 200000 \n", " num_classes 10 \n", "\n", "vsize_per_class 1000 \n", " tsize_per_class 20000 \n", "\n", "\n", "====================== merging =======================================\n", "valid_size 0 \n", "\n", "train_size 10000 \n", " num_classes 10 \n", "\n", "vsize_per_class 0 \n", " tsize_per_class 1000 \n", "\n", "\n", "Training: (200000, 28, 28) (200000,)\n", "Validation: (10000, 28, 28) (10000,)\n", "Testing: (10000, 28, 28) (10000,)\n" ] } ], "source": [ "# create empty ndarrays for a dataset and its labels\n", "def make_arrays(nb_rows, img_size):\n", "  if nb_rows:\n", "    dataset = np.ndarray((nb_rows, img_size, img_size), dtype=np.float32)\n", "    labels = np.ndarray(nb_rows, dtype=np.int32)\n", "  else:\n", "    dataset, labels = None, None\n", "  return dataset, labels\n", "\n", "# merge all GIVEN pickle files into a single training set plus an optional validation set\n", "def merge_datasets(pickle_files, train_size, valid_size=0):\n", "  print('====================== merging =======================================')\n", "  num_classes = len(pickle_files)\n", "\n", "  valid_dataset, valid_labels = make_arrays(valid_size, image_size)  # empty arrays\n", "  train_dataset, train_labels = make_arrays(train_size, image_size)  # empty arrays\n", "\n", "  vsize_per_class = valid_size // num_classes\n", "  tsize_per_class = train_size // num_classes\n", "\n", "  print('valid_size ', valid_size, '\\n')\n", "  print('train_size ', train_size, '\\n', 'num_classes', num_classes, '\\n')\n", "  print('vsize_per_class ', vsize_per_class, '\\n', 'tsize_per_class', tsize_per_class, '\\n\\n')\n", "\n", "  start_v, start_t = 0, 0\n", "  end_v, end_t = vsize_per_class, tsize_per_class\n", "  end_l = vsize_per_class + tsize_per_class\n", "\n", "  for label, pickle_file in enumerate(pickle_files):\n", "    try:\n", "      with open(pickle_file, 'rb') as f:\n", "        letter_set = pickle.load(f)\n", "        # let's shuffle the letters to have random validation and training set\n", "        np.random.shuffle(letter_set)\n", "        if valid_dataset is not None:\n", "          valid_letter = letter_set[:vsize_per_class, :, :]\n", "          valid_dataset[start_v:end_v, :, :] = valid_letter\n", "          valid_labels[start_v:end_v] = label\n", "          start_v += vsize_per_class\n", "          end_v += vsize_per_class\n", "\n", "        train_letter = letter_set[vsize_per_class:end_l, :, :]\n", "        train_dataset[start_t:end_t, :, :] = train_letter\n", "        train_labels[start_t:end_t] = label\n", "        start_t += tsize_per_class\n", "        end_t += tsize_per_class\n", "    except Exception as e:\n", "      print('Unable to process data from', pickle_file, ':', e)\n", "      raise\n", "\n", "  # note: returns four objects\n", "  return valid_dataset, valid_labels, train_dataset, train_labels\n", "\n", "# Subject to change according to task 6\n", "train_size = 200000\n", "valid_size = 10000\n", "test_size = 10000\n", "\n", "# uses all four returned objects\n", "valid_dataset, valid_labels, train_dataset, train_labels = merge_datasets(train_datasets, train_size, valid_size)\n", "\n", "# uses only the last two returned objects\n", "_, _, test_dataset, test_labels = merge_datasets(test_datasets, test_size)\n", "\n", "print('Training:', train_dataset.shape, train_labels.shape)\n", "print('Validation:', valid_dataset.shape, valid_labels.shape)\n", "print('Testing:', test_dataset.shape, test_labels.shape)" ] },
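{ "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# Added sketch: the next cells load a pre-trained model from filename_for_log_regr, but\n", "# the training step itself is not in this notebook. This is only a guess at how such a\n", "# model could be produced - the file name suggests 500K training samples (more than the\n", "# 200K merged above) and the default solver settings are my assumptions. It can take\n", "# a long time to run.\n", "nsamples, nx, ny = train_dataset.shape\n", "X_train = train_dataset.reshape((nsamples, nx * ny))\n", "model = LogisticRegression()\n", "model.fit(X_train, train_labels)\n", "# save under a hypothetical name so the original .sav file is not overwritten\n", "with open(filename_for_log_regr + '.rebuilt', 'wb') as f:\n", "    pickle.dump(model, f, pickle.HIGHEST_PROTOCOL)" ] },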
{ "cell_type": "code", "execution_count": 7, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "(10000, 784)\n", "0.8273\n" ] } ], "source": [ "# TODO: instead of the steps above, just dump valid_dataset_reshaped once and reload it\n", "# reshape the validation dataset to 2-D: (n_samples, nx*ny)\n", "nsamples, nx, ny = valid_dataset.shape\n", "valid_dataset_reshaped = valid_dataset.reshape((nsamples, nx*ny))\n", "print(valid_dataset_reshaped.shape)\n", "# load the model from disk\n", "with open(filename_for_log_regr, 'rb') as f:\n", "    loaded_model = pickle.load(f)\n", "result = loaded_model.score(valid_dataset_reshaped, valid_labels)\n", "print(result)" ] }, { "cell_type": "code", "execution_count": 8, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "(10000, 784)\n", "0.8949\n" ] } ], "source": [ "# reshape the test dataset the same way\n", "nsamples, nx, ny = test_dataset.shape\n", "test_dataset_reshaped = test_dataset.reshape((nsamples, nx*ny))\n", "print(test_dataset_reshaped.shape)\n", "score = loaded_model.score(test_dataset_reshaped, test_labels)\n", "print(score)" ] },
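{ "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# Added sketch: accuracy alone hides which letters get confused with which. A per-class\n", "# breakdown via sklearn's confusion matrix; this import is not part of the original\n", "# notebook.\n", "from sklearn.metrics import confusion_matrix\n", "test_predictions = loaded_model.predict(test_dataset_reshaped)\n", "print(confusion_matrix(test_labels, test_predictions))" ] },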
{ "cell_type": "code", "execution_count": 9, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "(28, 28, 4)\n" ] } ], "source": [ "# Read a real letter image that I prepared myself:\n", "letter_root_name = '\\\\my_letters\\\\my_H_28_28_again'\n", "my_letter = data_root + letter_root_name + '.png'\n", "my_letter_gray = data_root + letter_root_name + '_gray' + '.png'\n", "\n", "image_data = imageio.imread(my_letter)\n", "print(image_data.shape)" ] }, { "cell_type": "code", "execution_count": 10, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "(28, 28)\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "C:\Other_IT\Anaconda\lib\site-packages\imageio\core\util.py:104: UserWarning: Conversion from float64 to uint8, range [0.0, 255.0]\n", "  'range [{2}, {3}]'.format(dtype_str, out_type.__name__, mi, ma))\n" ] } ], "source": [ "# keep only the first channel of the RGBA image and write it back as grayscale\n", "imageio.imwrite(my_letter_gray, image_data[:, :, 0])\n", "image_data_gray = imageio.imread(my_letter_gray).astype(float)\n", "print(image_data_gray.shape)" ] },
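{ "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# Added alternative sketch: taking only channel 0 above works for a black-and-white PNG,\n", "# but a weighted luminance sum over the RGB channels is the more general grayscale\n", "# conversion (the weights are the usual Rec. 601 coefficients, my choice).\n", "luminance = np.dot(image_data[:, :, :3], [0.299, 0.587, 0.114])\n", "print(luminance.shape)" ] },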
"iVBORw0KGgoAAAANSUhEUgAAAP8AAAD8CAYAAAC4nHJkAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAACzNJREFUeJzt3V+opIV5x/Hvr3ZdicmFJumyNVIj2IKEdAOHbSFSUmxS\nI4U1N5K9CBsQNhdJaCAXlfSiXkppEnJRApsq2ZTUpJCIXkhFl4IEingUq2tso5UNcbvuJjUQ01L/\n5enFeU2P6/nnOe/MO2ef7wcOZ+adOed9mLPffWfmnZk3VYWkfn5j6gEkTcP4paaMX2rK+KWmjF9q\nyvilpoxfasr4paaMX2rqN+e5souzty7h0nmuUmrlf/lvXqmXs5Xr7ij+JDcAXwMuAv6uqm7f6PqX\ncCl/kOt3skpJG3i4Tmz5utu+25/kIuBvgY8D1wKHk1y73d8nab528pj/IPBsVT1XVa8A3wEOjTOW\npFnbSfxXAD9Zdf75YdmbJDmaZDnJ8qu8vIPVSRrTzJ/tr6pjVbVUVUt72Dvr1Unaop3Efxq4ctX5\n9w3LJO0CO4n/EeCaJO9PcjHwSeDeccaSNGvb3tVXVa8l+RxwPyu7+u6sqqdGm0zSTO1oP39V3Qfc\nN9IskubIl/dKTRm/1JTxS00Zv9SU8UtNGb/U1Fzfz3+huv8/H590/X/62wcmXX9Hs/ybz+vv6ZZf\nasr4paaMX2rK+KWmjF9qyvilpoxfasr4paaMX2rK+KWmjF9qyvilpoxfasr4paZ8S+8WTf22Xc1X\nh7+3W36pKeOXmjJ+qSnjl5oyfqkp45eaMn6pqR3t509yCngJeB14raqWxhhKb+ZHc2sWxniRzx9X\n1c9G+D2S5si7/VJTO42/gAeTPJrk6BgDSZqPnd7tv66qTif5LeCBJP9WVQ+tvsLwn8JRgEt4xw5X\nJ2ksO9ryV9Xp4fs54G7g4BrXOVZVS1W1tIe9O1mdpBFtO/4klyZ51xungY8BJ8caTNJs7eRu/z7g\n7iRv/J5/qKp/GmUqSTO37fir6jng90ecZVId3r+t/+ff2119UlvGLzVl/FJTxi81ZfxSU8YvNeVH\ndy8A37I7G4u8O28R/uZu+aWmjF9qyvilpoxfasr4paaMX2rK+KWm3M8vzcAi7MffjFt+qSnjl5oy\nfqkp45eaMn6pKeOXmjJ+qak2+/mnfG/3btjnq7fnQvibuuWXmjJ+qSnjl5oyfqkp45eaMn6pKeOX\nmtp0P3+SO4E/A85V1QeGZZcD3wWuAk4BN1fVz2c3pvRWi/y5/LvBVrb83wRuOG/ZrcCJqroGODGc\nl7SLbBp/VT0EvHje4kPA8eH0ceCmkeeSNGPbfcy/r6rODKdfAPaNNI+kOdnxE35VVUCtd3mSo0mW\nkyy/yss7XZ2kkWw3/rNJ9gMM38+td8WqOlZVS1W1tIe921ydpLFtN/57gSPD6SPAPeOMI2leNo0/\nyV3AvwC/l+T5JLcAtwMfTfIM8CfDeUm7yKb7+avq8DoXXT/yLJLmyFf4SU0Zv9SU8UtNGb/UlPFL\nTRm/1FSbj+6epQvhY5wXkR+3Pltu+aWmjF9qyvilpoxfasr4paaMX2rK+KWmLpj9/FPuE/YjpLUb\nueWXmjJ+qSnjl5oyfqkp45eaMn6pKeOXmtpV+/ndny6Nxy2/1JTxS00Zv9SU8UtNGb/UlPFLTRm/\n1NSm8Se5M8m5JCdXLbstyekkjw9fN852TElj28qW/5vADWss/2pVHRi+7ht3LEmztmn8VfUQ8OIc\nZpE0Rzt5zP/5JE8MDwsuG20iSXOx3fi/DlwNHADOAF9e74pJjiZZTrL8Ki9vc3WSxrat+KvqbFW9\nXlW/Ar4BHNzguseqaqmqlvawd7tzShrZtuJPsn/V2U8AJ9e7rqTFtOlbepPcBXwEeE+S54G/Aj6S\n5ABQwCngMzOcUdIMbBp/VR1eY/EdM5hloXU4Xvui8fMbZstX+ElNGb/UlPFLTRm/1JTxS00Zv9TU\nQn1095S7dtyVp27c8ktNGb/UlPFLTRm/1JTxS00Zv9SU8UtNGb/UlPFLTRm/1JTxS00Zv9SU8UtN\nGb/UlPFLTS3U+/nVi5/fMC23/FJTxi81ZfxSU8YvNWX8UlPGLzVl/FJTm+7nT3Il8C1gH1DAsar6\nWpLLge8CVwGngJur6ucb/a7f/eD/cP/90+zbdb+u9GZb2fK/Bnyxqq4F/hD4bJJrgVuBE1V1DXBi\nOC9pl9g0/qo6U1WPDadfAp4GrgAOAceHqx0HbprVkJLG97Ye8ye5CvgQ8DCwr6rODBe9wMrDAkm7\nxJbjT/JO4HvAF6rqF6svq6pi5fmAtX7uaJLlJMs//a/XdzSspPFsKf4ke1gJ/9tV9f1h8dkk+4fL\n9wPn1vrZqjpWVUtVtfTed180xsySRrBp/EkC3AE8XVVfWXXRvcCR4fQR4J7xx5M0K1t5S++HgU8B\nTyZ5Yz/dl4DbgX9McgvwY+Dm2YwoaRY2jb+qfgBknYuvH3ccSfPiK/ykpoxfasr4paaMX2rK+KWm\njF9q6oL56G7fsqvz+W9iY275paaMX2rK+KWmjF9qyvilpoxfasr4pabmup//R0+8w32v0oJwyy81\nZfxSU8YvNWX8UlPGLzVl/FJTxi81dcG8n1+7j6/5mJZbfqkp45eaMn6pKeOXmjJ+qSnjl5oyfqmp\nTeNPcmWSf07ywyRPJfnzYfltSU4neXz4unH240oay1Ze5PMa8MWqeizJu4BHkzwwXPbVqvqb2Y0n\naVY2jb+qzgBnhtMvJXkauGLWg0marbf1mD/JVcCHgIeHRZ9P8kSSO5Ncts7PHE2ynGT5VV7e0bCS\nxrPl+JO8E/ge8IWq+gXwdeBq4AAr9wy+vNbPVdWxqlqqqqU97B1hZElj2FL8SfawEv63q+r7AFV1\ntqper6pfAd8ADs5uTElj28qz/QHuAJ6uqq+sWr5/1dU+AZwcfzxJs7KVZ/s/DHwKeDLJ48OyLwGH\nkxwACjgFfGYmE0qaia082/8DIGtcdN/440iaF1/hJzVl/FJTxi81ZfxSU8YvNWX8UlPGLzVl/FJT\nxi81ZfxSU8YvNWX8UlPGLzVl/FJTqar5rSz5KfDjVYveA/xsbgO8PYs626LOBc62XWPO9jtV9d6t\nXHGu8b9l5clyVS1NNsAGFnW2RZ0LnG27pprNu/1SU8YvNTV1/McmXv9GFnW2RZ0LnG27Jplt0sf8\nkqYz9ZZf0kQmiT/JDUn+PcmzSW6dYob1JDmV5MnhyMPLE89yZ5JzSU6uWnZ5kgeSPDN8X/MwaRPN\nthBHbt7gyNKT3naLdsTrud/tT3IR8CPgo8DzwCPA4ar64VwHWUeSU8BSVU2+TzjJHwG/BL5VVR8Y\nlv018GJV3T78x3lZVf3Fgsx2G/DLqY/cPBxQZv/qI0sDNwGfZsLbboO5bmaC222KLf9B4Nmqeq6q\nXgG+AxyaYI6FV1UPAS+et/gQcHw4fZyVfzxzt85sC6Gq
{ "cell_type": "code", "execution_count": 12, "metadata": { "collapsed": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "(1, 784)\n", " index_of_letter: 7 \n", " letter is: h\n" ] } ], "source": [ "# one sample of 784 features (the original used reshape(-784, 784), which numpy happens\n", "# to treat like -1; reshape(1, 784) says the same thing explicitly)\n", "reshaped_2d_array = reshaped_image_gray.reshape(1, 784)\n", "print(reshaped_2d_array.shape)\n", "predicted = loaded_model.predict(reshaped_2d_array)\n", "index_of_letter = predicted[0]\n", "print(' index_of_letter: ', index_of_letter, '\\n', 'letter is: ', array_letters[index_of_letter])" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.6.0" } }, "nbformat": 4, "nbformat_minor": 2 }