{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "![NeuronUnit Logo](https://raw.githubusercontent.com/scidash/assets/master/logos/neuronunit/NeuronUnitBlack2.png)\n", "# Chapter 5\n", "Back to [Chapter 5](chapter5.ipynb)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Obtaining an appropriate execution Environment \n", "## ie docker-stacks/neuronunit-optimization:\n", "\n", "Force use of development local repository. The repository must be the russell_dev branch of https://github.com/russelljjarvis/neuronunit\n", "and the docker build is: `docker-stacks/neuronunit-optimization`, there are more graceful ways of achieving this.\n", "\n", "If you lack `docker-stacks/neuronunit-optimization` try:\n", "\n", "```\n", "$git clone -b dev https://github.com/scidash/docker-stacks.git\n", "$cd docker-stacks\n", "$sudo bash build-all```\n", "\n", "The best way will be to integrate this doc chapter5 and its supporting code \n", "from https://github.com/russelljjarvis/neuronunit -> https://github.com/scidash/neuronunit\n", "\n", "# Invocation of the jupyter note book:\n", "\n", "On my local machine the development repo of neuronunit is located here:\n", "\n", "```\n", "$HOME/git\n", "# I navigate to that directory and execute:\n", "\n", "$docker run -p 8888:8888 -v \\\n", "`pwd`:/home/jovyan/mnt scidash/neuronunit-optimization \\\n", "jupyter notebook --ip=0.0.0.0 --NotebookApp.token=\\\"\\\" \\ \n", "--NotebookApp.disable_check_xsrf=True \n", "```" ] }, { "cell_type": "code", "execution_count": 1, "metadata": { "collapsed": true }, "outputs": [], "source": [ "import sys\n", "import os\n", "THIS_DIR = os.path.dirname(os.path.realpath('chapter5.ipynb'))\n", "this_nu = os.path.join(THIS_DIR,'../')\n", "sys.path.insert(0,this_nu)\n", "import neuronunit\n" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "attempting to recover from pickled file\n", "Ignoring included LEMS file: Cells.xml\n", "Ignoring included LEMS file: Networks.xml\n", "Ignoring included LEMS file: Simulation.xml\n", "Mechanisms already loaded from path: /home/jovyan/mnt/git/neuronunit/neuronunit/tests/NeuroML2. 
"Ignoring included LEMS file: Cells.xml\n", "Ignoring included LEMS file: Networks.xml\n", "Ignoring included LEMS file: Simulation.xml\n" ] } ], "source": [ "from neuronunit.tests import get_neab\n", "from neuronunit.tests import utilities\n", "from neuronunit.tests import model_parameters as modelp\n", "from neuronunit.models.reduced import ReducedModel\n", "import numpy as np\n", "# Helper utilities built around the test suite defined in get_neab.\n", "outils = utilities.Utilities1(get_neab)\n", "# A reduced (Izhikevich) model defined by a LEMS/NeuroML2 file, simulated with the NEURON backend.\n", "model = ReducedModel(get_neab.LEMS_MODEL_PATH,name='vanilla',backend='NEURON')\n", "model.load_model()\n", "outils.model = model\n" ] },
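{ "cell_type": "markdown", "metadata": {}, "source": [ "Before optimizing it can help to inspect the search space. A minimal sketch (assuming, as the `BOUND_LOW`/`BOUND_UP` code below implies, that `modelp.model_params` is a dictionary mapping Izhikevich parameter names to iterables of candidate values):\n", "\n", "```python\n", "import numpy as np\n", "# Print the lower and upper bound explored for each model parameter.\n", "for key, values in modelp.model_params.items():\n", "    print(key, np.min(values), np.max(values))\n", "```" ] },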
{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "class Test:\n", "    def _optimize(self, model, modelp):\n", "        '''\n", "        The implementation details of the optimization.\n", "        Inputs: a model, and the model parameter ranges to explore.\n", "        Private method, intended for the class designer rather than its users.\n", "        Outputs: the converged gene population and a tuple of data about it.\n", "        '''\n", "        from neuronunit.optimization import nsga_serial as nsga\n", "        gap = nsga.GAparams(model)\n", "        # The number of generations is 3.\n", "        gap.NGEN = 3\n", "        # The gene population size is 12.\n", "        gap.MU = 12\n", "        gap.BOUND_LOW = [ np.min(i) for i in modelp.model_params.values() ]\n", "        gap.BOUND_UP = [ np.max(i) for i in modelp.model_params.values() ]\n", "        # Call the actual Genetic Algorithm with the optimization parameters:\n", "        # number of generations (NGEN = 3) and gene population size (MU = 12).\n", "        vmpop, pop, invalid_ind, pf = nsga.main(gap.NGEN, gap.MU, model, modelp)\n", "        rheobases = [ i.rheobase for i in vmpop ]\n", "        scores = [ i.score for i in vmpop ]\n", "        parameters = [ i.attrs for i in vmpop ]\n", "\n", "        data_tuples = (vmpop, parameters, scores, pop, invalid_ind, pf, rheobases)\n", "        return pop, data_tuples\n", "\n", "    def _get_optimization_parameters(self, data_tuples):\n", "        # Unpack the tuple that _optimize returns:\n", "        # (vmpop, parameters, scores, pop, invalid_ind, pf, rheobases)\n", "        _, parameters, scores, _, _, _, _ = data_tuples\n", "        return parameters, scores\n", "\n", "    def optimize(self, model, modelp):\n", "        '''\n", "        The class user's version of optimize, where the details are hidden\n", "        in _optimize.\n", "\n", "        Inputs:\n", "        an Izhikevich model specified in NeuroML, but implemented with the\n", "        NEURON backend from neuronunit; modelp, a formatted dictionary of\n", "        model parameters whose keys are Izhikevich parameter names and whose\n", "        values are parameter ranges.\n", "        Outputs:\n", "        the model; the scores, as pandas data frames; and data_tuples, other\n", "        data about the converged gene population, such as the rheobase values\n", "        of the converged genes, pandas score arrays, and the genes\n", "        corresponding to attributes of the Pareto front (in a raw format).\n", "        '''\n", "        # Do the optimization, including repeated calls to judge.\n", "        models, data_tuples = self._optimize(model, modelp)\n", "        parameters, scores = self._get_optimization_parameters(data_tuples)\n", "        # parameters is a way of looking at solved model attributes, i.e.\n", "        # candidate solutions from the Pareto front.\n", "        # scores is a list of pandas data frames, one per member of the\n", "        # converged gene population (one way to combine them into a single\n", "        # table is sketched below).\n", "        return model, scores, data_tuples\n", "\n", "\n", "t = Test()\n", "model, scores, data_tuples = t.optimize(model, modelp)" ] },
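{ "cell_type": "markdown", "metadata": {}, "source": [ "Because `scores` is a list of per-model pandas data frames, the cells below inspect them one at a time. A minimal sketch of combining them into one big table (assuming each entry of `scores` can be passed to `pd.DataFrame`, as the cells below do individually):\n", "\n", "```python\n", "import pandas as pd\n", "# Stack the per-model score tables, keyed by each model's index in the population.\n", "all_scores = pd.concat([pd.DataFrame(s) for s in scores],\n", "                       keys=range(len(scores)), names=['model'])\n", "```" ] },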
{ "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "import pandas as pd\n", "# Recover the evaluated models and their attribute dictionaries from data_tuples.\n", "models = data_tuples[0]\n", "attributes = data_tuples[1]\n", "sc = pd.DataFrame(scores[0])\n", "for j,i in enumerate(models):\n", "    i.name = attributes[j]\n", "sc" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "# The attributes of the best candidate, as a table.\n", "data = [ models[0].name ]\n", "model_values0 = pd.DataFrame(data)\n", "model_values0" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "# rheobases is the last element of data_tuples.\n", "rheobases = data_tuples[6]" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "rheobases[0]\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "sc1 = pd.DataFrame(scores[1])\n", "sc1\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "rheobases[1]\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "data = [ models[1].name ]\n", "model_values1 = pd.DataFrame(data)\n", "model_values1" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "models[1].name" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "The code below gets the differences between parameter values obtained via brute force and those obtained by the genetic algorithm,\n", "and displays those differences as pandas data tables.\n", "\n", "I have knowingly violated GitHub conventions by adding data (a pickled file, as well as its sources) to the repository.\n", "The justification is that ground_error (the ground truth against which the Genetic Algorithm outputs are compared)\n", "takes a prohibitively long time to generate, and regenerating it here would detract from the notebook philosophy."
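, "\n", "The exception branch in the next cell refers to a `ground_truth` collection of candidate parameter sets, which is never defined in this notebook. A purely hypothetical sketch of how such a brute-force grid could be built from the same parameter ranges the GA searches (`ground_truth` here is just the notebook's placeholder name):\n", "\n", "```python\n", "import itertools\n", "# Cartesian product of all candidate values: one attribute dictionary per grid point.\n", "keys = list(modelp.model_params.keys())\n", "ground_truth = [ dict(zip(keys, combo))\n", "                 for combo in itertools.product(*modelp.model_params.values()) ]\n", "```"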
] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "import pickle\n", "import pandas as pd\n", "\n", "try:\n", " ground_error = pickle.load(open('big_model_evaulated.pickle','rb'))\n", "except:\n", " # The exception code is only skeletal, it would not actually work, but its the right principles.\n", " print('{0} it seems the error truth data does not yet exist, lets create it now '.format(str(False)))\n", " ground_error = list(futures.map(outils.func2map, ground_truth))\n", " pickle.dump(ground_error,open('big_model_evaulated.pickle','wb'))\n", "\n", "# ground_error_nsga=list(zip(vmpop,pop,invalid_ind))\n", "# pickle.dump(ground_error_nsga,open('nsga_evaulated.pickle','wb'))\n", "\n", "sum_errors = [ i[0] for i in ground_error ]\n", "composite_errors = [ i[1] for i in ground_error ]\n", "attrs = [ i[2] for i in ground_error ]\n", "rheobase = [ i[3] for i in ground_error ]\n", "\n", "indexs = [i for i,j in enumerate(sum_errors) if j==np.min(sum_errors) ][0]\n", "indexc = [i for i,j in enumerate(composite_errors) if j==np.min(composite_errors) ][0]\n", "#assert indexs == indexc\n", "vmpop = data_tuples[0]\n", "df_0 = pd.DataFrame([ (k,v,vmpop[0].attrs[k],float(v)-float(vmpop[0].attrs[k])) for k,v in ground_error[indexc][2].items() ])\n", "df_1 = pd.DataFrame([ (k,v,vmpop[1].attrs[k],float(v)-float(vmpop[1].attrs[k])) for k,v in ground_error[indexc][2].items() ])\n", "\n", "\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "#These are the differences in attributes found via brute force versus the genetic algorithm. For the top two candidates.\n", "\n", "df_0" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "df_1\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.5.2" } }, "nbformat": 4, "nbformat_minor": 2 }