{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Simple example of the evolutionary optimization framework\n",
    "\n",
    "This notebook provides a simple example for the use of the evolutionary optimization framework built into the library. Under the hood, the implementation of the evolutionary algorithm is powered by `deap` and `pypet` takes care of the parallelization and storage of the simulation data for us.\n",
    "\n",
    "Here we demonstrate how to fit parameters of the evaluation function `optimize_me`, which simply computes the distance of the parameters to the unit circle and returns this as the `fitness_tuple` that DEAP expects."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# change to the root directory of the project\n",
    "import os\n",
    "if os.getcwd().split(\"/\")[-1] == \"examples\":\n",
    "    os.chdir('..')\n",
    "\n",
    "# This will reload all imports as soon as the code changes\n",
    "%load_ext autoreload\n",
    "%autoreload 2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "try:\n",
    "    import matplotlib.pyplot as plt\n",
    "except ImportError:\n",
    "    # %pip (rather than !pip) installs into the environment of the running kernel\n",
    "    %pip install matplotlib seaborn\n",
    "    import matplotlib.pyplot as plt\n",
    "\n",
    "import numpy as np\n",
    "import logging\n",
    "\n",
    "from neurolib.utils.parameterSpace import ParameterSpace\n",
    "from neurolib.optimize.evolution import Evolution\n",
    "\n",
    "import neurolib.optimize.evolution.evolutionaryUtils as eu\n",
    "import neurolib.utils.functions as func\n",
    "\n",
    "def optimize_me(traj):\n",
    "    \"\"\"Evaluation function: fitness is the distance of (x, y) to the unit circle.\n",
    "\n",
    "    :param traj: pypet trajectory of the current individual\n",
    "    :return: (fitness_tuple, result_dict) as expected by the Evolution framework\n",
    "    \"\"\"\n",
    "    ind = evolution.getIndividualFromTraj(traj)\n",
    "    logging.info(\"Hello, I am {}\".format(ind.id))\n",
    "    logging.info(\"You can also call me {}, or simply ({:.2}, {:.2}).\".format(ind.params, ind.x, ind.y))\n",
    "\n",
    "    # let's make a circle\n",
    "    computation_result = abs((ind.x**2 + ind.y**2) - 1)\n",
    "    # DEAP wants a tuple as fitness, ALWAYS!\n",
    "    fitness_tuple = (computation_result,)\n",
    "\n",
    "    # a dictionary of results must also be returned for storage in the hdf file;\n",
    "    # it may be empty if there is nothing to store\n",
    "    result_dict = {}\n",
    "\n",
    "    return fitness_tuple, result_dict\n",
    "\n",
    "\n",
    "pars = ParameterSpace(['x', 'y'], [[-5.0, 5.0], [-5.0, 5.0]])\n",
    "evolution = Evolution(optimize_me, pars, weightList=[-1.0],\n",
    "                      POP_INIT_SIZE=10, POP_SIZE=6, NGEN=4, filename=\"example-2.0.hdf\")\n",
    "# info: choose POP_INIT_SIZE=100, POP_SIZE=50, NGEN=10 for real exploration,\n",
    "# values here are low for testing: POP_INIT_SIZE=10, POP_SIZE=6, NGEN=4\n",
    "\n",
    "evolution.run(verbose=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "evolution.loadResults()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "evolution.info(plot=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "gens, all_scores = evolution.getScoresDuringEvolution(reverse=True)\n",
    "\n",
    "plt.figure(figsize=(8, 4), dpi=200)\n",
    "plt.plot(gens, np.nanmean(all_scores, axis=1))\n",
    "plt.fill_between(gens, np.nanmin(all_scores, axis=1), np.nanmax(all_scores, axis=1), alpha=0.3)\n",
    "plt.xlabel(\"Generation #\")\n",
    "plt.ylabel(\"Score\");"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}