{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Import packages"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import gym\n",
    "import numpy as np\n",
    "from collections import defaultdict\n",
    "import functools\n",
    "from tqdm.auto import tqdm"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def generate_zeros(n):\n",
    "    \"\"\"Return a list of n zeros (used as the default Q-value row).\"\"\"\n",
    "    return [0] * n\n",
    "\n",
    "\n",
    "class TabularQAgent(object):\n",
    "    \"\"\"Tabular Q-learning agent with an epsilon-greedy behavior policy.\"\"\"\n",
    "\n",
    "    def __init__(self, observation_space, action_space):\n",
    "        self.observation_space = observation_space\n",
    "        self.action_space = action_space\n",
    "        self.action_n = action_space.n\n",
    "        self.config = {\n",
    "            \"learning_rate\": 0.5,\n",
    "            \"eps\": 0.5,  # Epsilon in epsilon greedy policies\n",
    "            \"discount\": 0.99,\n",
    "            \"n_iter\": 10000}  # Max number of steps per episode\n",
    "\n",
    "        # Q-table: observation -> list of per-action values, lazily zero-initialized.\n",
    "        self.q = defaultdict(functools.partial(generate_zeros, n=self.action_n))\n",
    "\n",
    "    def act(self, observation, eps=None):\n",
    "        \"\"\"Choose an action epsilon-greedily; pass eps=0 for the greedy policy.\"\"\"\n",
    "        if eps is None:\n",
    "            eps = self.config[\"eps\"]\n",
    "        # epsilon greedy.\n",
    "        action = np.argmax(self.q[observation]) if np.random.random() > eps else self.action_space.sample()\n",
    "        return action\n",
    "\n",
    "    def learn(self, env):\n",
    "        \"\"\"Run one training episode, updating the Q-table in place.\n",
    "\n",
    "        Returns (total episode reward, number of steps taken).\n",
    "        \"\"\"\n",
    "        obs = env.reset()\n",
    "\n",
    "        rAll = 0\n",
    "        step_count = 0\n",
    "\n",
    "        for t in range(self.config[\"n_iter\"]):\n",
    "            action = self.act(obs)\n",
    "            obs2, reward, done, _ = env.step(action)\n",
    "\n",
    "            # Terminal states contribute no future value to the TD target.\n",
    "            future = 0.0\n",
    "            if not done:\n",
    "                future = np.max(self.q[obs2])\n",
    "            self.q[obs][action] = (1 - self.config[\"learning_rate\"]) * self.q[obs][action] + self.config[\"learning_rate\"] * (reward + self.config[\"discount\"] * future)\n",
    "\n",
    "            obs = obs2\n",
    "\n",
    "            rAll += reward\n",
    "            step_count += 1\n",
    "\n",
    "            if done:\n",
    "                break\n",
    "\n",
    "        return rAll, step_count\n",
    "\n",
    "    def test(self, env):\n",
    "        \"\"\"Run one greedy (eps=0) episode, rendering each state exactly once.\"\"\"\n",
    "        obs = env.reset()\n",
    "        env.render(mode='human')\n",
    "\n",
    "        for t in range(self.config[\"n_iter\"]):\n",
    "            action = self.act(obs, eps=0)\n",
    "            obs2, reward, done, _ = env.step(action)\n",
    "            # Render only once per step (the original rendered each state twice).\n",
    "            env.render(mode='human')\n",
    "\n",
    "            if done:\n",
    "                break\n",
    "\n",
    "            obs = obs2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def train(tabular_q_agent, env):\n",
    "    \"\"\"Train the agent by running 200000 episodes against env.\"\"\"\n",
    "    for episode in tqdm(range(200000)):\n",
    "\n",
    "        all_reward, step_count = tabular_q_agent.learn(env)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build the environment\n",
    "env = gym.make('FrozenLake-v0')\n",
    "env.seed(0)  # Make the environment reproducible\n",
    "np.random.seed(0)  # Epsilon-greedy draws use np.random, so seed it as well\n",
    "\n",
    "# Build the agent\n",
    "tabular_q_agent = TabularQAgent(env.observation_space, env.action_space)\n",
    "\n",
    "# Start training\n",
    "train(tabular_q_agent, env)\n",
    "\n",
    "tabular_q_agent.test(env)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3.6",
   "language": "python",
   "name": "python36"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}