{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Balancing the gathering and use of information\n", "\n", "> 그로킹 심층 강화학습 중 4장 내용인 \"정보의 수집과 사용간의 균형\"에 대한 내용입니다.\n", "\n", "- hide: true\n", "- toc: true \n", "- badges: true\n", "- comments: true\n", "- author: Chanseok Kang\n", "- categories: [Python, Reinforcement_Learning, Grokking_Deep_Reinforcement_Learning]\n", "- permalink: /book/:title:output_ext\n", "- search_exclude: false" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "> Note: 실행을 위해 아래의 패키지들을 설치해주기 바랍니다." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "#collapse\n", "!pip install tqdm numpy scikit-learn pyglet setuptools && \\\n", "!pip install gym asciinema pandas tabulate tornado==5.* PyBullet && \\\n", "!pip install git+https://github.com/pybox2d/pybox2d#egg=Box2D && \\\n", "!pip install git+https://github.com/mimoralea/gym-bandits#egg=gym-bandits && \\\n", "!pip install git+https://github.com/mimoralea/gym-walk#egg=gym-walk && \\\n", "!pip install git+https://github.com/mimoralea/gym-aima#egg=gym-aima && \\\n", "!pip install gym[atari]" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 밴딧 (Bandits)" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "import warnings ; warnings.filterwarnings('ignore')\n", "\n", "import gym\n", "import gym_bandits\n", "import numpy as np\n", "from scipy.special import softmax as softmax_fn\n", "from pprint import pprint\n", "from tqdm import tqdm_notebook as tqdm\n", "\n", "from itertools import cycle\n", "\n", "import sys\n", "import random\n", "import matplotlib\n", "import matplotlib.pyplot as plt\n", "import matplotlib.pylab as pylab\n", "SEEDS = (12, 34, 56, 78, 90)\n", "\n", "%matplotlib inline" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [], "source": [ "plt.style.use('fivethirtyeight')\n", "params = {\n", " 'figure.figsize': (15, 8),\n", " 'font.size': 24,\n", " 'legend.fontsize': 20,\n", " 'axes.titlesize': 28,\n", " 'axes.labelsize': 24,\n", " 'xtick.labelsize': 20,\n", " 'ytick.labelsize': 20\n", "}\n", "pylab.rcParams.update(params)\n", "np.set_printoptions(suppress=True)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### 기본 전략" ] }, { "cell_type": "code", "execution_count": 15, "metadata": {}, "outputs": [], "source": [ "def pure_exploitation(env, n_episodes=1000):\n", " Q = np.zeros((env.action_space.n), dtype=np.float64)\n", " N = np.zeros((env.action_space.n), dtype=np.int)\n", "\n", " Qe = np.empty((n_episodes, env.action_space.n), dtype=np.float64)\n", " returns = np.empty(n_episodes, dtype=np.float64)\n", " actions = np.empty(n_episodes, dtype=np.int)\n", " name = 'Pure exploitation'\n", " for e in tqdm(range(n_episodes), \n", " desc='Episodes for: ' + name, \n", " leave=False):\n", " action = np.argmax(Q)\n", "\n", " _, reward, _, _ = env.step(action)\n", " N[action] += 1\n", " Q[action] = Q[action] + (reward - Q[action])/N[action]\n", " \n", " Qe[e] = Q\n", " returns[e] = reward\n", " actions[e] = action\n", " return name, returns, Qe, actions" ] }, { "cell_type": "code", "execution_count": 16, "metadata": {}, "outputs": [], "source": [ "def pure_exploration(env, n_episodes=1000):\n", " Q = np.zeros((env.action_space.n), dtype=np.float64)\n", " N = np.zeros((env.action_space.n), dtype=np.int)\n", "\n", " Qe = np.empty((n_episodes, env.action_space.n), dtype=np.float64)\n", " returns = np.empty(n_episodes, dtype=np.float64)\n", " actions 
= np.empty(n_episodes, dtype=np.int)\n", " name = 'Pure exploration'\n", " for e in tqdm(range(n_episodes), \n", " desc='Episodes for: ' + name, \n", " leave=False):\n", " action = np.random.randint(len(Q))\n", " \n", " _, reward, _, _ = env.step(action)\n", " N[action] += 1\n", " Q[action] = Q[action] + (reward - Q[action])/N[action]\n", " \n", " Qe[e] = Q\n", " returns[e] = reward\n", " actions[e] = action\n", " return name, returns, Qe, actions" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### 간단한 전략들" ] }, { "cell_type": "code", "execution_count": 17, "metadata": {}, "outputs": [], "source": [ "def epsilon_greedy(env, epsilon=0.01, n_episodes=1000):\n", " Q = np.zeros((env.action_space.n), dtype=np.float64)\n", " N = np.zeros((env.action_space.n), dtype=np.int)\n", "\n", " Qe = np.empty((n_episodes, env.action_space.n), dtype=np.float64)\n", " returns = np.empty(n_episodes, dtype=np.float64)\n", " actions = np.empty(n_episodes, dtype=np.int)\n", " name = 'Epsilon-Greedy {}'.format(epsilon)\n", " for e in tqdm(range(n_episodes), \n", " desc='Episodes for: ' + name, \n", " leave=False):\n", " if np.random.uniform() > epsilon:\n", " action = np.argmax(Q)\n", " else:\n", " action = np.random.randint(len(Q))\n", "\n", " _, reward, _, _ = env.step(action)\n", " N[action] += 1\n", " Q[action] = Q[action] + (reward - Q[action])/N[action]\n", " \n", " Qe[e] = Q\n", " returns[e] = reward\n", " actions[e] = action\n", " return name, returns, Qe, actions" ] }, { "cell_type": "code", "execution_count": 18, "metadata": {}, "outputs": [], "source": [ "def lin_dec_epsilon_greedy(env,\n", " init_epsilon=1.0,\n", " min_epsilon=0.01, \n", " decay_ratio=0.05, \n", " n_episodes=1000):\n", " Q = np.zeros((env.action_space.n), dtype=np.float64)\n", " N = np.zeros((env.action_space.n), dtype=np.int)\n", "\n", " Qe = np.empty((n_episodes, env.action_space.n), dtype=np.float64)\n", " returns = np.empty(n_episodes, dtype=np.float64)\n", " actions = np.empty(n_episodes, dtype=np.int)\n", " name = 'Lin Epsilon-Greedy {}, {}, {}'.format(init_epsilon, \n", " min_epsilon, \n", " decay_ratio)\n", " for e in tqdm(range(n_episodes), \n", " desc='Episodes for: ' + name, \n", " leave=False):\n", " decay_episodes = n_episodes * decay_ratio\n", " epsilon = 1 - e / decay_episodes\n", " epsilon *= init_epsilon - min_epsilon\n", " epsilon += min_epsilon\n", " epsilon = np.clip(epsilon, min_epsilon, init_epsilon)\n", " if np.random.uniform() > epsilon:\n", " action = np.argmax(Q)\n", " else:\n", " action = np.random.randint(len(Q))\n", "\n", " _, reward, _, _ = env.step(action)\n", " N[action] += 1\n", " Q[action] = Q[action] + (reward - Q[action])/N[action]\n", " \n", " Qe[e] = Q\n", " returns[e] = reward\n", " actions[e] = action\n", " return name, returns, Qe, actions" ] }, { "cell_type": "code", "execution_count": 19, "metadata": {}, "outputs": [], "source": [ "def exp_dec_epsilon_greedy(env, \n", " init_epsilon=1.0,\n", " min_epsilon=0.01,\n", " decay_ratio=0.1,\n", " n_episodes=1000):\n", " Q = np.zeros((env.action_space.n), dtype=np.float64)\n", " N = np.zeros((env.action_space.n), dtype=np.int)\n", "\n", " Qe = np.empty((n_episodes, env.action_space.n), dtype=np.float64)\n", " returns = np.empty(n_episodes, dtype=np.float64)\n", " actions = np.empty(n_episodes, dtype=np.int)\n", "\n", " decay_episodes = int(n_episodes * decay_ratio)\n", " rem_episodes = n_episodes - decay_episodes\n", " epsilons = 0.01\n", " epsilons /= np.logspace(-2, 0, decay_episodes)\n", " epsilons *= init_epsilon - min_epsilon\n", " 
epsilons += min_epsilon\n", " epsilons = np.pad(epsilons, (0, rem_episodes), 'edge')\n", " name = 'Exp Epsilon-Greedy {}, {}, {}'.format(init_epsilon, \n", " min_epsilon, \n", " decay_ratio)\n", " for e in tqdm(range(n_episodes), \n", " desc='Episodes for: ' + name, \n", " leave=False):\n", " if np.random.uniform() > epsilons[e]:\n", " action = np.argmax(Q)\n", " else:\n", " action = np.random.randint(len(Q))\n", "\n", " _, reward, _, _ = env.step(action)\n", " N[action] += 1\n", " Q[action] = Q[action] + (reward - Q[action])/N[action]\n", " \n", " Qe[e] = Q\n", " returns[e] = reward\n", " actions[e] = action\n", " return name, returns, Qe, actions" ] }, { "cell_type": "code", "execution_count": 20, "metadata": {}, "outputs": [], "source": [ "def optimistic_initialization(env, \n", " optimistic_estimate=1.0,\n", " initial_count=100,\n", " n_episodes=1000):\n", " Q = np.full((env.action_space.n), optimistic_estimate, dtype=np.float64)\n", " N = np.full((env.action_space.n), initial_count, dtype=np.int)\n", " \n", " Qe = np.empty((n_episodes, env.action_space.n), dtype=np.float64)\n", " returns = np.empty(n_episodes, dtype=np.float64)\n", " actions = np.empty(n_episodes, dtype=np.int)\n", " name = 'Optimistic {}, {}'.format(optimistic_estimate, \n", " initial_count)\n", " for e in tqdm(range(n_episodes), \n", " desc='Episodes for: ' + name, \n", " leave=False):\n", " action = np.argmax(Q)\n", "\n", " _, reward, _, _ = env.step(action)\n", " N[action] += 1\n", " Q[action] = Q[action] + (reward - Q[action])/N[action]\n", "\n", " Qe[e] = Q\n", " returns[e] = reward\n", " actions[e] = action\n", " return name, returns, Qe, actions" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### 두 개의 팔을 가진 밴딧 환경" ] }, { "cell_type": "code", "execution_count": 21, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Two-Armed Bandit environment with seed 12\n", "Probability of reward: [0.41630234 0.5545003 ]\n", "Reward: [1 1]\n", "Q(.): [0.41630234 0.5545003 ]\n", "V*: 0.5545003042316209\n", "\n", "Two-Armed Bandit environment with seed 34\n", "Probability of reward: [0.88039337 0.56881791]\n", "Reward: [1 1]\n", "Q(.): [0.88039337 0.56881791]\n", "V*: 0.8803933660102791\n", "\n", "Two-Armed Bandit environment with seed 56\n", "Probability of reward: [0.44859284 0.9499771 ]\n", "Reward: [1 1]\n", "Q(.): [0.44859284 0.9499771 ]\n", "V*: 0.9499771030206514\n", "\n", "Two-Armed Bandit environment with seed 78\n", "Probability of reward: [0.53235706 0.84511988]\n", "Reward: [1 1]\n", "Q(.): [0.53235706 0.84511988]\n", "V*: 0.8451198776828125\n", "\n", "Two-Armed Bandit environment with seed 90\n", "Probability of reward: [0.56461729 0.91744039]\n", "Reward: [1 1]\n", "Q(.): [0.56461729 0.91744039]\n", "V*: 0.9174403942290458\n", "\n", "Mean V* across all seeds: 0.8294862090348818\n" ] } ], "source": [ "b2_Vs = []\n", "for seed in SEEDS:\n", " env_name = 'BanditTwoArmedUniform-v0'\n", " env = gym.make(env_name, seed=seed) ; env.reset()\n", " b2_Q = np.array(env.env.p_dist * env.env.r_dist)\n", " print('Two-Armed Bandit environment with seed', seed)\n", " print('Probability of reward:', env.env.p_dist)\n", " print('Reward:', env.env.r_dist)\n", " print('Q(.):', b2_Q)\n", " b2_Vs.append(np.max(b2_Q))\n", " print('V*:', b2_Vs[-1])\n", " print()\n", "print('Mean V* across all seeds:', np.mean(b2_Vs))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "#### 두 팔 밴딧 환경에서 간단한 전략 수행" ] }, { "cell_type": "code", "execution_count": 22, "metadata": {}, "outputs": [ { 
"data": { "application/vnd.jupyter.widget-view+json": { "model_id": "f6bd686301234c0bb659e77bad25e78d", "version_major": 2, "version_minor": 0 }, "text/plain": [ "All experiments: 0%| | 0/5 [00:00" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "fig, axs = plt.subplots(5, 1, figsize=(28, 28), sharey=False, sharex=False)\n", "\n", "lines = [\"-\",\"--\",\":\",\"-.\"]\n", "linecycler = cycle(lines)\n", "min_reg, max_ret = float('inf'), float('-inf')\n", "for label, result in b2_results_s.items():\n", " color = next(linecycler)\n", "\n", " # reward\n", " episode_mean_rew = np.array(result['episode_mean_rew'])\n", " mean_episode_mean_rew = np.mean(episode_mean_rew, axis=0)\n", "\n", " axs[0].plot(mean_episode_mean_rew, color, linewidth=2, label=label)\n", "\n", " axs[1].plot(mean_episode_mean_rew, color, linewidth=2, label=label)\n", " axs[1].set_xscale('log')\n", " \n", " axs[2].plot(mean_episode_mean_rew, color, linewidth=2, label=label)\n", " if max_ret < mean_episode_mean_rew[-1]: max_ret = mean_episode_mean_rew[-1]\n", " axs[2].axis((mean_episode_mean_rew.shape[0]*0.989,\n", " mean_episode_mean_rew.shape[0],\n", " max_ret-0.005,\n", " max_ret+0.0001))\n", "\n", " # regret\n", " cum_regret = np.array(result['cum_regret'])\n", " mean_cum_regret = np.mean(cum_regret, axis=0)\n", "\n", " axs[3].plot(mean_cum_regret, color, linewidth=2, label=label)\n", " \n", " axs[4].plot(mean_cum_regret, color, linewidth=2, label=label)\n", " if min_reg > mean_cum_regret[-1]: min_reg = mean_cum_regret[-1]\n", " plt.axis((mean_cum_regret.shape[0]*0.989,\n", " mean_cum_regret.shape[0],\n", " min_reg-0.5,\n", " min_reg+5))\n", "\n", " # config plot\n", " axs[0].set_title('Mean Episode Reward')\n", " axs[1].set_title('Mean Episode Reward (Log scale)')\n", " axs[2].set_title('Mean Episode Reward (Zoom on best)')\n", " axs[3].set_title('Total Regret')\n", " axs[4].set_title('Total Regret (Zoom on best)')\n", " plt.xlabel('Episodes')\n", " axs[0].legend(loc='upper left')\n", "\n", "plt.show()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### 조금 더 발전된 전략들" ] }, { "cell_type": "code", "execution_count": 24, "metadata": {}, "outputs": [], "source": [ "def softmax(env, \n", " init_temp=float('inf'), \n", " min_temp=0.0,\n", " decay_ratio=0.04,\n", " n_episodes=1000):\n", " Q = np.zeros((env.action_space.n), dtype=np.float64)\n", " N = np.zeros((env.action_space.n), dtype=np.int)\n", "\n", " Qe = np.empty((n_episodes, env.action_space.n), dtype=np.float64)\n", " returns = np.empty(n_episodes, dtype=np.float64)\n", " actions = np.empty(n_episodes, dtype=np.int)\n", " name = 'Lin SoftMax {}, {}, {}'.format(init_temp, \n", " min_temp,\n", " decay_ratio)\n", " # can't really use infinity\n", " init_temp = min(init_temp,\n", " sys.float_info.max)\n", " # can't really use zero\n", " min_temp = max(min_temp,\n", " np.nextafter(np.float32(0), \n", " np.float32(1)))\n", " for e in tqdm(range(n_episodes),\n", " desc='Episodes for: ' + name, \n", " leave=False):\n", " decay_episodes = n_episodes * decay_ratio\n", " temp = 1 - e / decay_episodes\n", " temp *= init_temp - min_temp\n", " temp += min_temp\n", " temp = np.clip(temp, min_temp, init_temp)\n", "\n", " scaled_Q = Q / temp\n", " norm_Q = scaled_Q - np.max(scaled_Q)\n", " exp_Q = np.exp(norm_Q)\n", " probs = exp_Q / np.sum(exp_Q)\n", " assert np.isclose(probs.sum(), 1.0)\n", "\n", " action = np.random.choice(np.arange(len(probs)), \n", " size=1, \n", " p=probs)[0]\n", "\n", " _, reward, _, _ = env.step(action)\n", " N[action] += 1\n", " 
Q[action] = Q[action] + (reward - Q[action])/N[action]\n", " \n", " Qe[e] = Q\n", " returns[e] = reward\n", " actions[e] = action\n", " return name, returns, Qe, actions" ] }, { "cell_type": "code", "execution_count": 25, "metadata": {}, "outputs": [], "source": [ "def upper_confidence_bound(env, \n", " c=2, \n", " n_episodes=1000):\n", " Q = np.zeros((env.action_space.n), dtype=np.float64)\n", " N = np.zeros((env.action_space.n), dtype=np.int)\n", " \n", " Qe = np.empty((n_episodes, env.action_space.n), dtype=np.float64)\n", " returns = np.empty(n_episodes, dtype=np.float64)\n", " actions = np.empty(n_episodes, dtype=np.int)\n", " name = 'UCB {}'.format(c)\n", " for e in tqdm(range(n_episodes), \n", " desc='Episodes for: ' + name, \n", " leave=False):\n", " action = e\n", " if e >= len(Q):\n", " U = np.sqrt(c * np.log(e)/N)\n", " action = np.argmax(Q + U)\n", "\n", " _, reward, _, _ = env.step(action)\n", " N[action] += 1\n", " Q[action] = Q[action] + (reward - Q[action])/N[action]\n", " \n", " Qe[e] = Q\n", " returns[e] = reward\n", " actions[e] = action\n", " return name, returns, Qe, actions" ] }, { "cell_type": "code", "execution_count": 26, "metadata": {}, "outputs": [], "source": [ "def thompson_sampling(env, \n", " alpha=1,\n", " beta=0,\n", " n_episodes=1000):\n", " Q = np.zeros((env.action_space.n), dtype=np.float64)\n", " N = np.zeros((env.action_space.n), dtype=np.int)\n", " \n", " Qe = np.empty((n_episodes, env.action_space.n), dtype=np.float64)\n", " returns = np.empty(n_episodes, dtype=np.float64)\n", " actions = np.empty(n_episodes, dtype=np.int)\n", " name = 'Thompson Sampling {}, {}'.format(alpha, beta)\n", " for e in tqdm(range(n_episodes), \n", " desc='Episodes for: ' + name, \n", " leave=False):\n", " samples = np.random.normal(\n", " loc=Q, scale=alpha/(np.sqrt(N) + beta))\n", " action = np.argmax(samples)\n", "\n", " _, reward, _, _ = env.step(action)\n", " N[action] += 1\n", " Q[action] = Q[action] + (reward - Q[action])/N[action]\n", "\n", " Qe[e] = Q\n", " returns[e] = reward\n", " actions[e] = action\n", " return name, returns, Qe, actions" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "#### 두팔 밴딧 환경에서의 개선된 전략들의 수행" ] }, { "cell_type": "code", "execution_count": 27, "metadata": {}, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "c7d82a542394479fa1e4717c2daae741", "version_major": 2, "version_minor": 0 }, "text/plain": [ "All experiments: 0%| | 0/5 [00:00" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "fig, axs = plt.subplots(5, 1, figsize=(28, 28), sharey=False, sharex=False)\n", "\n", "lines = [\"-\",\"--\",\":\",\"-.\"]\n", "linecycler = cycle(lines)\n", "min_reg, max_ret = float('inf'), float('-inf')\n", "for label, result in b2_results_a.items():\n", " color = next(linecycler)\n", "\n", " # reward\n", " episode_mean_rew = np.array(result['episode_mean_rew'])\n", " mean_episode_mean_rew = np.mean(episode_mean_rew, axis=0)\n", "\n", " axs[0].plot(mean_episode_mean_rew, color, linewidth=2, label=label)\n", "\n", " axs[1].plot(mean_episode_mean_rew, color, linewidth=2, label=label)\n", " axs[1].set_xscale('log')\n", " \n", " axs[2].plot(mean_episode_mean_rew, color, linewidth=2, label=label)\n", " if max_ret < mean_episode_mean_rew[-1]: max_ret = mean_episode_mean_rew[-1]\n", " axs[2].axis((mean_episode_mean_rew.shape[0]*0.989,\n", " mean_episode_mean_rew.shape[0],\n", " max_ret-0.004,\n", " max_ret+0.0001))\n", "\n", " # regret\n", " cum_regret = np.array(result['cum_regret'])\n", " 
mean_cum_regret = np.mean(cum_regret, axis=0)\n", "\n", " axs[3].plot(mean_cum_regret, color, linewidth=2, label=label)\n", " \n", " axs[4].plot(mean_cum_regret, color, linewidth=2, label=label)\n", " if min_reg > mean_cum_regret[-1]: min_reg = mean_cum_regret[-1]\n", " plt.axis((mean_cum_regret.shape[0]*0.989,\n", " mean_cum_regret.shape[0],\n", " min_reg-1,\n", " min_reg+4))\n", "\n", " # config plot\n", " axs[0].set_title('Mean Episode Reward')\n", " axs[1].set_title('Mean Episode Reward (Log scale)')\n", " axs[2].set_title('Mean Episode Reward (Zoom on best)')\n", " axs[3].set_title('Total Regret')\n", " axs[4].set_title('Total Regret (Zoom on best)')\n", " plt.xlabel('Episodes')\n", " axs[0].legend(loc='upper left')\n", " \n", "plt.show()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### 열 팔 가우시안 밴딧 환경" ] }, { "cell_type": "code", "execution_count": 29, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "10-Armed Bandit environment with seed 12\n", "Probability of reward: [1 1 1 1 1 1 1 1 1 1]\n", "Reward: [ 1.38503828 -2.12704259 -2.04412697 -0.67407396 0.63734453 1.58553551\n", " 2.64476297 0.34536369 -1.3928017 -0.13044506]\n", "Q(.): [ 1.38503828 -2.12704259 -2.04412697 -0.67407396 0.63734453 1.58553551\n", " 2.64476297 0.34536369 -1.3928017 -0.13044506]\n", "V*: 2.6447629665055974\n", "\n", "10-Armed Bandit environment with seed 34\n", "Probability of reward: [1 1 1 1 1 1 1 1 1 1]\n", "Reward: [ 0.18060298 0.9982891 1.56491649 -0.5319185 0.05065747 -0.38137431\n", " -0.37199852 0.78790366 1.00121956 -0.00984009]\n", "Q(.): [ 0.18060298 0.9982891 1.56491649 -0.5319185 0.05065747 -0.38137431\n", " -0.37199852 0.78790366 1.00121956 -0.00984009]\n", "V*: 1.5649164942952658\n", "\n", "10-Armed Bandit environment with seed 56\n", "Probability of reward: [1 1 1 1 1 1 1 1 1 1]\n", "Reward: [ 0.62499037 -0.07140136 0.92729309 0.04536638 0.84506588 -0.18313555\n", " 0.00476172 1.55827457 -0.87910825 -0.00429599]\n", "Q(.): [ 0.62499037 -0.07140136 0.92729309 0.04536638 0.84506588 -0.18313555\n", " 0.00476172 1.55827457 -0.87910825 -0.00429599]\n", "V*: 1.5582745674137135\n", "\n", "10-Armed Bandit environment with seed 78\n", "Probability of reward: [1 1 1 1 1 1 1 1 1 1]\n", "Reward: [ 1.20523565 0.11299807 0.66357907 -0.29196638 1.01421424 -0.72565023\n", " 1.16574679 1.70303914 0.77572013 -1.38797678]\n", "Q(.): [ 1.20523565 0.11299807 0.66357907 -0.29196638 1.01421424 -0.72565023\n", " 1.16574679 1.70303914 0.77572013 -1.38797678]\n", "V*: 1.7030391402728304\n", "\n", "10-Armed Bandit environment with seed 90\n", "Probability of reward: [1 1 1 1 1 1 1 1 1 1]\n", "Reward: [ 0.81161829 0.12563368 -0.2520508 -0.55127142 0.53276387 0.19875864\n", " 0.04448967 -0.37178956 -0.25712615 0.04091966]\n", "Q(.): [ 0.81161829 0.12563368 -0.2520508 -0.55127142 0.53276387 0.19875864\n", " 0.04448967 -0.37178956 -0.25712615 0.04091966]\n", "V*: 0.8116182893597546\n", "\n", "Mean V* across all seeds: 1.6565222915694324\n" ] } ], "source": [ "b10_Vs = []\n", "for seed in SEEDS:\n", " env_name = 'BanditTenArmedGaussian-v0'\n", " env = gym.make(env_name, seed=seed) ; env.reset()\n", " r_dist = np.array(env.env.r_dist)[:,0]\n", " b10_Q = np.array(env.env.p_dist * r_dist)\n", " print('10-Armed Bandit environment with seed', seed)\n", " print('Probability of reward:', env.env.p_dist)\n", " print('Reward:', r_dist)\n", " print('Q(.):', b10_Q)\n", " b10_Vs.append(np.max(b10_Q))\n", " print('V*:', b10_Vs[-1])\n", " print()\n", "print('Mean V* across all seeds:', 
np.mean(b10_Vs))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "#### 열팔 밴딧 환경에서의 간단한 전략 수행" ] }, { "cell_type": "code", "execution_count": 30, "metadata": {}, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "0024bd83e5904f69a0da04656fe96480", "version_major": 2, "version_minor": 0 }, "text/plain": [ "All experiments: 0%| | 0/5 [00:00" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "fig, axs = plt.subplots(5, 1, figsize=(28, 28), sharey=False, sharex=False)\n", "\n", "lines = [\"-\",\"--\",\":\",\"-.\"]\n", "linecycler = cycle(lines)\n", "min_reg, max_ret = float('inf'), float('-inf')\n", "for label, result in b10_results_s.items():\n", " color = next(linecycler)\n", "\n", " # reward\n", " episode_mean_rew = np.array(result['episode_mean_rew'])\n", " mean_episode_mean_rew = np.mean(episode_mean_rew, axis=0)\n", "\n", " axs[0].plot(mean_episode_mean_rew, color, linewidth=2, label=label)\n", "\n", " axs[1].plot(mean_episode_mean_rew, color, linewidth=2, label=label)\n", " axs[1].set_xscale('log')\n", " \n", " axs[2].plot(mean_episode_mean_rew, color, linewidth=2, label=label)\n", " if max_ret < mean_episode_mean_rew[-1]: max_ret = mean_episode_mean_rew[-1]\n", " axs[2].axis((mean_episode_mean_rew.shape[0]*0.989,\n", " mean_episode_mean_rew.shape[0],\n", " max_ret-0.06,\n", " max_ret+0.005))\n", "\n", " # regret\n", " cum_regret = np.array(result['cum_regret'])\n", " mean_cum_regret = np.mean(cum_regret, axis=0)\n", "\n", " axs[3].plot(mean_cum_regret, color, linewidth=2, label=label)\n", " \n", " axs[4].plot(mean_cum_regret, color, linewidth=2, label=label)\n", " if min_reg > mean_cum_regret[-1]: min_reg = mean_cum_regret[-1]\n", " plt.axis((mean_cum_regret.shape[0]*0.989,\n", " mean_cum_regret.shape[0],\n", " min_reg-5,\n", " min_reg+45))\n", "\n", " # config plot\n", " axs[0].set_title('Mean Episode Reward')\n", " axs[1].set_title('Mean Episode Reward (Log scale)')\n", " axs[2].set_title('Mean Episode Reward (Zoom on best)')\n", " axs[3].set_title('Total Regret')\n", " axs[4].set_title('Total Regret (Zoom on best)')\n", " plt.xlabel('Episodes')\n", " axs[0].legend(loc='upper left')\n", "\n", "plt.show()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "#### 열팔 밴딧 환경에서의 개선된 전략 수행" ] }, { "cell_type": "code", "execution_count": 32, "metadata": {}, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "309700526d3442ab8341499ace43ea3f", "version_major": 2, "version_minor": 0 }, "text/plain": [ "All experiments: 0%| | 0/5 [00:00" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "fig, axs = plt.subplots(5, 1, figsize=(28, 28), sharey=False, sharex=False)\n", "\n", "lines = [\"-\",\"--\",\":\",\"-.\"]\n", "linecycler = cycle(lines)\n", "min_reg, max_ret = float('inf'), float('-inf')\n", "for label, result in b10_results_a.items():\n", " color = next(linecycler)\n", "\n", " # reward\n", " episode_mean_rew = np.array(result['episode_mean_rew'])\n", " mean_episode_mean_rew = np.mean(episode_mean_rew, axis=0)\n", "\n", " axs[0].plot(mean_episode_mean_rew, color, linewidth=2, label=label)\n", "\n", " axs[1].plot(mean_episode_mean_rew, color, linewidth=2, label=label)\n", " axs[1].set_xscale('log')\n", " \n", " axs[2].plot(mean_episode_mean_rew, color, linewidth=2, label=label)\n", " if max_ret < mean_episode_mean_rew[-1]: max_ret = mean_episode_mean_rew[-1]\n", " axs[2].axis((mean_episode_mean_rew.shape[0]*0.989,\n", " mean_episode_mean_rew.shape[0],\n", " 
max_ret-0.01,\n", " max_ret+0.005))\n", "\n", " # regret\n", " cum_regret = np.array(result['cum_regret'])\n", " mean_cum_regret = np.mean(cum_regret, axis=0)\n", "\n", " axs[3].plot(mean_cum_regret, color, linewidth=2, label=label)\n", " \n", " axs[4].plot(mean_cum_regret, color, linewidth=2, label=label)\n", " if min_reg > mean_cum_regret[-1]: min_reg = mean_cum_regret[-1]\n", " plt.axis((mean_cum_regret.shape[0]*0.989,\n", " mean_cum_regret.shape[0],\n", " min_reg-5,\n", " min_reg+12))\n", "\n", " # config plot\n", " axs[0].set_title('Mean Episode Reward')\n", " axs[1].set_title('Mean Episode Reward (Log scale)')\n", " axs[2].set_title('Mean Episode Reward (Zoom on best)')\n", " axs[3].set_title('Total Regret')\n", " axs[4].set_title('Total Regret (Zoom on best)')\n", " plt.xlabel('Episodes')\n", " axs[0].legend(loc='upper left')\n", "\n", "plt.show()" ] } ], "metadata": { "accelerator": "GPU", "colab": { "collapsed_sections": [], "name": "PyTorch_Tutorial.ipynb", "provenance": [] }, "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.7.10" } }, "nbformat": 4, "nbformat_minor": 4 }
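{ "cell_type": "markdown", "metadata": {}, "source": [ "> Note: The plotting cells above read results dictionaries (`b2_results_s`, `b2_results_a`, `b10_results_s`, `b10_results_a`) keyed by strategy label, where each entry holds per-seed `episode_mean_rew` and `cum_regret` arrays. The cell below is a minimal sketch of how such a dictionary could be assembled from the strategy functions defined in this notebook; the helper name `run_simple_strategies_b2`, the use of each strategy's default hyperparameters, and the exact regret bookkeeping are illustrative assumptions, not the book's reference code." ] },
{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# A minimal, illustrative sketch (not the book's reference code) of how a results\n", "# dictionary such as b2_results_s could be built for the two-armed bandit.\n", "# Assumptions: each strategy returns (name, returns, Qe, actions) as defined above,\n", "# and per-episode regret is V* minus the true Q-value of the selected action.\n", "def run_simple_strategies_b2(n_episodes=1000):\n", "    results = {}\n", "    strategies = [pure_exploitation,\n", "                  pure_exploration,\n", "                  epsilon_greedy,\n", "                  lin_dec_epsilon_greedy,\n", "                  exp_dec_epsilon_greedy,\n", "                  optimistic_initialization]\n", "    for strategy in tqdm(strategies, desc='All experiments'):\n", "        for seed in SEEDS:\n", "            env = gym.make('BanditTwoArmedUniform-v0', seed=seed) ; env.reset()\n", "            true_Q = np.array(env.env.p_dist * env.env.r_dist)\n", "            opt_V = np.max(true_Q)\n", "            np.random.seed(seed) ; random.seed(seed)\n", "            name, returns, Qe, actions = strategy(env, n_episodes=n_episodes)\n", "\n", "            # running mean reward and cumulative regret, per episode\n", "            episode_mean_rew = np.cumsum(returns) / (np.arange(n_episodes) + 1)\n", "            cum_regret = np.cumsum(opt_V - true_Q[actions])\n", "\n", "            entry = results.setdefault(name, {'episode_mean_rew': [],\n", "                                              'cum_regret': []})\n", "            entry['episode_mean_rew'].append(episode_mean_rew)\n", "            entry['cum_regret'].append(cum_regret)\n", "    return results\n", "\n", "# example usage (commented out to avoid re-running on load):\n", "# b2_results_s = run_simple_strategies_b2()" ] }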