{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "from sklearn.base import clone\n",
    "from sklearn.datasets import load_boston, load_iris\n",
    "from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier\n",
    "from sklearn.model_selection import KFold, StratifiedKFold\n",
    "from sklearn.model_selection import learning_curve as sklearning_curve"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "def learning_curve(estimator, X, y, train_sizes, random_state=0):\n",
    "    if estimator._estimator_type == \"regressor\":\n",
    "        cv = KFold()\n",
    "    else:  # estimator._estimator_type == \"classifier\"\n",
    "        cv = StratifiedKFold()\n",
    "    train_scores = np.zeros((len(train_sizes), cv.n_splits))\n",
    "    test_scores = np.zeros((len(train_sizes), cv.n_splits))\n",
    "    cv_iter = list(cv.split(X, y))\n",
    "    train_sizes_abs = (len(cv_iter[0][0]) * np.array(train_sizes)).astype(int)\n",
    "    rng = np.random.RandomState(random_state)\n",
    "    cv_iter = [(rng.permutation(train), test) for train, test in cv_iter]\n",
    "    for i, train_size in enumerate(train_sizes_abs):\n",
    "        for j, (train, test) in enumerate(cv_iter):\n",
    "            est = clone(estimator)\n",
    "            est.fit(X[train][:train_size], y[train][:train_size])\n",
    "            train_scores[i, j] = est.score(X[train][:train_size], y[train][:train_size])\n",
    "            test_scores[i, j] = est.score(X[test], y[test])\n",
    "    return train_sizes_abs, train_scores, test_scores"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# regression\n",
    "X, y = load_boston(return_X_y=True)\n",
    "clf = RandomForestRegressor(random_state=0)\n",
    "ans1 = learning_curve(clf, X, y, train_sizes=[0.2, 0.4, 0.6, 0.8, 1], random_state=0)\n",
    "ans2 = sklearning_curve(clf, X, y, train_sizes=[0.2, 0.4, 0.6, 0.8, 1], shuffle=True, random_state=0)\n",
    "assert np.array_equal(ans1[0], ans2[0])\n",
    "assert np.allclose(ans1[1], ans2[1])\n",
    "assert np.allclose(ans1[2], ans2[2])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# classification\n",
    "X, y = load_iris(return_X_y=True)\n",
    "clf = RandomForestClassifier(random_state=0)\n",
    "ans1 = learning_curve(clf, X, y, train_sizes=[0.2, 0.4, 0.6, 0.8, 1], random_state=0)\n",
    "ans2 = sklearning_curve(clf, X, y, train_sizes=[0.2, 0.4, 0.6, 0.8, 1], shuffle=True, random_state=0)\n",
    "assert np.array_equal(ans1[0], ans2[0])\n",
    "assert np.allclose(ans1[1], ans2[1])\n",
    "assert np.allclose(ans1[2], ans2[2])"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "dev",
   "language": "python",
   "name": "dev"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}