{ "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "from math import ceil, floor\n", "from sklearn.datasets import load_iris\n", "from sklearn.model_selection import train_test_split as sktrain_test_split" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "class ShuffleSplit():\n", " def __init__(self, n_splits=10,\n", " train_size=0.9, test_size=0.1, random_state=0):\n", " self.n_splits = n_splits\n", " self.train_size = train_size\n", " self.test_size = test_size\n", " self.random_state = random_state\n", "\n", " def split(self, X, y=None):\n", " n_train = floor(self.train_size * X.shape[0])\n", " n_test = ceil(self.test_size * X.shape[0])\n", " rng = np.random.RandomState(self.random_state)\n", " for _ in range(self.n_splits):\n", " permutation = rng.permutation(X.shape[0])\n", " yield (permutation[n_test:(n_test + n_train)],\n", " permutation[:n_test])" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [], "source": [ "class StratifiedShuffleSplit():\n", " def __init__(self, n_splits=10,\n", " train_size=0.9, test_size=0.1, random_state=0):\n", " self.n_splits = n_splits\n", " self.train_size = train_size\n", " self.test_size = test_size\n", " self.random_state = random_state\n", "\n", " def _approximate_mode(self, class_counts, n_draws, rng):\n", " continuous = n_draws * class_counts / class_counts.sum()\n", " floored = np.floor(continuous)\n", " need_to_add = int(n_draws - floored.sum())\n", " if need_to_add > 0:\n", " remainder = continuous - floored\n", " values = np.sort(np.unique(remainder))[::-1]\n", " for value in values:\n", " inds = np.where(remainder == value)[0]\n", " add_now = min(len(inds), need_to_add)\n", " inds = rng.choice(inds, size=add_now, replace=False)\n", " floored[inds] += 1\n", " need_to_add -= add_now\n", " if need_to_add == 0:\n", " break\n", " return floored.astype(int)\n", "\n", " def split(self, X, y):\n", " n_train = np.floor(self.train_size * X.shape[0])\n", " n_test = np.ceil(self.test_size * X.shape[0])\n", " classes, y_indices = np.unique(y, return_inverse=True)\n", " class_counts = np.bincount(y_indices)\n", " # quick sort is not stable\n", " class_indices = np.split(np.argsort(y_indices, kind='mergesort'),\n", " np.cumsum(class_counts)[:-1])\n", " rng = np.random.RandomState(self.random_state)\n", " for _ in range(self.n_splits):\n", " train, test = [], []\n", " n_i = self._approximate_mode(class_counts, n_train, rng)\n", " t_i = self._approximate_mode(class_counts - n_i, n_test, rng)\n", " for i in range(classes.shape[0]):\n", " permutation = rng.permutation(class_counts[i])\n", " train.extend(class_indices[i][permutation][:n_i[i]])\n", " test.extend(class_indices[i][permutation][n_i[i]:n_i[i] + t_i[i]])\n", " train = rng.permutation(train)\n", " test = rng.permutation(test)\n", " yield train, test" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [], "source": [ "def train_test_split(X, y, train_size=0.75, test_size=0.25,\n", " random_state=0, stratify=None):\n", " if stratify is not None:\n", " cv = StratifiedShuffleSplit(train_size=train_size, test_size=test_size, random_state=0)\n", " train, test = next(cv.split(X, stratify))\n", " else:\n", " cv = ShuffleSplit(train_size=train_size, test_size=test_size, random_state=0)\n", " train, test = next(cv.split(X))\n", " return X[train], X[test], y[train], y[test]" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, 
"outputs": [], "source": [ "X, y = load_iris(return_X_y=True)\n", "X_train_1, X_test_1, y_train_1, y_test_1 = train_test_split(X, y, random_state=0)\n", "X_train_2, X_test_2, y_train_2, y_test_2 = sktrain_test_split(X, y, random_state=0)\n", "assert np.allclose(X_train_1, X_train_2)\n", "assert np.allclose(X_test_1, X_test_2)\n", "assert np.array_equal(y_train_1, y_train_2)\n", "assert np.array_equal(y_test_1, y_test_2)" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [], "source": [ "X, y = load_iris(return_X_y=True)\n", "X_train_1, X_test_1, y_train_1, y_test_1 = train_test_split(X, y, train_size=0.5,\n", " test_size=0.2, random_state=0)\n", "X_train_2, X_test_2, y_train_2, y_test_2 = sktrain_test_split(X, y, train_size=0.5,\n", " test_size=0.2, random_state=0)\n", "assert np.allclose(X_train_1, X_train_2)\n", "assert np.allclose(X_test_1, X_test_2)\n", "assert np.array_equal(y_train_1, y_train_2)\n", "assert np.array_equal(y_test_1, y_test_2)" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [], "source": [ "X, y = load_iris(return_X_y=True)\n", "X_train_1, X_test_1, y_train_1, y_test_1 = train_test_split(X, y, random_state=0, stratify=y)\n", "X_train_2, X_test_2, y_train_2, y_test_2 = sktrain_test_split(X, y, random_state=0, stratify=y)\n", "assert np.allclose(X_train_1, X_train_2)\n", "assert np.allclose(X_test_1, X_test_2)\n", "assert np.array_equal(y_train_1, y_train_2)\n", "assert np.array_equal(y_test_1, y_test_2)" ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [], "source": [ "X, y = load_iris(return_X_y=True)\n", "X_train_1, X_test_1, y_train_1, y_test_1 = train_test_split(X, y, train_size=0.5,\n", " test_size=0.2, random_state=0, stratify=y)\n", "X_train_2, X_test_2, y_train_2, y_test_2 = sktrain_test_split(X, y, train_size=0.5,\n", " test_size=0.2, random_state=0, stratify=y)\n", "assert np.allclose(X_train_1, X_train_2)\n", "assert np.allclose(X_test_1, X_test_2)\n", "assert np.array_equal(y_train_1, y_train_2)\n", "assert np.array_equal(y_test_1, y_test_2)" ] } ], "metadata": { "kernelspec": { "display_name": "dev", "language": "python", "name": "dev" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.7.3" } }, "nbformat": 4, "nbformat_minor": 2 }