{ "cells": [ { "cell_type": "code", "execution_count": 9, "metadata": {}, "outputs": [], "source": [ "# 파이썬 2와 파이썬 3 지원\n", "from __future__ import division, print_function, unicode_literals\n", "\n", "# 공통\n", "import numpy as np\n", "import os\n", "\n", "# 일관된 출력을 위해 유사난수 초기화\n", "np.random.seed(42)\n", "\n", "# 맷플롯립 설정\n", "# 맷플롯립 설정\n", "%matplotlib inline\n", "import matplotlib\n", "import matplotlib.pyplot as plt\n", "plt.rcParams['axes.labelsize'] = 14\n", "plt.rcParams['xtick.labelsize'] = 12\n", "plt.rcParams['ytick.labelsize'] = 12\n", "\n", "# 한글출력\n", "matplotlib.rc('font', family='NanumBarunGothic')\n", "plt.rcParams['axes.unicode_minus'] = False\n", "\n", "# 그림을 저장할 폴드\n", "PROJECT_ROOT_DIR = \"c:\\\\git\\\\hands_on_ml_link\"\n", "CHAPTER_ID = \"end_to_end_project\"\n", "IMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, \"images\", CHAPTER_ID)\n", "\n", "def save_fig(fig_id, tight_layout=True, fig_extension=\"png\", resolution=300):\n", " path = os.path.join(IMAGES_PATH, fig_id + \".\" + fig_extension)\n", " if tight_layout:\n", " plt.tight_layout()\n", " plt.savefig(path, format=fig_extension, dpi=resolution)\n", "\n", "import os\n", "import tarfile\n", "from six.moves import urllib\n", "\n", "DOWNLOAD_ROOT = \"https://raw.githubusercontent.com/ageron/handson-ml/master/\"\n", "HOUSING_PATH = os.path.join(\"c:\\\\git\\\\hands_on_ml_link\",\"datasets\", \"housing\")\n", "HOUSING_URL = DOWNLOAD_ROOT + \"datasets/housing/housing.tgz\"\n", "\n", "def fetch_housing_data(housing_url=HOUSING_URL, housing_path=HOUSING_PATH):\n", " if not os.path.isdir(housing_path):\n", " os.makedirs(housing_path)\n", " tgz_path = os.path.join(housing_path, \"housing.tgz\")\n", " urllib.request.urlretrieve(housing_url, tgz_path)\n", " housing_tgz = tarfile.open(tgz_path)\n", " housing_tgz.extractall(path=housing_path)\n", " housing_tgz.close()\n", " \n", "import pandas as pd\n", "\n", "def load_housing_data(housing_path=HOUSING_PATH):\n", " csv_path = os.path.join(housing_path, \"housing.csv\")\n", " return pd.read_csv(csv_path)\n", "\n", "housing = load_housing_data()\n", "\n", "np.random.seed(42)\n", "\n", "import numpy as np\n", "\n", "# 예시를 위해서 만든 것입니다. 
사이킷런에는 train_test_split() 함수가 있습니다.\n", "def split_train_test(data, test_ratio):\n", " shuffled_indices = np.random.permutation(len(data))\n", " test_set_size = int(len(data) * test_ratio)\n", " test_indices = shuffled_indices[:test_set_size]\n", " train_indices = shuffled_indices[test_set_size:]\n", " return data.iloc[train_indices], data.iloc[test_indices]\n", "\n", "train_set, test_set = split_train_test(housing, 0.2)\n", "\n", "\n", "from zlib import crc32\n", "\n", "def test_set_check(identifier, test_ratio):\n", " return crc32(np.int64(identifier)) & 0xffffffff < test_ratio * 2**32\n", "\n", "def split_train_test_by_id(data, test_ratio, id_column):\n", " ids = data[id_column]\n", " in_test_set = ids.apply(lambda id_: test_set_check(id_, test_ratio))\n", " return data.loc[~in_test_set], data.loc[in_test_set]\n", "\n", "import hashlib\n", "\n", "def test_set_check(identifier, test_ratio, hash=hashlib.md5):\n", " return bytearray(hash(np.int64(identifier)).digest())[-1] < 256 * test_ratio\n", "\n", "def test_set_check(identifier, test_ratio, hash=hashlib.md5):\n", " return bytearray(hash(np.int64(identifier)).digest())[-1] < 256 * test_ratio\n", "\n", "housing_with_id = housing.reset_index() # `index` 열이 추가된 데이터프레임이 반환됩니다.\n", "train_set, test_set = split_train_test_by_id(housing_with_id, 0.2, \"index\")\n", "\n", "housing_with_id[\"id\"] = housing[\"longitude\"] * 1000 + housing[\"latitude\"]\n", "train_set, test_set = split_train_test_by_id(housing_with_id, 0.2, \"id\")\n", "\n", "from sklearn.model_selection import train_test_split\n", "\n", "train_set, test_set = train_test_split(housing, test_size=0.2, random_state=42)\n", "\n", "# 소득 카테고리 개수를 제한하기 위해 1.5로 나눕니다.\n", "housing[\"income_cat\"] = np.ceil(housing[\"median_income\"] / 1.5)\n", "# 5 이상은 5로 레이블합니다.\n", "housing[\"income_cat\"].where(housing[\"income_cat\"] < 5, 5.0, inplace=True)\n", "\n", "from sklearn.model_selection import StratifiedShuffleSplit\n", "\n", "split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)\n", "for train_index, test_index in split.split(housing, housing[\"income_cat\"]):\n", " strat_train_set = housing.loc[train_index]\n", " strat_test_set = housing.loc[test_index]\n", "\n", "def income_cat_proportions(data):\n", " return data[\"income_cat\"].value_counts() / len(data)\n", "\n", "train_set, test_set = train_test_split(housing, test_size=0.2, random_state=42)\n", "\n", "compare_props = pd.DataFrame({\n", " \"Overall\": income_cat_proportions(housing),\n", " \"Stratified\": income_cat_proportions(strat_test_set),\n", " \"Random\": income_cat_proportions(test_set),\n", "}).sort_index()\n", "compare_props[\"Rand. %error\"] = 100 * compare_props[\"Random\"] / compare_props[\"Overall\"] - 100\n", "compare_props[\"Strat. 
%error\"] = 100 * compare_props[\"Stratified\"] / compare_props[\"Overall\"] - 100\n", "\n", "for set_ in (strat_train_set, strat_test_set):\n", " set_.drop(\"income_cat\", axis=1, inplace=True)\n", "\n", "housing = strat_train_set.copy()\n", "\n", "housing = strat_train_set.drop(\"median_house_value\", axis=1) # 훈련 세트를 위해 레이블 삭제\n", "housing_labels = strat_train_set[\"median_house_value\"].copy()\n", "\n", "sample_incomplete_rows = housing[housing.isnull().any(axis=1)].head()\n", "\n", "from sklearn.impute import SimpleImputer\n", "\n", "imputer = SimpleImputer(strategy=\"median\")\n", "\n", "housing_num = housing.drop('ocean_proximity', axis=1)\\\n", " \n", "imputer.fit(housing_num)\n", "housing_num.median().values\n", "X = imputer.transform(housing_num)\n", "housing_tr = pd.DataFrame(X, columns=housing_num.columns,\n", " index = list(housing.index.values))\n", "housing_tr = pd.DataFrame(X, columns=housing_num.columns)\n", "\n", "housing_cat = housing['ocean_proximity']\n", "housing_cat_encoded, housing_categories = housing_cat.factorize()\n", "from sklearn.preprocessing import OneHotEncoder\n", "\n", "encoder = OneHotEncoder(categories='auto')\n", "housing_cat_1hot = encoder.fit_transform(housing_cat_encoded.reshape(-1,1))\n", "from sklearn.base import BaseEstimator, TransformerMixin\n", "from sklearn.utils import check_array\n", "from sklearn.preprocessing import LabelEncoder\n", "from scipy import sparse\n", "\n", "class CategoricalEncoder(BaseEstimator, TransformerMixin):\n", " def __init__(self, encoding='onehot', categories='auto', dtype=np.float64,\n", " handle_unknown='error'):\n", " self.encoding = encoding\n", " self.categories = categories\n", " self.dtype = dtype\n", " self.handle_unknown = handle_unknown\n", "\n", " def fit(self, X, y=None):\n", " \"\"\"Fit the CategoricalEncoder to X.\n", " Parameters\n", " ----------\n", " X : array-like, shape [n_samples, n_feature]\n", " The data to determine the categories of each feature.\n", " Returns\n", " -------\n", " self\n", " \"\"\"\n", "\n", " if self.encoding not in ['onehot', 'onehot-dense', 'ordinal']:\n", " template = (\"encoding should be either 'onehot', 'onehot-dense' \"\n", " \"or 'ordinal', got %s\")\n", " raise ValueError(template % self.handle_unknown)\n", "\n", " if self.handle_unknown not in ['error', 'ignore']:\n", " template = (\"handle_unknown should be either 'error' or \"\n", " \"'ignore', got %s\")\n", " raise ValueError(template % self.handle_unknown)\n", "\n", " if self.encoding == 'ordinal' and self.handle_unknown == 'ignore':\n", " raise ValueError(\"handle_unknown='ignore' is not supported for\"\n", " \" encoding='ordinal'\")\n", "\n", " X = check_array(X, dtype=np.object, accept_sparse='csc', copy=True)\n", " n_samples, n_features = X.shape\n", "\n", " self._label_encoders_ = [LabelEncoder() for _ in range(n_features)]\n", "\n", " for i in range(n_features):\n", " le = self._label_encoders_[i]\n", " Xi = X[:, i]\n", " if self.categories == 'auto':\n", " le.fit(Xi)\n", " else:\n", " valid_mask = np.in1d(Xi, self.categories[i])\n", " if not np.all(valid_mask):\n", " if self.handle_unknown == 'error':\n", " diff = np.unique(Xi[~valid_mask])\n", " msg = (\"Found unknown categories {0} in column {1}\"\n", " \" during fit\".format(diff, i))\n", " raise ValueError(msg)\n", " le.classes_ = np.array(np.sort(self.categories[i]))\n", "\n", " self.categories_ = [le.classes_ for le in self._label_encoders_]\n", "\n", " return self\n", "\n", " def transform(self, X):\n", " \"\"\"Transform X using one-hot 
encoding.\n", " Parameters\n", " ----------\n", " X : array-like, shape [n_samples, n_features]\n", " The data to encode.\n", " Returns\n", " -------\n", " X_out : sparse matrix or a 2-d array\n", " Transformed input.\n", " \"\"\"\n", " X = check_array(X, accept_sparse='csc', dtype=np.object, copy=True)\n", " n_samples, n_features = X.shape\n", " X_int = np.zeros_like(X, dtype=np.int)\n", " X_mask = np.ones_like(X, dtype=np.bool)\n", "\n", " for i in range(n_features):\n", " valid_mask = np.in1d(X[:, i], self.categories_[i])\n", "\n", " if not np.all(valid_mask):\n", " if self.handle_unknown == 'error':\n", " diff = np.unique(X[~valid_mask, i])\n", " msg = (\"Found unknown categories {0} in column {1}\"\n", " \" during transform\".format(diff, i))\n", " raise ValueError(msg)\n", " else:\n", " # Set the problematic rows to an acceptable value and\n", " # continue `The rows are marked `X_mask` and will be\n", " # removed later.\n", " X_mask[:, i] = valid_mask\n", " X[:, i][~valid_mask] = self.categories_[i][0]\n", " X_int[:, i] = self._label_encoders_[i].transform(X[:, i])\n", "\n", " if self.encoding == 'ordinal':\n", " return X_int.astype(self.dtype, copy=False)\n", "\n", " mask = X_mask.ravel()\n", " n_values = [cats.shape[0] for cats in self.categories_]\n", " n_values = np.array([0] + n_values)\n", " indices = np.cumsum(n_values)\n", "\n", " column_indices = (X_int + indices[:-1]).ravel()[mask]\n", " row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),\n", " n_features)[mask]\n", " data = np.ones(n_samples * n_features)[mask]\n", "\n", " out = sparse.csc_matrix((data, (row_indices, column_indices)),\n", " shape=(n_samples, indices[-1]),\n", " dtype=self.dtype).tocsr()\n", " if self.encoding == 'onehot-dense':\n", " return out.toarray()\n", " else:\n", " return out\n", "\n", "cat_encoder = CategoricalEncoder()\n", "housing_cat_reshaped = housing_cat.values.reshape(-1, 1)\n", "housing_cat_1hot = cat_encoder.fit_transform(housing_cat_reshaped)\n", "from sklearn.preprocessing import OneHotEncoder\n", "\n", "cat_encoder = OneHotEncoder(categories='auto')\n", "housing_cat_reshaped = housing_cat.values.reshape(-1, 1)\n", "housing_cat_1hot = cat_encoder.fit_transform(housing_cat_reshaped)\n", "cat_encoder = OneHotEncoder(categories='auto', sparse=False)\n", "housing_cat_1hot = cat_encoder.fit_transform(housing_cat_reshaped)\n", "cat_encoder.categories_\n", "housing_cat = housing[['ocean_proximity']]\n", "from sklearn.preprocessing import OrdinalEncoder\n", "ordinal_encoder = OrdinalEncoder()\n", "housing_cat_encoded = ordinal_encoder.fit_transform(housing_cat)\n", "from sklearn.preprocessing import OneHotEncoder\n", "\n", "cat_encoder = OneHotEncoder(categories='auto')\n", "housing_cat_1hot = cat_encoder.fit_transform(housing_cat)\n", "cat_encoder = OneHotEncoder(categories='auto', sparse=False)\n", "housing_cat_1hot = cat_encoder.fit_transform(housing_cat)\n", "from sklearn.base import BaseEstimator, TransformerMixin\n", "\n", "# 컬럼 인덱스\n", "rooms_ix, bedrooms_ix, population_ix, household_ix = 3, 4, 5, 6\n", "\n", "class CombinedAttributesAdder(BaseEstimator, TransformerMixin):\n", " def __init__(self, add_bedrooms_per_room = True): # no *args or **kargs\n", " self.add_bedrooms_per_room = add_bedrooms_per_room\n", " def fit(self, X, y=None):\n", " return self # nothing else to do\n", " def transform(self, X, y=None):\n", " rooms_per_household = X[:, rooms_ix] / X[:, household_ix]\n", " population_per_household = X[:, population_ix] / X[:, household_ix]\n", " if 
self.add_bedrooms_per_room:\n", " bedrooms_per_room = X[:, bedrooms_ix] / X[:, rooms_ix]\n", " return np.c_[X, rooms_per_household, population_per_household,\n", " bedrooms_per_room]\n", " else:\n", " return np.c_[X, rooms_per_household, population_per_household]\n", "\n", "attr_adder = CombinedAttributesAdder(add_bedrooms_per_room=False)\n", "housing_extra_attribs = attr_adder.transform(housing.values)\n", "housing_extra_attribs = pd.DataFrame(\n", " housing_extra_attribs, \n", " columns=list(housing.columns)+[\"rooms_per_household\", \"population_per_household\"])\n", "from sklearn.pipeline import Pipeline\n", "from sklearn.preprocessing import StandardScaler\n", "\n", "num_pipeline = Pipeline([\n", " ('imputer', SimpleImputer(strategy=\"median\")),\n", " ('attribs_adder', CombinedAttributesAdder()),\n", " ('std_scaler', StandardScaler()),\n", " ])\n", "\n", "housing_num_tr = num_pipeline.fit_transform(housing_num)\n", "from sklearn.compose import ColumnTransformer\n", "\n", "num_attribs = list(housing_num)\n", "cat_attribs = [\"ocean_proximity\"]\n", "\n", "full_pipeline = ColumnTransformer([\n", " (\"num\", num_pipeline, num_attribs),\n", " (\"cat\", OneHotEncoder(categories='auto'), cat_attribs),\n", " ])\n", "\n", "housing_prepared = full_pipeline.fit_transform(housing)\n", "from sklearn.base import BaseEstimator, TransformerMixin\n", "\n", "# 사이킷런이 DataFrame을 바로 사용하지 못하므로\n", "# 수치형이나 범주형 컬럼을 선택하는 클래스를 만듭니다.\n", "class DataFrameSelector(BaseEstimator, TransformerMixin):\n", " def __init__(self, attribute_names):\n", " self.attribute_names = attribute_names\n", " def fit(self, X, y=None):\n", " return self\n", " def transform(self, X):\n", " return X[self.attribute_names].values\n", "num_attribs = list(housing_num)\n", "cat_attribs = [\"ocean_proximity\"]\n", "\n", "num_pipeline = Pipeline([\n", " ('selector', DataFrameSelector(num_attribs)),\n", " ('imputer', SimpleImputer(strategy=\"median\")),\n", " ('attribs_adder', CombinedAttributesAdder()),\n", " ('std_scaler', StandardScaler()),\n", " ])\n", "\n", "cat_pipeline = Pipeline([\n", " ('selector', DataFrameSelector(cat_attribs)),\n", " ('cat_encoder', CategoricalEncoder(encoding=\"onehot-dense\")),\n", " ])\n", "full_pipeline = ColumnTransformer([\n", " (\"num_pipeline\", num_pipeline, num_attribs),\n", " (\"cat_encoder\", OneHotEncoder(categories='auto'), cat_attribs),\n", " ])\n", "housing_prepared = full_pipeline.fit_transform(housing)\n", "\n", "\n", "df = pd.DataFrame(np.arange(10, 22).reshape(3, 4),\n", " index=[\"a\", \"b\", \"c\"],\n", " columns=[\"A\", \"B\", \"C\", \"D\"])" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Chapter 2. 
머신러닝 프로젝트 처음부터 끝까지\n", "---" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "---\n", "## 2.6 모델 선택과 훈련" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### 2.6.1 훈련 세트에서 훈련하고 평가하기" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "- 선형 회귀 모델을 훈련" ] }, { "cell_type": "code", "execution_count": 11, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "LinearRegression(copy_X=True, fit_intercept=True, n_jobs=None,\n", " normalize=False)" ] }, "execution_count": 11, "metadata": {}, "output_type": "execute_result" } ], "source": [ "from sklearn.linear_model import LinearRegression\n", "\n", "lin_reg = LinearRegression() # 선형 회귀 모델\n", "lin_reg.fit(housing_prepared, housing_labels) # 훈련, 매개변수(Train data, Target values)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "- 훈련 세트에 있는 몇 개 샘플에 대해 적용" ] }, { "cell_type": "code", "execution_count": 12, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
longitudelatitudehousing_median_agetotal_roomstotal_bedroomspopulationhouseholdsmedian_incomeocean_proximity
17606-121.8937.2938.01568.0351.0710.0339.02.7042<1H OCEAN
18632-121.9337.0514.0679.0108.0306.0113.06.4214<1H OCEAN
14650-117.2032.7731.01952.0471.0936.0462.02.8621NEAR OCEAN
3230-119.6136.3125.01847.0371.01460.0353.01.8839INLAND
3555-118.5934.2317.06592.01525.04459.01463.03.0347<1H OCEAN
\n", "
" ], "text/plain": [ " longitude latitude housing_median_age total_rooms total_bedrooms \\\n", "17606 -121.89 37.29 38.0 1568.0 351.0 \n", "18632 -121.93 37.05 14.0 679.0 108.0 \n", "14650 -117.20 32.77 31.0 1952.0 471.0 \n", "3230 -119.61 36.31 25.0 1847.0 371.0 \n", "3555 -118.59 34.23 17.0 6592.0 1525.0 \n", "\n", " population households median_income ocean_proximity \n", "17606 710.0 339.0 2.7042 <1H OCEAN \n", "18632 306.0 113.0 6.4214 <1H OCEAN \n", "14650 936.0 462.0 2.8621 NEAR OCEAN \n", "3230 1460.0 353.0 1.8839 INLAND \n", "3555 4459.0 1463.0 3.0347 <1H OCEAN " ] }, "execution_count": 12, "metadata": {}, "output_type": "execute_result" } ], "source": [ "some_data = housing.iloc[:5]\n", "some_data" ] }, { "cell_type": "code", "execution_count": 13, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "17606 286600.0\n", "18632 340600.0\n", "14650 196900.0\n", "3230 46300.0\n", "3555 254500.0\n", "Name: median_house_value, dtype: float64" ] }, "execution_count": 13, "metadata": {}, "output_type": "execute_result" } ], "source": [ "some_labels = housing_labels.iloc[:5]\n", "some_labels" ] }, { "cell_type": "code", "execution_count": 14, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array([[-1.15604281, 0.77194962, 0.74333089, -0.49323393, -0.44543821,\n", " -0.63621141, -0.42069842, -0.61493744, -0.31205452, -0.08649871,\n", " 0.15531753, 1. , 0. , 0. , 0. ,\n", " 0. ],\n", " [-1.17602483, 0.6596948 , -1.1653172 , -0.90896655, -1.0369278 ,\n", " -0.99833135, -1.02222705, 1.33645936, 0.21768338, -0.03353391,\n", " -0.83628902, 1. , 0. , 0. , 0. ,\n", " 0. ],\n", " [ 1.18684903, -1.34218285, 0.18664186, -0.31365989, -0.15334458,\n", " -0.43363936, -0.0933178 , -0.5320456 , -0.46531516, -0.09240499,\n", " 0.4222004 , 0. , 0. , 0. , 0. ,\n", " 1. ],\n", " [-0.01706767, 0.31357576, -0.29052016, -0.36276217, -0.39675594,\n", " 0.03604096, -0.38343559, -1.04556555, -0.07966124, 0.08973561,\n", " -0.19645314, 0. , 1. , 0. , 0. ,\n", " 0. ],\n", " [ 0.49247384, -0.65929936, -0.92673619, 1.85619316, 2.41221109,\n", " 2.72415407, 2.57097492, -0.44143679, -0.35783383, -0.00419445,\n", " 0.2699277 , 1. , 0. , 0. , 0. ,\n", " 0. 
]])" ] }, "execution_count": 14, "metadata": {}, "output_type": "execute_result" } ], "source": [ "some_data_prepared = full_pipeline.transform(some_data) # 변환 파이프라인(데이터 전처리)\n", "some_data_prepared" ] }, { "cell_type": "code", "execution_count": 15, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "예측: [210644.60459286 317768.80697211 210956.43331178 59218.98886849\n", " 189747.55849879]\n" ] } ], "source": [ "print(\"예측:\", lin_reg.predict(some_data_prepared))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "- 실제 값과 비교" ] }, { "cell_type": "code", "execution_count": 16, "metadata": { "scrolled": true }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "레이블: [286600.0, 340600.0, 196900.0, 46300.0, 254500.0]\n" ] } ], "source": [ "print(\"레이블:\", list(some_labels))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "- 사이킷런의 mean_square_error 함수를 사용\n", "- 전체 훈련 세트에 대한 이 회귀 모델의 **RMSE**(평균 제곱근 오차Root Mean Square Error) 측정" ] }, { "cell_type": "code", "execution_count": 17, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "68628.19819848923" ] }, "execution_count": 17, "metadata": {}, "output_type": "execute_result" } ], "source": [ "from sklearn.metrics import mean_squared_error\n", "\n", "housing_predictions = lin_reg.predict(housing_prepared)\n", "lin_mse = mean_squared_error(housing_labels, housing_predictions)\n", "lin_rmse = np.sqrt(lin_mse)\n", "lin_rmse" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "- 대부분 중간 주택 가격은 120,000에서 265,000 사이\n", "- 예측 오차가 68,628인 것은 훈련 데이터에 과소적합된 사례\n", "- 과소적합을 해결하는 주요 방법\n", " - 더 강력한 모델을 선택\n", " - 훈련 알고리즘에 더 좋은 특성을 주입\n", " - 모델의 규제를 감소(이 모델은 규제 사용 X)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "- 먼저 더 복잡한 모델인 DecisionTreeRegressor 모델을 시도\n", "- 이 모델은 강력하고 데이터에서 복잡한 비선형 관계를 찾을 수 있음" ] }, { "cell_type": "code", "execution_count": 18, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "0.0" ] }, "execution_count": 18, "metadata": {}, "output_type": "execute_result" } ], "source": [ "from sklearn.tree import DecisionTreeRegressor\n", "\n", "tree_reg = DecisionTreeRegressor(random_state=42)\n", "tree_reg.fit(housing_prepared, housing_labels)\n", "housing_predictions = tree_reg.predict(housing_prepared)\n", "tree_mse = mean_squared_error(housing_labels, housing_predictions)\n", "tree_rmse = np.sqrt(tree_mse)\n", "tree_rmse" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "- 이 모델은 데이터에 과대적합됨\n", "- 훈련에 사용한 데이터를 예측에 그대로 사용하였기 때문" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### 2.6.2 교차 검증을 사용한 평가" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "- 모델을 평가하기 위해 사이킷런의 **교차 검증** 기능인 **K-겹 교차 검증**K-fold cross-validation을 수행\n", "- 훈련 세트를 **폴드**fold라 불리는 10개의 서브셋으로 무작위 분할 후, 10번 훈련하고 평가\n", "- 매번 다른 폴드 선택해 평가에 사용, 나머지는 훈련에 사용\n", "- 10개의 평가 점수가 담긴 배열이 결과가 됨" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "![k-fold_cross_validation](./images/k-fold_cross_validation.PNG)\n", "**
K-겹 교차 검증(k-fold cross-validation)
**" ] }, { "cell_type": "code", "execution_count": 19, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "점수: [70194.33680785 66855.16363941 72432.58244769 70758.73896782\n", " 71115.88230639 75585.14172901 70262.86139133 70273.6325285\n", " 75366.87952553 71231.65726027]\n", "평균: 71407.68766037929\n", "표준편차: 2439.4345041191004\n" ] } ], "source": [ "from sklearn.model_selection import cross_val_score\n", "\n", "scores = cross_val_score(tree_reg, housing_prepared, housing_labels,\n", " scoring=\"neg_mean_squared_error\", cv=10)\n", "tree_rmse_scores = np.sqrt(-scores) # 사이킷런 교차검증의 score는 비용 함수가 아니라 효용 함수이므로 RMSE를 나타내기 위해 -를 붙임\n", "\n", "def display_scores(scores):\n", " print(\"점수:\", scores)\n", " print(\"평균:\", scores.mean())\n", " print(\"표준편차:\", scores.std())\n", "\n", "display_scores(tree_rmse_scores)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "- 결정 트리 결과가 이전보다 나빠진 것을 볼 수 있다.\n", "- 교차 검증으로 모델의 성능 추정뿐 아니라 이 추정이 얼마나 정확한지(즉, 표준편차) 측정 가능" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "- 선형 회귀 모델과 비교" ] }, { "cell_type": "code", "execution_count": 20, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "점수: [66782.73843989 66960.118071 70347.95244419 74739.57052552\n", " 68031.13388938 71193.84183426 64969.63056405 68281.61137997\n", " 71552.91566558 67665.10082067]\n", "평균: 69052.46136345083\n", "표준편차: 2731.674001798349\n" ] } ], "source": [ "lin_scores = cross_val_score(lin_reg, housing_prepared, housing_labels,\n", " scoring=\"neg_mean_squared_error\", cv=10)\n", "lin_rmse_scores = np.sqrt(-lin_scores)\n", "display_scores(lin_rmse_scores)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "- 확실히 결정 트리 모델이 과대적합되어 선형 회귀 모델보다 성능이 나쁘다." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "- 마지막으로 RandomForestRegressor 모델 시도\n", " - 특성을 무작위 선택하여 많은 결정 트리 만들고 그 예측을 평균낸다.\n", " - 여러 다른 모델을 모아서 하나의 모델을 만드는 것을 **앙상블 학습**이라고 함\n", " - 머신러닝 알고리즘의 성능을 극대화하는 방법 중 하나" ] }, { "cell_type": "code", "execution_count": 21, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "21933.31414779769" ] }, "execution_count": 21, "metadata": {}, "output_type": "execute_result" } ], "source": [ "from sklearn.ensemble import RandomForestRegressor\n", "\n", "forest_reg = RandomForestRegressor(n_estimators=10, random_state=42)\n", "forest_reg.fit(housing_prepared, housing_labels)\n", "housing_predictions = forest_reg.predict(housing_prepared)\n", "forest_mse = mean_squared_error(housing_labels, housing_predictions)\n", "forest_rmse = np.sqrt(forest_mse)\n", "forest_rmse" ] }, { "cell_type": "code", "execution_count": 22, "metadata": { "scrolled": true }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "점수: [51646.44545909 48940.60114882 53050.86323649 54408.98730149\n", " 50922.14870785 56482.50703987 51864.52025526 49760.85037653\n", " 55434.21627933 53326.10093303]\n", "평균: 52583.72407377466\n", "표준편차: 2298.353351147122\n" ] } ], "source": [ "forest_scores = cross_val_score(forest_reg, housing_prepared, housing_labels,\n", " scoring=\"neg_mean_squared_error\", cv=10)\n", "forest_rmse_scores = np.sqrt(-forest_scores)\n", "display_scores(forest_rmse_scores)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "- 이전 두 모델보다 훌륭함\n", "- 하지만, 훈련 세트에 대한 점수가 검증 세트에 대한 점수보다 낮으므로 이 모델도 과대적합되어 있음\n", "- 과대적합을 해결하는 방법\n", " - 모델을 간단히 함\n", " - 제한을 함(즉, 규제)\n", " - 더 많은 훈련 데이터를 모음\n", "- 하나의 모델을 더 깊이 들어가기 전에, 다양한 모델을 시도해 가능성 있는 2~5개 정도의 모델을 선정하는 것이 좋다." 
] }, { "cell_type": "markdown", "metadata": {}, "source": [ "---\n", "## 2.7 모델 세부 튜닝" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### 2.7.1 그리드 탐색" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "- 사이킷런의 GridSearchCV 사용\n", "- 탐색하고자 하는 하이퍼파라미터와 시도해볼 값을 지정하면 가능한 모든 하이퍼파라미터 조합에 대해 교차 검증을 사용해 평가함" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "- 다음 코드는 RandomForestRegressor에 대한 최적의 하이퍼파라미터 조합을 탐색함" ] }, { "cell_type": "code", "execution_count": 23, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "GridSearchCV(cv=5, error_score='raise-deprecating',\n", " estimator=RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=None,\n", " max_features='auto', max_leaf_nodes=None,\n", " min_impurity_decrease=0.0, min_impurity_split=None,\n", " min_samples_leaf=1, min_samples_split=2,\n", " min_weight_fraction_leaf=0.0, n_estimators='warn', n_jobs=None,\n", " oob_score=False, random_state=42, verbose=0, warm_start=False),\n", " fit_params=None, iid='warn', n_jobs=-1,\n", " param_grid=[{'max_features': [2, 4, 6, 8], 'n_estimators': [3, 10, 30]}, {'max_features': [2, 3, 4], 'bootstrap': [False], 'n_estimators': [3, 10]}],\n", " pre_dispatch='2*n_jobs', refit=True, return_train_score=True,\n", " scoring='neg_mean_squared_error', verbose=0)" ] }, "execution_count": 23, "metadata": {}, "output_type": "execute_result" } ], "source": [ "from sklearn.model_selection import GridSearchCV\n", "\n", "param_grid = [\n", " # 하이퍼파라미터 12(=3×4)개의 조합을 시도합니다.\n", " {'n_estimators': [3, 10, 30], 'max_features': [2, 4, 6, 8]},\n", " # bootstrap은 False로 하고 6(=2×3)개의 조합을 시도합니다.\n", " {'bootstrap': [False], 'n_estimators': [3, 10], 'max_features': [2, 3, 4]},\n", " ]\n", "\n", "forest_reg = RandomForestRegressor(random_state=42)\n", "# 다섯 폴드에서 훈련하면 총 (12+6)*5=90번의 훈련이 일어납니다.\n", "grid_search = GridSearchCV(forest_reg, param_grid, cv=5, scoring='neg_mean_squared_error', \n", " return_train_score=True, n_jobs=-1)\n", "grid_search.fit(housing_prepared, housing_labels)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "최상의 파라미터 조합:" ] }, { "cell_type": "code", "execution_count": 24, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "{'max_features': 8, 'n_estimators': 30}" ] }, "execution_count": 24, "metadata": {}, "output_type": "execute_result" } ], "source": [ "grid_search.best_params_" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "- 8과 30은 탐색 범위의 최댓값이기 때문에 계속 점수가 향상될 가능성이 있으므로 더 큰 값으로 다시 검색해야 함" ] }, { "cell_type": "code", "execution_count": 23, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "{'max_features': 8, 'n_estimators': 80}" ] }, "execution_count": 23, "metadata": {}, "output_type": "execute_result" } ], "source": [ "param_grid = [\n", " # 하이퍼파라미터 12(=3×4)개의 조합을 시도합니다.\n", " {'n_estimators': [30, 70, 80, 90], 'max_features': [7, 8, 9]},\n", " # bootstrap은 False로 하고 6(=2×3)개의 조합을 시도합니다.\n", " {'bootstrap': [False], 'n_estimators': [3, 10], 'max_features': [2, 3, 4]},\n", " ]\n", "\n", "forest_reg = RandomForestRegressor(random_state=42)\n", "# 다섯 폴드에서 훈련하면 총 (12+6)*5=90번의 훈련이 일어납니다.\n", "grid_search = GridSearchCV(forest_reg, param_grid, cv=5, scoring='neg_mean_squared_error', \n", " return_train_score=True, n_jobs=-1)\n", "grid_search.fit(housing_prepared, housing_labels)\n", "\n", "grid_search.best_params_" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "최적의 추정기에 직접 접근할 수도 있음" ] }, { "cell_type": "code", "execution_count": 25, "metadata": {}, "outputs": [ { "data": { "text/plain": [ 
"RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=None,\n", " max_features=8, max_leaf_nodes=None, min_impurity_decrease=0.0,\n", " min_impurity_split=None, min_samples_leaf=1,\n", " min_samples_split=2, min_weight_fraction_leaf=0.0,\n", " n_estimators=30, n_jobs=None, oob_score=False, random_state=42,\n", " verbose=0, warm_start=False)" ] }, "execution_count": 25, "metadata": {}, "output_type": "execute_result" } ], "source": [ "grid_search.best_estimator_" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "그리드서치에서 테스트한 하이퍼파라미터 조합의 점수를 확인합니다:" ] }, { "cell_type": "code", "execution_count": 26, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "63669.05791727153 {'max_features': 2, 'n_estimators': 3}\n", "55627.16171305252 {'max_features': 2, 'n_estimators': 10}\n", "53384.57867637289 {'max_features': 2, 'n_estimators': 30}\n", "60965.99185930139 {'max_features': 4, 'n_estimators': 3}\n", "52740.98248528835 {'max_features': 4, 'n_estimators': 10}\n", "50377.344409590376 {'max_features': 4, 'n_estimators': 30}\n", "58663.84733372485 {'max_features': 6, 'n_estimators': 3}\n", "52006.15355973719 {'max_features': 6, 'n_estimators': 10}\n", "50146.465964159885 {'max_features': 6, 'n_estimators': 30}\n", "57869.25504027614 {'max_features': 8, 'n_estimators': 3}\n", "51711.09443660957 {'max_features': 8, 'n_estimators': 10}\n", "49682.25345942335 {'max_features': 8, 'n_estimators': 30}\n", "62895.088889905004 {'max_features': 2, 'bootstrap': False, 'n_estimators': 3}\n", "54658.14484390074 {'max_features': 2, 'bootstrap': False, 'n_estimators': 10}\n", "59470.399594730654 {'max_features': 3, 'bootstrap': False, 'n_estimators': 3}\n", "52725.01091081235 {'max_features': 3, 'bootstrap': False, 'n_estimators': 10}\n", "57490.612956065226 {'max_features': 4, 'bootstrap': False, 'n_estimators': 3}\n", "51009.51445842374 {'max_features': 4, 'bootstrap': False, 'n_estimators': 10}\n" ] } ], "source": [ "cvres = grid_search.cv_results_\n", "for mean_score, params in zip(cvres[\"mean_test_score\"], cvres[\"params\"]):\n", " print(np.sqrt(-mean_score), params)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "위 예에서는 max_features 하이퍼파라미터가 8, n_estimators 하이퍼파라미터가 80일 때 최적의 솔루션입니다." 
] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### 2.7.2 랜덤 탐색" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "- 그리드 탐색 방법은 비교적 적은 수의 조합을 탐구할 때 좋다.\n", "- 하지만 하이퍼파라미터 **탐색 공간**이 커지면 RandomizedSearchCV를 사용하는 편이 더 좋다.\n", "- RandomizedSearchCV는 각 반복마다 하이퍼파라미터에 임의의 수를 대입하여 지정한 횟수만큼 평가\n", "- 주요 장점\n", " - 랜덤 탐색을 1,000회 반복하도록 실행하면 하이퍼파라미터마다 각기 다른 1,000개의 값을 탐색\n", " - 단순히 반복 횟수를 조절하는 것만드로 하이퍼파라미터 탐색에 투입할 컴퓨팅 자원을 제어할 수 있음" ] }, { "cell_type": "code", "execution_count": 27, "metadata": { "scrolled": true }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "49150.657232934034 {'max_features': 7, 'n_estimators': 180}\n", "51389.85295710133 {'max_features': 5, 'n_estimators': 15}\n", "50796.12045980556 {'max_features': 3, 'n_estimators': 72}\n", "50835.09932039744 {'max_features': 5, 'n_estimators': 21}\n", "49280.90117886215 {'max_features': 7, 'n_estimators': 122}\n", "50774.86679035961 {'max_features': 3, 'n_estimators': 75}\n", "50682.75001237282 {'max_features': 3, 'n_estimators': 88}\n", "49608.94061293652 {'max_features': 5, 'n_estimators': 100}\n", "50473.57642831875 {'max_features': 3, 'n_estimators': 150}\n", "64429.763804893395 {'max_features': 5, 'n_estimators': 2}\n" ] } ], "source": [ "from sklearn.model_selection import RandomizedSearchCV\n", "from scipy.stats import randint\n", "\n", "param_distribs = {\n", " 'n_estimators': randint(low=1, high=200),\n", " 'max_features': randint(low=1, high=8),\n", " }\n", "\n", "forest_reg = RandomForestRegressor(random_state=42)\n", "rnd_search = RandomizedSearchCV(forest_reg, param_distributions=param_distribs,\n", " n_iter=10, cv=5, scoring='neg_mean_squared_error', \n", " random_state=42, n_jobs=-1)\n", "rnd_search.fit(housing_prepared, housing_labels)\n", "\n", "cvres = rnd_search.cv_results_\n", "for mean_score, params in zip(cvres[\"mean_test_score\"], cvres[\"params\"]):\n", " print(np.sqrt(-mean_score), params)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "랜덤 탐색을 10회 반복하여 찾은 최적의 하이퍼파라미터는 max_features가 7, n_estimators가 180입니다." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### 2.7.3 앙상블 방법" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "- 많은 결정 트리를 만들고 그 예측을 평균 내는 결정 트리의 앙상블인 랜덤 포레스트\n", "- 랜덤 포레스트가 결정 트리 하나보다 더 성능이 좋은 것처럼 모델의 그룹이 단일 모델보다 더 나은 성능을 발휘할 때가 많다.\n", "- 이 주제는 7장에서 자세히 살펴봄" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### 2.7.4 최상의 모델과 오차 분석" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "- 최상의 모델을 분석하면 문제에 대한 좋은 통찰을 얻는 경우가 많습니다.\n", "- RandomForestRegressor가 각 특성의 상대적은 중요도를 알려줍니다.\n", "- 이 정보를 바탕으로 덜 중요한 특성들을 제외할 수 있습니다." 
] }, { "cell_type": "code", "execution_count": 31, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array([7.33442355e-02, 6.29090705e-02, 4.11437985e-02, 1.46726854e-02,\n", " 1.41064835e-02, 1.48742809e-02, 1.42575993e-02, 3.66158981e-01,\n", " 5.64191792e-02, 1.08792957e-01, 5.33510773e-02, 1.03114883e-02,\n", " 1.64780994e-01, 6.02803867e-05, 1.96041560e-03, 2.85647464e-03])" ] }, "execution_count": 31, "metadata": {}, "output_type": "execute_result" } ], "source": [ "feature_importances = grid_search.best_estimator_.feature_importances_\n", "feature_importances" ] }, { "cell_type": "code", "execution_count": 32, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "['longitude',\n", " 'latitude',\n", " 'housing_median_age',\n", " 'total_rooms',\n", " 'total_bedrooms',\n", " 'population',\n", " 'households',\n", " 'median_income']" ] }, "execution_count": 32, "metadata": {}, "output_type": "execute_result" } ], "source": [ "num_attribs" ] }, { "cell_type": "code", "execution_count": 33, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "['<1H OCEAN', 'INLAND', 'ISLAND', 'NEAR BAY', 'NEAR OCEAN']" ] }, "execution_count": 33, "metadata": {}, "output_type": "execute_result" } ], "source": [ "cat_one_hot_attribs = list(cat_encoder.categories_[0])\n", "cat_one_hot_attribs" ] }, { "cell_type": "code", "execution_count": 34, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "['longitude',\n", " 'latitude',\n", " 'housing_median_age',\n", " 'total_rooms',\n", " 'total_bedrooms',\n", " 'population',\n", " 'households',\n", " 'median_income',\n", " 'rooms_per_hhold',\n", " 'pop_per_hhold',\n", " 'bedrooms_per_room',\n", " '<1H OCEAN',\n", " 'INLAND',\n", " 'ISLAND',\n", " 'NEAR BAY',\n", " 'NEAR OCEAN']" ] }, "execution_count": 34, "metadata": {}, "output_type": "execute_result" } ], "source": [ "extra_attribs = [\"rooms_per_hhold\", \"pop_per_hhold\", \"bedrooms_per_room\"]\n", "attributes = num_attribs + extra_attribs + cat_one_hot_attribs\n", "attributes" ] }, { "cell_type": "code", "execution_count": 35, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "[(0.3661589806181342, 'median_income'),\n", " (0.1647809935615905, 'INLAND'),\n", " (0.10879295677551573, 'pop_per_hhold'),\n", " (0.07334423551601242, 'longitude'),\n", " (0.0629090704826203, 'latitude'),\n", " (0.05641917918195401, 'rooms_per_hhold'),\n", " (0.05335107734767581, 'bedrooms_per_room'),\n", " (0.041143798478729635, 'housing_median_age'),\n", " (0.014874280890402767, 'population'),\n", " (0.014672685420543237, 'total_rooms'),\n", " (0.014257599323407807, 'households'),\n", " (0.014106483453584102, 'total_bedrooms'),\n", " (0.010311488326303787, '<1H OCEAN'),\n", " (0.002856474637320158, 'NEAR OCEAN'),\n", " (0.00196041559947807, 'NEAR BAY'),\n", " (6.028038672736599e-05, 'ISLAND')]" ] }, "execution_count": 35, "metadata": {}, "output_type": "execute_result" } ], "source": [ "sorted(zip(feature_importances, attributes), reverse=True)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### 2.7.5 테스트 세트로 시스템 평가하기" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "- 어느 정도 모델을 튜닝하여 만족할 만한 모델을 얻게 되면 테스트 세트에서 최종 모델을 평가함" ] }, { "cell_type": "code", "execution_count": 36, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "47730.22690385927" ] }, "execution_count": 36, "metadata": {}, "output_type": "execute_result" } ], "source": [ "final_model = grid_search.best_estimator_\n", "\n", "X_test = strat_test_set.drop(\"median_house_value\", axis=1) # 예측 변수\n", "y_test = 
strat_test_set[\"median_house_value\"].copy() # 레이블\n", "\n", "X_test_prepared = full_pipeline.transform(X_test) # 데이터 변환\n", "final_predictions = final_model.predict(X_test_prepared) # 예측\n", "\n", "# 평가\n", "final_mse = mean_squared_error(y_test, final_predictions)\n", "final_rmse = np.sqrt(final_mse)\n", "final_rmse" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "---\n", "## 2.8 론칭, 모니터링, 그리고 시스템 유지 보수" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "- 이제 제품 시스템에 적용하기 위한 준비를 해야함\n", " - 입력 데이터 소스를 우리 시스템에 연결하고 테스트 코드를 작성\n", " - 일정 간격으로 시스템의 실시간 성능을 체크하고 성능이 떨어졌을 때 알람을 통지할 수 있는 모니터링 코드를 작성\n", " - 시스템의 예측을 샘플링해서 평가함, 이런 과정에는 사람의 분석이 필요\n", " - 시스템의 입력 데이터 품질 평가\n", " - 마지막으로 새로운 데이터를 사용해 정기적으로 모델을 훈련, 가능하면 이 과정은 자동화함" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "---\n", "## 2.10 연습문제" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "#### 1. 서포트 벡터 머신 회귀(sklearn.svm.SVR)를 kernel=“linear”(하이퍼파라미터 C를 바꿔가며)나 kernel=“rbf”(하이퍼파라미터 C와 gamma를 바꿔가며) 등의 다양한 하이퍼파라미터 설정으로 시도해보세요. 지금은 이 하이퍼파라미터가 무엇을 의미하는지 너무 신경 쓰지 마세요. 최상의 SVR 모델은 무엇인가요?" ] }, { "cell_type": "code", "execution_count": 42, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "C:\\Users\\link-Hwang\\Anaconda3\\envs\\mlbook\\lib\\site-packages\\sklearn\\externals\\joblib\\externals\\loky\\process_executor.py:700: UserWarning: A worker stopped while some jobs were given to the executor. This can be caused by a too short worker timeout or by a memory leak.\n", " \"timeout or by a memory leak.\", UserWarning\n" ] }, { "data": { "text/plain": [ "GridSearchCV(cv=5, error_score='raise-deprecating',\n", " estimator=SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.1,\n", " gamma='auto_deprecated', kernel='rbf', max_iter=-1, shrinking=True,\n", " tol=0.001, verbose=False),\n", " fit_params=None, iid='warn', n_jobs=-1,\n", " param_grid=[{'C': [10.0, 30.0, 100.0, 300.0, 1000.0, 3000.0, 10000.0, 30000.0], 'kernel': ['linear']}, {'C': [1.0, 3.0, 10.0, 30.0, 100.0, 300.0, 1000.0], 'gamma': [0.01, 0.03, 0.1, 0.3, 1.0, 3.0], 'kernel': ['rbf']}],\n", " pre_dispatch='2*n_jobs', refit=True, return_train_score='warn',\n", " scoring='neg_mean_squared_error', verbose=0)" ] }, "execution_count": 42, "metadata": {}, "output_type": "execute_result" } ], "source": [ "from sklearn.model_selection import GridSearchCV\n", "from sklearn.svm import SVR\n", "\n", "param_grid = [\n", " {'kernel': ['linear'], 'C': [10., 30., 100., 300., 1000., 3000., 10000., 30000.0]},\n", " {'kernel': ['rbf'], 'C': [1.0, 3.0, 10., 30., 100., 300., 1000.0],\n", " 'gamma': [0.01, 0.03, 0.1, 0.3, 1.0, 3.0]},\n", " ]\n", "\n", "svm_reg = SVR()\n", "grid_search = GridSearchCV(svm_reg, param_grid, cv=5, scoring='neg_mean_squared_error', \n", " verbose=0, n_jobs=-1)\n", "grid_search.fit(housing_prepared, housing_labels)" ] }, { "cell_type": "code", "execution_count": 34, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Fitting 5 folds for each of 50 candidates, totalling 250 fits\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "[Parallel(n_jobs=-1)]: Using backend LokyBackend with 4 concurrent workers.\n", "[Parallel(n_jobs=-1)]: Done 33 tasks | elapsed: 1.9min\n", "C:\\Users\\link-Hwang\\Anaconda3\\lib\\site-packages\\sklearn\\externals\\joblib\\externals\\loky\\process_executor.py:700: UserWarning: A worker stopped while some jobs were given to the executor. 
This can be caused by a too short worker timeout or by a memory leak.\n", " \"timeout or by a memory leak.\", UserWarning\n", "[Parallel(n_jobs=-1)]: Done 154 tasks | elapsed: 9.2min\n", "[Parallel(n_jobs=-1)]: Done 250 out of 250 | elapsed: 14.9min finished\n" ] }, { "data": { "text/plain": [ "GridSearchCV(cv=5, error_score='raise-deprecating',\n", " estimator=SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.1,\n", " gamma='auto_deprecated', kernel='rbf', max_iter=-1, shrinking=True,\n", " tol=0.001, verbose=False),\n", " fit_params=None, iid='warn', n_jobs=-1,\n", " param_grid=[{'kernel': ['linear'], 'C': [10.0, 30.0, 100.0, 300.0, 1000.0, 3000.0, 10000.0, 30000.0]}, {'kernel': ['rbf'], 'C': [1.0, 3.0, 10.0, 30.0, 100.0, 300.0, 1000.0], 'gamma': [0.01, 0.03, 0.1, 0.3, 1.0, 3.0]}],\n", " pre_dispatch='2*n_jobs', refit=True, return_train_score='warn',\n", " scoring='neg_mean_squared_error', verbose=2)" ] }, "execution_count": 34, "metadata": {}, "output_type": "execute_result" } ], "source": [ "from sklearn.model_selection import GridSearchCV\n", "from sklearn.svm import SVR\n", "\n", "param_grid = [\n", " {'kernel': ['linear'], 'C': [10., 30., 100., 300., 1000., 3000., 10000., 30000.0]},\n", " {'kernel': ['rbf'], 'C': [1.0, 3.0, 10., 30., 100., 300., 1000.0],\n", " 'gamma': [0.01, 0.03, 0.1, 0.3, 1.0, 3.0]},\n", " ]\n", "\n", "svm_reg = SVR()\n", "grid_search = GridSearchCV(svm_reg, param_grid, cv=5, scoring='neg_mean_squared_error', \n", " verbose=2, n_jobs=-1)\n", "grid_search.fit(housing_prepared, housing_labels)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "최상 모델의 (5-폴드 교차 검증으로 평가한) 점수는 다음과 같습니다:" ] }, { "cell_type": "code", "execution_count": 38, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "70363.90313964167" ] }, "execution_count": 38, "metadata": {}, "output_type": "execute_result" } ], "source": [ "negative_mse = grid_search.best_score_\n", "rmse = np.sqrt(-negative_mse)\n", "rmse" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "최상의 하이퍼파라미터를 확인해 보겠습니다:" ] }, { "cell_type": "code", "execution_count": 35, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "{'C': 30000.0, 'kernel': 'linear'}" ] }, "execution_count": 35, "metadata": {}, "output_type": "execute_result" } ], "source": [ "grid_search.best_params_" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "---\n", "#### 2. GridSearchCV를 RandomizedSearchCV로 바꿔보세요." ] }, { "cell_type": "code", "execution_count": 37, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Fitting 5 folds for each of 50 candidates, totalling 250 fits\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "[Parallel(n_jobs=-1)]: Using backend LokyBackend with 4 concurrent workers.\n", "[Parallel(n_jobs=-1)]: Done 33 tasks | elapsed: 3.5min\n", "C:\\Users\\link-Hwang\\Anaconda3\\lib\\site-packages\\sklearn\\externals\\joblib\\externals\\loky\\process_executor.py:700: UserWarning: A worker stopped while some jobs were given to the executor. 
This can be caused by a too short worker timeout or by a memory leak.\n", " \"timeout or by a memory leak.\", UserWarning\n", "[Parallel(n_jobs=-1)]: Done 154 tasks | elapsed: 15.7min\n", "[Parallel(n_jobs=-1)]: Done 250 out of 250 | elapsed: 25.5min finished\n" ] }, { "data": { "text/plain": [ "RandomizedSearchCV(cv=5, error_score='raise-deprecating',\n", " estimator=SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.1,\n", " gamma='auto_deprecated', kernel='rbf', max_iter=-1, shrinking=True,\n", " tol=0.001, verbose=False),\n", " fit_params=None, iid='warn', n_iter=50, n_jobs=-1,\n", " param_distributions={'kernel': ['linear', 'rbf'], 'C': , 'gamma': },\n", " pre_dispatch='2*n_jobs', random_state=42, refit=True,\n", " return_train_score='warn', scoring='neg_mean_squared_error',\n", " verbose=2)" ] }, "execution_count": 37, "metadata": {}, "output_type": "execute_result" } ], "source": [ "from scipy.stats import expon\n", "\n", "# 노트: kernel 매개변수가 \"linear\"일 때는 gamma가 무시됩니다.\n", "param_distribs = {\n", " 'kernel': ['linear', 'rbf'],\n", " 'C': randint(low=10000, high=100000),\n", " 'gamma': expon(scale=1.0),\n", " }\n", "\n", "svm_reg = SVR()\n", "rnd_search = RandomizedSearchCV(svm_reg, param_distributions=param_distribs,\n", " n_iter=50, cv=5, scoring='neg_mean_squared_error',\n", " verbose=2, n_jobs=-1, random_state=42)\n", "rnd_search.fit(housing_prepared, housing_labels)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "여기서 사용된 scale=1.0인 지수 분포를 살펴보겠습니다. 일부 샘플은 1.0보다 아주 크거나 작습니다. 하지만 로그 분포를 보면 대부분의 값이 exp(-2)와 exp(+2), 즉 0.1과 7.4 사이에 집중되어 있음을 알 수 있습니다." ] }, { "cell_type": "code", "execution_count": 54, "metadata": { "scrolled": true }, "outputs": [ { "data": { "image/png": "iVBORw0KGgoAAAANSUhEUgAAAmYAAAELCAYAAABzrkqTAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4wLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvqOYd8AAAIABJREFUeJzt3XmcXFWd9/HPV4JBSaLExGgQkhHRaJDExzi4PCyKC5sjL4KahyibEtBBx3FBRoMEkBlQcVwQNBhkV8AJoEZQURadcYtLohkQCRDZIp3QhHQgCeLv+eOcgttFVXd1d3XX7arv+/WqV1fdc5dzq26f+tXZriICMzMzM2u9p7U6A2ZmZmaWODAzMzMzKwkHZmZmZmYl4cDMzMzMrCQcmJmZmZmVhAMzMzMzs5JwYNYBJPVIemED602XFJLGNLjfCyR9Oj/fU9KfhprXwr6vlXREfn6kpJ81cd/zJf2wWfur2vdYSf8r6XnDtP99JN0zHPtuBkmfl3Rcq/NhViTpfZL+msvC5zSwfp9lTrF8GmK+7pL0xvz8E5K+PtR9Fvb9RLlfLKubtO+vSjqpWfuz3jo+MMv/GI/mi7jyOLvV+RosSTdKem9xWUSMi4g7hvO4EfHTiHhJf+tJWiTpkgb2t39EXDjUfNUKNiPi0oh481D3XccC4OaIWDtM+286SbtJ+oGkdZL6ndhQ0mxJv5H0SP47u5D8WeCTkp4+fDm20aQYfLTo+NsCnwfenMvC9VXpA/pBCs0rn6r2+e8R8d7+1qtVxtfZX1PK/VpBakQcFxGnDXXfVlvHB2bZW/NFXHkc3+oMdSolo/m6PBa4uNWZGKDHgCuA9/S3Yg64rgEuAXYALgSuqQRiEXE/cCvwT8OWW7OBmQJsB6xqdUZGwkACTCun0fwFOOwknSvp24XXZ0r6cQ4e9pF0T65+Xpd/Fc4vrPssSRdJ6pK0RtLCSsBR+QUi6XOSuiXdKWn/qm2XSLpf0r2SPi1pm/62lXQ6sCdwdrHmL/8afFF+fqCk30l6WNLdkhYN4P14haTfStoo6XJSYVdJ69XEJunjOe8bJf1J0r6S9gM+Abwz529FXvdGSadL+m/gEeCFNX4VStKXJW2QdKukfQsJvX6RV9XK3Zz/PpSP+ZrqX4CSXivp13nfv5b02kLajZJOk/Tf+Vx+KGlSnfdnZ2AX4JeFZQcoNW1uzO/HRwtpb5P0+/xZrM7vD5KOknRL3uYOScf28ZlMlfRf+Tq7U9IH661bT0T8KSKW0NgX1z7AGOALEbElIr4ECHhDYZ0bgQMHmg/rPJKOkXS7pAclfUfS1ELam3PZsUHSOZJuUp2aIqUuBF+QdF9+fCEvezFQ6WLxkKSf1Nj8KWVEYb/1yugnyidJL8p526D0XXB5H+f7bqXvg/WSPlmV9kS5JWk7SZfk9R7K5dKUfsr4f5b0Z+DPhWUvKhxikqQf5XLlJknT8npPqTGsnJ+klwJfBV6Tj/dQTu/VNNrP5xiSjpP05/xefkWS6r1H5sCsPx8Bds9f5HuSahSOiCfvY/U8YBKwI3AEsFhSpTnvy8CzgBcCewOHA0cV9r0HqcCYBHwGWFK4WC8E/ga8CHgF8Gbgvf1tGxGfBH4KHN9Hzd+mnJdnk7483yfp4P7eCKUakatJtUETgSuBuXXWfQlwPPCqiBgPvAW4KyKuA/4duDznb1Zhs3eTmgHHA2tq7HYP4I58zicDSyVN7C/fwF7577PzMX9eldeJwDLgS8BzSE0ey9S7H8phpM/uucDTgY9S28uBOyLib4VlS4Bj8/uwG/CTfNx/B
C4CPkb6LPYC7srbPAAcBEzIx/1PSf+n+mBKgf53gRWka3Bf4EOS3pLTD8uFer3HznXftfpmAisL/wMAK/PyiluAWZj1QdIbgP8A3gE8n/R//62cNgn4NvBvpP/LPwGvrb0nAD4JvBqYTbr2/hFYGBG38eS1+eyIeEONbeuVEX2V0UWnAT8k1SC/gFT21zrflwHnksq6qfm8XlDnfI4gfX/slNc7Dni0nzL+4Jznl9XZ5/yc10nA74FL66z3hIi4JR/75/l4z65xXnU/x4KDgFeRPpt3kL4TrA4HZsnVVV9YxwBExCPAu0hf1pcAH4iI6o7XJ+Wag5tIX/DvUKrdeifwbxGxMSLuAs4i/UNWrImI8yLicVIg9nxgiqQpwP7AhyJiU0Q8APwnMK+/bRs50Yi4MSL+EBF/j4iVwDdJgWN/Xg1sS6opeSwivg38us66jwNjgZdJ2jYi7oqI1f3s/4KIWBURf4uIx2qkP1A49uWkArMZtTIHAn+OiIvzsb9Jaop7a2Gdb0TEbRHxKKnJb3atHZECrI1Vyx4jvQ8TIqI7In6bl78HOD8ifpQ/i3sj4laAiFgWEasjuYlU6O9Z43ivAiZHxKkRsTX3JzmPfK1ExGUR8ew+Hn8Z6JsFjAM2VC3bQAqoKzbm98KsL/NJ/wO/jYgtpCDsNZKmAwcAqyJiaf6h8yWgr36b84FTI+KBiOgCTqF3eTsYjZazjwHTgKkRsTki6g0aOBT4XkTcnM/3JODvddZ9jBSQvSgiHo+I30TEw/3k9z8i4sFcTtWyrHDsT5Le65362Wcj+vocK86IiIdymXMD9ctQw4FZxcFVX1jnVRIi4lekmhqRvpSLuiNiU+H1GtIvoUmkmpU1VWk7Fl4/UcjkABDSl940UgB0fyVQBL5Gqq3pb9t+SdpD0g1KTV8bSL+GajbNVZkK3FtVU1KrZouIuB34ELAIeEDSt4pV23Xc3U96rWP3t89GTOWp51H3syI1tdZ7r7vpHaBAqlU8AFiTmw8qzSQ7ATWDVUn7S/pFbhZ4KG9f6zOaBkwt/qggNRU3FKQPUg+pJq9oAr0D0vHAQ8OYB2sPvf73IqIHWE/635tKoUzI//t9jUau/j9uRvnQaDl7Aun74VeSVkk6uo88Fs9pE+l8a7kY+AHwLaWm2c8oDWLoS39laPHYPcCDDEMZWvU5VjRahhoOzPol6Z9JtT/3kf4Bi3aQtH3h9c55vXU8+SuqmHZvA4e8G9gCTCoEihMiYmZ/G2b9jaq7DPgOsFNEPIvUf6CR9v77gR2rqvLrNoXl2pr/S3oPAjizn/z1l+9ax74vP98EPLOQVpyqor/93kfvz6my70Y+q2orSf3jiiNAfx0RbyMF1lfzZHB/N6k/Wi+SxgL/BXwOmJKbDr5P7c/obuDOqh8V4yPigLyv+eo92rj6MZimzFWk5v1ifnand/+0l5KaV8360ut/L5elzyH9791PoZkvX2/1mv2esi96lw/96Xckcp8bR6yNiGMiYipp8M85VX27Ku4n/SADQNIzSedba5+PRcQpEfEyUhPuQaQuKH3lt7/zKB57HKlLyn2k8hOaVIZWfY42CA7M+qDUcfTTpObMdwMnqPfUAACnSHp67oN2EHBlrvq+Ajhd0vjcyfLDpObQPkUa1fZD4CxJEyQ9TdIukhppbgT4K6lfWz3jgQcjYnPu53RYg/v9Oanf2wcljZF0CKkfx1NIeomkN+QgYzPwKKl5s5K/6Rr4yMvn5mNvK+ntpC//7+e03wPzctocUpNBRRepuaDee/J94MW5P9YYSe8k9dH43gDzR27m/jP5fcnXxXxJz8rNsw/z5PuwBDhKaVDE0yTtKGkGqaZ1bM7335Q6HNeb2uNXwMNKAy2eIWkbpakvXpXzc2n0Hm1c/fhLzqckbZePXel4PLbOMW/M5/BBpc7VlT4uxU7VewPXDujNs3a3bb6uKo8xpB+JRylNvzKW1P/0l5G6fiwDXi7p4LzuP9M7WKj2TWChpMm5f9qnaKC8zforI/ok6e2SKkFjNymQebzGqt8GDpL0f5X67J5Kne9gSa+X9PLcLeZh0g/9Yhk6mLweUDj2aaT3+u7c9Hsv8K5chhxN7x+NfwVeoPpT4PT1OdogODBLvltVk3BVLgwuAc6MiBUR8WdSM9HFhS+ttaR/xPtIHSmPq/QTAj5A+iVyB/Az0sV7foP5OZz0Jfm/ef/fJvVvaMQXgUOVRr98qUb6+4FTJW0kFV7VzbM1RcRW4BDgyJyndwJL66w+FjiDVHO4lhRUfSKnXZn/rpf02xrb1vNLYNe8z9OBQ+PJ+YhOIhUk3aS+JZcV8v1IXv+/c3Pfq6vOaz0poP4Iqfr9BOCgiFg3gLwVfY3efVveDdwl6WFSs/G78nF/Re7YT+qjdRMwLSI2Ah8kfS7dpMD5O7UOlH8AvJXUX+NO0nvzdVKn4YGYRgqeK7Vej/LkSLbKZJqfyMfcSupkfDipufJoUleArXnd55MC26sHmAdrb98nXVeVx6KI+DHpf/e/SLVJu/Bk/8h1wNtJne7Xk66p5aTWhFo+ndNXAn8AfpuX9au/MqIBrwJ+KamH9L/6LxFxZ43jrCIFmJeRzreb+s2zzyOV+w+TBtPcxJOBZn9lfD2XkQZOPQi8ktQ3rOIY0kCk9aTBEv9TSPsJqWxYK+kp5WJfn6MNjnp327FGSdoHuCQi+qpetw6Tg/bfAfvm2s+OIuksYHVEnNPqvFj7yDXs9wDzI+KGVufHbDh5IjqzJsqjkuoNV297EfGRVufB2oPStC+/JNWwfYzUz/IXLc2U2QhwU6aZmZXRa0gjl9eRmuwP7mMqCLO24aZMMzMzs5JwjZmZmZlZSTTUxywPiT+SdMuZb0bEkYW0Z5LmXHoHaWLUFRGxV04TaXRe5XZCS4CPVyYKzVNPLCFNfXAL8J6I+H1feZk0aVJMnz69sbMzs7bwm9/8Zl1ETG51PprBZZhZZxlo+dVo5//7SEOP3wI8oyptcd7PS0nDcIvzfC0gDa2fRZrb5Uek6SO+mudEuQb4AnAOaWK+ayTtWhl6X8v06dNZvnx5g9k2s3YgqeZdJkYjl2FmnWWg5VdDTZn5fmVXU3X7CKWbVf8TsCAiuir39CqscgRwVkTcExH3ku4XeWRO24cU0H0h0r0mv0QadVPrJrNmZmZmbW+ofcz2IN0j6xRJ6yT9QdLcQvpMet+aZUVeVklbWXX/w5WF9CdIWiBpuaTlXV1dQ8yymZmZWTkNNTB7AbAbaebyqcDxwIWSXprTx+W0ig3AuNz3rDqtkl59E2giYnFEzImIOZMnt0U3EzMzM7OnGGpg9ijpHl6fjoitEXETcANP3tuvB5hQWH8C0JNryarTKukbh5gnM7MnSDo+17hvkXRBVdq+km6V9IikG/J9bStpYyWdL+lhSWslfbjRbc3MBmuogdnKftJXkTr+V8ziyfvxrQJ2z7VnFbsX0s3MmqEyeKnXvWrzza6Xku7zN5F0r8XLC6ssIt2fdRrweuAESfs1uK2Z2aA0FJhJGiNpO2AbYBtJ2+WbfN8M/AX4
t7zO60id+n+QN70I+LCkHSVNJd0o+oKcdiPwOPDB/Mv0+Lz8J0M/LTOzpN7gJeAQYFVEXBkRm0mB2CxJM3L64cBpEdEdEbcA5/Hk4KX+tjUzG5RGa8wWkpotTwTelZ8vjIjHgLcBB5D6h50HHB4Rt+btvgZ8F/gD8EdgWV5GnhLjYFLh9xBwNOmWG3WnyjAza6Jeg5MiYhPpFkAzJe1A6jfb1+ClmtvWOpAHMJlZoxqaxywiFpF+EdZKW0W6p1mttABOyI9a6b8DXtlIHszMmmwcUB0lVQYgjSu8rk7rb9uniIjFpDkfmTNnju+DZ2Z1+ZZMZtap+hqA1FN4XZ3W37ZmZoPW6Mz/o9b0E5c9ZdldZxzYgpyYWcmsIk2CDYCk7YFdSH3HuiXdTxqw9KO8SvXgpZrbjkC+zTpKp32Pu8bMzNpaH4OXrgJ2kzQ3p3+KNOl1pY/sRcBCSTvkTv3H8OTgpf62NTMblLavMTOzjrcQOLnw+l3AKRGxKN+p5GzgEuCXwLzCeicD55LubvIocGZEXAcQEV39bGtmg1CrdqzTODAzs7bWz+Cl64GaU1xExBbSaPGjB7qtmdlguSnTzMzMrCQcmJmZmZmVhAMzMzMzs5JwYGZmZmZWEg7MzMzMzErCgZmZmZlZSTgwMzMzMysJB2ZmZmZmJeHAzMzMzKwkPPO/mZmZtYRvwfRUrjEzMzMzKwkHZmZmZmYl0VBgJul4ScslbZF0QZ11TpYUkt5YWDZW0vmSHpa0VtKHq7bZV9Ktkh6RdIOkaUM6GzMzM7NRrNEas/uATwPn10qUtAtwKHB/VdIiYFdgGvB64ARJ++VtJgFLgZOAicBy4PKBZd/MzMysfTQUmEXE0oi4GlhfZ5WzgY8DW6uWHw6cFhHdEXELcB5wZE47BFgVEVdGxGZSEDdL0oyBnYKZmZlZexhyHzNJbwe2RsT3q5bvAEwFVhQWrwBm5uczi2kRsQlYXUgv7mtBbkpd3tXVNdQsm5mZmZXSkAIzSeOAfwc+VCN5XP67obBsAzC+kL6B3orpT4iIxRExJyLmTJ48eShZNjMzMyutodaYnQJcHBF31kjryX8nFJZNADYW0ifQWzHdzMzMrKMMNTDbF/hgHnG5FtgJuELSxyOimzQYYFZh/VnAqvx8VTFN0vbALoV0MzMzs47S6HQZYyRtB2wDbCNpO0ljSIHZbsDs/LgPOBb4St70ImChpB1yp/5jgAty2lXAbpLm5n1/ClgZEbc259TMzMzMRpdGa8wWAo8CJwLvys8XRsT6iFhbeQCPA90RUWnGPJnUoX8NcBPw2Yi4DiAiuoC5wOlAN7AHMK85p2VmZmY2+jR0r8yIWESazqK/9aZXvd4CHJ0ftda/HvD0GGZmZmb4lkxmZmZmpeHAzMzMzKwkGmrKNDMzMyuL6Scu6/X6rjMObFFOms81ZmZmZmYl4cDMzMzMrCQ6simznatAzczMbPRyjZmZmZlZSTgwMzMzMyuJjmzKNDMzs5FV3Y3IanONmZmZmVlJODAzs44mabqk70vqlrRW0tmSxuS02ZJ+I+mR/Hd2YTtJOlPS+vz4jCS17kzMrB04MDOzTncO8ADwfGA2sDfwfklPB64BLgF2AC4ErsnLARYABwOzgN2Bg4BjRzbrZtZuHJiZWaf7B+CKiNgcEWuB64CZwD6kfrhfiIgtEfElQMAb8nZHAGdFxD0RcS9wFnDkSGfezNqLAzMz63RfBOZJeqakHYH9eTI4WxkRUVh3ZV5O/ruikLaikNaLpAWSlkta3tXV1fQTMLP24cDMzDrdTaSA6mHgHmA5cDUwDthQte4GYHx+Xp2+ARhXq59ZRCyOiDkRMWfy5MlNzr6ZtRMHZmbWsSQ9DfgBsBTYHphE6k92JtADTKjaZAKwMT+vTp8A9FTVsJmZDUhDgZmk43M1/BZJFxSWv1rSjyQ9KKlL0pWSnl9I73PUUl8jnszMRsBEYCfg7NyPbD3wDeAAYBWwe1UN2O55OfnvrELarEKamY2g6Scue8pjtGq0xuw+4NPA+VXLdwAWA9OBaaRfkt8opNcdtdTAiCczs2EVEeuAO4H3SRoj6dmkTv0rgBuBx4EPShor6fi82U/y34uAD0vaUdJU4CPABSOZfzNrPw0FZhGxNCKuBtZXLb82Iq6MiIcj4hHgbOB1hVX6GrW0D32PeDIzGwmHAPsBXcDtwN+Af42IraQflocDDwFHAwfn5QBfA74L/AH4I7AsLzMzG7Rm35JpL3pX5fc1aqmvEU/XNTlfZmY1RcTvST8Ua6X9DnhlnbQATsgPM7OmaFpgJml34FPA2wqL+xq11N+Ip+K+F5CaRdl5552blWUzMzOzUmnKqExJLwKuBf4lIn5aSOpr1FJ/I56e4KHmZmZm1gmGHJhJmgZcD5wWERdXJfc1aqm/EU9mZmZmHaXR6TLGSNoO2AbYRtJ2edmOpBFKX4mIr9bYtK9RSzfS94gnMzMzs47SaB+zhcDJhdfvAk4BAnghcLKkJ9IjYlx++rWc/of8+ut5GRGxVdLBedkZwC30HvFkZmZm1lEaCswiYhGwqE7yKX1s1+eopb5GPJmZmZl1Gt+SyczMzKwkHJiZmZmZlYQDMzMzM7OScGBmZmZmVhIOzMzMzMxKwoGZmZmZWUk4MDMzMzMrCQdmZmZmZiXhwMzMzMysJByYmZmZmZVEo/fKbGvTT1z2lGV3nXFgC3JiZmZmncw1ZmZmZmYl4cDMzMzMrCTclGlmZmZNV6ubkPXPNWZmZmZmJeHAzMzMzKwkHJiZmZmZlURDgZmk4yUtl7RF0gVVaftKulXSI5JukDStkDZW0vmSHpa0VtKHG93WzMzMrNM0WmN2H/Bp4PziQkmTgKXAScBEYDlweWGVRcCuwDTg9cAJkvZrcFszMzOzjtJQYBYRSyPiamB9VdIhwKqIuDIiNpMCsVmSZuT0w4HTIqI7Im4BzgOObHBbMzMzs44y1D5mM4EVlRcRsQlYDcyUtAMwtZien8/sb9vqg0hakJtSl3d1dQ0xy2ZmZmblNNTAbBywoWrZBmB8TqMqvZLW37a9RMTiiJgTEXMmT548xCybmZmZldNQA7MeYELVsgnAxpxGVXolrb9tzczMzDrOUAOzVcCsygtJ2wO7kPqOdQP3F9Pz81X9bTvEPJmZmZmNSo1OlzFG0nbANsA2kraTNAa4CthN0tyc/ilgZUTcmje9CFgoaYfcqf8Y4IKc1t+2ZmZmZh2l0RqzhcCjwInAu/LzhRHRBcwFTge6gT2AeYXtTiZ16F8D3AR8NiKuA2hgWzMzM7OO0uh0GYsiQlWPRTnt+oiYERHPiIh9IuKuwnZbIuLoiJgQEVMi4vNV+627rZnZSJE0T9ItkjZJWi1pz7x80BNom5kNhm/JZGYdTdKbgDOBo0ijwvcC7hjKBNpmZoM1ptUZKKvpJy7r9fquMw5sUU7MbJidApwaEb/Ir++FNH8ieRLs/HoRsE7SjNwX9nDgqDzQqVtSZQLt60Y4/2bWRlxjZmYdS9I2wBxgsqTbJd0j6WxJz2BoE2hXH8eTZJt
ZQ1xjZmadbAqwLXAosCfwGHANacDTOKA6imp0Au1eImIxsBhgzpw50aS8m5VGdSuTDZ5rzMyskz2a/345Iu6PiHXA54EDGNoE2mZmg+LAzMw6Vu4fdg9QqxZrKBNom5kNigMzM+t03wA+IOm5ue/Yh4DvMbQJtM3MBsWBmZl1utOAXwO3AbcAvwNOH8oE2mZmg+XO/2bW0SLiMeD9+VGddj0wo852W4Cj88PMrClcY2ZmZmZWEg7MzMzMzErCgZmZmZlZSTgwMzMzMysJB2ZmZmZmJeHAzMzMzKwkHJiZmZmZlYTnMTMzM7O2U31j9bvOOLBFORmYpgRmkqYD5wCvAbYA3wY+FBF/kzQbWAK8lDSr9nsi4vd5OwFnAO/Nu1oCfDwiat23rqWqP2AYPR+ymZmZjQ7Naso8B3gAeD4wG9gbeL+kpwPXAJcAOwAXAtfk5QALgINJN//dHTgIOLZJeTIzMzMbVZoVmP0DcEVEbI6ItcB1wExgH1Kt3BciYktEfAkQ8Ia83RHAWRFxT0TcC5wFHNmkPJmZmZmNKs0KzL4IzJP0TEk7AvvzZHC2sqppcmVeTv67opC2opD2BEkLJC2XtLyrq6tJWTYzMzMrl2YFZjeRAqqHgXuA5cDVwDhgQ9W6G4Dx+Xl1+gZgXO579oSIWBwRcyJizuTJk5uUZTMzM7NyGXJgJulpwA+ApcD2wCRSf7IzgR5gQtUmE4CN+Xl1+gSgp4yd/83MzMyGWzNqzCYCOwFn535k64FvAAcAq4Ddq2rAds/LyX9nFdJmFdLMzMzMOsqQA7OIWAfcCbxP0hhJzyZ16l8B3Ag8DnxQ0lhJx+fNfpL/XgR8WNKOkqYCHwEuGGqezMzMzEajZvUxOwTYD+gCbgf+BvxrRGwlTYdxOPAQcDRwcF4O8DXgu8AfgD8Cy/IyMzMzs47TlAlm84Sx+9RJ+x3wyjppAZyQH2ZmZmYdzffKNDMzMysJB2ZmZmZmJeHAzMzMzKwkHJiZmZmZlURTOv+bmZlZZ5h+4rJWZ6GtucbMzMzMrCQcmJmZmZmVhAMzMzMzs5JwYGZmZmZWEg7MzMzMzErCgZmZmZlZSXi6jCGoNWT4rjMObEFOzMzMrB24xszMzMysJByYmZmZmZWEAzMzM0DSrpI2S7qksOwwSWskbZJ0taSJhbSJkq7KaWskHdaanJtZO3FgZmaWfAX4deWFpJnA14B3A1OAR4BzqtbfmtPmA+fmbczMBs2BmZl1PEnzgIeAHxcWzwe+GxE3R0QPcBJwiKTxkrYH5gInRURPRPwM+A4piDMzG7SmBWaS5km6JVfrr5a0Z16+r6RbJT0i6QZJ0wrbjJV0vqSHJa2V9OFm5cfMrBGSJgCnAh+pSpoJrKi8iIjVpBqyF+fH4xFxW2H9FXmbWsdYIGm5pOVdXV3NzL6ZtZmmBGaS3gScCRwFjAf2Au6QNAlYSvqlORFYDlxe2HQRsCswDXg9cIKk/ZqRJzOzBp0GLImIu6uWjwM2VC3bQCrj+kp7iohYHBFzImLO5MmTm5BlM2tXzZrH7BTg1Ij4RX59L6RficCqiLgyv14ErJM0IyJuBQ4HjoqIbqBb0nnAkcB1TcqXmVldkmYDbwReUSO5B5hQtWwCsBH4ex9pZmaDNuTATNI2wBzgO5JuB7YDrgY+xlObAjZJWg3MlPRXYGoxPT8/uMYxFgALAHbeeeehZtnMrGIfYDrwF0mQasK2kfQy0g/EWZUVJb0QGAvcRgrMxkjaNSL+nFeZBawasZybWVtqRlPmFGBb4FBgT2A26dfnQvpvCqAqvWZTgJsBzGyYLAZ2IZVbs4GvAsuAtwCXAm+VtGfu7H8qsDQiNkbEJlI3jVMlbS/pdcDbgItbcRJm1j6a0ZT5aP775Yi4H0DS50mB2c3Ur+7vKbzeXJVmZjbsIuIR0jQYAEjqATZHRBfQJek4UoD2HOB6Uj/aivcD5wMPAOuB90WEa8ys7dS6/eBoNFpuozjkwCwiuiXdA0SN5FXAEZUX+VfnLqR+Z92S7idV//8or+KmADNrmYhYVPX6MuCyOus+SI2uF2ZmQ9Gszv/fAD4g6TrgMeBDwPeAq4DPSppLah74FLAyd/wHuAhYKGk5qUn0GHr/Ih11qiPyMkbjZmZmVk7NmsfsNNKM2bcBtwC/A07PzQFzgdOBbmAPYF7aQRZnAAAPA0lEQVRhu5OB1cAa4CbgsxHhEZlmZmbWkZpSYxYRj5H6W7y/Rtr1wIw6220Bjs4PMzMzs47mWzKZmZmZlYQDMzMzM7OScGBmZmZmVhIOzMzMzMxKwoGZmZmZWUk4MDMzMzMrCQdmZmZmZiXRrJn/zczMbJRrl/tijmauMTMzMzMrCQdmZmZmZiXhwMzMzMysJNzHbJjVaq+/64wDW5ATMzMzKzvXmJmZmZmVhAMzMzMzs5JwYGZmZmZWEg7MzMzMzErCgZmZmZlZSTQ1MJO0q6TNki4pLDtM0hpJmyRdLWliIW2ipKty2hpJhzUzP2ZmZmajSbNrzL4C/LryQtJM4GvAu4EpwCPAOVXrb81p84Fz8zZmZmZmHadpgZmkecBDwI8Li+cD342ImyOiBzgJOETSeEnbA3OBkyKiJyJ+BnyHFMSZmZmZdZymBGaSJgCnAh+pSpoJrKi8iIjVpBqyF+fH4xFxW2H9FXmb6v0vkLRc0vKurq5mZNnMzMysdJpVY3YasCQi7q5aPg7YULVsAzC+n7ReImJxRMyJiDmTJ09uUpbNzMzMymXIt2SSNBt4I/CKGsk9wISqZROAjcDf+0hra9W3afItmszMzAyac6/MfYDpwF8kQaoJ20bSy4DrgFmVFSW9EBgL3EYKzMZI2jUi/pxXmQWsakKezMzMzEadZgRmi4FvFV5/lBSovQ94LvBzSXsCvyX1Q1saERsBJC0FTpX0XmA28DbgtU3Ik5mZmdmoM+TALCIeIU2DAYCkHmBzRHQBXZKOAy4FngNcDxxV2Pz9wPnAA8B64H0R4RozMzOzEVDdtcZarxk1Zr1ExKKq15cBl9VZ90Hg4GbnwczMzGw08i2ZzMzMzErCgZmZmZlZSTS9KdPMbLSQNJZ0m7g3AhOB24FPRMS1OX1f0q3jdgZ+CRwZEWsK254LHErqZ/uZiPj8iJ+EWYPcn2x0cGBWArX+WTy3mdmIGAPcDewN/AU4ALhC0stJ8zAuBd4LfJc0kfblwKvztouAXYFpwPOAGyT9b0RcN5InYGbtxYGZmXWsiNhECrAqvifpTuCVpJHkqyLiSgBJi4B1kmZExK3A4cBREdENdEs6DziSNH+jmdmgODArKdeimY08SVNI9/FdRZqLsXiv302SVgMzJf0VmFpMz89rjjKXtABYALDzzjsPT+bNrC2487+ZGSBpW9KcixfmGrH+7vVLVXrNe/2C7/drZo1zYGZmHU/S04CLga3A8XlxX/f67Sm8rk4zMxs0B2Zm1tGUbvK7BJgCzI2Ix3LSKnrf63d7YBdSv7Nu4P5iOr7Xr5k1gQMzM+t05wIvBd4aEY8Wll8F7CZprqTtgE8BK3MzJ8BFwE
... (remainder of the base64-encoded PNG output omitted; it rendered the two histograms produced by this cell's source below) ...\n", "text/plain": [ "" ] }, "metadata": { "needs_background": "light" }, "output_type": "display_data" } ], "source": [ "from scipy.stats import expon, reciprocal\n", "\n", "expon_distrib = expon(scale=1.)\n", "samples = expon_distrib.rvs(10000, random_state=42)  # generate 10,000 random samples\n", "plt.figure(figsize=(10, 4))  # width and height of the figure\n", "plt.subplot(121)  # several plots in one figure: first slot of a 1-row, 2-column grid\n", "plt.title(\"Exponential distribution (scale=1.0)\")\n", "plt.hist(samples, bins=50)  # number of bins to group the data into\n", "plt.subplot(122)\n", "plt.title(\"Log of this distribution\")\n", "plt.hist(np.log(samples), bins=50)\n", "plt.show()" ] },
{ "cell_type": "markdown", "metadata": {}, "source": [ "The best model's score (evaluated with 5-fold cross-validation) is as follows:" ] },
{ "cell_type": "code", "execution_count": 39, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "55325.2999140845" ] }, "execution_count": 39, "metadata": {}, "output_type": "execute_result" } ], "source": [ "negative_mse = rnd_search.best_score_\n", "rmse = np.sqrt(-negative_mse)\n", "rmse" ] },
{ "cell_type": "markdown", "metadata": {}, "source": [ "Let's check the best hyperparameters found:" ] },
{ "cell_type": "code", "execution_count": 40, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "{'C': 99475, 'gamma': 0.37354658165762367, 'kernel': 'rbf'}" ] }, "execution_count": 40, "metadata": {}, "output_type": "execute_result" } ], "source": [ "rnd_search.best_params_" ] },
{ "cell_type": "markdown", "metadata": {}, "source": [ "---\n", "#### 3. Try adding a transformer to the preparation pipeline that selects only the most important features." ] },
{ "cell_type": "markdown", "metadata": {}, "source": [ "To build a transformer, write a Python class that implements the fit(), transform(), and fit_transform() methods. The last one comes for free if you inherit from TransformerMixin. Inheriting from BaseEstimator additionally gives you the two methods needed for hyperparameter tuning, get_params() and set_params(). Because get_params() and set_params() are required by scikit-learn's pipelines and grid searches, every estimator and transformer should inherit from BaseEstimator." ] },
{ "cell_type": "code", "execution_count": 42, "metadata": {}, "outputs": [], "source": [ "from sklearn.base import BaseEstimator, TransformerMixin\n", "\n", "def indices_of_top_k(arr, k):\n", "    return np.sort(np.argpartition(np.array(arr), -k)[-k:])\n", "\n", "class TopFeatureSelector(BaseEstimator, TransformerMixin):\n", "    def __init__(self, feature_importances, k):\n", "        self.feature_importances = feature_importances\n", "        self.k = k\n", "    def fit(self, X, y=None):\n", "        self.feature_indices_ = indices_of_top_k(self.feature_importances, self.k)\n", "        return self\n", "    def transform(self, X):\n", "        return X[:, self.feature_indices_]" ] },
{ "cell_type": "markdown", "metadata": {}, "source": [ "This feature selection class assumes that the feature importances have already been computed somehow; one possible way is sketched in the next cell." ] },
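{ "cell_type": "markdown", "metadata": {}, "source": [ "For illustration only (this cell is not part of the original solution): the importances could come, for example, from a random forest fitted on the prepared training set. The names housing_prepared and housing_labels below are assumed to be the prepared feature matrix and labels built earlier in this notebook, and the result is stored as rf_feature_importances so it does not overwrite the feature_importances array used by the following cells. The snippet also shows that, thanks to BaseEstimator, get_params() and set_params() work on TopFeatureSelector without any extra code." ] },
{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Illustrative sketch -- assumes housing_prepared and housing_labels exist from earlier in this notebook\n", "from sklearn.ensemble import RandomForestRegressor\n", "\n", "forest_reg = RandomForestRegressor(n_estimators=10, random_state=42)\n", "forest_reg.fit(housing_prepared, housing_labels)\n", "rf_feature_importances = forest_reg.feature_importances_  # one possible source of feature importances\n", "\n", "# BaseEstimator provides get_params()/set_params() for free:\n", "selector = TopFeatureSelector(rf_feature_importances, k=5)\n", "print(selector.get_params())  # shows {'feature_importances': ..., 'k': 5}\n", "selector.set_params(k=3)      # hyperparameter tuning (e.g. grid search) relies on this" ] },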
{ "cell_type": "code", "execution_count": 66, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array([7, 3, 8, 4], dtype=int64)" ] }, "execution_count": 66, "metadata": {}, "output_type": "execute_result" } ], "source": [ "np.argpartition([1,3,5,7,9,2,4,6,8], -4)[-4:]  # indices of the top 4 values (in no particular order)" ] },
{ "cell_type": "code", "execution_count": 47, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array([ 0,  1,  7,  9, 12], dtype=int64)" ] }, "execution_count": 47, "metadata": {}, "output_type": "execute_result" } ], "source": [ "k = 5  # number of features to select\n", "\n", "# indices of the top k features\n", "top_k_feature_indices = indices_of_top_k(feature_importances, k)\n", "top_k_feature_indices" ] },
{ "cell_type": "code", "execution_count": 48, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array(['longitude', 'latitude', 'median_income', 'pop_per_hhold',\n", "       'INLAND'], dtype='