{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# DecisionTreeEncoder\n", "\n", "The DecisionTreeEncoder() encodes categorical variables with predictions of a decision tree model.\n" ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [], "source": [ "import pandas as pd\n", "import numpy as np\n", "import matplotlib.pyplot as plt\n", "\n", "from sklearn.model_selection import train_test_split\n", "from feature_engine.encoding import DecisionTreeEncoder" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "# Load titanic dataset from OpenML\n", "\n", "def load_titanic():\n", " data = pd.read_csv('https://www.openml.org/data/get_csv/16826755/phpMYEkMl')\n", " data = data.replace('?', np.nan)\n", " data['cabin'] = data['cabin'].astype(str).str[0]\n", " data['pclass'] = data['pclass'].astype('O')\n", " data['age'] = data['age'].astype('float')\n", " data['fare'] = data['fare'].astype('float')\n", " data['embarked'].fillna('C', inplace=True)\n", " data.drop(labels=['boat', 'body', 'home.dest'], axis=1, inplace=True)\n", " return data" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
" ], "text/plain": [ " pclass survived name sex \\\n", "0 1 1 Allen, Miss. Elisabeth Walton female \n", "1 1 1 Allison, Master. Hudson Trevor male \n", "2 1 0 Allison, Miss. Helen Loraine female \n", "3 1 0 Allison, Mr. Hudson Joshua Creighton male \n", "4 1 0 Allison, Mrs. Hudson J C (Bessie Waldo Daniels) female \n", "\n", " age sibsp parch ticket fare cabin embarked \n", "0 29.0000 0 0 24160 211.3375 B S \n", "1 0.9167 1 2 113781 151.5500 C S \n", "2 2.0000 1 2 113781 151.5500 C S \n", "3 30.0000 1 2 113781 151.5500 C S \n", "4 25.0000 1 2 113781 151.5500 C S " ] }, "execution_count": 3, "metadata": {}, "output_type": "execute_result" } ], "source": [ "data = load_titanic()\n", "data.head()" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [], "source": [ "X = data.drop(['survived', 'name', 'ticket'], axis=1)\n", "y = data.survived" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "cabin 0\n", "pclass 0\n", "embarked 0\n", "dtype: int64" ] }, "execution_count": 5, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# we will encode the below variables, they have no missing values\n", "X[['cabin', 'pclass', 'embarked']].isnull().sum()" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "cabin object\n", "pclass object\n", "embarked object\n", "dtype: object" ] }, "execution_count": 6, "metadata": {}, "output_type": "execute_result" } ], "source": [ "''' Make sure that the variables are type (object).\n", "if not, cast it as object , otherwise the transformer will either send an error (if we pass it as argument) \n", "or not pick it up (if we leave variables=None). '''\n", "\n", "X[['cabin', 'pclass', 'embarked']].dtypes" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "((916, 8), (393, 8))" ] }, "execution_count": 7, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# let's separate into training and testing set\n", "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)\n", "\n", "X_train.shape, X_test.shape" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "The categorical variable will be first encoded into integers with the\n", "OrdinalEncoder(). The integers can be assigned arbitrarily to the\n", "categories or following the mean value of the target in each category.\n", "\n", "Then a decision tree will be fit using the resulting numerical variable to predict\n", "the target variable. Finally, the original categorical variable values will be\n", "replaced by the predictions of the decision tree." ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "\"\\nParameters\\n ----------\\n\\n encoding_method: str, default='arbitrary'\\n The categorical encoding method that will be used to encode the original\\n categories to numerical values.\\n\\n 'ordered': the categories are numbered in ascending order according to\\n the target mean value per category.\\n\\n 'arbitrary' : categories are numbered arbitrarily.\\n\\n cv : int, default=3\\n Desired number of cross-validation fold to be used to fit the decision\\n tree.\\n\\n scoring: str, default='neg_mean_squared_error'\\n Desired metric to optimise the performance for the tree. Comes from\\n sklearn metrics. 
See the DecisionTreeRegressor or DecisionTreeClassifier\\n model evaluation documentation for more options:\\n https://scikit-learn.org/stable/modules/model_evaluation.html\\n\\n regression : boolean, default=True\\n Indicates whether the encoder should train a regression or a classification\\n decision tree.\\n\\n param_grid : dictionary, default=None\\n The list of parameters over which the decision tree should be optimised\\n during the grid search. The param_grid can contain any of the permitted\\n parameters for Scikit-learn's DecisionTreeRegressor() or\\n DecisionTreeClassifier().\\n\\n If None, then param_grid = {'max_depth': [1, 2, 3, 4]}.\\n\\n random_state : int, default=None\\n The random_state to initialise the training of the decision tree. It is one\\n of the parameters of the Scikit-learn's DecisionTreeRegressor() or\\n DecisionTreeClassifier(). For reproducibility it is recommended to set\\n the random_state to an integer.\\n\\n variables : list, default=None\\n The list of categorical variables that will be encoded. If None, the\\n encoder will find and select all object type variables.\\n\"" ] }, "execution_count": 8, "metadata": {}, "output_type": "execute_result" } ], "source": [ "'''\n", "Parameters\n", " ----------\n", "\n", " encoding_method: str, default='arbitrary'\n", " The categorical encoding method that will be used to encode the original\n", " categories to numerical values.\n", "\n", " 'ordered': the categories are numbered in ascending order according to\n", " the target mean value per category.\n", "\n", " 'arbitrary' : categories are numbered arbitrarily.\n", "\n", " cv : int, default=3\n", " Desired number of cross-validation fold to be used to fit the decision\n", " tree.\n", "\n", " scoring: str, default='neg_mean_squared_error'\n", " Desired metric to optimise the performance for the tree. Comes from\n", " sklearn metrics. See the DecisionTreeRegressor or DecisionTreeClassifier\n", " model evaluation documentation for more options:\n", " https://scikit-learn.org/stable/modules/model_evaluation.html\n", "\n", " regression : boolean, default=True\n", " Indicates whether the encoder should train a regression or a classification\n", " decision tree.\n", "\n", " param_grid : dictionary, default=None\n", " The list of parameters over which the decision tree should be optimised\n", " during the grid search. The param_grid can contain any of the permitted\n", " parameters for Scikit-learn's DecisionTreeRegressor() or\n", " DecisionTreeClassifier().\n", "\n", " If None, then param_grid = {'max_depth': [1, 2, 3, 4]}.\n", "\n", " random_state : int, default=None\n", " The random_state to initialise the training of the decision tree. It is one\n", " of the parameters of the Scikit-learn's DecisionTreeRegressor() or\n", " DecisionTreeClassifier(). For reproducibility it is recommended to set\n", " the random_state to an integer.\n", "\n", " variables : list, default=None\n", " The list of categorical variables that will be encoded. 
If None, the\n", " encoder will find and select all object type variables.\n", "'''" ] }, { "cell_type": "code", "execution_count": 9, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "DecisionTreeEncoder(param_grid={'max_depth': [1, 2, 3, 4]}, regression=False,\n", " scoring='roc_auc',\n", " variables=['cabin', 'pclass', 'embarked'])" ] }, "execution_count": 9, "metadata": {}, "output_type": "execute_result" } ], "source": [ "tree_enc = DecisionTreeEncoder(encoding_method='arbitrary',\n", " cv=3,\n", " scoring = 'roc_auc',\n", " param_grid = {'max_depth': [1, 2, 3, 4]},\n", " regression = False,\n", " variables=['cabin', 'pclass', 'embarked']\n", " )\n", "\n", "tree_enc.fit(X_train,y_train) # to fit you need to pass the target y" ] }, { "cell_type": "code", "execution_count": 10, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "Pipeline(steps=[('categorical_encoder',\n", " OrdinalEncoder(encoding_method='arbitrary',\n", " variables=['cabin', 'pclass', 'embarked'])),\n", " ('tree_discretiser',\n", " DecisionTreeDiscretiser(param_grid={'max_depth': [1, 2, 3, 4]},\n", " regression=False, scoring='roc_auc',\n", " variables=['cabin', 'pclass',\n", " 'embarked']))])" ] }, "execution_count": 10, "metadata": {}, "output_type": "execute_result" } ], "source": [ "tree_enc.encoder_" ] }, { "cell_type": "code", "execution_count": 11, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
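The `encoder_` attribute above shows that, internally, the encoder is simply an OrdinalEncoder followed by a per-variable decision tree. As a rough illustration of that idea (a minimal sketch with plain pandas and scikit-learn, not feature-engine's actual implementation, and skipping the cross-validated grid search over `max_depth` that the real encoder performs), the snippet below reproduces the logic by hand for the `cabin` column:

```python
# Minimal sketch of the encoding logic for a single variable.
from sklearn.tree import DecisionTreeClassifier

# 1) replace each category with an arbitrary integer
mapping = {cat: i for i, cat in enumerate(X_train['cabin'].unique())}
cabin_ordinal = X_train['cabin'].map(mapping).to_frame()

# 2) fit a shallow classification tree on the integer-encoded column
tree = DecisionTreeClassifier(max_depth=2, random_state=0)
tree.fit(cabin_ordinal, y_train)

# 3) replace each category with the tree's predicted probability of survival
cabin_encoded = pd.Series(tree.predict_proba(cabin_ordinal)[:, 1], index=X_train.index)
cabin_encoded.head()
```

Every observation with the same cabin letter receives the same prediction, which is why the transformed columns contain only a handful of distinct values per variable.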
" ], "text/plain": [ " pclass sex age sibsp parch fare cabin embarked\n", "233 0.617391 female 56.0 0 1 83.1583 0.611650 0.558011\n", "725 0.259036 female 22.0 0 0 7.7500 0.304843 0.373494\n", "934 0.259036 female 4.0 0 2 22.0250 0.304843 0.338957\n", "241 0.617391 male NaN 0 0 50.0000 0.698113 0.338957\n", "466 0.436170 male 34.0 1 0 26.0000 0.304843 0.338957" ] }, "execution_count": 11, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# transform and visualise the data\n", "\n", "train_t = tree_enc.transform(X_train)\n", "test_t = tree_enc.transform(X_test)\n", "\n", "test_t.sample(5)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Automatically select the variables\n", "\n", "This encoder will select all categorical variables to encode, when no variables are specified when calling the encoder." ] }, { "cell_type": "code", "execution_count": 12, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "DecisionTreeEncoder(param_grid={'max_depth': [1, 2, 3, 4]}, regression=False,\n", " scoring='roc_auc')" ] }, "execution_count": 12, "metadata": {}, "output_type": "execute_result" } ], "source": [ "tree_enc = DecisionTreeEncoder(encoding_method='arbitrary',\n", " cv=3,\n", " scoring = 'roc_auc',\n", " param_grid = {'max_depth': [1, 2, 3, 4]},\n", " regression = False,\n", " )\n", "\n", "tree_enc.fit(X_train,y_train) # to fit you need to pass the target y" ] }, { "cell_type": "code", "execution_count": 13, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "Pipeline(steps=[('categorical_encoder',\n", " OrdinalEncoder(encoding_method='arbitrary',\n", " variables=['pclass', 'sex', 'cabin',\n", " 'embarked'])),\n", " ('tree_discretiser',\n", " DecisionTreeDiscretiser(param_grid={'max_depth': [1, 2, 3, 4]},\n", " regression=False, scoring='roc_auc',\n", " variables=['pclass', 'sex', 'cabin',\n", " 'embarked']))])" ] }, "execution_count": 13, "metadata": {}, "output_type": "execute_result" } ], "source": [ "tree_enc.encoder_" ] }, { "cell_type": "code", "execution_count": 14, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
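With `variables=None` the encoder finds the categorical variables on its own, which is why `sex` now appears among the encoded variables as well. A quick way to check beforehand which columns it will pick up is to list the object-type columns yourself (plain pandas, nothing specific to feature-engine):

```python
# Columns the encoder selects when variables=None: all object-type columns.
categorical_vars = X_train.select_dtypes(include='O').columns.tolist()
categorical_vars  # ['pclass', 'sex', 'cabin', 'embarked']
```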
" ], "text/plain": [ " pclass sex age sibsp parch fare cabin embarked\n", "753 0.259036 0.187608 22.0 0 0 8.0500 0.304843 0.338957\n", "39 0.617391 0.187608 48.0 0 0 50.4958 0.698113 0.558011\n", "900 0.259036 0.187608 NaN 1 2 23.4500 0.304843 0.338957\n", "1148 0.259036 0.187608 35.0 0 0 7.1250 0.304843 0.338957\n", "187 0.617391 0.728358 16.0 0 1 39.4000 0.698113 0.338957" ] }, "execution_count": 14, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# transform and visualise the data\n", "\n", "train_t = tree_enc.transform(X_train)\n", "test_t = tree_enc.transform(X_test)\n", "\n", "test_t.sample(5)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "fenotebook", "language": "python", "name": "fenotebook" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8.2" }, "toc": { "base_numbering": 1, "nav_menu": {}, "number_sections": true, "sideBar": true, "skip_h1_title": false, "title_cell": "Table of Contents", "title_sidebar": "Contents", "toc_cell": false, "toc_position": {}, "toc_section_display": true, "toc_window_display": false } }, "nbformat": 4, "nbformat_minor": 4 }