{ "cells": [ { "cell_type": "code", "execution_count": null, "metadata": { "slideshow": { "slide_type": "skip" } }, "outputs": [], "source": [ "import dateutil\n", "import math\n", "\n", "import numpy as np\n", "import pandas as pd\n", "import matplotlib.pyplot as plt\n", "import matplotlib.dates as mdates\n", "import plotly.plotly as py\n", "import plotly.graph_objs as go\n", "import plotly.figure_factory as ff\n", "import statsmodels.api as sm\n", "\n", "from scipy import stats\n", "from sklearn.metrics import mean_squared_error, mean_absolute_error\n", "from sklearn import linear_model\n", "from sklearn.preprocessing import MinMaxScaler" ] }, { "cell_type": "markdown", "metadata": { "slideshow": { "slide_type": "slide" } }, "source": [ "# Carga y limpieza de datos" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "slideshow": { "slide_type": "subslide" } }, "outputs": [], "source": [ "df_start = pd.read_excel('../Data/Graficas DustTrack Honeywell 5sept comp.xlsx')\n", "df_start.head()" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Eliminar nulos y headers repetidos\n", "\n", "df_start = df_start[~df_start['Valor DustTrack'].isna()]\n", "df_start = df_start[df_start.columns.values[:-1]]\n", "sum(df_start['Valor DustTrack'] == 'Valor DustTrack')\n", "df_start = df_start[np.logical_not(df_start['Valor DustTrack'] == 'Valor DustTrack')]\n", "\n", "df_start.index = np.arange(len(df_start))" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Casting a variables numericas\n", "\n", "df_start[['Valor DustTrack', 'Valor Honeywell']] = df_start[['Valor DustTrack', 'Valor Honeywell']].apply(pd.to_numeric)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "scrolled": true }, "outputs": [], "source": [ "def plot_time_series(df, variables, filename): \n", " data = list()\n", " for variable in variables:\n", " data.append(go.Scatter(x = df.index,\n", " y = df[variable],\n", " name=variable))\n", " \n", " layout = go.Layout(title='DustTrack vs Honeywell',\n", " yaxis=dict(title='Pm 2.5 value'),\n", " xaxis=dict(title='N° Observation'))\n", " fig = go.Figure(data=data, layout=layout)\n", " plot_url = py.iplot(fig, filename=filename)\n", " return plot_url\n", " \n", "# Login plotly\n", "py.sign_in('FranciscoJavierOspinaSalazar', 'WLz5dfnwRjfo2cSwlRiL')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Visualización inicial de los datos y exploraciones univariadas" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "scrolled": false, "slideshow": { "slide_type": "slide" } }, "outputs": [], "source": [ "plot_time_series(df=df_start, variables=['Valor DustTrack', 'Valor Honeywell'], filename='Comparison_sensors')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Boxplot de ambas mediciones" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def build_boxplot(df, variables, filename):\n", " if isinstance(variables, str):\n", " variables = [variables]\n", " data = list()\n", " for variable in variables:\n", " data.append(go.Box(y = df[variable],\n", " name=variable))\n", " \n", " layout = go.Layout(title='Boxplot',\n", " yaxis=dict(title='Pm 2.5 value'))\n", " fig = go.Figure(data=data, layout=layout)\n", " plot_url = py.iplot(fig, filename=filename)\n", " return plot_url\n", " " ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ 
"build_boxplot(df=df_start, variables='Valor DustTrack', filename='Boxplot_DustTrack')" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "build_boxplot(df=df_start, variables='Valor Honeywell', filename='Boxplot_Honeywell')" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "build_boxplot(df=df_start, variables=['Valor DustTrack', 'Valor Honeywell'], filename='Boxplot_Comparison1')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Remoción de outliers" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "df = df_start.copy()\n", "df = df[np.logical_not(np.logical_or(df['Valor Honeywell'] > 90, df['Valor DustTrack'] > 492))]" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Suavizamiento de serie con medias móviles\n", "\n", "La descripción de las médias móviles y su uso para suavizar picos en series de tiempo se encuentra en [http://www.expansion.com/diccionario-economico/media-movil.html](http://www.expansion.com/diccionario-economico/media-movil.html)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "n_movil_mean = 20\n", "movil_mean = df[['Valor DustTrack', 'Valor Honeywell']].rolling(n_movil_mean).mean()\n", "movil_mean.columns = ['Mm_DustTrack', 'Mm_Honeywell']\n", "movil_mean = movil_mean.loc[n_movil_mean - 1:]" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "## Reescalamiento de los datos con la función Logaritmo" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "movil_mean = movil_mean.assign(Log10_DustTrack=np.log10(movil_mean['Mm_DustTrack']))\n", "movil_mean = movil_mean.assign(Log10_Honeywell=np.log10(movil_mean['Mm_Honeywell']))\n", "movil_mean.index = np.arange(len(movil_mean))\n", "movil_mean.head()" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "plot_time_series(df=movil_mean, variables=['Mm_DustTrack', 'Mm_Honeywell'], filename='serie_movil')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Con el suavizamiento se suprimieron los picos que superaban 2000 en el eje y" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "plot_time_series(df=movil_mean, variables=['Log10_DustTrack', 'Log10_Honeywell'], filename='log10_serie_movil')" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "build_boxplot(df=movil_mean, variables=['Log10_DustTrack', 'Log10_Honeywell'], filename='Bloxplot_log10')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Partición de la base de datos en traint test" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "size_train = int(len(movil_mean) * .7)\n", "index_train = np.random.choice(np.arange(len(movil_mean)), size=size_train, replace=False)\n", "index_test = np.setdiff1d(np.arange(len(movil_mean)), index_train)\n", "df_train = movil_mean.iloc[index_train]\n", "df_test = movil_mean.iloc[index_test]\n", "df_train = df_train.sort_index()\n", "df_test = df_test.sort_index()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Escalamiento de los datos " ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "scaler_dt = MinMaxScaler()\n", 
"scaler_dt.fit(df_train['Mm_DustTrack'].values.reshape(-1, 1))\n", "scaled_DustTrack_train = scaler_dt.transform(df_train['Mm_DustTrack'].values.reshape(-1,1))\n", "scaled_DustTrack_test = scaler_dt.transform(df_test['Mm_DustTrack'].values.reshape(-1,1))\n", "\n", "scaler_hw = MinMaxScaler()\n", "scaler_hw.fit(df_train['Mm_Honeywell'].values.reshape(-1, 1))\n", "scaled_Honeywell_train = scaler_hw.transform(df_train['Mm_Honeywell'].values.reshape(-1,1))\n", "scaled_Honeywell_test = scaler_hw.transform(df_test['Mm_Honeywell'].values.reshape(-1,1))\n", "\n", "df_train = df_train.assign(scaled_DustTrack=scaled_DustTrack_train)\n", "df_train = df_train.assign(scaled_Honeywell=scaled_Honeywell_train)\n", "df_test = df_test.assign(scaled_DustTrack=scaled_DustTrack_test)\n", "df_test = df_test.assign(scaled_Honeywell=scaled_Honeywell_test)\n", "movil_mean = movil_mean.assign()\n", "\n", "df_train.head()" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "build_boxplot(df=df_train, variables=['scaled_DustTrack', 'scaled_Honeywell'], filename='Bloxplot_log10')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Correlación lineal de Pearson \n", "\n", "[https://es.wikipedia.org/wiki/Coeficiente_de_correlaci%C3%B3n_de_Pearson](https://es.wikipedia.org/wiki/Coeficiente_de_correlaci%C3%B3n_de_Pearson)\n", "\n", "Debido a que solo son dos variables si la correlación de pearson es alta se pueden hacer modelos que intenten generar la variable **Valor DustTrack** a partir de **Valor Honeywell**." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Coeficinte de correlación sobre las variables transformadas por logaritmos de las\n", "# series suavizadas por las media móviles\n", "stats.pearsonr(df_train['Log10_DustTrack'], df_train['Log10_Honeywell'])[0]" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "stats.pearsonr(df_train['scaled_DustTrack'], df_train['scaled_Honeywell'])[0]" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Tanto en la gráfica de la serie de tiempo como en la correlación, estas son las mejores variables hasta el momento para el modelo. " ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Construcción de modelos" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "model_min_max_scaler = linear_model.LinearRegression(fit_intercept=True)\n", "model_log_scaler = linear_model.LinearRegression(fit_intercept=True)\n", "model_min_max_scaler.fit(X=df_train['scaled_Honeywell'].values.reshape(-1,1), y=df_train['scaled_DustTrack'].values.reshape(-1,1), )\n", "model_log_scaler.fit(X=df_train['Log10_Honeywell'].values.reshape(-1,1), y=df_train['Log10_DustTrack'].values.reshape(-1,1))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Párametros de los modelos" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "print('Parameters model with log scaler. Coef = {}, Intercept = {}'.format(model_log_scaler.coef_[0][0], model_log_scaler.intercept_[0]))\n", "print('Parameters model with min_max scaler. 
   "print('Parameters model with min_max scaler. Coef = {}, Intercept = {}'.format(model_min_max_scaler.coef_[0][0], model_min_max_scaler.intercept_[0]))"
  ] },
  { "cell_type": "markdown", "metadata": {}, "source": [ "## Appending predictions to the smoothed series" ] },
  { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
   "pred_min_max_scaler_train = scaler_dt.inverse_transform(model_min_max_scaler.predict(df_train['scaled_Honeywell'].values.reshape(-1, 1)).reshape(-1, 1))\n",
   "pred_log_scaler_train = np.power(10, model_log_scaler.predict(df_train['Log10_Honeywell'].values.reshape(-1, 1)).reshape(-1, 1))\n",
   "\n",
   "df_train = df_train.assign(pred_min_max_scaler=pred_min_max_scaler_train)\n",
   "df_train = df_train.assign(pred_log_scaler=pred_log_scaler_train)\n",
   "\n",
   "pred_min_max_scaler_test = scaler_dt.inverse_transform(model_min_max_scaler.predict(df_test['scaled_Honeywell'].values.reshape(-1, 1)).reshape(-1, 1))\n",
   "pred_log_scaler_test = np.power(10, model_log_scaler.predict(df_test['Log10_Honeywell'].values.reshape(-1, 1)).reshape(-1, 1))\n",
   "\n",
   "df_test = df_test.assign(pred_min_max_scaler=pred_min_max_scaler_test)\n",
   "df_test = df_test.assign(pred_log_scaler=pred_log_scaler_test)\n",
   "\n",
   "df_test.head()"
  ] },
  { "cell_type": "markdown", "metadata": {}, "source": [ "## Appending predictions to the unsmoothed series" ] },
  { "cell_type": "code", "execution_count": null, "metadata": { "scrolled": true }, "outputs": [], "source": [
   "df_start = df_start.assign(pred_log_scaler=np.power(10, model_log_scaler.predict(np.log10(df_start['Valor Honeywell']).values.reshape(-1, 1)).reshape(-1, 1)))\n",
   "\n",
   "scaled_Honeywell = scaler_hw.transform(df_start['Valor Honeywell'].values.reshape(-1, 1))\n",
   "df_start = df_start.assign(scaled_Honeywell=scaled_Honeywell)\n",
   "pred_min_max_scaler = scaler_dt.inverse_transform(model_min_max_scaler.predict(df_start['scaled_Honeywell'].values.reshape(-1, 1)).reshape(-1, 1))\n",
   "df_start = df_start.assign(pred_min_max_scaler=pred_min_max_scaler)\n",
   "df_start.head()"
  ] },
  { "cell_type": "markdown", "metadata": {}, "source": [ "# Plots of the actual value vs. the model prediction (smoothed series / train)" ] },
  { "cell_type": "code", "execution_count": null, "metadata": { "scrolled": true }, "outputs": [], "source": [
   "plot_time_series(df=df_train,\n",
   "                 variables=['pred_min_max_scaler', 'pred_log_scaler', 'Mm_DustTrack'],\n",
   "                 filename='Predictions_01')"
  ] },
  { "cell_type": "markdown", "metadata": {}, "source": [ "# Plots of the actual value vs. the model prediction (smoothed series / test)" ] },
  { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
   "plot_time_series(df=df_test,\n",
   "                 variables=['pred_min_max_scaler', 'pred_log_scaler', 'Mm_DustTrack'],\n",
   "                 filename='Predictions_02')"
  ] },
  { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
   "# Same comparison on the original (unsmoothed) series\n",
   "plot_time_series(df=df_start,\n",
   "                 variables=['pred_min_max_scaler', 'pred_log_scaler', 'Valor DustTrack'],\n",
   "                 filename='Predictions_03')"
  ] },
  { "cell_type": "markdown", "metadata": {}, "source": [ "# Prediction errors vs. the actual value" ] },
  { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
   "dc_errors = dict()\n",
   "for key, value in {'Train Data': df_train, 'Test Data': df_test, 'Original Data': df_start}.items():\n",
   "    true_val = 'Mm_DustTrack'\n",
   "    if key == 'Original Data':\n",
   "        true_val = 'Valor DustTrack'\n",
   "    dc_errors[key] = [mean_squared_error(value[true_val], value['pred_log_scaler']),\n",
   "                      mean_squared_error(value[true_val], value['pred_min_max_scaler']),\n",
   "                      mean_absolute_error(value[true_val], value['pred_log_scaler']),\n",
   "                      mean_absolute_error(value[true_val], value['pred_min_max_scaler'])]\n",
   "df_errors = pd.DataFrame(dc_errors)\n",
   "df_errors.index = ['Log10 transform MSE', 'Min_max transform MSE', 'Log10 transform MAE', 'Min_max transform MAE']\n",
   "df_errors[['Train Data', 'Test Data', 'Original Data']]"
  ] },
  { "cell_type": "markdown", "metadata": {}, "source": [ "Note: MAE and MSE stand for the mean absolute error and the mean squared error, respectively." ] },
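  { "cell_type": "markdown", "metadata": {}, "source": [
   "For reference, with actual values $y_i$, predictions $\\hat{y}_i$ and $n$ observations, the two metrics reported above are\n",
   "\n",
   "$$MSE = \\frac{1}{n}\\sum_{i=1}^{n}(y_i - \\hat{y}_i)^2 \\qquad\\qquad MAE = \\frac{1}{n}\\sum_{i=1}^{n}\\left|y_i - \\hat{y}_i\\right|$$"
  ] },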
  { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
   "def plot_density(df, y_true, y_pred, group_labels, title, filename):\n",
   "    hist_data = list()\n",
   "    for y in y_pred:\n",
   "        hist_data.append(df[y_true] - df[y])\n",
   "\n",
   "    # Create the distribution plot (density curves only, no histogram)\n",
   "    fig = ff.create_distplot(hist_data, group_labels, show_hist=False)\n",
   "    fig['layout'].update(title=title)\n",
   "    plot_url = py.iplot(fig, filename=filename)\n",
   "    return plot_url"
  ] },
  { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
   "plot_density(df=df_train,\n",
   "             y_true='Mm_DustTrack',\n",
   "             y_pred=['pred_log_scaler', 'pred_min_max_scaler'],\n",
   "             group_labels=['Log scaler', 'Min_max scaler'],\n",
   "             title='Density of the train residuals',\n",
   "             filename='Residuals_density_01')"
  ] },
  { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
   "plot_density(df=df_test,\n",
   "             y_true='Mm_DustTrack',\n",
   "             y_pred=['pred_log_scaler', 'pred_min_max_scaler'],\n",
   "             group_labels=['Log scaler', 'Min_max scaler'],\n",
   "             title='Density of the test residuals',\n",
   "             filename='Residuals_density_02')"
  ] },
  { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
   "plot_density(df=df_start,\n",
   "             y_true='Valor DustTrack',\n",
   "             y_pred=['pred_log_scaler', 'pred_min_max_scaler'],\n",
   "             group_labels=['Log scaler', 'Min_max scaler'],\n",
   "             title='Density of the original-data residuals',\n",
   "             filename='Residuals_density_03')"
  ] },
  { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
   "df_train = df_train.assign(res_log_scaler=np.abs(df_train['Mm_DustTrack'] - df_train['pred_log_scaler']))\n",
   "df_train = df_train.assign(res_min_max_scaler=np.abs(df_train['Mm_DustTrack'] - df_train['pred_min_max_scaler']))\n",
   "df_test = df_test.assign(res_log_scaler=np.abs(df_test['Mm_DustTrack'] - df_test['pred_log_scaler']))\n",
   "df_test = df_test.assign(res_min_max_scaler=np.abs(df_test['Mm_DustTrack'] - df_test['pred_min_max_scaler']))\n",
   "df_start = df_start.assign(res_log_scaler=np.abs(df_start['Valor DustTrack'] - df_start['pred_log_scaler']))\n",
   "df_start = df_start.assign(res_min_max_scaler=np.abs(df_start['Valor DustTrack'] - df_start['pred_min_max_scaler']))"
  ] },
  { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
   "step = 0.05\n",
   "quantile_train = df_train[['res_log_scaler', 'res_min_max_scaler']].quantile(np.arange(0, 1 + step, step))\n",
   "quantile_train.columns = ['res_log_scaler_train', 'res_min_max_scaler_train']\n",
   "quantile_test = df_test[['res_log_scaler', 'res_min_max_scaler']].quantile(np.arange(0, 1 + step, step))\n",
   "quantile_test.columns = ['res_log_scaler_test', 'res_min_max_scaler_test']\n",
   "quantile_start = df_start[['res_log_scaler', 'res_min_max_scaler']].quantile(np.arange(0, 1 + step, step))\n",
   "quantile_start.columns = ['res_log_scaler_original_data', 'res_min_max_scaler_original_data']\n",
   "df_quantile = pd.concat([quantile_train, quantile_test, quantile_start], axis=1)\n",
   "df_quantile"
  ] },
  { "cell_type": "markdown", "metadata": {}, "source": [
   "# Final modeling equation (Log10 transform)\n",
   "\n",
   "The final equation that performs best for modeling the **Valor DustTrack** values from the **Valor Honeywell** data is:\n",
   "\n",
   "$$y = 10^{\\,1.1675 \\cdot \\log_{10}(x) + 0.4173}$$\n",
   "\n",
   "where $x$ is the value of the **Valor Honeywell** variable and $y$ is the value of the **Valor DustTrack** variable."
  ] },
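  { "cell_type": "markdown", "metadata": {}, "source": [
   "As a minimal sketch, the equation above can be wrapped in a small helper; `honeywell_to_dusttrack` is a hypothetical name, and the coefficients are the slope and intercept reported for `model_log_scaler`."
  ] },
  { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [
   "# Hypothetical helper applying the final Log10-transform equation above;\n",
   "# the defaults are the slope and intercept of model_log_scaler.\n",
   "def honeywell_to_dusttrack(x, coef=1.1675, intercept=0.4173):\n",
   "    \"\"\"Estimate the DustTrack PM2.5 value from a Honeywell reading x.\"\"\"\n",
   "    return np.power(10, coef * np.log10(x) + intercept)\n",
   "\n",
   "# Example: estimated DustTrack values for a few Honeywell readings\n",
   "honeywell_to_dusttrack(np.array([10.0, 25.0, 50.0]))"
  ] },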
  { "cell_type": "markdown", "metadata": {}, "source": [
   "# Final conclusions\n",
   "\n",
   "- It is possible to reconstruct the **DustTrack** sensor data from the **Honeywell** sensor data; the reconstruction is not perfect, but it is quite good."
  ] },
  { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }
 ],
 "metadata": {
  "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" },
  "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8.3" }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}