import numpy as np
from sklearn.metrics import mean_squared_error
# from sklearn.preprocessing import MinMaxScaler
# import statistics


def create_dataset(ts, dim, h):
    """Turn a 1-D series into a supervised dataset.

    Each row holds `look_back = dim + h - 1` consecutive values followed by
    the next value as the target, so every row contains `dim + h` points.
    """
    look_back = dim + h - 1
    # dataset = np.insert(dataset, [0] * look_back, 0)
    dataX, dataY = [], []
    for i in range(len(ts) - look_back):
        dataX.append(ts[i:(i + look_back)])
        dataY.append(ts[i + look_back])
    dataY = np.array(dataY).reshape(-1, 1)
    return np.concatenate((dataX, dataY), axis=1)


def unpadding(y):
    """Collapse a (n_samples, horizon) matrix of overlapping forecasts back
    into a single series of length n_samples + horizon - 1 by averaging the
    entries that refer to the same time step (the anti-diagonals)."""
    a = y.copy()
    h = y.shape[1]
    s = np.empty(y.shape[0] + y.shape[1] - 1)
    for i in range(s.shape[0]):
        s[i] = np.diagonal(np.flip(a, 1), offset=-i + h - 1,
                           axis1=0, axis2=1).copy().mean()
    return s


def mape(y_true, y_pred):
    """Mean absolute percentage error on the unpadded series; time steps
    where the true value is exactly zero are skipped."""
    y_true = unpadding(y_true)
    y_pred = unpadding(y_pred)
    mask = y_true != 0.0
    # Note: does not handle the mixed 1-D representation.
    # if _is_1d(y_true):
    #     y_true, y_pred = _check_1d_array(y_true, y_pred)
    return np.fabs((y_true[mask] - y_pred[mask]) / y_true[mask]).mean()


def smape(y_true, y_pred):
    """Symmetric MAPE variant: |error| / (y_true + y_pred), averaged over the
    unpadded series, skipping time steps where the true value is zero."""
    y_true = unpadding(y_true)
    y_pred = unpadding(y_pred)
    mask = y_true != 0.0
    # Note: does not handle the mixed 1-D representation.
    # if _is_1d(y_true):
    #     y_true, y_pred = _check_1d_array(y_true, y_pred)
    return np.fabs((y_true[mask] - y_pred[mask]) /
                   (y_true[mask] + y_pred[mask])).mean()


def rmse(y_true, y_pred):
    """Root mean squared error on the unpadded series."""
    y_true = unpadding(y_true)
    y_pred = unpadding(y_pred)
    return np.sqrt(mean_squared_error(y_true, y_pred))
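

if __name__ == "__main__":
    # Minimal usage sketch with hypothetical data (not part of any pipeline):
    # the metric functions expect (n_samples, horizon)-shaped matrices of
    # overlapping forecasts, which unpadding() averages back into one series.
    # The toy series, dim/h values, and the choice to treat the last h columns
    # of the dataset as the "horizon" are illustrative assumptions only.
    rng = np.random.default_rng(0)
    ts = rng.random(50) + 1.0                  # toy series, kept away from zero
    data = create_dataset(ts, dim=6, h=3)      # rows: 8 input values + 1 target
    y_true = data[:, -3:]                      # pretend these columns are the horizon
    y_pred = y_true + rng.normal(0.0, 0.01, y_true.shape)  # noisy "forecast"
    print("RMSE :", rmse(y_true, y_pred))
    print("MAPE :", mape(y_true, y_pred))
    print("sMAPE:", smape(y_true, y_pred))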