{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Bayesian Temporal Tensor Factorization\n", "\n", "**Published**: December 27, 2020\n", "\n", "**Author**: Xinyu Chen [[**GitHub homepage**](https://github.com/xinychen)]\n", "\n", "**Download**: This Jupyter notebook is at our GitHub repository. If you want to evaluate the code, please download the notebook from the [**transdim**](https://github.com/xinychen/transdim/blob/master/predictor/BTTF.ipynb) repository.\n", "\n", "This notebook shows how to implement the Bayesian Temporal Tensor Factorization (BTTF), a fully Bayesian matrix factorization model, on some real-world data sets. To overcome the missing data problem in multivariate time series, BTTF takes into account both low-rank matrix structure and time series autoregression. For an in-depth discussion of BTTF, please see [1].\n", "\n", "
\n", "\n", "[1] Xinyu Chen, Lijun Sun (2019). Bayesian temporal factorization for multidimensional time series prediction. arXiv:1910.06366. [PDF] \n", "\n", "
" ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "from numpy.linalg import inv as inv\n", "from numpy.random import normal as normrnd\n", "from numpy.random import multivariate_normal as mvnrnd\n", "from scipy.linalg import khatri_rao as kr_prod\n", "from scipy.stats import wishart\n", "from scipy.stats import invwishart\n", "from numpy.linalg import solve as solve\n", "from numpy.linalg import cholesky as cholesky_lower\n", "from scipy.linalg import cholesky as cholesky_upper\n", "from scipy.linalg import solve_triangular as solve_ut\n", "import matplotlib.pyplot as plt\n", "%matplotlib inline" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "def mvnrnd_pre(mu, Lambda):\n", " src = normrnd(size = (mu.shape[0],))\n", " return solve_ut(cholesky_upper(Lambda, overwrite_a = True, check_finite = False), \n", " src, lower = False, check_finite = False, overwrite_b = True) + mu\n", "\n", "def cov_mat(mat, mat_bar):\n", " mat = mat - mat_bar\n", " return mat.T @ mat\n", "\n", "def ten2mat(tensor, mode):\n", " return np.reshape(np.moveaxis(tensor, mode, 0), (tensor.shape[mode], -1), order = 'F')" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [], "source": [ "def sample_factor_u(tau_sparse_tensor, tau_ind, U, V, X, beta0 = 1):\n", " \"\"\"Sampling M-by-R factor matrix U and its hyperparameters (mu_u, Lambda_u).\"\"\"\n", " \n", " dim1, rank = U.shape\n", " U_bar = np.mean(U, axis = 0)\n", " temp = dim1 / (dim1 + beta0)\n", " var_mu_hyper = temp * U_bar\n", " var_U_hyper = inv(np.eye(rank) + cov_mat(U, U_bar) + temp * beta0 * np.outer(U_bar, U_bar))\n", " var_Lambda_hyper = wishart.rvs(df = dim1 + rank, scale = var_U_hyper)\n", " var_mu_hyper = mvnrnd_pre(var_mu_hyper, (dim1 + beta0) * var_Lambda_hyper)\n", "\n", " var1 = kr_prod(X, V).T\n", " var2 = kr_prod(var1, var1)\n", " var3 = (var2 @ ten2mat(tau_ind, 0).T).reshape([rank, rank, dim1]) + var_Lambda_hyper[:, :, None]\n", " var4 = var1 @ ten2mat(tau_sparse_tensor, 0).T + (var_Lambda_hyper @ var_mu_hyper)[:, None]\n", " for i in range(dim1):\n", " U[i, :] = mvnrnd_pre(solve(var3[:, :, i], var4[:, i]), var3[:, :, i])\n", " \n", " return U" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [], "source": [ "def sample_factor_v(tau_sparse_tensor, tau_ind, U, V, X, beta0 = 1):\n", " \"\"\"Sampling N-by-R factor matrix V and its hyperparameters (mu_v, Lambda_v).\"\"\"\n", " \n", " dim2, rank = V.shape\n", " V_bar = np.mean(V, axis = 0)\n", " temp = dim2 / (dim2 + beta0)\n", " var_mu_hyper = temp * V_bar\n", " var_V_hyper = inv(np.eye(rank) + cov_mat(V, V_bar) + temp * beta0 * np.outer(V_bar, V_bar))\n", " var_Lambda_hyper = wishart.rvs(df = dim2 + rank, scale = var_V_hyper)\n", " var_mu_hyper = mvnrnd_pre(var_mu_hyper, (dim2 + beta0) * var_Lambda_hyper)\n", "\n", " var1 = kr_prod(X, U).T\n", " var2 = kr_prod(var1, var1)\n", " var3 = (var2 @ ten2mat(tau_ind, 1).T).reshape([rank, rank, dim2]) + var_Lambda_hyper[:, :, None]\n", " var4 = var1 @ ten2mat(tau_sparse_tensor, 1).T + (var_Lambda_hyper @ var_mu_hyper)[:, None]\n", " for j in range(dim2):\n", " V[j, :] = mvnrnd_pre(solve(var3[:, :, j], var4[:, j]), var3[:, :, j])\n", " \n", " return V" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [], "source": [ "def mnrnd(M, U, V):\n", " \"\"\"\n", " Generate matrix normal distributed random matrix.\n", " M is a m-by-n matrix, U is a m-by-m matrix, and V is a n-by-n matrix.\n", " \"\"\"\n", " dim1, dim2 = M.shape\n", " X0 = np.random.randn(dim1, dim2)\n", " P = cholesky_lower(U)\n", " Q = cholesky_lower(V)\n", " \n", " return M + P @ X0 @ Q.T\n", "\n", "def sample_var_coefficient(X, time_lags):\n", " dim, rank = X.shape\n", " d = time_lags.shape[0]\n", " tmax = np.max(time_lags)\n", " \n", " Z_mat = X[tmax : dim, :]\n", " Q_mat = np.zeros((dim - tmax, rank * d))\n", " for k in range(d):\n", " Q_mat[:, k * rank : (k + 1) * rank] = X[tmax - time_lags[k] : dim - time_lags[k], :]\n", " var_Psi0 = np.eye(rank * d) + Q_mat.T @ Q_mat\n", " var_Psi = inv(var_Psi0)\n", " var_M = var_Psi @ Q_mat.T @ Z_mat\n", " var_S = np.eye(rank) + Z_mat.T @ Z_mat - var_M.T @ var_Psi0 @ var_M\n", " Sigma = invwishart.rvs(df = rank + dim - tmax, scale = var_S)\n", " \n", " return mnrnd(var_M, var_Psi, Sigma), Sigma" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [], "source": [ "def sample_factor_x(tau_sparse_tensor, tau_ind, time_lags, U, V, X, A, Lambda_x):\n", " \"\"\"Sampling T-by-R factor matrix X.\"\"\"\n", "\n", " dim3, rank = X.shape\n", " tmax = np.max(time_lags)\n", " tmin = np.min(time_lags)\n", " d = time_lags.shape[0]\n", " A0 = np.dstack([A] * d)\n", " for k in range(d):\n", " A0[k * rank : (k + 1) * rank, :, k] = 0\n", " mat0 = Lambda_x @ A.T\n", " mat1 = np.einsum('kij, jt -> kit', A.reshape([d, rank, rank]), Lambda_x)\n", " mat2 = np.einsum('kit, kjt -> ij', mat1, A.reshape([d, rank, rank]))\n", " \n", " var1 = kr_prod(V, U).T\n", " var2 = kr_prod(var1, var1)\n", " var3 = (var2 @ ten2mat(tau_ind, 2).T).reshape([rank, rank, dim3]) + Lambda_x[:, :, None]\n", " var4 = var1 @ ten2mat(tau_sparse_tensor, 2).T\n", " for t in range(dim3):\n", " Mt = np.zeros((rank, rank))\n", " Nt = np.zeros(rank)\n", " Qt = mat0 @ X[t - time_lags, :].reshape(rank * d)\n", " index = list(range(0, d))\n", " if t >= dim3 - tmax and t < dim3 - tmin:\n", " index = list(np.where(t + time_lags < dim3))[0]\n", " elif t < tmax:\n", " Qt = np.zeros(rank)\n", " index = list(np.where(t + time_lags >= tmax))[0]\n", " if t < dim3 - tmin:\n", " Mt = mat2.copy()\n", " temp = np.zeros((rank * d, len(index)))\n", " n = 0\n", " for k in index:\n", " temp[:, n] = X[t + time_lags[k] - time_lags, :].reshape(rank * d)\n", " n += 1\n", " temp0 = X[t + time_lags[index], :].T - np.einsum('ijk, ik -> jk', A0[:, :, index], temp)\n", " Nt = np.einsum('kij, jk -> i', mat1[index, :, :], temp0)\n", " \n", " var3[:, :, t] = var3[:, :, t] + Mt\n", " if t < tmax:\n", " var3[:, :, t] = var3[:, :, t] - Lambda_x + np.eye(rank)\n", " X[t, :] = mvnrnd_pre(solve(var3[:, :, t], var4[:, t] + Nt + Qt), var3[:, :, t])\n", "\n", " return X" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [], "source": [ "def compute_mape(var, var_hat):\n", " return np.sum(np.abs(var - var_hat) / var) / var.shape[0]\n", "\n", "def compute_rmse(var, var_hat):\n", " return np.sqrt(np.sum((var - var_hat) ** 2) / var.shape[0])" ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [], "source": [ "def ar4cast(A, X, Sigma, time_lags, multi_step):\n", " dim, rank = X.shape\n", " d = time_lags.shape[0]\n", " X_new = np.append(X, np.zeros((multi_step, rank)), axis = 0)\n", " for t in range(multi_step):\n", " var = A.T @ X_new[dim + t - time_lags, :].reshape(rank * d)\n", " X_new[dim + t, :] = mvnrnd(var, Sigma)\n", " return X_new" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "#### BTTF Implementation\n", "\n" ] }, { "cell_type": "code", "execution_count": 9, "metadata": {}, "outputs": [], "source": [ "def BTTF(dense_tensor, sparse_tensor, init, rank, time_lags, burn_iter, gibbs_iter, multi_step = 1):\n", " \"\"\"Bayesian Temporal Tensor Factorization, BTTF.\"\"\"\n", " \n", " dim1, dim2, dim3 = sparse_tensor.shape\n", " d = time_lags.shape[0]\n", " U = init[\"U\"]\n", " V = init[\"V\"]\n", " X = init[\"X\"]\n", " if np.isnan(sparse_tensor).any() == False:\n", " ind = sparse_tensor != 0\n", " pos_obs = np.where(ind)\n", " pos_test = np.where((dense_tensor != 0) & (sparse_tensor == 0))\n", " elif np.isnan(sparse_tensor).any() == True:\n", " pos_test = np.where((dense_tensor != 0) & (np.isnan(sparse_tensor)))\n", " ind = ~np.isnan(sparse_tensor)\n", " pos_obs = np.where(ind)\n", " sparse_tensor[np.isnan(sparse_tensor)] = 0\n", " dense_test = dense_tensor[pos_test]\n", " del dense_tensor\n", " U_plus = np.zeros((dim1, rank, gibbs_iter))\n", " V_plus = np.zeros((dim2, rank, gibbs_iter))\n", " X_plus = np.zeros((dim3 + multi_step, rank, gibbs_iter))\n", " A_plus = np.zeros((rank * d, rank, gibbs_iter))\n", " tau_plus = np.zeros(gibbs_iter)\n", " Sigma_plus = np.zeros((rank, rank, gibbs_iter))\n", " temp_hat = np.zeros(len(pos_test[0]))\n", " show_iter = 500\n", " tau = 1\n", " tensor_hat_plus = np.zeros(sparse_tensor.shape)\n", " tensor_new_plus = np.zeros((dim1, dim2, multi_step))\n", " for it in range(burn_iter + gibbs_iter):\n", " tau_ind = tau * ind\n", " tau_sparse_tensor = tau * sparse_tensor\n", " U = sample_factor_u(tau_sparse_tensor, tau_ind, U, V, X)\n", " V = sample_factor_v(tau_sparse_tensor, tau_ind, U, V, X)\n", " A, Sigma = sample_var_coefficient(X, time_lags)\n", " X = sample_factor_x(tau_sparse_tensor, tau_ind, time_lags, U, V, X, A, inv(Sigma))\n", " tensor_hat = np.einsum('is, js, ts -> ijt', U, V, X)\n", " tau = np.random.gamma(1e-6 + 0.5 * np.sum(ind), \n", " 1 / (1e-6 + 0.5 * np.sum(((sparse_tensor - tensor_hat) ** 2) * ind)))\n", " temp_hat += tensor_hat[pos_test]\n", " if (it + 1) % show_iter == 0 and it < burn_iter:\n", " temp_hat = temp_hat / show_iter\n", " print('Iter: {}'.format(it + 1))\n", " print('MAPE: {:.6}'.format(compute_mape(dense_test, temp_hat)))\n", " print('RMSE: {:.6}'.format(compute_rmse(dense_test, temp_hat)))\n", " temp_hat = np.zeros(len(pos_test[0]))\n", " print()\n", " if it + 1 > burn_iter:\n", " U_plus[:, :, it - burn_iter] = U\n", " V_plus[:, :, it - burn_iter] = V\n", " A_plus[:, :, it - burn_iter] = A\n", " Sigma_plus[:, :, it - burn_iter] = Sigma\n", " tau_plus[it - burn_iter] = tau\n", " tensor_hat_plus += tensor_hat\n", " X0 = ar4cast(A, X, Sigma, time_lags, multi_step)\n", " X_plus[:, :, it - burn_iter] = X0\n", " tensor_new_plus += np.einsum('is, js, ts -> ijt', U, V, X0[- multi_step :, :])\n", " tensor_hat = tensor_hat_plus / gibbs_iter\n", " print('Imputation MAPE: {:.6}'.format(compute_mape(dense_test, tensor_hat[:, :, : dim3][pos_test])))\n", " print('Imputation RMSE: {:.6}'.format(compute_rmse(dense_test, tensor_hat[:, :, : dim3][pos_test])))\n", " print()\n", " tensor_hat = np.append(tensor_hat, tensor_new_plus / gibbs_iter, axis = 2)\n", " tensor_hat[tensor_hat < 0] = 0\n", " \n", " return tensor_hat, U_plus, V_plus, X_plus, A_plus, Sigma_plus, tau_plus" ] }, { "cell_type": "code", "execution_count": 10, "metadata": {}, "outputs": [], "source": [ "def sample_factor_x_partial(tau_sparse_tensor, tau_ind, time_lags, U, V, X, A, Lambda_x, back_step):\n", " \"\"\"Sampling T-by-R factor matrix X.\"\"\"\n", " \n", " dim3, rank = X.shape\n", " tmax = np.max(time_lags)\n", " tmin = np.min(time_lags)\n", " d = time_lags.shape[0]\n", " A0 = np.dstack([A] * d)\n", " for k in range(d):\n", " A0[k * rank : (k + 1) * rank, :, k] = 0\n", " mat0 = Lambda_x @ A.T\n", " mat1 = np.einsum('kij, jt -> kit', A.reshape([d, rank, rank]), Lambda_x)\n", " mat2 = np.einsum('kit, kjt -> ij', mat1, A.reshape([d, rank, rank]))\n", " \n", " var1 = kr_prod(V, U).T\n", " var2 = kr_prod(var1, var1)\n", " var3 = (var2 @ ten2mat(tau_ind[:, :, - back_step :], 2).T).reshape([rank, rank, back_step]) + Lambda_x[:, :, None]\n", " var4 = var1 @ ten2mat(tau_sparse_tensor[:, :, - back_step :], 2).T\n", " for t in range(dim3 - back_step, dim3):\n", " Mt = np.zeros((rank, rank))\n", " Nt = np.zeros(rank)\n", " Qt = mat0 @ X[t - time_lags, :].reshape(rank * d)\n", " index = list(range(0, d))\n", " if t >= dim3 - tmax and t < dim3 - tmin:\n", " index = list(np.where(t + time_lags < dim3))[0]\n", " if t < dim3 - tmin:\n", " Mt = mat2.copy()\n", " temp = np.zeros((rank * d, len(index)))\n", " n = 0\n", " for k in index:\n", " temp[:, n] = X[t + time_lags[k] - time_lags, :].reshape(rank * d)\n", " n += 1\n", " temp0 = X[t + time_lags[index], :].T - np.einsum('ijk, ik -> jk', A0[:, :, index], temp)\n", " Nt = np.einsum('kij, jk -> i', mat1[index, :, :], temp0)\n", " var3[:, :, t + back_step - dim3] = var3[:, :, t + back_step - dim3] + Mt\n", " X[t, :] = mvnrnd_pre(solve(var3[:, :, t + back_step - dim3], \n", " var4[:, t + back_step - dim3] + Nt + Qt), var3[:, :, t + back_step - dim3])\n", " return X" ] }, { "cell_type": "code", "execution_count": 11, "metadata": {}, "outputs": [], "source": [ "def BTTF_partial(dense_tensor, sparse_tensor, init, rank, time_lags, burn_iter, gibbs_iter, multi_step = 1, gamma = 10):\n", " \"\"\"Bayesian Temporal Tensor Factorization, BTTF.\"\"\"\n", " \n", " dim1, dim2, dim3 = sparse_tensor.shape\n", " U_plus = init[\"U_plus\"]\n", " V_plus = init[\"V_plus\"]\n", " X_plus = init[\"X_plus\"]\n", " A_plus = init[\"A_plus\"]\n", " Sigma_plus = init[\"Sigma_plus\"]\n", " tau_plus = init[\"tau_plus\"]\n", " if np.isnan(sparse_tensor).any() == False:\n", " ind = sparse_tensor != 0\n", " pos_obs = np.where(ind)\n", " elif np.isnan(sparse_tensor).any() == True:\n", " ind = ~np.isnan(sparse_tensor)\n", " pos_obs = np.where(ind)\n", " sparse_tensor[np.isnan(sparse_tensor)] = 0\n", " X_new_plus = np.zeros((dim3 + multi_step, rank, gibbs_iter))\n", " tensor_new_plus = np.zeros((dim1, dim2, multi_step))\n", " back_step = gamma * multi_step\n", " for it in range(gibbs_iter):\n", " tau_ind = tau_plus[it] * ind\n", " tau_sparse_tensor = tau_plus[it] * sparse_tensor\n", " X = sample_factor_x_partial(tau_sparse_tensor, tau_ind, time_lags, U_plus[:, :, it], V_plus[:, :, it],\n", " X_plus[:, :, it], A_plus[:, :, it], inv(Sigma_plus[:, :, it]), back_step)\n", " X0 = ar4cast(A_plus[:, :, it], X, Sigma_plus[:, :, it], time_lags, multi_step)\n", " X_new_plus[:, :, it] = X0\n", " tensor_new_plus += np.einsum('is, js, ts -> ijt', U_plus[:, :, it], V_plus[:, :, it], X0[- multi_step :, :])\n", " tensor_hat = tensor_new_plus / gibbs_iter\n", " tensor_hat[tensor_hat < 0] = 0\n", " \n", " return tensor_hat, U_plus, V_plus, X_new_plus, A_plus, Sigma_plus, tau_plus" ] }, { "cell_type": "code", "execution_count": 12, "metadata": {}, "outputs": [], "source": [ "from ipywidgets import IntProgress\n", "from IPython.display import display\n", "\n", "def BTTF_forecast(dense_tensor, sparse_tensor, pred_step, multi_step, \n", " rank, time_lags, burn_iter, gibbs_iter, gamma = 10):\n", " dim1, dim2, T = dense_tensor.shape\n", " start_time = T - pred_step\n", " max_count = int(np.ceil(pred_step / multi_step))\n", " tensor_hat = np.zeros((dim1, dim2, max_count * multi_step))\n", " f = IntProgress(min = 0, max = max_count) # instantiate the bar\n", " display(f) # display the bar\n", " for t in range(max_count):\n", " if t == 0:\n", " init = {\"U\": 0.1 * np.random.randn(dim1, rank), \n", " \"V\": 0.1 * np.random.randn(dim2, rank),\n", " \"X\": 0.1 * np.random.randn(start_time, rank)}\n", " tensor, U, V, X_new, A, Sigma, tau = BTTF(dense_tensor[:, :, : start_time], \n", " sparse_tensor[:, :, : start_time], init, rank, time_lags, burn_iter, gibbs_iter, multi_step)\n", " else:\n", " init = {\"U_plus\": U, \"V_plus\": V, \"X_plus\": X_new, \"A_plus\": A, \"Sigma_plus\": Sigma, \"tau_plus\": tau}\n", " tensor, U, V, X_new, A, Sigma, tau = BTTF_partial(dense_tensor[:, :, : start_time + t * multi_step], \n", " sparse_tensor[:, :, : start_time + t * multi_step], init, \n", " rank, time_lags, burn_iter, gibbs_iter, multi_step, gamma)\n", " tensor_hat[:, :, t * multi_step : (t + 1) * multi_step] = tensor[:, :, - multi_step :]\n", " f.value = t\n", " small_dense_tensor = dense_tensor[:, :, start_time : T]\n", " pos = np.where(small_dense_tensor != 0)\n", " print('Prediction MAPE: {:.6}'.format(compute_mape(small_dense_tensor[pos], tensor_hat[pos])))\n", " print('Prediction RMSE: {:.6}'.format(compute_rmse(small_dense_tensor[pos], tensor_hat[pos])))\n", " print()\n", " return tensor_hat" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Eavluation on NYC Taxi Flow Data" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "**Scenario setting**:\n", "\n", "- Tensor size: $30\\times 30\\times 1461$ (origin, destination, time)\n", "- Test on original data\n" ] }, { "cell_type": "code", "execution_count": 13, "metadata": {}, "outputs": [], "source": [ "import scipy.io\n", "import warnings\n", "warnings.simplefilter('ignore')\n", "\n", "dense_tensor = scipy.io.loadmat('../datasets/NYC-data-set/tensor.mat')['tensor'].astype(np.float32)\n", "sparse_tensor = dense_tensor.copy()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "**Model setting**:\n", "\n", "- Low rank: 30\n", "- Total (rolling) prediction horizons: 7 * 24\n", "- Time lags: {1, 2, 24, 24 + 1, 24 + 2, 7 * 24, 7 * 24 + 1, 7 * 24 + 2}\n", "- The number of burn-in iterations: 1000\n", "- The number of Gibbs iterations: 200" ] }, { "cell_type": "code", "execution_count": 14, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Prediction time horizon (delta) = 2.\n" ] }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "14083e42c35e40d5b650b866fe80110d", "version_major": 2, "version_minor": 0 }, "text/plain": [ "IntProgress(value=0, max=84)" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "Iter: 500\n", "MAPE: nan\n", "RMSE: nan\n", "\n", "Iter: 1000\n", "MAPE: nan\n", "RMSE: nan\n", "\n", "Imputation MAPE: nan\n", "Imputation RMSE: nan\n", "\n", "Prediction MAPE: 0.539154\n", "Prediction RMSE: 5.10179\n", "\n", "Running time: 1525 seconds\n", "\n", "Prediction time horizon (delta) = 4.\n" ] }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "ee3627fc9f0642a0908ae4c709e5fb2a", "version_major": 2, "version_minor": 0 }, "text/plain": [ "IntProgress(value=0, max=42)" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "Iter: 500\n", "MAPE: nan\n", "RMSE: nan\n", "\n", "Iter: 1000\n", "MAPE: nan\n", "RMSE: nan\n", "\n", "Imputation MAPE: nan\n", "Imputation RMSE: nan\n", "\n", "Prediction MAPE: 0.546757\n", "Prediction RMSE: 5.17932\n", "\n", "Running time: 1441 seconds\n", "\n", "Prediction time horizon (delta) = 6.\n" ] }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "e4f2e2d6cc0c4579be875cd300038673", "version_major": 2, "version_minor": 0 }, "text/plain": [ "IntProgress(value=0, max=28)" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "Iter: 500\n", "MAPE: nan\n", "RMSE: nan\n", "\n", "Iter: 1000\n", "MAPE: nan\n", "RMSE: nan\n", "\n", "Imputation MAPE: nan\n", "Imputation RMSE: nan\n", "\n", "Prediction MAPE: 0.56155\n", "Prediction RMSE: 5.25294\n", "\n", "Running time: 1312 seconds\n", "\n" ] } ], "source": [ "import time\n", "rank = 30\n", "pred_step = 7 * 24\n", "time_lags = np.array([1, 2, 3, 24, 25, 26, 7 * 24, 7 * 24 + 1, 7 * 24 + 2])\n", "burn_iter = 1000\n", "gibbs_iter = 200\n", "for multi_step in [2, 4, 6]:\n", " start = time.time()\n", " print('Prediction time horizon (delta) = {}.'.format(multi_step))\n", " tensor_hat = BTTF_forecast(dense_tensor, sparse_tensor, pred_step, multi_step, \n", " rank, time_lags, burn_iter, gibbs_iter)\n", " end = time.time()\n", " print('Running time: %d seconds'%(end - start))\n", " print()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "**Scenario setting**:\n", "\n", "- Tensor size: $30\\times 30\\times 1461$ (origin, destination, time)\n", "- Random missing (RM)\n", "- 40% missing rate\n" ] }, { "cell_type": "code", "execution_count": 15, "metadata": {}, "outputs": [], "source": [ "import scipy.io\n", "import numpy as np\n", "np.random.seed(1000)\n", "\n", "dense_tensor = scipy.io.loadmat('../datasets/NYC-data-set/tensor.mat')['tensor'].astype(np.float32)\n", "dim = dense_tensor.shape\n", "missing_rate = 0.4 # Random missing (RM)\n", "sparse_tensor = dense_tensor * np.round(np.random.rand(dim[0], dim[1], dim[2]) + 0.5 - missing_rate)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "**Model setting**:\n", "\n", "- Low rank: 30\n", "- Total (rolling) prediction horizons: 7 * 24\n", "- Time lags: {1, 2, 24, 24 + 1, 24 + 2, 7 * 24, 7 * 24 + 1, 7 * 24 + 2}\n", "- The number of burn-in iterations: 1000\n", "- The number of Gibbs iterations: 200" ] }, { "cell_type": "code", "execution_count": 16, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Prediction time horizon (delta) = 2.\n" ] }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "546c9a06d74642e5b91dfa3d161f8f16", "version_major": 2, "version_minor": 0 }, "text/plain": [ "IntProgress(value=0, max=84)" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "Iter: 500\n", "MAPE: 0.476403\n", "RMSE: 5.1438\n", "\n", "Iter: 1000\n", "MAPE: 0.489101\n", "RMSE: 5.30296\n", "\n", "Imputation MAPE: 0.490007\n", "Imputation RMSE: 5.30489\n", "\n", "Prediction MAPE: 0.482274\n", "Prediction RMSE: 5.95222\n", "\n", "Running time: 1410 seconds\n", "\n", "Prediction time horizon (delta) = 4.\n" ] }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "633e1041aec34e67be1e7ac866bdc27a", "version_major": 2, "version_minor": 0 }, "text/plain": [ "IntProgress(value=0, max=42)" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "Iter: 500\n", "MAPE: 0.503474\n", "RMSE: 5.15323\n", "\n", "Iter: 1000\n", "MAPE: 0.505224\n", "RMSE: 5.30834\n", "\n", "Imputation MAPE: 0.505934\n", "Imputation RMSE: 5.33052\n", "\n", "Prediction MAPE: 0.537489\n", "Prediction RMSE: 5.32858\n", "\n", "Running time: 1362 seconds\n", "\n", "Prediction time horizon (delta) = 6.\n" ] }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "78ea28370836458d8aae9a65e71d598e", "version_major": 2, "version_minor": 0 }, "text/plain": [ "IntProgress(value=0, max=28)" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "Iter: 500\n", "MAPE: 0.50377\n", "RMSE: 5.14515\n", "\n", "Iter: 1000\n", "MAPE: 0.521959\n", "RMSE: 5.29029\n", "\n", "Imputation MAPE: 0.523315\n", "Imputation RMSE: 5.30101\n", "\n", "Prediction MAPE: 0.561496\n", "Prediction RMSE: 5.4019\n", "\n", "Running time: 1328 seconds\n", "\n" ] } ], "source": [ "import time\n", "rank = 30\n", "pred_step = 7 * 24\n", "time_lags = np.array([1, 2, 3, 24, 25, 26, 7 * 24, 7 * 24 + 1, 7 * 24 + 2])\n", "burn_iter = 1000\n", "gibbs_iter = 200\n", "for multi_step in [2, 4, 6]:\n", " start = time.time()\n", " print('Prediction time horizon (delta) = {}.'.format(multi_step))\n", " tensor_hat = BTTF_forecast(dense_tensor, sparse_tensor, pred_step, multi_step, \n", " rank, time_lags, burn_iter, gibbs_iter)\n", " end = time.time()\n", " print('Running time: %d seconds'%(end - start))\n", " print()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "**Scenario setting**:\n", "\n", "- Tensor size: $30\\times 30\\times 1461$ (origin, destination, time)\n", "- Random missing (RM)\n", "- 60% missing rate\n" ] }, { "cell_type": "code", "execution_count": 17, "metadata": {}, "outputs": [], "source": [ "import scipy.io\n", "import numpy as np\n", "np.random.seed(1000)\n", "\n", "dense_tensor = scipy.io.loadmat('../datasets/NYC-data-set/tensor.mat')['tensor'].astype(np.float32)\n", "dim = dense_tensor.shape\n", "missing_rate = 0.6 # Random missing (RM)\n", "sparse_tensor = dense_tensor * np.round(np.random.rand(dim[0], dim[1], dim[2]) + 0.5 - missing_rate)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "**Model setting**:\n", "\n", "- Low rank: 30\n", "- Total (rolling) prediction horizons: 7 * 24\n", "- Time lags: {1, 2, 24, 24 + 1, 24 + 2, 7 * 24, 7 * 24 + 1, 7 * 24 + 2}\n", "- The number of burn-in iterations: 1000\n", "- The number of Gibbs iterations: 200" ] }, { "cell_type": "code", "execution_count": 18, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Prediction time horizon (delta) = 2.\n" ] }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "081c615509b8451a87567faa227b73dd", "version_major": 2, "version_minor": 0 }, "text/plain": [ "IntProgress(value=0, max=84)" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "Iter: 500\n", "MAPE: 0.485907\n", "RMSE: 5.31316\n", "\n", "Iter: 1000\n", "MAPE: 0.494965\n", "RMSE: 5.57816\n", "\n", "Imputation MAPE: 0.495821\n", "Imputation RMSE: 5.60975\n", "\n", "Prediction MAPE: 0.488657\n", "Prediction RMSE: 6.72835\n", "\n", "Running time: 1554 seconds\n", "\n", "Prediction time horizon (delta) = 4.\n" ] }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "2cf2a58cb29b42eb9f375e97aed0df8d", "version_major": 2, "version_minor": 0 }, "text/plain": [ "IntProgress(value=0, max=42)" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "Iter: 500\n", "MAPE: 0.508972\n", "RMSE: 5.29687\n", "\n", "Iter: 1000\n", "MAPE: 0.523079\n", "RMSE: 5.52528\n", "\n", "Imputation MAPE: 0.510566\n", "Imputation RMSE: 5.54642\n", "\n", "Prediction MAPE: 0.529534\n", "Prediction RMSE: 5.43116\n", "\n", "Running time: 1382 seconds\n", "\n", "Prediction time horizon (delta) = 6.\n" ] }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "ebb67ecb8b194a24b0891cc1d4ada855", "version_major": 2, "version_minor": 0 }, "text/plain": [ "IntProgress(value=0, max=28)" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "Iter: 500\n", "MAPE: 0.513433\n", "RMSE: 5.32022\n", "\n", "Iter: 1000\n", "MAPE: 0.535674\n", "RMSE: 5.49721\n", "\n", "Imputation MAPE: 0.535587\n", "Imputation RMSE: 5.50608\n", "\n", "Prediction MAPE: 0.552921\n", "Prediction RMSE: 5.46179\n", "\n", "Running time: 1375 seconds\n", "\n" ] } ], "source": [ "import time\n", "rank = 30\n", "pred_step = 7 * 24\n", "time_lags = np.array([1, 2, 3, 24, 25, 26, 7 * 24, 7 * 24 + 1, 7 * 24 + 2])\n", "burn_iter = 1000\n", "gibbs_iter = 200\n", "for multi_step in [2, 4, 6]:\n", " start = time.time()\n", " print('Prediction time horizon (delta) = {}.'.format(multi_step))\n", " tensor_hat = BTTF_forecast(dense_tensor, sparse_tensor, pred_step, multi_step, \n", " rank, time_lags, burn_iter, gibbs_iter)\n", " end = time.time()\n", " print('Running time: %d seconds'%(end - start))\n", " print()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "**Scenario setting**:\n", "\n", "- Tensor size: $30\\times 30\\times 1461$ (origin, destination, time)\n", "- Non-random missing (NM)\n", "- 40% missing rate\n" ] }, { "cell_type": "code", "execution_count": 19, "metadata": {}, "outputs": [], "source": [ "import scipy.io\n", "import numpy as np\n", "np.random.seed(1000)\n", "\n", "dense_tensor = scipy.io.loadmat('../datasets/NYC-data-set/tensor.mat')['tensor']\n", "dim = dense_tensor.shape\n", "nm_tensor = np.random.rand(dim[0], dim[1], dim[2])\n", "missing_rate = 0.4 # Non-random missing (NM)\n", "binary_tensor = np.zeros(dense_tensor.shape)\n", "for i1 in range(dim[0]):\n", " for i2 in range(dim[1]):\n", " for i3 in range(61):\n", " binary_tensor[i1, i2, i3 * 24 : (i3 + 1) * 24] = np.round(nm_tensor[i1, i2, i3] + 0.5 - missing_rate)\n", "sparse_tensor = dense_tensor * binary_tensor" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "**Model setting**:\n", "\n", "- Low rank: 30\n", "- Total (rolling) prediction horizons: 7 * 24\n", "- Time lags: {1, 2, 24, 24 + 1, 24 + 2, 7 * 24, 7 * 24 + 1, 7 * 24 + 2}\n", "- The number of burn-in iterations: 1000\n", "- The number of Gibbs iterations: 200" ] }, { "cell_type": "code", "execution_count": 20, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Prediction time horizon (delta) = 2.\n" ] }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "d933359c78164d8da4a6618703a97f24", "version_major": 2, "version_minor": 0 }, "text/plain": [ "IntProgress(value=0, max=84)" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "Iter: 500\n", "MAPE: 0.48005\n", "RMSE: 5.33599\n", "\n", "Iter: 1000\n", "MAPE: 0.483847\n", "RMSE: 5.50755\n", "\n", "Imputation MAPE: 0.483285\n", "Imputation RMSE: 5.50448\n", "\n", "Prediction MAPE: 0.481164\n", "Prediction RMSE: 6.14635\n", "\n", "Running time: 1492 seconds\n", "\n", "Prediction time horizon (delta) = 4.\n" ] }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "438c09f9d6b24c17a5165dcef33b6388", "version_major": 2, "version_minor": 0 }, "text/plain": [ "IntProgress(value=0, max=42)" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "Iter: 500\n", "MAPE: 0.515712\n", "RMSE: 5.27328\n", "\n", "Iter: 1000\n", "MAPE: 0.528275\n", "RMSE: 5.40139\n", "\n", "Imputation MAPE: 0.532998\n", "Imputation RMSE: 5.44158\n", "\n", "Prediction MAPE: 0.548801\n", "Prediction RMSE: 5.31528\n", "\n", "Running time: 1446 seconds\n", "\n", "Prediction time horizon (delta) = 6.\n" ] }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "0bef900d83d449ae93061d51708b9401", "version_major": 2, "version_minor": 0 }, "text/plain": [ "IntProgress(value=0, max=28)" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "Iter: 500\n", "MAPE: 0.508245\n", "RMSE: 5.30643\n", "\n", "Iter: 1000\n", "MAPE: 0.522899\n", "RMSE: 5.47282\n", "\n", "Imputation MAPE: 0.525004\n", "Imputation RMSE: 5.43607\n", "\n", "Prediction MAPE: 0.548346\n", "Prediction RMSE: 5.35651\n", "\n", "Running time: 1308 seconds\n", "\n" ] } ], "source": [ "import time\n", "rank = 30\n", "pred_step = 7 * 24\n", "time_lags = np.array([1, 2, 3, 24, 25, 26, 7 * 24, 7 * 24 + 1, 7 * 24 + 2])\n", "burn_iter = 1000\n", "gibbs_iter = 200\n", "for multi_step in [2, 4, 6]:\n", " start = time.time()\n", " print('Prediction time horizon (delta) = {}.'.format(multi_step))\n", " tensor_hat = BTTF_forecast(dense_tensor, sparse_tensor, pred_step, multi_step, \n", " rank, time_lags, burn_iter, gibbs_iter)\n", " end = time.time()\n", " print('Running time: %d seconds'%(end - start))\n", " print()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Evaluation on Pacific Surface Temperature Data\n", "\n", "**Scenario setting**:\n", "\n", "- Tensor size: $30\\times 84\\times 396$ (location x, location y, month)\n", "- Test on original data" ] }, { "cell_type": "code", "execution_count": 13, "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "import warnings\n", "warnings.simplefilter('ignore')\n", "\n", "dense_tensor = np.load('../datasets/Temperature-data-set/tensor.npy').astype(np.float32)\n", "pos = np.where(dense_tensor[:, 0, :] > 50)\n", "dense_tensor[pos[0], :, pos[1]] = 0\n", "sparse_tensor = dense_tensor.copy()" ] }, { "cell_type": "code", "execution_count": 14, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Prediction time horizon (delta) = 2.\n" ] }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "d16eb985eee64cba8d010f1a036cc2d4", "version_major": 2, "version_minor": 0 }, "text/plain": [ "IntProgress(value=0, max=60)" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "Iter: 500\n", "MAPE: nan\n", "RMSE: nan\n", "\n", "Iter: 1000\n", "MAPE: nan\n", "RMSE: nan\n", "\n", "Imputation MAPE: nan\n", "Imputation RMSE: nan\n", "\n", "Prediction MAPE: 0.0285055\n", "Prediction RMSE: 0.924666\n", "\n", "Running time: 1222 seconds\n", "\n", "Prediction time horizon (delta) = 4.\n" ] }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "91d3e4317fe84473b7880c4c659a2413", "version_major": 2, "version_minor": 0 }, "text/plain": [ "IntProgress(value=0, max=30)" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "Iter: 500\n", "MAPE: nan\n", "RMSE: nan\n", "\n", "Iter: 1000\n", "MAPE: nan\n", "RMSE: nan\n", "\n", "Imputation MAPE: nan\n", "Imputation RMSE: nan\n", "\n", "Prediction MAPE: 0.0258453\n", "Prediction RMSE: 0.83646\n", "\n", "Running time: 1127 seconds\n", "\n", "Prediction time horizon (delta) = 6.\n" ] }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "7a2f8e402a094585b0f83b0cda988ec2", "version_major": 2, "version_minor": 0 }, "text/plain": [ "IntProgress(value=0, max=20)" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "Iter: 500\n", "MAPE: nan\n", "RMSE: nan\n", "\n", "Iter: 1000\n", "MAPE: nan\n", "RMSE: nan\n", "\n", "Imputation MAPE: nan\n", "Imputation RMSE: nan\n", "\n", "Prediction MAPE: 0.0281043\n", "Prediction RMSE: 0.908133\n", "\n", "Running time: 1100 seconds\n", "\n" ] } ], "source": [ "import time\n", "rank = 30\n", "pred_step = 10 * 12\n", "time_lags = np.array([1, 2, 3, 12, 13, 14, 2 * 12, 2 * 12 + 1, 2 * 12 + 2])\n", "burn_iter = 1000\n", "gibbs_iter = 200\n", "for multi_step in [2, 4, 6]:\n", " start = time.time()\n", " print('Prediction time horizon (delta) = {}.'.format(multi_step))\n", " tensor_hat = BTTF_forecast(dense_tensor, sparse_tensor, pred_step, multi_step, \n", " rank, time_lags, burn_iter, gibbs_iter, gamma = 30)\n", " end = time.time()\n", " print('Running time: %d seconds'%(end - start))\n", " print()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "**Scenario setting**:\n", "\n", "- Tensor size: $30\\times 84\\times 396$ (location x, location y, month)\n", "- Random missing (RM)\n", "- 40% missing rate" ] }, { "cell_type": "code", "execution_count": 19, "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "np.random.seed(1000)\n", "\n", "dense_tensor = np.load('../datasets/Temperature-data-set/tensor.npy').astype(np.float32)\n", "pos = np.where(dense_tensor[:, 0, :] > 50)\n", "dense_tensor[pos[0], :, pos[1]] = 0\n", "random_tensor = np.random.rand(dense_tensor.shape[0], dense_tensor.shape[1], dense_tensor.shape[2])\n", "missing_rate = 0.4\n", "\n", "## Random missing (RM)\n", "binary_tensor = np.round(random_tensor + 0.5 - missing_rate)\n", "sparse_tensor = dense_tensor.copy()\n", "sparse_tensor[binary_tensor == 0] = np.nan\n", "sparse_tensor[sparse_tensor == 0] = np.nan" ] }, { "cell_type": "code", "execution_count": 25, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Prediction time horizon (delta) = 2.\n" ] }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "066ab72d972048d0ae243558a5d58751", "version_major": 2, "version_minor": 0 }, "text/plain": [ "IntProgress(value=0, max=60)" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "Iter: 500\n", "MAPE: 0.0144821\n", "RMSE: 0.482159\n", "\n", "Iter: 1000\n", "MAPE: 0.0143581\n", "RMSE: 0.476049\n", "\n", "Imputation MAPE: 0.0142973\n", "Imputation RMSE: 0.474065\n", "\n", "Prediction MAPE: 0.0231417\n", "Prediction RMSE: 0.758236\n", "\n", "Running time: 1349 seconds\n", "\n" ] } ], "source": [ "import time\n", "rank = 30\n", "pred_step = 10 * 12\n", "time_lags = np.array([1, 2, 3, 12, 13, 14, 2 * 12, 2 * 12 + 1, 2 * 12 + 2])\n", "burn_iter = 1000\n", "gibbs_iter = 200\n", "multi_step = 2\n", "start = time.time()\n", "print('Prediction time horizon (delta) = {}.'.format(multi_step))\n", "tensor_hat = BTTF_forecast(dense_tensor, sparse_tensor, pred_step, multi_step, \n", " rank, time_lags, burn_iter, gibbs_iter, gamma = 30)\n", "end = time.time()\n", "print('Running time: %d seconds'%(end - start))\n", "print()" ] }, { "cell_type": "code", "execution_count": 26, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Prediction time horizon (delta) = 4.\n" ] }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "816cef269c174f9b8ebf750b6ffb1520", "version_major": 2, "version_minor": 0 }, "text/plain": [ "IntProgress(value=0, max=30)" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "Iter: 500\n", "MAPE: 0.014985\n", "RMSE: 0.498448\n", "\n", "Iter: 1000\n", "MAPE: 0.0147059\n", "RMSE: 0.487322\n", "\n", "Imputation MAPE: 0.0145875\n", "Imputation RMSE: 0.483447\n", "\n", "Prediction MAPE: 0.0249583\n", "Prediction RMSE: 0.809355\n", "\n", "Running time: 1179 seconds\n", "\n", "Prediction time horizon (delta) = 6.\n" ] }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "b61f2a09ea5940f7b37301b2aee9b8df", "version_major": 2, "version_minor": 0 }, "text/plain": [ "IntProgress(value=0, max=20)" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "Iter: 500\n", "MAPE: 0.0150258\n", "RMSE: 0.499392\n", "\n", "Iter: 1000\n", "MAPE: 0.0148409\n", "RMSE: 0.491953\n", "\n", "Imputation MAPE: 0.0147268\n", "Imputation RMSE: 0.488867\n", "\n", "Prediction MAPE: 0.0257521\n", "Prediction RMSE: 0.840748\n", "\n", "Running time: 1240 seconds\n", "\n" ] } ], "source": [ "import time\n", "rank = 30\n", "pred_step = 10 * 12\n", "time_lags = np.array([1, 2, 3, 12, 13, 14, 2 * 12, 2 * 12 + 1, 2 * 12 + 2])\n", "burn_iter = 1000\n", "gibbs_iter = 200\n", "for multi_step in [4, 6]:\n", " start = time.time()\n", " print('Prediction time horizon (delta) = {}.'.format(multi_step))\n", " tensor_hat = BTTF_forecast(dense_tensor, sparse_tensor, pred_step, multi_step, \n", " rank, time_lags, burn_iter, gibbs_iter, gamma = 30)\n", " end = time.time()\n", " print('Running time: %d seconds'%(end - start))\n", " print()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "**Scenario setting**:\n", "\n", "- Tensor size: $30\\times 84\\times 396$ (location x, location y, month)\n", "- Random missing (RM)\n", "- 60% missing rate" ] }, { "cell_type": "code", "execution_count": 32, "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "np.random.seed(1000)\n", "\n", "dense_tensor = np.load('../datasets/Temperature-data-set/tensor.npy').astype(np.float32)\n", "pos = np.where(dense_tensor[:, 0, :] > 50)\n", "dense_tensor[pos[0], :, pos[1]] = 0\n", "random_tensor = np.random.rand(dense_tensor.shape[0], dense_tensor.shape[1], dense_tensor.shape[2])\n", "missing_rate = 0.6\n", "\n", "## Random missing (RM)\n", "binary_tensor = np.round(random_tensor + 0.5 - missing_rate)\n", "sparse_tensor = dense_tensor.copy()\n", "sparse_tensor[binary_tensor == 0] = np.nan\n", "sparse_tensor[sparse_tensor == 0] = np.nan" ] }, { "cell_type": "code", "execution_count": 33, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Prediction time horizon (delta) = 2.\n" ] }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "02fbc0ca25d442d6991fd3ad8feda478", "version_major": 2, "version_minor": 0 }, "text/plain": [ "IntProgress(value=0, max=60)" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "Iter: 500\n", "MAPE: 0.0145609\n", "RMSE: 0.485104\n", "\n", "Iter: 1000\n", "MAPE: 0.0144643\n", "RMSE: 0.4799\n", "\n", "Imputation MAPE: 0.0144504\n", "Imputation RMSE: 0.479404\n", "\n", "Prediction MAPE: 0.461062\n", "Prediction RMSE: 12.2373\n", "\n", "Running time: 1320 seconds\n", "\n" ] } ], "source": [ "import time\n", "rank = 30\n", "pred_step = 10 * 12\n", "time_lags = np.array([1, 2, 3, 12, 13, 14, 2 * 12, 2 * 12 + 1, 2 * 12 + 2])\n", "burn_iter = 1000\n", "gibbs_iter = 200\n", "multi_step = 2\n", "start = time.time()\n", "print('Prediction time horizon (delta) = {}.'.format(multi_step))\n", "tensor_hat = BTTF_forecast(dense_tensor, sparse_tensor, pred_step, multi_step, \n", " rank, time_lags, burn_iter, gibbs_iter, gamma = 30)\n", "end = time.time()\n", "print('Running time: %d seconds'%(end - start))\n", "print()" ] }, { "cell_type": "code", "execution_count": 34, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Prediction time horizon (delta) = 2.\n" ] }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "3020107b53644dbd90f5156f62d6aca3", "version_major": 2, "version_minor": 0 }, "text/plain": [ "IntProgress(value=0, max=60)" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "Iter: 500\n", "MAPE: 0.0147971\n", "RMSE: 0.493339\n", "\n", "Iter: 1000\n", "MAPE: 0.0146462\n", "RMSE: 0.487619\n", "\n", "Imputation MAPE: 0.0145599\n", "Imputation RMSE: 0.484679\n", "\n", "Prediction MAPE: 0.0235214\n", "Prediction RMSE: 0.76378\n", "\n", "Running time: 1645 seconds\n", "\n" ] } ], "source": [ "import time\n", "rank = 30\n", "pred_step = 10 * 12\n", "time_lags = np.array([1, 2, 3, 12, 13, 14, 2 * 12, 2 * 12 + 1, 2 * 12 + 2])\n", "burn_iter = 1000\n", "gibbs_iter = 200\n", "multi_step = 2\n", "start = time.time()\n", "print('Prediction time horizon (delta) = {}.'.format(multi_step))\n", "tensor_hat = BTTF_forecast(dense_tensor, sparse_tensor, pred_step, multi_step, \n", " rank, time_lags, burn_iter, gibbs_iter, gamma = 40)\n", "end = time.time()\n", "print('Running time: %d seconds'%(end - start))\n", "print()" ] }, { "cell_type": "code", "execution_count": 29, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Prediction time horizon (delta) = 4.\n" ] }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "232da9b223da4ae6bd7f242ecce46aff", "version_major": 2, "version_minor": 0 }, "text/plain": [ "IntProgress(value=0, max=30)" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "Iter: 500\n", "MAPE: 0.0147971\n", "RMSE: 0.493339\n", "\n", "Iter: 1000\n", "MAPE: 0.0146462\n", "RMSE: 0.487619\n", "\n", "Imputation MAPE: 0.0145574\n", "Imputation RMSE: 0.484643\n", "\n", "Prediction MAPE: 0.0243273\n", "Prediction RMSE: 0.787939\n", "\n", "Running time: 1199 seconds\n", "\n", "Prediction time horizon (delta) = 6.\n" ] }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "c42e7b7504de4648bc81877da6a3abd4", "version_major": 2, "version_minor": 0 }, "text/plain": [ "IntProgress(value=0, max=20)" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "Iter: 500\n", "MAPE: 0.0149478\n", "RMSE: 0.499435\n", "\n", "Iter: 1000\n", "MAPE: 0.0146285\n", "RMSE: 0.484588\n", "\n", "Imputation MAPE: 0.0145979\n", "Imputation RMSE: 0.48322\n", "\n", "Prediction MAPE: 0.0322035\n", "Prediction RMSE: 1.03053\n", "\n", "Running time: 1261 seconds\n", "\n" ] } ], "source": [ "import time\n", "rank = 30\n", "pred_step = 10 * 12\n", "time_lags = np.array([1, 2, 3, 12, 13, 14, 2 * 12, 2 * 12 + 1, 2 * 12 + 2])\n", "burn_iter = 1000\n", "gibbs_iter = 200\n", "for multi_step in [4, 6]:\n", " start = time.time()\n", " print('Prediction time horizon (delta) = {}.'.format(multi_step))\n", " tensor_hat = BTTF_forecast(dense_tensor, sparse_tensor, pred_step, multi_step, \n", " rank, time_lags, burn_iter, gibbs_iter, gamma = 30)\n", " end = time.time()\n", " print('Running time: %d seconds'%(end - start))\n", " print()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "**Scenario setting**:\n", "\n", "- Tensor size: $30\\times 84\\times 396$ (location x, location y, month)\n", "- Non-random missing (NM)\n", "- 40% missing rate" ] }, { "cell_type": "code", "execution_count": 35, "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "np.random.seed(1000)\n", "\n", "dense_tensor = np.load('../datasets/Temperature-data-set/tensor.npy').astype(np.float32)\n", "pos = np.where(dense_tensor[:, 0, :] > 50)\n", "dense_tensor[pos[0], :, pos[1]] = 0\n", "random_tensor = np.random.rand(dense_tensor.shape[0], dense_tensor.shape[1], int(dense_tensor.shape[2] / 3))\n", "missing_rate = 0.4\n", "\n", "## Non-random missing (NM)\n", "binary_tensor = np.zeros(dense_tensor.shape)\n", "for i1 in range(dense_tensor.shape[0]):\n", " for i2 in range(dense_tensor.shape[1]):\n", " for i3 in range(int(dense_tensor.shape[2] / 3)):\n", " binary_tensor[i1, i2, i3 * 3 : (i3 + 1) * 3] = np.round(random_tensor[i1, i2, i3] + 0.5 - missing_rate)\n", "sparse_tensor = dense_tensor.copy()\n", "sparse_tensor[binary_tensor == 0] = np.nan\n", "sparse_tensor[sparse_tensor == 0] = np.nan" ] }, { "cell_type": "code", "execution_count": 38, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Prediction time horizon (delta) = 2.\n" ] }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "9e1e36bdaa114ce28e5ad0237ae5b3fe", "version_major": 2, "version_minor": 0 }, "text/plain": [ "IntProgress(value=0, max=60)" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "Iter: 500\n", "MAPE: 0.0148901\n", "RMSE: 0.494736\n", "\n", "Iter: 1000\n", "MAPE: 0.0146504\n", "RMSE: 0.485204\n", "\n", "Imputation MAPE: 0.0145857\n", "Imputation RMSE: 0.482443\n", "\n", "Prediction MAPE: 0.0231671\n", "Prediction RMSE: 0.753352\n", "\n", "Running time: 1435 seconds\n", "\n" ] } ], "source": [ "import time\n", "rank = 30\n", "pred_step = 10 * 12\n", "time_lags = np.array([1, 2, 3, 12, 13, 14, 2 * 12, 2 * 12 + 1, 2 * 12 + 2])\n", "burn_iter = 1000\n", "gibbs_iter = 200\n", "multi_step = 2\n", "start = time.time()\n", "print('Prediction time horizon (delta) = {}.'.format(multi_step))\n", "tensor_hat = BTTF_forecast(dense_tensor, sparse_tensor, pred_step, multi_step, \n", " rank, time_lags, burn_iter, gibbs_iter, gamma = 40)\n", "end = time.time()\n", "print('Running time: %d seconds'%(end - start))\n", "print()s" ] }, { "cell_type": "code", "execution_count": 37, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Prediction time horizon (delta) = 4.\n" ] }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "844ba5e2b7ab4d94a44c82401b1b3919", "version_major": 2, "version_minor": 0 }, "text/plain": [ "IntProgress(value=0, max=30)" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "Iter: 500\n", "MAPE: 0.0149142\n", "RMSE: 0.495803\n", "\n", "Iter: 1000\n", "MAPE: 0.0145458\n", "RMSE: 0.482243\n", "\n", "Imputation MAPE: 0.0144844\n", "Imputation RMSE: 0.47983\n", "\n", "Prediction MAPE: 0.0241001\n", "Prediction RMSE: 0.788512\n", "\n", "Running time: 1308 seconds\n", "\n", "Prediction time horizon (delta) = 6.\n" ] }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "29ce23c2655644c3883b2c8075ff82ad", "version_major": 2, "version_minor": 0 }, "text/plain": [ "IntProgress(value=0, max=20)" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "Iter: 500\n", "MAPE: 0.014927\n", "RMSE: 0.497119\n", "\n", "Iter: 1000\n", "MAPE: 0.0144465\n", "RMSE: 0.479791\n", "\n", "Imputation MAPE: 0.0144483\n", "Imputation RMSE: 0.478898\n", "\n", "Prediction MAPE: 0.0244661\n", "Prediction RMSE: 0.799278\n", "\n", "Running time: 1346 seconds\n", "\n" ] } ], "source": [ "import time\n", "rank = 30\n", "pred_step = 10 * 12\n", "time_lags = np.array([1, 2, 3, 12, 13, 14, 2 * 12, 2 * 12 + 1, 2 * 12 + 2])\n", "burn_iter = 1000\n", "gibbs_iter = 200\n", "for multi_step in [4, 6]:\n", " start = time.time()\n", " print('Prediction time horizon (delta) = {}.'.format(multi_step))\n", " tensor_hat = BTTF_forecast(dense_tensor, sparse_tensor, pred_step, multi_step, \n", " rank, time_lags, burn_iter, gibbs_iter, gamma = 30)\n", " end = time.time()\n", " print('Running time: %d seconds'%(end - start))\n", " print()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### License\n", "\n", "
\n", "This work is released under the MIT license.\n", "
" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8.3" } }, "nbformat": 4, "nbformat_minor": 2 }