{ "cells": [
 { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [], "source": [
  "import numpy as np\n",
  "from scipy.linalg import pinv\n",
  "from scipy.special import logsumexp\n",
  "from sklearn.datasets import load_iris\n",
  "from sklearn.mixture import GaussianMixture as skGaussianMixture" ] },
 { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [
  "class GaussianMixture():\n",
  "    \"\"\"Gaussian mixture model with full covariance matrices, fit by EM.\n",
  "\n",
  "    Mirrors sklearn.mixture.GaussianMixture with init_params='random'\n",
  "    (responsibilities initialized from a uniform random draw).\n",
  "    \"\"\"\n",
  "    def __init__(self, n_components, max_iter, init_params, random_state):\n",
  "        self.n_components = n_components\n",
  "        self.random_state = random_state\n",
  "        self.max_iter = max_iter\n",
  "        self.init_params = init_params  # only 'random' initialization is implemented\n",
  "\n",
  "    def _estimate_log_prob_resp(self, X):\n",
  "        # Normalize weighted log probabilities into log responsibilities\n",
  "        # via logsumexp for numerical stability.\n",
  "        weighted_log_prob = self._estimate_weighted_log_prob(X)\n",
  "        log_prob_norm = logsumexp(weighted_log_prob, axis=1)\n",
  "        log_resp = weighted_log_prob - log_prob_norm[:, np.newaxis]\n",
  "        return log_prob_norm, log_resp\n",
  "\n",
  "    def _estimate_weighted_log_prob(self, X):\n",
  "        # log weights_[k] + log N(x | means_[k], covariances_[k]) for every\n",
  "        # (sample, component) pair; pinv guards against singular covariances.\n",
  "        log_prob = np.zeros((X.shape[0], self.n_components))\n",
  "        for k in range(self.n_components):\n",
  "            diff = X - self.means_[k]\n",
  "            log_prob[:, k] = (-0.5 * X.shape[1] * np.log(2 * np.pi)\n",
  "                              - 0.5 * np.log(np.linalg.det(self.covariances_[k]))\n",
  "                              - 0.5 * np.diag(np.dot(np.dot(diff, pinv(self.covariances_[k])), diff.T)))\n",
  "        weighted_log_prob = np.log(self.weights_) + log_prob\n",
  "        return weighted_log_prob\n",
  "\n",
  "    def _e_step(self, X):\n",
  "        # E step: mean per-sample log-likelihood and log responsibilities.\n",
  "        log_prob_norm, log_resp = self._estimate_log_prob_resp(X)\n",
  "        return np.mean(log_prob_norm), log_resp\n",
  "\n",
  "    def _m_step(self, X, resp):\n",
  "        # M step: re-estimate weights, means and covariances from the\n",
  "        # (exponentiated) responsibilities.\n",
  "        nk = resp.sum(axis=0)\n",
  "        weights = nk / X.shape[0]\n",
  "        means = np.dot(resp.T, X) / nk[:, np.newaxis]\n",
  "        covariances = np.empty((self.n_components, X.shape[1], X.shape[1]))\n",
  "        for k in range(self.n_components):\n",
  "            diff = X - means[k]\n",
  "            covariances[k] = np.dot(resp[:, k] * diff.T, diff) / nk[k]\n",
  "        return weights, means, covariances\n",
  "\n",
  "    def fit(self, X):\n",
  "        \"\"\"Run EM until the mean log-likelihood change falls below tol.\"\"\"\n",
  "        # BUG FIX: seed from the constructor argument; previously this was\n",
  "        # hard-coded to RandomState(0), silently ignoring self.random_state.\n",
  "        rng = np.random.RandomState(self.random_state)\n",
  "        resp = rng.rand(X.shape[0], self.n_components)\n",
  "        resp /= resp.sum(axis=1)[:, np.newaxis]\n",
  "        self.weights_, self.means_, self.covariances_ = self._m_step(X, resp)\n",
  "        lower_bound = -np.inf\n",
  "        self.converged_ = False\n",
  "        for n_iter in range(1, self.max_iter + 1):\n",
  "            prev_lower_bound = lower_bound\n",
  "            lower_bound, log_resp = self._e_step(X)\n",
  "            self.weights_, self.means_, self.covariances_ = self._m_step(X, np.exp(log_resp))\n",
  "            change = lower_bound - prev_lower_bound\n",
  "            # BUG FIX: always record the iteration count, so n_iter_ is\n",
  "            # defined even when the loop exhausts max_iter without converging\n",
  "            # (scikit-learn sets n_iter_ unconditionally as well).\n",
  "            self.n_iter_ = n_iter\n",
  "            if abs(change) < 1e-3:  # consistent with scikit-learn default tol\n",
  "                self.converged_ = True\n",
  "                break\n",
  "        return self\n",
  "\n",
  "    def predict_proba(self, X):\n",
  "        \"\"\"Posterior probability of each component for each sample.\"\"\"\n",
  "        _, log_resp = self._estimate_log_prob_resp(X)\n",
  "        return np.exp(log_resp)\n",
  "\n",
  "    def predict(self, X):\n",
  "        \"\"\"Most likely component label for each sample.\"\"\"\n",
  "        return np.argmax(self._estimate_weighted_log_prob(X), axis=1)" ] },
 { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [], "source": [
  "X, _ = load_iris(return_X_y=True)\n",
  "clf1 = GaussianMixture(n_components=3, max_iter=100,\n",
  "                       init_params=\"random\", random_state=0).fit(X)\n",
  "clf2 = skGaussianMixture(n_components=3, max_iter=100,\n",
  "                         init_params=\"random\", random_state=0).fit(X)\n",
  "assert np.allclose(clf1.weights_, clf2.weights_, atol=1e-4)\n",
  "assert np.allclose(clf1.means_, clf2.means_)\n",
  "assert np.allclose(clf1.covariances_, clf2.covariances_, atol=1e-4)\n",
  "prob1 = clf1.predict_proba(X)\n",
  "prob2 = clf2.predict_proba(X)\n",
  "assert np.allclose(prob1, prob2, atol=1e-3)\n",
  "pred1 = clf1.predict(X)\n",
  "pred2 = clf2.predict(X)\n",
  "assert np.array_equal(pred1, pred2)" ] } ],
 "metadata": {
  "kernelspec": { "display_name": "dev", "language": "python", "name": "dev" },
  "language_info": {
   "codemirror_mode": { "name": "ipython", "version": 3 },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.3" } },
"nbformat": 4, "nbformat_minor": 2 }