{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "Sascha Spors,\n", "Professorship Signal Theory and Digital Signal Processing,\n", "Institute of Communications Engineering (INT),\n", "Faculty of Computer Science and Electrical Engineering (IEF),\n", "University of Rostock,\n", "Germany\n", "\n", "# Data Driven Audio Signal Processing - A Tutorial with Computational Examples\n", "\n", "Master Course #24512\n", "\n", "- lecture: https://github.com/spatialaudio/data-driven-audio-signal-processing-lecture\n", "- tutorial: https://github.com/spatialaudio/data-driven-audio-signal-processing-exercise\n", "\n", "Feel free to contact lecturer frank.schultz@uni-rostock.de\n", "\n", "# PCA on Achieved Points of Written Examination " ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "import scipy\n", "from scipy.linalg import svd, diagsvd\n", "import matplotlib as mpl\n", "import matplotlib.pyplot as plt\n", "\n", "np.set_printoptions(precision=3, sign=' ', suppress=True)\n", "\n", "print(np.__version__) # tested with 1.26.4\n", "print(scipy.__version__) # tested with 1.13.1\n", "print(mpl.__version__) # tested with 3.9.2" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "X = np.loadtxt(open(\"exam_points_meanfree_unitvar.csv\", \"rb\"), delimiter=\";\", skiprows=0)\n", "N, F = X.shape\n", "print(N, F) # 34 students, 5 tasks for exam on signals & systems, a typical course in electrical engineering bachelor studies\n", "# columns correspond to theses tasks\n", "task_label = ['Task 1: Convolution', 'Task 2: Fourier', 'Task 3: Sampling', 'Task 4: Laplace Domain', 'Task 5: z-Domain']" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# data in exam_points_meanfree_unitvar.csv is already mean-free and columns have var=1\n", "# so the numbers in X do not represent points or percentage,\n", "# but 
rather encode the performance of the students per task in a normalised way\n", "# X is however sorted: first row belongs to best grade, last row to worst grade\n", "np.mean(X, axis=0), np.std(X, axis=0, ddof=1), np.var(X, axis=0, ddof=1)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# for completeness of PCA algorithm ->\n", "# make X zscore (although it is already)\n", "mu = np.mean(X, axis=0)\n", "X = X - mu # de-mean\n", "sigma = np.sqrt(np.sum(X**2, axis=0) / (N-1))\n", "X = X / sigma # normalise to std=1\n", "np.mean(X, axis=0), np.std(X, axis=0, ddof=1), np.var(X, axis=0, ddof=1) # check" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "X # print mean=0 / var=1 data matrix" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# get SVD / CovMatrix stuff\n", "[U, s, Vh] = svd(X)\n", "V = Vh.T # we don't use Vh later on!\n", "S = diagsvd(s, N, F) # sing vals matrix\n", "D, _ = np.linalg.eig(X.T @ X / (N-1)) # eig vals\n", "D = -np.sort(-D) # sort them, then ==\n", "d = s**2 / (N-1)\n", "print(np.allclose(d, D)) # so we go for d later on\n", "\n", "# switch polarities for nicer interpretation of the\n", "# exam data\n", "V[:,0] *= -1\n", "U[:,0] *= -1\n", "\n", "V[:,2] *= -1\n", "U[:,2] *= -1\n", "\n", "V[:,3] *= -1\n", "U[:,3] *= -1" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# PCA\n", "US = U @ S\n", "PC_Features = US @ diagsvd(1 / np.sqrt(d), F, F) # normalised such that columns have var 1, aka (normalised) PC scores\n", "print(np.var(PC_Features, axis=0, ddof=1))\n", "#PC_Loadings = (diagsvd(np.sqrt(d), F, F) @ V.T).T # ==\n", "PC_Loadings = V @ diagsvd(np.sqrt(d), F, F) # aka PC coeff, not unit-length anymore, but normalised such that it shows correlation between PC_Features and X" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], 
"source": [ "np.allclose(X, PC_Features @ PC_Loadings.T) # check correct matrix factorisation" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# project an x column vector to a pc feature column -> do this for all options -> get all weights for linear comb of pc features\n", "# correlation uses unit-length vectors\n", "PC_Loadings_manual = np.zeros((F, F))\n", "for row in range(F):\n", " tmp_x = X[:, row] / np.linalg.norm(X[:, row])\n", " for column in range(F):\n", " tmp_pc = PC_Features[:, column] / np.linalg.norm(PC_Features[:, column])\n", " PC_Loadings_manual[row, column] = np.inner(tmp_pc, tmp_x)\n", "np.allclose(PC_Loadings_manual, PC_Loadings) # we get the PC_Loadings matrix" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# explained variance\n", "d, np.var(US, axis=0, ddof=1)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# explained cum variance in %\n", "cum_var = np.cumsum(d) / np.sum(d) * 100\n", "cum_var" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Check via Plots" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "plt.figure(figsize=(12,8))\n", "\n", "plt.subplot(2,1,1)\n", "for f in range(F):\n", " plt.plot(X[:, f], 'o-', color='C'+str(f), label='Task '+str(f+1), ms=3)\n", "plt.legend(loc='lower left')\n", "plt.xticks([0, N-1], labels=['best grade', 'worst grade'])\n", "plt.ylabel('normalised points (mean-free, var=1)')\n", "plt.grid(True)\n", "plt.title(task_label)\n", "\n", "plt.subplot(2,1,2)\n", "for f in range(F):\n", " plt.plot(US[:, f], 'o-', color='C'+str(f), label='PCA v '+str(f+1), lw=(F-f)*2/3, ms=(F-f)*3/2)\n", "plt.legend(loc='lower left')\n", "plt.xticks([0, N-1], labels=['best grade', 'worst grade'])\n", "plt.ylabel('PC features (mean-free, sorted var)')\n", "plt.xlabel('student index (sorted grade)')\n", 
"plt.grid(True)\n", "plt.title(['cum var in %:', cum_var])\n", "plt.tight_layout()" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# correlation between task and pc\n", "pc_label = ['PC 1', 'PC 2', 'PC 3', 'PC 4', 'PC 5']\n", "cmap = plt.get_cmap('Spectral_r', 8)\n", "fig = plt.figure(figsize=(6,4))\n", "ax = fig.add_subplot(111)\n", "cax = ax.matshow(PC_Loadings, cmap=cmap, vmin=-1, vmax=+1)\n", "fig.colorbar(cax)\n", "ax.set_xticks(np.arange(len(pc_label)))\n", "ax.set_yticks(np.arange(len(task_label)))\n", "ax.set_xticklabels(pc_label)\n", "ax.set_yticklabels(task_label)\n", "ax.set_title('Loading Matrix = PC x contributes to Task y')\n", "plt.tight_layout()\n", "\n", "# a rank 3 approximation of the data,\n", "# i.e. using only PC1, PC2 and PC3 in the linear combination to reconstruct X\n", "# would only change one grade by a 1/3 grade step\n", "# so 85.899 % explained variance would be enough to figure the actual grading\n", "\n", "# PC1 and PC2 might allow an intuitive interpretation:\n", "# students are very well prepared to convolution, Laplace and z-Domain tasks\n", "# as theses tasks are always very similar and definitiely will be queried in the exam\n", "# so, PC1 indidcates the performance on 'fulfilled' expectations and is\n", "# highly correlated with the achieved grade\n", "# the Fourier task and the sampling task were chosen out of a wide range of options\n", "# here students have rather 'unknown' expectations, which is why we need PC 2 to cover this\n", "#\n", "# PC 3 to 5 show positive vs. negative correlations, i.e. mostly one good task vs. one bad task performance\n", "# some of these results are intuitive: we know that students sometimes have preferences for Laplace vs. 
z-Domain " ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "np.sum(PC_Loadings**2, axis=0) # that's again the explained variance of the PCs" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "np.sum(PC_Loadings**2, axis=1) # communalities, must sum to 1 in our normalised handling" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Copyright\n", "\n", "- the notebooks are provided as [Open Educational Resources](https://en.wikipedia.org/wiki/Open_educational_resources)\n", "- feel free to use the notebooks for your own purposes\n", "- the text is licensed under [Creative Commons Attribution 4.0](https://creativecommons.org/licenses/by/4.0/)\n", "- the code of the IPython examples is licensed under the [MIT license](https://opensource.org/licenses/MIT)\n", "- please attribute the work as follows: *Frank Schultz, Data Driven Audio Signal Processing - A Tutorial Featuring Computational Examples, University of Rostock* ideally with relevant file(s), github URL https://github.com/spatialaudio/data-driven-audio-signal-processing-exercise, commit number and/or version tag, year." ] } ], "metadata": { "kernelspec": { "display_name": "myddasp", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.12.3" } }, "nbformat": 4, "nbformat_minor": 2 }