{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "\n\n# Linear classifier on sensor data with plot patterns and filters\n\nHere decoding, a.k.a MVPA or supervised machine learning, is applied to M/EEG\ndata in sensor space. Fit a linear classifier with the LinearModel object\nproviding topographical patterns which are more neurophysiologically\ninterpretable :footcite:`HaufeEtAl2014` than the classifier filters (weight\nvectors). The patterns explain how the MEG and EEG data were generated from\nthe discriminant neural sources which are extracted by the filters.\nNote patterns/filters in MEG data are more similar than EEG data\nbecause the noise is less spatially correlated in MEG than EEG.\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# Authors: Alexandre Gramfort \n# Romain Trachel \n# Jean-R\u00e9mi King \n#\n# License: BSD-3-Clause\n# Copyright the MNE-Python contributors." ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "from sklearn.linear_model import LogisticRegression\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import StandardScaler\n\nimport mne\nfrom mne import EvokedArray, io\nfrom mne.datasets import sample\n\n# import a linear classifier from mne.decoding\nfrom mne.decoding import LinearModel, Vectorizer, get_coef\n\nprint(__doc__)\n\ndata_path = sample.data_path()\nsample_path = data_path / \"MEG\" / \"sample\"" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Set parameters\n\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "raw_fname = sample_path / \"sample_audvis_filt-0-40_raw.fif\"\nevent_fname = sample_path / \"sample_audvis_filt-0-40_raw-eve.fif\"\ntmin, tmax = -0.1, 0.4\nevent_id = dict(aud_l=1, vis_l=3)\n\n# Setup for reading the raw data\nraw = io.read_raw_fif(raw_fname, preload=True)\nraw.filter(0.5, 25, fir_design=\"firwin\")\nevents = mne.read_events(event_fname)\n\n# Read epochs\nepochs = mne.Epochs(\n raw, events, event_id, tmin, tmax, proj=True, decim=2, baseline=None, preload=True\n)\ndel raw\n\nlabels = epochs.events[:, -1]\n\n# get MEG data\nmeg_epochs = epochs.copy().pick(picks=\"meg\", exclude=\"bads\")\nmeg_data = meg_epochs.get_data(copy=False).reshape(len(labels), -1)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Decoding in sensor space using a LogisticRegression classifier\n\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "clf = LogisticRegression(solver=\"liblinear\") # liblinear is faster than lbfgs\nscaler = StandardScaler()\n\n# create a linear model with LogisticRegression\nmodel = LinearModel(clf)\n\n# fit the classifier on MEG data\nX = scaler.fit_transform(meg_data)\nmodel.fit(X, labels)\n\n# Extract and plot spatial filters and spatial patterns\nfor name, coef in ((\"patterns\", model.patterns_), (\"filters\", model.filters_)):\n # We fitted the linear model onto Z-scored data. To make the filters\n # interpretable, we must reverse this normalization step\n coef = scaler.inverse_transform([coef])[0]\n\n # The data was vectorized to fit a single model across all time points and\n # all channels. 
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "Let's do the same on EEG data using a scikit-learn pipeline\n\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": false
      },
      "outputs": [],
      "source": [
        "X = epochs.pick(picks=\"eeg\", exclude=\"bads\")\ny = epochs.events[:, 2]\n\n# Define a single pipeline that will sequentially:\nclf = make_pipeline(\n    Vectorizer(),  # 1) vectorize across time and channels\n    StandardScaler(),  # 2) normalize features across trials\n    LinearModel(  # 3) fit a logistic regression\n        LogisticRegression(solver=\"liblinear\")\n    ),\n)\nclf.fit(X, y)\n\n# Extract and plot patterns and filters\nfor name in (\"patterns_\", \"filters_\"):\n    # With `inverse_transform=True`, get_coef passes the coefficients back\n    # through the `inverse_transform` of the transformers in the pipeline, in\n    # reverse order (StandardScaler, then Vectorizer), so they are returned\n    # in the original channels-by-times space.\n    coef = get_coef(clf, name, inverse_transform=True)\n    evoked = EvokedArray(coef, epochs.info, tmin=epochs.tmin)\n    fig = evoked.plot_topomap()\n    fig.suptitle(f\"EEG {name[:-1]}\")"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "## References\n.. footbibliography::\n\n"
      ]
    }
  ],
  "metadata": {
    "kernelspec": {
      "display_name": "Python 3",
      "language": "python",
      "name": "python3"
    },
    "language_info": {
      "codemirror_mode": {
        "name": "ipython",
        "version": 3
      },
      "file_extension": ".py",
      "mimetype": "text/x-python",
      "name": "python",
      "nbconvert_exporter": "python",
      "pygments_lexer": "ipython3",
      "version": "3.12.2"
    }
  },
  "nbformat": 4,
  "nbformat_minor": 0
}