{
 "metadata": {
  "name": ""
 },
 "nbformat": 3,
 "nbformat_minor": 0,
 "worksheets": [
  {
   "cells": [
    {
     "cell_type": "heading",
     "level": 1,
     "metadata": {},
     "source": [
      "Event Info Generation"
     ]
    },
    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "The cells in this notebook process the log files written by Dharsh's PsychToolBox code into the formats that are used directly by the analysis notebook. This happens in two steps.\n",
      "\n",
      "First, we read each functional run's `.mat` file and extract the relevant information. The runs are combined into a master `.csv` file for each subject that is used for all subsequent processing (i.e., the `.mat` files are never touched again). This file is read directly by the analysis notebook for both the behavioral and fMRI analyses.\n",
      "\n",
      "Second, we extract more specific information about the \"decoding problems\" that is used, behind the scenes, by the `lyman` code in the analysis notebook to build datasets for the decoding analyses. This information is saved in event files that contain at least the following fields:\n",
      "\n",
      "- `run`\n",
      "- `condition`\n",
      "- `onset`\n",
      "\n",
      "Additional fields can be used for confound regression; in the code below, we save the `rt` (reaction time) field for this purpose. An illustrative sketch of what one of these event files looks like follows the setup cells below."
     ]
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "import os\n",
      "import os.path as op\n",
      "import numpy as np\n",
      "import pandas as pd\n",
      "from scipy.io import loadmat\n",
      "import lyman"
     ],
     "language": "python",
     "metadata": {},
     "outputs": []
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "subjects = lyman.determine_subjects()\n",
      "project = lyman.gather_project_info()\n",
      "data_dir = project[\"data_dir\"]\n",
      "anal_dir = project[\"analysis_dir\"]\n",
      "\n",
      "# Path templates for the behavioral, event, and artifact files\n",
      "behav_temp = op.join(data_dir, \"%s/behav/behav_data.csv\")\n",
      "event_temp = op.join(data_dir, \"%s/design/%s.csv\")\n",
      "artifact_temp = op.join(anal_dir, \"dksort/%s/preproc/run_%d/artifacts.csv\")\n",
      "\n",
      "dim_rules = [\"shape\", \"color\", \"pattern\"]\n",
      "dec_rules = [\"same\", \"different\"]\n",
      "\n",
      "sched_columns = [\"run\", \"condition\", \"onset\", \"rt\"]"
     ],
     "language": "python",
     "metadata": {},
     "outputs": []
    },
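    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "To make the target event-file format concrete, the next cell builds a small table with the `sched_columns` defined above and displays it. This is an illustrative sketch only: the run numbers, conditions, onsets, and reaction times are made up, and nothing later in the notebook depends on this cell."
     ]
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "# Illustrative sketch only: a tiny table with the fields that the\n",
      "# event files written below will contain. All values are made up.\n",
      "example_events = pd.DataFrame({\"run\": [1, 1, 2],\n",
      "                               \"condition\": [\"shape\", \"color\", \"pattern\"],\n",
      "                               \"onset\": [0.0, 16.0, 8.0],\n",
      "                               \"rt\": [0.84, 1.02, 0.91]},\n",
      "                              columns=sched_columns)\n",
      "example_events"
     ],
     "language": "python",
     "metadata": {},
     "outputs": []
    },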
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "for subj in subjects:\n",
      "    if not op.exists(op.join(data_dir, subj, \"design\")):\n",
      "        os.mkdir(op.join(data_dir, subj, \"design\"))"
     ],
     "language": "python",
     "metadata": {},
     "outputs": []
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "def dimension_rules(subj):\n",
      "\n",
      "    subj_df = pd.read_csv(behav_temp % subj)\n",
      "    sched = pd.DataFrame(columns=sched_columns, index=subj_df.index)\n",
      "    subj_df = subj_df[subj_df[\"clean\"] & subj_df[\"correct\"]]\n",
      "    sched.update(subj_df)\n",
      "    sched[\"condition\"] = subj_df.dim_rule\n",
      "    sched[\"onset\"] = subj_df.stim_time\n",
      "    sched = sched.dropna()\n",
      "    sched.to_csv(event_temp % (subj, \"dimension\"), index=False)"
     ],
     "language": "python",
     "metadata": {},
     "outputs": []
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "def parse_ptb_files(subj):\n",
      "\n",
      "    data = dict(run=[], rt=[], congruent=[],\n",
      "                dim_rule=[], dec_rule=[],\n",
      "                dim_shift=[], dec_shift=[],\n",
      "                dim_stay=[], dec_stay=[],\n",
      "                dim_shift_lag=[], dec_shift_lag=[],\n",
      "                attend_match=[], distract_match=[],\n",
      "                shape1=[], shape2=[],\n",
      "                color1=[], color2=[],\n",
      "                pattern1=[], pattern2=[],\n",
      "                answer=[], made_resp=[], correct=[],\n",
      "                stim_time=[], clean=[], block_pos=[])\n",
      "\n",
      "    for r in range(1, 5):\n",
      "        mat_file = op.join(data_dir, subj,\n",
      "                           \"behav/%s_run%d.mat\" % (subj, r))\n",
      "        mat = loadmat(mat_file, squeeze_me=True)\n",
      "        events = mat[\"theData\"]\n",
      "\n",
      "        n_trials = len(events[\"RT\"][()])\n",
      "\n",
      "        # Basic response and timing information for each trial\n",
      "        data[\"rt\"].append(events[\"RT\"][()])\n",
      "        data[\"answer\"].append(events[\"keypress\"][()])\n",
      "        data[\"made_resp\"].append(events[\"keypress\"][()] > 0)\n",
      "        data[\"stim_time\"].append(events[\"stim1_onset\"][()] - 12)\n",
      "        data[\"correct\"].append(events[\"score\"][()] > 0)\n",
      "        data[\"block_pos\"].append(np.tile(range(3), n_trials // 3))\n",
      "        data[\"run\"].append(np.ones(n_trials, int) * r)\n",
      "\n",
      "        # A trial is clean unless its stimulus volume falls within a\n",
      "        # window of -1 to +4 volumes around any artifact volume\n",
      "        stim_vols = np.round(data[\"stim_time\"][-1] / 2)\n",
      "        artifacts = pd.read_csv(artifact_temp % (subj, r)).max(axis=1).values\n",
      "        art_vols = np.unique(np.argwhere(artifacts)[:, None] + np.arange(-1, 5))\n",
      "        clean = np.logical_not(np.in1d(stim_vols, art_vols))\n",
      "        data[\"clean\"].append(clean)\n",
      "\n",
      "        # Trial schedule: (1-based) dimension rule, decision rule, and\n",
      "        # match codes for the attended and distractor features\n",
      "        sched = mat[\"sess_trials\"]\n",
      "\n",
      "        matches = sched[:n_trials, 2:] - 1\n",
      "        data[\"congruent\"].append(~np.logical_xor(*matches.T))\n",
      "        data[\"attend_match\"].append(matches[:, 0] == 0)\n",
      "        data[\"distract_match\"].append(matches[:, 1] == 0)\n",
      "\n",
      "        # Code rule shifts, block-initial stay trials, and the number\n",
      "        # of trials elapsed since the most recent shift\n",
      "        for idx, rule in enumerate([\"dim\", \"dec\"]):\n",
      "            rule_key = sched[:n_trials, idx] - 1\n",
      "            rule_shift = np.ones(len(rule_key), bool)\n",
      "            rule_shift[1:] = rule_key[1:] != rule_key[:-1]\n",
      "            data[\"%s_rule\" % rule].append(rule_key)\n",
      "            data[\"%s_shift\" % rule].append(rule_shift)\n",
      "            rule_stay = np.logical_and(data[\"block_pos\"][-1] == 0,\n",
      "                                       np.logical_not(rule_shift))\n",
      "            data[\"%s_stay\" % rule].append(rule_stay)\n",
      "\n",
      "            lag = []\n",
      "            for i, shift_i in enumerate(rule_shift):\n",
      "                if shift_i:\n",
      "                    lag.append(0)\n",
      "                else:\n",
      "                    lag.append(i - np.argwhere(rule_shift[:i]).max())\n",
      "            data[\"%s_shift_lag\" % rule].append(lag)\n",
      "\n",
      "        # Stimulus identities on each dimension for the two items\n",
      "        stims = mat[\"trial\"][r - 1][\"stim\"]\n",
      "        for j, dim in enumerate(dim_rules):\n",
      "            for t in [1, 2]:\n",
      "                data[\"%s%d\" % (dim, t)].append(stims[j][:72, t - 1].astype(int))\n",
      "\n",
      "    # Combine the runs and map numeric codes to meaningful labels\n",
      "    data = {k: np.concatenate(v) for k, v in data.items()}\n",
      "    df = pd.DataFrame(data)\n",
      "    df[\"subj\"] = subj\n",
      "    df[\"dim_rule\"] = df.dim_rule.map(dict(enumerate(dim_rules)))\n",
      "    df[\"dec_rule\"] = df.dec_rule.map(dict(enumerate(dec_rules)))\n",
      "    df[\"answer\"] = df.answer.map({1: \"no\", 2: \"yes\"})\n",
      "    for dim in dim_rules:\n",
      "        df[\"%s_match\" % dim] = df[\"%s1\" % dim] == df[\"%s2\" % dim]\n",
      "\n",
      "    csv_file = op.join(data_dir, subj, \"behav/behav_data.csv\")\n",
      "    df.to_csv(csv_file, index_label=\"trial\")"
     ],
     "language": "python",
     "metadata": {},
     "outputs": []
    },
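    {
     "cell_type": "markdown",
     "metadata": {},
     "source": [
      "The artifact screening inside `parse_ptb_files` is easy to miss, so the next cell walks through the same logic on a few made-up numbers: each onset is converted to an acquisition volume (the division by 2 presumably corresponds to a 2 s TR), every artifact volume is expanded into a window running from one volume before it to four volumes after it, and a trial counts as clean only if its stimulus volume avoids all of those windows. With the values below, the trial at 42 s (volume 21) is excluded even though the artifact itself is at volume 17, because of the trailing window. The onsets and artifact flags here are invented for illustration and are not read from disk."
     ]
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "# Illustrative sketch with made-up onsets and artifact flags; the real\n",
      "# code reads the artifact information from the lyman preproc output.\n",
      "demo_onsets = np.array([0., 16., 32., 42., 64.])\n",
      "demo_artifacts = np.zeros(40, int)\n",
      "demo_artifacts[17] = 1\n",
      "\n",
      "stim_vols = np.round(demo_onsets / 2)\n",
      "art_vols = np.unique(np.argwhere(demo_artifacts)[:, None] + np.arange(-1, 5))\n",
      "clean = np.logical_not(np.in1d(stim_vols, art_vols))\n",
      "pd.DataFrame({\"onset\": demo_onsets,\n",
      "              \"stim_vol\": stim_vols.astype(int),\n",
      "              \"clean\": clean},\n",
      "             columns=[\"onset\", \"stim_vol\", \"clean\"])"
     ],
     "language": "python",
     "metadata": {},
     "outputs": []
    },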
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "def decision_rules(subj):\n",
      "\n",
      "    subj_df = pd.read_csv(behav_temp % subj)\n",
      "    sched = pd.DataFrame(columns=sched_columns, index=subj_df.index)\n",
      "    subj_df = subj_df[subj_df[\"clean\"] & subj_df[\"correct\"]]\n",
      "    sched.update(subj_df)\n",
      "    sched[\"condition\"] = subj_df.dec_rule\n",
      "    sched[\"onset\"] = subj_df.stim_time\n",
      "    sched = sched.dropna()\n",
      "    sched.to_csv(event_temp % (subj, \"decision\"), index=False)"
     ],
     "language": "python",
     "metadata": {},
     "outputs": []
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "def dimension_cues(subj):\n",
      "\n",
      "    subj_df = pd.read_csv(behav_temp % subj)\n",
      "    sched = pd.DataFrame(columns=sched_columns, index=subj_df.index)\n",
      "    subj_df = subj_df[subj_df[\"clean\"] & subj_df[\"correct\"]]\n",
      "    subj_df = subj_df[subj_df.block_pos == 0]\n",
      "    sched.update(subj_df)\n",
      "    sched[\"condition\"] = subj_df.dim_rule\n",
      "    # Cue events: onset of each block's first stimulus, shifted back 12 s\n",
      "    sched[\"onset\"] = subj_df.stim_time - 12\n",
      "    sched = sched.dropna()\n",
      "    sched.to_csv(event_temp % (subj, \"dimension_cue\"), index=False)"
     ],
     "language": "python",
     "metadata": {},
     "outputs": []
    },
    {
     "cell_type": "code",
     "collapsed": false,
     "input": [
      "for subj in subjects:\n",
      "    parse_ptb_files(subj)\n",
      "    dimension_rules(subj)\n",
      "    decision_rules(subj)\n",
      "    dimension_cues(subj)"
     ],
     "language": "python",
     "metadata": {},
     "outputs": []
    }
   ],
   "metadata": {}
  }
 ]
}