import os
import sys

import numpy as np

# import provided preprocessing functions from GSI
# make_delayed: creates lagged copies of embeddings to model hemodynamic delay
# downsample_word_vectors: aligns word-rate embeddings to fMRI TR-rate using
# Lanczos interpolation
sys.path.append(os.path.join(os.path.dirname(__file__), 'provided'))
from preprocessing import downsample_word_vectors, make_delayed

# TR (repetition time): how often the fMRI scanner takes a brain "snapshot":
# every 2 seconds
TR = 2

# trim edges because:
# First 5 seconds: subject hasn't settled into listening yet / scanner warmup
# Last 10 seconds: brain response lags behind audio, so last words aren't
# fully captured
TRIM_START = 5
TRIM_END = 10

# Hemodynamic delays: brain blood-oxygen response peaks 4-6 seconds after
# hearing a word. By creating copies of the embeddings shifted by 1,2,3,4
# seconds, we let the ridge model figure out which delay best predicts each
# voxel's response. This multiplies embedding dimension by 4 (one copy per
# delay).
DELAYS = [1, 2, 3, 4]


def trim_fmri(fmri_data, trim_start=TRIM_START, trim_end=TRIM_END, tr=TR):
    """Trim the first and last seconds from fMRI data.

    Why: after downsampling, embeddings and fMRI still don't perfectly align
    at the edges. Trimming removes unreliable boundary timepoints from the
    fMRI so that its time dimension matches the trimmed embeddings.

    Example for adollshouse story:
        Before trim: (241, 94251) — 241 timepoints x 94251 voxels
        After trim:  (234, 94251) — removes 5s/2=2 TRs at start,
                     10s/2=5 TRs at end

    Args:
        fmri_data: array of shape (T, V) — timepoints x voxels
        trim_start: seconds to remove from beginning
        trim_end: seconds to remove from end
        tr: repetition time in seconds (how often fMRI scans)

    Returns:
        trimmed fmri array of shape (T', V)
    """
    start_idx = int(trim_start / tr)  # e.g. 5s / 2s = 2 TRs
    end_idx = int(trim_end / tr)      # e.g. 10s / 2s = 5 TRs
    # BUGFIX: if trim_end < tr, end_idx == 0 and the plain slice
    # fmri_data[start_idx:-0] == fmri_data[start_idx:0] silently returns
    # an EMPTY array. `-end_idx or None` falls back to None (no end trim)
    # when end_idx is 0; identical to the original for end_idx > 0.
    return fmri_data[start_idx:-end_idx or None]


def preprocess_embeddings(stories, word_vectors, wordseqs, fmri_lengths,
                          trim_start=TRIM_START, trim_end=TRIM_END,
                          tr=TR, delays=DELAYS):
    """Full preprocessing pipeline to turn word embeddings into model features.

    The problem: word embeddings are at word-rate (3 words/sec, thousands of
    words), but fMRI is at TR-rate (1 scan/2sec, ~250 timepoints). We need to
    align them.

    Pipeline:
      1. Downsample: word-rate embeddings -> TR-rate using Lanczos
         interpolation: (1656 words, embed_dim) -> (256 TRs, embed_dim)
      2. Trim: remove boundary timepoints to match fMRI dimensions:
         (256 TRs, embed_dim) -> (249 TRs, embed_dim)
      3. Delay: create 4 shifted copies to model hemodynamic lag:
         (249 TRs, embed_dim) -> (249 TRs, embed_dim * 4)

    After this, X (embeddings) and Y (fMRI) have matching time dimensions and
    can be used in ridge regression: Y = X @ W

    Args:
        stories: list of story names to process e.g. ['adollshouse', 'avatar']
        word_vectors: dict of {story: array (n_words, embed_dim)} —
            one embedding vector per word in the story
        wordseqs: dict of {story: DataSequence} — raw_text from the pkl file.
            DataSequence contains .data_times (word timestamps) and .tr_times
            (fMRI scan timestamps) needed for downsampling
        fmri_lengths: dict of {story: int} — number of TRs in raw fMRI for
            each story; used to crop embeddings to match fMRI before trimming
        trim_start: seconds to trim from start
        trim_end: seconds to trim from end
        tr: repetition time in seconds
        delays: list of delay values in seconds for make_delayed

    Returns:
        dict of {story: array of shape (T', embed_dim * len(delays))}
        T' = trimmed number of timepoints, matches trimmed fMRI timepoints
    """
    # step 1: downsample
    # Lanczos interpolation maps each word embedding to the nearest TR
    # timepoint, using word timestamps (data_times) and TR timestamps
    # (tr_times) from DataSequence.
    # result: one embedding vector per TR instead of one per word
    downsampled = downsample_word_vectors(stories, word_vectors, wordseqs)

    # step 2: trim and delay
    start_idx = int(trim_start / tr)  # number of TRs to remove at start
    end_idx = int(trim_end / tr)      # number of TRs to remove at end
    processed = {}
    for story in stories:
        arr = downsampled[story]  # shape: (n_trs, embed_dim)
        # crop to fMRI length first — tr_times has extra TRs not in fMRI
        arr = arr[:fmri_lengths[story]]
        # trim edges -> shape: (T', embed_dim)
        # `or None` guards end_idx == 0 (same fix as trim_fmri): a -0 slice
        # would return an empty array instead of leaving the end untrimmed.
        arr = arr[start_idx:-end_idx or None]
        # add delays -> shape: (T', embed_dim * len(delays))
        arr = make_delayed(arr, delays)
        processed[story] = arr
    return processed


def load_fmri(stories, subject, data_path):
    """Load fMRI data for a list of stories and trim it to match embeddings.

    Each story's fMRI file is a (T, V) matrix:
        T = number of fMRI timepoints (varies by story length)
        V = number of voxels = 94251 (fixed, whole brain)

    We trim immediately on load so fMRI time dimension matches the
    preprocessed embeddings time dimension.

    Args:
        stories: list of story names e.g. ['adollshouse', 'avatar']
        subject: subject folder name e.g. 'subject2' or 'subject3'
        data_path: root path to data directory

    Returns:
        dict of {story: trimmed array of shape (T', V)}
        T' matches the T' from preprocess_embeddings output
    """
    fmri = {}
    for story in stories:
        path = os.path.join(data_path, subject, f'{story}.npy')
        data = np.load(path)           # shape: (T, V) — raw fMRI
        fmri[story] = trim_fmri(data)  # shape: (T', V) — trimmed
    return fmri


# after preprocess_embeddings and load_fmri, for every story you end up with:
#   X (embeddings): (T', embed_dim * 4)  <- features for ridge regression
#   Y (fMRI):       (T', 94251)          <- targets for ridge regression