| import numpy as np |
| import sys |
| import os |
|
|
| |
| |
| |
| sys.path.append(os.path.join(os.path.dirname(__file__), 'provided')) |
| from preprocessing import downsample_word_vectors, make_delayed |
|
|
| |
TR = 2  # repetition time in seconds: one fMRI scan every 2 s

# Seconds of unreliable boundary data removed from each story so that the
# fMRI time axis matches the trimmed, downsampled embeddings.
TRIM_START = 5
TRIM_END = 10

# Delay values handed to make_delayed to model hemodynamic lag
# (per the pipeline docs these are in seconds — TODO confirm units
# against the provided make_delayed helper).
DELAYS = [1, 2, 3, 4]


def trim_fmri(fmri_data, trim_start=TRIM_START, trim_end=TRIM_END, tr=TR):
    """Trim the first and last seconds from fMRI data.

    Why: after downsampling, embeddings and fMRI still don't perfectly align
    at the edges. Trimming removes unreliable boundary timepoints from the
    fMRI so that its time dimension matches the trimmed embeddings.

    Example for adollshouse story:
        Before trim: (241, 94251) — 241 timepoints x 94251 voxels
        After trim:  (234, 94251) — removes 5s/2=2 TRs at start, 10s/2=5 TRs at end

    Args:
        fmri_data: array of shape (T, V) — timepoints x voxels
        trim_start: seconds to remove from beginning
        trim_end: seconds to remove from end
        tr: repetition time in seconds (how often fMRI scans)

    Returns:
        trimmed fmri array of shape (T', V)
    """
    start_idx = int(trim_start / tr)
    end_idx = int(trim_end / tr)
    # A slice stop of -0 is 0 and would select *nothing*: when trim_end
    # rounds down to 0 TRs (e.g. trim_end=0, or trim_end < tr), the old
    # `fmri_data[start_idx:-end_idx]` returned an empty array. Use None to
    # mean "no end trimming" in that case.
    stop = -end_idx if end_idx else None
    return fmri_data[start_idx:stop]
|
|
|
|
def preprocess_embeddings(stories, word_vectors, wordseqs, fmri_lengths,
                          trim_start=TRIM_START, trim_end=TRIM_END,
                          tr=TR, delays=DELAYS):
    """Full preprocessing pipeline to turn word embeddings into model features.

    The problem: word embeddings are at word-rate (3 words/sec, thousands of
    words), but fMRI is at TR-rate (1 scan/2sec, ~250 timepoints). We need to
    align them.

    Pipeline:
    1. Downsample: word-rate embeddings -> TR-rate using Lanczos interpolation
       (1656 words, embed_dim) -> (256 TRs, embed_dim)

    2. Trim: remove boundary timepoints to match fMRI dimensions
       (256 TRs, embed_dim) -> (249 TRs, embed_dim)

    3. Delay: create len(delays) shifted copies to model hemodynamic lag
       (249 TRs, embed_dim) -> (249 TRs, embed_dim * len(delays))

    After this, X (embeddings) and Y (fMRI) have matching time dimensions
    and can be used in ridge regression: Y = X @ W

    Args:
        stories: list of story names to process e.g. ['adollshouse', 'avatar']
        word_vectors: dict of {story: array (n_words, embed_dim)}
            one embedding vector per word in the story
        wordseqs: dict of {story: DataSequence} — raw_text from the pkl file;
            DataSequence contains .data_times (word timestamps) and
            .tr_times (fMRI scan timestamps) needed for downsampling
        fmri_lengths: dict of {story: int} — number of TRs in raw fMRI for
            each story, used to crop embeddings to match fMRI before trimming
        trim_start: seconds to trim from start
        trim_end: seconds to trim from end
        tr: repetition time in seconds
        delays: list of delay values for make_delayed (presumably seconds,
            per DELAYS above — confirm against the provided helper)

    Returns:
        dict of {story: array of shape (T', embed_dim * len(delays))}
        T' = trimmed number of timepoints, matches trimmed fMRI timepoints
    """
    # Step 1: word-rate -> TR-rate (Lanczos interpolation, provided helper).
    downsampled = downsample_word_vectors(stories, word_vectors, wordseqs)

    # Convert seconds -> TR counts once, outside the per-story loop.
    start_idx = int(trim_start / tr)
    end_idx = int(trim_end / tr)
    # Same fix as trim_fmri: a slice stop of -0 would select nothing, so use
    # None ("no end trim") when trim_end rounds down to 0 TRs.
    stop = -end_idx if end_idx else None

    processed = {}
    for story in stories:
        arr = downsampled[story]
        # Crop to the raw fMRI length first so embeddings and fMRI are
        # trimmed from identical time axes.
        arr = arr[:fmri_lengths[story]]
        # Step 2: drop unreliable boundary TRs.
        arr = arr[start_idx:stop]
        # Step 3: stack delayed copies along the feature axis.
        processed[story] = make_delayed(arr, delays)
    return processed
|
|
|
|
def load_fmri(stories, subject, data_path):
    """Load each story's fMRI matrix from disk and trim its boundaries.

    Each story's file holds a (T, V) array:
        T = number of fMRI timepoints (varies by story length)
        V = number of voxels = 94251 (fixed, whole brain)

    Trimming happens at load time so the returned time dimension already
    matches the output of preprocess_embeddings.

    Args:
        stories: list of story names e.g. ['adollshouse', 'avatar']
        subject: subject folder name e.g. 'subject2' or 'subject3'
        data_path: root path to data directory

    Returns:
        dict of {story: trimmed array of shape (T', V)}
        T' matches the T' from preprocess_embeddings output
    """
    return {
        name: trim_fmri(np.load(os.path.join(data_path, subject, f'{name}.npy')))
        for name in stories
    }
|
|
|
|
| |
| |
| |