File size: 6,257 Bytes
0b416c6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
import numpy as np
import sys
import os

# import provided preprocessing functions from GSI
# make_delayed: creates lagged copies of embeddings to model hemodynamic delay
# downsample_word_vectors: aligns word-rate embeddings to fMRI TR-rate using Lanczos interpolation
sys.path.append(os.path.join(os.path.dirname(__file__), 'provided'))
from preprocessing import downsample_word_vectors, make_delayed

# TR (repetition time): how often the fMRI scanner takes a brain "snapshot": every 2 seconds
TR: int = 2

# trim edges because:
# First 5 seconds: subject hasn't settled into listening yet / scanner warmup
# Last 10 seconds: brain response lags behind audio, so last words aren't fully captured
TRIM_START: int = 5
TRIM_END: int = 10

# Hemodynamic delays: brain blood-oxygen response peaks 4-6 seconds after hearing a word
# By creating copies of the embeddings shifted by 1,2,3,4 seconds, we let the ridge model
# figure out which delay best predicts each voxel's response
# This multiplies embedding dimension by 4 (one copy per delay)
DELAYS: list[int] = [1, 2, 3, 4]


def trim_fmri(fmri_data, trim_start=TRIM_START, trim_end=TRIM_END, tr=TR):
    """Trim the first and last seconds from fMRI data.
    
    Why: after downsampling, embeddings and fMRI still don't perfectly align at
    the edges. Trimming removes unreliable boundary timepoints from the fMRI
    so that its time dimension matches the trimmed embeddings.
    
    Example for adollshouse story:
        Before trim: (241, 94251)  — 241 timepoints x 94251 voxels
        After trim:  (234, 94251)  — removes 5s/2=2 TRs at start, 10s/2=5 TRs at end
    
    Args:
        fmri_data: array of shape (T, V) — timepoints x voxels
        trim_start: seconds to remove from beginning
        trim_end: seconds to remove from end
        tr: repetition time in seconds (how often fMRI scans)
    
    Returns:
        trimmed fmri array of shape (T', V)
    """
    start_idx = int(trim_start / tr)  # e.g. 5s / 2s = 2 TRs
    end_idx = int(trim_end / tr)      # e.g. 10s / 2s = 5 TRs
    # BUG FIX: a slice stop of -0 equals 0 and would return an EMPTY array
    # whenever trim_end < tr (end_idx == 0). Use None to mean "keep the tail".
    stop = -end_idx if end_idx else None
    return fmri_data[start_idx:stop]


def preprocess_embeddings(stories, word_vectors, wordseqs, fmri_lengths,
                          trim_start=TRIM_START, trim_end=TRIM_END,
                          tr=TR, delays=DELAYS):
    """Full preprocessing pipeline to turn word embeddings into model features.
    
    The problem: word embeddings are at word-rate (3 words/sec, thousands of words),
    but fMRI is at TR-rate (1 scan/2sec, ~250 timepoints). We need to align them.
    
    Pipeline:
        1. Downsample: word-rate embeddings -> TR-rate using Lanczos interpolation
           (1656 words, embed_dim) -> (256 TRs, embed_dim)
        
        2. Trim: remove boundary timepoints to match fMRI dimensions
           (256 TRs, embed_dim) -> (249 TRs, embed_dim)
        
        3. Delay: create 4 shifted copies to model hemodynamic lag
           (249 TRs, embed_dim) -> (249 TRs, embed_dim * 4)
    
    After this, X (embeddings) and Y (fMRI) have matching time dimensions
    and can be used in ridge regression: Y = X @ W
    
    Args:
        stories: list of story names to process e.g. ['adollshouse', 'avatar']
        word_vectors: dict of {story: array (n_words, embed_dim)}
                      one embedding vector per word in the story
        wordseqs: dict of {story: DataSequence} — this is raw_text from the pkl file
                  DataSequence contains .data_times (word timestamps) and
                  .tr_times (fMRI scan timestamps) needed for downsampling
        fmri_lengths: dict of {story: int} — number of TRs in raw fMRI for each story
                  used to crop embeddings to match fMRI before trimming
        trim_start: seconds to trim from start
        trim_end: seconds to trim from end
        tr: repetition time in seconds
        delays: list of delay values in seconds for make_delayed
    
    Returns:
        dict of {story: array of shape (T', embed_dim * len(delays))}
        T' = trimmed number of timepoints, matches trimmed fMRI timepoints
    """
    # step 1: downsample
    # Lanczos interpolation maps each word embedding to the nearest TR timepoint
    # uses word timestamps (data_times) and TR timestamps (tr_times) from DataSequence
    # result: one embedding vector per TR instead of one per word
    downsampled = downsample_word_vectors(stories, word_vectors, wordseqs)

    # step 2: trim and delay
    start_idx = int(trim_start / tr)  # number of TRs to remove at start
    end_idx = int(trim_end / tr)      # number of TRs to remove at end
    # BUG FIX: a slice stop of -0 equals 0 and would produce an EMPTY array
    # whenever trim_end < tr (end_idx == 0). Use None to mean "keep the tail".
    # This also keeps trimming consistent with trim_fmri.
    stop = -end_idx if end_idx else None

    processed = {}
    for story in stories:
        arr = downsampled[story]          # shape: (n_trs, embed_dim)
        # crop to fMRI length first —> tr_times has extra TRs not in fMRI
        arr = arr[:fmri_lengths[story]]
        arr = arr[start_idx:stop]         # trim edges -> shape: (T', embed_dim)
        arr = make_delayed(arr, delays)   # add delays -> shape: (T', embed_dim * 4)
        processed[story] = arr
    return processed


def load_fmri(stories, subject, data_path):
    """Load each story's fMRI recording for one subject and trim its edges.
    
    Every story is stored as a (T, V) matrix on disk:
        T = number of fMRI timepoints (varies with story duration)
        V = number of voxels = 94251 (fixed, whole brain)
    
    Trimming happens right here at load time so the returned arrays already
    line up with the time dimension produced by preprocess_embeddings.
    
    Args:
        stories: list of story names e.g. ['adollshouse', 'avatar']
        subject: subject folder name e.g. 'subject2' or 'subject3'
        data_path: root path to data directory
    
    Returns:
        dict of {story: trimmed array of shape (T', V)}
        T' matches the T' from preprocess_embeddings output
    """
    # np.load gives the raw (T, V) matrix; trim_fmri cuts it down to (T', V)
    return {
        story: trim_fmri(np.load(os.path.join(data_path, subject, f'{story}.npy')))
        for story in stories
    }


# after preprocess_embeddings and load_fmri, for every story you end up with:
# X (embeddings): (T', embed_dim * 4)   <- features for ridge regression
# Y (fMRI):       (T', 94251)           <- targets for ridge regression