File size: 2,379 Bytes
ae73961
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
"""
Distill 8 temporal UX signals into a 40-feature vector per video.

Per signal (8 signals × 5 features = 40):
  1. mean        — average activation
  2. peak        — maximum activation
  3. variability — std dev of activation
  4. hook        — mean of first 4.5 seconds (first impression)
  5. slope       — linear trend (coef) across full video
"""

import numpy as np
from typing import Dict, List, Tuple

# The eight temporal UX signals, in canonical order; feature order follows this.
SIGNAL_NAMES = [
    "aesthetic_appeal",
    "visual_fluency",
    "cognitive_load",
    "trust_affinity",
    "reward_anticipation",
    "motor_readiness",
    "surprise_novelty",
    "friction_anxiety",
]

# One feature name per (signal, statistic) pair: 8 signals x 5 stats = 40,
# grouped signal-major so each signal's five stats are contiguous.
FEATURE_NAMES: List[str] = [
    f"{signal}__{stat}"
    for signal in SIGNAL_NAMES
    for stat in ("mean", "peak", "variability", "hook", "slope")
]

assert len(FEATURE_NAMES) == 40

HOOK_SECONDS = 4.5  # first impression window


def extract_features(signals: Dict[str, np.ndarray], tr: float = 1.5) -> np.ndarray:
    """
    Distill per-signal time series into a flat (40,) feature vector.

    Parameters
    ----------
    signals : dict mapping signal_name -> (n_timesteps,) activation array.
        Signals may have different lengths. A missing or empty signal is
        treated as a single zero sample (all five stats become 0.0).
    tr : float
        Sampling interval in seconds between consecutive timesteps.

    Returns
    -------
    np.ndarray
        (40,) float32 vector ordered per FEATURE_NAMES
        (mean, peak, variability, hook, slope per signal).
    """
    # Number of leading samples covering the first HOOK_SECONDS; at least 1
    # so the hook slice is never empty. Loop-invariant, so hoisted.
    hook_idx = max(1, int(np.ceil(HOOK_SECONDS / tr)))

    feats = []
    for sig_name in SIGNAL_NAMES:
        ts = np.asarray(signals.get(sig_name, np.zeros(1)))
        if ts.size == 0:
            # Guard: np.max raises on an empty array; treat as a zero sample.
            ts = np.zeros(1)

        mean = float(np.mean(ts))
        peak = float(np.max(ts))
        variability = float(np.std(ts))

        # hook = mean over the first-impression window.
        hook = float(np.mean(ts[:hook_idx]))

        # slope = least-squares linear trend over time. The time axis is
        # built per signal: the original cached the first signal's axis and
        # crashed in np.polyfit (shape mismatch) whenever a later signal
        # was longer than the first one.
        if len(ts) > 1:
            t = np.arange(len(ts)) * tr
            slope = float(np.polyfit(t, ts, 1)[0])
        else:
            slope = 0.0

        feats.extend([mean, peak, variability, hook, slope])

    return np.array(feats, dtype=np.float32)


def feature_vector_to_dict(vec: np.ndarray) -> Dict[str, float]:
    """Map a flat (40,) feature vector back to FEATURE_NAMES keys for interpretability."""
    return dict(zip(FEATURE_NAMES, map(float, vec)))


def top_positive_negative(feat_dict: Dict[str, float], n: int = 3) -> Tuple[List[str], List[str]]:
    """Return (top_n_positive_features, top_n_negative_features) by value.

    Both lists are ordered by descending value; the second list holds the
    n lowest-valued feature names.
    """
    ranked = sorted(feat_dict, key=feat_dict.get, reverse=True)
    return ranked[:n], ranked[-n:]