Upload scripts/extract_l1_labels.py with huggingface_hub
Browse files- scripts/extract_l1_labels.py +246 -0
scripts/extract_l1_labels.py
ADDED
|
@@ -0,0 +1,246 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Auto-extract L1 action labels from filenames/metadata for datasets without text.
|
| 3 |
+
|
| 4 |
+
Outputs: data/processed/{dataset}/labels.json
|
| 5 |
+
Format: { "motion_id": {"L1_action": "walk", "L1_style": "happy", "source_file": "..."}, ... }
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import sys
|
| 9 |
+
import os
|
| 10 |
+
import re
|
| 11 |
+
import json
|
| 12 |
+
from pathlib import Path
|
| 13 |
+
import numpy as np
|
| 14 |
+
|
| 15 |
+
# Resolve the repository root (this script lives in scripts/, so two levels up)
# and prepend it to sys.path so sibling project packages are importable when
# the script is run directly.
project_root = Path(__file__).parent.parent
sys.path.insert(0, str(project_root))
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def extract_lafan1():
    """LAFAN1: filename = {theme}{take}_{subject}.bvh → L1 = theme.

    Returns:
        dict: motion_id -> {'L1_action': str, 'source_file': str}
    """
    labels = {}
    mdir = project_root / 'data' / 'processed' / 'lafan1' / 'motions'
    # Compile once instead of re-matching the raw pattern every iteration.
    theme_re = re.compile(r'([a-zA-Z]+)\d*_')
    for f in sorted(os.listdir(mdir)):
        # Skip stray non-archive files (e.g. .DS_Store) — np.load would raise.
        if not f.endswith('.npz'):
            continue
        d = dict(np.load(mdir / f, allow_pickle=True))
        src = str(d.get('source_file', f))
        # "aiming1_subject1.bvh" → "aiming"
        match = theme_re.match(src)
        action = match.group(1).lower() if match else 'unknown'
        labels[f.replace('.npz', '')] = {
            'L1_action': action,
            'source_file': src,
        }
    return labels
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def extract_100style():
    """100Style: filename = {StyleName}_{MovementType}.bvh.

    Returns:
        dict: motion_id -> {'L1_action', 'L1_movement', 'L1_style', 'source_file'}
    """
    movement_map = {
        'FW': 'walk forward', 'BW': 'walk backward',
        'FR': 'run forward', 'BR': 'run backward',
        'SW': 'sidestep walk', 'SR': 'sidestep run',
        'ID': 'idle',
        'TR1': 'transition 1', 'TR2': 'transition 2',
        'TR3': 'transition 3', 'TR4': 'transition 4',
    }
    labels = {}
    mdir = project_root / 'data' / 'processed' / '100style' / 'motions'
    for f in sorted(os.listdir(mdir)):
        # Skip stray non-archive files — np.load would raise on them.
        if not f.endswith('.npz'):
            continue
        d = dict(np.load(mdir / f, allow_pickle=True))
        src = str(d.get('source_file', f))
        # "Happy_FW.bvh" → style="happy", movement="walk forward"
        parts = src.replace('.bvh', '').split('_')
        style = parts[0].lower() if parts else 'unknown'
        movement_code = parts[1] if len(parts) > 1 else ''
        # Unknown codes fall back to the lowercased code itself.
        movement = movement_map.get(movement_code, movement_code.lower())
        action = movement.split()[0] if movement else 'unknown'  # first word
        labels[f.replace('.npz', '')] = {
            'L1_action': action,
            'L1_movement': movement,
            'L1_style': style,
            'source_file': src,
        }
    return labels
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
def extract_bandai_namco():
    """Bandai Namco: has JSON metadata in original repo.

    Prefers the raw dataset's per-file JSON metadata ('content'/'style');
    falls back to keyword-matching the filename when no entry exists.

    Returns:
        dict: motion_id -> {'L1_action', 'L1_style', 'source_file'}
    """
    labels = {}
    mdir = project_root / 'data' / 'processed' / 'bandai_namco' / 'motions'
    # Try to load from original JSON metadata
    bn_meta = {}
    meta_dirs = [
        project_root / 'data' / 'raw' / 'BandaiNamco' / 'dataset',
    ]
    for meta_dir in meta_dirs:
        # Raw dump may be absent on processed-only checkouts.
        if not meta_dir.exists():
            continue
        for json_file in meta_dir.rglob('*.json'):
            try:
                with open(json_file) as jf:
                    data = json.load(jf)
                if isinstance(data, list):
                    for item in data:
                        # isinstance guard: for a list of strings, `'file' in item`
                        # is a substring test that can pass, and item['file']
                        # would then raise TypeError.
                        if isinstance(item, dict) and 'file' in item and 'content' in item:
                            bn_meta[item['file']] = item
            except (OSError, ValueError):
                # Unreadable file or malformed JSON (JSONDecodeError is a ValueError).
                continue

    for f in sorted(os.listdir(mdir)):
        # Skip stray non-archive files — np.load would raise on them.
        if not f.endswith('.npz'):
            continue
        d = dict(np.load(mdir / f, allow_pickle=True))
        src = str(d.get('source_file', f))
        # Try metadata lookup
        action = 'unknown'
        style = ''
        if src in bn_meta:
            meta = bn_meta[src]
            action = meta.get('content', 'unknown').lower()
            style = meta.get('style', '').lower()
        else:
            # Fallback: parse filename
            name = src.replace('.bvh', '').lower()
            # Common patterns: walk, run, kick, punch, etc.
            for keyword in ['walk', 'run', 'kick', 'punch', 'jump', 'dance', 'idle', 'turn',
                            'throw', 'catch', 'wave', 'bow', 'sit', 'stand', 'crouch', 'crawl']:
                if keyword in name:
                    action = keyword
                    break
        labels[f.replace('.npz', '')] = {
            'L1_action': action,
            'L1_style': style,
            'source_file': src,
        }
    return labels
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
def extract_cmu_mocap():
    """CMU MoCap: directory structure = subject/action.

    Returns:
        dict: motion_id -> {'L1_action', 'L1_subject', 'L1_sequence', 'source_file'}
    """
    labels = {}
    mdir = project_root / 'data' / 'processed' / 'cmu_mocap' / 'motions'
    for f in sorted(os.listdir(mdir)):
        # Skip stray non-archive files (e.g. .DS_Store) — np.load would raise.
        if not f.endswith('.npz'):
            continue
        d = dict(np.load(mdir / f, allow_pickle=True))
        src = str(d.get('source_file', f))
        # "01_01.bvh" → subject=01, sequence=01
        parts = src.replace('.bvh', '').split('_')
        subject = parts[0] if parts else '?'
        seq = parts[1] if len(parts) > 1 else '?'
        labels[f.replace('.npz', '')] = {
            'L1_action': 'motion',  # CMU doesn't have action labels in filenames
            'L1_subject': subject,
            'L1_sequence': seq,
            'source_file': src,
        }
    return labels
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
def extract_mixamo():
    """Mixamo: hash filenames → no meaningful labels from filename alone.

    If the raw repo's animation_frames.json is available and a source name
    matches an animation name, that name is used as the action (lowercased,
    underscores → spaces); otherwise the generic 'motion' label is kept.

    Returns:
        dict: motion_id -> {'L1_action', 'source_file'}
    """
    labels = {}
    mdir = project_root / 'data' / 'processed' / 'mixamo' / 'motions'

    # Try to load animation name mapping if available
    anim_map = {}
    anim_json = project_root / 'data' / 'raw' / 'Mixamo' / 'animation_frames.json'
    if anim_json.exists():
        try:
            with open(anim_json) as jf:
                data = json.load(jf)
            for name in data:
                # Keys are names like "Aim_Pistol" (values are frame counts).
                anim_map[name.lower()] = name
        except (OSError, ValueError):
            # Unreadable or malformed mapping — proceed without it.
            pass

    for f in sorted(os.listdir(mdir)):
        # Skip stray non-archive files — np.load would raise on them.
        if not f.endswith('.npz'):
            continue
        d = dict(np.load(mdir / f, allow_pickle=True))
        src = str(d.get('source_file', f)).replace('.bvh', '').replace('.fbx', '')

        # Try to match hash to animation name. (Previously anim_map was built
        # but never consulted, so every entry was labelled 'motion'.)
        matched = anim_map.get(src.lower())
        action = matched.replace('_', ' ').lower() if matched else 'motion'
        labels[f.replace('.npz', '')] = {
            'L1_action': action,
            'source_file': src,
        }
    return labels
|
| 163 |
+
|
| 164 |
+
|
| 165 |
+
def extract_truebones_zoo():
    """Truebones Zoo: already has some text; extract L1 from species + filename.

    Returns:
        dict: motion_id -> {'L1_action', 'L1_species', 'L1_species_category',
                            'has_L2', 'source_file'}
    """
    labels = {}
    mdir = project_root / 'data' / 'processed' / 'truebones_zoo' / 'motions'
    # Checked in order: first keyword found in the filename wins.
    action_keywords = ('attack', 'walk', 'run', 'idle', 'die', 'death', 'eat', 'bite',
                       'jump', 'fly', 'swim', 'crawl', 'sleep', 'sit', 'stand', 'turn',
                       'howl', 'bark', 'roar', 'hit', 'charge', 'gallop', 'trot', 'strike',
                       'breath', 'wing', 'tail', 'shake', 'scratch', 'pounce', 'retreat')
    for f in sorted(os.listdir(mdir)):
        # Skip stray non-archive files — np.load would raise on them.
        if not f.endswith('.npz'):
            continue
        d = dict(np.load(mdir / f, allow_pickle=True))
        species = str(d.get('species', ''))
        src = str(d.get('source_file', f))
        texts = str(d.get('texts', ''))

        # Extract action from filename: "__Attack1.bvh" → "attack"
        name = src.replace('.bvh', '').strip('_').lower()
        action = 'motion'
        for keyword in action_keywords:
            if keyword in name:
                action = keyword
                break

        labels[f.replace('.npz', '')] = {
            'L1_action': action,
            'L1_species': species,
            'L1_species_category': _species_category(species),
            # "b''" appears when an empty bytes value was stringified upstream.
            'has_L2': bool(texts and texts not in ('', "b''")),
            'source_file': src,
        }
    return labels
|
| 194 |
+
|
| 195 |
+
|
| 196 |
+
def _species_category(species):
|
| 197 |
+
"""Map species to category."""
|
| 198 |
+
quadrupeds = {'Dog', 'Dog-2', 'Cat', 'Horse', 'Bear', 'BrownBear', 'PolarBear', 'PolarBearB',
|
| 199 |
+
'Buffalo', 'Camel', 'Coyote', 'Deer', 'Elephant', 'Fox', 'Gazelle', 'Goat',
|
| 200 |
+
'Hamster', 'Hippopotamus', 'Hound', 'Jaguar', 'Leapord', 'Lion', 'Lynx',
|
| 201 |
+
'Mammoth', 'Monkey', 'Puppy', 'Raindeer', 'Rat', 'Rhino', 'SabreToothTiger',
|
| 202 |
+
'SandMouse', 'Skunk'}
|
| 203 |
+
flying = {'Bat', 'Bird', 'Buzzard', 'Chicken', 'Crow', 'Eagle', 'Flamingo', 'Giantbee',
|
| 204 |
+
'Ostrich', 'Parrot', 'Parrot2', 'Pigeon', 'Pteranodon', 'Tukan'}
|
| 205 |
+
reptile = {'Alligator', 'Comodoa', 'Crocodile', 'Stego', 'Trex', 'Tricera', 'Tyranno'}
|
| 206 |
+
insect = {'Ant', 'Centipede', 'Cricket', 'FireAnt', 'Isopetra', 'Roach', 'Scorpion',
|
| 207 |
+
'Scorpion-2', 'Spider', 'SpiderG'}
|
| 208 |
+
snake = {'Anaconda', 'KingCobra'}
|
| 209 |
+
aquatic = {'Crab', 'HermitCrab', 'Jaws', 'Pirrana', 'Turtle'}
|
| 210 |
+
fantasy = {'Dragon', 'Raptor', 'Raptor2', 'Raptor3'}
|
| 211 |
+
|
| 212 |
+
if species in quadrupeds: return 'quadruped'
|
| 213 |
+
if species in flying: return 'flying'
|
| 214 |
+
if species in reptile: return 'reptile'
|
| 215 |
+
if species in insect: return 'insect'
|
| 216 |
+
if species in snake: return 'snake'
|
| 217 |
+
if species in aquatic: return 'aquatic'
|
| 218 |
+
if species in fantasy: return 'fantasy'
|
| 219 |
+
return 'other'
|
| 220 |
+
|
| 221 |
+
|
| 222 |
+
def main():
    """Run every dataset extractor and write data/processed/{ds}/labels.json.

    Prints a one-line summary (label count + top-5 actions) per dataset.
    """
    from collections import Counter  # hoisted: was re-imported inside the loop

    extractors = {
        'lafan1': extract_lafan1,
        '100style': extract_100style,
        'bandai_namco': extract_bandai_namco,
        'cmu_mocap': extract_cmu_mocap,
        'mixamo': extract_mixamo,
        'truebones_zoo': extract_truebones_zoo,
    }

    for ds, extractor in extractors.items():
        labels = extractor()
        out_path = project_root / 'data' / 'processed' / ds / 'labels.json'
        # Tolerate a missing dataset directory instead of failing on open().
        out_path.parent.mkdir(parents=True, exist_ok=True)
        with open(out_path, 'w') as f:
            json.dump(labels, f, indent=2, ensure_ascii=False)

        # Stats
        actions = [v.get('L1_action', '?') for v in labels.values()]
        top = Counter(actions).most_common(5)
        print(f'{ds:15s}: {len(labels)} labels, top actions: {top}')
|
| 243 |
+
|
| 244 |
+
|
| 245 |
+
# Script entry point: run all extractors only when executed directly,
# not when imported as a module.
if __name__ == '__main__':
    main()
|