knoxel committed on
Commit
1e2f286
·
verified ·
1 Parent(s): 42507d4

Upload soccer_feature_engineering_extended.py

Browse files
soccer_feature_engineering_extended.py CHANGED
@@ -1 +1,303 @@
1
- See /app/soccer_feature_engineering_extended.py
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Soccer Feature Engineering Pipeline - Extended Edition
3
+ ======================================================
4
+ Adds temporal decomposition (1st half / 2nd half), tactical clustering,
5
+ similarity analysis, and visualizations.
6
+
7
+ Outputs:
8
+ - features.csv - full-match 33 features (18 rows)
9
+ - features_first_half.csv - 1st-half features (18 rows)
10
+ - features_second_half.csv - 2nd-half features (18 rows)
11
+ - features_halves_diff.csv - 2nd-half minus 1st-half deltas (18 rows)
12
+ - behavioral_fingerprint.csv - 16-dim fingerprint per team-match (18 rows)
13
+ - cluster_labels.csv - KMeans cluster assignments per row
14
+ - similarity_matrix.csv - pairwise team similarity (18x18)
15
+ - archetype_profiles.csv - centroid profiles per cluster
16
+ - analysis.png - radar chart grid of archetypes
17
+ """
18
+
19
+ import glob
20
+ import json
21
+ import os
22
+ import warnings
23
+
24
+ import numpy as np
25
+ import pandas as pd
26
+ from sklearn.cluster import KMeans
27
+ from sklearn.preprocessing import StandardScaler
28
+ from sklearn.metrics.pairwise import cosine_similarity
29
+ from sklearn.decomposition import PCA
30
+
31
+ warnings.filterwarnings("ignore")
32
+
33
+
34
def discover_dynamic_events_files(data_root="/app/opendata/data/matches"):
    """Return the sorted paths of every ``*_dynamic_events.csv`` one match
    directory below *data_root* (pattern: ``<data_root>/<match>/<name>_dynamic_events.csv``)."""
    candidates = glob.glob(
        os.path.join(data_root, "*", "*_dynamic_events.csv"), recursive=True
    )
    return sorted(candidates)
39
+
40
+
41
def compute_features_for_match(dynamic_events_path, period_filter=None):
    """
    Compute all 33 behavioral features for a single match.

    Parameters
    ----------
    dynamic_events_path : path to a *_dynamic_events.csv file; must provide
        match_id / team_id / event_type / period columns plus the per-event
        attribute columns referenced below.
    period_filter : if set (1 or 2), only events from that period are used.

    Returns
    -------
    pandas.DataFrame with one row per team in the match and columns
    match_id, team_id, att1-att20, def1-def7, run1-run5,
    att_give_and_go_initiated.
    """
    # low_memory=False avoids chunked mixed-dtype inference on these wide files.
    df = pd.read_csv(dynamic_events_path, low_memory=False)
    # Read match_id before any filtering so it survives an empty period slice.
    match_id = df["match_id"].iloc[0]

    if period_filter is not None:
        df = df[df["period"] == period_filter]

    team_ids = sorted(df["team_id"].unique().tolist())

    records = []
    for team_id in team_ids:
        rec = {"match_id": match_id, "team_id": int(team_id)}

        # Per-team event slices: possessions, defensive engagements, off-ball runs.
        pp = df[(df["event_type"] == "player_possession") & (df["team_id"] == team_id)].copy()
        obe = df[(df["event_type"] == "on_ball_engagement") & (df["team_id"] == team_id)].copy()
        obr = df[(df["event_type"] == "off_ball_run") & (df["team_id"] == team_id)].copy()

        # DIMENSION 1 - ATTACKING STRUCTURE (att1-att5)
        # A possession row counts as a pass when pass_outcome is populated.
        passes = pp[pp["pass_outcome"].notna()]
        rec["att1"] = int((passes["third_end"] == "attacking_third").sum())
        # "== True" (rather than astype(bool)) deliberately treats NaN as False.
        rec["att2"] = int(((pp["carry"] == True) & (pp["third_end"] == "attacking_third")).sum())
        # clip(lower=0): only positive bypass counts accumulate.
        pass_opp_bypassed = passes["n_opponents_bypassed"].fillna(0).clip(lower=0)
        rec["att3"] = float(pass_opp_bypassed.sum())
        rec["att4"] = int((passes["last_line_break"] == True).sum())
        rec["att5"] = int((passes["third_start"] == "attacking_third").sum())

        # DIMENSION 2 - BUILD-UP PROFILE (att6-att10): possession-phase counts.
        phase_counts = pp["team_in_possession_phase_type"].value_counts()
        rec["att6"] = int(phase_counts.get("build_up", 0))
        rec["att7"] = int(phase_counts.get("direct", 0))
        rec["att8"] = int(phase_counts.get("set_play", 0))
        rec["att9"] = int(phase_counts.get("quick_break", 0))
        rec["att10"] = int(phase_counts.get("transition", 0))

        # DIMENSION 3 - POSSESSION QUALITY (att11-att20)
        rec["att11"] = int((pp["one_touch"] == True).sum())
        rec["att12"] = int((pp["quick_pass"] == True).sum())
        # assumes lead_to_shot / lead_to_goal are NaN-free booleans -- TODO confirm;
        # a NaN here would make .sum() non-integral or raise on object dtype.
        rec["att13"] = int(pp["lead_to_shot"].sum())
        rec["att14"] = int(pp["lead_to_goal"].sum())
        # Only positive territorial gains are accumulated (losses clipped to 0).
        rec["att15"] = float(pp["delta_to_last_defensive_line_gain"].fillna(0).clip(lower=0).sum())
        rec["att16"] = float(pp["last_defensive_line_height_gain"].fillna(0).clip(lower=0).sum())
        rec["att17"] = int((pp["forward_momentum"] == True).sum())
        rec["att18"] = float(pp["n_passing_options"].fillna(0).sum())
        rec["att19"] = float(pp["n_passing_options_dangerous_difficult"].fillna(0).sum())
        rec["att20"] = int((obr["event_subtype"] == "run_ahead_of_the_ball").sum())

        # DIMENSION 4 - PRESSING & DEFENSIVE SHAPE (def1-def7)
        rec["def1"] = int(len(obe))
        rec["def2"] = int((obe["event_subtype"] == "counter_press").sum())
        rec["def3"] = int((obe["event_subtype"] == "recovery_press").sum())
        # index_in_pressing_chain == 1.0 marks the first event of a chain, so
        # summing chain lengths over these rows counts each chain exactly once.
        chain_starts = obe[obe["index_in_pressing_chain"] == 1.0]
        rec["def4"] = float(chain_starts["pressing_chain_length"].fillna(0).sum())
        rec["def5"] = int(len(chain_starts))
        # Guard the max() against an empty engagement slice.
        rec["def6"] = float(obe["pressing_chain_length"].max() if len(obe) > 0 else 0)
        rec["def7"] = int((obe["stop_possession_danger"] == True).sum())

        # DIMENSION 5 - OFF-BALL MOVEMENT INTELLIGENCE (run1-run5 + give-and-go)
        rec["run1"] = int((obr["break_defensive_line"] == True).sum())
        rec["run2"] = int((obr["push_defensive_line"] == True).sum())
        rec["run3"] = int((obr["event_subtype"] == "behind").sum())
        rec["run4"] = int((obr["event_subtype"] == "overlap").sum())
        rec["run5"] = int((obr["third_start"] == "attacking_third").sum())
        rec["att_give_and_go_initiated"] = int((pp["initiate_give_and_go"] == True).sum())

        records.append(rec)

    # NOTE(review): if period_filter removes every row, this returns an empty
    # frame with no columns -- callers must tolerate missing feature columns.
    return pd.DataFrame(records)
112
+
113
+
114
def build_full_match_features(files, output_path="/app/features.csv"):
    """Compute full-match features for every file, write them to *output_path*
    as CSV, and return the combined DataFrame.

    Files that fail to parse are reported and skipped (best-effort run).
    """
    per_match = []
    for path in files:
        try:
            per_match.append(compute_features_for_match(path))
        except Exception as exc:
            print(f" ERROR: {os.path.basename(path)} -> {exc}")

    features_df = pd.concat(per_match, ignore_index=True)

    feature_cols = (
        [f"att{i}" for i in range(1, 21)]
        + [f"def{i}" for i in range(1, 8)]
        + [f"run{i}" for i in range(1, 6)]
        + ["att_give_and_go_initiated"]
    )
    wanted = ["match_id", "team_id"] + feature_cols
    # Keep only the canonical column order; tolerate columns missing upstream.
    features_df = features_df[[c for c in wanted if c in features_df.columns]]

    # Round float columns to 2 decimals; collapse to int when that is lossless.
    for col in features_df.columns:
        if features_df[col].dtype != float:
            continue
        rounded = features_df[col].round(2)
        if (rounded == rounded.astype(int)).all():
            features_df[col] = rounded.astype(int)
        else:
            features_df[col] = rounded

    features_df.to_csv(output_path, index=False)
    print(f"Wrote full-match features: {features_df.shape}")
    return features_df
144
+
145
+
146
def build_halves_features(files):
    """
    Compute per-half feature tables plus second-minus-first-half deltas.

    Writes /app/features_first_half.csv, /app/features_second_half.csv and
    /app/features_halves_diff.csv, and returns the corresponding DataFrames
    (first_half, second_half, delta). Files/periods that fail to parse are
    reported and skipped (best-effort run).
    """
    halves = {1: [], 2: []}
    for f in files:
        for period in [1, 2]:
            try:
                match_df = compute_features_for_match(f, period_filter=period)
                match_df["period"] = period
                halves[period].append(match_df)
            except Exception as e:
                # Deliberate best-effort: report and continue with other files.
                print(f" ERROR: {os.path.basename(f)} period {period} -> {e}")

    first_half = pd.concat(halves[1], ignore_index=True)
    second_half = pd.concat(halves[2], ignore_index=True)

    feature_cols = (
        [f"att{i}" for i in range(1, 21)] +
        [f"def{i}" for i in range(1, 8)] +
        [f"run{i}" for i in range(1, 6)] +
        ["att_give_and_go_initiated"]
    )

    first_half = first_half.sort_values(["match_id", "team_id"]).reset_index(drop=True)
    second_half = second_half.sort_values(["match_id", "team_id"]).reset_index(drop=True)

    # BUG FIX: the original subtracted the two frames positionally after
    # independent sorts, which silently pairs the wrong teams whenever a
    # (match_id, team_id) row exists in only one half (e.g. after a per-period
    # error was swallowed above). Align explicitly on the keys instead; pairs
    # missing from either half are dropped from the delta rather than
    # subtracted against an unrelated row.
    merged = second_half.merge(
        first_half,
        on=["match_id", "team_id"],
        suffixes=("_2nd", "_1st"),
    )
    delta = merged[["match_id", "team_id"]].copy()
    for col in feature_cols:
        delta[col] = merged[f"{col}_2nd"] - merged[f"{col}_1st"]

    # Round float columns to 2 decimals; collapse to int when that is lossless.
    for df in [first_half, second_half, delta]:
        for col in feature_cols:
            if df[col].dtype == float:
                rounded = df[col].round(2)
                if (rounded == rounded.astype(int)).all():
                    df[col] = rounded.astype(int)
                else:
                    df[col] = rounded

    first_half.to_csv("/app/features_first_half.csv", index=False)
    second_half.to_csv("/app/features_second_half.csv", index=False)
    delta.to_csv("/app/features_halves_diff.csv", index=False)

    print(f"Wrote 1st-half features: {first_half.shape}")
    print(f"Wrote 2nd-half features: {second_half.shape}")
    print(f"Wrote half-to-half deltas: {delta.shape}")
    return first_half, second_half, delta
191
+
192
+
193
def build_behavioral_fingerprint(features_df, output_path="/app/behavioral_fingerprint.csv"):
    """Select the behavioral-fingerprint feature subset, write it to CSV, and
    return ``(fingerprint_df, fingerprint_cols)``."""
    fingerprint_cols = (
        [f"att{i}" for i in range(6, 11)]
        + [f"def{i}" for i in range(4, 7)]
        + [f"run{i}" for i in range(1, 6)]
        + ["att1", "att3", "att15", "att16"]
    )
    fp = features_df.loc[:, ["match_id", "team_id"] + fingerprint_cols].copy()
    fp.to_csv(output_path, index=False)
    print(f"Wrote behavioral fingerprint: {fp.shape}")
    return fp, fingerprint_cols
204
+
205
+
206
def run_clustering(fp_df, feature_cols, n_clusters=4, output_labels="/app/cluster_labels.csv",
                   output_archetypes="/app/archetype_profiles.csv"):
    """Cluster fingerprint rows with KMeans on standardized features.

    Persists per-row labels to *output_labels* and per-cluster centroid means
    to *output_archetypes*; returns ``(labels, archetypes, kmeans, scaler)``.
    NOTE: mutates the caller's ``fp_df`` by attaching a "cluster" column.
    """
    scaler = StandardScaler()
    standardized = scaler.fit_transform(fp_df[feature_cols].values)

    # Fixed random_state keeps cluster assignments reproducible across runs.
    kmeans = KMeans(n_clusters=n_clusters, random_state=42, n_init=10)
    labels = kmeans.fit_predict(standardized)

    fp_df["cluster"] = labels
    fp_df.to_csv(output_labels, index=False)

    # Archetype = per-cluster mean of the raw (unscaled) feature values.
    archetypes = fp_df.groupby("cluster")[feature_cols].mean().round(2)
    archetypes.to_csv(output_archetypes)

    print(f"Wrote cluster labels: {fp_df.shape}")
    print(f"Wrote archetype profiles: {archetypes.shape}")
    print(f"\nCluster counts:\n{fp_df['cluster'].value_counts().sort_index()}")
    print(f"\nArchetype Profiles:\n{archetypes}")
    return labels, archetypes, kmeans, scaler
225
+
226
+
227
def build_similarity_matrix(fp_df, feature_cols, output_path="/app/similarity_matrix.csv"):
    """Write and return the pairwise cosine-similarity matrix between
    team-match fingerprint rows, labeled ``<match_id>_<team_id>``."""
    vectors = fp_df[feature_cols].values
    sim = cosine_similarity(vectors)
    row_labels = fp_df.apply(lambda row: f"{row['match_id']}_{row['team_id']}", axis=1)
    sim_df = pd.DataFrame(sim, index=row_labels, columns=row_labels)
    sim_df.to_csv(output_path)
    print(f"Wrote similarity matrix: {sim_df.shape}")
    return sim_df
235
+
236
+
237
def generate_radar_chart(archetypes, output_path="/app/analysis.png"):
    """Render one polar (radar) subplot per cluster archetype and save a PNG."""
    # Import lazily and force the headless Agg backend so this works
    # without a display server.
    import matplotlib
    matplotlib.use("Agg")
    import matplotlib.pyplot as plt

    categories = archetypes.columns.tolist()
    n_axes = len(categories)
    n_clusters = len(archetypes)

    # Min-max normalize each feature across clusters so every axis spans [0, 1];
    # fillna(0) covers constant features (zero spread).
    spread = archetypes.max() - archetypes.min()
    normalized = ((archetypes - archetypes.min()) / spread).fillna(0)

    angles = [i / float(n_axes) * 2 * np.pi for i in range(n_axes)]
    angles = angles + angles[:1]  # repeat the first angle to close the polygon

    fig, axs = plt.subplots(1, n_clusters, figsize=(5 * n_clusters, 5),
                            subplot_kw=dict(polar=True))
    if n_clusters == 1:
        axs = [axs]  # plt.subplots returns a bare Axes for a single panel

    palette = plt.cm.tab10(np.linspace(0, 1, n_clusters))

    for idx, (cluster_id, row) in enumerate(normalized.iterrows()):
        values = row.values.tolist()
        values = values + values[:1]  # close the polygon to match `angles`
        ax = axs[idx]
        ax.plot(angles, values, color=palette[idx], linewidth=2, label=f"Cluster {cluster_id}")
        ax.fill(angles, values, color=palette[idx], alpha=0.25)
        ax.set_xticks(angles[:-1])
        ax.set_xticklabels(categories, fontsize=7)
        ax.set_title(f"Archetype {cluster_id}", fontsize=12, fontweight="bold")

    plt.tight_layout()
    plt.savefig(output_path, dpi=150)
    plt.close()
    print(f"Saved radar chart: {output_path}")
272
+
273
+
274
def main():
    """Run the full pipeline: discover event files, build full-match and
    per-half features, fingerprint, cluster, compute similarity, and plot."""
    files = discover_dynamic_events_files()
    print(f"Discovered {len(files)} dynamic_events.csv files\n")

    features_df = build_full_match_features(files)
    build_halves_features(files)
    fp_df, fp_cols = build_behavioral_fingerprint(features_df)
    _labels, archetypes, _kmeans, _scaler = run_clustering(fp_df, fp_cols, n_clusters=4)
    build_similarity_matrix(fp_df, fp_cols)
    generate_radar_chart(archetypes)

    print("\n=== ALL OUTPUTS GENERATED ===")
    expected_outputs = [
        "/app/features.csv",
        "/app/features_first_half.csv",
        "/app/features_second_half.csv",
        "/app/features_halves_diff.csv",
        "/app/behavioral_fingerprint.csv",
        "/app/cluster_labels.csv",
        "/app/archetype_profiles.csv",
        "/app/similarity_matrix.csv",
        "/app/analysis.png",
    ]
    # Report only the artifacts that were actually produced.
    for path in expected_outputs:
        if os.path.exists(path):
            size = os.path.getsize(path)
            print(f" {path} ({size} bytes)")
300
+
301
+
302
# Script entry point: run the full pipeline only when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    main()