"""
Soccer Feature Engineering Pipeline - Extended Edition
======================================================
Adds temporal decomposition (1st half / 2nd half), tactical clustering,
similarity analysis, and visualizations.
Outputs:
- features.csv - 33 full-match features (18 rows)
- features_first_half.csv - 1st-half features (18 rows)
- features_second_half.csv - 2nd-half features (18 rows)
- features_halves_diff.csv - 2nd-half minus 1st-half deltas (18 rows)
- behavioral_fingerprint.csv - 17-dim fingerprint per team-match (18 rows)
- cluster_labels.csv - KMeans cluster assignments per row
- similarity_matrix.csv - pairwise team similarity (18x18)
- archetype_profiles.csv - centroid profiles per cluster
- analysis.png - radar chart grid of archetypes
"""
import glob
import os
import warnings
import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
from sklearn.metrics.pairwise import cosine_similarity
warnings.filterwarnings("ignore")
def discover_dynamic_events_files(data_root="/app/opendata/data/matches"):
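    # Assumed layout (inferred from the glob pattern below): one folder per
    # match, each holding a "*_dynamic_events.csv" file.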
pattern = os.path.join(data_root, "*", "*_dynamic_events.csv")
    files = glob.glob(pattern)
files.sort()
return files
def compute_features_for_match(dynamic_events_path, period_filter=None):
"""
Compute all 33 features for a single match.
If period_filter is set (1 or 2), only events from that period are used.
"""
df = pd.read_csv(dynamic_events_path, low_memory=False)
match_id = df["match_id"].iloc[0]
if period_filter is not None:
df = df[df["period"] == period_filter]
team_ids = sorted(df["team_id"].unique().tolist())
records = []
for team_id in team_ids:
rec = {"match_id": match_id, "team_id": int(team_id)}
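        # Split the match into the three event streams the features draw on:
        # player possessions (pp), on-ball engagements (obe), off-ball runs (obr).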
pp = df[(df["event_type"] == "player_possession") & (df["team_id"] == team_id)].copy()
obe = df[(df["event_type"] == "on_ball_engagement") & (df["team_id"] == team_id)].copy()
obr = df[(df["event_type"] == "off_ball_run") & (df["team_id"] == team_id)].copy()
# DIMENSION 1 - ATTACKING STRUCTURE (att1-att5)
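        # A non-null pass_outcome marks possessions that ended with a pass
        # attempt (assumed schema), so "passes" covers completed and failed passes.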
passes = pp[pp["pass_outcome"].notna()]
rec["att1"] = int((passes["third_end"] == "attacking_third").sum())
rec["att2"] = int(((pp["carry"] == True) & (pp["third_end"] == "attacking_third")).sum())
pass_opp_bypassed = passes["n_opponents_bypassed"].fillna(0).clip(lower=0)
rec["att3"] = float(pass_opp_bypassed.sum())
rec["att4"] = int((passes["last_line_break"] == True).sum())
rec["att5"] = int((passes["third_start"] == "attacking_third").sum())
# DIMENSION 2 - BUILD-UP PROFILE (att6-att10)
phase_counts = pp["team_in_possession_phase_type"].value_counts()
rec["att6"] = int(phase_counts.get("build_up", 0))
rec["att7"] = int(phase_counts.get("direct", 0))
rec["att8"] = int(phase_counts.get("set_play", 0))
rec["att9"] = int(phase_counts.get("quick_break", 0))
rec["att10"] = int(phase_counts.get("transition", 0))
# DIMENSION 3 - POSSESSION QUALITY (att11-att20)
rec["att11"] = int((pp["one_touch"] == True).sum())
rec["att12"] = int((pp["quick_pass"] == True).sum())
rec["att13"] = int(pp["lead_to_shot"].sum())
rec["att14"] = int(pp["lead_to_goal"].sum())
rec["att15"] = float(pp["delta_to_last_defensive_line_gain"].fillna(0).clip(lower=0).sum())
rec["att16"] = float(pp["last_defensive_line_height_gain"].fillna(0).clip(lower=0).sum())
rec["att17"] = int((pp["forward_momentum"] == True).sum())
rec["att18"] = float(pp["n_passing_options"].fillna(0).sum())
rec["att19"] = float(pp["n_passing_options_dangerous_difficult"].fillna(0).sum())
rec["att20"] = int((obr["event_subtype"] == "run_ahead_of_the_ball").sum())
# DIMENSION 4 - PRESSING & DEFENSIVE SHAPE (def1-def7)
rec["def1"] = int(len(obe))
rec["def2"] = int((obe["event_subtype"] == "counter_press").sum())
rec["def3"] = int((obe["event_subtype"] == "recovery_press").sum())
chain_starts = obe[obe["index_in_pressing_chain"] == 1.0]
rec["def4"] = float(chain_starts["pressing_chain_length"].fillna(0).sum())
rec["def5"] = int(len(chain_starts))
rec["def6"] = float(obe["pressing_chain_length"].max() if len(obe) > 0 else 0)
rec["def7"] = int((obe["stop_possession_danger"] == True).sum())
# DIMENSION 5 - OFF-BALL MOVEMENT INTELLIGENCE (run1-run5 + gng)
rec["run1"] = int((obr["break_defensive_line"] == True).sum())
rec["run2"] = int((obr["push_defensive_line"] == True).sum())
rec["run3"] = int((obr["event_subtype"] == "behind").sum())
rec["run4"] = int((obr["event_subtype"] == "overlap").sum())
rec["run5"] = int((obr["third_start"] == "attacking_third").sum())
rec["att_give_and_go_initiated"] = int((pp["initiate_give_and_go"] == True).sum())
records.append(rec)
return pd.DataFrame(records)
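# Example usage (hypothetical path):
#   full = compute_features_for_match("/app/opendata/data/matches/42/42_dynamic_events.csv")
#   h1 = compute_features_for_match("/app/opendata/data/matches/42/42_dynamic_events.csv", period_filter=1)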
def build_full_match_features(files, output_path="/app/features.csv"):
all_dfs = []
for f in files:
try:
match_df = compute_features_for_match(f)
all_dfs.append(match_df)
except Exception as e:
print(f" ERROR: {os.path.basename(f)} -> {e}")
    if not all_dfs:
        raise RuntimeError("No matches produced features; check the input files.")
    features_df = pd.concat(all_dfs, ignore_index=True)
feature_cols = (
[f"att{i}" for i in range(1, 21)] +
[f"def{i}" for i in range(1, 8)] +
[f"run{i}" for i in range(1, 6)] +
["att_give_and_go_initiated"]
)
ordered_cols = ["match_id", "team_id"] + feature_cols
features_df = features_df[[c for c in ordered_cols if c in features_df.columns]]
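    # Collapse whole-number float columns to ints purely for cleaner CSV output.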
    for col in features_df.columns:
        if features_df[col].dtype == float:
            rounded = features_df[col].round(2)
            # notna guard: astype(int) raises on NaN values.
            if rounded.notna().all() and (rounded == rounded.astype(int)).all():
                features_df[col] = rounded.astype(int)
            else:
                features_df[col] = rounded
features_df.to_csv(output_path, index=False)
print(f"Wrote full-match features: {features_df.shape}")
return features_df
def build_halves_features(files):
halves = {1: [], 2: []}
for f in files:
for period in [1, 2]:
try:
match_df = compute_features_for_match(f, period_filter=period)
match_df["period"] = period
halves[period].append(match_df)
except Exception as e:
print(f" ERROR: {os.path.basename(f)} period {period} -> {e}")
first_half = pd.concat(halves[1], ignore_index=True)
second_half = pd.concat(halves[2], ignore_index=True)
feature_cols = (
[f"att{i}" for i in range(1, 21)] +
[f"def{i}" for i in range(1, 8)] +
[f"run{i}" for i in range(1, 6)] +
["att_give_and_go_initiated"]
)
    first_half = first_half.sort_values(["match_id", "team_id"]).reset_index(drop=True)
    second_half = second_half.sort_values(["match_id", "team_id"]).reset_index(drop=True)
    # Align halves on (match_id, team_id) instead of row position, so a
    # missing team-half cannot silently shift the subtraction.
    merged = first_half.merge(second_half, on=["match_id", "team_id"], suffixes=("_h1", "_h2"))
    delta = merged[["match_id", "team_id"]].copy()
    for col in feature_cols:
        delta[col] = merged[f"{col}_h2"] - merged[f"{col}_h1"]
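    # Sign convention: positive deltas mean more of that behavior after the break.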
    for df in [first_half, second_half, delta]:
        for col in feature_cols:
            if df[col].dtype == float:
                rounded = df[col].round(2)
                # Same whole-number collapse as above, with the same NaN guard.
                if rounded.notna().all() and (rounded == rounded.astype(int)).all():
                    df[col] = rounded.astype(int)
                else:
                    df[col] = rounded
first_half.to_csv("/app/features_first_half.csv", index=False)
second_half.to_csv("/app/features_second_half.csv", index=False)
delta.to_csv("/app/features_halves_diff.csv", index=False)
print(f"Wrote 1st-half features: {first_half.shape}")
print(f"Wrote 2nd-half features: {second_half.shape}")
print(f"Wrote half-to-half deltas: {delta.shape}")
return first_half, second_half, delta
def build_behavioral_fingerprint(features_df, output_path="/app/behavioral_fingerprint.csv"):
fingerprint_cols = (
[f"att{i}" for i in range(6, 11)] +
[f"def{i}" for i in range(4, 7)] +
[f"run{i}" for i in range(1, 6)] +
["att1", "att3", "att15", "att16"]
)
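    # 17 style-oriented dimensions: possession-phase mix (att6-att10),
    # pressing-chain shape (def4-def6), run profile (run1-run5), and four
    # ball-progression measures (att1, att3, att15, att16).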
fp = features_df[["match_id", "team_id"] + fingerprint_cols].copy()
fp.to_csv(output_path, index=False)
print(f"Wrote behavioral fingerprint: {fp.shape}")
return fp, fingerprint_cols
def run_clustering(fp_df, feature_cols, n_clusters=4, output_labels="/app/cluster_labels.csv",
output_archetypes="/app/archetype_profiles.csv"):
X = fp_df[feature_cols].values
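    # Standardize first: KMeans minimizes Euclidean distance, so unscaled
    # high-volume counts would otherwise dominate the clustering.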
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
kmeans = KMeans(n_clusters=n_clusters, random_state=42, n_init=10)
labels = kmeans.fit_predict(X_scaled)
fp_df["cluster"] = labels
fp_df.to_csv(output_labels, index=False)
archetypes = fp_df.groupby("cluster")[feature_cols].mean().round(2)
archetypes.to_csv(output_archetypes)
print(f"Wrote cluster labels: {fp_df.shape}")
print(f"Wrote archetype profiles: {archetypes.shape}")
print(f"\nCluster counts:\n{fp_df['cluster'].value_counts().sort_index()}")
print(f"\nArchetype Profiles:\n{archetypes}")
return labels, archetypes, kmeans, scaler
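# A quick sanity check for the fixed n_clusters=4 choice (sketch, not wired in):
#   from sklearn.metrics import silhouette_score
#   silhouette_score(X_scaled, labels)  # closer to 1.0 means better-separated clusters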
def build_similarity_matrix(fp_df, feature_cols, output_path="/app/similarity_matrix.csv"):
X = fp_df[feature_cols].values
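    # Cosine similarity compares the direction of each fingerprint vector, so
    # overall event volume is largely factored out; note the raw (unscaled)
    # features are used here, unlike in the clustering step.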
sim = cosine_similarity(X)
labels = fp_df.apply(lambda r: f"{r['match_id']}_{r['team_id']}", axis=1)
sim_df = pd.DataFrame(sim, index=labels, columns=labels)
sim_df.to_csv(output_path)
print(f"Wrote similarity matrix: {sim_df.shape}")
return sim_df
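# Example lookup (hypothetical labels of the form "<match_id>_<team_id>"):
#   sim_df.loc["10001_1", "10001_2"]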
def generate_radar_chart(archetypes, output_path="/app/analysis.png"):
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
n_clusters = len(archetypes)
categories = archetypes.columns.tolist()
N = len(categories)
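    # Min-max normalize each feature across clusters so all radar axes share
    # a 0-1 scale; constant columns divide to NaN and are zeroed just below.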
archetypes_norm = (archetypes - archetypes.min()) / (archetypes.max() - archetypes.min())
archetypes_norm = archetypes_norm.fillna(0)
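    # One axis per feature, evenly spaced around the circle; the first point
    # is repeated so each polygon closes.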
angles = [n / float(N) * 2 * np.pi for n in range(N)]
angles += angles[:1]
fig, axs = plt.subplots(1, n_clusters, figsize=(5 * n_clusters, 5), subplot_kw=dict(polar=True))
if n_clusters == 1:
axs = [axs]
colors = plt.cm.tab10(np.linspace(0, 1, n_clusters))
for idx, (cluster_id, row) in enumerate(archetypes_norm.iterrows()):
ax = axs[idx]
values = row.values.tolist()
values += values[:1]
ax.plot(angles, values, color=colors[idx], linewidth=2, label=f"Cluster {cluster_id}")
ax.fill(angles, values, color=colors[idx], alpha=0.25)
ax.set_xticks(angles[:-1])
ax.set_xticklabels(categories, fontsize=7)
ax.set_title(f"Archetype {cluster_id}", fontsize=12, fontweight="bold")
plt.tight_layout()
plt.savefig(output_path, dpi=150)
plt.close()
print(f"Saved radar chart: {output_path}")
def main():
files = discover_dynamic_events_files()
print(f"Discovered {len(files)} dynamic_events.csv files\n")
features_df = build_full_match_features(files)
first_half, second_half, delta = build_halves_features(files)
fp_df, fp_cols = build_behavioral_fingerprint(features_df)
labels, archetypes, kmeans, scaler = run_clustering(fp_df, fp_cols, n_clusters=4)
sim_matrix = build_similarity_matrix(fp_df, fp_cols)
generate_radar_chart(archetypes)
print("\n=== ALL OUTPUTS GENERATED ===")
for f in [
"/app/features.csv",
"/app/features_first_half.csv",
"/app/features_second_half.csv",
"/app/features_halves_diff.csv",
"/app/behavioral_fingerprint.csv",
"/app/cluster_labels.csv",
"/app/archetype_profiles.csv",
"/app/similarity_matrix.csv",
"/app/analysis.png",
]:
if os.path.exists(f):
sz = os.path.getsize(f)
print(f" {f} ({sz} bytes)")
if __name__ == "__main__":
main()