| """ |
| ALWAS ML Models — Train and evaluate all 3 models: |
| 1. Hours Estimation (XGBoost Regressor) |
| 2. Complexity Classification (XGBoost + LightGBM Ensemble) |
| 3. Bottleneck Risk Prediction (Gradient Boosting Classifier) |
| """ |
| import numpy as np |
| import pandas as pd |
| import json |
| import joblib |
| import os |
| from sklearn.model_selection import train_test_split, cross_val_score, StratifiedKFold |
| from sklearn.preprocessing import LabelEncoder, OrdinalEncoder |
| from sklearn.metrics import ( |
| mean_absolute_error, mean_squared_error, r2_score, |
| classification_report, confusion_matrix, accuracy_score, |
| f1_score, roc_auc_score |
| ) |
| from sklearn.calibration import CalibratedClassifierCV |
| import xgboost as xgb |
| import lightgbm as lgb |
|
|
| |
| print("=" * 60) |
| print("ALWAS ML MODEL TRAINING") |
| print("=" * 60) |
|
|
| df = pd.read_csv('/app/alwas_blocks_dataset.csv') |
| print(f"\nLoaded {len(df)} blocks ({df['is_completed'].sum()} completed, {(~df['is_completed'].astype(bool)).sum()} in-progress)") |
|
|
| |
| print("\n--- Feature Engineering ---") |
|
|
| |
| tech_node_encoder = LabelEncoder() |
| block_type_encoder = LabelEncoder() |
| priority_encoder = OrdinalEncoder(categories=[['P4-Low', 'P3-Medium', 'P2-High', 'P1-Critical']]) |
| engineer_encoder = LabelEncoder() |
|
|
| df['tech_node_encoded'] = tech_node_encoder.fit_transform(df['tech_node']) |
| df['block_type_encoded'] = block_type_encoder.fit_transform(df['block_type']) |
| df['priority_encoded'] = priority_encoder.fit_transform(df[['priority']]).astype(int).flatten() |
| df['engineer_encoded'] = engineer_encoder.fit_transform(df['engineer_id']) |
|
|
| |
| df['type_node_interaction'] = df['tech_node_encoded'] * 10 + df['block_type_encoded'] |
| df['complexity_score'] = df['constraint_complexity'] * df['transistor_count_log'] |
| df['size_priority_interaction'] = df['transistor_count_log'] * df['priority_numeric'] |
|
|
| |
| complexity_encoder = LabelEncoder() |
| df['complexity_encoded'] = complexity_encoder.fit_transform(df['complexity']) |
| bottleneck_encoder = LabelEncoder() |
| df['bottleneck_encoded'] = bottleneck_encoder.fit_transform(df['bottleneck_risk']) |
|
|
| |
| print("\n" + "=" * 60) |
| print("MODEL 1: Hours Estimation (XGBoost Regressor)") |
| print("=" * 60) |
|
|
| |
| completed = df[df['is_completed'] == 1].copy() |
|
|
| HOURS_FEATURES = [ |
| 'tech_node_encoded', 'block_type_encoded', 'priority_encoded', |
| 'transistor_count', 'transistor_count_log', 'has_dependencies', |
| 'num_dependencies', 'constraint_complexity', 'drc_iterations', |
| 'engineer_skill_factor', 'type_node_interaction', 'complexity_score', |
| 'size_priority_interaction' |
| ] |
|
|
| X_hours = completed[HOURS_FEATURES] |
| y_hours = completed['actual_hours'] |
|
|
| X_train_h, X_test_h, y_train_h, y_test_h = train_test_split( |
| X_hours, y_hours, test_size=0.2, random_state=42 |
| ) |
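
# Caveat: the held-out fold below doubles as the early-stopping eval_set, so
# the reported test metrics are mildly optimistic; a separate validation
# split would give a fully unbiased estimate.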
hours_model = xgb.XGBRegressor(
    n_estimators=500,
    learning_rate=0.05,
    max_depth=7,
    subsample=0.8,
    colsample_bytree=0.8,
    min_child_weight=3,
    reg_alpha=0.1,
    reg_lambda=1.0,
    objective='reg:squarederror',
    tree_method='hist',
    random_state=42,
    early_stopping_rounds=50,
)
hours_model.fit(
    X_train_h, y_train_h,
    eval_set=[(X_test_h, y_test_h)],
    verbose=False
)
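
# Diagnostic: best_iteration reports where early stopping halted training
# (set by xgboost when early_stopping_rounds triggers).
print(f"\nEarly stopping: best iteration {hours_model.best_iteration} of {hours_model.n_estimators}")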

y_pred_h = hours_model.predict(X_test_h)
mae = mean_absolute_error(y_test_h, y_pred_h)
rmse = np.sqrt(mean_squared_error(y_test_h, y_pred_h))
r2 = r2_score(y_test_h, y_pred_h)
# MAPE assumes actual_hours > 0, which holds for completed blocks.
mape = np.mean(np.abs((y_test_h - y_pred_h) / y_test_h)) * 100

print("\nHours Estimation Results:")
print(f"  MAE:  {mae:.2f} hours")
print(f"  RMSE: {rmse:.2f} hours")
print(f"  R²:   {r2:.4f}")
print(f"  MAPE: {mape:.1f}%")

importance = pd.DataFrame({
    'feature': HOURS_FEATURES,
    'importance': hours_model.feature_importances_
}).sort_values('importance', ascending=False)
print("\nTop features for hours estimation:")
print(importance.to_string(index=False))

# 5-fold CV on a fresh clone; early stopping is omitted here because
# cross_val_score exposes no eval_set to monitor.
cv_scores = cross_val_score(
    xgb.XGBRegressor(n_estimators=500, learning_rate=0.05, max_depth=7,
                     subsample=0.8, colsample_bytree=0.8,
                     tree_method='hist', random_state=42),
    X_hours, y_hours, cv=5, scoring='r2'
)
print(f"\n5-Fold CV R²: {cv_scores.mean():.4f} ± {cv_scores.std():.4f}")
| print("\n" + "=" * 60) |
| print("MODEL 2: Complexity Classification (XGBoost + LightGBM Ensemble)") |
| print("=" * 60) |
|
|
| COMPLEXITY_FEATURES = [ |
| 'tech_node_encoded', 'block_type_encoded', 'priority_encoded', |
| 'transistor_count', 'transistor_count_log', 'has_dependencies', |
| 'num_dependencies', 'constraint_complexity', 'drc_iterations', |
| 'type_node_interaction', 'complexity_score', 'size_priority_interaction' |
| ] |
|
|
| X_comp = completed[COMPLEXITY_FEATURES] |
| y_comp = completed['complexity_encoded'] |
|
|
| X_train_c, X_test_c, y_train_c, y_test_c = train_test_split( |
| X_comp, y_comp, test_size=0.2, random_state=42, stratify=y_comp |
| ) |
|
|
| |

xgb_clf = xgb.XGBClassifier(
    n_estimators=500,
    learning_rate=0.05,
    max_depth=6,
    subsample=0.8,
    colsample_bytree=0.8,
    objective='multi:softprob',
    num_class=3,
    tree_method='hist',
    random_state=42,
    early_stopping_rounds=50,
)
xgb_clf.fit(X_train_c, y_train_c, eval_set=[(X_test_c, y_test_c)], verbose=False)

lgb_clf = lgb.LGBMClassifier(
    n_estimators=500,
    learning_rate=0.05,
    num_leaves=63,
    subsample=0.8,
    colsample_bytree=0.8,
    random_state=42,
    verbose=-1,
)
lgb_clf.fit(X_train_c, y_train_c)

# Soft-voting ensemble: average the two classifiers' class probabilities and
# take the argmax.
xgb_proba = xgb_clf.predict_proba(X_test_c)
lgb_proba = lgb_clf.predict_proba(X_test_c)
ensemble_proba = (xgb_proba + lgb_proba) / 2
y_pred_c = np.argmax(ensemble_proba, axis=1)

accuracy = accuracy_score(y_test_c, y_pred_c)
f1 = f1_score(y_test_c, y_pred_c, average='weighted')

print("\nComplexity Classification Results (Ensemble):")
print(f"  Accuracy:      {accuracy:.4f}")
print(f"  F1 (weighted): {f1:.4f}")
print("\nClassification Report:")
target_names = complexity_encoder.classes_
print(classification_report(y_test_c, y_pred_c, target_names=target_names))
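
# Per-class error structure (rows = true class, columns = predicted class).
print("Confusion Matrix:")
print(confusion_matrix(y_test_c, y_pred_c))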

# Sanity check: each model alone vs. the averaged ensemble.
xgb_acc = accuracy_score(y_test_c, xgb_clf.predict(X_test_c))
lgb_acc = accuracy_score(y_test_c, lgb_clf.predict(X_test_c))
print(f"\n  XGBoost alone:  {xgb_acc:.4f}")
print(f"  LightGBM alone: {lgb_acc:.4f}")
print(f"  Ensemble:       {accuracy:.4f}")


print("\n" + "=" * 60)
print("MODEL 3: Bottleneck Risk Prediction (Calibrated XGBoost)")
print("=" * 60)

# Trained on all blocks, completed and in-progress: every feature below is
# observable while a block is still in flight.
BOTTLENECK_FEATURES = [
    'tech_node_encoded', 'block_type_encoded', 'priority_encoded',
    'transistor_count_log', 'has_dependencies', 'num_dependencies',
    'constraint_complexity', 'estimated_hours', 'hours_logged',
    'hours_over_estimate_ratio', 'drc_iterations', 'drc_violations_total',
    'lvs_mismatches_total', 'current_stage_idx', 'days_in_current_stage',
    'engineer_skill_factor', 'is_overdue', 'complexity_score'
]

X_bn = df[BOTTLENECK_FEATURES]
y_bn = df['bottleneck_encoded']

X_train_b, X_test_b, y_train_b, y_test_b = train_test_split(
    X_bn, y_bn, test_size=0.2, random_state=42, stratify=y_bn
)

base_bn_model = xgb.XGBClassifier(
    n_estimators=500,
    learning_rate=0.05,
    max_depth=6,
    subsample=0.8,
    colsample_bytree=0.8,
    objective='multi:softprob',
    num_class=3,
    tree_method='hist',
    random_state=42,
)

# Wrap in isotonic calibration (3 internal folds) so the predicted risk
# probabilities are trustworthy as probabilities, not just as rankings.
bn_model = CalibratedClassifierCV(base_bn_model, cv=3, method='isotonic')
bn_model.fit(X_train_b, y_train_b)

y_pred_b = bn_model.predict(X_test_b)
y_proba_b = bn_model.predict_proba(X_test_b)

bn_accuracy = accuracy_score(y_test_b, y_pred_b)
bn_f1 = f1_score(y_test_b, y_pred_b, average='weighted')

print("\nBottleneck Risk Prediction Results:")
print(f"  Accuracy:      {bn_accuracy:.4f}")
print(f"  F1 (weighted): {bn_f1:.4f}")
print("\nClassification Report:")
bn_target_names = bottleneck_encoder.classes_
print(classification_report(y_test_b, y_pred_b, target_names=bn_target_names))
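
# Ranking quality of the calibrated probabilities (macro one-vs-rest ROC-AUC),
# plus the per-class error structure.
bn_auc = roc_auc_score(y_test_b, y_proba_b, multi_class='ovr')
print(f"  ROC-AUC (OvR, macro): {bn_auc:.4f}")
print("Confusion Matrix:")
print(confusion_matrix(y_test_b, y_pred_b))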


print("\n" + "=" * 60)
print("SAVING MODELS")
print("=" * 60)

os.makedirs('/app/models', exist_ok=True)

# Trained models
joblib.dump(hours_model, '/app/models/hours_estimator.joblib')
joblib.dump(xgb_clf, '/app/models/complexity_xgb.joblib')
joblib.dump(lgb_clf, '/app/models/complexity_lgb.joblib')
joblib.dump(bn_model, '/app/models/bottleneck_predictor.joblib')
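
# Round-trip sanity check: the serialized regressor should reproduce the
# in-memory model's predictions.
_reloaded = joblib.load('/app/models/hours_estimator.joblib')
assert np.allclose(_reloaded.predict(X_test_h), hours_model.predict(X_test_h))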

# Fitted encoders (inference must apply the same category mappings).
joblib.dump(tech_node_encoder, '/app/models/tech_node_encoder.joblib')
joblib.dump(block_type_encoder, '/app/models/block_type_encoder.joblib')
joblib.dump(priority_encoder, '/app/models/priority_encoder.joblib')
joblib.dump(engineer_encoder, '/app/models/engineer_encoder.joblib')
joblib.dump(complexity_encoder, '/app/models/complexity_encoder.joblib')
joblib.dump(bottleneck_encoder, '/app/models/bottleneck_encoder.joblib')

# Feature configuration so downstream inference can rebuild exact column order.
feature_config = {
    'hours_features': HOURS_FEATURES,
    'complexity_features': COMPLEXITY_FEATURES,
    'bottleneck_features': BOTTLENECK_FEATURES,
    'tech_nodes': list(tech_node_encoder.classes_),
    'block_types': list(block_type_encoder.classes_),
    'priorities': ['P4-Low', 'P3-Medium', 'P2-High', 'P1-Critical'],
    'complexity_classes': list(complexity_encoder.classes_),
    'bottleneck_classes': list(bottleneck_encoder.classes_),
}
with open('/app/models/feature_config.json', 'w') as f:
    json.dump(feature_config, f, indent=2)
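
# Downstream usage sketch (illustrative only, not executed here):
#   model = joblib.load('/app/models/hours_estimator.joblib')
#   cfg = json.load(open('/app/models/feature_config.json'))
#   preds = model.predict(new_blocks[cfg['hours_features']])
# where `new_blocks` is a hypothetical DataFrame already passed through the
# saved encoders above.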

# Evaluation metrics summary
metrics = {
    'hours_estimation': {
        'mae': round(mae, 2),
        'rmse': round(rmse, 2),
        'r2': round(r2, 4),
        'mape_percent': round(mape, 1),
        'cv_r2_mean': round(cv_scores.mean(), 4),
        'cv_r2_std': round(cv_scores.std(), 4),
    },
    'complexity_classification': {
        'accuracy': round(accuracy, 4),
        'f1_weighted': round(f1, 4),
        'xgb_accuracy': round(xgb_acc, 4),
        'lgb_accuracy': round(lgb_acc, 4),
        'ensemble_accuracy': round(accuracy, 4),
    },
    'bottleneck_prediction': {
        'accuracy': round(bn_accuracy, 4),
        'f1_weighted': round(bn_f1, 4),
    },
    'training_data': {
        'total_samples': len(df),
        'completed_blocks': int(df['is_completed'].sum()),
        'in_progress_blocks': int((~df['is_completed'].astype(bool)).sum()),
    }
}
with open('/app/models/metrics.json', 'w') as f:
    json.dump(metrics, f, indent=2)

print("\nModels saved to /app/models/:")
for fname in sorted(os.listdir('/app/models')):
    size = os.path.getsize(f'/app/models/{fname}')
    print(f"  {fname} ({size:,} bytes)")


print("\n" + "=" * 60)
print("ALL MODELS TRAINED SUCCESSFULLY")
print("=" * 60)