# streamlit_app.py
"""
Credit Invisibility Solver — Streamlit App

Scores loan applicants who lack traditional credit history, using an
ensemble of LightGBM fold models over tabular + NLP-embedding features.

Run: streamlit run streamlit_app.py
"""
import streamlit as st
import numpy as np
import pandas as pd
import shap
import lightgbm as lgb
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use("Agg")  # headless backend — Streamlit renders figures itself
import json
import joblib
import os
import plotly.graph_objects as go
from sentence_transformers import SentenceTransformer
from sklearn.decomposition import PCA

# ─── Page config ──────────────────────────────────────────────────────────────
st.set_page_config(
    page_title="Credit Invisibility Solver",
    page_icon="💳",
    layout="wide",
    initial_sidebar_state="expanded",
)

# ─── Custom CSS ───────────────────────────────────────────────────────────────
# NOTE(review): the CSS payload was empty in the original source (likely lost
# in a paste); the call is kept so custom styles can be re-added here.
st.markdown("""
""", unsafe_allow_html=True)

# ─── Load artifacts ───────────────────────────────────────────────────────────
MODEL_DIR = "./models"


@st.cache_resource
def load_models():
    """Load up to five LightGBM fold boosters from MODEL_DIR.

    Returns:
        list[lgb.Booster]: one booster per existing ``lgbm_fold_{i}.txt``
        file (missing folds are silently skipped; may be empty).
    """
    models = []
    for i in range(1, 6):
        path = f"{MODEL_DIR}/lgbm_fold_{i}.txt"
        if os.path.exists(path):
            models.append(lgb.Booster(model_file=path))
    return models


@st.cache_resource
def load_artifacts():
    """Load preprocessing artifacts saved by the training pipeline.

    Returns:
        tuple: (pca, scaler, feature_cols) — ``pca``/``scaler`` are the
        fitted joblib objects or None when the file is absent;
        ``feature_cols`` is the training feature order (``[]`` when absent).
    """
    pca_path = f"{MODEL_DIR}/pca.pkl"
    scaler_path = f"{MODEL_DIR}/scaler.pkl"
    pca = joblib.load(pca_path) if os.path.exists(pca_path) else None
    scaler = joblib.load(scaler_path) if os.path.exists(scaler_path) else None

    fc_path = f"{MODEL_DIR}/feature_cols.json"
    if os.path.exists(fc_path):
        with open(fc_path) as f:
            feature_cols = json.load(f)
    else:
        feature_cols = []
    return pca, scaler, feature_cols


@st.cache_resource
def load_sbert():
    """Load the Sentence-BERT encoder; returns None when unavailable (demo mode)."""
    try:
        return SentenceTransformer("all-MiniLM-L6-v2")
    except Exception:
        # Broad catch is deliberate: any download/load failure degrades to
        # the zero-embedding demo path instead of crashing the app.
        return None


# ─── Helper functions ─────────────────────────────────────────────────────────
def build_single_applicant_features(inputs: dict, pca, sbert) -> pd.DataFrame:
    """Transform raw user inputs into a single-row, model-ready DataFrame.

    Args:
        inputs: raw applicant fields collected from the sidebar widgets.
        pca: fitted PCA for embedding reduction, or None.
        sbert: SentenceTransformer encoder, or None (demo mode → zero embeddings).

    Returns:
        pd.DataFrame with one row of tabular + ``NLP_EMB_*`` features.
    """
    income = inputs["income"]
    credit = inputs["credit_amount"]
    age = inputs["age"]
    emp_yrs = inputs["employment_years"]
    ext1 = inputs["ext_score_1"]
    ext2 = inputs["ext_score_2"]
    ext3 = inputs["ext_score_3"]
    ext_mean = np.mean([ext1, ext2, ext3])  # hoisted: reused several times below

    # Build a synthetic text for the NLP embedding
    if ext_mean > 0.6:
        literacy = "strong financial planning habits"
    elif ext_mean > 0.4:
        literacy = "moderate financial awareness"
    else:
        literacy = "limited financial experience"
    text = (
        f"Applicant aged {age:.0f} years with annual income of {income:.0f}. "
        f"Requesting credit of {credit:.0f}. Employed for {emp_yrs:.1f} years. "
        f"Client demonstrates {literacy}. External score: {ext_mean:.2f}. "
        f"{'Owns property.' if inputs['owns_realty'] else 'No property.'} "
        f"{'Has dependents.' if inputs['has_children'] else 'No children.'}"
    )

    # NLP embed + PCA
    if sbert is not None:
        emb = sbert.encode([text], normalize_embeddings=True)
        if pca is not None:
            emb = pca.transform(emb)
        nlp_dict = {f"NLP_EMB_{i}": emb[0][i] for i in range(emb.shape[1])}
    else:
        # Demo mode — deterministic zero pseudo-embeddings, sized to match PCA
        n_dims = pca.n_components_ if pca is not None else 32
        nlp_dict = {f"NLP_EMB_{i}": 0.0 for i in range(n_dims)}

    # Tabular features (DAYS_* are negative day counts, Home-Credit style)
    tab_dict = {
        "AMT_INCOME_TOTAL": income,
        "AMT_CREDIT": credit,
        "AMT_ANNUITY": inputs["annuity"],
        "AMT_GOODS_PRICE": credit * 0.9,
        "DAYS_BIRTH": -age * 365,
        "DAYS_EMPLOYED": -emp_yrs * 365,
        "EXT_SOURCE_1": ext1,
        "EXT_SOURCE_2": ext2,
        "EXT_SOURCE_3": ext3,
        "EXT_SOURCE_MEAN": ext_mean,
        "EXT_SOURCE_MIN": np.min([ext1, ext2, ext3]),
        "EXT_SOURCE_PROD": ext1 * ext2 * ext3,
        "EXT_SOURCE_STD": np.std([ext1, ext2, ext3]),
        "EXT1_EXT2_INTERACTION": ext1 * ext2,
        "EXT2_EXT3_INTERACTION": ext2 * ext3,
        # +1 denominators guard against division by zero
        "CREDIT_INCOME_RATIO": credit / (income + 1),
        "ANNUITY_INCOME_RATIO": inputs["annuity"] / (income + 1),
        "CREDIT_TERM": inputs["annuity"] / (credit + 1),
        "AGE_YEARS": age,
        "EMPLOYMENT_YEARS": emp_yrs,
        "EMPLOYED_RATIO": emp_yrs / (age + 1),
        "INCOME_PER_PERSON": income / (inputs["family_size"] + 1),
        "CNT_FAM_MEMBERS": inputs["family_size"],
        "CNT_CHILDREN": inputs["n_children"],
        "CHILDREN_RATIO": inputs["n_children"] / (inputs["family_size"] + 1),
        "FLAG_OWN_REALTY": int(inputs["owns_realty"]),
        "FLAG_OWN_CAR": int(inputs["owns_car"]),
        "HAS_CAR_REALTY": int(inputs["owns_realty"] and inputs["owns_car"]),
        "DOCUMENT_COUNT": inputs["doc_count"],
        "TOTAL_ENQUIRIES": inputs["total_enquiries"],
        "BUREAU_COUNT": inputs["bureau_count"],
        "BUREAU_ACTIVE_COUNT": inputs["bureau_active"],
    }
    return pd.DataFrame([{**tab_dict, **nlp_dict}])


def predict_risk(df_feat: pd.DataFrame, models: list, feature_cols: list) -> float:
    """Ensemble-average the default probability across all loaded fold models.

    Columns are aligned to the training order; missing features are filled
    with 0. Fix: the original mutated the caller's DataFrame in place while
    aligning — ``reindex`` builds a new frame instead.
    """
    aligned = df_feat.reindex(columns=feature_cols, fill_value=0.0)
    preds = [m.predict(aligned, num_iteration=m.best_iteration) for m in models]
    return float(np.mean(preds))


def risk_band(score: float) -> tuple:
    """Map a default probability to (label, css_class, color, icon)."""
    if score < 0.15:
        return "LOW RISK", "risk-low", "#4CAF50", "✅"
    elif score < 0.40:
        return "MEDIUM RISK", "risk-medium", "#FF9800", "⚠️"
    else:
        return "HIGH RISK", "risk-high", "#F44336", "🚨"


def get_shap_values(model, df_feat, feature_cols):
    """Compute SHAP values for one model on the aligned feature frame.

    Returns:
        tuple: (shap_values, expected_value, aligned_df). For binary
        classifiers SHAP may return per-class lists; index 1 (positive
        class) is selected. Alignment no longer mutates the caller's frame.
    """
    aligned = df_feat.reindex(columns=feature_cols, fill_value=0.0)
    explainer = shap.TreeExplainer(model)
    sv = explainer.shap_values(aligned)
    if isinstance(sv, list):
        sv = sv[1]
    expected = explainer.expected_value
    if isinstance(expected, list):
        expected = expected[1]
    return sv, expected, aligned


# ─── Main App ─────────────────────────────────────────────────────────────────
def main():
    """Render the sidebar form, score the applicant, and display results."""
    # NOTE(review): the original header/risk-card/footer HTML was truncated in
    # the source; the markup below reconstructs the surviving text content.
    st.markdown(
        '<div style="text-align:center"><h1>💳 Credit Invisibility Solver</h1>'
        '<p>Alternative data ML pipeline to score the 1.7B credit-invisible '
        'population</p></div>',
        unsafe_allow_html=True,
    )
    st.divider()

    # Load models — any failure degrades to demo mode instead of crashing.
    try:
        models = load_models()
        pca, scaler, feature_cols = load_artifacts()
        sbert = load_sbert()
        model_loaded = len(models) > 0
    except Exception as e:
        st.error(f"⚠️ Could not load models: {e}. Running in demo mode.")
        model_loaded = False
        models, pca, scaler, feature_cols = [], None, None, []
        # Fix: original left `sbert` unassigned here, causing a NameError
        # in build_single_applicant_features() when loading failed.
        sbert = None

    # ── Sidebar ───────────────────────────────────────────────────────────────
    with st.sidebar:
        st.image(
            "https://img.shields.io/badge/Model-LightGBM%20%2B%20XGBoost-brightgreen",
            use_container_width=True,
        )
        st.markdown("### 🎛️ Applicant Profile")

        income = st.number_input("Annual Income (₹)", 10000, 10000000, 250000, step=10000)
        credit_amount = st.number_input("Requested Credit (₹)", 10000, 5000000, 500000, step=10000)
        annuity = st.number_input("Monthly Annuity (₹)", 1000, 200000, 15000, step=1000)

        age = st.slider("Age (years)", 20, 70, 35)
        employment_yrs = st.slider("Employment Years", 0, 40, 5)
        family_size = st.slider("Family Size", 1, 10, 3)
        n_children = st.slider("Number of Children", 0, 5, 0)

        owns_realty = st.checkbox("Owns Property", True)
        owns_car = st.checkbox("Owns Car", False)

        ext_score_1 = st.slider("External Score 1 (Bureau)", 0.0, 1.0, 0.6, 0.01)
        ext_score_2 = st.slider("External Score 2 (Behaviour)", 0.0, 1.0, 0.55, 0.01)
        ext_score_3 = st.slider("External Score 3 (Alt Data)", 0.0, 1.0, 0.50, 0.01)

        bureau_count = st.number_input("# Previous Bureau Enquiries", 0, 50, 2)
        bureau_active = st.number_input("# Active Bureau Credits", 0, 20, 1)
        total_enquiries = st.number_input("# Total Loan Enquiries", 0, 100, 3)
        doc_count = st.number_input("# Documents Submitted", 0, 20, 5)

        predict_btn = st.button("🔮 Score Applicant", use_container_width=True, type="primary")

    # ── Main Panels ───────────────────────────────────────────────────────────
    col1, col2, col3 = st.columns(3)

    inputs = dict(
        income=income,
        credit_amount=credit_amount,
        annuity=annuity,
        age=age,
        employment_years=employment_yrs,
        family_size=family_size,
        n_children=n_children,
        owns_realty=owns_realty,
        owns_car=owns_car,
        ext_score_1=ext_score_1,
        ext_score_2=ext_score_2,
        ext_score_3=ext_score_3,
        bureau_count=bureau_count,
        bureau_active=bureau_active,
        total_enquiries=total_enquiries,
        doc_count=doc_count,
        has_children=n_children > 0,
    )

    if predict_btn or True:  # deliberately always true: show a demo score on first load
        with st.spinner("Running ML pipeline..."):
            df_feat = build_single_applicant_features(inputs, pca, sbert)
            if model_loaded:
                risk_score = predict_risk(df_feat, models, feature_cols)
            else:
                # Demo mode — heuristic score with a small random jitter
                risk_score = float(np.clip(
                    0.9
                    - 0.4 * np.mean([ext_score_1, ext_score_2, ext_score_3])
                    - 0.1 * (employment_yrs / 40)
                    + 0.15 * (credit_amount / income if income > 0 else 0.5)
                    + np.random.normal(0, 0.02),
                    0.01, 0.99,
                ))

        label, css_class, color, icon = risk_band(risk_score)
        credit_score = int(300 + (1 - risk_score) * 550)  # map to 300-850 range

        # ── KPI Row ───────────────────────────────────────────────────────────
        col1.metric(
            "Default Probability",
            f"{risk_score*100:.1f}%",
            delta=f"{(risk_score-0.5)*100:+.1f}% vs avg",
        )
        col2.metric("Alt Credit Score", f"{credit_score}", delta=None)
        col3.metric("Risk Band", f"{icon} {label}", delta=None)
        st.divider()

        # ── Risk Card ─────────────────────────────────────────────────────────
        st.markdown(
            f'<div class="{css_class}" style="border-left:6px solid {color};padding:1rem">'
            f'<h2>{icon} {label}</h2>'
            f'<p>Default probability {risk_score*100:.1f}% · alt credit score {credit_score}</p>'
            f'</div>',
            unsafe_allow_html=True,
        )

    # ── Footer ────────────────────────────────────────────────────────────────
    st.markdown(
        '<div style="text-align:center;opacity:0.7">'
        'Built with LightGBM + XGBoost + Sentence-BERT + SHAP + River (ADWIN) + W&B | '
        'Home Credit Default Risk Dataset | '
        'For the 1.7B credit-invisible 🌍'
        '</div>',
        unsafe_allow_html=True,
    )


if __name__ == "__main__":
    main()