"""Loan evaluation pipeline: base approval prediction, DTI computation, and
what-if sensitivity analysis (loan reduction / co-applicant income) for
rejected applications."""
| import copy | |
| from .ml_model import ml_engine | |
| import json | |
# Translation table: snake_case API request fields -> PascalCase model features.
KEY_MAP = dict(
    gender="Gender",
    married="Married",
    dependents="Dependents",
    education="Education",
    self_employed="Self_Employed",
    applicant_income="ApplicantIncome",
    coapplicant_income="CoapplicantIncome",
    loan_amount="LoanAmount",
    loan_amount_term="Loan_Amount_Term",
    credit_history="Credit_History",
    property_area="Property_Area",
)


def map_keys(data: dict) -> dict:
    """Return a new dict with known snake_case keys renamed to their
    model-facing PascalCase names; unknown keys pass through unchanged."""
    renamed = {}
    for key, value in data.items():
        renamed[KEY_MAP.get(key, key)] = value
    return renamed
def prepare_input(base_data: dict) -> dict:
    """Augment *base_data* with the derived features the ML engine expects.

    Adds Total_Income, EMI, and Balance_Income. NOTE: mutates and returns the
    same dict — callers in this module always pass in a copy.
    """
    income_total = base_data["ApplicantIncome"] + base_data["CoapplicantIncome"]

    # Guard against a zero (or negative) term; fall back to the standard 360 months.
    term_months = base_data["Loan_Amount_Term"]
    if term_months <= 0:
        term_months = 360

    # LoanAmount is presumably expressed in thousands — TODO confirm with the model data.
    monthly_emi = (base_data["LoanAmount"] * 1000) / term_months

    base_data.update(
        Total_Income=income_total,
        EMI=monthly_emi,
        Balance_Income=income_total - monthly_emi,
    )
    return base_data
def _simulate_loan_reduction(mapped_input: dict) -> tuple:
    """Search for the smallest loan reduction that flips the prediction to "Y".

    Steps LoanAmount down by 10 (i.e. 10k, same unit as LoanAmount) per
    iteration, stopping before the simulated amount would reach zero.
    Returns (approved, decrease) with *decrease* in thousands (multiply by
    1000 for rupees).
    """
    sim_data = copy.deepcopy(mapped_input)
    decrease = 0
    while sim_data["LoanAmount"] > 10:
        sim_data["LoanAmount"] -= 10  # decrease by 10k
        decrease += 10
        sim_res, _sim_conf = ml_engine.predict(prepare_input(copy.deepcopy(sim_data)))
        if sim_res == "Y":
            return True, decrease
    return False, decrease


def _simulate_income_increase(mapped_input: dict) -> tuple:
    """Search for the smallest extra co-applicant income that flips the
    prediction to "Y".

    Steps CoapplicantIncome up by 1000 per iteration, capped at a total
    increase of 20000. Returns (approved, increase) in income units.
    """
    sim_data = copy.deepcopy(mapped_input)
    increase = 0
    while increase < 20000:
        sim_data["CoapplicantIncome"] += 1000  # increase by 1k
        increase += 1000
        sim_res, _sim_conf = ml_engine.predict(prepare_input(copy.deepcopy(sim_data)))
        if sim_res == "Y":
            return True, increase
    return False, increase


def _build_suggestion(mapped_input: dict, loan_approved: bool, loan_decrease: int,
                      inc_approved: bool, inc_increase: int) -> str:
    """Generate rule-based optimization text (deterministic) for a rejection."""
    # A zero credit history dominates every other adjustment, so it wins
    # regardless of what the simulations found (matches original precedence).
    if mapped_input["Credit_History"] == 0:
        return (
            "Your Credit History is 0. This is the primary blocking factor. "
            "Improving your credit standing is required before other adjustments will work."
        )
    if loan_approved and inc_approved:
        return (
            f"Reducing your loan request by ₹{loan_decrease * 1000} OR adding a "
            f"co-applicant income of ~₹{inc_increase} would likely result in approval."
        )
    if loan_approved:
        return f"Reducing your loan request by ₹{loan_decrease * 1000} would likely result in an approval."
    if inc_approved:
        return f"Adding a co-applicant income of ~₹{inc_increase} would likely result in an approval."
    return (
        "Unfortunately, we couldn't find a minor adjustment to approve your loan. "
        "Consider improving your CIBIL score."
    )


def process_pipeline(input_data: dict) -> dict:
    """Evaluates the loan application and runs sensitivity analysis if rejected.

    Returns a dict with the prediction, model confidence, DTI ratio (percent,
    capped at 100), an optimization suggestion, and model metadata
    (feature importances and benchmarks).
    """
    # 0. Map snake_case API keys to PascalCase model keys.
    mapped_input = map_keys(input_data)

    # 1. Base evaluation on a copy so mapped_input stays pristine for the sims.
    processed_input = prepare_input(copy.deepcopy(mapped_input))
    result, confidence = ml_engine.predict(processed_input)

    # Debt-to-income ratio: EMI as a share of total income, capped at 100%.
    dti_ratio = 0.0
    if processed_input["Total_Income"] > 0:
        dti_ratio = processed_input["EMI"] / processed_input["Total_Income"]
    dti_ratio_pct = min(100.0, dti_ratio * 100)

    response = {
        "prediction": result,
        "confidence": confidence,
        "dti_ratio": dti_ratio_pct,
        "optimized_suggestion": "Your profile meets all current thresholds. No optimization necessary.",
        "feature_importances": ml_engine.get_feature_importances(),
        "benchmarks": ml_engine.get_benchmarks()
    }

    # 2. What-if optimization (only for rejected applications).
    if result == "N":
        loan_approved, loan_decrease = _simulate_loan_reduction(mapped_input)
        inc_approved, inc_increase = _simulate_income_increase(mapped_input)
        response["optimized_suggestion"] = _build_suggestion(
            mapped_input, loan_approved, loan_decrease, inc_approved, inc_increase
        )
    return response