Shreyas1441AI committed on
Commit
ed00ceb
·
verified ·
1 Parent(s): dc5e4ee

Delete app.py

Browse files
Files changed (1) hide show
  1. app.py +0 -233
app.py DELETED
@@ -1,233 +0,0 @@
1
import csv
import json
import os
import tempfile

import gradio as gr
import pandas as pd
import torch
import torch.nn as nn
from tqdm import tqdm
9
-
10
- # =========================================================
11
- # MODEL ARCHITECTURE
12
- # =========================================================
13
class DeepRegressor(nn.Module):
    """Fully-connected 1-in / 1-out regressor (1→128→256→256→128→1, ReLU).

    Layer ordering matches the original hand-written Sequential, so
    checkpoints keyed as ``net.0`` … ``net.8`` load unchanged.
    """

    def __init__(self):
        super().__init__()
        widths = [1, 128, 256, 256, 128, 1]
        modules = []
        for fan_in, fan_out in zip(widths[:-1], widths[1:]):
            modules.append(nn.Linear(fan_in, fan_out))
            modules.append(nn.ReLU())
        modules.pop()  # no activation after the output layer
        self.net = nn.Sequential(*modules)

    def forward(self, x):
        """Apply the MLP to a float tensor of shape (batch, 1)."""
        return self.net(x)
30
-
31
-
32
- # =========================================================
33
- # MODELS (HARDCODED — USER DOES NOT INPUT)
34
- # =========================================================
35
-
36
# Checkpoint filenames (hardcoded — not user input).  Plain string
# literals: the original used f-strings with no placeholders (ruff F541).
# Three main tiers, named by training epoch / reported metrics:
high_mod = "from_smartsbyviva_mark2_11_12_1Dec25_epoch_124_r2_1.00000000000000000000_rmse_0.00000090259020531177.pt"
mid_mod = "from_smartsbyviva_mark2_11_12_1Dec25_epoch_143_r2_1.00000000000000000000_rmse_0.00000076154023975762.pt"
low_mod = "from_smartsbyviva_mark2_11_12_1Dec25_epoch_176_r2_1.00000000000000000000_rmse_0.00000068535079250664.pt"

# Two additional "ultra" checkpoints (note the literal " (1)" in the first
# filename — it must match the file on disk exactly):
ultra1 = "from_smartsbyviva_mark2_11_12_1Dec25_epoch_1_r2_1.00000000000000000000_rmse_0.00000329740002026770 (1).pt"
ultra2 = "from_smartsbyviva_mark2_11_12_1Dec25_epoch_197_r2_1.00000000000000000000_rmse_0.00000115787004480322.pt"
42
-
43
-
44
- # =========================================================
45
- # PREDICTION
46
- # =========================================================
47
def predict_value(input_value, model_path):
    """Predict a single scalar with the DeepRegressor stored at *model_path*.

    Args:
        input_value: number (or numeric string) used as the lone input feature.
        model_path: path to a DeepRegressor state-dict checkpoint (.pt).

    Returns:
        float: the model output, rounded to 13 decimal places.
    """
    # Cache loaded models keyed by path: the original reloaded the
    # checkpoint from disk on EVERY call, and run_pipeline calls this
    # five times per sliding window — the reload dominated runtime.
    cache = predict_value.__dict__.setdefault("_models", {})
    model = cache.get(model_path)
    if model is None:
        model = DeepRegressor()
        model.load_state_dict(torch.load(model_path, map_location="cpu"))
        model.eval()
        cache[model_path] = model

    x = torch.tensor([[float(input_value)]], dtype=torch.float32)
    with torch.no_grad():
        y_pred = model(x).item()
    return float(f"{y_pred:.13f}")
55
-
56
-
57
- # =========================================================
58
- # POST PROCESS
59
- # =========================================================
60
def post_process1(inputz, pred):
    """Combine the last 6 chars of *inputz* with the 8 decimals of *pred*.

    The input tail and the prediction's 8 fixed decimals are split into
    two-digit pairs; each input pair is diffed against the matching output
    pair (wrapping by +100 when it would not exceed it), and the diffs are
    added to the final output pair.  Returns the sum as a string.
    """
    tail_in = inputz[-6:]
    # ".8f" always yields exactly 8 decimals, so [-8:] is the decimal part.
    tail_out = f"{pred:.8f}"[-8:]

    upper = [tail_in[k:k + 2] for k in (0, 2, 4)]
    lower = [tail_out[k:k + 2] for k in (0, 2, 4)]
    carry = int(tail_out[6:])

    def wrap_diff(u, l):
        u, l = int(u), int(l)
        # Borrow 100 whenever the upper pair is not strictly greater.
        return u - l if u > l else (100 + u) - l

    total = carry + sum(wrap_diff(u, l) for u, l in zip(upper, lower))
    return str(total)
84
-
85
-
86
- # =========================================================
87
- # TRANSFORM
88
- # =========================================================
89
def transform_and_average(a, b, c, d, e):
    """Decode five digit-string codes into numeric values.

    Each code maps to ``(first_digit + 1) * 10`` plus the remaining digits
    divided by 10.  Despite the name, no averaging happens here — the five
    decoded values are returned as a tuple in argument order.
    """
    def decode(code: str) -> float:
        whole = (int(code[0]) + 1) * 10
        frac = int(code[1:]) / 10 if len(code) > 1 else 0
        return whole + frac

    return tuple(decode(code) for code in (a, b, c, d, e))
103
-
104
-
105
- # =========================================================
106
- # CLOSEST VALUE FINDER
107
- # =========================================================
108
def closest_code(v1, v2, v3, v4, v5, answer):
    """Return the value in [0, 999] nearest the mean of the five inputs
    whose last digit matches the last digit of *answer*.

    Ties go to the smaller candidate (``min`` keeps the first minimum,
    mirroring the original strict-less-than scan).
    """
    avg = (v1 + v2 + v3 + v4 + v5) / 5
    last_digit = int(answer) % 10
    return min(
        (n for n in range(1000) if n % 10 == last_digit),
        key=lambda n: abs(n - avg),
    )
123
-
124
def load_file_content(file_obj):
    """Read the text content of a Gradio-style uploaded file.

    Accepts either the dict form (``{"path": ...}``) or an object exposing
    a ``.name`` path attribute (older Gradio file wrappers).

    Raises:
        ValueError: if *file_obj* is neither form.
    """
    if isinstance(file_obj, dict) and "path" in file_obj:
        path = file_obj["path"]        # standard Gradio payload
    elif hasattr(file_obj, "name"):
        path = file_obj.name           # older file interface
    else:
        raise ValueError("Unsupported file type")

    with open(path, "r") as f:
        return f.read()
135
-
136
-
137
-
138
- # =========================================================
139
- # MAIN PROCESS FOR GRADIO
140
- # =========================================================
141
- import io
142
- import csv
143
- from tqdm import tqdm
144
-
145
def run_pipeline(txt_file):
    """Slide a 12-char window over the uploaded text and build a tier CSV.

    For each window: the first 11 chars (prefixed "0.") feed five model
    predictions, each post-processed and decoded; the 12th char is the
    "answer" used to pick the closest code.  Rows are batched 1000 at a
    time into a CSV written to a named temporary file.

    Args:
        txt_file: Gradio file payload (dict with "path", or .name object).

    Returns:
        tuple[str, str]: (path to the CSV file, progress-log text).
        NOTE(fix): the original returned an in-memory ``io.StringIO``,
        which ``gr.File`` cannot serve — it expects a filesystem path.
    """
    # assumes the file body is one long digit string — TODO confirm
    text = load_file_content(txt_file)
    data = text.strip()

    window_size = 12
    total_windows = len(data) - window_size + 1  # <= 0 yields an empty loop

    fieldnames = ["tier1", "tier2", "tier3", "tier4", "tier5", "tier6"]

    # Named temp file instead of io.StringIO: gr.File needs a real path.
    # delete=False so the file survives until Gradio serves it.
    out_file = tempfile.NamedTemporaryFile(
        mode="w", suffix=".csv", newline="", delete=False
    )
    writer = csv.DictWriter(out_file, fieldnames=fieldnames)
    writer.writeheader()

    rows_buffer = []
    update_counter = 0
    updates_msg = ""

    for i in tqdm(range(total_windows)):
        window = data[i:i + window_size]
        input_to_send = window[:11]
        answer = window[-1]
        input_data = "0." + input_to_send

        # One prediction per checkpoint tier, then per-tier post-processing.
        preds = [
            predict_value(input_data, m)
            for m in (high_mod, mid_mod, low_mod, ultra1, ultra2)
        ]
        sigmas = [post_process1(window, p) for p in preds]
        v1, v2, v3, v4, v5 = transform_and_average(*sigmas)
        cudo = closest_code(v1, v2, v3, v4, v5, answer)

        rows_buffer.append({
            "tier1": v1,
            "tier2": v2,
            "tier3": v3,
            "tier4": v4,
            "tier5": v5,
            "tier6": cudo,
        })

        # Flush in batches of 1000 to keep the row buffer small.
        if len(rows_buffer) >= 1000:
            update_counter += 1
            writer.writerows(rows_buffer)
            updates_msg += f"Updated #{update_counter} (saved {i+1} rows)\n"
            rows_buffer = []

    # Final partial batch.
    if rows_buffer:
        writer.writerows(rows_buffer)

    updates_msg += "\n🎉 Processing Complete"

    out_file.close()

    # Return the CSV path → gr.File renders it as a downloadable file.
    return out_file.name, updates_msg
217
-
218
-
219
-
220
# =========================================================
# GRADIO UI
# =========================================================
demo = gr.Interface(
    fn=run_pipeline,
    inputs=gr.File(label="Upload TXT"),
    outputs=[
        gr.File(label="Download CSV Output"),
        gr.Textbox(label="Logs"),
    ],
)

if __name__ == "__main__":
    # Launch exactly once.  The original also executed `ui.launch()` on an
    # undefined name `ui`, which raised NameError once the first (blocking)
    # launch returned.
    demo.launch()