JatinAutonomousLabs committed on
Commit
aa83462
·
verified ·
1 Parent(s): 0eaf786

Upload 3 files

Browse files
Files changed (3) hide show
  1. app.py +366 -0
  2. config.json +140 -0
  3. requirements.txt +10 -0
app.py ADDED
@@ -0,0 +1,366 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ Universal Multi-Agent Platform - Core Application (Production Ready)
4
+ Auto-generated with Gradio 4.x compatibility
5
+ """
6
+
7
+ import gradio as gr
8
+ import pandas as pd
9
+ from typing import Dict, Any, List, Optional, Tuple
10
+ from pathlib import Path
11
+ import json
12
+ import os
13
+
14
+ # ============================================================================
15
+ # IMPORT ENABLED PLUGINS
16
+ # ============================================================================
17
+ from plugins.processors.schema_detector import *
18
+ from plugins.processors.text_processor import *
19
+ from plugins.outputs.table_formatter import *
20
+ from plugins.processors.date_normalizer import *
21
+ from plugins.file_handlers.csv_handler import *
22
+ from plugins.outputs.report_generator import *
23
+ from plugins.file_handlers.excel_handler import *
24
+ from plugins.memory.document_memory import *
25
+ from plugins.processors.data_cleaner import *
26
+ from plugins.analyzers.statistical_analyzer import *
27
+ from plugins.analyzers.time_series_analyzer import *
28
+ from plugins.outputs.chart_generator import *
29
+ from plugins.memory.conversation_memory import *
30
+
31
+ # ============================================================================
32
+ # PLUGIN MANAGER (Handles all plugin interactions)
33
+ # ============================================================================
34
+
35
class PluginManager:
    """Manage all plugins and application state.

    Holds the registered file handlers, the processing/analysis plugins,
    and the currently loaded dataset so the Gradio callbacks can share
    one piece of mutable state.
    """

    def __init__(self):
        # File handlers are tried in registration order until one accepts
        # the file (see load_file).
        self.file_handlers = [CSVHandler(), ExcelHandler()]

        # Processors / analyzers.  (The generator previously emitted dead
        # `X() if True else None` guards here; instantiate directly.)
        self.data_cleaner = DataCleaner()
        self.time_series_analyzer = TimeSeriesAnalyzer()
        self.statistical_analyzer = StatisticalAnalyzer()

        # Memory / output plugins.
        self.conversation_memory = ConversationMemory()
        self.table_formatter = TableFormatter()
        self.chart_generator = ChartGenerator()

        # State for the currently loaded file.
        self.loaded_data: Optional[Dict[str, Any]] = None
        self.cleaned_df: Optional[pd.DataFrame] = None
        self.last_chart_json: Optional[str] = None

    def load_file(self, file_path: str) -> Dict[str, Any]:
        """Load *file_path* with the first capable handler and auto-clean it.

        Returns the handler's result dict.  On success, tabular data is
        cleaned, cached on ``self.cleaned_df``, and its cleaned shape and
        columns are recorded under ``result["metadata"]``.  On failure the
        dict has ``{"success": False, "error": ...}``.
        """
        # Reset state up front so a failed load never leaves stale data.
        self.loaded_data = None
        self.cleaned_df = None
        self.last_chart_json = None

        if not os.path.exists(file_path):
            return {"success": False, "error": "File not found on server"}

        last_failure: Optional[Dict[str, Any]] = None
        for handler in self.file_handlers:
            if not handler.can_handle(file_path):
                continue
            result = handler.load(file_path)
            if result.get("success"):
                self.loaded_data = result

                # Auto-clean tabular data (no-op for non-tabular files).
                df = self._get_raw_df()
                if df is not None and self.data_cleaner:
                    df = self.data_cleaner.clean_dataframe(df)
                    self.cleaned_df = self.data_cleaner.enforce_schema(df)

                    result.setdefault("metadata", {})
                    result["metadata"]["cleaned_shape"] = list(self.cleaned_df.shape)
                    result["metadata"]["cleaned_cols"] = list(self.cleaned_df.columns)

                return result
            # FIX: remember why a matching handler failed instead of
            # falling through to a misleading "No handler found" message.
            last_failure = result

        return last_failure or {"success": False, "error": "No handler found for this file type"}

    def _get_raw_df(self) -> Optional[pd.DataFrame]:
        """Extract a DataFrame from ``loaded_data`` (``None`` if non-tabular).

        Multi-sheet handlers expose a "combined" frame; single-table
        handlers expose "data" — presumably set by the plugin handlers
        (confirm against the handler implementations).
        """
        if not self.loaded_data:
            return None
        for key in ("combined", "data"):
            value = self.loaded_data.get(key)
            if isinstance(value, pd.DataFrame):
                return value
        return None
98
+
99
# Module-level plugin manager: a single shared instance used by all the
# Gradio callbacks below (upload_file / process_query).
pm = PluginManager()
101
+
102
+ # ============================================================================
103
+ # GRADIO INTERFACE LOGIC
104
+ # ============================================================================
105
+
106
def upload_file(file):
    """Handle a file upload from the Gradio UI.

    Args:
        file: Value produced by ``gr.File`` — either a temp-file object with
            a ``.name`` attribute or (Gradio 4.x filepath mode) a plain
            path string.

    Returns:
        Tuple of (status message, preview HTML or ``None``).
    """
    if file is None:
        return "❌ No file uploaded", None

    # FIX: Gradio 4.x may hand us a plain path string; `file.name` would
    # raise AttributeError in that case.
    file_path = file if isinstance(file, str) else file.name

    try:
        result = pm.load_file(file_path)

        if result.get("success"):
            # Ask the matching handler for a rich preview, if it offers one.
            preview_html = "Data loaded successfully"
            for handler in pm.file_handlers:
                if handler.can_handle(file_path) and hasattr(handler, 'preview'):
                    preview_html = handler.preview(result)
                    break

            shape_info = f"Shape: {pm.cleaned_df.shape}" if pm.cleaned_df is not None else "Non-tabular data"

            summary = "✅ File loaded and processed successfully\n"
            summary += f"Type: {result.get('file_type', 'unknown')}\n"
            summary += f"Data: {shape_info}\n\n"
            summary += "Ready for conversational analysis!"

            return summary, preview_html

        return f"❌ Error: {result.get('error')}", None

    except Exception as e:
        # Surface the failure in the status box rather than crashing the UI.
        return f"❌ Critical Error: {str(e)}", None
135
+
136
+
137
def process_query(query: str, history: List) -> Tuple[List, str, Optional[str]]:
    """
    Executes conversational analytics.

    Args:
        query: Natural-language question about the loaded data.
        history: Chatbot history as a list of (user, assistant) tuples.

    Returns: updated history, empty query text, and chart JSON.
    """

    if not query or not query.strip():
        return history + [("", "❌ Please enter a question")], "", None

    if pm.conversation_memory:
        pm.conversation_memory.add_message("user", query)

    df = pm.cleaned_df
    pm.last_chart_json = None

    # Handle the no-tabular-data case first.
    if df is None or df.empty:
        # A document (PDF/Word) may still have been loaded.
        if pm.loaded_data and pm.loaded_data.get('file_type') in ['pdf', 'docx']:
            # FIX: guard against a present-but-empty 'text_data' list, which
            # previously raised IndexError on the `[0]` subscript.
            text_rows = pm.loaded_data.get('text_data') or [{}]
            document_text = pm.loaded_data.get('text', '') or str(text_rows[0].get('text', 'No text'))
            response = "📄 **Document Content Loaded**\n\n"
            response += "The system has loaded a document. Advanced NLP analysis would be applied here.\n"
            response += f"Text Sample: {document_text[:200]}..."
        else:
            response = "❌ No **data** loaded for analysis. Please upload a file first."

        if pm.conversation_memory:
            pm.conversation_memory.add_message("assistant", response)
        return history + [(query, response)], "", None

    try:
        # Run the highest-priority analyzer available (time-series first,
        # then plain statistics).
        if pm.time_series_analyzer:
            description, result_df = pm.time_series_analyzer.analyze_query(df, query)
        elif pm.statistical_analyzer:
            stats = pm.statistical_analyzer.analyze(df)
            description = "📊 Statistical Analysis Results"
            result_df = pd.DataFrame(stats.get('columns', {})).T
        else:
            description = "⚠️ No analyzer available. Upload data and try basic queries."
            result_df = None

        final_response = f"**Query:** {query}\n\n{description}\n\n"
        chart_json = None

        if result_df is not None and not result_df.empty:
            # Tabular summary — top rows only, to keep the chat readable.
            if pm.table_formatter:
                table_markdown = pm.table_formatter.format_to_markdown(result_df.head(10))
                final_response += "### Results (Top 10 Rows):\n"
                final_response += table_markdown
                final_response += f"\n\n*Total Rows: {len(result_df):,}*"

            # Chart output: first column on x, second on y.
            if pm.chart_generator and len(result_df.columns) >= 2:
                try:
                    x_col = result_df.columns[0]
                    y_col = result_df.columns[1]
                    chart_json = pm.chart_generator.create_chart_html(
                        result_df.head(20),
                        'bar',
                        x=x_col,
                        y=y_col,
                        title=description.split('\n')[0][:50]
                    )
                except Exception as chart_err:
                    # Charting is best-effort; the textual answer still ships.
                    print(f"Chart generation failed: {chart_err}")

        else:
            final_response = f"**Query:** {query}\n\n{description}"

        if pm.conversation_memory:
            pm.conversation_memory.add_message("assistant", final_response)

        return history + [(query, final_response)], "", chart_json

    except Exception as e:
        import traceback
        error_trace = traceback.format_exc()
        response = f"❌ Analysis Error: {str(e)}\n\nDebug Info:\n```\n{error_trace[:500]}\n```"
        return history + [(query, response)], "", None
218
+
219
+
220
def create_ui():
    """Build and return the Gradio Blocks application (Gradio 4.x compatible)."""

    with gr.Blocks(title="Universal AI Platform", theme=gr.themes.Soft()) as demo:
        gr.Markdown("# 🤖 Universal Multi-Agent Platform")
        gr.Markdown("## AI-Powered Analysis & Conversational Intelligence")

        with gr.Tabs():
            # ----------------------------------------------------------------
            # Tab 1: file upload & processing
            # ----------------------------------------------------------------
            with gr.Tab("📁 Upload & Process"):
                with gr.Row():
                    with gr.Column(scale=1):
                        file_upload = gr.File(
                            label="Upload Your File",
                            file_types=[".xlsx", ".xls", ".csv", ".pdf", ".docx", ".json", ".xml"],
                            interactive=True,
                        )
                        upload_btn = gr.Button("📤 Process File", variant="primary", size="lg")
                        upload_status = gr.Textbox(
                            label="Status",
                            lines=8,
                            value="Ready to process files. Supported: Excel, CSV, PDF, Word, JSON, XML",
                            interactive=False,
                        )

                    with gr.Column(scale=2):
                        data_preview = gr.HTML(label="Data Preview")

                upload_btn.click(
                    fn=upload_file,
                    inputs=[file_upload],
                    outputs=[upload_status, data_preview],
                )

            # ----------------------------------------------------------------
            # Tab 2: conversational analysis
            # ----------------------------------------------------------------
            with gr.Tab("💬 Ask Questions"):
                chatbot = gr.Chatbot(
                    height=450,
                    label="Conversational AI Assistant",
                    type='tuples',
                    show_copy_button=True,
                )

                gr.Markdown("""
                    ### 📝 Example Queries:
                    - "Summarize the data"
                    - "Show me aggregated statistics"
                    - "Group by [column name]"
                    - "Segment the data into categories"
                    - "Analyze trends over time"
                    - "Show correlation between columns"
                    """)

                with gr.Row():
                    msg = gr.Textbox(
                        label="Your Query",
                        placeholder="Ask anything about your data...",
                        scale=4,
                        lines=2,
                    )
                    submit_btn = gr.Button("Send", variant="primary", scale=1, size="lg")

                # Rendered Plotly chart (empty until a query produces one).
                chart_display = gr.HTML(label="Visualization", value="")

                with gr.Row():
                    clear_btn = gr.Button("🗑️ Clear Chat", variant="secondary")

                def process_and_display(query: str, history: List) -> Tuple[List, str, str]:
                    """Run the query, then wrap any chart JSON in embeddable HTML."""
                    new_history, cleared_box, chart_json_str = process_query(query, history)

                    # The HTML is assembled by joining literal fragments with
                    # the chart JSON — keeping the JavaScript braces out of
                    # any f-string substitution.
                    chart_html = ""
                    if chart_json_str:
                        fragments = [
                            '<div style="width: 100%; height: 500px; margin-top: 20px;">',
                            '<script src="https://cdn.plot.ly/plotly-2.27.0.min.js"></script>',
                            '<div id="plotly-chart-container"></div>',
                            '<script>',
                            '(function() {',
                            'try {',
                            'const chartData = ', chart_json_str, ';',
                            "Plotly.newPlot('plotly-chart-container', chartData.data, chartData.layout, {responsive: true, displayModeBar: true});",
                            '} catch (e) {',
                            "console.error('Chart rendering error:', e);",
                            "document.getElementById('plotly-chart-container').innerHTML = '<p style=\"color: red; padding: 20px;\">Chart rendering failed: ' + e.message + '</p>';",
                            '}',
                            '})();',
                            '</script>',
                            '</div>',
                        ]
                        chart_html = "".join(fragments)

                    return new_history, cleared_box, chart_html

                # Wire both Enter-to-submit and the Send button to the same
                # handler so they behave identically.
                msg.submit(
                    process_and_display,
                    inputs=[msg, chatbot],
                    outputs=[chatbot, msg, chart_display],
                )
                submit_btn.click(
                    process_and_display,
                    inputs=[msg, chatbot],
                    outputs=[chatbot, msg, chart_display],
                )

                # Reset conversation and clear any rendered chart.
                clear_btn.click(
                    lambda: ([], ""),
                    outputs=[chatbot, chart_display],
                )

        gr.Markdown("---")
        gr.Markdown("**Enabled Plugins:** Schema Detector, Text Processor, Table Formatter, Date Normalizer, CSV Handler, Report Generator, Excel Handler, Document Memory, Data Cleaner, Statistical Analyzer, Time Series Analyzer, Chart Generator, Conversation Memory")
        gr.Markdown("*Powered by Universal AI Agent Development Platform*")

    return demo
348
+
349
+ # ============================================================================
350
+ # MAIN ENTRY POINT
351
+ # ============================================================================
352
+
353
if __name__ == "__main__":
    # OPENAI_API_KEY is only needed for LLM-backed features; basic analytics
    # works without it, so warn rather than fail.
    if not os.getenv("OPENAI_API_KEY"):
        print("⚠️ Warning: OPENAI_API_KEY not set (not required for basic analytics)")

    # Launch application
    print("🚀 Launching Universal AI Platform...")
    demo = create_ui()
    demo.launch(
        server_name="0.0.0.0",
        # FIX: allow hosted deployments to override the port via the standard
        # GRADIO_SERVER_PORT env var; explicit server_port= would otherwise
        # shadow it. Default remains 7860.
        server_port=int(os.getenv("GRADIO_SERVER_PORT", "7860")),
        share=False,
        show_error=True,
    )
config.json ADDED
@@ -0,0 +1,140 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "agents": [
3
+ {
4
+ "id": "agent1",
5
+ "name": "DataExtractor",
6
+ "role": "Extracts and interprets data from uploaded Excel files",
7
+ "model": "gpt-3.5-turbo",
8
+ "temperature": 0.5,
9
+ "max_tokens": 500,
10
+ "system_prompt": "Identify and extract all relevant data from the uploaded Excel sheet.",
11
+ "capabilities": [
12
+ "data_extraction",
13
+ "data_interpretation"
14
+ ],
15
+ "status": "idle",
16
+ "tasks_completed": 0,
17
+ "tasks_failed": 0,
18
+ "tokens_used": 0,
19
+ "avg_response_time": 0.0,
20
+ "progress": 0.0,
21
+ "current_task": null
22
+ },
23
+ {
24
+ "id": "agent2",
25
+ "name": "DataAnalyzer",
26
+ "role": "Analyzes extracted data for patterns and insights",
27
+ "model": "gpt-4",
28
+ "temperature": 0.7,
29
+ "max_tokens": 750,
30
+ "system_prompt": "Analyze the extracted data to find patterns, insights, and possible correlations.",
31
+ "capabilities": [
32
+ "data_analysis",
33
+ "pattern_recognition"
34
+ ],
35
+ "status": "idle",
36
+ "tasks_completed": 0,
37
+ "tasks_failed": 0,
38
+ "tokens_used": 0,
39
+ "avg_response_time": 0.0,
40
+ "progress": 0.0,
41
+ "current_task": null
42
+ },
43
+ {
44
+ "id": "agent3",
45
+ "name": "ChatbotInterface",
46
+ "role": "Provides a conversational interface for user interaction",
47
+ "model": "gpt-3.5-turbo",
48
+ "temperature": 0.6,
49
+ "max_tokens": 600,
50
+ "system_prompt": "Interact with the user to provide data summaries, answer queries, and receive instructions for further data manipulation.",
51
+ "capabilities": [
52
+ "user_interaction",
53
+ "data_summarization"
54
+ ],
55
+ "status": "idle",
56
+ "tasks_completed": 0,
57
+ "tasks_failed": 0,
58
+ "tokens_used": 0,
59
+ "avg_response_time": 0.0,
60
+ "progress": 0.0,
61
+ "current_task": null
62
+ },
63
+ {
64
+ "id": "agent4",
65
+ "name": "DataModifier",
66
+ "role": "Modifies and updates data as per user or system requirements",
67
+ "model": "gpt-4",
68
+ "temperature": 0.4,
69
+ "max_tokens": 1000,
70
+ "system_prompt": "Modify the data according to user commands or predefined rules.",
71
+ "capabilities": [
72
+ "data_modification",
73
+ "data_updating"
74
+ ],
75
+ "status": "idle",
76
+ "tasks_completed": 0,
77
+ "tasks_failed": 0,
78
+ "tokens_used": 0,
79
+ "avg_response_time": 0.0,
80
+ "progress": 0.0,
81
+ "current_task": null
82
+ }
83
+ ],
84
+ "segments": [
85
+ {
86
+ "id": "segment1",
87
+ "name": "DataProcessing",
88
+ "objective": "Process data from extraction to analysis",
89
+ "agent_ids": [
90
+ "agent1",
91
+ "agent2"
92
+ ],
93
+ "workflow": "sequential",
94
+ "coordination_strategy": "priority",
95
+ "completion": 0.0,
96
+ "status": "pending",
97
+ "tokens_used": 0,
98
+ "cost": 0.0
99
+ },
100
+ {
101
+ "id": "segment2",
102
+ "name": "UserInteraction",
103
+ "objective": "Interact with the user to refine data processing and output",
104
+ "agent_ids": [
105
+ "agent3",
106
+ "agent4"
107
+ ],
108
+ "workflow": "parallel",
109
+ "coordination_strategy": "round_robin",
110
+ "completion": 0.0,
111
+ "status": "pending",
112
+ "tokens_used": 0,
113
+ "cost": 0.0
114
+ }
115
+ ],
116
+ "plugins": {
117
+ "schema_detector": true,
118
+ "text_processor": true,
119
+ "table_formatter": true,
120
+ "date_normalizer": true,
121
+ "csv_handler": true,
122
+ "report_generator": true,
123
+ "excel_handler": true,
124
+ "document_memory": true,
125
+ "data_cleaner": true,
126
+ "statistical_analyzer": true,
127
+ "time_series_analyzer": true,
128
+ "chart_generator": true,
129
+ "conversation_memory": true
130
+ },
131
+ "dependencies": [
132
+ "markdown",
133
+ "numpy",
134
+ "openpyxl",
135
+ "pandas",
136
+ "plotly",
137
+ "tabulate",
138
+ "xlrd"
139
+ ]
140
+ }
requirements.txt ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
gradio>=4.0.0
markdown
numpy>=1.24.0
openpyxl
pandas>=2.0.0
plotly
tabulate
xlrd