# QA Suite — SmartQA + HealTest AI + Synthetic Test Data Generator (Gradio Space)
# Standard library
import json
import os
import re
import subprocess
import tempfile
import traceback
from dataclasses import dataclass
from datetime import datetime
from typing import Optional

# Third-party
import gradio as gr
from groq import Groq
# --- 1. API KEY ---
# Read the Groq API key from the environment; fail fast at import time if absent.
api_key_coder = os.getenv('fristapi')
if api_key_coder is None or api_key_coder == "":
    raise ValueError("Groq API key not found. Set fristapi environment variable.")
# --- 2. LLM CLIENT ---
class GroqLLM:
    """Thin wrapper around the Groq chat-completions API."""

    def __init__(self, api_key, model="llama-3.3-70b-versatile", temperature=0.0):
        # temperature 0.0 keeps generations deterministic by default.
        self.client = Groq(api_key=api_key)
        self.model = model
        self.temperature = temperature

    def invoke(self, prompt):
        """Send *prompt* as a single user message and return the reply text.

        Any error raised while calling the API (or reading the response) is
        swallowed and returned as a string prefixed with "LLM Error:".
        """
        try:
            completion = self.client.chat.completions.create(
                model=self.model,
                messages=[{"role": "user", "content": prompt}],
                temperature=self.temperature,
                max_tokens=2000,
            )
            return completion.choices[0].message.content
        except Exception as exc:  # deliberate best-effort: surface error as text
            return f"LLM Error: {str(exc)}"
| llm = GroqLLM(api_key=api_key_coder) | |
| # ============================================ | |
| # PART 1: SmartQA - TEST CASE GENERATOR (Modified) | |
| # ============================================ | |
@dataclass
class KnowledgeInput:
    """Container for the raw knowledge sources fed into SmartQA.

    Every field is optional free-form text; None (or blank text) means the
    source was not supplied. Converted to a dataclass for a useful repr/eq
    while keeping the original keyword/positional constructor interface.
    """

    requirements: Optional[str] = None  # requirements / user stories
    dom: Optional[str] = None           # UI / DOM structure
    api_spec: Optional[str] = None      # API specification text
    user_flows: Optional[str] = None    # user flows / journey maps
    source_code: Optional[str] = None   # application source code
    recording: Optional[str] = None     # user session recording
class KnowledgeProcessor:
    """Normalizes the raw KnowledgeInput fields into a compact dict."""

    def parse_requirements(self, text):
        # Requirements are kept whole, just trimmed.
        return text.strip() if text else ""

    def parse_dom(self, dom_text):
        # Cap bulky DOM dumps at 4000 chars to keep prompts small.
        return dom_text[:4000] if dom_text else ""

    def parse_api(self, api_text):
        return api_text[:4000] if api_text else ""

    def parse_flows(self, flows_text):
        return flows_text.strip() if flows_text else ""

    def analyze_code(self, code_text):
        return code_text[:4000] if code_text else ""

    def parse_recording(self, rec_text):
        return rec_text.strip() if rec_text else ""

    def process(self, knowledge):
        """Return {short_key: parsed_text} for every non-blank source."""
        handlers = (
            ("req", knowledge.requirements, self.parse_requirements),
            ("ui", knowledge.dom, self.parse_dom),
            ("api", knowledge.api_spec, self.parse_api),
            ("flows", knowledge.user_flows, self.parse_flows),
            ("code", knowledge.source_code, self.analyze_code),
            ("record", knowledge.recording, self.parse_recording),
        )
        # Skip sources that are None, empty, or whitespace-only.
        return {key: parse(raw) for key, raw, parse in handlers if raw and raw.strip()}
class TestCaseGenerator:
    """Generates STRUCTURED TEST CASES (NOT executable code)"""

    def __init__(self, llm):
        # Any object exposing .invoke(prompt) -> str.
        self.llm = llm

    def generate_test_cases(self, data, language="Python"):
        """Generate test cases based on available sources

        data: dict produced by KnowledgeProcessor.process (possible keys:
        req, ui, api, flows, code, record).
        language: target-language label; used in the prompt for context only.
        Returns the LLM's markdown answer, or a warning string when *data*
        contains none of the expected keys.
        """
        # Determine available sources; accumulate one prompt section per source.
        available_sources = []
        input_section = ""
        if data.get("req"):
            available_sources.append("Requirements")
            input_section += f"""
### 📋 Requirements (Primary Source):
{data['req']}
"""
        if data.get("flows"):
            available_sources.append("User Flows")
            input_section += f"""
### 🔄 User Flows:
{data['flows']}
"""
        if data.get("api"):
            available_sources.append("API Specification")
            input_section += f"""
### 🔌 API Specification:
{data['api']}
"""
        if data.get("ui"):
            available_sources.append("UI/DOM Structure")
            input_section += f"""
### 🖥️ UI/DOM Structure:
{data['ui']}
"""
        if data.get("code"):
            available_sources.append("Source Code")
            input_section += f"""
### 💻 Source Code:
{data['code']}
"""
        if data.get("record"):
            available_sources.append("User Recording")
            input_section += f"""
### 📹 User Recording:
{data['record']}
"""
        # Check if there's any input
        if not available_sources:
            return """⚠️ **No input provided**
Please provide at least one source to generate test cases.
The **Requirements** field is recommended for best results."""
        prompt = f"""
You are a SENIOR QA Engineer specializing in test case design.
Your task: Generate STRUCTURED, PROFESSIONAL TEST CASES.
**CRITICAL: Generate TEST CASES only - NO executable code, NO programming syntax, NO imports.**
=====================
CONFIGURATION
=====================
Target Language Reference: {language} (for context only - you are NOT generating code)
=====================
AVAILABLE SOURCES: {', '.join(available_sources)}
=====================
STRICT RULES (MANDATORY)
=====================
1. **NO CODE**: Generate only human-readable test cases
2. **Base test cases ONLY on the sources provided above**
3. **If Requirements exist**: Generate comprehensive functional test cases
4. **If User Flows exist**: Generate E2E test scenarios
5. **If API Spec exists**: Generate API test cases for each endpoint
6. **If UI/DOM exists**: Use to understand elements (don't generate code)
7. **If Source Code exists**: Use to understand logic (don't generate code)
8. **Include both positive and negative scenarios where applicable**
9. **Each test case MUST follow the exact format below**
=====================
TEST CASE FORMAT (MANDATORY)
=====================
---
### **TC-XXX:** [Descriptive Test Case Title]
| Field | Details |
|-------|---------|
| **Type** | [UI/API/Unit/Integration/E2E] |
| **Priority** | [High/Medium/Low] |
| **Source** | [Which source this came from] |
**Preconditions:**
- [Condition 1]
- [Condition 2]
**Test Steps:**
1. [Step 1 - be specific and actionable]
2. [Step 2 - be specific and actionable]
3. [Step 3 - be specific and actionable]
**Test Data:**
- [Data value 1]
- [Data value 2]
**Expected Results:**
- [Expected outcome 1]
- [Expected outcome 2]
**Postconditions:**
- [Cleanup steps if needed]
---
=====================
INPUT DATA
=====================
{input_section}
=====================
OUTPUT FORMAT
=====================
Start with a summary table:
# 📊 Test Case Summary
| Metric | Value |
|--------|-------|
| **Total Test Cases** | [number] |
| **Sources Used** | {', '.join(available_sources)} |
| **Generated** | {datetime.now().strftime('%Y-%m-%d %H:%M:%S')} |
Then generate each test case using the format above.
Number test cases sequentially: TC-001, TC-002, TC-003, etc.
Be thorough but concise. Focus on quality and clarity.
"""
        return self.llm.invoke(prompt)

    def generate(self, data, language="Python"):
        # Back-compat alias; SmartQASystem calls this entry point.
        return self.generate_test_cases(data, language)
class SmartQASystem:
    """End-to-end SmartQA pipeline: normalize sources, then generate test cases."""

    # Human-readable labels for the processor's short source keys.
    _SOURCE_LABELS = {
        "req": "Requirements",
        "ui": "UI/DOM",
        "api": "API Spec",
        "flows": "User Flows",
        "code": "Source Code",
        "record": "Recording",
    }

    def __init__(self, llm):
        self.processor = KnowledgeProcessor()
        self.generator = TestCaseGenerator(llm)

    def run(self, knowledge, language="Python"):
        """Process *knowledge* and return {"test_cases": ..., "summary": ...}."""
        processed = self.processor.process(knowledge)
        if not processed:
            return {
                "test_cases": "⚠️ **No input provided.**\n\nPlease fill in at least one field. The **Requirements** field is recommended for best results.",
                "summary": "No data to process",
            }
        generated_tests = self.generator.generate(processed, language)
        # Build the human-readable generation summary.
        labels = [self._SOURCE_LABELS.get(key, key) for key in processed]
        summary = f"""## 📊 Generation Summary
| Item | Details |
|------|---------|
| **Sources Used** | {', '.join(labels)} |
| **Language Context** | {language} |
| **Output Type** | Test Cases (Human-readable) |
| **Generated** | {datetime.now().strftime('%Y-%m-%d %H:%M:%S')} |
### 📌 Important Notes:
- These are **test cases**, not executable code
- Use these as specifications for manual testing
- Can be used as a guide for automation development
- Each test case follows a structured format
"""
        return {"test_cases": generated_tests, "summary": summary}
| smartqa = SmartQASystem(llm) | |
| # ============================================ | |
| # PART 2: HealTest AI (UPDATED with language support) | |
| # ============================================ | |
| # --- Real Pytest Runner --- | |
| class PytestRunner: | |
| def run(self, test_script): | |
| test_file = None | |
| try: | |
| # Create temporary test file | |
| with tempfile.NamedTemporaryFile(delete=False, suffix="_test.py", mode="w", encoding="utf-8") as f: | |
| f.write(test_script) | |
| test_file = f.name | |
| # Run pytest | |
| result = subprocess.run( | |
| ["pytest", test_file, "--maxfail=1", "--disable-warnings", "-q"], | |
| capture_output=True, | |
| text=True, | |
| timeout=30 | |
| ) | |
| # Parse result | |
| if result.returncode == 0: | |
| return { | |
| "status": "passed", | |
| "logs": result.stdout, | |
| "error": "" | |
| } | |
| else: | |
| return { | |
| "status": "failed", | |
| "error": result.stderr if result.stderr else result.stdout, | |
| "logs": result.stdout | |
| } | |
| except Exception as e: | |
| return { | |
| "status": "failed", | |
| "error": str(e), | |
| "logs": "" | |
| } | |
| finally: | |
| try: | |
| if test_file and os.path.exists(test_file): | |
| os.remove(test_file) | |
| except: | |
| pass | |
class DOMMatcher:
    """Placeholder locator matcher.

    NOTE(review): this is a stub — it always proposes the same locator with
    a fixed confidence, regardless of the DOM or the failed locator.
    """

    def find_similar(self, dom, failed_locator):
        # (locator, confidence) — hardcoded until real DOM similarity exists.
        suggestion = "button#submit-btn"
        confidence = 0.92
        return suggestion, confidence
# Module-level singletons shared by the HealTest pipeline functions below.
runner = PytestRunner()
dom_matcher = DOMMatcher()
def detect_failure(test_script):
    # Pipeline step 1: execute the script and capture the pytest result dict.
    return runner.run(test_script)
def analyze_root_cause(failure_data, language="python"):
    """Ask the LLM for a root-cause analysis of a failed test run.

    failure_data: dict from PytestRunner.run (plus an optional "dom" key).
    language: language label injected into the prompt.
    Returns the dict parsed from the model's JSON answer; falls back to
    {"root_cause": <raw text>, "corrected_script": ""} when no JSON block
    is found or it does not parse.
    """
    error = failure_data.get("error", "Unknown")
    logs = failure_data.get("logs", "")
    dom = failure_data.get("dom", "")
    prompt = f"""
You are a strict QA auditor and {language.upper()} testing expert.
The following test script failed:
Language: {language}
DOM: {dom}
Error: {error}
Logs: {logs}
1. Analyze the root cause of the failure in detail.
2. Suggest the exact fix required.
3. Provide a corrected version of the test script incorporating your fix.
4. Ensure the corrected script would pass logically if the fix is applied.
5. Do NOT mark failing tests as passed; be strict and precise.
Output format (JSON):
{{
"root_cause": "...",
"technical_reason": "...",
"suggested_fix": "...",
"corrected_script": "..."
}}
"""
    analysis = llm.invoke(prompt)
    # Extract the first {...} span; the model often wraps JSON in prose.
    json_match = re.search(r'\{.*\}', analysis, re.DOTALL)
    if json_match:
        try:
            return json.loads(json_match.group())
        except json.JSONDecodeError:
            # Was a bare `except:` — only malformed JSON should fall through.
            pass
    return {"root_cause": analysis, "corrected_script": ""}
def heal_locator(failure_data):
    """Propose a replacement locator for a failed run via the DOM matcher."""
    suggestion, confidence = dom_matcher.find_similar(
        failure_data.get("dom", ""),
        failure_data.get("error", ""),
    )
    return {"suggested_locator": suggestion, "confidence": confidence}
def update_script(script_content, old_locator, new_locator):
    """Return *script_content* with every occurrence of *old_locator*
    replaced by *new_locator* (no-op when the locator is absent)."""
    healed = script_content.replace(old_locator, new_locator)
    return healed
def reexecute_test(test_script):
    # Re-run the (possibly healed) script through the same pytest runner.
    return runner.run(test_script)
def generate_report(data):
    """Ask the LLM to turn the collected pipeline data into a QA report.

    data: the report_data dict assembled by run_complete_analysis; it is
    serialized to JSON and embedded in the prompt.
    Returns the LLM's answer as a string.
    """
    prompt = f"""
You are a STRICT QA auditor.
Given the test execution data (JSON):
{json.dumps(data, indent=2, ensure_ascii=False)}
Tasks:
1. Summarize each step in a table: Step | Status | Key Info
2. Provide Root Cause analysis: root cause, technical reason
3. Provide Suggested Fixes
4. Describe Healing Attempt outcome
5. Final Status (PASSED or FAILED)
6. Recommendations / Next Steps
7. Confidence level
8. Be concise, structured, readable
9. Do not repeat information
Output: Human-readable report + JSON backup
"""
    return llm.invoke(prompt)
def run_complete_analysis(test_script, language="python"):
    """Full HealTest pipeline: execute, analyze, attempt healing, re-run, report.

    test_script: raw test source to execute via pytest.
    language: language label forwarded to the root-cause prompt.
    Returns the accumulated report_data dict with keys original_script,
    steps, final_result, healing_applied, full_report.
    """
    report_data = {
        "original_script": test_script,
        "steps": [],
        "final_result": {},
        "healing_applied": False
    }
    # --- 1. Execute original test ---
    result = detect_failure(test_script)
    report_data["steps"].append({"step": "initial_execution", "result": result})
    # --- 2. If failed, apply analysis and self-healing ---
    if result["status"] == "failed":
        report_data["healing_applied"] = True
        # Analyze root cause and get corrected script
        analysis = analyze_root_cause(result, language)
        report_data["steps"].append({"step": "root_cause_analysis", "analysis": analysis})
        # Healing attempt (locator correction)
        healing = heal_locator(result)
        report_data["steps"].append({"step": "healing_attempt", "healing": healing})
        # --- Apply corrected script if provided ---
        # NOTE(review): assumes "corrected_script" is always a string here;
        # a non-string value from the LLM JSON would break .strip() — confirm.
        corrected_script = analysis.get("corrected_script", "").strip()
        if corrected_script:
            # Preferred path: re-run the LLM's corrected script.
            report_data["steps"].append({"step": "corrected_script", "new_script": corrected_script})
            final_result = reexecute_test(corrected_script)
            report_data["final_result"] = final_result
            report_data["steps"].append({"step": "re_execution", "result": final_result})
        elif "suggested_locator" in healing:
            # Fallback: swap a locator in the original script and re-run.
            # NOTE(review): "button" is a hardcoded stand-in for the failed
            # locator — confirm it matches the scripts being healed.
            old = "button"
            new = healing["suggested_locator"]
            updated_script = update_script(test_script, old, new)
            report_data["steps"].append({"step": "script_updated", "new_script": updated_script})
            final_result = reexecute_test(updated_script)
            report_data["final_result"] = final_result
            report_data["steps"].append({"step": "re_execution", "result": final_result})
        else:
            report_data["final_result"] = result
    else:
        report_data["final_result"] = result
    # --- 3. Generate full report ---
    report = generate_report(report_data)
    report_data["full_report"] = report
    return report_data
| # ============================================ | |
| # PART 3: Synthetic Test Data Generator (NEW) | |
| # ============================================ | |
class SyntheticDataGenerator:
    """Generates synthetic test data in multiple formats"""

    def __init__(self, llm):
        # Any object exposing .invoke(prompt) -> str.
        self.llm = llm

    def generate_data(self, schema_description, data_type="json", language="Python", record_count=5):
        """
        Generate synthetic test data based on schema description

        schema_description: free-form or JSON-ish description of the fields.
        data_type: one of the format_map keys below (unknown falls back to JSON).
        language: programming-language context mentioned in the prompt.
        record_count: number of records the LLM is asked to produce.
        Returns the LLM's markdown answer.
        """
        # Map data types to formats
        format_map = {
            "json": "JSON (array of objects)",
            "csv": "CSV (comma-separated values with headers)",
            "sql": "SQL INSERT statements",
            "xml": "XML document",
            "python_dict": "Python list of dictionaries",
            "yaml": "YAML format",
            "excel_style": "Table format (markdown)"
        }
        output_format = format_map.get(data_type, "JSON")
        prompt = f"""
You are a TEST DATA ENGINEER specializing in synthetic data generation.
Generate realistic, diverse, and comprehensive test data.
=====================
CONFIGURATION
=====================
Data Schema: {schema_description}
Output Format: {output_format}
Number of Records: {record_count}
Programming Context: {language}
=====================
REQUIREMENTS
=====================
1. Generate {record_count} unique, realistic records
2. Include edge cases and boundary values
3. Ensure data variety (different types, values)
4. Make data look production-like
5. Include at least one record with null/empty values (if applicable)
6. Include at least one record with extreme values
=====================
DATA FORMAT RULES
=====================
**For JSON:**
- Output as valid JSON array
- Each object should have consistent keys
- Format: ```json ... ```
**For CSV:**
- Include headers as first line
- Comma-separated values
- Format: ```csv ... ```
**For SQL:**
- Generate INSERT statements
- Include CREATE TABLE statement
- Format: ```sql ... ```
**For XML:**
- Valid XML structure
- Root element with child records
- Format: ```xml ... ```
**For Python:**
- List of dictionaries
- Valid Python syntax
- Format: ```python ... ```
**For YAML:**
- Proper YAML indentation
- Format: ```yaml ... ```
**For Table:**
- Markdown table format
- Clear column headers
- Format: Markdown table
=====================
ADDITIONAL REQUIREMENTS
=====================
- Include data validation notes
- Add brief description of data patterns
- Highlight special cases (edge values, nulls, etc.)
- Ensure data is realistic and usable for testing
=====================
SCHEMA DESCRIPTION
=====================
{schema_description}
=====================
OUTPUT STRUCTURE
=====================
Generate the response with:
## 📊 Data Generation Summary
- **Schema**: [brief description]
- **Format**: {output_format}
- **Records**: {record_count}
- **Language Context**: {language}
## 📝 Generated Test Data
```{data_type if data_type != 'excel_style' else 'markdown'}
[generated data here]
🔍 Data Quality Notes
[Special cases included]
[Edge values]
[Validation notes]
💡 Usage Example
[How to use this data in {language} tests]
"""
        return self.llm.invoke(prompt)

    def generate_bulk_data(self, schema_description, formats=None, record_counts=None):
        """Generate data in multiple formats at once.

        record_counts pairs positionally with formats; a single-element list
        applies to every format, and missing trailing entries fall back to
        the last count. (The original indexed record_counts with
        formats.index(...), raising IndexError whenever the two lists had
        different lengths and misbehaving on duplicate format entries.)
        """
        if formats is None:
            formats = ["json", "csv", "sql"]
        if record_counts is None:
            record_counts = [3, 5, 10]
        results = {}
        for index, format_type in enumerate(formats):
            if len(record_counts) == 1:
                count = record_counts[0]
            elif index < len(record_counts):
                count = record_counts[index]
            else:
                count = record_counts[-1]  # fallback instead of IndexError
            results[format_type] = self.generate_data(schema_description, format_type, record_count=count)
        return results
# Initialize the synthetic data generator
# Module-level instance bound to the shared Groq LLM.
synthetic_generator = SyntheticDataGenerator(llm)
| # ============================================ | |
| # PART 4: Gradio UI Functions | |
| # ============================================ | |
| # --- SmartQA Functions --- | |
def run_smartqa(requirements, dom, api_spec, flows, code, recording, language):
    """Gradio handler: bundle the form fields and run the SmartQA pipeline.

    Returns a single markdown string (summary + generated test cases), or a
    formatted error message with traceback on failure.
    """
    try:
        knowledge = KnowledgeInput(
            requirements=requirements,
            dom=dom,
            api_spec=api_spec,
            user_flows=flows,
            source_code=code,
            recording=recording
        )
        result = smartqa.run(knowledge, language)
        return f"""{result['summary']}
📋 Generated Test Cases
{result['test_cases']}
"""
    except Exception as e:
        return f"""❌ Error Occurred
{str(e)}
{traceback.format_exc()}
"""
def load_smartqa_examples():
    """Populate every SmartQA input field with a demo value.

    Returns a 6-tuple ordered as (requirements, dom, api_spec, flows, code,
    recording) — the same order the Gradio outputs expect.
    """
    demo_requirements = """User Authentication System:
User can register with email and password
User can login with valid credentials
User cannot login with invalid credentials
User can reset password via email
User can logout successfully
Shopping Cart:
User can add products to cart
User can remove products from cart
User can update product quantity
Cart total updates correctly"""
    demo_dom = """<html>
<body>
<input id="email" placeholder="Email">
<input id="password" type="password">
<button id="login-btn">Login</button>
<button id="add-to-cart">Add to Cart</button>
<button id="remove-item">Remove</button>
<span class="total-price">$0.00</span>
</body>
</html>"""
    demo_api = """POST /api/auth/login
Request: { "email": "string", "password": "string" }
Response: 200 { "token": "string", "user": {} }
Response: 401 { "error": "Invalid credentials" }
POST /api/cart/add
Request: { "productId": "string", "quantity": "number" }
Response: 200 { "cart": {}, "total": "number" }"""
    demo_flows = """E-Commerce Flow:
User opens website
User logs in
User searches for product
User adds product to cart
User views cart
User proceeds to checkout"""
    demo_code = """def add_to_cart(product_id, quantity):
    if not product_id:
        return {"error": "Product ID required"}
    cart = session.get('cart', {})
    cart[product_id] = cart.get(product_id, 0) + quantity
    session['cart'] = cart
    return {"success": True, "cart": cart}"""
    demo_recording = """Session Recording:
Navigated to /login
Entered email: user@example.com
Entered password
Clicked Login button
Redirected to /products
Clicked Add to Cart on product #1
Cart updated with 1 item"""
    return (demo_requirements, demo_dom, demo_api, demo_flows, demo_code, demo_recording)
def clear_smartqa():
    """Reset all six SmartQA input fields to empty strings."""
    return ("",) * 6
| # --- HealTest Functions (UPDATED) --- | |
def load_heal_examples(language):
    """Return an (example_script, example_testcase) pair for *language*.

    language: one of "python", "javascript", "java", "csharp"; anything
    else falls back to the Python pair.
    """
    examples = {
        "python": (
            """from selenium import webdriver
from selenium.webdriver.common.by import By
def test_login_ui():
    driver = webdriver.Chrome()
    driver.get("https://example.com/login")
    driver.find_element(By.ID, "submit-btn").click()
    assert False""",
            """Test: Login via UI
Steps:
1. Open login page
2. Click Login button
Expected:
User redirected to dashboard"""
        ),
        "javascript": (
            """const { Builder, By } = require('selenium-webdriver');
async function testLogin() {
    let driver = await new Builder().forBrowser('chrome').build();
    await driver.get('https://example.com/login');
    await driver.findElement(By.id('submit-btn')).click();
    throw new Error('Test failed');
}""",
            """Test: JavaScript Login Test
Steps:
1. Navigate to login page
2. Click submit button
Expected:
Login successful"""
        ),
        "java": (
            """import org.openqa.selenium.By;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.chrome.ChromeDriver;
public class LoginTest {
    public static void main(String[] args) {
        WebDriver driver = new ChromeDriver();
        driver.get("https://example.com/login");
        driver.findElement(By.id("submit-btn")).click();
        throw new RuntimeException("Test failed");
    }
}""",
            """Test: Java Login Test
Steps:
1. Open browser
2. Navigate to login
3. Click submit
Expected:
Login processed"""
        ),
        "csharp": (
            """using OpenQA.Selenium;
using OpenQA.Selenium.Chrome;
class LoginTest {
    static void Main() {
        IWebDriver driver = new ChromeDriver();
        driver.Navigate().GoToUrl("https://example.com/login");
        driver.FindElement(By.Id("submit-btn")).Click();
        throw new Exception("Test failed");
    }
}""",
            """Test: C# Login Test
Steps:
1. Initialize Chrome driver
2. Navigate to login page
3. Click submit button
Expected:
Login executed"""
        )
    }
    # Unknown/auto languages default to the Python example pair.
    return examples.get(language, examples["python"])
def process_test_with_language(script_text, testcase_text, language):
    """Gradio handler for HealTest: run the healing pipeline, render a report.

    script_text: the test script pasted by the user.
    testcase_text: free-text description shown verbatim in the report.
    language: UI language selection ("auto" maps to python).
    Returns a markdown report string (or an error message with traceback).
    """
    try:
        if not script_text.strip():
            return "⛔ Please paste a test script."
        # Map language selection
        lang_map = {
            "python": "python",
            "javascript": "javascript",
            "java": "java",
            "csharp": "csharp",
            "auto": "python"  # Default to python for auto
        }
        actual_lang = lang_map.get(language.lower(), "python")
        result = run_complete_analysis(script_text, actual_lang)
        # Build a fixed-width plain-text step table from the pipeline steps.
        step_table = "Step Summary:\n" + "-"*80 + "\n"
        step_table += f"{'Step':20} | {'Status':9} | {'Key Info'}\n"
        step_table += "-"*80 + "\n"
        for step in result["steps"]:
            step_name = step["step"].replace("_", " ").title()
            # Steps without a "result" key (analysis/healing) read as COMPLETED.
            status = "FAILED" if step.get("result", {}).get("status") == "failed" else "COMPLETED"
            key_info = ""
            if "result" in step and "error" in step["result"]:
                key_info = step["result"]["error"][:50]
            elif "healing" in step and "suggested_locator" in step["healing"]:
                key_info = f"Suggested: {step['healing']['suggested_locator']}"
            step_table += f"{step_name:20} | {status:9} | {key_info}\n"
        step_table += "-"*80 + "\n"
        display = f"# 📝 HealTest AI Report\n\n"
        # Long scripts are truncated to 500 chars in the preview.
        display += f"## Test Script Preview:\n\n```{actual_lang}\n{script_text[:500]}{'...' if len(script_text) > 500 else ''}\n```\n\n"
        display += f"## Test Case:\n{testcase_text}\n\n"
        display += f"## Steps:\n\n{step_table}\n"
        # NOTE(review): assumes steps[1] is the root-cause step whenever
        # healing ran — confirm against run_complete_analysis's step order.
        if len(result["steps"]) > 1 and "analysis" in result["steps"][1]:
            analysis = result["steps"][1]["analysis"]
            display += f"## Root Cause:\n{analysis.get('root_cause', 'N/A')}\n\n"
            if analysis.get('corrected_script'):
                display += f"## Corrected Script:\n```{actual_lang}\n{analysis['corrected_script']}\n```\n\n"
        display += f"## Final Status: **{result['final_result']['status'].upper()}**\n\n"
        if result['final_result']['status'] == 'failed':
            display += "## Recommendations:\n- Verify test logic and assertions\n- Ensure element interactability\n- Check proper locators\n- Review test environment setup\n"
        return display
    except Exception as e:
        return f"❌ Error: {str(e)}\n\n{traceback.format_exc()}"
| # ============================================ | |
| # Synthetic Data UI Functions (UPDATED) | |
| # ============================================ | |
# Built-in schema templates for the synthetic-data tab, keyed by template name.
_SCHEMA_TEMPLATES = {
    "custom": '''{
"user_id": "uuid",
"full_name": "name",
"email": "email",
"age": {"type": "int", "min": 18, "max": 99},
"is_active": "boolean",
"country": "country"
}''',
    "user_profile": '''{
"user_id": "integer",
"username": "string (3-20 chars)",
"email": "email format",
"age": "integer (18-80)",
"country": "ISO country code",
"is_premium": "boolean",
"registration_date": "date (last 2 years)",
"last_login": "datetime"
}''',
    "ecommerce_order": '''{
"order_id": "string (format: ORD-XXXX)",
"customer_name": "string",
"email": "email",
"items": [
{
"product_id": "string",
"quantity": "integer (1-10)",
"price": "decimal (10.99-999.99)"
}
],
"total_amount": "decimal",
"status": "enum(pending,shipped,delivered,cancelled)",
"order_date": "datetime"
}''',
    "api_request_log": '''{
"request_id": "uuid",
"endpoint": "string",
"method": "enum(GET,POST,PUT,DELETE)",
"status_code": "integer (200,400,401,404,500)",
"response_time_ms": "integer (50-5000)",
"timestamp": "datetime",
"user_agent": "string",
"ip_address": "ipv4"
}''',
    "contact_info": '''{
"contact_id": "integer",
"first_name": "string",
"last_name": "string",
"email": "email",
"phone": "phone number",
"address": {
"street": "string",
"city": "string",
"state": "string",
"zip_code": "string",
"country": "string"
},
"company": "string",
"notes": "string (optional)"
}'''
}


def load_schema_template(template_name):
    """Return the schema text for *template_name*; unknown names get "custom"."""
    return _SCHEMA_TEMPLATES.get(template_name, _SCHEMA_TEMPLATES["custom"])
def generate_synthetic_data(language, schema_json, record_count, template_name):
    """Wrapper function for Gradio.

    Returns a 4-tuple for the four output components:
    (preview data or error dict, code template or raw text,
     the schema echoed back, stats markdown).
    """
    try:
        if not schema_json or not schema_json.strip():
            return (
                {"error": "No schema provided"},
                "# No data generated\nPlease provide a schema definition.",
                schema_json,
                "### 📊 Generation Statistics\nNo data generated yet"
            )
        # Parse record count and clamp it to the supported 1..100 range.
        try:
            count = max(1, min(int(record_count), 100))
        except (TypeError, ValueError):
            # Was a bare `except:` — only unparsable counts fall back to 5.
            count = 5
        # Generate synthetic data
        result = synthetic_generator.generate_data(
            schema_description=schema_json,
            data_type="json",
            language=language,
            record_count=count
        )
        # Parse result: extract the first ```json fenced block, if any.
        json_match = re.search(r'```json\n(.*?)\n```', result, re.DOTALL)
        if json_match:
            try:
                data_json = json.loads(json_match.group(1))
                # Limit preview to first 10 records
                if isinstance(data_json, list) and len(data_json) > 10:
                    preview_data = data_json[:10]
                else:
                    preview_data = data_json
                # Generate Python code template
                code_template = f'''# Generated test data for {language}
# Created with QA Suite Synthetic Data Generator
import json
# Sample test data (first {min(count, 10)} records)
test_data = {json.dumps(preview_data, indent=2, ensure_ascii=False)}
# Usage example:
for record in test_data:
    # Your test logic here
    print(record)
    # Example: validate record structure
    # assert 'user_id' in record
    # assert 'email' in record
'''
                stats = f"""### 📊 Generation Statistics
- **Records Generated**: {count}
- **Language**: {language}
- **Template**: {template_name}
- **Status**: ✅ Success
**Data Quality Notes:**
- Realistic test data created
- Includes edge cases and variations
- Ready for immediate use in tests
"""
                return (preview_data, code_template, schema_json, stats)
            except json.JSONDecodeError:
                return (
                    {"error": "Could not parse generated data"},
                    result,
                    schema_json,
                    "### ⚠️ Generation completed but output format needs review"
                )
        else:
            return (
                {"raw_output": result[:1000]},
                result,
                schema_json,
                "### 📊 Generation Statistics\nData generated but not in expected JSON format"
            )
    except Exception as e:
        error_data = {"error": str(e), "traceback": traceback.format_exc()}
        return (
            error_data,
            f"# Error generating data\n\n```\n{str(e)}\n```",
            schema_json,
            f"### ❌ Generation Failed\n\n**Error**: {str(e)}"
        )
| # ============================================ | |
| # PART 5: Gradio UI (FIXED FOR GRADIO 6.0) | |
| # ============================================ | |
# Top-level Gradio UI: three tabs (SmartQA, HealTest AI, Synthetic Data), each
# wired via .click()/.change() to handler functions defined earlier in this file.
with gr.Blocks(title="QA Suite: SmartQA + HealTest AI + Synthetic Data", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🧠 QA Suite: Complete Testing Platform")
    gr.Markdown("### SmartQA | HealTest AI | Synthetic Test Data Generator")
    # ============================================
    # Tab 1: SmartQA
    # ============================================
    with gr.Tab("🎯 SmartQA - Test Case Generator"):
        with gr.Column():
            gr.Markdown("### Generate Professional Test Cases from Multiple Sources")
            with gr.Row():
                with gr.Column(scale=2):
                    # Language hint forwarded to the generator for context only;
                    # it does not gate which input sources are read.
                    language_dropdown = gr.Dropdown(
                        choices=["Python", "JavaScript", "Java", "C#"],
                        value="Python",
                        label="🎯 Target Language (for context only)"
                    )
                    # One nested tab per knowledge source; all are free-text inputs
                    # that run_smartqa consumes together.
                    with gr.Tab("📋 Requirements"):
                        req_input = gr.Textbox(lines=6, label="Requirements / User Stories")
                    with gr.Tab("🔄 User Flows"):
                        flow_input = gr.Textbox(lines=4, label="User Flows / Journey Maps")
                    with gr.Tab("🔌 API Spec"):
                        api_input = gr.Textbox(lines=4, label="API Specification / OpenAPI")
                    with gr.Tab("🖥️ UI/DOM"):
                        dom_input = gr.Textbox(lines=4, label="UI/DOM Structure")
                    with gr.Tab("💻 Source Code"):
                        code_input = gr.Textbox(lines=4, label="Source Code")
                    with gr.Tab("📹 Recording"):
                        rec_input = gr.Textbox(lines=4, label="User Session Recording")
                with gr.Column(scale=1):
                    gr.Markdown("### 🚀 Actions")
                    smartqa_run_btn = gr.Button("📝 Generate Test Cases", variant="primary")
                    smartqa_example_btn = gr.Button("📂 Load Examples", variant="secondary")
                    smartqa_clear_btn = gr.Button("🗑️ Clear All", variant="secondary")
                    gr.Markdown("---")
                    gr.Markdown("### 📊 Generated Test Cases")
                    # Results panel; replaced wholesale by run_smartqa's markdown output.
                    smartqa_output = gr.Markdown("✨ Click 'Generate Test Cases' to start...")
            # Wire up events (handlers defined earlier in this file).
            smartqa_run_btn.click(
                fn=run_smartqa,
                inputs=[req_input, dom_input, api_input, flow_input, code_input, rec_input, language_dropdown],
                outputs=smartqa_output
            )
            # Fill every source textbox with canned example content.
            smartqa_example_btn.click(
                fn=load_smartqa_examples,
                inputs=[],
                outputs=[req_input, dom_input, api_input, flow_input, code_input, rec_input]
            )
            # Reset every source textbox.
            smartqa_clear_btn.click(
                fn=clear_smartqa,
                inputs=[],
                outputs=[req_input, dom_input, api_input, flow_input, code_input, rec_input]
            )
    # ============================================
    # Tab 2: HealTest AI
    # ============================================
    with gr.Tab("🔧 HealTest AI - Self-Healing Tests"):
        with gr.Column():
            gr.Markdown("### Self-Healing Test Automation for Multiple Languages")
            gr.Markdown("Supports: Python, JavaScript, Java, C#")
            with gr.Row():
                with gr.Column():
                    # "auto" defers language detection to the handler.
                    heal_language = gr.Dropdown(
                        choices=["python", "javascript", "java", "csharp", "auto"],
                        value="auto",
                        label="🌐 Test Language (auto-detect recommended)"
                    )
                    heal_script_input = gr.Textbox(
                        label="Test Script",
                        lines=12,
                        placeholder="Paste your test script here..."
                    )
                    heal_testcase_input = gr.Textbox(
                        label="Test Case Description (optional)",
                        lines=3,
                        placeholder="Describe what this test should do..."
                    )
                    heal_analyze_btn = gr.Button("🚀 Start Analysis & Healing", variant="primary")
                    gr.Markdown("### 📚 Quick Examples:")
                    # One example-loader button per supported language.
                    with gr.Row():
                        heal_example_python = gr.Button("🐍 Python")
                        heal_example_js = gr.Button("📜 JavaScript")
                        heal_example_java = gr.Button("☕ Java")
                        heal_example_csharp = gr.Button("🔷 C#")
            heal_output = gr.Markdown("✨ Click 'Start Analysis & Healing' to begin...")
            heal_analyze_btn.click(
                fn=process_test_with_language,
                inputs=[heal_script_input, heal_testcase_input, heal_language],
                outputs=heal_output
            )
            # Each example button loads a (script, description) pair for its language.
            heal_example_python.click(
                fn=lambda: load_heal_examples("python"),
                inputs=[],
                outputs=[heal_script_input, heal_testcase_input]
            )
            heal_example_js.click(
                fn=lambda: load_heal_examples("javascript"),
                inputs=[],
                outputs=[heal_script_input, heal_testcase_input]
            )
            heal_example_java.click(
                fn=lambda: load_heal_examples("java"),
                inputs=[],
                outputs=[heal_script_input, heal_testcase_input]
            )
            heal_example_csharp.click(
                fn=lambda: load_heal_examples("csharp"),
                inputs=[],
                outputs=[heal_script_input, heal_testcase_input]
            )
    # ============================================
    # Tab 3: Synthetic Data Generator
    # ============================================
    with gr.Tab("🎲 Synthetic Test Data"):
        with gr.Column():
            gr.Markdown("### Generate Realistic Synthetic Test Data")
            gr.Markdown("Create realistic test data for your applications with one click")
            with gr.Row():
                with gr.Column(scale=1):
                    data_language = gr.Dropdown(
                        choices=["python", "javascript", "java", "csharp"],
                        value="python",
                        label="🎯 Target Language"
                    )
                    data_count = gr.Slider(
                        minimum=1,
                        maximum=100,
                        value=10,
                        step=1,
                        label="📊 Number of Records"
                    )
                    gr.Markdown("### 📝 Define Data Schema")
                    # Selecting a template overwrites schema_input (see .change below).
                    data_template = gr.Dropdown(
                        choices=["custom", "user_profile", "ecommerce_order", "api_request_log", "contact_info"],
                        value="custom",
                        label="📁 Schema Template"
                    )
                    # Default schema shown for the "custom" template.
                    schema_input = gr.Textbox(
                        value='''{
"user_id": "uuid",
"full_name": "name",
"email": "email",
"age": {"type": "int", "min": 18, "max": 99},
"is_active": "boolean",
"country": "country"
}''',
                        lines=12,
                        label="Schema Definition (JSON)"
                    )
                with gr.Column(scale=1):
                    data_stats = gr.Markdown("### 📊 Generation Statistics\n_No data generated yet_")
                    data_output = gr.JSON(label="Generated Data Preview (First 10 records)")
                    # NOTE(review): syntax highlighting is fixed to Python regardless of
                    # the data_language selection — confirm this is intentional.
                    code_output = gr.Code(label="Ready-to-use Code Template", language="python", interactive=False, lines=15)
            generate_btn = gr.Button("✨ Generate Test Data", variant="primary")
            # Swap in the predefined schema when a template is picked.
            data_template.change(
                fn=load_schema_template,
                inputs=[data_template],
                outputs=[schema_input]
            )
            # generate_synthetic_data returns (preview, code, schema, stats);
            # schema_input is echoed back as the third output.
            generate_btn.click(
                fn=generate_synthetic_data,
                inputs=[data_language, schema_input, data_count, data_template],
                outputs=[data_output, code_output, schema_input, data_stats]
            )
| # ============================================ | |
| # Launch Application | |
| # ============================================ | |
if __name__ == "__main__":
    # Startup banner: summarize the three bundled tools before serving.
    separator = "=" * 60
    banner_lines = [
        "🚀 Launching QA Suite: SmartQA + HealTest AI + Synthetic Data Generator",
        separator,
        "✅ All components loaded successfully!",
        "📊 Features:",
        " - SmartQA: Test case generation from multiple sources",
        " - HealTest AI: Self-healing test automation (4 languages)",
        " - Synthetic Data: Generate realistic test data",
        separator,
    ]
    print("\n".join(banner_lines))
    # Bind to all interfaces on port 7860 and request a public share link.
    demo.launch(share=True, debug=False, server_name="0.0.0.0", server_port=7860)