ressay1973 committed on
Commit
5196201
·
verified ·
1 Parent(s): 61ab4fc

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +25 -41
app.py CHANGED
@@ -1,10 +1,9 @@
1
- from smolagents import CodeAgent, HfApiModel, load_tool, tool
2
  import yaml
3
  import gradio as gr
4
 
5
  def classify_incident(service_criticity: str, disruption: str, affectation_time: str, magnitude: str, workaround: str) -> str:
6
  """Classifies an incident based on predefined rules."""
7
-
8
  if service_criticity == "High" and disruption == "Full":
9
  return "P1" # Critical issue
10
  elif service_criticity == "High" and (disruption == "Degraded" or affectation_time == "Up 15 mins"):
@@ -16,23 +15,15 @@ def classify_incident(service_criticity: str, disruption: str, affectation_time:
16
 
17
  @tool
18
  def diagnose_incident(service_criticity: str, disruption: str, affectation_time: str, magnitude: str, workaround: str) -> str:
19
- """Diagnoses an incident and determines its severity priority.
20
- Args:
21
- service_criticity: High or Low
22
- disruption: Full, Degraded, or None
23
- affectation_time: Up 15 mins, Less 15 mins
24
- magnitude: High, Low, or None
25
- workaround: Yes or No
26
- """
27
  priority = classify_incident(service_criticity, disruption, affectation_time, magnitude, workaround)
28
  return f"Incident classified as priority: {priority}"
29
 
30
- # Load model from Hugging Face
31
  model = HfApiModel(
32
- max_tokens=2096,
33
- temperature=0.5,
34
- model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
35
- custom_role_conversions=None,
36
  )
37
 
38
  # Load prompt templates
@@ -43,40 +34,33 @@ with open("prompts.yaml", 'r') as stream:
43
  agent = CodeAgent(
44
  model=model,
45
  tools=[diagnose_incident],
46
- max_steps=6,
47
- verbosity_level=1,
48
- grammar=None,
49
- planning_interval=None,
50
  name="Incident Diagnosis Agent",
51
  description="An agent that classifies incidents based on severity criteria.",
52
  prompt_templates=prompt_templates
53
  )
54
 
55
- def chat_diagnose_incident(conversation):
56
- """Simulates a conversation with the agent to diagnose an incident."""
57
-
58
- conversation.append("Agent: Hola, cuéntame qué ocurrió con el servicio.")
59
- service_criticity = gr.Textbox(label="Tu respuesta")
60
- conversation.append("Agent: ¿El problema causa una interrupción total o solo degradación del servicio?")
61
- disruption = gr.Textbox(label="Tu respuesta")
62
- conversation.append("Agent: ¿Cuánto tiempo lleva afectado?")
63
- affectation_time = gr.Textbox(label="Tu respuesta")
64
- conversation.append("Agent: ¿Qué tan grave es el impacto en el sistema?")
65
- magnitude = gr.Textbox(label="Tu respuesta")
66
- conversation.append("Agent: ¿Existe algún tipo de solución alternativa o workaround?")
67
- workaround = gr.Textbox(label="Tu respuesta")
68
-
69
  priority = diagnose_incident(service_criticity, disruption, affectation_time, magnitude, workaround)
70
- conversation.append(f"Agent: Con base en la información, el incidente se clasifica como prioridad {priority}.")
71
- return "\n".join(conversation)
72
 
73
- # Gradio UI para conversación
74
- iface = gr.ChatInterface(
75
  fn=chat_diagnose_incident,
 
 
 
 
 
 
 
 
76
  title="Asistente de Diagnóstico de Incidentes",
77
- description="Conversación interactiva con el agente para clasificar la severidad del incidente.",
78
- theme="dark"
79
  )
80
 
81
- # Lanzar UI
82
- iface.launch()
 
1
+ from smolagents import CodeAgent, HfApiModel, tool
2
  import yaml
3
  import gradio as gr
4
 
5
  def classify_incident(service_criticity: str, disruption: str, affectation_time: str, magnitude: str, workaround: str) -> str:
6
  """Classifies an incident based on predefined rules."""
 
7
  if service_criticity == "High" and disruption == "Full":
8
  return "P1" # Critical issue
9
  elif service_criticity == "High" and (disruption == "Degraded" or affectation_time == "Up 15 mins"):
 
15
 
16
  @tool
17
  def diagnose_incident(service_criticity: str, disruption: str, affectation_time: str, magnitude: str, workaround: str) -> str:
18
+ """Determines the severity priority of an incident."""
 
 
 
 
 
 
 
19
  priority = classify_incident(service_criticity, disruption, affectation_time, magnitude, workaround)
20
  return f"Incident classified as priority: {priority}"
21
 
22
# Load model with optimized settings.
# HfApiModel routes generation through the Hugging Face Inference API;
# no other keyword arguments are passed, so library defaults apply elsewhere.
model = HfApiModel(
    max_tokens=512,  # Reduce max tokens to improve response time
    temperature=0.3,  # Lower temperature for more deterministic results
    model_id='Qwen/Qwen2.5-Coder-32B-Instruct'  # coder-tuned model used by the agent
)
28
 
29
  # Load prompt templates
 
34
  agent = CodeAgent(
35
  model=model,
36
  tools=[diagnose_incident],
37
+ max_steps=3, # Reduce steps to minimize latency
38
+ verbosity_level=0, # Lower verbosity for efficiency
 
 
39
  name="Incident Diagnosis Agent",
40
  description="An agent that classifies incidents based on severity criteria.",
41
  prompt_templates=prompt_templates
42
  )
43
 
44
def chat_diagnose_incident(service_criticity, disruption, affectation_time, magnitude, workaround):
    """Run the diagnosis tool on the user's selections and format the reply.

    Each argument is one of the dropdown selections from the Gradio UI;
    the return value is the agent's answer as a single display string.
    """
    diagnosis = diagnose_incident(
        service_criticity,
        disruption,
        affectation_time,
        magnitude,
        workaround,
    )
    return f"Agent: El incidente se clasifica como prioridad {diagnosis}."
 
48
 
49
# Gradio UI: five dropdowns feeding chat_diagnose_incident.
# NOTE: the choice strings must match the exact values classify_incident
# compares against (e.g. "High", "Full", "Up 15 mins").
iface = gr.Interface(
    fn=chat_diagnose_incident,
    inputs=[
        gr.Dropdown(["High", "Low"], label="Criticidad del servicio"),
        gr.Dropdown(["Full", "Degraded", "None"], label="Interrupción"),
        gr.Dropdown(["Up 15 mins", "Less 15 mins"], label="Tiempo de afectación"),
        gr.Dropdown(["High", "Low", "None"], label="Magnitud"),
        gr.Dropdown(["Yes", "No"], label="¿Hay workaround?"),
    ],
    outputs=gr.Textbox(),  # single text answer from the function
    title="Asistente de Diagnóstico de Incidentes",
    description="Selecciona las opciones para diagnosticar el incidente.",
    theme="default"
)
64
 
65
# Lanzar UI — only when run as a script, not when imported.
# Original line fused the comment and the guard ("# Lanzar UI\ if __name__ ..."),
# leaving the guard inside a comment and the indented launch() a syntax error.
if __name__ == "__main__":
    iface.launch()