billyaungmyint committed on
Commit
2277115
·
verified ·
1 Parent(s): ac40257

Sync from GitHub via hub-sync

Browse files
.gradio/certificate.pem ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ -----BEGIN CERTIFICATE-----
2
+ MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
3
+ TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
4
+ cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
5
+ WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
6
+ ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
7
+ MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
8
+ h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
9
+ 0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
10
+ A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
11
+ T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
12
+ B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
13
+ B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
14
+ KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
15
+ OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
16
+ jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
17
+ qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
18
+ rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
19
+ HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
20
+ hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
21
+ ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
22
+ 3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
23
+ NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
24
+ ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
25
+ TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
26
+ jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
27
+ oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
28
+ 4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
29
+ mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
30
+ emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
31
+ -----END CERTIFICATE-----
VERSION CHANGED
@@ -1 +1 @@
1
- ded093e61cbf8b55e23e7751a0a2ffe76d0ab6b9
 
1
+ 7af57131ccbed63d444f4a45e8fa1a2a0fdce918
pyproject.toml CHANGED
@@ -6,7 +6,10 @@ readme = "README.md"
6
  requires-python = ">=3.13"
7
  dependencies = [
8
  "ddgs>=9.14.1",
 
9
  "gradio>=5.49.1",
10
  "huggingface-hub>=1.11.0",
11
- "smolagents[litellm,toolkit]>=1.22.0",
 
 
12
  ]
 
6
  requires-python = ">=3.13"
7
  dependencies = [
8
  "ddgs>=9.14.1",
9
+ "e2b-code-interpreter>=2.6.2",
10
  "gradio>=5.49.1",
11
  "huggingface-hub>=1.11.0",
12
+ "numpy>=2.4.4",
13
+ "pandas>=3.0.2",
14
+ "smolagents[e2b,litellm,telemetry,toolkit]>=1.22.0",
15
  ]
smolagents/code001.py CHANGED
@@ -1,12 +1,19 @@
1
  from smolagents import CodeAgent, InferenceClientModel , LiteLLMModel
2
 
3
  # Initialize a model (using Hugging Face Inference API)
4
- # model = InferenceClientModel("deepseek-ai/DeepSeek-V4-Flash")
5
- model = LiteLLMModel("openai/gpt-4.1-mini")
6
 
7
  # Create an agent with no tools
8
  agent = CodeAgent(tools=[], model=model)
9
 
10
  # Run the agent with a task
11
  result = agent.run("Calculate the sum of numbers from 1 to 10")
12
- print(result)
 
 
 
 
 
 
 
 
1
  from smolagents import CodeAgent, InferenceClientModel , LiteLLMModel
2
 
3
  # Initialize a model (using Hugging Face Inference API)
4
+ model = InferenceClientModel("deepseek-ai/DeepSeek-V4-Flash")
5
+ # model = LiteLLMModel("openai/gpt-4.1-mini")
6
 
7
  # Create an agent with no tools
8
  agent = CodeAgent(tools=[], model=model)
9
 
10
  # Run the agent with a task
11
  result = agent.run("Calculate the sum of numbers from 1 to 10")
12
+ print(result)
13
+
14
+ # messages = [
15
+ # {"role": "user", "content": [{"type": "text", "text": "Calculate the sum of numbers from 1 to 10"}]}
16
+ # ]
17
+ # # Note: Models are usually called by the Agent,
18
+ # # but you can call them directly with a list of messages like this:
19
+ # print(model(messages))
smolagents/smol_e2b.py ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ import time
2
+ from smolagents import InferenceClientModel, CodeAgent
3
+
4
+
5
+ with CodeAgent(model=InferenceClientModel("deepseek-ai/DeepSeek-V4-Flash"), tools=[], executor_type="e2b") as agent:
6
+ agent.run("Can you give me the 100th Fibonacci number?")
7
+ print("Waiting 5 seconds before shutting down sandbox...")
8
+ time.sleep(5)
smolagents/smol_gradio.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from smolagents import (
2
+ load_tool,
3
+ CodeAgent,
4
+ InferenceClientModel,
5
+ GradioUI
6
+ )
7
+
8
+ # Import tool from Hub
9
+ image_generation_tool = load_tool("m-ric/text-to-image", trust_remote_code=True)
10
+
11
+ model = InferenceClientModel()
12
+
13
+ # Initialize the agent with the image generation tool
14
+ agent = CodeAgent(tools=[image_generation_tool], model=model)
15
+
16
+ GradioUI(agent).launch()
smolagents/smol_in_e2b.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from e2b_code_interpreter import Sandbox
2
+ import os
3
+
4
+ # Define your agent application
5
+ agent_code = """
6
+ import os
7
+ from smolagents import CodeAgent, InferenceClientModel
8
+
9
+ # Initialize the agents
10
+ # Note: Using a specific model that is known to work with fireworks-ai
11
+ model_id = "deepseek-ai/DeepSeek-V4-Flash"
12
+
13
+ agent = CodeAgent(
14
+ model=InferenceClientModel(model_id=model_id, token=os.getenv("HF_TOKEN")),
15
+ tools=[],
16
+ name="coder_agent",
17
+ description="This agent takes care of your difficult algorithmic problems using code."
18
+ )
19
+
20
+ manager_agent = CodeAgent(
21
+ model=InferenceClientModel(model_id=model_id, token=os.getenv("HF_TOKEN")),
22
+ tools=[],
23
+ managed_agents=[agent],
24
+ )
25
+
26
+ # Run the agent
27
+ response = manager_agent.run("What's the 5th Fibonacci number?")
28
+ print(response)
29
+ """
30
+
31
+ def run_code_raise_errors(sandbox, code: str) -> str:
32
+ execution = sandbox.run_code(
33
+ code,
34
+ envs={'HF_TOKEN': os.getenv('HF_TOKEN')}
35
+ )
36
+ if execution.error:
37
+ execution_logs = "\n".join([str(log) for log in execution.logs.stdout])
38
+ logs = execution_logs
39
+ logs += execution.error.traceback
40
+ raise ValueError(logs)
41
+ return "\n".join([str(log) for log in execution.logs.stdout])
42
+
43
+ # Use context manager to ensure sandbox is closed
44
+ with Sandbox.create() as sandbox:
45
+ print("Sandbox created. Installing smolagents...")
46
+ # Install required packages
47
+ sandbox.commands.run("pip install smolagents")
48
+
49
+ print("Running agent code...")
50
+ # Run the agent code in the sandbox
51
+ execution_logs = run_code_raise_errors(sandbox, agent_code)
52
+ print("Execution Result:")
53
+ print(execution_logs)
smolagents/smol_phoenix.py ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # https://huggingface.co/docs/smolagents/tutorials/inspect_runs
2
+ import smolagents.local_python_executor as lpe
3
+ from concurrent.futures import ThreadPoolExecutor
4
+
5
+ # Fix for compatibility with smolagents >= 1.22.0 and openinference-instrumentation-smolagents
6
+ if not hasattr(lpe, "ThreadPoolExecutor"):
7
+ lpe.ThreadPoolExecutor = ThreadPoolExecutor
8
+
9
+ from phoenix.otel import register
10
+ from openinference.instrumentation.smolagents import SmolagentsInstrumentor
11
+
12
+ register()
13
+ SmolagentsInstrumentor().instrument()
14
+
15
+ from smolagents import (
16
+ CodeAgent,
17
+ ToolCallingAgent,
18
+ WebSearchTool,
19
+ VisitWebpageTool,
20
+ InferenceClientModel,
21
+ )
22
+
23
+ model = InferenceClientModel("deepseek-ai/DeepSeek-V4-Flash")
24
+
25
+ search_agent = CodeAgent(
26
+ tools=[WebSearchTool(), VisitWebpageTool()],
27
+ model=model,
28
+ name="search_agent",
29
+ description="This is an agent that can do web search.",
30
+ )
31
+
32
+ manager_agent = CodeAgent(
33
+ tools=[],
34
+ model=model,
35
+ managed_agents=[search_agent],
36
+ )
37
+ manager_agent.run(
38
+ "If the US keeps its 2024 growth rate, how many years will it take for the GDP to double?"
39
+ )
smolagents/tools001.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from smolagents import CodeAgent, InferenceClientModel, DuckDuckGoSearchTool
2
+
3
+ model = InferenceClientModel("deepseek-ai/DeepSeek-V4-Flash")
4
+ agent = CodeAgent(
5
+ # tools=[DuckDuckGoSearchTool()],
6
+ tools=[],
7
+ model=model,
8
+ )
9
+
10
+ # Now the agent can search the web!
11
+ result = agent.run("What is the current weather in Paris?")
12
+ print(result)
uv.lock CHANGED
The diff for this file is too large to render. See raw diff