# details.wes — solved the problems in hf token (commit 7f62453)
"""
Crew definition for the agents service.
LLM is provided via LiteLLM against Hugging Face. Set the model id in the environment — see
AGENTS_LLM_MODEL in .env.example (LiteLLM form: huggingface/<org>/<model>).
Pick a model that your Hugging Face account can run via Inference Providers (check the model's Hub page
for Inference / provider badges), or set AGENTS_LLM_BASE_URL to your own OpenAI-compatible endpoint.
Qwen/WebWorld-32B is not served on the public Inference Providers router; use it only with
AGENTS_LLM_BASE_URL (e.g. your own HF Inference Endpoint or vLLM).
If a plain huggingface/<org>/<model> call fails, LiteLLM supports:
huggingface/<provider>/<org>/<model>
only when that provider is listed on the model card for this model (do not guess the provider).
Required environment:
HF_TOKEN (or HUGGINGFACE_HUB_TOKEN)
AGENTS_LLM_MODEL
Optional:
AGENTS_LLM_TEMPERATURE float, default 0.5
AGENTS_LLM_BASE_URL OpenAI-compatible base URL (e.g. HF Inference Endpoint)
"""
from __future__ import annotations
import os
from typing import List
from crewai import Agent, Crew, LLM, Process, Task
from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.project import CrewBase, agent, crew, task
def _resolve_hf_token() -> str:
    """Return the Hugging Face API token from the environment.

    Reads ``HF_TOKEN`` first, then falls back to ``HUGGINGFACE_HUB_TOKEN``,
    tolerating values that a .env file wrapped in single or double quotes.
    Delegates the unquoting to ``_strip_optional_env_quotes`` instead of
    duplicating the same logic inline (consistency with the other env readers).

    Raises:
        RuntimeError: if neither variable holds a non-empty token.
    """
    token = _strip_optional_env_quotes(
        os.getenv("HF_TOKEN") or os.getenv("HUGGINGFACE_HUB_TOKEN") or ""
    )
    if not token:
        raise RuntimeError(
            "Missing Hugging Face token. Set HF_TOKEN (or HUGGINGFACE_HUB_TOKEN) in the "
            "agents service environment."
        )
    return token
def _strip_optional_env_quotes(value: str) -> str:
v = value.strip()
if (v.startswith('"') and v.endswith('"')) or (v.startswith("'") and v.endswith("'")):
v = v[1:-1].strip()
return v
def _resolve_llm_model() -> str:
raw = (os.getenv("AGENTS_LLM_MODEL") or "").strip()
raw = _strip_optional_env_quotes(raw)
if not raw:
raise RuntimeError(
"Missing AGENTS_LLM_MODEL. Set it in the environment (see .env.example), e.g. "
"huggingface/Qwen/Qwen2.5-7B-Instruct"
)
return raw
def _build_llm() -> LLM:
    """Construct the crew's LLM from environment configuration.

    Reads ``AGENTS_LLM_MODEL`` (required, LiteLLM id), ``AGENTS_LLM_TEMPERATURE``
    (optional, default 0.5) and ``AGENTS_LLM_BASE_URL`` (optional OpenAI-compatible
    endpoint). Exports the HF token under both common variable names so the
    downstream libraries find it whichever one they read.

    Raises:
        RuntimeError: if the token or model is missing, or if a WebWorld model
            is configured without a custom base URL (WebWorld is not hosted on
            the public Inference Providers router — see module docstring).
    """
    model = _resolve_llm_model()
    temperature = float(os.getenv("AGENTS_LLM_TEMPERATURE", "0.5"))
    hf_token = _resolve_hf_token()
    os.environ["HF_TOKEN"] = hf_token
    os.environ["HUGGINGFACE_HUB_TOKEN"] = hf_token
    # Fix: unquote BEFORE trimming the trailing slash. The previous order ran
    # rstrip("/") first, so a quoted value like '"https://host/v1/"' still
    # ended with a slash after the quotes were removed.
    base_url = _strip_optional_env_quotes(os.getenv("AGENTS_LLM_BASE_URL") or "").rstrip("/")
    if "webworld" in model.lower() and not base_url:
        raise RuntimeError(
            "AGENTS_LLM_MODEL is set to a WebWorld model, which Hugging Face Inference Providers "
            "does not host. Set AGENTS_LLM_MODEL to a routed instruct model, or run WebWorld on your "
            "own endpoint and set AGENTS_LLM_BASE_URL to that OpenAI-compatible base URL."
        )
    if base_url:
        return LLM(
            model=model,
            base_url=base_url,
            api_key=hf_token,
            temperature=temperature,
        )
    return LLM(model=model, api_key=hf_token, temperature=temperature)
@CrewBase
class ContentCrew:
    """LinkedIn post writing crew — runs inside the agents service.

    Two agents run sequentially: the writer drafts the post, then the editor
    refines it. Agent/task configs are looked up from ``agents_config`` /
    ``tasks_config`` (presumably populated by @CrewBase from project config
    files — confirm against the crewai project layout).
    """
    # Collected by the @agent / @task decorators; consumed by crew() below.
    agents: List[BaseAgent]
    tasks: List[Task]
    @agent
    def writer_agent(self) -> Agent:
        """Drafting agent; builds its own LLM from the environment."""
        return Agent(
            config=self.agents_config["writer_agent"],
            llm=_build_llm(),
            max_tokens=420,  # response cap for the draft
            verbose=False,
        )
    @agent
    def editor_agent(self) -> Agent:
        """Editing agent; slightly tighter token cap than the writer."""
        return Agent(
            config=self.agents_config["editor_agent"],
            llm=_build_llm(),
            max_tokens=380,  # response cap for the edit pass
            verbose=False,
        )
    @task
    def write_post_task(self) -> Task:
        """Task that produces the initial post draft."""
        return Task(config=self.tasks_config["write_post_task"])
    @task
    def edit_post_task(self) -> Task:
        """Task that edits the drafted post."""
        return Task(config=self.tasks_config["edit_post_task"])
    @crew
    def crew(self) -> Crew:
        """Assemble the crew; tasks execute in declaration order (sequential)."""
        return Crew(
            agents=self.agents,
            tasks=self.tasks,
            process=Process.sequential,
            verbose=False,
        )