Spaces:
Running
Running
fix: increase tour max_tokens to 8192
Browse files

Cartographer's verbose concept descriptions hit 6654 chars (finish_reason=length)
at 4096 tokens. Gemini 2.5 Flash supports up to 65k output tokens — 8192 gives
enough headroom for any realistic repo while staying well within free-tier limits.
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
backend/services/diagram_service.py
CHANGED
|
@@ -533,7 +533,7 @@ class DiagramService:
|
|
| 533 |
# temperature=0.0 — tour must be factual and consistent across regenerations.
|
| 534 |
# json_mode=True — forces JSON output on OpenAI-compatible providers so we
|
| 535 |
# never have to strip markdown fences or rescue half-parsed responses.
|
| 536 |
-
raw = self._gen.generate(_TOUR_SYSTEM, prompt, temperature=0.0, json_mode=True, max_tokens=4096)
|
| 537 |
|
| 538 |
try:
|
| 539 |
tour = _parse_json(raw)
|
|
@@ -633,7 +633,7 @@ class DiagramService:
|
|
| 633 |
prompt = _TOUR_PROMPT.format(repo=repo, chunk_summary=chunk_summary)
|
| 634 |
|
| 635 |
yield {"stage": "generating", "progress": 0.55, "message": "Generating concept tour with AI…"}
|
| 636 |
-
raw = self._gen.generate(_TOUR_SYSTEM, prompt, temperature=0.0, json_mode=True, max_tokens=4096)
|
| 637 |
|
| 638 |
yield {"stage": "parsing", "progress": 0.90, "message": "Finalizing…"}
|
| 639 |
try:
|
|
|
|
| 533 |
# temperature=0.0 — tour must be factual and consistent across regenerations.
|
| 534 |
# json_mode=True — forces JSON output on OpenAI-compatible providers so we
|
| 535 |
# never have to strip markdown fences or rescue half-parsed responses.
|
| 536 |
+
raw = self._gen.generate(_TOUR_SYSTEM, prompt, temperature=0.0, json_mode=True, max_tokens=8192)
|
| 537 |
|
| 538 |
try:
|
| 539 |
tour = _parse_json(raw)
|
|
|
|
| 633 |
prompt = _TOUR_PROMPT.format(repo=repo, chunk_summary=chunk_summary)
|
| 634 |
|
| 635 |
yield {"stage": "generating", "progress": 0.55, "message": "Generating concept tour with AI…"}
|
| 636 |
+
raw = self._gen.generate(_TOUR_SYSTEM, prompt, temperature=0.0, json_mode=True, max_tokens=8192)
|
| 637 |
|
| 638 |
yield {"stage": "parsing", "progress": 0.90, "message": "Finalizing…"}
|
| 639 |
try:
|