chore: lint format

Richard Tang
2026-04-24 10:12:06 -07:00
parent 386bbd5780
commit 22d75bfb05
4 changed files with 20 additions and 29 deletions
+2 -5
@@ -3456,9 +3456,7 @@ class AgentLoop(AgentProtocol):
tc.tool_name,
tc.tool_input or {},
)
-caption_tasks[tc.tool_use_id] = asyncio.create_task(
-_captioning_chain(intent, res.image_content)
-)
+caption_tasks[tc.tool_use_id] = asyncio.create_task(_captioning_chain(intent, res.image_content))
for tc in tool_calls[:executed_in_batch]:
result = results_by_id.get(tc.tool_use_id)
@@ -3492,8 +3490,7 @@ class AgentLoop(AgentProtocol):
if caption:
vision_fallback_marker = f"[vision-fallback caption]\n{caption}"
logger.info(
"vision_fallback: captioned %d image(s) for tool '%s' "
"(model '%s' routed through fallback)",
"vision_fallback: captioned %d image(s) for tool '%s' (model '%s' routed through fallback)",
len(image_content),
tc.tool_name,
ctx.llm.model if ctx.llm else "?",
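For orientation, a minimal sketch of the pattern the two hunks above format: each image-bearing tool result gets its own captioning task, keyed by tool_use_id, and the captions are gathered once everything is scheduled. The wrapper function, the simplified _captioning_chain body, and using tc.tool_name as the intent are assumptions; only the identifiers come from the diff.

```python
import asyncio


async def _captioning_chain(intent: str, image_content: list) -> str:
    """Hypothetical stand-in for the real captioning chain."""
    await asyncio.sleep(0)  # pretend to call a vision-capable caption model here
    return f"caption of {len(image_content)} image(s) for: {intent}"


async def caption_image_results(tool_calls, results_by_id) -> dict[str, str]:
    # One captioning task per image-bearing tool result, keyed by tool_use_id
    # so each caption can be matched back to the tool call it belongs to.
    caption_tasks: dict[str, asyncio.Task] = {}
    for tc in tool_calls:
        res = results_by_id.get(tc.tool_use_id)
        if res is not None and res.image_content:
            caption_tasks[tc.tool_use_id] = asyncio.create_task(
                _captioning_chain(tc.tool_name, res.image_content)
            )
    # Let all captions run concurrently, then pair them back up with their ids.
    captions = await asyncio.gather(*caption_tasks.values())
    return dict(zip(caption_tasks.keys(), captions))
```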
@@ -219,10 +219,7 @@ async def caption_tool_image(
# Don't dump the base64 image data into the log file — that
# would balloon the jsonl with mostly-binary noise.
elided_blocks: list[dict[str, Any]] = [{"type": "text", "text": intent}]
-elided_blocks.extend(
-{"type": "image_url", "image_url": {"url": "<elided>"}}
-for _ in range(len(image_content))
-)
+elided_blocks.extend({"type": "image_url", "image_url": {"url": "<elided>"}} for _ in range(len(image_content)))
log_llm_turn(
node_id="vision_fallback_subagent",
stream_id="vision_fallback",
+7 -15
@@ -451,9 +451,7 @@ def _extract_cost(response: Any, model: str) -> float:
input_tokens = int(getattr(usage, "prompt_tokens", 0) or 0)
output_tokens = int(getattr(usage, "completion_tokens", 0) or 0)
cache_read, cache_creation = _extract_cache_tokens(usage)
-fallback = _cost_from_catalog_pricing(
-model, input_tokens, output_tokens, cache_read, cache_creation
-)
+fallback = _cost_from_catalog_pricing(model, input_tokens, output_tokens, cache_read, cache_creation)
if fallback > 0:
return fallback
return 0.0
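This hunk and the next format the same fallback pattern: when the primary per-token cost lookup produces nothing, price the call from a static catalog instead. A minimal sketch of what such a catalog fallback could look like; the pricing table, the rates, and the cache-read discounting are illustrative assumptions, while the function name and parameters come from the diff.

```python
# Hypothetical per-million-token catalog; the real code presumably reads pricing
# from a model catalog rather than a hard-coded dict.
CATALOG_PRICING = {
    "example/model": {"input": 3.00, "output": 15.00, "cache_read": 0.30, "cache_write": 3.75},
}


def _cost_from_catalog_pricing(
    model: str,
    input_tokens: int,
    output_tokens: int,
    cache_read: int = 0,
    cache_creation: int = 0,
) -> float:
    pricing = CATALOG_PRICING.get(model)
    if not pricing:
        return 0.0
    # Assumption: cached reads are billed at the cache-read rate, not the input rate.
    uncached_input = max(input_tokens - cache_read, 0)
    return (
        uncached_input * pricing["input"]
        + output_tokens * pricing["output"]
        + cache_read * pricing["cache_read"]
        + cache_creation * pricing["cache_write"]
    ) / 1_000_000
```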
@@ -491,9 +489,7 @@ def _cost_from_tokens(
return float(total)
except Exception as exc:
logger.debug("[cost] cost_per_token failed for %s: %s", model, exc)
-return _cost_from_catalog_pricing(
-model, input_tokens, output_tokens, cached_tokens, cache_creation_tokens
-)
+return _cost_from_catalog_pricing(model, input_tokens, output_tokens, cached_tokens, cache_creation_tokens)
def _extract_cache_tokens(usage: Any) -> tuple[int, int]:
@@ -524,11 +520,9 @@ def _extract_cache_tokens(usage: Any) -> tuple[int, int]:
if _details is not None
else getattr(usage, "cache_read_input_tokens", 0) or 0
)
-cache_creation = (
-getattr(_details, "cache_write_tokens", 0) or 0
-if _details is not None
-else 0
-) or (getattr(usage, "cache_creation_input_tokens", 0) or 0)
+cache_creation = (getattr(_details, "cache_write_tokens", 0) or 0 if _details is not None else 0) or (
+getattr(usage, "cache_creation_input_tokens", 0) or 0
+)
return cache_read, cache_creation
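The hunk above is the tail of _extract_cache_tokens, which reads cache usage in two shapes: OpenRouter-style prompt_tokens_details (cached_tokens / cache_write_tokens) and the Anthropic-native top-level fields. A condensed sketch of that precedence, under the assumption that the context lines not shown here bind _details to getattr(usage, "prompt_tokens_details", None):

```python
from typing import Any


def extract_cache_tokens(usage: Any) -> tuple[int, int]:
    """Return (cache_read, cache_creation) token counts from a provider usage object."""
    details = getattr(usage, "prompt_tokens_details", None)
    if details is not None:
        # OpenRouter-style: the nested details object carries both counters.
        cache_read = getattr(details, "cached_tokens", 0) or 0
        cache_creation = getattr(details, "cache_write_tokens", 0) or 0
    else:
        cache_read = getattr(usage, "cache_read_input_tokens", 0) or 0
        cache_creation = 0
    # Anthropic-native fallback: if nothing was found above, use the
    # top-level cache_creation_input_tokens field.
    cache_creation = cache_creation or (getattr(usage, "cache_creation_input_tokens", 0) or 0)
    return cache_read, cache_creation
```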
@@ -2411,8 +2405,7 @@ class LiteLLMProvider(LLMProvider):
output_tokens = getattr(usage, "completion_tokens", 0) or 0
cached_tokens, cache_creation_tokens = _extract_cache_tokens(usage)
logger.debug(
"[tokens] finish-chunk usage: input=%d output=%d "
"cached=%d cache_creation=%d model=%s",
"[tokens] finish-chunk usage: input=%d output=%d cached=%d cache_creation=%d model=%s",
input_tokens,
output_tokens,
cached_tokens,
@@ -2421,8 +2414,7 @@ class LiteLLMProvider(LLMProvider):
)
logger.debug(
"[tokens] finish event: input=%d output=%d cached=%d "
"cache_creation=%d stop=%s model=%s",
"[tokens] finish event: input=%d output=%d cached=%d cache_creation=%d stop=%s model=%s",
input_tokens,
output_tokens,
cached_tokens,
+10 -5
@@ -1393,7 +1393,8 @@ class TestExtractCacheTokens:
client's perspective)."""
usage = MagicMock(spec=["prompt_tokens_details", "cache_creation_input_tokens"])
usage.prompt_tokens_details = MagicMock(
spec=["cached_tokens"], cached_tokens=120,
spec=["cached_tokens"],
cached_tokens=120,
)
usage.cache_creation_input_tokens = 0
cache_read, cache_creation = _extract_cache_tokens(usage)
@@ -1408,7 +1409,8 @@ class TestExtractCacheTokens:
OpenRouter responses, so this is the path that matters in practice."""
usage = MagicMock()
usage.prompt_tokens_details = MagicMock(
-cached_tokens=80, cache_write_tokens=50,
+cached_tokens=80,
+cache_write_tokens=50,
)
# Explicitly set the Anthropic-native field to 0 to prove we don't
# depend on it for OpenRouter responses.
@@ -1423,7 +1425,8 @@ class TestExtractCacheTokens:
the fallback so non-OpenRouter Anthropic continues to work."""
usage = MagicMock(spec=["prompt_tokens_details", "cache_creation_input_tokens"])
usage.prompt_tokens_details = MagicMock(
spec=["cached_tokens"], cached_tokens=80,
spec=["cached_tokens"],
cached_tokens=80,
)
usage.cache_creation_input_tokens = 50
cache_read, cache_creation = _extract_cache_tokens(usage)
@@ -1471,7 +1474,8 @@ class TestStreamingChunksFallbackPreservesCacheFields:
last_chunk = MagicMock()
last_chunk.usage = MagicMock()
last_chunk.usage.prompt_tokens_details = MagicMock(
-cached_tokens=0, cache_write_tokens=5601,
+cached_tokens=0,
+cache_write_tokens=5601,
)
last_chunk.usage.cache_creation_input_tokens = 0
chunks = [empty_usage_chunk, empty_usage_chunk, last_chunk]
@@ -1498,7 +1502,8 @@ class TestStreamingChunksFallbackPreservesCacheFields:
last_chunk = MagicMock()
last_chunk.usage = MagicMock()
last_chunk.usage.prompt_tokens_details = MagicMock(
-cached_tokens=5601, cache_write_tokens=0,
+cached_tokens=5601,
+cache_write_tokens=0,
)
last_chunk.usage.cache_creation_input_tokens = 0
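These tests lean on MagicMock(spec=[...]) so that any attribute access outside the listed names raises AttributeError, which proves the extractor does not silently depend on fields a given provider never sends. A hedged sketch of that pattern, written against the extract_cache_tokens sketch earlier in this page (the real suite targets _extract_cache_tokens in the provider module, and its assertions are not shown in the diff):

```python
from unittest.mock import MagicMock


def test_anthropic_native_fallback_when_details_lack_write_tokens():
    # spec=[...] makes access to any other attribute raise, so the test fails
    # if the extractor reaches for fields that are not in the listed shape.
    usage = MagicMock(spec=["prompt_tokens_details", "cache_creation_input_tokens"])
    usage.prompt_tokens_details = MagicMock(spec=["cached_tokens"], cached_tokens=80)
    usage.cache_creation_input_tokens = 50

    cache_read, cache_creation = extract_cache_tokens(usage)

    # cached_tokens is read from the details object; cache_write_tokens is absent,
    # so the Anthropic-native cache_creation_input_tokens fallback supplies 50.
    assert cache_read == 80
    assert cache_creation == 50
```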