feature: log llm turn stop reasons
@@ -26,7 +26,7 @@ Usage:
     storage = AdenCachedStorage(
         local_storage=EncryptedFileStorage(),
         aden_provider=provider,
-        cache_ttl_seconds=300, # Re-check Aden every 5 minutes
+        cache_ttl_seconds=600, # Re-check Aden every 5 minutes
     )
 
     # Create store
@@ -77,7 +77,7 @@ class AdenCachedStorage(CredentialStorage):
         storage = AdenCachedStorage(
             local_storage=EncryptedFileStorage(),
             aden_provider=provider,
-            cache_ttl_seconds=300, # 5 minutes
+            cache_ttl_seconds=600, # 5 minutes
         )
 
         store = CredentialStore(
@@ -479,6 +479,16 @@ class EventLoopNode(NodeProtocol):
                 )
                 total_input_tokens += turn_tokens.get("input", 0)
                 total_output_tokens += turn_tokens.get("output", 0)
+                await self._publish_llm_turn_complete(
+                    stream_id,
+                    node_id,
+                    stop_reason=turn_tokens.get("stop_reason", ""),
+                    model=turn_tokens.get("model", ""),
+                    input_tokens=turn_tokens.get("input", 0),
+                    output_tokens=turn_tokens.get("output", 0),
+                    execution_id=execution_id,
+                    iteration=iteration,
+                )
                 break # success — exit retry loop
 
             except Exception as e:
@@ -1283,6 +1293,8 @@ class EventLoopNode(NodeProtocol):
             elif isinstance(event, FinishEvent):
                 token_counts["input"] += event.input_tokens
                 token_counts["output"] += event.output_tokens
+                token_counts["stop_reason"] = event.stop_reason
+                token_counts["model"] = event.model
 
             elif isinstance(event, StreamErrorEvent):
                 if not event.recoverable:
@@ -1300,10 +1312,12 @@ class EventLoopNode(NodeProtocol):
 
         final_text = accumulated_text
         logger.info(
-            "[%s] LLM response: text=%r tool_calls=%s",
+            "[%s] LLM response: text=%r tool_calls=%s stop=%s model=%s",
             node_id,
             accumulated_text[:300] if accumulated_text else "(empty)",
             [tc.tool_name for tc in tool_calls] if tool_calls else "[]",
+            token_counts.get("stop_reason", "?"),
+            token_counts.get("model", "?"),
         )
 
         # Record assistant message (write-through via conversation store)
@@ -2688,6 +2702,29 @@ class EventLoopNode(NodeProtocol):
             execution_id=execution_id,
         )
 
+    async def _publish_llm_turn_complete(
+        self,
+        stream_id: str,
+        node_id: str,
+        stop_reason: str,
+        model: str,
+        input_tokens: int,
+        output_tokens: int,
+        execution_id: str = "",
+        iteration: int | None = None,
+    ) -> None:
+        if self._event_bus:
+            await self._event_bus.emit_llm_turn_complete(
+                stream_id=stream_id,
+                node_id=node_id,
+                stop_reason=stop_reason,
+                model=model,
+                input_tokens=input_tokens,
+                output_tokens=output_tokens,
+                execution_id=execution_id,
+                iteration=iteration,
+            )
+
     async def _publish_loop_completed(
         self, stream_id: str, node_id: str, iterations: int, execution_id: str = ""
     ) -> None:
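
For orientation, here is a rough sketch of the per-turn record that the loop code above forwards into _publish_llm_turn_complete. Only the keys come from the hunks above; the values are hypothetical, and the exact stop-reason strings depend on the LLM provider in use:

    # Hypothetical example of turn_tokens after a FinishEvent has been processed.
    turn_tokens = {
        "input": 1842,               # prompt tokens consumed this turn
        "output": 312,               # completion tokens produced this turn
        "stop_reason": "end_turn",   # provider-dependent, e.g. "end_turn" or "max_tokens"
        "model": "claude-sonnet-4",  # hypothetical model identifier
    }
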
@@ -88,6 +88,7 @@ class EventType(StrEnum):
     # LLM streaming observability
     LLM_TEXT_DELTA = "llm_text_delta"
     LLM_REASONING_DELTA = "llm_reasoning_delta"
+    LLM_TURN_COMPLETE = "llm_turn_complete"
 
     # Tool lifecycle
     TOOL_CALL_STARTED = "tool_call_started"
@@ -595,6 +596,36 @@ class EventBus:
             )
         )
 
+    async def emit_llm_turn_complete(
+        self,
+        stream_id: str,
+        node_id: str,
+        stop_reason: str,
+        model: str,
+        input_tokens: int,
+        output_tokens: int,
+        execution_id: str | None = None,
+        iteration: int | None = None,
+    ) -> None:
+        """Emit LLM turn completion with stop reason and model metadata."""
+        data: dict = {
+            "stop_reason": stop_reason,
+            "model": model,
+            "input_tokens": input_tokens,
+            "output_tokens": output_tokens,
+        }
+        if iteration is not None:
+            data["iteration"] = iteration
+        await self.publish(
+            AgentEvent(
+                type=EventType.LLM_TURN_COMPLETE,
+                stream_id=stream_id,
+                node_id=node_id,
+                execution_id=execution_id,
+                data=data,
+            )
+        )
+
     # === TOOL LIFECYCLE PUBLISHERS ===
 
     async def emit_tool_call_started(
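
As a minimal sketch of what a downstream consumer might do with the new LLM_TURN_COMPLETE event: the handler name, the subscription wiring, and the truncation policy below are illustrative assumptions, not part of this commit; only the data keys mirror emit_llm_turn_complete above.

    import logging

    logger = logging.getLogger(__name__)

    def handle_llm_turn_complete(event) -> None:
        # Assumes `event` is an AgentEvent of type LLM_TURN_COMPLETE, delivered by
        # whatever subscription mechanism this EventBus already exposes.
        data = event.data or {}
        stop = data.get("stop_reason", "?")
        model = data.get("model", "?")
        total = data.get("input_tokens", 0) + data.get("output_tokens", 0)
        if stop == "max_tokens":  # exact stop-reason strings are provider-dependent
            # Hypothetical policy: truncated turns usually mean the prompt or token
            # budget needs attention, so surface them at WARNING level.
            logger.warning("%s turn truncated by token limit (%d tokens)", model, total)
        else:
            logger.debug("%s finished: stop_reason=%s, %d tokens", model, stop, total)
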
@@ -24,6 +24,7 @@ DEFAULT_EVENT_TYPES = [
     EventType.NODE_LOOP_STARTED,
     EventType.NODE_LOOP_ITERATION,
     EventType.NODE_LOOP_COMPLETED,
+    EventType.LLM_TURN_COMPLETE,
     EventType.NODE_ACTION_PLAN,
     EventType.EDGE_TRAVERSED,
     EventType.GOAL_PROGRESS,
@@ -25,6 +25,7 @@ EVENT_FORMAT: dict[EventType, tuple[str, str]] = {
     EventType.NODE_LOOP_STARTED: ("@@", "cyan"),
     EventType.NODE_LOOP_ITERATION: ("..", "dim"),
     EventType.NODE_LOOP_COMPLETED: ("@@", "dim"),
+    EventType.LLM_TURN_COMPLETE: ("◆", "green"),
     EventType.NODE_STALLED: ("!!", "bold yellow"),
     EventType.NODE_INPUT_BLOCKED: ("!!", "yellow"),
     EventType.GOAL_PROGRESS: ("%%", "blue"),
@@ -87,6 +88,12 @@ def extract_event_text(event: AgentEvent) -> str:
         return f"State changed: {data.get('key', 'unknown')}"
     elif et == EventType.CLIENT_INPUT_REQUESTED:
         return "Waiting for user input"
+    elif et == EventType.LLM_TURN_COMPLETE:
+        stop = data.get("stop_reason", "?")
+        model = data.get("model", "?")
+        inp = data.get("input_tokens", 0)
+        out = data.get("output_tokens", 0)
+        return f"{model} → {stop} ({inp}+{out} tokens)"
     else:
         return f"{et.value}: {data}"
 
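
Plugging the hypothetical turn values from the earlier sketch into this new branch, extract_event_text would render the event roughly as:

    claude-sonnet-4 → end_turn (1842+312 tokens)
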
@@ -382,7 +382,7 @@ class AdenCachedStorage(CredentialStorage):
         storage = AdenCachedStorage(
             local_storage=EncryptedFileStorage(),
             aden_provider=provider,
-            cache_ttl_seconds=300, # 5 minutes
+            cache_ttl_seconds=600, # 5 minutes
         )
     """
 
@@ -448,7 +448,7 @@ from core.framework.credentials.aden import AdenCachedStorage
 storage = AdenCachedStorage(
     local_storage=EncryptedFileStorage(),
     aden_provider=provider,
-    cache_ttl_seconds=300, # Re-check Aden every 5 min
+    cache_ttl_seconds=600, # Re-check Aden every 5 min
 )
 
 store = CredentialStore(