Merge pull request #6166 from aden-hive/fix/subagent-reply-stall
Release / Create Release (push) Waiting to run

micro-fix: update escalation tests for new ESCALATION_REQUESTED flow
This commit is contained in:
RichardTang-Aden
2026-03-10 19:47:00 -07:00
committed by GitHub
3 changed files with 41 additions and 47 deletions
+8 -12
View File
@@ -970,13 +970,13 @@ class TestEscalationFlow:
)
@pytest.mark.asyncio
async def test_wait_for_response_emits_client_events(
async def test_wait_for_response_emits_escalation_event(
self,
runtime,
parent_node_spec,
subagent_node_spec,
):
"""Escalation should emit CLIENT_OUTPUT_DELTA and CLIENT_INPUT_REQUESTED events."""
"""Escalation should emit ESCALATION_REQUESTED to the queen."""
from framework.graph.event_loop_node import _EscalationReceiver
bus = EventBus()
@@ -986,7 +986,7 @@ class TestEscalationFlow:
bus_events.append(event)
bus.subscribe(
event_types=[EventType.CLIENT_OUTPUT_DELTA, EventType.CLIENT_INPUT_REQUESTED],
event_types=[EventType.ESCALATION_REQUESTED],
handler=handler,
)
@@ -1034,16 +1034,12 @@ class TestEscalationFlow:
await node._execute_subagent(ctx, "researcher", "Navigate page with CAPTCHA")
await injector
# Should have emitted both events
output_deltas = [e for e in bus_events if e.type == EventType.CLIENT_OUTPUT_DELTA]
input_requests = [e for e in bus_events if e.type == EventType.CLIENT_INPUT_REQUESTED]
# Should have emitted ESCALATION_REQUESTED
escalation_events = [e for e in bus_events if e.type == EventType.ESCALATION_REQUESTED]
assert len(output_deltas) >= 1, "Should emit CLIENT_OUTPUT_DELTA with the message"
assert output_deltas[0].data["content"] == "CAPTCHA detected on page"
assert output_deltas[0].node_id == "parent" # Shows as parent talking
assert len(input_requests) >= 1, "Should emit CLIENT_INPUT_REQUESTED for routing"
assert ":escalation:" in input_requests[0].node_id # Escalation ID for routing
assert len(escalation_events) >= 1, "Should emit ESCALATION_REQUESTED"
assert escalation_events[0].data["context"] == "CAPTCHA detected on page"
assert ":escalation:" in escalation_events[0].node_id
@pytest.mark.asyncio
async def test_non_blocking_report_still_works(
+30 -34
View File
@@ -3,9 +3,8 @@
Tests the FULL routing chain:
ExecutionStream → GraphExecutor → EventLoopNode → _execute_subagent
_report_callback registers _EscalationReceiver in executor.node_registry
emit CLIENT_INPUT_REQUESTED with escalation_id
subscriber calls stream.inject_input(escalation_id, "done")
ExecutionStream finds _EscalationReceiver in executor.node_registry
emit ESCALATION_REQUESTED (queen handles the escalation)
queen inject_worker_message() finds _EscalationReceiver via get_waiting_nodes()
receiver.inject_event("done") unblocks the subagent
subagent continues and completes
"""
@@ -227,26 +226,30 @@ async def test_escalation_e2e_through_execution_stream(tmp_path):
stream_holder: list[ExecutionStream] = []
async def escalation_handler(event: AgentEvent):
"""Simulate a TUI/runner: when CLIENT_INPUT_REQUESTED arrives with
an escalation node_id, inject the user's response via the stream."""
"""Simulate the queen: when ESCALATION_REQUESTED arrives,
find the waiting receiver and inject the response via the stream."""
all_events.append(event)
if event.type == EventType.CLIENT_INPUT_REQUESTED:
node_id = event.node_id
if ":escalation:" in node_id:
escalation_events.append(event)
# Small delay to simulate user typing
await asyncio.sleep(0.05)
# Route through the REAL inject_input chain
stream = stream_holder[0]
success = await stream.inject_input(node_id, "done logging in")
assert success, (
f"inject_input({node_id!r}) returned False — "
"escalation receiver not found in executor.node_registry"
)
inject_called.set()
if event.type == EventType.ESCALATION_REQUESTED:
escalation_events.append(event)
# Small delay to simulate queen processing
await asyncio.sleep(0.05)
# Route through the REAL inject_input chain — find the waiting
# escalation receiver via get_waiting_nodes() (mirrors what
# inject_worker_message does in the queen lifecycle tools).
stream = stream_holder[0]
waiting = stream.get_waiting_nodes()
assert waiting, "Should have a waiting escalation receiver"
target_node_id = waiting[0]["node_id"]
assert ":escalation:" in target_node_id
success = await stream.inject_input(target_node_id, "done logging in")
assert success, (
f"inject_input({target_node_id!r}) returned False — "
"escalation receiver not found in executor.node_registry"
)
inject_called.set()
bus.subscribe(
event_types=[EventType.CLIENT_INPUT_REQUESTED, EventType.CLIENT_OUTPUT_DELTA],
event_types=[EventType.ESCALATION_REQUESTED],
handler=escalation_handler,
)
@@ -297,17 +300,7 @@ async def test_escalation_e2e_through_execution_stream(tmp_path):
# 3. Escalation event has correct structure
esc_event = escalation_events[0]
assert ":escalation:" in esc_event.node_id
assert esc_event.data["prompt"] == "Login required for LinkedIn. Please log in manually."
# 4. CLIENT_OUTPUT_DELTA was emitted for the escalation message
output_deltas = [
e
for e in all_events
if e.type == EventType.CLIENT_OUTPUT_DELTA and "Login required" in e.data.get("content", "")
]
assert len(output_deltas) >= 1, (
"Should have emitted CLIENT_OUTPUT_DELTA with escalation message"
)
assert esc_event.data["context"] == "Login required for LinkedIn. Please log in manually."
# 5. The parent node got the subagent's result
assert "result" in result.output
@@ -444,7 +437,7 @@ async def test_escalation_cleanup_after_completion(tmp_path):
stream_holder: list[ExecutionStream] = []
async def auto_respond(event: AgentEvent):
if event.type == EventType.CLIENT_INPUT_REQUESTED and ":escalation:" in event.node_id:
if event.type == EventType.ESCALATION_REQUESTED:
stream = stream_holder[0]
# Snapshot the active executor's node_registry BEFORE responding
@@ -462,10 +455,13 @@ async def test_escalation_cleanup_after_completion(tmp_path):
)
await asyncio.sleep(0.02)
await stream.inject_input(event.node_id, "ok")
# Find the waiting escalation receiver and inject response
waiting = stream.get_waiting_nodes()
if waiting:
await stream.inject_input(waiting[0]["node_id"], "ok")
bus.subscribe(
event_types=[EventType.CLIENT_INPUT_REQUESTED],
event_types=[EventType.ESCALATION_REQUESTED],
handler=auto_respond,
)
+3 -1
View File
@@ -56,7 +56,9 @@ def check_openai_compatible(api_key: str, endpoint: str, name: str) -> dict:
return {"valid": False, "message": f"{name} API returned status {r.status_code}"}
def check_minimax(api_key: str, api_base: str = "https://api.minimax.io/v1", **_: str) -> dict:
def check_minimax(
api_key: str, api_base: str = "https://api.minimax.io/v1", **_: str
) -> dict:
"""Validate via chatcompletion_v2 endpoint with empty messages.
MiniMax doesn't support GET /models; their native endpoint is