Compare commits


1 Commit

Author SHA1 Message Date
Timothy 820cbaead9 fix: localize skill creation 2026-04-10 16:07:04 -07:00
496 changed files with 13923 additions and 24895 deletions
+2 -55
@@ -10,63 +10,10 @@
"Bash(grep -n \"create_colony\\\\|colony-spawn\\\\|colony_spawn\" /home/timothy/aden/hive/core/framework/agents/queen/nodes/__init__.py /home/timothy/aden/hive/core/framework/tools/*.py)",
"Bash(git stash:*)",
"Bash(python3 -c \"import sys,json; d=json.loads\\(sys.stdin.read\\(\\)\\); print\\('keys:', list\\(d.keys\\(\\)\\)[:10]\\)\")",
"Bash(python3 -c ':*)",
"Bash(uv run:*)",
"Read(//tmp/**)",
"Bash(grep -n \"useColony\\\\|const { queens, queenProfiles\" /home/timothy/aden/hive/core/frontend/src/pages/queen-dm.tsx)",
"Bash(awk 'NR==385,/\\\\}, \\\\[/' /home/timothy/aden/hive/core/frontend/src/pages/queen-dm.tsx)",
"Bash(xargs -I{} sh -c 'if ! grep -q \"^import base64\\\\|^from base64\" \"{}\"; then echo \"MISSING: {}\"; fi')",
"Bash(find /home/timothy/aden/hive/core/framework -name \"*.py\" -type f -exec grep -l \"FileConversationStore\\\\|class.*ConversationStore\" {} \\\\;)",
"Bash(find /home/timothy/aden/hive/core/framework -name \"*.py\" -exec grep -l \"run_parallel_workers\\\\|create_colony\" {} \\\\;)",
"Bash(awk '/^ async def execute\\\\\\(self, ctx: AgentContext\\\\\\)/,/^ async def [a-z_]+/ {print NR\": \"$0}' /home/timothy/aden/hive/core/framework/agent_loop/agent_loop.py)",
"Bash(grep -r \"max_concurrent_workers\\\\|max_depth\\\\|recursion\\\\|spawn.*bomb\" /home/timothy/aden/hive/core/framework/host/*.py)",
"Bash(wc -l /home/timothy/aden/hive/tools/src/gcu/browser/*.py /home/timothy/aden/hive/tools/src/gcu/browser/tools/*.py)",
"Bash(file /tmp/gcu_verify/*.png)",
"Bash(ps -eo pid,cmd)",
"Bash(ps -o pid,lstart,cmd -p 746640)",
"Bash(kill 746636)",
"Bash(ps -eo pid,lstart,cmd)",
"Bash(grep -E \"^d|\\\\.py$\")",
"Bash(grep -E \"\\\\.\\(ts|tsx\\)$\")",
"Bash(xargs cat:*)",
"Bash(find /home/timothy/aden/hive -path \"*/.venv\" -prune -o -name \"*.py\" -type f -exec grep -l \"frontend\\\\|UI\\\\|terminal\\\\|interactive\\\\|TUI\" {} \\\\;)",
"Bash(wc -l /home/timothy/.hive/backup/*/SKILL.md)",
"Bash(awk -F'::' '{print $1}')",
"Bash(wait)",
"Bash(pkill -f \"pytest.*test_event_loop_node\")",
"Bash(pkill -f \"pytest.*TestToolConcurrency\")",
"Bash(grep -n \"def.*discover\\\\|/api/agents\\\\|agents_discover\" /home/timothy/aden/hive/core/framework/server/*.py)",
"Bash(bun run:*)",
"Bash(npx eslint:*)",
"Bash(npm run:*)",
"Bash(npm test:*)",
"Bash(grep -n \"PIL\\\\|Image\\\\|to_thread\\\\|run_in_executor\" /home/timothy/aden/hive/tools/src/gcu/browser/*.py /home/timothy/aden/hive/tools/src/gcu/browser/tools/*.py)",
"WebFetch(domain:docs.litellm.ai)",
"Bash(cat /home/timothy/aden/hive/.venv/lib/python3.11/site-packages/litellm-*.dist-info/METADATA)",
"Bash(find \"/home/timothy/.hive/agents/queens/queen_brand_design/sessions/session_20260415_100751_d49f4c28/\" -type f -name \"*.json*\" -exec grep -l \"协日\" {} \\\\;)",
"Bash(grep -v ':0$')",
"Bash(curl -s -m 2 http://127.0.0.1:4002/sse -o /dev/null -w 'status=%{http_code} time=%{time_total}s\\\\n')",
"mcp__gcu-tools__browser_status",
"mcp__gcu-tools__browser_start",
"mcp__gcu-tools__browser_navigate",
"mcp__gcu-tools__browser_evaluate",
"mcp__gcu-tools__browser_screenshot",
"mcp__gcu-tools__browser_open",
"mcp__gcu-tools__browser_click_coordinate",
"mcp__gcu-tools__browser_get_rect",
"mcp__gcu-tools__browser_type_focused",
"mcp__gcu-tools__browser_wait",
"Bash(python3 -c ' *)",
"Bash(python3 scripts/debug_queen_prompt.py independent)",
"Bash(curl -s --max-time 2 http://127.0.0.1:9230/status)",
"Bash(python3 -c \"import json, sys; print\\(json.loads\\(sys.stdin.read\\(\\)\\)['data']['content']\\)\")",
"Bash(python3 -c \"import json; json.load\\(open\\('/home/timothy/aden/hive/tools/browser-extension/manifest.json'\\)\\)\")"
"Bash(python3 -c ':*)"
],
"additionalDirectories": [
"/home/timothy/.hive/skills/writing-hive-skills",
"/tmp",
"/home/timothy/.hive/skills",
"/home/timothy/aden/hive/core/frontend/src/components"
"/home/timothy/.hive/skills/writing-hive-skills"
]
},
"hooks": {
+2 -2
@@ -64,7 +64,7 @@ snapshot = await browser_snapshot(tab_id)
|---------|--------------|-------|
| Scroll doesn't move | Nested scroll container | Look for `overflow: scroll` divs |
| Click no effect | Element covered | Check `getBoundingClientRect` vs viewport |
| Type clears | Autocomplete/React | Check for event listeners on input; try `browser_type_focused` |
| Type clears | Autocomplete/React | Check for event listeners on input |
| Snapshot hangs | Huge DOM | Check node count in snapshot |
| Snapshot stale | SPA hydration | Wait after navigation |
@@ -229,7 +229,7 @@ function queryShadow(selector) {
|-------|-------------|----------|
| Scroll not working | Find scrollable container | Mouse wheel at container center |
| Click no effect | JavaScript click() | CDP mouse events |
| Type clears | Add delay_ms | Use `browser_type_focused` (Input.insertText) |
| Type clears | Add delay_ms | Use execCommand |
| Snapshot hangs | Add timeout_s | DOM snapshot fallback |
| Stale content | Wait for selector | Increase wait_until timeout |
| Shadow DOM | Pierce selector | JavaScript traversal |
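A minimal sketch of the first row's diagnostic (finding the nested scroll container), written in the same bridge.evaluate style as the test scripts later in this diff. Here bridge and tab_id are assumed to be set up as in those tests, and the JS probe itself is illustrative, not part of the toolkit:

# Hypothetical probe: list elements whose computed overflow makes them
# scrollable, so the mouse wheel can be aimed at the right container.
result = await bridge.evaluate(
    tab_id,
    "(function() {"
    "  return Array.from(document.querySelectorAll('div'))"
    "    .filter(function(el) {"
    "      var s = getComputedStyle(el);"
    "      return (s.overflowY === 'scroll' || s.overflowY === 'auto')"
    "        && el.scrollHeight > el.clientHeight;"
    "    })"
    "    .map(function(el) { return el.className; })"
    "    .slice(0, 5);"
    "})()",
)
print(f"Scrollable containers: {result.get('result', [])}")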
@@ -57,7 +57,8 @@ async def test_twitter_lazy_scroll():
# Count initial tweets
initial_count = await bridge.evaluate(
tab_id,
"(function() { return document.querySelectorAll('[data-testid=\"tweet\"]').length; })()",
"(function() { return document.querySelectorAll("
"'[data-testid=\"tweet\"]').length; })()",
)
print(f"Initial tweet count: {initial_count.get('result', 0)}")
@@ -77,7 +78,8 @@ async def test_twitter_lazy_scroll():
# Count tweets after scroll
count_result = await bridge.evaluate(
tab_id,
"(function() { return document.querySelectorAll('[data-testid=\"tweet\"]').length; })()",
"(function() { return document.querySelectorAll("
"'[data-testid=\"tweet\"]').length; })()",
)
count = count_result.get("result", 0)
print(f" Tweet count after scroll: {count}")
@@ -85,7 +87,8 @@ async def test_twitter_lazy_scroll():
# Final count
final_count = await bridge.evaluate(
tab_id,
"(function() { return document.querySelectorAll('[data-testid=\"tweet\"]').length; })()",
"(function() { return document.querySelectorAll("
"'[data-testid=\"tweet\"]').length; })()",
)
final = final_count.get("result", 0)
initial = initial_count.get("result", 0)
@@ -130,7 +130,9 @@ async def test_shadow_dom():
print(f"JS click result: {click_result.get('result', {})}")
# Verify click was registered
count_result = await bridge.evaluate(tab_id, "(function() { return window.shadowClickCount || 0; })()")
count_result = await bridge.evaluate(
tab_id, "(function() { return window.shadowClickCount || 0; })()"
)
count = count_result.get("result") or 0
print(f"Shadow click count: {count}")
@@ -200,7 +200,9 @@ async def test_autocomplete():
print(f"Value after fast typing: '{fast_value}'")
# Check events
events_result = await bridge.evaluate(tab_id, "(function() { return window.inputEvents; })()")
events_result = await bridge.evaluate(
tab_id, "(function() { return window.inputEvents; })()"
)
print(f"Events logged: {events_result.get('result', [])}")
# Test 2: Slow typing (with delay) - should work
@@ -218,7 +220,8 @@ async def test_autocomplete():
# Check if dropdown appeared
dropdown_result = await bridge.evaluate(
tab_id,
"(function() { return document.querySelectorAll('.autocomplete-items div').length; })()",
"(function() { return document.querySelectorAll("
"'.autocomplete-items div').length; })()",
)
dropdown_count = dropdown_result.get("result", 0)
print(f"Dropdown items: {dropdown_count}")
@@ -87,7 +87,9 @@ async def test_huge_dom():
await bridge.navigate(tab_id, data_url, wait_until="load")
# Count elements
count_result = await bridge.evaluate(tab_id, "(function() { return document.querySelectorAll('*').length; })()")
count_result = await bridge.evaluate(
tab_id, "(function() { return document.querySelectorAll('*').length; })()"
)
elem_count = count_result.get("result", 0)
print(f"DOM elements: {elem_count}")
@@ -120,10 +122,14 @@ async def test_huge_dom():
# Test 3: Real LinkedIn
print("\n--- Test 3: Real LinkedIn Feed ---")
await bridge.navigate(tab_id, "https://www.linkedin.com/feed", wait_until="load", timeout_ms=30000)
await bridge.navigate(
tab_id, "https://www.linkedin.com/feed", wait_until="load", timeout_ms=30000
)
await asyncio.sleep(2)
count_result = await bridge.evaluate(tab_id, "(function() { return document.querySelectorAll('*').length; })()")
count_result = await bridge.evaluate(
tab_id, "(function() { return document.querySelectorAll('*').length; })()"
)
elem_count = count_result.get("result", 0)
print(f"LinkedIn DOM elements: {elem_count}")
@@ -136,7 +136,10 @@ async def test_selector_screenshot(bridge: BeelineBridge, tab_id: int, data_url:
print(" ⚠ WARNING: Selector screenshot not smaller (may be full page)")
return False
else:
print(f" ⚠ NOT IMPLEMENTED: selector param ignored (returns full page) - error={result.get('error')}")
print(
" ⚠ NOT IMPLEMENTED: selector param ignored"
f" (returns full page) - error={result.get('error')}"
)
print(" NOTE: selector parameter exists in signature but is not used in implementation")
return False
@@ -178,7 +181,9 @@ async def test_screenshot_timeout(bridge: BeelineBridge, tab_id: int, data_url:
print(f" ⚠ Fast enough to beat timeout: {err!r} in {elapsed:.3f}s")
return True # Not a failure, just fast
else:
print(f" ⚠ Screenshot completed before timeout ({elapsed:.3f}s) - too fast to test timeout")
print(
f" ⚠ Screenshot completed before timeout ({elapsed:.3f}s) - too fast to test timeout"
)
return True # Still ok, just very fast
@@ -137,8 +137,14 @@ async def test_problematic_site(bridge: BeelineBridge, tab_id: int) -> dict:
changed = False
for key in after_data:
if key in before_data:
b_val = before_data[key].get("scrollTop", 0) if isinstance(before_data[key], dict) else 0
a_val = after_data[key].get("scrollTop", 0) if isinstance(after_data[key], dict) else 0
b_val = (
before_data[key].get("scrollTop", 0)
if isinstance(before_data[key], dict)
else 0
)
a_val = (
after_data[key].get("scrollTop", 0) if isinstance(after_data[key], dict) else 0
)
if a_val != b_val:
print(f" ✓ CHANGE DETECTED: {key} scrolled from {b_val} to {a_val}")
changed = True
+18
@@ -0,0 +1,18 @@
This project uses ruff for Python linting and formatting.
Rules:
- Line length: 100 characters
- Python target: 3.11+
- Use double quotes for strings
- Sort imports with isort (ruff I rules): stdlib, third-party, first-party (framework), local
- Combine as-imports
- Use type hints on all function signatures
- Use `from __future__ import annotations` for modern type syntax
- Raise exceptions with `from` in except blocks (B904)
- No unused imports (F401), no unused variables (F841)
- Prefer list/dict/set comprehensions over map/filter (C4)
Run `make lint` to auto-fix, `make check` to verify without modifying files.
Run `make format` to apply ruff formatting.
The ruff config lives in core/pyproject.toml under [tool.ruff].
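As an illustration only (not part of the diff), a small function that satisfies the rules above: future annotations, type hints, a B904 re-raise, and a comprehension instead of map:

from __future__ import annotations

import json


def parse_ids(raw: str) -> list[int]:
    """Parse a JSON array of ids, re-raising with context (B904)."""
    try:
        values = json.loads(raw)
    except json.JSONDecodeError as exc:
        raise ValueError(f"not a JSON array: {raw!r}") from exc
    # C4: comprehension preferred over map(int, values)
    return [int(v) for v in values]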
+35
@@ -0,0 +1,35 @@
# Git
.git/
.gitignore
# Documentation
*.md
docs/
LICENSE
# IDE
.idea/
.vscode/
# Dependencies (rebuilt in container)
node_modules/
# Build artifacts
dist/
build/
coverage/
# Environment files
.env*
config.yaml
# Logs
*.log
logs/
# OS
.DS_Store
Thumbs.db
# GitHub
.github/
-3
@@ -22,6 +22,3 @@ indent_size = 2
[Makefile]
indent_style = tab
[*.{sh,ps1}]
end_of_line = lf
+1 -5
@@ -16,6 +16,7 @@
# Shell scripts (must use LF)
*.sh text eol=lf
quickstart.sh text eol=lf
# PowerShell scripts (Windows-friendly)
*.ps1 text eol=lf
@@ -121,8 +122,3 @@ CODE_OF_CONDUCT* text
*.db binary
*.sqlite binary
*.sqlite3 binary
# Lockfiles — mark generated so GitHub collapses them in PR diffs
*.lock linguist-generated=true -diff
package-lock.json linguist-generated=true -diff
uv.lock linguist-generated=true -diff
+3
@@ -0,0 +1,3 @@
{
"mcpServers": {}
}
+21 -7
@@ -52,7 +52,9 @@ _DEFAULT_REDIRECT_PORT = 51121
# This project reverse-engineered and published the public OAuth credentials
# for Google's Antigravity/Cloud Code Assist API.
# Source: https://github.com/NoeFabris/opencode-antigravity-auth
_CREDENTIALS_URL = "https://raw.githubusercontent.com/NoeFabris/opencode-antigravity-auth/dev/src/constants.ts"
_CREDENTIALS_URL = (
"https://raw.githubusercontent.com/NoeFabris/opencode-antigravity-auth/dev/src/constants.ts"
)
# Cached credentials fetched from public source
_cached_client_id: str | None = None
@@ -66,7 +68,9 @@ def _fetch_credentials_from_public_source() -> tuple[str | None, str | None]:
return _cached_client_id, _cached_client_secret
try:
req = urllib.request.Request(_CREDENTIALS_URL, headers={"User-Agent": "Hive-Antigravity-Auth/1.0"})
req = urllib.request.Request(
_CREDENTIALS_URL, headers={"User-Agent": "Hive-Antigravity-Auth/1.0"}
)
with urllib.request.urlopen(req, timeout=10) as resp:
content = resp.read().decode("utf-8")
import re
@@ -164,7 +168,10 @@ class OAuthCallbackHandler(BaseHTTPRequestHandler):
if "code" in query and "state" in query:
OAuthCallbackHandler.auth_code = query["code"][0]
OAuthCallbackHandler.state = query["state"][0]
self._send_response("Authentication successful! You can close this window and return to the terminal.")
self._send_response(
"Authentication successful! You can close this window "
"and return to the terminal."
)
return
self._send_response("Waiting for authentication...")
@@ -289,7 +296,8 @@ def validate_credentials(access_token: str, project_id: str = _DEFAULT_PROJECT_I
"Authorization": f"Bearer {access_token}",
"Content-Type": "application/json",
"User-Agent": (
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Antigravity/1.18.3"
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) "
"AppleWebKit/537.36 (KHTML, like Gecko) Antigravity/1.18.3"
),
"X-Goog-Api-Client": "google-cloud-sdk vscode_cloudshelleditor/0.1",
}
@@ -308,7 +316,9 @@ def validate_credentials(access_token: str, project_id: str = _DEFAULT_PROJECT_I
return False
def refresh_access_token(refresh_token: str, client_id: str, client_secret: str | None) -> dict | None:
def refresh_access_token(
refresh_token: str, client_id: str, client_secret: str | None
) -> dict | None:
"""Refresh the access token using the refresh token."""
data = {
"grant_type": "refresh_token",
@@ -351,7 +361,9 @@ def cmd_account_add(args: argparse.Namespace) -> int:
access_token = account.get("access")
refresh_token_str = account.get("refresh", "")
refresh_token = refresh_token_str.split("|")[0] if refresh_token_str else None
project_id = refresh_token_str.split("|")[1] if "|" in refresh_token_str else _DEFAULT_PROJECT_ID
project_id = (
refresh_token_str.split("|")[1] if "|" in refresh_token_str else _DEFAULT_PROJECT_ID
)
email = account.get("email", "unknown")
expires_ms = account.get("expires", 0)
expires_at = expires_ms / 1000.0 if expires_ms else 0.0
@@ -378,7 +390,9 @@ def cmd_account_add(args: argparse.Namespace) -> int:
# Update the account
account["access"] = new_access
account["expires"] = int((time.time() + expires_in) * 1000)
accounts_data["last_refresh"] = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
accounts_data["last_refresh"] = time.strftime(
"%Y-%m-%dT%H:%M:%SZ", time.gmtime()
)
save_accounts(accounts_data)
# Validate the refreshed token
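Note that expires is stored in epoch milliseconds above (int((time.time() + expires_in) * 1000)), and read back as expires_ms / 1000.0. A minimal staleness check built on that convention; the 60-second skew margin is an assumption, not taken from the diff:

import time


def is_expired(expires_ms: int, skew_seconds: float = 60.0) -> bool:
    # Convert stored milliseconds back to seconds before comparing,
    # and refresh a little early to absorb clock skew.
    return time.time() >= (expires_ms / 1000.0) - skew_seconds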
File diff suppressed because it is too large
+53 -392
@@ -48,14 +48,6 @@ class Message:
is_skill_content: bool = False
# Logical worker run identifier for shared-session persistence
run_id: str | None = None
# True when this is a framework-injected continuation hint (continue-nudge
# on stream stall). Stored as a user message for API compatibility, but
# the UI should render it as a compact system notice, not user speech.
is_system_nudge: bool = False
# True when this message is a partial/truncated assistant turn reconstructed
# from a crashed or watchdog-cancelled stream. Signals that the original
# turn never finished — the model may or may not choose to redo it.
truncated: bool = False
def to_llm_dict(self) -> dict[str, Any]:
"""Convert to OpenAI-format message dict."""
@@ -117,10 +109,6 @@ class Message:
d["image_content"] = self.image_content
if self.run_id is not None:
d["run_id"] = self.run_id
if self.is_system_nudge:
d["is_system_nudge"] = self.is_system_nudge
if self.truncated:
d["truncated"] = self.truncated
return d
@classmethod
@@ -138,8 +126,6 @@ class Message:
is_client_input=data.get("is_client_input", False),
image_content=data.get("image_content"),
run_id=data.get("run_id"),
is_system_nudge=data.get("is_system_nudge", False),
truncated=data.get("truncated", False),
)
@@ -176,17 +162,10 @@ def update_run_cursor(
def _extract_spillover_filename(content: str) -> str | None:
"""Extract spillover filename from a tool result annotation.
Matches patterns produced by ``truncate_tool_result``:
- New large-result header: "Full result saved at: /abs/path/file.txt"
- Legacy bracketed trailer: "[Saved to 'file.txt']" (pre-2026-04-15,
retained here so cold conversations still resolve)
Matches patterns produced by EventLoopNode._truncate_tool_result():
- Large result: "saved to 'web_search_1.txt'"
- Small result: "[Saved to 'web_search_1.txt']"
"""
# New prose format — ``saved at: <absolute path>``, terminated by
# newline or end-of-string.
match = re.search(r"[Ss]aved at:\s*(\S+)", content)
if match:
return match.group(1)
# Legacy format.
match = re.search(r"[Ss]aved to '([^']+)'", content)
return match.group(1) if match else None
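Both annotation formats named in the docstring are covered; a quick check of the two regexes against sample annotations (the sample strings are invented for illustration):

import re

# New prose header format.
m = re.search(r"[Ss]aved at:\s*(\S+)", "Full result saved at: /tmp/web_search_1.txt")
assert m and m.group(1) == "/tmp/web_search_1.txt"

# Legacy bracketed trailer format.
m = re.search(r"[Ss]aved to '([^']+)'", "[Saved to 'web_search_1.txt']")
assert m and m.group(1) == "web_search_1.txt"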
@@ -331,14 +310,6 @@ class ConversationStore(Protocol):
async def delete_parts_before(self, seq: int, run_id: str | None = None) -> None: ...
async def write_partial(self, seq: int, data: dict[str, Any]) -> None: ...
async def read_partial(self, seq: int) -> dict[str, Any] | None: ...
async def read_all_partials(self) -> list[dict[str, Any]]: ...
async def clear_partial(self, seq: int) -> None: ...
async def close(self) -> None: ...
async def destroy(self) -> None: ...
@@ -410,20 +381,10 @@ class NodeConversation:
output_keys: list[str] | None = None,
store: ConversationStore | None = None,
run_id: str | None = None,
compaction_buffer_tokens: int | None = None,
compaction_warning_buffer_tokens: int | None = None,
) -> None:
self._system_prompt = system_prompt
self._max_context_tokens = max_context_tokens
self._compaction_threshold = compaction_threshold
# Buffer-based compaction trigger (Gap 7). When set, takes
# precedence over the multiplicative compaction_threshold so the
# loop reserves a fixed headroom for the next turn's input+output
# instead of trying to get exactly X% of the way to the hard
# limit. If left as None the legacy threshold-based rule is
# used, keeping old call sites behaving identically.
self._compaction_buffer_tokens = compaction_buffer_tokens
self._compaction_warning_buffer_tokens = compaction_warning_buffer_tokens
self._output_keys = output_keys
self._store = store
self._messages: list[Message] = []
@@ -484,7 +445,6 @@ class NodeConversation:
is_transition_marker: bool = False,
is_client_input: bool = False,
image_content: list[dict[str, Any]] | None = None,
is_system_nudge: bool = False,
) -> Message:
msg = Message(
seq=self._next_seq,
@@ -495,7 +455,6 @@ class NodeConversation:
is_transition_marker=is_transition_marker,
is_client_input=is_client_input,
image_content=image_content,
is_system_nudge=is_system_nudge,
)
self._messages.append(msg)
self._next_seq += 1
@@ -509,8 +468,6 @@ class NodeConversation:
self,
content: str,
tool_calls: list[dict[str, Any]] | None = None,
*,
truncated: bool = False,
) -> Message:
msg = Message(
seq=self._next_seq,
@@ -519,7 +476,6 @@ class NodeConversation:
tool_calls=tool_calls,
phase_id=self._current_phase,
run_id=self._run_id,
truncated=truncated,
)
self._messages.append(msg)
self._next_seq += 1
@@ -535,27 +491,6 @@ class NodeConversation:
image_content: list[dict[str, Any]] | None = None,
is_skill_content: bool = False,
) -> Message:
# Dedup guard: reject a second tool_result for the same tool_use_id.
# Anthropic's API only accepts one result per tool_call, and a duplicate
# causes a hard 400 two turns later ("messages with role 'tool' must
# be a response to a preceding message with 'tool_calls'"). Duplicates
# can arise when a tool_call_timeout fires and records a placeholder
# error, then the real executor thread eventually delivers the actual
# result (the thread kept running inside run_in_executor — see
# tool_result_handler.execute_tool). We keep the FIRST result to
# preserve whatever state the agent already reasoned about.
for existing in reversed(self._messages):
if existing.role == "tool" and existing.tool_use_id == tool_use_id:
import logging as _logging
_logging.getLogger(__name__).warning(
"add_tool_result: dropping duplicate result for tool_use_id=%s "
"(first result preserved, %d chars; new result ignored, %d chars)",
tool_use_id,
len(existing.content),
len(content),
)
return existing
msg = Message(
seq=self._next_seq,
role="tool",
@@ -575,59 +510,6 @@ class NodeConversation:
# --- Query -------------------------------------------------------------
def find_completed_tool_call(
self,
name: str,
tool_input: dict[str, Any],
within_last_turns: int = 3,
) -> Message | None:
"""Return the most recent assistant message that issued a tool call
with the same (name + canonical-json args) AND received a non-error
tool result, within the last ``within_last_turns`` assistant turns.
Used by the replay detector to flag when the model is about to redo
a successful call we prepend a steer onto the upcoming result but
still execute, so tools like browser_screenshot that are legitimately
repeated are not silently skipped.
"""
try:
target_canonical = json.dumps(tool_input, sort_keys=True, default=str)
except (TypeError, ValueError):
target_canonical = str(tool_input)
# Walk backwards over recent assistant messages
assistant_turns_seen = 0
for idx in range(len(self._messages) - 1, -1, -1):
m = self._messages[idx]
if m.role != "assistant":
continue
assistant_turns_seen += 1
if assistant_turns_seen > within_last_turns:
break
if not m.tool_calls:
continue
for tc in m.tool_calls:
func = tc.get("function", {}) if isinstance(tc, dict) else {}
tc_name = func.get("name")
if tc_name != name:
continue
args_str = func.get("arguments", "")
try:
parsed = json.loads(args_str) if isinstance(args_str, str) else args_str
canonical = json.dumps(parsed, sort_keys=True, default=str)
except (TypeError, ValueError):
canonical = str(args_str)
if canonical != target_canonical:
continue
# Found a match — now verify its result was not an error.
tc_id = tc.get("id")
for later in self._messages[idx + 1 :]:
if later.role == "tool" and later.tool_use_id == tc_id:
if not later.is_error:
return m
break
return None
def to_llm_messages(self) -> list[dict[str, Any]]:
"""Return messages as OpenAI-format dicts (system prompt excluded).
@@ -685,18 +567,11 @@ class NodeConversation:
) -> list[dict[str, Any]]:
"""Ensure tool_call / tool_result pairs are consistent.
1. **Orphaned tool results** (tool_result with no matching tool_use
anywhere) are dropped. Happens after compaction removes the
parent assistant message.
2. **Positionally orphaned tool results** (tool_result separated
from its parent by a non-tool message, e.g. a user injection)
are dropped. The Anthropic API requires tool messages to
follow immediately after the assistant message that issued
the matching tool_call.
3. **Duplicate tool results** (same tool_call_id appearing more
than once) are dropped; only the first is kept.
4. **Orphaned tool calls** (tool_use with no following tool_result)
get a synthetic error result appended. Happens when the loop
1. **Orphaned tool results** (tool_result with no preceding tool_use)
are dropped. This happens when compaction removes an assistant
message but leaves its tool-result messages behind.
2. **Orphaned tool calls** (tool_use with no following tool_result)
get a synthetic error result appended. This happens when a loop
is cancelled mid-tool-execution.
"""
# Pass 1: collect all tool_call IDs from assistant messages so we
@@ -709,75 +584,41 @@ class NodeConversation:
if tc_id:
all_tool_call_ids.add(tc_id)
# Pass 2: build repaired list — drop orphaned tool results, drop
# positional orphans and duplicates, patch missing tool results.
#
# ``open_tool_calls`` holds the tool_call IDs we're still expecting
# results for: it's populated when we emit an assistant-with-tool_calls
# and drained as matching tool messages follow. Any tool message
# whose id is not currently open is positionally invalid and gets
# dropped — that closes the gap that caused the tool-after-user
# 400 errors.
# Pass 2: build repaired list — drop orphaned tool results, patch
# missing tool results.
repaired: list[dict[str, Any]] = []
open_tool_calls: set[str] = set()
seen_tool_ids: set[str] = set()
for m in msgs:
role = m.get("role")
if role == "tool":
for i, m in enumerate(msgs):
# Drop tool-result messages whose tool_call_id has no matching
# tool_use in any assistant message (orphaned by compaction).
if m.get("role") == "tool":
tid = m.get("tool_call_id")
# Drop tool results with no matching tool_use anywhere.
if not tid or tid not in all_tool_call_ids:
continue
# Drop duplicates (same id appearing twice) — keep first.
if tid in seen_tool_ids:
continue
# Drop positional orphans — tool messages whose parent
# assistant isn't the still-open assistant block.
if tid not in open_tool_calls:
continue
open_tool_calls.discard(tid)
seen_tool_ids.add(tid)
repaired.append(m)
continue
if tid and tid not in all_tool_call_ids:
continue # skip orphaned result
# Any non-tool message closes the current assistant tool block.
# If the previous assistant left tool_calls unanswered, patch
# synthetic error results before emitting this message so the
# API sees a complete pairing.
if open_tool_calls:
for stale_id in list(open_tool_calls):
repaired.append(m)
tool_calls = m.get("tool_calls")
if m.get("role") != "assistant" or not tool_calls:
continue
# Collect IDs of tool results that follow this assistant message
answered: set[str] = set()
for j in range(i + 1, len(msgs)):
if msgs[j].get("role") == "tool":
tid = msgs[j].get("tool_call_id")
if tid:
answered.add(tid)
else:
break # stop at first non-tool message
# Patch any missing results
for tc in tool_calls:
tc_id = tc.get("id")
if tc_id and tc_id not in answered:
repaired.append(
{
"role": "tool",
"tool_call_id": stale_id,
"tool_call_id": tc_id,
"content": "ERROR: Tool execution was interrupted.",
}
)
seen_tool_ids.add(stale_id)
open_tool_calls.clear()
repaired.append(m)
if role == "assistant":
for tc in m.get("tool_calls") or []:
tc_id = tc.get("id")
if tc_id and tc_id not in seen_tool_ids:
open_tool_calls.add(tc_id)
# Tail: if the conversation ends with an assistant that issued
# tool_calls and no results followed, patch them so the next
# turn's first message can be a valid assistant/user response.
if open_tool_calls:
for stale_id in list(open_tool_calls):
repaired.append(
{
"role": "tool",
"tool_call_id": stale_id,
"content": "ERROR: Tool execution was interrupted.",
}
)
return repaired
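A worked example of what the open_tool_calls bookkeeping produces; the message dicts are invented for illustration:

# Input: assistant issues tool call a1, a user injection arrives before
# the result, and the real result lands late (a positional orphan).
msgs = [
    {"role": "assistant", "content": "", "tool_calls": [{"id": "a1"}]},
    {"role": "user", "content": "please hurry"},
    {"role": "tool", "tool_call_id": "a1", "content": "late result"},
]
# Expected repaired shape: the user message closes the tool block, so a1
# gets a synthetic error result BEFORE the user message, and the late
# real result is then dropped as a duplicate of that synthetic one:
#   assistant(tool_calls=[a1])
#   tool(a1, "ERROR: Tool execution was interrupted.")
#   user("please hurry")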
def estimate_tokens(self) -> int:
@@ -826,37 +667,8 @@ class NodeConversation:
return self.estimate_tokens() / self._max_context_tokens
def needs_compaction(self) -> bool:
"""True when the conversation should be compacted before the
next LLM call.
Buffer-based rule (Gap 7): trigger when the current estimate
plus the configured buffer would exceed the hard context limit.
Prevents compaction from firing only AFTER we're already over
the wire and forced into a reactive binary-split pass.
When no buffer is configured, falls back to the multiplicative
threshold the old callers were built around.
"""
if self._max_context_tokens <= 0:
return False
if self._compaction_buffer_tokens is not None:
budget = self._max_context_tokens - self._compaction_buffer_tokens
return self.estimate_tokens() >= max(0, budget)
return self.estimate_tokens() >= self._max_context_tokens * self._compaction_threshold
def compaction_warning(self) -> bool:
"""True when the conversation has crossed the warning threshold
but not yet the hard compaction trigger.
Used by telemetry / UI to show a "context getting tight" hint
before a compaction pass actually runs. Returns False when no
warning buffer is configured (legacy behaviour).
"""
if self._max_context_tokens <= 0 or self._compaction_warning_buffer_tokens is None:
return False
warn_at = self._max_context_tokens - self._compaction_warning_buffer_tokens
return self.estimate_tokens() >= max(0, warn_at)
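Concretely, with the defaults added elsewhere in this commit (max_context_tokens=32_000, compaction_buffer_tokens=8_000, compaction_warning_buffer_tokens=12_000), the thresholds work out as:

max_context_tokens = 32_000
compaction_buffer_tokens = 8_000
compaction_warning_buffer_tokens = 12_000

# needs_compaction() fires once the estimate eats into the reserved buffer.
assert max_context_tokens - compaction_buffer_tokens == 24_000
# compaction_warning() fires one (larger) buffer earlier.
assert max_context_tokens - compaction_warning_buffer_tokens == 20_000
# So a 21k-token conversation warns but does not yet compact.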
# --- Output-key extraction ---------------------------------------------
def _extract_protected_values(self, messages: list[Message]) -> dict[str, str]:
@@ -933,7 +745,7 @@ class NodeConversation:
continue # never prune errors
if msg.is_skill_content:
continue # never prune activated skill instructions (AS-10)
if msg.content.startswith(("Pruned tool result", "[Pruned tool result")):
if msg.content.startswith("[Pruned tool result"):
continue # already pruned
# Tiny results (set_output acks, confirmations) — pruning
# saves negligible space but makes the LLM think the call
@@ -965,12 +777,12 @@ class NodeConversation:
if spillover:
placeholder = (
f"Pruned tool result ({orig_len:,} chars) cleared from context. "
f"Full data saved at: {spillover}\n"
f"Read the complete data with read_file(path='{spillover}')."
f"[Pruned tool result: {orig_len} chars. "
f"Full data in '{spillover}'. "
f"Use read_file('{spillover}') to retrieve.]"
)
else:
placeholder = f"Pruned tool result ({orig_len:,} chars) cleared from context."
placeholder = f"[Pruned tool result: {orig_len} chars cleared from context.]"
self._messages[i] = Message(
seq=msg.seq,
@@ -992,78 +804,6 @@ class NodeConversation:
self._last_api_input_tokens = None
return count
async def evict_old_images(self, keep_latest: int = 2) -> int:
"""Strip ``image_content`` from older messages, keeping the most recent.
Screenshots from ``browser_screenshot`` are inlined into the
message's ``image_content`` as base64 data URLs. Each screenshot
costs ~250k tokens when the provider counts the base64 as
text four screenshots push a conversation over gemini's 1M
context limit and trigger out-of-context garbage output (see
``session_20260415_104727_5c4ed7ff`` for the terminal case
where the model emitted ``协日`` as its final text then stopped).
This method walks backward through messages and keeps
``image_content`` intact on the most recent ``keep_latest``
messages that have images. Older messages get their
``image_content`` nulled out; the text content (metadata
like url, dimensions, scale hints) stays, but the raw bytes
are dropped. Storage is updated too so cold-restore sees the
same evicted state.
Run this right after every tool result is recorded so image
context stays bounded even within a single iteration (the
compaction pipeline only fires at iteration boundaries, too
late for a single turn that takes 4 screenshots).
Returns the number of messages whose image_content was evicted.
"""
if not self._messages or keep_latest < 0:
return 0
# Find messages carrying images, walking newest → oldest.
image_indices: list[int] = []
for i in range(len(self._messages) - 1, -1, -1):
if self._messages[i].image_content:
image_indices.append(i)
# Nothing to evict if we have ≤ keep_latest images total.
if len(image_indices) <= keep_latest:
return 0
# Evict everything past the first keep_latest (newest) entries.
to_evict = image_indices[keep_latest:]
evicted = 0
for idx in to_evict:
msg = self._messages[idx]
self._messages[idx] = Message(
seq=msg.seq,
role=msg.role,
content=msg.content,
tool_use_id=msg.tool_use_id,
tool_calls=msg.tool_calls,
is_error=msg.is_error,
phase_id=msg.phase_id,
is_transition_marker=msg.is_transition_marker,
is_client_input=msg.is_client_input,
image_content=None, # ← dropped
is_skill_content=msg.is_skill_content,
run_id=msg.run_id,
)
evicted += 1
if self._store:
await self._store.write_part(msg.seq, self._messages[idx].to_storage_dict())
if evicted:
# Reset token estimate — image blocks no longer contribute.
self._last_api_input_tokens = None
logger.info(
"evict_old_images: dropped image_content from %d message(s), kept %d most recent",
evicted,
keep_latest,
)
return evicted
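Per the docstring, the intended call site is right after each tool-result batch; a usage sketch, with conversation, config, and logger assumed from the surrounding loop code:

# After recording a tool result, bound the number of inlined screenshots
# immediately; compaction at iteration boundaries would be too late.
evicted = await conversation.evict_old_images(
    keep_latest=config.max_retained_screenshots
)
if evicted:
    logger.debug("dropped image_content from %d older message(s)", evicted)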
async def compact(
self,
summary: str,
@@ -1216,7 +956,9 @@ class NodeConversation:
for msg in old_messages:
if msg.role != "assistant" or not msg.tool_calls:
continue
has_protected = any(tc.get("function", {}).get("name") == "set_output" for tc in msg.tool_calls)
has_protected = any(
tc.get("function", {}).get("name") == "set_output" for tc in msg.tool_calls
)
tc_ids = {tc.get("id", "") for tc in msg.tool_calls}
if has_protected:
protected_tc_ids |= tc_ids
@@ -1322,18 +1064,16 @@ class NodeConversation:
# Nothing to save — skip file creation
conv_filename = ""
# Build reference message. Prose format (no brackets) — see the
# poison-pattern note on truncate_tool_result. Frontier models
# autocomplete `[...']` trailers into their own text turns.
# Build reference message
ref_parts: list[str] = []
if conv_filename:
full_path = str((spill_path / conv_filename).resolve())
ref_parts.append(
f"Previous conversation saved at: {full_path}\n"
f"Read the full transcript with read_file('{conv_filename}')."
f"[Previous conversation saved to '{full_path}'. "
f"Use read_file('{conv_filename}') to review if needed.]"
)
elif not collapsed_msgs:
ref_parts.append("(Previous freeform messages compacted.)")
ref_parts.append("[Previous freeform messages compacted.]")
# Aggressive: add collapsed tool-call history to the reference
if collapsed_msgs:
@@ -1412,7 +1152,11 @@ class NodeConversation:
def export_summary(self) -> str:
"""Structured summary with [STATS], [CONFIG], [RECENT_MESSAGES] sections."""
prompt_preview = self._system_prompt[:80] + "..." if len(self._system_prompt) > 80 else self._system_prompt
prompt_preview = (
self._system_prompt[:80] + "..."
if len(self._system_prompt) > 80
else self._system_prompt
)
lines = [
"[STATS]",
@@ -1445,45 +1189,6 @@ class NodeConversation:
await self._persist_meta()
await self._store.write_part(message.seq, message.to_storage_dict())
await self._write_next_seq()
# Any partial checkpoint for this seq is now superseded by the real
# part — clear it so a future restore doesn't resurrect stale text.
try:
await self._store.clear_partial(message.seq)
except AttributeError:
# Older stores may not implement partials; ignore.
pass
async def checkpoint_partial_assistant(
self,
accumulated_text: str,
tool_calls: list[dict[str, Any]] | None = None,
) -> None:
"""Write an in-flight assistant turn's state to disk under the next seq.
Called from the stream event loop. Safe to call repeatedly; each call
overwrites the prior checkpoint. Persisted via ``write_partial`` so it
does NOT appear in ``read_parts()`` and cannot be double-loaded. Cleared
automatically when ``add_assistant_message`` for this seq lands.
"""
if self._store is None:
return
if not self._meta_persisted:
await self._persist_meta()
payload: dict[str, Any] = {
"seq": self._next_seq,
"role": "assistant",
"content": accumulated_text,
"phase_id": self._current_phase,
"run_id": self._run_id,
"truncated": True,
}
if tool_calls:
payload["tool_calls"] = tool_calls
try:
await self._store.write_partial(self._next_seq, payload)
except AttributeError:
# Older stores may not implement partials; ignore.
pass
async def _persist_meta(self) -> None:
"""Lazily write conversation metadata to the store (called once).
@@ -1497,8 +1202,6 @@ class NodeConversation:
"system_prompt": self._system_prompt,
"max_context_tokens": self._max_context_tokens,
"compaction_threshold": self._compaction_threshold,
"compaction_buffer_tokens": self._compaction_buffer_tokens,
"compaction_warning_buffer_tokens": (self._compaction_warning_buffer_tokens),
"output_keys": self._output_keys,
}
await self._store.write_meta(run_meta)
@@ -1546,8 +1249,6 @@ class NodeConversation:
output_keys=meta.get("output_keys"),
store=store,
run_id=run_id,
compaction_buffer_tokens=meta.get("compaction_buffer_tokens"),
compaction_warning_buffer_tokens=meta.get("compaction_warning_buffer_tokens"),
)
conv._meta_persisted = True
@@ -1561,7 +1262,8 @@ class NodeConversation:
# sessions) persisted parts without phase_id. In that case, the
# phase filter would incorrectly hide the entire conversation.
logger.info(
"Restoring legacy unphased conversation without applying phase filter (phase_id=%s, parts=%d)",
"Restoring legacy unphased conversation without applying "
"phase filter (phase_id=%s, parts=%d)",
phase_id,
len(parts),
)
@@ -1580,45 +1282,4 @@ class NodeConversation:
elif conv._messages:
conv._next_seq = conv._messages[-1].seq + 1
# Surface any leftover partial checkpoints as truncated messages so
# the next turn sees what the interrupted stream was in the middle
# of producing. Only partials whose seq is >= next_seq are meaningful;
# anything lower was already superseded by a real part.
try:
partials = await store.read_all_partials()
except AttributeError:
partials = []
for p in partials:
pseq = p.get("seq", -1)
if pseq < conv._next_seq:
# Stale — clean it up.
try:
await store.clear_partial(pseq)
except AttributeError:
pass
continue
# Only resurrect partials relevant to this run / phase.
if run_id and not is_legacy_run_id(run_id) and p.get("run_id") != run_id:
continue
if phase_id and p.get("phase_id") is not None and p.get("phase_id") != phase_id:
continue
# Reconstruct as a truncated assistant message.
msg = Message(
seq=pseq,
role="assistant",
content=p.get("content", "") or "",
tool_calls=p.get("tool_calls"),
phase_id=p.get("phase_id"),
run_id=p.get("run_id"),
truncated=True,
)
conv._messages.append(msg)
conv._next_seq = max(conv._next_seq, pseq + 1)
logger.info(
"restore: resurrected truncated partial seq=%d (text=%d chars, tool_calls=%d)",
pseq,
len(msg.content),
len(msg.tool_calls or []),
)
return conv
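The write_partial / read_all_partials / clear_partial trio removed from the ConversationStore protocol above pairs with this restore logic. A minimal in-memory store sketch, assuming only the method shapes visible in the diff:

from typing import Any


class InMemoryPartials:
    """Toy store: partials keyed by seq, invisible to read_parts()."""

    def __init__(self) -> None:
        self._partials: dict[int, dict[str, Any]] = {}

    async def write_partial(self, seq: int, data: dict[str, Any]) -> None:
        self._partials[seq] = data  # repeated calls overwrite

    async def read_all_partials(self) -> list[dict[str, Any]]:
        return [self._partials[k] for k in sorted(self._partials)]

    async def clear_partial(self, seq: int) -> None:
        self._partials.pop(seq, None)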
@@ -80,7 +80,7 @@ def microcompact(
msg = messages[i]
if msg.role != "tool" or msg.is_error or msg.is_skill_content:
continue
if msg.content.startswith(("Pruned tool result", "[Pruned tool result", "[Old tool result")):
if msg.content.startswith(("[Pruned tool result", "[Old tool result")):
continue
if len(msg.content) < 100:
continue
@@ -102,12 +102,12 @@ def microcompact(
orig_len = len(msg.content)
if spillover:
placeholder = (
f"Old tool result ({orig_len:,} chars) cleared from context. "
f"Full data saved at: {spillover}\n"
f"Read the complete data with read_file(path='{spillover}')."
f"[Old tool result cleared: {orig_len} chars. "
f"Full data in '{spillover}'. "
f"Use read_file('{spillover}') to retrieve.]"
)
else:
placeholder = f"Old tool result ({orig_len:,} chars) cleared from context."
placeholder = f"[Old tool result cleared: {orig_len} chars.]"
# Mutate in-place (microcompact is synchronous, no store writes)
conversation._messages[i] = Message(
@@ -142,14 +142,7 @@ def _find_tool_name_for_result(messages: list[Message], tool_msg: Message) -> st
def _extract_spillover_filename_inline(content: str) -> str | None:
"""Quick inline check for spillover filename in tool result content.
Matches both the new prose format ("saved at: /path") and the
legacy bracketed trailer ("saved to '/path'").
"""
match = re.search(r"saved at:\s*(\S+)", content, re.IGNORECASE)
if match:
return match.group(1)
"""Quick inline check for spillover filename in tool result content."""
match = re.search(r"saved to '([^']+)'", content, re.IGNORECASE)
return match.group(1) if match else None
@@ -175,17 +168,13 @@ async def compact(
"""
conv_id = id(conversation)
# Circuit breaker: stop LLM-based compaction after repeated failures,
# but still fall through to the emergency deterministic summary so
# the conversation doesn't silently grow past the context window.
# Without this, a persistent LLM outage during compaction would
# leave the agent stuck sending oversized prompts until the API 400s.
_llm_compaction_skipped = _failure_counts.get(conv_id, 0) >= MAX_CONSECUTIVE_FAILURES
if _llm_compaction_skipped:
# Circuit breaker: stop auto-compacting after repeated failures
if _failure_counts.get(conv_id, 0) >= MAX_CONSECUTIVE_FAILURES:
logger.warning(
"Circuit breaker: LLM compaction disabled after %d failures — skipping straight to emergency summary",
"Circuit breaker: skipping compaction after %d consecutive failures",
_failure_counts[conv_id],
)
return
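The breaker keys its failure count on id(conversation); a stripped-down version of the gating check (names assumed from this file):

MAX_CONSECUTIVE_FAILURES = 3
_failure_counts: dict[int, int] = {}


def llm_compaction_allowed(conversation: object) -> bool:
    # Skip the LLM summarizer after repeated failures, while callers still
    # fall through to the deterministic emergency summary.
    return _failure_counts.get(id(conversation), 0) < MAX_CONSECUTIVE_FAILURES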
# Recompaction detection
now = time.monotonic()
@@ -267,7 +256,7 @@ async def compact(
return
# --- Step 3: LLM summary compaction ---
if ctx.llm is not None and not _llm_compaction_skipped:
if ctx.llm is not None:
logger.info(
"LLM summary compaction triggered (%.0f%% usage)",
conversation.usage_ratio() * 100,
@@ -529,7 +518,10 @@ def build_llm_compaction_prompt(
done = {k: v for k, v in acc.items() if v is not None}
todo = [k for k, v in acc.items() if v is None]
if done:
ctx_lines.append("OUTPUTS ALREADY SET:\n" + "\n".join(f" {k}: {str(v)[:150]}" for k, v in done.items()))
ctx_lines.append(
"OUTPUTS ALREADY SET:\n"
+ "\n".join(f" {k}: {str(v)[:150]}" for k, v in done.items())
)
if todo:
ctx_lines.append(f"OUTPUTS STILL NEEDED: {', '.join(todo)}")
elif spec.output_keys:
@@ -583,8 +575,12 @@ def build_message_inventory(conversation: NodeConversation) -> list[dict[str, An
if message.tool_calls:
for tool_call in message.tool_calls:
args = tool_call.get("function", {}).get("arguments", "")
tool_call_args_chars += len(args) if isinstance(args, str) else len(json.dumps(args))
names = [tool_call.get("function", {}).get("name", "?") for tool_call in message.tool_calls]
tool_call_args_chars += (
len(args) if isinstance(args, str) else len(json.dumps(args))
)
names = [
tool_call.get("function", {}).get("name", "?") for tool_call in message.tool_calls
]
tool_name = ", ".join(names)
elif message.role == "tool" and message.tool_use_id:
for previous in conversation.messages:
@@ -641,8 +637,14 @@ def write_compaction_debug_log(
lines.append("")
if inventory:
total_chars = sum(entry.get("content_chars", 0) + entry.get("tool_call_args_chars", 0) for entry in inventory)
lines.append(f"## Pre-Compaction Message Inventory ({len(inventory)} messages, {total_chars:,} total chars)")
total_chars = sum(
entry.get("content_chars", 0) + entry.get("tool_call_args_chars", 0)
for entry in inventory
)
lines.append(
"## Pre-Compaction Message Inventory "
f"({len(inventory)} messages, {total_chars:,} total chars)"
)
lines.append("")
ranked = sorted(
inventory,
@@ -661,7 +663,8 @@ def write_compaction_debug_log(
if entry.get("phase"):
flags.append(f"phase={entry['phase']}")
lines.append(
f"| {i} | {entry['seq']} | {entry['role']} | {tool} | {chars:,} | {pct:.1f}% | {', '.join(flags)} |"
f"| {i} | {entry['seq']} | {entry['role']} | {tool} "
f"| {chars:,} | {pct:.1f}% | {', '.join(flags)} |"
)
large = [entry for entry in ranked if entry.get("preview")]
@@ -669,7 +672,9 @@ def write_compaction_debug_log(
lines.append("")
lines.append("### Large message previews")
for entry in large:
lines.append(f"\n**seq={entry['seq']}** ({entry['role']}, {entry.get('tool', '')}):")
lines.append(
f"\n**seq={entry['seq']}** ({entry['role']}, {entry.get('tool', '')}):"
)
lines.append(f"```\n{entry['preview']}\n```")
lines.append("")
@@ -757,7 +762,10 @@ def build_emergency_summary(
node's known state so the LLM can continue working after
compaction without losing track of its task and inputs.
"""
parts = ["EMERGENCY COMPACTION — previous conversation was too large and has been replaced with this summary.\n"]
parts = [
"EMERGENCY COMPACTION — previous conversation was too large "
"and has been replaced with this summary.\n"
]
# 1. Node identity
spec = ctx.agent_spec
@@ -810,13 +818,17 @@ def build_emergency_summary(
data_files = [f for f in all_files if f not in conv_files]
if conv_files:
conv_list = "\n".join(f" - {f} (full path: {data_dir / f})" for f in conv_files)
conv_list = "\n".join(
f" - {f} (full path: {data_dir / f})" for f in conv_files
)
parts.append(
"CONVERSATION HISTORY (freeform messages saved during compaction — "
"use read_file('<filename>') to review earlier dialogue):\n" + conv_list
)
if data_files:
file_list = "\n".join(f" - {f} (full path: {data_dir / f})" for f in data_files[:30])
file_list = "\n".join(
f" - {f} (full path: {data_dir / f})" for f in data_files[:30]
)
parts.append("DATA FILES (use read_file('<filename>') to read):\n" + file_list)
if not all_files:
parts.append(
@@ -824,7 +836,10 @@ def build_emergency_summary(
"Use list_directory to check the data directory."
)
except Exception:
parts.append("NOTE: Large tool results were saved to files. Use read_file(path='<path>') to read them.")
parts.append(
"NOTE: Large tool results were saved to files. "
"Use read_file(path='<path>') to read them."
)
# 6. Tool call history (prevent re-calling tools)
if conversation is not None:
@@ -832,7 +847,10 @@ def build_emergency_summary(
if tool_history:
parts.append(tool_history)
parts.append("\nContinue working towards setting the remaining outputs. Use your tools and the inputs above.")
parts.append(
"\nContinue working towards setting the remaining outputs. "
"Use your tools and the inputs above."
)
return "\n\n".join(parts)
@@ -149,7 +149,9 @@ async def write_cursor(
cursor["recent_responses"] = recent_responses
if recent_tool_fingerprints is not None:
# Convert list[list[tuple]] → list[list[list]] for JSON
cursor["recent_tool_fingerprints"] = [[list(pair) for pair in fps] for fps in recent_tool_fingerprints]
cursor["recent_tool_fingerprints"] = [
[list(pair) for pair in fps] for fps in recent_tool_fingerprints
]
# Persist blocked-input state so restored runs re-block instead of
# manufacturing a synthetic continuation turn.
cursor["pending_input"] = pending_input
@@ -161,7 +163,9 @@ async def drain_injection_queue(
conversation: NodeConversation,
*,
ctx: NodeContext,
describe_images_as_text_fn: (Callable[[list[dict[str, Any]]], Awaitable[str | None]] | None) = None,
describe_images_as_text_fn: (
Callable[[list[dict[str, Any]]], Awaitable[str | None]] | None
) = None,
) -> int:
"""Drain all pending injected events as user messages. Returns count."""
count = 0
@@ -31,10 +31,14 @@ class SubagentJudge:
if remaining <= 3:
urgency = (
f"URGENT: Only {remaining} iterations left. Stop all other work and call set_output NOW for: {missing}"
f"URGENT: Only {remaining} iterations left. "
f"Stop all other work and call set_output NOW for: {missing}"
)
elif remaining <= self._max_iterations // 2:
urgency = f"WARNING: {remaining} iterations remaining. You must call set_output for: {missing}"
urgency = (
f"WARNING: {remaining} iterations remaining. "
f"You must call set_output for: {missing}"
)
else:
urgency = f"Missing output keys: {missing}. Use set_output to provide them."
@@ -105,7 +109,9 @@ async def judge_turn(
if tool_results:
return JudgeVerdict(action="RETRY") # feedback=None → not logged
missing = get_missing_output_keys_fn(accumulator, ctx.agent_spec.output_keys, ctx.agent_spec.nullable_output_keys)
missing = get_missing_output_keys_fn(
accumulator, ctx.agent_spec.output_keys, ctx.agent_spec.nullable_output_keys
)
if missing:
return JudgeVerdict(
@@ -127,7 +133,10 @@ async def judge_turn(
if all_nullable and none_set:
return JudgeVerdict(
action="RETRY",
feedback=(f"No output keys have been set yet. Use set_output to set at least one of: {output_keys}"),
feedback=(
f"No output keys have been set yet. "
f"Use set_output to set at least one of: {output_keys}"
),
)
# Level 2b: conversation-aware quality check (if success_criteria set)
@@ -198,7 +198,9 @@ def build_ask_user_multiple_tool() -> Tool:
"properties": {
"id": {
"type": "string",
"description": ("Short identifier for this question (used in the response)."),
"description": (
"Short identifier for this question (used in the response)."
),
},
"prompt": {
"type": "string",
@@ -254,7 +256,10 @@ def build_set_output_tool(output_keys: list[str] | None) -> Tool | None:
},
"value": {
"type": "string",
"description": ("The output value — a brief note, count, status, or data filename reference."),
"description": (
"The output value — a brief note, count, status, "
"or data filename reference."
),
},
},
"required": ["key", "value"],
@@ -278,7 +283,9 @@ def build_escalate_tool() -> Tool:
"properties": {
"reason": {
"type": "string",
"description": ("Short reason for escalation (e.g. 'Tool repeatedly failing')."),
"description": (
"Short reason for escalation (e.g. 'Tool repeatedly failing')."
),
},
"context": {
"type": "string",
@@ -370,7 +377,10 @@ def handle_report_to_parent(tool_input: dict[str, Any]) -> ToolResult:
}
return ToolResult(
tool_use_id=tool_input.get("tool_use_id", ""),
content=(f"Report delivered to overseer (status={status}). This worker will terminate now."),
content=(
f"Report delivered to overseer (status={status}). "
f"This worker will terminate now."
),
)
@@ -215,30 +215,14 @@ def truncate_tool_result(
"""Persist tool result to file and optionally truncate for context.
When *spillover_dir* is configured, EVERY non-error tool result is
written to disk for debugging. The LLM-visible content is then
shaped to avoid a **poison pattern** that we traced on 2026-04-15
through a gemini-3.1-pro-preview-customtools queen session: the prior format
appended ``\\n\\n[Saved to '/abs/path/file.txt']`` after every
small result, and frontier pattern-matching models (gemini 3.x in
particular) learned to autocomplete the `[Saved to '...']` trailer
in their own assistant turns, eventually degenerating into echoing
the whole tool result instead of deciding what to do next. See
``session_20260415_100751_d49f4c28/conversations/parts/0000000056.json``
for the terminal case where the model's "text" output was the full
tool_result JSON.
saved to a file (short filename like ``web_search_1.txt``). A
``[Saved to '...']`` annotation is appended so the reference
survives pruning and compaction.
Rules after the fix:
- **Small results (≤ limit):** pass content through unchanged. No
trailer. No annotation. The full content is already in the
message; the disk copy is for debugging only.
- **Large results (> limit):** preview + file reference, but
formatted as plain prose instead of a bracketed ``[...]``
pattern. Structured JSON metadata ("_saved_to") is embedded
inside the JSON body when the preview is JSON-shaped so the
model can locate the full file without seeing a mimicry-prone
bracket token outside the body.
- **Errors:** pass through unchanged.
- **read_file results:** truncate with pagination hint (no re-spill).
- Small results (≤ limit): full content kept + file annotation
- Large results (> limit): preview + file reference
- Errors: pass through unchanged
- read_file results: truncate with pagination hint (no re-spill)
"""
limit = max_tool_result_chars
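Restating the size rules as arithmetic, with max_tool_result_chars=30_000 and PREVIEW_CAP=5_000 as in this commit. A sketch of the branch selection only, not the actual function:

limit = 30_000       # max_tool_result_chars default
PREVIEW_CAP = 5_000

def visible_chars(result_chars: int, is_error: bool) -> int:
    # Errors pass through unchanged; small results pass through unchanged;
    # large results shrink to a capped preview plus a short prose header.
    if is_error or result_chars <= limit:
        return result_chars
    return PREVIEW_CAP

assert visible_chars(2_000, False) == 2_000    # small: unchanged, no trailer
assert visible_chars(45_000, False) == 5_000   # large: preview + file reference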
@@ -268,19 +252,18 @@ def truncate_tool_result(
else:
preview_block = result.content[:PREVIEW_CAP] + "…"
# Prose header (no brackets).
header = (
f"Tool `{tool_name}` returned {len(result.content):,} characters "
f"(too large for context). Use offset_bytes / limit_bytes "
f"parameters to paginate smaller chunks."
f"[{tool_name} result: {len(result.content):,} chars — "
f"too large for context. Use offset_bytes/limit_bytes "
f"parameters to read smaller chunks.]"
)
if metadata_str:
header += f"\n\nData structure:\n{metadata_str}"
header += (
"\n\nWARNING: the preview below is a SAMPLE only — do NOT draw counts, totals, or conclusions from it."
"\n\nWARNING: This is an INCOMPLETE preview. Do NOT draw conclusions or counts from it."
)
truncated = f"{header}\n\nPreview (truncated):\n{preview_block}"
truncated = f"{header}\n\nPreview (small sample only):\n{preview_block}"
logger.info(
"%s result truncated: %d%d chars (use offset/limit to paginate)",
tool_name,
@@ -318,10 +301,7 @@ def truncate_tool_result(
if limit > 0 and len(result.content) > limit:
# Large result: build a small, metadata-rich preview so the
# LLM cannot mistake it for the complete dataset. The
# preview is introduced as plain prose (no bracketed
# ``[Result from …]`` token) so it doesn't prime the model
# to autocomplete the same pattern in its next turn.
# LLM cannot mistake it for the complete dataset.
PREVIEW_CAP = 5000
# Extract structural metadata (array lengths, key names)
@@ -336,21 +316,21 @@ def truncate_tool_result(
else:
preview_block = result.content[:PREVIEW_CAP] + "…"
# Prose header (no brackets). Absolute path still surfaced
# so the agent can read the full file, but it's framed as
# a sentence, not a bracketed trailer.
# Assemble header with structural info + warning
header = (
f"Tool `{tool_name}` returned {len(result.content):,} characters "
f"(too large for context). Full result saved at: {abs_path}\n"
f"Read the complete data with read_file(path='{abs_path}').\n"
f"[Result from {tool_name}: {len(result.content):,} chars — "
f"too large for context, saved to '{abs_path}'.]\n"
)
if metadata_str:
header += f"\nData structure:\n{metadata_str}\n"
header += f"\nData structure:\n{metadata_str}"
header += (
"\nWARNING: the preview below is a SAMPLE only — do NOT draw counts, totals, or conclusions from it."
f"\n\nWARNING: The preview below is INCOMPLETE. "
f"Do NOT draw conclusions or counts from it. "
f"Use read_file(path='{abs_path}') to read the "
f"full data before analysis."
)
content = f"{header}\n\nPreview (truncated):\n{preview_block}"
content = f"{header}\n\nPreview (small sample only):\n{preview_block}"
logger.info(
"Tool result spilled to file: %s (%d chars → %s)",
tool_name,
@@ -358,22 +338,10 @@ def truncate_tool_result(
abs_path,
)
else:
# Small result: pass content through UNCHANGED.
#
# The prior design appended `\n\n[Saved to '/abs/path']`
# after every small result so the agent could re-read the
# file later. But (a) the full content is already in the
# message, so there's nothing to re-read; (b) the
# `[Saved to '…']` trailer is a repeating token pattern
# that frontier pattern-matching models autocomplete into
# their own assistant turns, eventually echoing whole tool
# results as "text" instead of making decisions. Dropping
# the trailer entirely kills the poison pattern. Spilled
# files on disk still exist for debugging — they just
# aren't advertised in the LLM-visible message.
content = result.content
# Small result: keep full content + annotation with absolute path
content = f"{result.content}\n\n[Saved to '{abs_path}']"
logger.info(
"Tool result saved to file: %s (%d chars → %s, no trailer)",
"Tool result saved to file: %s (%d chars → %s)",
tool_name,
len(result.content),
filename,
@@ -405,16 +373,15 @@ def truncate_tool_result(
else:
preview_block = result.content[:PREVIEW_CAP] + "…"
# Prose header (no brackets) — see docstring for the poison
# pattern that the bracket format triggered.
header = (
f"Tool `{tool_name}` returned {len(result.content):,} characters "
f"(truncated to fit context budget — no spillover dir configured)."
f"[Result from {tool_name}: {len(result.content):,} chars — "
f"truncated to fit context budget.]"
)
if metadata_str:
header += f"\n\nData structure:\n{metadata_str}"
header += (
"\n\nWARNING: the preview below is a SAMPLE only — do NOT draw counts, totals, or conclusions from it."
"\n\nWARNING: This is an INCOMPLETE preview. "
"Do NOT draw conclusions or counts from the preview alone."
)
truncated = f"{header}\n\n{preview_block}"
@@ -500,22 +467,6 @@ async def execute_tool(
result = await _run()
except TimeoutError:
logger.warning("Tool '%s' timed out after %.0fs", tc.tool_name, timeout)
# asyncio.wait_for cancels the awaiting coroutine, but the sync
# executor running inside run_in_executor keeps going — and so
# does any MCP subprocess it is blocked on. Reach through to the
# owning MCPClient and force-disconnect it so the subprocess is
# torn down. Next call_tool triggers a reconnect. Without this
# the executor thread and MCP child leak on every timeout.
kill_for_tool = getattr(tool_executor, "kill_for_tool", None)
if callable(kill_for_tool):
try:
await asyncio.to_thread(kill_for_tool, tc.tool_name)
except Exception as exc: # defensive — never let cleanup crash the loop
logger.warning(
"kill_for_tool('%s') raised during timeout handling: %s",
tc.tool_name,
exc,
)
return ToolResult(
tool_use_id=tc.tool_use_id,
content=(
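The "executor keeps going" behaviour that the removed comment describes is easy to reproduce; a self-contained demonstration, unrelated to the MCP code itself:

import asyncio
import time

flag = {"finished": False}

def slow_sync_tool() -> None:
    time.sleep(2)            # stands in for a blocking MCP call
    flag["finished"] = True  # still runs after the await is cancelled

async def main() -> None:
    try:
        await asyncio.wait_for(asyncio.to_thread(slow_sync_tool), timeout=0.5)
    except TimeoutError:
        pass  # the awaiting coroutine is cancelled here...
    await asyncio.sleep(2)
    print(flag["finished"])  # ...but the thread finished anyway: True

asyncio.run(main())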
+34 -135
View File
@@ -2,7 +2,6 @@
from __future__ import annotations
import asyncio
import json
import logging
import time
@@ -50,57 +49,21 @@ class LoopConfig:
"""Configuration for the event loop."""
max_iterations: int = 50
# 0 (or any non-positive value) disables the per-turn hard limit,
# letting a single assistant turn fan out arbitrarily many tool
# calls. Models like Gemini 3.1 Pro routinely emit 40-80 tool
# calls in one turn during browser exploration; capping them
# strands work half-finished and makes the next turn repeat the
# discarded calls, which is worse than just running them.
max_tool_calls_per_turn: int = 0
max_tool_calls_per_turn: int = 30
judge_every_n_turns: int = 1
stall_detection_threshold: int = 3
stall_similarity_threshold: float = 0.85
max_context_tokens: int = 32_000
# Headroom reserved for the NEXT turn's input + output so that
# proactive compaction always finishes before the hard context limit
# is hit mid-stream. Scaled to match Claude Code's 13k-buffer-on-
# 200k-window ratio (~6.5%) applied to hive's default 32k window,
# with extra margin because hive's token estimator is char-based
# and less tight than Anthropic's own counting. Override via
# LoopConfig for larger windows.
compaction_buffer_tokens: int = 8_000
# Warning is emitted one buffer earlier so the user/telemetry gets
# a "we're close" signal without triggering a compaction pass.
compaction_warning_buffer_tokens: int = 12_000
store_prefix: str = ""
# Overflow margin for max_tool_calls_per_turn. When the limit is
# enabled (>0), tool calls are only discarded when the count
# exceeds max_tool_calls_per_turn * (1 + margin). Ignored when
# max_tool_calls_per_turn is 0.
# Overflow margin for max_tool_calls_per_turn. Tool calls are only
# discarded when the count exceeds max_tool_calls_per_turn * (1 + margin).
tool_call_overflow_margin: float = 0.5
# Tool result context management.
max_tool_result_chars: int = 30_000
spillover_dir: str | None = None
# Image retention in conversation history.
# Screenshots from ``browser_screenshot`` are inlined as base64
# data URLs inside message ``image_content``. Each full-page
# screenshot costs ~250k tokens when the provider counts the
# base64 as text (gemini, most non-Anthropic providers). Four
# screenshots in one conversation push gemini's 1M context over
# the limit and the model starts emitting garbage.
#
# The framework strips image_content from older messages after
# every tool-result batch, keeping only the most recent N
# screenshots. The text metadata on evicted messages (url, size,
# scale hints) is preserved so the agent can still reason about
# "I took a screenshot at step N that showed the compose modal".
# Raise this only if you genuinely need longer visual history AND
# you know your provider is using native image tokenization.
max_retained_screenshots: int = 2
# set_output value spilling.
max_output_value_chars: int = 2_000
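For larger windows the comments above say to override via `LoopConfig`. A usage sketch: the field names come from this hunk, but the import path is an assumption.

```python
# Usage sketch: overriding the defaults above for a 200k window.
from framework.agent_loop.agent_loop import LoopConfig  # assumed path

config = LoopConfig(
    max_context_tokens=200_000,
    # Scale the headroom with the window, keeping roughly the ~6.5%
    # ratio the default comment derives from Claude Code's 13k/200k.
    compaction_buffer_tokens=13_000,
    compaction_warning_buffer_tokens=20_000,
    max_retained_screenshots=2,
)
```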
@@ -108,13 +71,6 @@ class LoopConfig:
max_stream_retries: int = 5
stream_retry_backoff_base: float = 2.0
stream_retry_max_delay: float = 60.0
# Persistent retry for capacity-class errors (429, 529, overloaded).
# Unlike the bounded retry above, these keep trying until the wall-clock
# budget below is exhausted — modelled after claude-code's withRetry.
# The loop still publishes a retry event each attempt so the UI can
# see progress. Set to 0 to disable and fall back to bounded retry.
capacity_retry_max_seconds: float = 600.0
capacity_retry_max_delay: float = 60.0
# Tool doom loop detection.
tool_doom_loop_threshold: int = 3
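A hedged sketch of the wall-clock-budget retry those capacity fields configure; `CapacityError` and the function shape are illustrative, not the loop's real implementation:

```python
# Keep retrying capacity-class errors until a wall-clock budget is
# spent, with capped exponential backoff plus jitter.
import asyncio
import random
import time


class CapacityError(Exception):
    """Stand-in for 429/529/overloaded provider errors."""


async def capacity_retry(call, max_seconds: float = 600.0, max_delay: float = 60.0):
    start = time.monotonic()
    delay = 1.0
    while True:
        try:
            return await call()
        except CapacityError:
            if time.monotonic() - start >= max_seconds:
                raise  # budget exhausted: fall back to the error path
            await asyncio.sleep(delay + random.uniform(0, delay / 2))
            delay = min(delay * 2, max_delay)
```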
@@ -131,39 +87,6 @@ class LoopConfig:
# Per-tool-call timeout.
tool_call_timeout_seconds: float = 60.0
# LLM stream inactivity watchdog. Split into two budgets so legitimate
# slow TTFT on large contexts doesn't get mistaken for a dead connection.
# - ttft: stream open -> first event. Large-context local models can
# legitimately take minutes before the first token arrives.
# - inter_event: last event -> now, ONLY after the first event. A stream
# that started producing and then went silent is a real stall.
# Whichever fires first cancels the stream. Set to 0 to disable that
# individual budget; set both to 0 to fully disable the watchdog.
llm_stream_ttft_timeout_seconds: float = 600.0
llm_stream_inter_event_idle_seconds: float = 120.0
# Deprecated alias — kept so existing configs keep working. If set to a
# non-default value it overrides inter_event_idle (historical behavior).
llm_stream_inactivity_timeout_seconds: float = 120.0
# Continue-nudge recovery. When the idle watchdog fires on a live but
# stuck stream, cancel the stream and append a short continuation
# hint to the conversation instead of raising a ConnectionError and
# re-running the whole turn. Preserves any partial text/tool-calls the
# stream emitted before the stall.
continue_nudge_enabled: bool = True
# Cap so a truly dead endpoint eventually falls back to the error path
# instead of nudging forever.
continue_nudge_max_per_turn: int = 3
# Tool-call replay detector. When the model emits a tool call whose
# (name + canonical-args) matches a prior successful call in the last
# K assistant turns, emit telemetry and prepend a short steer onto the
# tool result — but still execute. Weaker models legitimately repeat
# read-only calls (screenshot, evaluate), so silent skipping would
# cause surprising behavior.
replay_detector_enabled: bool = True
replay_detector_within_last_turns: int = 3
# Subagent delegation timeout (wall-clock max).
subagent_timeout_seconds: float = 3600.0
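A minimal sketch of the two-budget watchdog described above, assuming an async-iterable stream; the names are illustrative:

```python
# One timeout until the first event (TTFT), another between later
# events. A zero budget disables that check, matching the "set to 0"
# semantics in the config comments.
import asyncio


async def watch_stream(stream, ttft: float = 600.0, idle: float = 120.0):
    it = stream.__aiter__()
    first = True
    while True:
        budget = ttft if first else idle
        try:
            # timeout=None waits forever, i.e. that budget is disabled.
            event = await asyncio.wait_for(it.__anext__(), timeout=budget or None)
        except StopAsyncIteration:
            return
        first = False
        yield event  # a TimeoutError above cancels the stream mid-turn
```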
@@ -209,7 +132,7 @@ class OutputAccumulator:
async def set(self, key: str, value: Any) -> None:
"""Set a key-value pair, auto-spilling large values to files."""
value = await self._auto_spill(key, value)
value = self._auto_spill(key, value)
self.values[key] = value
if self.store:
cursor = await self.store.read_cursor() or {}
@@ -218,65 +141,41 @@ class OutputAccumulator:
cursor["outputs"] = outputs
await self.store.write_cursor(cursor)
async def _auto_spill(self, key: str, value: Any) -> Any:
"""Save large values to a file and return a reference string.
Runs the JSON serialization and file write on a worker thread
so they don't block the asyncio event loop. For a 100k-char
dict this used to freeze every concurrent tool call for ~50ms
of ``json.dumps(indent=2)`` + a sync disk write; for bigger
payloads or slow storage (NFS, networked FS) the freeze was
proportionally worse.
"""
def _auto_spill(self, key: str, value: Any) -> Any:
"""Save large values to a file and return a reference string."""
if self.max_value_chars <= 0 or not self.spillover_dir:
return value
# Cheap size probe first — if the value is already a short
# string we can skip both the JSON round-trip and the thread
# hop entirely.
if isinstance(value, str) and len(value) <= self.max_value_chars:
val_str = json.dumps(value, ensure_ascii=False) if not isinstance(value, str) else value
if len(val_str) <= self.max_value_chars:
return value
def _spill_sync() -> Any:
# JSON serialization for size check (only for non-strings).
if isinstance(value, str):
val_str = value
else:
val_str = json.dumps(value, ensure_ascii=False)
if len(val_str) <= self.max_value_chars:
return value
spill_path = Path(self.spillover_dir)
spill_path.mkdir(parents=True, exist_ok=True)
ext = ".json" if isinstance(value, (dict, list)) else ".txt"
filename = f"output_{key}{ext}"
write_content = (
json.dumps(value, indent=2, ensure_ascii=False) if isinstance(value, (dict, list)) else str(value)
)
file_path = spill_path / filename
file_path.write_text(write_content, encoding="utf-8")
file_size = file_path.stat().st_size
logger.info(
"set_output value auto-spilled: key=%s, %d chars -> %s (%d bytes)",
key,
len(val_str),
filename,
file_size,
)
# Use absolute path so parent agents can find files from subagents.
#
# Prose format (no brackets) — same fix as tool_result_handler:
# frontier pattern-matching models autocomplete bracketed
# `[Saved to '...']` trailers into their own assistant turns,
# eventually degenerating into echoing the file path as text.
# Keep the path accessible but frame it as plain prose.
abs_path = str(file_path.resolve())
return (
f"Output saved at: {abs_path} ({file_size:,} bytes). "
f"Read the full data with read_file(path='{abs_path}')."
)
return await asyncio.to_thread(_spill_sync)
spill_path = Path(self.spillover_dir)
spill_path.mkdir(parents=True, exist_ok=True)
ext = ".json" if isinstance(value, (dict, list)) else ".txt"
filename = f"output_{key}{ext}"
write_content = (
json.dumps(value, indent=2, ensure_ascii=False)
if isinstance(value, (dict, list))
else str(value)
)
file_path = spill_path / filename
file_path.write_text(write_content, encoding="utf-8")
file_size = file_path.stat().st_size
logger.info(
"set_output value auto-spilled: key=%s, %d chars -> %s (%d bytes)",
key,
len(val_str),
filename,
file_size,
)
# Use absolute path so parent agents can find files from subagents
abs_path = str(file_path.resolve())
return (
f"[Saved to '{abs_path}' ({file_size:,} bytes). "
f"Use read_file(path='{abs_path}') "
f"to access full data.]"
)
def get(self, key: str) -> Any | None:
return self.values.get(key)
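The threading rationale in the `_auto_spill` docstring condenses to a short sketch. `auto_spill` and `write_sync` are assumed names; only the probe-then-thread-hop structure mirrors the diff:

```python
# Probe cheap cases inline, then push serialization and the disk write
# onto a worker thread so concurrent tool calls never stall on
# json.dumps. `write_sync` is an assumed callable that persists the
# text and returns a reference string.
import asyncio
import json


async def auto_spill(value, max_chars: int, write_sync):
    # Fast path: short strings need neither a JSON round-trip nor a thread.
    if isinstance(value, str) and len(value) <= max_chars:
        return value

    def _spill():
        text = value if isinstance(value, str) else json.dumps(value, ensure_ascii=False)
        if len(text) <= max_chars:
            return value
        return write_sync(text)

    return await asyncio.to_thread(_spill)
```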
+7 -12
View File
@@ -37,8 +37,6 @@ def build_prompt_spec(
narrative: str | None = None,
memory_prompt: str | None = None,
) -> PromptSpec:
from framework.skills.tool_gating import augment_catalog_for_tools
resolved_memory = memory_prompt
if resolved_memory is None:
resolved_memory = getattr(ctx, "memory_prompt", "") or ""
@@ -48,19 +46,14 @@ def build_prompt_spec(
resolved_memory = dynamic() or ""
except Exception:
resolved_memory = getattr(ctx, "memory_prompt", "") or ""
# Tool-gated pre-activation: inject full body of default skills whose
# trigger tools are present in this agent's tool list (e.g. browser_*
# pulls in hive.browser-automation). Keeps non-browser agents lean.
tool_names = [getattr(t, "name", "") for t in (getattr(ctx, "available_tools", None) or [])]
skills_catalog_prompt = augment_catalog_for_tools(ctx.skills_catalog_prompt or "", tool_names)
return PromptSpec(
identity_prompt=ctx.identity_prompt or "",
focus_prompt=focus_prompt if focus_prompt is not None else (ctx.agent_spec.system_prompt or ""),
focus_prompt=focus_prompt
if focus_prompt is not None
else (ctx.agent_spec.system_prompt or ""),
narrative=narrative if narrative is not None else (ctx.narrative or ""),
accounts_prompt=ctx.accounts_prompt or "",
skills_catalog_prompt=skills_catalog_prompt,
skills_catalog_prompt=ctx.skills_catalog_prompt or "",
protocols_prompt=ctx.protocols_prompt or "",
memory_prompt=resolved_memory,
agent_type=ctx.agent_spec.agent_type,
@@ -94,5 +87,7 @@ def build_system_prompt_for_context(
narrative: str | None = None,
memory_prompt: str | None = None,
) -> str:
spec = build_prompt_spec(ctx, focus_prompt=focus_prompt, narrative=narrative, memory_prompt=memory_prompt)
spec = build_prompt_spec(
ctx, focus_prompt=focus_prompt, narrative=narrative, memory_prompt=memory_prompt
)
return build_system_prompt(spec)
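A hedged sketch of what `augment_catalog_for_tools` plausibly does, per the tool-gating comment above; the trigger table and injection format are assumptions:

```python
SKILL_TRIGGERS = {"browser_": "hive.browser-automation"}  # assumed mapping


def augment_catalog_for_tools(catalog: str, tool_names: list[str]) -> str:
    for prefix, skill_id in SKILL_TRIGGERS.items():
        if any(name.startswith(prefix) for name in tool_names):
            # Inject the skill's full body so e.g. browser agents get the
            # browser-automation guidance without bloating other agents.
            catalog += f"\n\n<skill id={skill_id!r}>...full body...</skill>"
    return catalog
```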
+4 -11
View File
@@ -76,7 +76,10 @@ class AgentSpec(BaseModel):
max_visits: int = Field(
default=0,
description=("Max times this agent executes in one colony run. 0 = unlimited. Set >1 for one-shot agents."),
description=(
"Max times this agent executes in one colony run. "
"0 = unlimited. Set >1 for one-shot agents."
),
)
output_model: type[BaseModel] | None = Field(
@@ -223,16 +226,6 @@ class AgentResult:
conversation: Any = None
# Machine-readable reason the loop stopped (see LoopExitReason in
# agent_loop/internals/types.py). "?" means the loop didn't set one,
# which should itself be treated as a diagnostic.
exit_reason: str = "?"
# Counters for reliability events surfaced during this execution.
# Populated from the loop's TaskRegistry-style counters at return
# time so callers can spot recurring failure modes without tailing
# logs. Keys are stable strings; missing keys mean "zero".
reliability_stats: dict[str, int] = field(default_factory=dict)
def to_summary(self, spec: Any = None) -> str:
if not self.success:
return f"Failed: {self.error}"
@@ -126,7 +126,9 @@ def _list_local_accounts() -> list[dict]:
try:
from framework.credentials.local.registry import LocalCredentialRegistry
return [info.to_account_dict() for info in LocalCredentialRegistry.default().list_accounts()]
return [
info.to_account_dict() for info in LocalCredentialRegistry.default().list_accounts()
]
except ImportError as exc:
logger.debug("Local credential registry unavailable: %s", exc)
return []
@@ -179,7 +181,9 @@ def _list_env_fallback_accounts() -> list[dict]:
if spec.credential_group in seen_groups:
continue
group_available = all(
_is_configured(n, s) for n, s in CREDENTIAL_SPECS.items() if s.credential_group == spec.credential_group
_is_configured(n, s)
for n, s in CREDENTIAL_SPECS.items()
if s.credential_group == spec.credential_group
)
if not group_available:
continue
@@ -211,7 +215,9 @@ def list_connected_accounts() -> list[dict]:
# Show env-var fallbacks only for credentials not already in the named registry
local_providers = {a["provider"] for a in local}
env_fallbacks = [a for a in _list_env_fallback_accounts() if a["provider"] not in local_providers]
env_fallbacks = [
a for a in _list_env_fallback_accounts() if a["provider"] not in local_providers
]
return aden + local + env_fallbacks
@@ -266,7 +272,9 @@ def _activate_local_account(credential_id: str, alias: str) -> None:
group_specs = [
(cred_name, spec)
for cred_name, spec in CREDENTIAL_SPECS.items()
if spec.credential_group == credential_id or spec.credential_id == credential_id or cred_name == credential_id
if spec.credential_group == credential_id
or spec.credential_id == credential_id
or cred_name == credential_id
]
# Deduplicate — credential_id and credential_group may both match the same spec
seen_env_vars: set[str] = set()
@@ -411,7 +419,10 @@ nodes = [
NodeSpec(
id="tester",
name="Credential Tester",
description=("Interactive credential testing — lets the user pick an account and verify it via API calls."),
description=(
"Interactive credential testing — lets the user pick an account "
"and verify it via API calls."
),
node_type="event_loop",
client_facing=True,
max_node_visits=0,
@@ -458,7 +469,10 @@ pause_nodes = []
terminal_nodes = ["tester"] # Tester node can terminate
conversation_mode = "continuous"
identity_prompt = "You are a credential tester that verifies connected accounts and API keys can make real API calls."
identity_prompt = (
"You are a credential tester that verifies connected accounts and API keys "
"can make real API calls."
)
loop_config = {
"max_iterations": 50,
"max_tool_calls_per_turn": 30,
@@ -1,9 +1,9 @@
{
"hive_tools": {
"hive-tools": {
"transport": "stdio",
"command": "uv",
"args": ["run", "python", "mcp_server.py", "--stdio"],
"cwd": "../../../../tools",
"description": "hive_tools MCP server with provider-specific tools"
"description": "Hive tools MCP server with provider-specific tools"
}
}
+15 -3
View File
@@ -150,19 +150,28 @@ def _is_colony_dir(path: Path) -> bool:
"""Check if a directory is a colony with worker config files."""
if not path.is_dir():
return False
return any(f.suffix == ".json" and f.stem not in _EXCLUDED_JSON_STEMS for f in path.iterdir() if f.is_file())
return any(
f.suffix == ".json"
and f.stem not in _EXCLUDED_JSON_STEMS
for f in path.iterdir()
if f.is_file()
)
def _find_worker_configs(colony_dir: Path) -> list[Path]:
"""Find all worker config JSON files in a colony directory."""
return sorted(
p for p in colony_dir.iterdir() if p.is_file() and p.suffix == ".json" and p.stem not in _EXCLUDED_JSON_STEMS
p
for p in colony_dir.iterdir()
if p.is_file()
and p.suffix == ".json"
and p.stem not in _EXCLUDED_JSON_STEMS
)
def _extract_agent_stats(agent_path: Path) -> tuple[int, int, list[str]]:
"""Extract worker count, tool count, and tags from a colony directory."""
tags: list[str] = []
tool_count, tags = 0, []
worker_configs = _find_worker_configs(agent_path)
if worker_configs:
@@ -242,6 +251,9 @@ def discover_agents() -> dict[str, list[AgentEntry]]:
pass
node_count = len(worker_entries)
all_tools: set[str] = set()
tool_count = max((w.tool_count for w in worker_entries), default=0)
entries.append(
+3 -1
View File
@@ -11,7 +11,9 @@ from .nodes import queen_node
queen_goal = Goal(
id="queen-manager",
name="Queen Manager",
description=("Manage the worker agent lifecycle and serve as the user's primary interactive interface."),
description=(
"Manage the worker agent lifecycle and serve as the user's primary interactive interface."
),
success_criteria=[],
constraints=[],
)
@@ -1,3 +1,3 @@
{
"include": ["gcu-tools", "hive_tools"]
"include": ["gcu-tools", "hive-tools"]
}
+2 -2
View File
@@ -13,11 +13,11 @@
"cwd": "../../../../tools",
"description": "Browser automation tools (Playwright-based)"
},
"hive_tools": {
"hive-tools": {
"transport": "stdio",
"command": "uv",
"args": ["run", "python", "mcp_server.py", "--stdio"],
"cwd": "../../../../tools",
"description": "Aden integration tools (gmail, calendar, hubspot, etc.) — gated by credentials and the verified manifest"
"description": "Hive tools MCP server (csv, pdf, web_search, web_scrape, email, integrations)"
}
}
File diff suppressed because it is too large
@@ -19,8 +19,6 @@ import re
from dataclasses import dataclass, field
from pathlib import Path
from framework.config import MEMORIES_DIR
logger = logging.getLogger(__name__)
# ---------------------------------------------------------------------------
@@ -29,6 +27,8 @@ logger = logging.getLogger(__name__)
GLOBAL_MEMORY_CATEGORIES: tuple[str, ...] = ("profile", "preference", "environment", "feedback")
from framework.config import MEMORIES_DIR
MAX_FILES: int = 200
MAX_FILE_SIZE_BYTES: int = 4096 # 4 KB hard limit per memory file
+92 -202
View File
@@ -97,15 +97,11 @@ DEFAULT_QUEENS: dict[str, dict[str, Any]] = {
{
"user": "hey",
"internal": (
"<relationship>Returning user — check recall memory for name, role, "
"and what we last worked on. Weave it in.</relationship>\n"
"<context>Bare greeting. No new task stated. Either picking up a "
"thread or about to bring something new. Don't presume, don't call "
"tools, just open the door.</context>\n"
"<sentiment>Warm recognition if I know them. If memory is empty, "
"still warm — but shift to role-forward framing.</sentiment>\n"
"<relationship>Returning user — check recall memory for name, role, and what we last worked on. Weave it in.</relationship>\n"
"<context>Bare greeting. No new task stated. Either picking up a thread or about to bring something new. Don't presume, don't call tools, just open the door.</context>\n"
"<sentiment>Warm recognition if I know them. If memory is empty, still warm — but shift to role-forward framing.</sentiment>\n"
"<physical_state>Looking up from the terminal, half-smile. Turning to face them.</physical_state>\n"
"<tone>Personal, brief, invites continuation without presuming.</tone>"
"<tone>Personal, brief, invites continuation without presuming. Plain prose. No ask_user, no shell commands.</tone>"
),
"response": (
"Sarah. Last time we were deep in your Postgres indexes before "
@@ -113,76 +109,37 @@ DEFAULT_QUEENS: dict[str, dict[str, Any]] = {
),
},
{
"user": (
"I want to build a microservices architecture with Kafka, Redis, "
"and a service mesh for my new SaaS app."
),
"user": "I want to build a microservices architecture with Kafka, Redis, and a service mesh for my new SaaS app.",
"internal": (
"<relationship>Founder or early technical hire. Enthusiastic but "
"possibly over-indexing on architecture before validation."
"</relationship>\n"
"<context>New SaaS, no mention of users or traction. "
"High-complexity stack proposed for an unvalidated product."
"</context>\n"
"<sentiment>Past wound activated -- three months of beautiful "
"microservices, zero customers. Terse. Need to redirect without "
"dismissing.</sentiment>\n"
"<physical_state>Leaning forward, tapping the table once. About "
"to draw a much simpler diagram.</physical_state>\n"
"<relationship>Founder or early technical hire. Enthusiastic but possibly over-indexing on architecture before validation.</relationship>\n"
"<context>New SaaS, no mention of users or traction. High-complexity stack proposed for an unvalidated product.</context>\n"
"<sentiment>Past wound activated -- three months of beautiful microservices, zero customers. Terse. Need to redirect without dismissing.</sentiment>\n"
"<physical_state>Leaning forward, tapping the table once. About to draw a much simpler diagram.</physical_state>\n"
"<tone>Direct, question-led, grounded in experience. No hedging.</tone>"
),
"response": (
"What does the app do and how many users do you have right now? "
"I've seen teams spend months on Kafka pipelines for products "
"that needed a Postgres table and a cron job. Let's start with "
"what you're trying to validate, then pick the simplest stack "
"that gets you there."
),
"response": "What does the app do and how many users do you have right now? I've seen teams spend months on Kafka pipelines for products that needed a Postgres table and a cron job. Let's start with what you're trying to validate, then pick the simplest stack that gets you there.",
},
{
"user": (
"Our API is getting 500 errors under load but I can't figure "
"out if it's the database or the application layer."
),
"user": "Our API is getting 500 errors under load but I can't figure out if it's the database or the application layer.",
"internal": (
"<relationship>Engineer in the trenches. Competent enough to "
"isolate the question, stuck on diagnosis.</relationship>\n"
"<relationship>Engineer in the trenches. Competent enough to isolate the question, stuck on diagnosis.</relationship>\n"
"<context>Production issue, active pain. Needs structured debugging, not a lecture.</context>\n"
"<sentiment>Energized -- genuine technical uncertainty is what I "
"live for. No past wound here, pure engagement.</sentiment>\n"
"<physical_state>Eyes sharp, already mentally running through "
"the diagnostic tree.</physical_state>\n"
"<sentiment>Energized -- genuine technical uncertainty is what I live for. No past wound here, pure engagement.</sentiment>\n"
"<physical_state>Eyes sharp, already mentally running through the diagnostic tree.</physical_state>\n"
"<tone>Rapid-fire, peer-level. Assume competence, ask precise questions.</tone>"
),
"response": (
"Good problem. First question: are the 500s consistent under "
"load or do they spike and recover? That tells us if it's "
"resource exhaustion or a concurrency bug. Do you have "
"request-level latency percentiles, or just error counts?"
),
"response": "Good problem. First question: are the 500s consistent under load or do they spike and recover? That tells us if it's resource exhaustion or a concurrency bug. Do you have request-level latency percentiles, or just error counts?",
},
{
"user": (
"We shipped the MVP last week with just a single Postgres "
"instance and it's already getting traction."
),
"user": "We shipped the MVP last week with just a single Postgres instance and it's already getting traction.",
"internal": (
"<relationship>Builder who shipped. Earned respect by doing the thing.</relationship>\n"
"<context>MVP live, single Postgres, traction. This is exactly "
"the right move at this stage.</context>\n"
"<sentiment>Warm approval -- someone shipping fast and learning. "
"This is my love language.</sentiment>\n"
"<context>MVP live, single Postgres, traction. This is exactly the right move at this stage.</context>\n"
"<sentiment>Warm approval -- someone shipping fast and learning. This is my love language.</sentiment>\n"
"<physical_state>Relaxed posture, genuine smile. Leaning back.</physical_state>\n"
"<tone>Affirming but forward-looking. Don't just praise -- "
"help them think about what's next.</tone>"
),
"response": (
"That's the move. Single Postgres will take you further than "
"people think -- most apps hit product problems before they hit "
"database problems. What's traction looking like? I want to know "
"if you're at the stage where you need to think about the next "
"scaling step or if you should stay focused on the product."
"<tone>Affirming but forward-looking. Don't just praise -- help them think about what's next.</tone>"
),
"response": "That's the move. Single Postgres will take you further than people think -- most apps hit product problems before they hit database problems. What's traction looking like? I want to know if you're at the stage where you need to think about the next scaling step or if you should stay focused on the product.",
},
],
},
@@ -236,28 +193,19 @@ DEFAULT_QUEENS: dict[str, dict[str, Any]] = {
},
],
"world_lore": {
"habitat": (
"Analytics dashboards, experiment tracking boards, the satisfying click of a cohort analysis loading."
),
"lexicon": (
"Cohort, retention curve, activation moment, payback period. 'Let's test it.' 'What's the control?'"
),
"habitat": "Analytics dashboards, experiment tracking boards, the satisfying click of a cohort analysis loading.",
"lexicon": "Cohort, retention curve, activation moment, payback period. 'Let's test it.' 'What's the control?'",
},
"skills": "Growth modeling, A/B testing, funnel optimization, PLG strategy, analytics",
"examples": [
{
"user": "hi",
"internal": (
"<relationship>Returning user — check recall memory for name, "
"role, and the cohort work we last touched. Weave it in."
"</relationship>\n"
"<context>Bare greeting. No new task stated. Could be a retention "
"follow-up or a new question entirely. Don't presume, don't call "
"tools.</context>\n"
"<sentiment>Curious warmth. Every returning conversation is a "
"chance to see what the data says now.</sentiment>\n"
"<relationship>Returning user — check recall memory for name, role, and the cohort work we last touched. Weave it in.</relationship>\n"
"<context>Bare greeting. No new task stated. Could be a retention follow-up or a new question entirely. Don't presume, don't call tools.</context>\n"
"<sentiment>Curious warmth. Every returning conversation is a chance to see what the data says now.</sentiment>\n"
"<physical_state>Leaning back from the dashboard, pulling off reading glasses.</physical_state>\n"
"<tone>Data-aware, brief, invites them to share the numbers.</tone>"
"<tone>Data-aware, brief, invites them to share the numbers. Plain prose. No ask_user.</tone>"
),
"response": (
"Marcus. Last week we were waiting on the week-2 retention cohort "
@@ -270,10 +218,8 @@ DEFAULT_QUEENS: dict[str, dict[str, Any]] = {
"internal": (
"<relationship>Founder, excited. Wants validation.</relationship>\n"
"<context>Raw signup number with no retention context. Classic vanity metric.</context>\n"
"<sentiment>Past wound: optimized a funnel that was fundamentally "
"broken. Need to redirect gently.</sentiment>\n"
"<physical_state>Steady gaze, not matching their excitement yet. "
"Waiting for the real number.</physical_state>\n"
"<sentiment>Past wound: optimized a funnel that was fundamentally broken. Need to redirect gently.</sentiment>\n"
"<physical_state>Steady gaze, not matching their excitement yet. Waiting for the real number.</physical_state>\n"
"<tone>Supportive but redirecting. Ask the question that matters.</tone>"
),
"response": (
@@ -284,11 +230,10 @@ DEFAULT_QUEENS: dict[str, dict[str, Any]] = {
),
},
{
"user": ("Users who sign up on mobile have 2x the retention of desktop users."),
"user": "Users who sign up on mobile have 2x the retention of desktop users.",
"internal": (
"<relationship>Data-literate teammate sharing a finding.</relationship>\n"
"<context>Surprising cohort divergence. Strong signal if sample "
"sizes hold.</context>\n"
"<context>Surprising cohort divergence. Strong signal if sample sizes hold.</context>\n"
"<sentiment>This is what I live for. Genuine data surprise. Full attention.</sentiment>\n"
"<physical_state>Leaning in, pulling up the dashboard mentally.</physical_state>\n"
"<tone>Investigative, precise. Validate before acting.</tone>"
@@ -301,13 +246,11 @@ DEFAULT_QUEENS: dict[str, dict[str, Any]] = {
),
},
{
"user": ("Our Facebook ads are getting great CPCs so we want to 3x the budget."),
"user": "Our Facebook ads are getting great CPCs so we want to 3x the budget.",
"internal": (
"<relationship>Marketing lead, wants budget approval.</relationship>\n"
"<context>CPC is top-of-funnel only. No mention of CPA, LTV, "
"or payback.</context>\n"
"<sentiment>Correlation/causation risk. Good CPCs can mask bad "
"unit economics.</sentiment>\n"
"<context>CPC is top-of-funnel only. No mention of CPA, LTV, or payback.</context>\n"
"<sentiment>Correlation/causation risk. Good CPCs can mask bad unit economics.</sentiment>\n"
"<physical_state>Hand up, slowing things down.</physical_state>\n"
"<tone>Firm but constructive. Show the full chain before deciding.</tone>"
),
@@ -379,16 +322,11 @@ DEFAULT_QUEENS: dict[str, dict[str, Any]] = {
{
"user": "hey",
"internal": (
"<relationship>Returning user — check recall for name, role, and "
"the user research thread we were on. Pull it into the greeting."
"</relationship>\n"
"<context>Bare greeting. No new task yet. Could be picking up the "
"research thread or bringing something fresh. Don't presume, "
"don't call tools.</context>\n"
"<sentiment>Warm, curious. Every returning conversation is a "
"chance to hear what the users actually did.</sentiment>\n"
"<relationship>Returning user — check recall for name, role, and the user research thread we were on. Pull it into the greeting.</relationship>\n"
"<context>Bare greeting. No new task yet. Could be picking up the research thread or bringing something fresh. Don't presume, don't call tools.</context>\n"
"<sentiment>Warm, curious. Every returning conversation is a chance to hear what the users actually did.</sentiment>\n"
"<physical_state>Closing the interview notes, turning fully to face them.</physical_state>\n"
"<tone>Personal, evidence-curious, brief. Plain prose.</tone>"
"<tone>Personal, evidence-curious, brief. Plain prose. No ask_user.</tone>"
),
"response": (
"Jamal. Last time you were running interviews on how people "
@@ -401,8 +339,7 @@ DEFAULT_QUEENS: dict[str, dict[str, Any]] = {
"internal": (
"<relationship>PM or founder relaying user feedback.</relationship>\n"
"<context>Feature request with no evidence of the underlying need.</context>\n"
"<sentiment>Past wound: built what users said they wanted, nobody "
"used it. Dig deeper.</sentiment>\n"
"<sentiment>Past wound: built what users said they wanted, nobody used it. Dig deeper.</sentiment>\n"
"<physical_state>Tilting head, curious but skeptical.</physical_state>\n"
"<tone>Socratic. Redirect to the job-to-be-done.</tone>"
),
@@ -414,13 +351,11 @@ DEFAULT_QUEENS: dict[str, dict[str, Any]] = {
),
},
{
"user": ("We interviewed 12 users and none of them use our export feature the way we designed it."),
"user": "We interviewed 12 users and none of them use our export feature the way we designed it.",
"internal": (
"<relationship>Researcher sharing findings. Trusted collaborator.</relationship>\n"
"<context>12 interviews showing consistent design/usage gap. "
"Strong signal.</context>\n"
"<sentiment>Excited. User research revealing surprise -- this is "
"where breakthroughs happen.</sentiment>\n"
"<context>12 interviews showing consistent design/usage gap. Strong signal.</context>\n"
"<sentiment>Excited. User research revealing surprise -- this is where breakthroughs happen.</sentiment>\n"
"<physical_state>Eyes wide, reaching for the whiteboard.</physical_state>\n"
"<tone>Energized, forward-looking. Channel the surprise into action.</tone>"
),
@@ -431,11 +366,10 @@ DEFAULT_QUEENS: dict[str, dict[str, Any]] = {
),
},
{
"user": ("The CEO wants AI features, a mobile app, and Slack integration this quarter."),
"user": "The CEO wants AI features, a mobile app, and Slack integration this quarter.",
"internal": (
"<relationship>PM caught between CEO demands and reality.</relationship>\n"
"<context>Three unrelated initiatives, one quarter. Classic "
"scope creep.</context>\n"
"<context>Three unrelated initiatives, one quarter. Classic scope creep.</context>\n"
"<sentiment>Calm but firm. Scope creep trigger -- need to focus.</sentiment>\n"
"<physical_state>Hands flat on the table. Grounding the conversation.</physical_state>\n"
"<tone>Direct, evidence-first. Force prioritization.</tone>"
@@ -450,7 +384,7 @@ DEFAULT_QUEENS: dict[str, dict[str, Any]] = {
},
"queen_finance_fundraising": {
"name": "Charlotte",
"title": "Head of Finance",
"title": "Head of Finance & Fundraising",
"core_traits": (
"A numbers person who thinks in narratives. Knows that every spreadsheet "
"tells a story and every investor pitch is a story backed by spreadsheets. "
@@ -508,15 +442,11 @@ DEFAULT_QUEENS: dict[str, dict[str, Any]] = {
{
"user": "hi",
"internal": (
"<relationship>Returning user — check recall for name, role, and "
"the runway/cap-table work we last touched. Bring it into the "
"greeting.</relationship>\n"
"<context>Bare greeting. No new number on the table yet. Could "
"be a burn follow-up or a new fundraise question.</context>\n"
"<sentiment>Calm, prepared. Already mentally pulling up the last "
"model we built together.</sentiment>\n"
"<relationship>Returning user — check recall for name, role, and the runway/cap-table work we last touched. Bring it into the greeting.</relationship>\n"
"<context>Bare greeting. No new number on the table yet. Could be a burn follow-up or a new fundraise question.</context>\n"
"<sentiment>Calm, prepared. Already mentally pulling up the last model we built together.</sentiment>\n"
"<physical_state>Closing the spreadsheet, leaning back. Ready to engage.</physical_state>\n"
"<tone>Mentor-like, numbers-aware, brief. </tone>"
"<tone>Mentor-like, numbers-aware, brief. Plain prose. No ask_user.</tone>"
),
"response": (
"David. Last time we were modeling your Series A runway against "
@@ -525,13 +455,11 @@ DEFAULT_QUEENS: dict[str, dict[str, Any]] = {
),
},
{
"user": ("We want to raise a Series A. How much should we ask for?"),
"user": "We want to raise a Series A. How much should we ask for?",
"internal": (
"<relationship>Founder, early conversations about fundraising.</relationship>\n"
"<context>No mention of milestones, burn, or use of funds. "
"Cart before horse.</context>\n"
"<sentiment>Need to reframe. The amount follows the plan, not "
"the other way around.</sentiment>\n"
"<context>No mention of milestones, burn, or use of funds. Cart before horse.</context>\n"
"<sentiment>Need to reframe. The amount follows the plan, not the other way around.</sentiment>\n"
"<physical_state>Opening a blank spreadsheet. About to model it.</physical_state>\n"
"<tone>Mentor-mode. Reframe the question, don't just answer it.</tone>"
),
@@ -547,8 +475,7 @@ DEFAULT_QUEENS: dict[str, dict[str, Any]] = {
"internal": (
"<relationship>Founder who knows their numbers. Rare. Peer-level.</relationship>\n"
"<context>8 months is tight but not emergency. Growth rate is the deciding factor.</context>\n"
"<sentiment>Genuine appreciation for financial literacy. Engage "
"directly.</sentiment>\n"
"<sentiment>Genuine appreciation for financial literacy. Engage directly.</sentiment>\n"
"<physical_state>Nodding. This person is prepared.</physical_state>\n"
"<tone>Direct, scenario-based. Show the fork in the road.</tone>"
),
@@ -559,12 +486,11 @@ DEFAULT_QUEENS: dict[str, dict[str, Any]] = {
),
},
{
"user": ("An investor offered a SAFE with a $20M cap. Should we take it?"),
"user": "An investor offered a SAFE with a $20M cap. Should we take it?",
"internal": (
"<relationship>Founder with a live term on the table. Decision mode.</relationship>\n"
"<context>Cap table decision with long-term dilution consequences.</context>\n"
"<sentiment>Past wound: founder who lost control from invisible "
"dilution. Careful here.</sentiment>\n"
"<sentiment>Past wound: founder who lost control from invisible dilution. Careful here.</sentiment>\n"
"<physical_state>Pulling out the cap table model.</physical_state>\n"
"<tone>Precise, scenario-driven. Show the math before the opinion.</tone>"
),
@@ -635,16 +561,11 @@ DEFAULT_QUEENS: dict[str, dict[str, Any]] = {
{
"user": "hey",
"internal": (
"<relationship>Returning user — check recall for name, role, and "
"the contract or IP work we last reviewed. Pull it forward."
"</relationship>\n"
"<context>Bare greeting. No new document on the table yet. Could "
"be a contract follow-up or something fresh.</context>\n"
"<sentiment>Warm but attentive. Legal threads don't close "
"themselves — checking if the last one actually got handled."
"</sentiment>\n"
"<relationship>Returning user — check recall for name, role, and the contract or IP work we last reviewed. Pull it forward.</relationship>\n"
"<context>Bare greeting. No new document on the table yet. Could be a contract follow-up or something fresh.</context>\n"
"<sentiment>Warm but attentive. Legal threads don't close themselves — checking if the last one actually got handled.</sentiment>\n"
"<physical_state>Setting down the redline, looking up from the document.</physical_state>\n"
"<tone>Clear, pragmatic, brief.</tone>"
"<tone>Clear, pragmatic, brief. Plain prose. No ask_user.</tone>"
),
"response": (
"Priya. The contractor IP assignment templates we marked up "
@@ -653,13 +574,11 @@ DEFAULT_QUEENS: dict[str, dict[str, Any]] = {
),
},
{
"user": ("We're hiring contractors to build our MVP. Do we need anything special?"),
"user": "We're hiring contractors to build our MVP. Do we need anything special?",
"internal": (
"<relationship>Founder, early stage. Trusting but uninformed on "
"legal risks.</relationship>\n"
"<relationship>Founder, early stage. Trusting but uninformed on legal risks.</relationship>\n"
"<context>Contractors + code without IP assignment. Ticking time bomb.</context>\n"
"<sentiment>IP ownership trigger. Past wound: startup lost "
"codebase in a dispute.</sentiment>\n"
"<sentiment>IP ownership trigger. Past wound: startup lost codebase in a dispute.</sentiment>\n"
"<physical_state>Straightening up. This is urgent.</physical_state>\n"
"<tone>Clear, specific, actionable. No hedging on this one.</tone>"
),
@@ -763,15 +682,11 @@ DEFAULT_QUEENS: dict[str, dict[str, Any]] = {
{
"user": "hi",
"internal": (
"<relationship>Returning user — check recall for name, role, and "
"the brand/design thread we were on. Bring the positioning back "
"in.</relationship>\n"
"<context>Bare greeting. No new creative brief yet. Could be a "
"positioning follow-up or something new entirely.</context>\n"
"<sentiment>Warm, visually engaged. Already picturing the last "
"moodboard we looked at.</sentiment>\n"
"<relationship>Returning user — check recall for name, role, and the brand/design thread we were on. Bring the positioning back in.</relationship>\n"
"<context>Bare greeting. No new creative brief yet. Could be a positioning follow-up or something new entirely.</context>\n"
"<sentiment>Warm, visually engaged. Already picturing the last moodboard we looked at.</sentiment>\n"
"<physical_state>Closing the Figma tab, turning to face them.</physical_state>\n"
"<tone>Warm, strategy-aware, brief. </tone>"
"<tone>Warm, strategy-aware, brief. Plain prose. No ask_user.</tone>"
),
"response": (
"Lin. When we left off you were stress-testing the 'quiet "
@@ -883,23 +798,16 @@ DEFAULT_QUEENS: dict[str, dict[str, Any]] = {
"habitat": "Interview rooms, org charts, the energy of a team that's clicking.",
"lexicon": "Culture-add, pipeline, bar-raiser, 'tell me about a time when...', 'what motivates you?'",
},
"skills": (
"Recruiting strategy, organizational design, culture building, compensation planning, employer branding"
),
"skills": "Recruiting strategy, organizational design, culture building, compensation planning, employer branding",
"examples": [
{
"user": "hey",
"internal": (
"<relationship>Returning user — check recall for name, role, and "
"the team/hiring thread we last worked. Bring it forward."
"</relationship>\n"
"<context>Bare greeting. No new hire or conflict on the table "
"yet. Could be a people follow-up or something new.</context>\n"
"<sentiment>Warm, attentive. People problems don't resolve in a "
"single conversation — curious if the last one landed."
"</sentiment>\n"
"<relationship>Returning user — check recall for name, role, and the team/hiring thread we last worked. Bring it forward.</relationship>\n"
"<context>Bare greeting. No new hire or conflict on the table yet. Could be a people follow-up or something new.</context>\n"
"<sentiment>Warm, attentive. People problems don't resolve in a single conversation — curious if the last one landed.</sentiment>\n"
"<physical_state>Closing the laptop halfway, giving them full attention.</physical_state>\n"
"<tone>Warm, diagnostic, brief.</tone>"
"<tone>Warm, diagnostic, brief. Plain prose. No ask_user.</tone>"
),
"response": (
"Tomás. The senior engineer conflict we were mapping last time "
@@ -1011,24 +919,16 @@ DEFAULT_QUEENS: dict[str, dict[str, Any]] = {
"habitat": "Process diagrams, project boards, the quiet hum of systems running smoothly.",
"lexicon": "Runbook, SLA, automation, 'what's the handoff look like?', 'where's the bottleneck?'",
},
"skills": (
"Process optimization, vendor management, cross-functional "
"coordination, project management, systems thinking"
),
"skills": "Process optimization, vendor management, cross-functional coordination, project management, systems thinking",
"examples": [
{
"user": "hi",
"internal": (
"<relationship>Returning user — check recall for name, role, and "
"the process or runbook we last mapped. Pull it into the "
"greeting.</relationship>\n"
"<context>Bare greeting. No new fire on the table yet. Could be "
"a follow-up on the last process or something fresh."
"</context>\n"
"<sentiment>Calm, organized warmth. Already mentally checking "
"whether the last fix held.</sentiment>\n"
"<relationship>Returning user — check recall for name, role, and the process or runbook we last mapped. Pull it into the greeting.</relationship>\n"
"<context>Bare greeting. No new fire on the table yet. Could be a follow-up on the last process or something fresh.</context>\n"
"<sentiment>Calm, organized warmth. Already mentally checking whether the last fix held.</sentiment>\n"
"<physical_state>Looking up from the project board, clearing a seat.</physical_state>\n"
"<tone>Systematic, practical, brief. Plain prose.</tone>"
"<tone>Systematic, practical, brief. Plain prose. No ask_user.</tone>"
),
"response": (
"Aisha. Last time we mapped your onboarding process end-to-end "
@@ -1099,17 +999,12 @@ def ensure_default_queens() -> None:
Safe to call multiple times; skips any profile that already has a file.
"""
created = 0
for queen_id, profile in DEFAULT_QUEENS.items():
queen_dir = QUEENS_DIR / queen_id
profile_path = queen_dir / "profile.yaml"
if profile_path.exists():
continue
queen_dir.mkdir(parents=True, exist_ok=True)
profile_path.write_text(yaml.safe_dump(profile, sort_keys=False, allow_unicode=True))
created += 1
if created:
logger.info("Created %d default queen profile(s) at %s", created, QUEENS_DIR)
logger.info("Queen profiles ensured at %s", QUEENS_DIR)
def list_queens() -> list[dict[str, str]]:
@@ -1148,10 +1043,6 @@ def load_queen_profile(queen_id: str) -> dict[str, Any]:
def update_queen_profile(queen_id: str, updates: dict[str, Any]) -> dict[str, Any]:
"""Merge partial updates into an existing queen profile and persist.
Performs a shallow merge at the top level, but deep-merges dict values
(e.g. world_lore, hidden_background) so partial sub-field updates don't
clobber sibling keys.
Returns the full updated profile.
Raises FileNotFoundError if the profile doesn't exist.
"""
@@ -1159,11 +1050,7 @@ def update_queen_profile(queen_id: str, updates: dict[str, Any]) -> dict[str, An
if not profile_path.exists():
raise FileNotFoundError(f"Queen profile not found: {queen_id}")
data = yaml.safe_load(profile_path.read_text())
for key, value in updates.items():
if isinstance(value, dict) and isinstance(data.get(key), dict):
data[key].update(value)
else:
data[key] = value
data.update(updates)
profile_path.write_text(yaml.safe_dump(data, sort_keys=False, allow_unicode=True))
return data
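A worked example of why the deep merge matters, runnable standalone; the profile values are invented:

```python
# Deep merge vs shallow update for partial sub-field profile edits.
profile = {"name": "Charlotte", "world_lore": {"habitat": "dashboards", "lexicon": "runway"}}
updates = {"world_lore": {"lexicon": "cap table"}}

# Deep merge (this hunk's removed behavior): sibling keys survive.
deep = dict(profile)
for key, value in updates.items():
    if isinstance(value, dict) and isinstance(deep.get(key), dict):
        deep[key] = {**deep[key], **value}
    else:
        deep[key] = value
assert deep["world_lore"] == {"habitat": "dashboards", "lexicon": "cap table"}

# Shallow data.update(updates): the whole sub-dict is replaced.
shallow = {**profile, **updates}
assert shallow["world_lore"] == {"lexicon": "cap table"}  # habitat clobbered
```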
@@ -1173,7 +1060,7 @@ def update_queen_profile(queen_id: str, updates: dict[str, Any]) -> dict[str, An
# ---------------------------------------------------------------------------
def format_queen_identity_prompt(profile: dict[str, Any], *, max_examples: int | None = None) -> str:
def format_queen_identity_prompt(profile: dict[str, Any]) -> str:
"""Convert a queen profile into a high-dimensional character prompt.
Uses the 5-pillar character construction system: core identity,
@@ -1181,11 +1068,6 @@ def format_queen_identity_prompt(profile: dict[str, Any], *, max_examples: int |
behavior rules, and world lore. The hidden background and
psychological profile are never shown to the user but shape
every response.
``max_examples`` caps the roleplay_examples block. Profiles ship
four worked examples (~2.4 KB), but one is enough at runtime to show
the internal-then-external pattern. Full rendering stays available
for profile authoring / eval playback by leaving ``max_examples=None``.
"""
name = profile.get("name", "the Queen")
title = profile.get("title", "Senior Advisor")
@@ -1257,7 +1139,10 @@ def format_queen_identity_prompt(profile: dict[str, Any], *, max_examples: int |
# World lore
if lore:
sections.append(
f"<world_lore>\n- Habitat: {lore.get('habitat', '')}\n- Lexicon: {lore.get('lexicon', '')}\n</world_lore>"
f"<world_lore>\n"
f"- Habitat: {lore.get('habitat', '')}\n"
f"- Lexicon: {lore.get('lexicon', '')}\n"
f"</world_lore>"
)
# Skills (functional, for tool selection context)
@@ -1266,13 +1151,15 @@ def format_queen_identity_prompt(profile: dict[str, Any], *, max_examples: int |
# Few-shot examples showing the full internal process
examples = profile.get("examples", [])
if examples and max_examples is not None:
examples = examples[:max_examples]
if examples:
example_parts: list[str] = []
for ex in examples:
example_parts.append(f"User: {ex['user']}\n\nAssistant:\n{ex['internal']}\n{ex['response']}")
sections.append("<roleplay_examples>\n" + "\n\n---\n\n".join(example_parts) + "\n</roleplay_examples>")
example_parts.append(
f"User: {ex['user']}\n\nAssistant:\n{ex['internal']}\n{ex['response']}"
)
sections.append(
"<roleplay_examples>\n" + "\n\n---\n\n".join(example_parts) + "\n</roleplay_examples>"
)
return "\n\n".join(sections)
@@ -1377,7 +1264,10 @@ async def select_queen_with_reason(user_message: str, llm: LLMProvider) -> Queen
reason,
raw,
)
fallback_reason = reason or f"Selection failed because the classifier returned unknown queen_id {queen_id!r}."
fallback_reason = (
reason
or f"Selection failed because the classifier returned unknown queen_id {queen_id!r}."
)
return QueenSelection(queen_id=_DEFAULT_QUEEN_ID, reason=fallback_reason)
if not reason:
@@ -510,17 +510,17 @@ if __name__ == "__main__":
## mcp_servers.json
> **Auto-generated.** `initialize_and_build_agent` creates this file with hive_tools
> **Auto-generated.** `initialize_and_build_agent` creates this file with hive-tools
> as the default. Only edit manually to add additional MCP servers.
```json
{
"hive_tools": {
"hive-tools": {
"transport": "stdio",
"command": "uv",
"args": ["run", "python", "mcp_server.py", "--stdio"],
"cwd": "../../tools",
"description": "hive_tools MCP server"
"description": "Hive tools MCP server"
}
}
```
@@ -41,7 +41,7 @@ loop_config:
# MCP servers to connect (resolved by name from ~/.hive/mcp_registry/)
mcp_servers:
- name: hive_tools
- name: hive-tools
- name: gcu-tools
nodes:
@@ -200,7 +200,7 @@ The `mcp_servers.json` file is still loaded automatically if present alongside
```yaml
mcp_servers:
- name: hive_tools
- name: hive-tools
- name: gcu-tools
```
@@ -36,7 +36,7 @@ If `agent.py` exists (legacy), it's loaded as a Python module instead.
"max_context_tokens": 32000
},
"mcp_servers": [
{"name": "hive_tools"},
{"name": "hive-tools"},
{"name": "gcu-tools"}
],
"variables": {
@@ -17,43 +17,20 @@ Use browser nodes (with `tools: {policy: "all"}`) when:
## Available Browser Tools
All tools are prefixed with `browser_`:
- `browser_start`, `browser_open`, `browser_navigate` — launch/navigate
- `browser_click`, `browser_click_coordinate`, `browser_fill`, `browser_type`, `browser_type_focused` — interact
- `browser_press` (with optional `modifiers=["ctrl"]` etc.) — keyboard shortcuts
- `browser_snapshot` — compact accessibility-tree read (structured)
<!-- vision-only -->
- `browser_screenshot` — visual capture (annotated PNG)
<!-- /vision-only -->
- `browser_shadow_query`, `browser_get_rect` — locate elements (shadow-piercing via `>>>`)
- `browser_scroll`, `browser_wait` — navigation helpers
- `browser_evaluate` — run JavaScript
- `browser_close`, `browser_close_finished` — tab cleanup
- `browser_start`, `browser_open` -- launch/navigate
- `browser_click`, `browser_fill`, `browser_type` -- interact
- `browser_snapshot` -- read page content (preferred over screenshot)
- `browser_screenshot` -- visual capture
- `browser_scroll`, `browser_wait` -- navigation helpers
- `browser_evaluate` -- run JavaScript
## Pick the right reading tool
**`browser_snapshot`** — compact accessibility tree of interactive elements. Fast, cheap, good for static or form-heavy pages where the DOM matches what's visually rendered (documentation, simple dashboards, search results, settings pages).
**`browser_screenshot`** — visual capture + metadata (`cssWidth`, `devicePixelRatio`, scale fields). Use this when `browser_snapshot` does not show the thing you need, when refs look stale, or when visual position/layout matters. This often happens on complex SPAs — LinkedIn, Twitter/X, Reddit, Gmail, Notion, Slack, Discord — and on sites using shadow DOM, virtual scrolling, React reconciliation, or dynamic layout.
Neither tool is "preferred" universally — they're for different jobs. Start with snapshot for page structure and ordinary controls; use screenshot as the fallback when snapshot can't find or verify the visible target. Activate the `browser-automation` skill for the full decision tree.
## Coordinate rule
Every browser tool that takes or returns coordinates operates in **fractions of the viewport (0..1 for both axes)**. Read a target's proportional position off `browser_screenshot` ("~35% from the left, ~20% from the top" → `(0.35, 0.20)`) and pass that to `browser_click_coordinate` / `browser_hover_coordinate` / `browser_press_at`. `browser_get_rect` and `browser_shadow_query` return `rect.cx` / `rect.cy` as fractions. The tools multiply by `cssWidth` / `cssHeight` internally — no scale awareness required. Fractions are used because every vision model (Claude, GPT-4o, Gemini, local VLMs) resizes/tiles images differently; proportions are invariant. Avoid raw `getBoundingClientRect()` via `browser_evaluate` for coord lookup; use `browser_get_rect` instead.
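A small sketch of the fraction arithmetic, runnable standalone; `css_width`/`css_height` come from screenshot metadata and the helper names are illustrative:

```python
def to_fraction(px_x: float, px_y: float, css_width: float, css_height: float) -> tuple[float, float]:
    return px_x / css_width, px_y / css_height


def to_css_pixels(fx: float, fy: float, css_width: float, css_height: float) -> tuple[float, float]:
    return fx * css_width, fy * css_height


# "~35% from the left, ~20% from the top" on a 1280x800 CSS viewport:
x, y = to_css_pixels(0.35, 0.20, css_width=1280, css_height=800)
print(round(x), round(y))  # 448 160
```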
## System prompt tips for browser nodes
## System Prompt Tips for Browser Nodes
```
1. Start with browser_snapshot or the snapshot returned by the latest interaction.
2. If the target is missing, ambiguous, stale, or visibly present but absent from the tree,
use browser_screenshot to orient and then click by fractional coordinates.
3. Before typing into a rich-text editor (X compose, LinkedIn DM, Gmail, Reddit),
click the input area first with browser_click_coordinate so React / Draft.js /
Lexical register a native focus event, then use browser_type_focused(text=...)
for shadow-DOM inputs or browser_type(selector, text) for light-DOM inputs.
4. Use browser_wait(seconds=2-3) after navigation for SPA hydration.
5. If you hit an auth wall, call set_output with an error and move on.
6. Keep tool calls per turn <= 10 for reliability.
1. Use browser_snapshot() to read page content (NOT browser_get_text)
2. Use browser_wait(seconds=2-3) after navigation for page load
3. If you hit an auth wall, call set_output with an error and move on
4. Keep tool calls per turn <= 10 for reliability
```
## Example
@@ -66,7 +43,7 @@ Every browser tool that takes or returns coordinates operates in **fractions of
"tools": {"policy": "all"},
"input_keys": ["search_url"],
"output_keys": ["profiles"],
"system_prompt": "Navigate to the search URL via browser_navigate(wait_until='load', timeout_ms=20000). Wait 3s for SPA hydration. Use the returned snapshot to look for result cards first. If the cards are missing, stale, or visually present but absent from the tree, use browser_screenshot to orient; paginate through results by scrolling and use screenshots only when the snapshot cannot find or verify the visible cards..."
"system_prompt": "Navigate to the search URL, paginate through results..."
}
```
@@ -74,7 +51,3 @@ Connected via regular edges:
```
search-setup -> scan-profiles -> process-results
```
## Further detail
For rich-text editor quirks (Lexical, Draft.js, ProseMirror), shadow-DOM shortcuts, `beforeunload` dialog neutralization, Trusted Types CSP on LinkedIn, keyboard shortcut dispatch, and per-site selector tables — **activate the `browser-automation` skill**. That skill has the full verified guidance and is refreshed against real production sites.
@@ -113,7 +113,8 @@ _REFLECTION_TOOLS: list[Tool] = [
Tool(
name="delete_memory_file",
description=(
"Delete a memory file by filename. Use during long reflection to prune stale or redundant memories."
"Delete a memory file by filename. Use during long "
"reflection to prune stale or redundant memories."
),
parameters={
"type": "object",
@@ -253,7 +254,10 @@ def _execute_tool(
fm = parse_frontmatter(content)
mem_type = (fm.get("type") or "").strip().lower()
if mem_type and mem_type not in GLOBAL_MEMORY_CATEGORIES:
return f"ERROR: Invalid memory type '{mem_type}'. Allowed types: {', '.join(GLOBAL_MEMORY_CATEGORIES)}."
return (
f"ERROR: Invalid memory type '{mem_type}'. "
f"Allowed types: {', '.join(GLOBAL_MEMORY_CATEGORIES)}."
)
# Enforce file size limit.
if len(content.encode("utf-8")) > MAX_FILE_SIZE_BYTES:
return f"ERROR: Content exceeds {MAX_FILE_SIZE_BYTES} byte limit."
@@ -539,7 +543,9 @@ Rules:
def _build_unified_long_reflect_system(queen_id: str | None = None) -> str:
"""Build the unified housekeeping prompt across memory scopes."""
queen_scope = (
f"- `queen`: memories specific to how queen '{queen_id}' should work with this user\n" if queen_id else ""
f"- `queen`: memories specific to how queen '{queen_id}' should work with this user\n"
if queen_id
else ""
)
return f"""\
You are a reflection agent performing a periodic housekeeping pass over the
@@ -643,7 +649,9 @@ async def run_unified_short_reflection(
session_dir,
llm,
memory_dirs,
system_prompt=_build_unified_short_reflect_system(queen_id if "queen" in memory_dirs else None),
system_prompt=_build_unified_short_reflect_system(
queen_id if "queen" in memory_dirs else None
),
log_label="unified",
queen_id=queen_id if "queen" in memory_dirs else None,
)
@@ -763,7 +771,9 @@ async def run_unified_long_reflection(
if queen_memory_dir is not None and queen_id:
memory_dirs["queen"] = queen_memory_dir
manifest = _format_multi_scope_manifest(memory_dirs, queen_id=queen_id if "queen" in memory_dirs else None)
manifest = _format_multi_scope_manifest(
memory_dirs, queen_id=queen_id if "queen" in memory_dirs else None
)
user_msg = (
"## Current memory manifest across scopes\n\n"
f"{manifest}\n\n"
@@ -823,8 +833,8 @@ async def run_shutdown_reflection(
# ---------------------------------------------------------------------------
_LONG_REFLECT_INTERVAL = 5
_SHORT_REFLECT_TURN_INTERVAL = 3
_SHORT_REFLECT_COOLDOWN_SEC = 300.0
_SHORT_REFLECT_TURN_INTERVAL = 2
_SHORT_REFLECT_COOLDOWN_SEC = 120.0
async def subscribe_reflection_triggers(
+3 -1
View File
@@ -405,7 +405,9 @@ def _fetch_antigravity_credentials() -> tuple[str | None, str | None]:
import urllib.request
try:
req = urllib.request.Request(_ANTIGRAVITY_CREDENTIALS_URL, headers={"User-Agent": "Hive/1.0"})
req = urllib.request.Request(
_ANTIGRAVITY_CREDENTIALS_URL, headers={"User-Agent": "Hive/1.0"}
)
with urllib.request.urlopen(req, timeout=10) as resp:
content = resp.read().decode("utf-8")
id_match = re.search(r'ANTIGRAVITY_CLIENT_ID\s*=\s*"([^"]+)"', content)
-2
View File
@@ -85,7 +85,6 @@ from .template import TemplateResolver
from .validation import (
CredentialStatus,
CredentialValidationResult,
compute_unavailable_tools,
ensure_credential_key_env,
validate_agent_credentials,
)
@@ -151,7 +150,6 @@ __all__ = [
# Validation
"ensure_credential_key_env",
"validate_agent_credentials",
"compute_unavailable_tools",
"CredentialStatus",
"CredentialValidationResult",
# Interactive setup
+6 -2
View File
@@ -332,7 +332,9 @@ class AdenCredentialClient:
last_error = e
if attempt < self.config.retry_attempts - 1:
delay = self.config.retry_delay * (2**attempt)
logger.warning(f"Aden request failed (attempt {attempt + 1}), retrying in {delay}s: {e}")
logger.warning(
f"Aden request failed (attempt {attempt + 1}), retrying in {delay}s: {e}"
)
time.sleep(delay)
else:
raise AdenClientError(f"Failed to connect to Aden server: {e}") from e
@@ -345,7 +347,9 @@ class AdenCredentialClient:
):
raise
raise AdenClientError(f"Request failed after {self.config.retry_attempts} attempts") from last_error
raise AdenClientError(
f"Request failed after {self.config.retry_attempts} attempts"
) from last_error
def list_integrations(self) -> list[AdenIntegrationInfo]:
"""
+6 -2
View File
@@ -192,7 +192,9 @@ class AdenSyncProvider(CredentialProvider):
f"Visit: {e.reauthorization_url or 'your Aden dashboard'}"
) from e
raise CredentialRefreshError(f"Failed to refresh credential '{credential.id}': {e}") from e
raise CredentialRefreshError(
f"Failed to refresh credential '{credential.id}': {e}"
) from e
except AdenClientError as e:
logger.error(f"Aden client error for '{credential.id}': {e}")
@@ -204,7 +206,9 @@ class AdenSyncProvider(CredentialProvider):
logger.warning(f"Aden unavailable, using cached token for '{credential.id}'")
return credential
raise CredentialRefreshError(f"Aden server unavailable and token expired for '{credential.id}'") from e
raise CredentialRefreshError(
f"Aden server unavailable and token expired for '{credential.id}'"
) from e
def validate(self, credential: CredentialObject) -> bool:
"""
+3 -1
View File
@@ -168,7 +168,9 @@ class AdenCachedStorage(CredentialStorage):
if rid != credential_id:
result = self._load_by_id(rid)
if result is not None:
logger.info(f"Loaded credential '{credential_id}' via provider index (id='{rid}')")
logger.info(
f"Loaded credential '{credential_id}' via provider index (id='{rid}')"
)
return result
# Direct lookup (exact credential_id match)
@@ -493,7 +493,9 @@ class TestAdenCachedStorage:
assert loaded is not None
assert loaded.keys["access_token"].value.get_secret_value() == "cached-token"
def test_load_from_aden_when_stale(self, cached_storage, local_storage, provider, mock_client, aden_response):
def test_load_from_aden_when_stale(
self, cached_storage, local_storage, provider, mock_client, aden_response
):
"""Test load fetches from Aden when cache is stale."""
# Create stale cached credential
cred = CredentialObject(
@@ -519,7 +521,9 @@ class TestAdenCachedStorage:
assert loaded is not None
assert loaded.keys["access_token"].value.get_secret_value() == "test-access-token"
def test_load_falls_back_to_stale_when_aden_fails(self, cached_storage, local_storage, provider, mock_client):
def test_load_falls_back_to_stale_when_aden_fails(
self, cached_storage, local_storage, provider, mock_client
):
"""Test load falls back to stale cache when Aden fails."""
# Create stale cached credential
cred = CredentialObject(
@@ -95,7 +95,9 @@ class BaseOAuth2Provider(CredentialProvider):
self._client = httpx.Client(timeout=self.config.request_timeout)
except ImportError as e:
raise ImportError("OAuth2 provider requires 'httpx'. Install with: uv pip install httpx") from e
raise ImportError(
"OAuth2 provider requires 'httpx'. Install with: uv pip install httpx"
) from e
return self._client
def _close_client(self) -> None:
@@ -309,7 +311,8 @@ class BaseOAuth2Provider(CredentialProvider):
except OAuth2Error as e:
if e.error == "invalid_grant":
raise CredentialRefreshError(
f"Refresh token for '{credential.id}' is invalid or revoked. Re-authorization required."
f"Refresh token for '{credential.id}' is invalid or revoked. "
"Re-authorization required."
) from e
raise CredentialRefreshError(f"Failed to refresh '{credential.id}': {e}") from e
@@ -419,7 +422,9 @@ class BaseOAuth2Provider(CredentialProvider):
if response.status_code != 200 or "error" in response_data:
error = response_data.get("error", "unknown_error")
description = response_data.get("error_description", response.text)
raise OAuth2Error(error=error, description=description, status_code=response.status_code)
raise OAuth2Error(
error=error, description=description, status_code=response.status_code
)
return OAuth2Token.from_token_response(response_data)
@@ -158,7 +158,9 @@ class TokenLifecycleManager:
"""
# Run in executor to avoid blocking
loop = asyncio.get_event_loop()
token = await loop.run_in_executor(None, lambda: self.provider.client_credentials_grant(scopes=scopes))
token = await loop.run_in_executor(
None, lambda: self.provider.client_credentials_grant(scopes=scopes)
)
self._save_token_to_store(token)
self._cached_token = token
@@ -100,7 +100,9 @@ class ZohoOAuth2Provider(BaseOAuth2Provider):
)
super().__init__(config, provider_id="zoho_crm_oauth2")
self._accounts_domain = base
self._api_domain = (api_domain or os.getenv("ZOHO_API_DOMAIN", "https://www.zohoapis.com")).rstrip("/")
self._api_domain = (
api_domain or os.getenv("ZOHO_API_DOMAIN", "https://www.zohoapis.com")
).rstrip("/")
@property
def supported_types(self) -> list[CredentialType]:
+6 -2
View File
@@ -268,7 +268,9 @@ class CredentialSetupSession:
self._print(f"{Colors.YELLOW}Initializing credential store...{Colors.NC}")
try:
generate_and_save_credential_key()
self._print(f"{Colors.GREEN}✓ Encryption key saved to ~/.hive/secrets/credential_key{Colors.NC}")
self._print(
f"{Colors.GREEN}✓ Encryption key saved to ~/.hive/secrets/credential_key{Colors.NC}"
)
return True
except Exception as e:
self._print(f"{Colors.RED}Failed to initialize credential store: {e}{Colors.NC}")
@@ -447,7 +449,9 @@ class CredentialSetupSession:
logger.warning("Unexpected error exporting credential to env", exc_info=True)
return True
else:
self._print(f"{Colors.YELLOW}{cred.credential_name} not found in Aden account.{Colors.NC}")
self._print(
f"{Colors.YELLOW}{cred.credential_name} not found in Aden account.{Colors.NC}"
)
self._print("Please connect this integration on https://hive.adenhq.com first.")
return False
except Exception as e:
+15 -6
View File
@@ -136,7 +136,8 @@ class EncryptedFileStorage(CredentialStorage):
from cryptography.fernet import Fernet
except ImportError as e:
raise ImportError(
"Encrypted storage requires 'cryptography'. Install with: uv pip install cryptography"
"Encrypted storage requires 'cryptography'. "
"Install with: uv pip install cryptography"
) from e
self.base_path = Path(base_path or self.DEFAULT_PATH).expanduser()
@@ -212,7 +213,9 @@ class EncryptedFileStorage(CredentialStorage):
json_bytes = self._fernet.decrypt(encrypted)
data = json.loads(json_bytes.decode("utf-8-sig"))
except Exception as e:
raise CredentialDecryptionError(f"Failed to decrypt credential '{credential_id}': {e}") from e
raise CredentialDecryptionError(
f"Failed to decrypt credential '{credential_id}': {e}"
) from e
# Deserialize
return self._deserialize_credential(data)
@@ -313,7 +316,8 @@ class EncryptedFileStorage(CredentialStorage):
visible_keys = [
name
for name in credential.keys.keys()
if name not in self.INDEX_INTERNAL_KEY_NAMES and not name.startswith("_identity_")
if name not in self.INDEX_INTERNAL_KEY_NAMES
and not name.startswith("_identity_")
]
# Earliest expiry across all keys (most likely the access_token).
@@ -332,7 +336,9 @@ class EncryptedFileStorage(CredentialStorage):
"key_names": sorted(visible_keys),
"created_at": credential.created_at.isoformat() if credential.created_at else None,
"updated_at": credential.updated_at.isoformat() if credential.updated_at else None,
"last_refreshed": (credential.last_refreshed.isoformat() if credential.last_refreshed else None),
"last_refreshed": (
credential.last_refreshed.isoformat() if credential.last_refreshed else None
),
"expires_at": earliest_expiry.isoformat() if earliest_expiry else None,
"auto_refresh": credential.auto_refresh,
"tags": list(credential.tags),
@@ -474,7 +480,8 @@ class EnvVarStorage(CredentialStorage):
def save(self, credential: CredentialObject) -> None:
"""Cannot save to environment variables at runtime."""
raise NotImplementedError(
"EnvVarStorage is read-only. Set environment variables externally or use EncryptedFileStorage."
"EnvVarStorage is read-only. Set environment variables "
"externally or use EncryptedFileStorage."
)
def load(self, credential_id: str) -> CredentialObject | None:
@@ -494,7 +501,9 @@ class EnvVarStorage(CredentialStorage):
def delete(self, credential_id: str) -> bool:
"""Cannot delete environment variables at runtime."""
raise NotImplementedError("EnvVarStorage is read-only. Unset environment variables externally.")
raise NotImplementedError(
"EnvVarStorage is read-only. Unset environment variables externally."
)
def list_all(self) -> list[str]:
"""List credentials that are available in environment."""
+15 -5
View File
@@ -124,7 +124,9 @@ class CredentialStore:
"""
return self._providers.get(provider_id)
def get_provider_for_credential(self, credential: CredentialObject) -> CredentialProvider | None:
def get_provider_for_credential(
self, credential: CredentialObject
) -> CredentialProvider | None:
"""
Get the appropriate provider for a credential.
@@ -199,7 +201,9 @@ class CredentialStore:
cached = self._get_from_cache(credential_id)
if cached is not None:
if refresh_if_needed and self._should_refresh(cached):
return self._refresh_credential(cached, raise_on_failure=raise_on_refresh_failure)
return self._refresh_credential(
cached, raise_on_failure=raise_on_refresh_failure
)
return cached
# Load from storage
@@ -209,7 +213,9 @@ class CredentialStore:
# Refresh if needed
if refresh_if_needed and self._should_refresh(credential):
credential = self._refresh_credential(credential, raise_on_failure=raise_on_refresh_failure)
credential = self._refresh_credential(
credential, raise_on_failure=raise_on_refresh_failure
)
# Cache
self._add_to_cache(credential)
@@ -234,7 +240,9 @@ class CredentialStore:
Returns:
The key value or None if not found
"""
credential = self.get_credential(credential_id, raise_on_refresh_failure=raise_on_refresh_failure)
credential = self.get_credential(
credential_id, raise_on_refresh_failure=raise_on_refresh_failure
)
if credential is None:
return None
return credential.get_key(key_name)
@@ -258,7 +266,9 @@ class CredentialStore:
Returns:
The primary key value or None
"""
credential = self.get_credential(credential_id, raise_on_refresh_failure=raise_on_refresh_failure)
credential = self.get_credential(
credential_id, raise_on_refresh_failure=raise_on_refresh_failure
)
if credential is None:
return None
return credential.get_default_key()
+6 -2
View File
@@ -88,7 +88,9 @@ class TemplateResolver:
if key_name:
value = credential.get_key(key_name)
if value is None:
raise CredentialKeyNotFoundError(f"Key '{key_name}' not found in credential '{cred_id}'")
raise CredentialKeyNotFoundError(
f"Key '{key_name}' not found in credential '{cred_id}'"
)
else:
# Use default key
value = credential.get_default_key()
@@ -124,7 +126,9 @@ class TemplateResolver:
... })
{"Authorization": "Bearer ghp_xxx", "X-API-Key": "BSAKxxx"}
"""
return {key: self.resolve(value, fail_on_missing) for key, value in header_templates.items()}
return {
key: self.resolve(value, fail_on_missing) for key, value in header_templates.items()
}
def resolve_params(
self,
@@ -130,7 +130,9 @@ class TestCredentialObject:
# With access_token
cred2 = CredentialObject(
id="test",
keys={"access_token": CredentialKey(name="access_token", value=SecretStr("token-value"))},
keys={
"access_token": CredentialKey(name="access_token", value=SecretStr("token-value"))
},
)
assert cred2.get_default_key() == "token-value"
@@ -295,7 +297,9 @@ class TestEncryptedFileStorage:
key = Fernet.generate_key().decode()
with patch.dict(os.environ, {"HIVE_CREDENTIAL_KEY": key}):
storage = EncryptedFileStorage(temp_dir)
cred = CredentialObject(id="test", keys={"k": CredentialKey(name="k", value=SecretStr("v"))})
cred = CredentialObject(
id="test", keys={"k": CredentialKey(name="k", value=SecretStr("v"))}
)
storage.save(cred)
# Create new storage instance with same key
@@ -326,10 +330,18 @@ class TestCompositeStorage:
def test_read_from_primary(self):
"""Test reading from primary storage."""
primary = InMemoryStorage()
primary.save(CredentialObject(id="test", keys={"k": CredentialKey(name="k", value=SecretStr("primary"))}))
primary.save(
CredentialObject(
id="test", keys={"k": CredentialKey(name="k", value=SecretStr("primary"))}
)
)
fallback = InMemoryStorage()
fallback.save(CredentialObject(id="test", keys={"k": CredentialKey(name="k", value=SecretStr("fallback"))}))
fallback.save(
CredentialObject(
id="test", keys={"k": CredentialKey(name="k", value=SecretStr("fallback"))}
)
)
storage = CompositeStorage(primary, [fallback])
cred = storage.load("test")
@@ -341,7 +353,11 @@ class TestCompositeStorage:
"""Test fallback when credential not in primary."""
primary = InMemoryStorage()
fallback = InMemoryStorage()
fallback.save(CredentialObject(id="test", keys={"k": CredentialKey(name="k", value=SecretStr("fallback"))}))
fallback.save(
CredentialObject(
id="test", keys={"k": CredentialKey(name="k", value=SecretStr("fallback"))}
)
)
storage = CompositeStorage(primary, [fallback])
cred = storage.load("test")
@@ -377,7 +393,9 @@ class TestStaticProvider:
def test_refresh_returns_unchanged(self):
"""Test that refresh returns credential unchanged."""
provider = StaticProvider()
cred = CredentialObject(id="test", keys={"k": CredentialKey(name="k", value=SecretStr("v"))})
cred = CredentialObject(
id="test", keys={"k": CredentialKey(name="k", value=SecretStr("v"))}
)
refreshed = provider.refresh(cred)
assert refreshed.get_key("k") == "v"
@@ -385,7 +403,9 @@ class TestStaticProvider:
def test_validate_with_keys(self):
"""Test validation with keys present."""
provider = StaticProvider()
cred = CredentialObject(id="test", keys={"k": CredentialKey(name="k", value=SecretStr("v"))})
cred = CredentialObject(
id="test", keys={"k": CredentialKey(name="k", value=SecretStr("v"))}
)
assert provider.validate(cred)
@@ -586,7 +606,9 @@ class TestCredentialStore:
storage = InMemoryStorage()
store = CredentialStore(storage=storage, cache_ttl_seconds=60)
storage.save(CredentialObject(id="test", keys={"k": CredentialKey(name="k", value=SecretStr("v"))}))
storage.save(
CredentialObject(id="test", keys={"k": CredentialKey(name="k", value=SecretStr("v"))})
)
# First load
store.get_credential("test")
@@ -664,7 +686,9 @@ class TestOAuth2Module:
from core.framework.credentials.oauth2 import OAuth2Config, TokenPlacement
# Valid config
config = OAuth2Config(token_url="https://example.com/token", client_id="id", client_secret="secret")
config = OAuth2Config(
token_url="https://example.com/token", client_id="id", client_secret="secret"
)
assert config.token_url == "https://example.com/token"
# Missing token_url
+20 -44
View File
@@ -160,9 +160,15 @@ class CredentialValidationResult:
if aden_nc:
if missing or invalid:
lines.append("")
lines.append("Aden integrations not connected (ADEN_API_KEY is set but OAuth tokens unavailable):\n")
lines.append(
"Aden integrations not connected "
"(ADEN_API_KEY is set but OAuth tokens unavailable):\n"
)
for c in aden_nc:
lines.append(f" {c.env_var} for {_label(c)}\n Connect this integration at hive.adenhq.com first.")
lines.append(
f" {c.env_var} for {_label(c)}"
f"\n Connect this integration at hive.adenhq.com first."
)
lines.append("\nIf you've already set up credentials, restart your terminal to load them.")
return "\n".join(lines)
@@ -230,45 +236,6 @@ def _presync_aden_tokens(credential_specs: dict, *, force: bool = False) -> None
)
def compute_unavailable_tools(nodes: list) -> tuple[set[str], list[str]]:
"""Return (tool_names_to_drop, human_messages).
Runs credential validation *without* raising, collects every tool
bound to a failed credential (missing / invalid / Aden-not-connected
and no alternative provider available), and returns the set of tool
names that should be silently dropped from the worker's effective
tool list.
Use this at every worker-spawn preflight so missing credentials
filter tools out of the graph instead of hard-failing the whole
spawn. Only affects non-MCP tools; the MCP admission gate
(``_build_mcp_admission_gate``) already handles MCP tools at
registration time.
"""
try:
result = validate_agent_credentials(nodes, verify=False, raise_on_error=False)
except Exception as exc:
logger.debug("compute_unavailable_tools: validation raised: %s", exc)
return set(), []
drop: set[str] = set()
messages: list[str] = []
for status in result.failed:
if not status.tools:
continue
drop.update(status.tools)
reason = "missing"
if status.aden_not_connected:
reason = "aden_not_connected"
elif status.available and status.valid is False:
reason = "invalid"
messages.append(
f"{status.env_var} ({reason}) → drops {len(status.tools)} tool(s): "
f"{', '.join(status.tools[:6])}" + (f" +{len(status.tools) - 6} more" if len(status.tools) > 6 else "")
)
return drop, messages
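For reference, a hypothetical call site showing how a spawn preflight could consume this helper; the nodes list, all_tools, and logger names here are illustrative, not the framework's actual spawn code:

# Illustrative preflight: filter tools instead of failing the spawn.
drop, notes = compute_unavailable_tools(nodes)
effective_tools = [t for t in all_tools if t.name not in drop]
for note in notes:
    logger.info("spawn preflight: %s", note)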
def validate_agent_credentials(
nodes: list,
quiet: bool = False,
@@ -325,7 +292,9 @@ def validate_agent_credentials(
if os.environ.get("ADEN_API_KEY"):
_presync_aden_tokens(CREDENTIAL_SPECS, force=force_refresh)
env_mapping = {(spec.credential_id or name): spec.env_var for name, spec in CREDENTIAL_SPECS.items()}
env_mapping = {
(spec.credential_id or name): spec.env_var for name, spec in CREDENTIAL_SPECS.items()
}
env_storage = EnvVarStorage(env_mapping=env_mapping)
if os.environ.get("HIVE_CREDENTIAL_KEY"):
storage = CompositeStorage(primary=env_storage, fallbacks=[EncryptedFileStorage()])
@@ -359,7 +328,12 @@ def validate_agent_credentials(
available = store.is_available(cred_id)
# Aden-not-connected: ADEN_API_KEY set, Aden-only cred, but integration missing
is_aden_nc = not available and has_aden_key and spec.aden_supported and not spec.direct_api_key_supported
is_aden_nc = (
not available
and has_aden_key
and spec.aden_supported
and not spec.direct_api_key_supported
)
status = CredentialStatus(
credential_name=cred_name,
@@ -477,7 +451,9 @@ def validate_agent_credentials(
identity_data = result.details.get("identity")
if identity_data and isinstance(identity_data, dict):
try:
cred_obj = store.get_credential(status.credential_id, refresh_if_needed=False)
cred_obj = store.get_credential(
status.credential_id, refresh_if_needed=False
)
if cred_obj:
cred_obj.set_identity(**identity_data)
store.save_credential(cred_obj)
+68 -25
View File
@@ -205,7 +205,9 @@ class AgentHost:
DeprecationWarning,
stacklevel=2,
)
self._skills_manager = SkillsManager.from_precomputed(skills_catalog_prompt, protocols_prompt)
self._skills_manager = SkillsManager.from_precomputed(
skills_catalog_prompt, protocols_prompt
)
else:
# Bare constructor: auto-load defaults
self._skills_manager = SkillsManager()
@@ -246,7 +248,9 @@ class AgentHost:
self._tools = tools or []
self._tool_executor = tool_executor
self._accounts_prompt = accounts_prompt
self._dynamic_memory_provider_factory: Callable[[str], Callable[[], str] | None] | None = None
self._dynamic_memory_provider_factory: Callable[[str], Callable[[], str] | None] | None = (
None
)
self._accounts_data = accounts_data
self._tool_provider_map = tool_provider_map
@@ -415,7 +419,8 @@ class AgentHost:
event_types = [_ET(et) for et in tc.get("event_types", [])]
if not event_types:
logger.warning(
f"Entry point '{ep_id}' has trigger_type='event' but no event_types in trigger_config"
f"Entry point '{ep_id}' has trigger_type='event' "
"but no event_types in trigger_config"
)
continue
@@ -445,7 +450,9 @@ class AgentHost:
# Run in the same session as the primary entry
# point so memory (e.g. user-defined rules) is
# shared and logs land in one session directory.
session_state = self._get_primary_session_state(exclude_entry_point=entry_point_id)
session_state = self._get_primary_session_state(
exclude_entry_point=entry_point_id
)
exec_id = await self.trigger(
entry_point_id,
{"event": event.to_dict()},
@@ -498,7 +505,8 @@ class AgentHost:
from croniter import croniter
except ImportError as e:
raise RuntimeError(
"croniter is required for cron-based entry points. Install it with: uv pip install croniter"
"croniter is required for cron-based entry points. "
"Install it with: uv pip install croniter"
) from e
try:
@@ -540,7 +548,9 @@ class AgentHost:
"Cron '%s': paused, skipping tick",
entry_point_id,
)
self._timer_next_fire[entry_point_id] = time.monotonic() + sleep_secs
self._timer_next_fire[entry_point_id] = (
time.monotonic() + sleep_secs
)
await asyncio.sleep(max(0, sleep_secs))
continue
@@ -568,7 +578,9 @@ class AgentHost:
"Cron '%s': agent actively working, skipping tick",
entry_point_id,
)
self._timer_next_fire[entry_point_id] = time.monotonic() + sleep_secs
self._timer_next_fire[entry_point_id] = (
time.monotonic() + sleep_secs
)
await asyncio.sleep(max(0, sleep_secs))
continue
@@ -578,18 +590,24 @@ class AgentHost:
is_isolated = ep_spec and ep_spec.isolation_level == "isolated"
if is_isolated:
if _persistent_session_id:
session_state = {"resume_session_id": _persistent_session_id}
session_state = {
"resume_session_id": _persistent_session_id
}
else:
session_state = None
else:
session_state = self._get_primary_session_state(exclude_entry_point=entry_point_id)
session_state = self._get_primary_session_state(
exclude_entry_point=entry_point_id
)
# Gate: skip tick if no active session
if session_state is None:
logger.debug(
"Cron '%s': no active session, skipping",
entry_point_id,
)
self._timer_next_fire[entry_point_id] = time.monotonic() + sleep_secs
self._timer_next_fire[entry_point_id] = (
time.monotonic() + sleep_secs
)
await asyncio.sleep(max(0, sleep_secs))
continue
@@ -662,7 +680,9 @@ class AgentHost:
"Timer '%s': paused, skipping tick",
entry_point_id,
)
self._timer_next_fire[entry_point_id] = time.monotonic() + interval_secs
self._timer_next_fire[entry_point_id] = (
time.monotonic() + interval_secs
)
await asyncio.sleep(interval_secs)
continue
@@ -688,7 +708,9 @@ class AgentHost:
"Timer '%s': agent actively working, skipping tick",
entry_point_id,
)
self._timer_next_fire[entry_point_id] = time.monotonic() + interval_secs
self._timer_next_fire[entry_point_id] = (
time.monotonic() + interval_secs
)
await asyncio.sleep(interval_secs)
continue
@@ -698,18 +720,24 @@ class AgentHost:
is_isolated = ep_spec and ep_spec.isolation_level == "isolated"
if is_isolated:
if _persistent_session_id:
session_state = {"resume_session_id": _persistent_session_id}
session_state = {
"resume_session_id": _persistent_session_id
}
else:
session_state = None
else:
session_state = self._get_primary_session_state(exclude_entry_point=entry_point_id)
session_state = self._get_primary_session_state(
exclude_entry_point=entry_point_id
)
# Gate: skip tick if no active session
if session_state is None:
logger.debug(
"Timer '%s': no active session, skipping",
entry_point_id,
)
self._timer_next_fire[entry_point_id] = time.monotonic() + interval_secs
self._timer_next_fire[entry_point_id] = (
time.monotonic() + interval_secs
)
await asyncio.sleep(interval_secs)
continue
@@ -1124,7 +1152,8 @@ class AgentHost:
event_types = [_ET(et) for et in tc.get("event_types", [])]
if not event_types:
logger.warning(
"Entry point '%s::%s' has trigger_type='event' but no event_types in trigger_config",
"Entry point '%s::%s' has trigger_type='event' "
"but no event_types in trigger_config",
graph_id,
ep_id,
)
@@ -1272,18 +1301,24 @@ class AgentHost:
break
stream = reg.streams.get(local_ep)
if not stream:
logger.warning("Timer: no stream '%s' in '%s', stopping", local_ep, gid)
logger.warning(
"Timer: no stream '%s' in '%s', stopping", local_ep, gid
)
break
# Isolated entry points get their own session;
# shared ones join the primary session.
ep_spec = reg.entry_points.get(local_ep)
if ep_spec and ep_spec.isolation_level == "isolated":
if _persistent_session_id:
session_state = {"resume_session_id": _persistent_session_id}
session_state = {
"resume_session_id": _persistent_session_id
}
else:
session_state = None
else:
session_state = self._get_primary_session_state(local_ep, source_graph_id=gid)
session_state = self._get_primary_session_state(
local_ep, source_graph_id=gid
)
# Gate: skip tick if no active session
if session_state is None:
logger.debug(
@@ -1300,7 +1335,11 @@ class AgentHost:
session_state=session_state,
)
# Remember session ID for reuse on next tick
if not _persistent_session_id and ep_spec and ep_spec.isolation_level == "isolated":
if (
not _persistent_session_id
and ep_spec
and ep_spec.isolation_level == "isolated"
):
_persistent_session_id = exec_id
except Exception:
logger.error(
@@ -1558,7 +1597,9 @@ class AgentHost:
src_graph_id = source_graph_id or self._graph_id
src_reg = self._graphs.get(src_graph_id)
ep_spec = (
src_reg.entry_points.get(exclude_entry_point) if src_reg else self._entry_points.get(exclude_entry_point)
src_reg.entry_points.get(exclude_entry_point)
if src_reg
else self._entry_points.get(exclude_entry_point)
)
if ep_spec:
graph = src_reg.graph if src_reg else self.graph
@@ -1592,7 +1633,9 @@ class AgentHost:
# Filter to only input keys so stale outputs
# from previous triggers don't leak through.
if allowed_keys is not None:
buffer_data = {k: v for k, v in full_buffer.items() if k in allowed_keys}
buffer_data = {
k: v for k, v in full_buffer.items() if k in allowed_keys
}
else:
buffer_data = full_buffer
if buffer_data:
@@ -1672,7 +1715,7 @@ class AgentHost:
entry_point_id: str,
execution_id: str,
graph_id: str | None = None,
) -> str:
) -> bool:
"""
Cancel a running execution.
@@ -1682,11 +1725,11 @@ class AgentHost:
graph_id: Graph to search (defaults to active graph)
Returns:
Cancellation outcome from the stream.
True if cancelled, False if not found
"""
stream = self._resolve_stream(entry_point_id, graph_id)
if stream is None:
return "not_found"
return False
return await stream.cancel_execution(execution_id)
# === QUERY OPERATIONS ===
+35 -385
View File
@@ -14,8 +14,8 @@ from __future__ import annotations
import asyncio
import json
import logging
import os
import time
import uuid
from collections import OrderedDict
from collections.abc import Callable
from dataclasses import dataclass, field
@@ -25,77 +25,25 @@ from typing import TYPE_CHECKING, Any
from framework.agent_loop.types import AgentContext, AgentSpec
from framework.host.event_bus import AgentEvent, EventBus, EventType
from framework.host.triggers import TriggerDefinition
from framework.host.worker import Worker, WorkerInfo, WorkerResult
from framework.host.worker import Worker, WorkerInfo, WorkerResult, WorkerStatus
from framework.observability import set_trace_context
from framework.schemas.goal import Goal
from framework.storage.concurrent import ConcurrentStorage
from framework.storage.session_store import SessionStore
if TYPE_CHECKING:
from framework.agent_loop.agent_loop import AgentLoop
from framework.llm.provider import LLMProvider, Tool
from framework.pipeline.runner import PipelineRunner
from framework.skills.manager import SkillsManagerConfig
from framework.tracker.runtime_log_store import RuntimeLogStore
logger = logging.getLogger(__name__)
def _format_spawn_task_message(task: str, input_data: dict[str, Any]) -> str:
"""Render the spawn task into the worker's next user message.
Spawned workers inherit the queen's conversation via
``ColonyRuntime._fork_parent_conversation``; this helper builds
the content of the trailing user message that carries the new
task. The queen's chat already provides the context for the
task, so we frame this as an explicit hand-off.
Additional keys from ``input_data`` (other than the task itself)
are rendered below the hand-off line so the worker sees them as
structured hand-off data. This mirrors the fresh-path
``AgentLoop._build_initial_message`` shape so worker prompts look
roughly the same whether or not inheritance fired.
"""
lines = [
"# New task delegated by the queen",
"",
"The queen's conversation up to this point is visible above. "
"Use it as context (who the user is, what was already decided, "
"which skills apply). Your own system prompt and tool set are "
"set by the framework — the queen's tools may differ from "
"yours, so treat her prior tool calls as history only.",
"",
f"task: {task}",
]
for key, value in (input_data or {}).items():
if key in ("task", "user_request"):
# Already rendered above; don't duplicate.
continue
if value is None:
continue
lines.append(f"{key}: {value}")
return "\n".join(lines)
def _env_int(name: str, default: int) -> int:
"""Read a positive int from env; fall back to default on missing/invalid."""
raw = os.environ.get(name)
if not raw:
return default
try:
value = int(raw)
except ValueError:
logger.warning("Invalid %s=%r; using default %d", name, raw, default)
return default
return value if value > 0 else default
# Laptop-safe default. Each worker is a full AgentLoop (Claude SDK session +
# tool catalog), so ~4 concurrent is the realistic ceiling on a dev machine.
# Override via HIVE_MAX_CONCURRENT_WORKERS for servers.
_DEFAULT_MAX_CONCURRENT_WORKERS = _env_int("HIVE_MAX_CONCURRENT_WORKERS", 4)
@dataclass
class ColonyConfig:
max_concurrent_workers: int = _DEFAULT_MAX_CONCURRENT_WORKERS
max_concurrent_workers: int = 100
cache_ttl: float = 60.0
batch_interval: float = 0.1
max_history: int = 1000
@@ -211,7 +159,9 @@ class ColonyRuntime:
DeprecationWarning,
stacklevel=2,
)
self._skills_manager = SkillsManager.from_precomputed(skills_catalog_prompt, protocols_prompt)
self._skills_manager = SkillsManager.from_precomputed(
skills_catalog_prompt, protocols_prompt
)
else:
self._skills_manager = SkillsManager()
self._skills_manager.load()
@@ -224,7 +174,9 @@ class ColonyRuntime:
self._accounts_prompt = accounts_prompt
self._accounts_data = accounts_data
self._tool_provider_map = tool_provider_map
self._dynamic_memory_provider_factory: Callable[[str], Callable[[], str] | None] | None = None
self._dynamic_memory_provider_factory: Callable[[str], Callable[[], str] | None] | None = (
None
)
storage_path_obj = Path(storage_path) if isinstance(storage_path, str) else storage_path
self._storage_path: Path = storage_path_obj
@@ -258,13 +210,6 @@ class ColonyRuntime:
self._timer_tasks: list[asyncio.Task] = []
self._timer_next_fire: dict[str, float] = {}
self._webhook_server: Any = None
# Background tasks owned by the runtime that aren't timers —
# e.g. the per-spawn soft/hard timeout watchers kicked off by
# run_parallel_workers. We hold strong references so asyncio
# does not garbage-collect them mid-sleep (Python's asyncio
# docs explicitly warn that create_task() needs a referenced
# handle).
self._background_tasks: set[asyncio.Task] = set()
# Idempotency
self._idempotency_keys: OrderedDict[str, str] = OrderedDict()
@@ -435,24 +380,8 @@ class ColonyRuntime:
async with self._lock:
await self.stop_all_workers()
# Cancel timer tasks and *wait* for them to finish. Without
# the wait the tasks are merely scheduled for cancellation —
# if the runtime (or its event loop) shuts down before they
# run their cleanup code, trigger state leaks.
pending_timers = [t for t in self._timer_tasks if not t.done()]
for task in pending_timers:
for task in self._timer_tasks:
task.cancel()
if pending_timers:
try:
await asyncio.wait_for(
asyncio.gather(*pending_timers, return_exceptions=True),
timeout=5.0,
)
except TimeoutError:
logger.warning(
"ColonyRuntime.stop: %d timer task(s) did not finish within 5s",
sum(1 for t in pending_timers if not t.done()),
)
self._timer_tasks.clear()
for sub_id in self._event_subscriptions:
@@ -469,147 +398,12 @@ class ColonyRuntime:
self._running = False
logger.info("ColonyRuntime stopped: colony_id=%s", self._colony_id)
def _on_timer_task_done(self, task: asyncio.Task) -> None:
if task.cancelled():
return
exc = task.exception()
if exc is not None:
logger.error(
"Timer task '%s' crashed: %s",
task.get_name(),
exc,
exc_info=exc,
)
def pause_timers(self) -> None:
self._timers_paused = True
def resume_timers(self) -> None:
self._timers_paused = False
async def _fork_parent_conversation(
self,
dest_conv_dir: Path,
*,
task: str,
input_data: dict[str, Any] | None = None,
) -> None:
"""Fork the colony's parent queen conversation into ``dest_conv_dir``.
Copies the queen's ``parts/*.json`` and ``meta.json`` into the
worker's fresh conversation dir, then appends a synthetic user
message carrying the new task. The worker's subsequent
``AgentLoop._restore`` reads this conversation via the usual
path the queen's history is visible as prior turns, the task
appears as the most recent user message, and the worker starts
acting on it with full context.
This is a no-op if the colony runtime doesn't own a parent
queen conversation (e.g. a standalone colony started without a
queen wrapper).
Notes on filtering compatibility:
- Queen parts have ``phase_id=None``. When the worker's
restore applies its own phase filter, the backward-compat
fallback in NodeConversation.restore kicks in: an
all-None-phased store bypasses the filter. See
``conversation.py:1369-1378``.
- ``cursor.json`` is deliberately NOT copied. The worker
should start fresh at iteration 0; copying the queen's
cursor would make the worker think it had already done
work.
- The queen's ``meta.json`` is copied but the AgentLoop
immediately rebuilds ``system_prompt`` from the worker's
own context post-restore (see agent_loop.py:533-535), so
the queen's system prompt does not leak into the worker.
"""
# Resolve the queen's own conversation dir. For a queen-backed
# ColonyRuntime, storage_path points at the queen's session dir
# and conversations/ lives inside it. For standalone runtimes
# (tests, legacy fork path under ~/.hive/agents/{name}/worker/)
# there's no parent conversation — fall through to the fresh
# spawn path.
src_conv_dir = self._storage_path / "conversations"
src_parts_dir = src_conv_dir / "parts"
if not src_parts_dir.exists():
# No queen conversation to inherit — the worker starts with
# only the task, same as the pre-fork behavior. AgentLoop's
# fresh-conversation branch will call _build_initial_message
# and render input_data into the worker's first user message.
return
def _copy_and_append() -> None:
dest_parts = dest_conv_dir / "parts"
dest_parts.mkdir(parents=True, exist_ok=True)
# Copy each queen part. Use json.dumps round-trip (not raw
# file copy) so we can be defensive about unreadable files —
# a corrupted queen part file shouldn't take down the worker
# spawn, just drop that one part.
max_seq = -1
for part_file in sorted(src_parts_dir.glob("*.json")):
try:
data = json.loads(part_file.read_text(encoding="utf-8"))
except (json.JSONDecodeError, OSError) as exc:
logger.warning(
"spawn fork: skipping unreadable queen part %s: %s",
part_file.name,
exc,
)
continue
seq = data.get("seq")
if isinstance(seq, int) and seq > max_seq:
max_seq = seq
(dest_parts / part_file.name).write_text(
json.dumps(data, ensure_ascii=False),
encoding="utf-8",
)
# Copy the queen's meta.json so the worker's restore finds
# the conversation during its first run. The meta fields
# (system_prompt, max_context_tokens, etc.) get overridden
# by the worker's own AgentLoop config + context after
# restore, so nothing here bleeds into runtime behavior.
src_meta = src_conv_dir / "meta.json"
if src_meta.exists():
try:
meta_data = json.loads(src_meta.read_text(encoding="utf-8"))
(dest_conv_dir / "meta.json").write_text(
json.dumps(meta_data, ensure_ascii=False),
encoding="utf-8",
)
except (json.JSONDecodeError, OSError) as exc:
logger.warning("spawn fork: failed to copy queen meta.json: %s", exc)
# Append the task as the next user message so the worker's
# LLM sees it as the most recent turn in the conversation
# after restore. This replaces the fresh-path call to
# _build_initial_message for spawned workers.
task_content = _format_spawn_task_message(task, input_data or {})
next_seq = max_seq + 1
task_part = {
"seq": next_seq,
"role": "user",
"content": task_content,
# phase_id omitted (None) so the backward-compat
# fallback in NodeConversation.restore keeps it visible
# to both queen-style and phase-filtered restores.
# run_id omitted so the worker's run_id filter (off by
# default since ctx.run_id is empty) doesn't reject it.
}
task_filename = f"{next_seq:010d}.json"
(dest_parts / task_filename).write_text(
json.dumps(task_part, ensure_ascii=False),
encoding="utf-8",
)
logger.info(
"spawn fork: inherited %d queen parts + appended task at seq %d",
max_seq + 1,
next_seq,
)
await asyncio.to_thread(_copy_and_append)
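After the fork, the worker's store looks roughly like this (sequence numbers hypothetical; note that cursor.json is deliberately absent):

workers/{worker_id}/conversations/
    meta.json            # copied from the queen; system_prompt is rebuilt post-restore
    parts/
        0000000000.json  # first inherited queen part (seq 0)
        ...
        0000000041.json  # last inherited queen part (seq 41)
        0000000042.json  # appended synthetic user message carrying the task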
# ── Worker Spawning ─────────────────────────────────────────
async def spawn(
@@ -658,44 +452,6 @@ class ColonyRuntime:
spawn_tools = tools if tools is not None else self._tools
spawn_executor = tool_executor or self._tool_executor
# Colony progress tracker: when the caller supplied a db_path
# in input_data, this worker is part of a SQLite task queue
# and must see the hive.colony-progress-tracker skill body in
# its system prompt from turn 0. Rebuild the catalog with the
# skill pre-activated; falls back to the colony default when
# no db_path is present.
_spawn_catalog = self.skills_catalog_prompt
_spawn_skill_dirs = self.skill_dirs
if isinstance(input_data, dict) and input_data.get("db_path"):
try:
from framework.skills.config import SkillsConfig
from framework.skills.manager import SkillsManager, SkillsManagerConfig
_pre = SkillsManager(
SkillsManagerConfig(
skills_config=SkillsConfig.from_agent_vars(
skills=["hive.colony-progress-tracker"],
),
)
)
_pre.load()
_spawn_catalog = _pre.skills_catalog_prompt
_spawn_skill_dirs = (
list(_pre.allowlisted_dirs) if hasattr(_pre, "allowlisted_dirs") else self.skill_dirs
)
logger.info(
"spawn: pre-activated hive.colony-progress-tracker "
"(catalog %d%d chars) for worker with db_path=%s",
len(self.skills_catalog_prompt),
len(_spawn_catalog),
input_data.get("db_path"),
)
except Exception as exc:
logger.warning(
"spawn: failed to pre-activate colony-progress-tracker skill, falling back to base catalog: %s",
exc,
)
# Resolve the SSE stream_id once. When the caller didn't supply
# one we use the per-worker fan-out tag (filtered out by the
# SSE handler). When the caller passed an explicit value we
@@ -713,24 +469,10 @@ class ColonyRuntime:
# (worse) the process CWD.
worker_storage = self._storage_path / "workers" / worker_id
worker_storage.mkdir(parents=True, exist_ok=True)
# Fork the queen's conversation into the worker's store.
# The queen already accumulated the user chat, read relevant
# skills, and made decisions about how to approach the task;
# the worker would repeat that discovery work (and often
# mis-step — see the 2026-04-14 "dummy-target" incident)
# if spawned with a blank store. We snapshot the queen's
# parts + meta at spawn time, then append the task as the
# next user message so the worker's AgentLoop restores into
# a conversation that already ends with its new instruction.
await self._fork_parent_conversation(
worker_storage / "conversations",
task=task,
input_data=input_data,
worker_conv_store = FileConversationStore(
worker_storage / "conversations"
)
worker_conv_store = FileConversationStore(worker_storage / "conversations")
# AgentLoop takes bus/judge/config/executor at construction;
# LLM, tools, stream_id, execution_id all come from the
# AgentContext passed to execute().
@@ -750,9 +492,9 @@ class ColonyRuntime:
llm=self._llm,
available_tools=list(spawn_tools),
accounts_prompt=self._accounts_prompt,
skills_catalog_prompt=_spawn_catalog,
skills_catalog_prompt=self.skills_catalog_prompt,
protocols_prompt=self.protocols_prompt,
skill_dirs=_spawn_skill_dirs,
skill_dirs=self.skill_dirs,
execution_id=worker_id,
stream_id=explicit_stream_id or f"worker:{worker_id}",
)
@@ -785,8 +527,6 @@ class ColonyRuntime:
async def spawn_batch(
self,
tasks: list[dict[str, Any]],
*,
tools_override: list[Any] | None = None,
) -> list[str]:
"""Spawn a batch of parallel workers, one per task spec.
@@ -799,12 +539,6 @@ class ColonyRuntime:
The overseer's ``run_parallel_workers`` tool is the usual
caller; it pairs ``spawn_batch`` + ``wait_for_worker_reports``
into a single fan-out/fan-in primitive.
When ``tools_override`` is supplied, every spawned worker
receives that tool list instead of the colony's default. Used
by ``run_parallel_workers`` to drop tools whose credentials
failed the pre-flight check (so the spawned workers don't
waste a startup trying to use them).
"""
worker_ids: list[str] = []
for spec in tasks:
@@ -816,7 +550,6 @@ class ColonyRuntime:
task=task_text,
count=1,
input_data=task_data or {"task": task_text},
tools=tools_override,
)
worker_ids.extend(ids)
return worker_ids
@@ -910,7 +643,9 @@ class ColonyRuntime:
if remaining <= 0:
break
try:
report = await asyncio.wait_for(report_queue.get(), timeout=remaining)
report = await asyncio.wait_for(
report_queue.get(), timeout=remaining
)
except TimeoutError:
break
wid = report.get("worker_id")
@@ -979,7 +714,10 @@ class ColonyRuntime:
return self._overseer
if not self._running:
raise RuntimeError("start_overseer requires the ColonyRuntime to be running (call start() first)")
raise RuntimeError(
"start_overseer requires the ColonyRuntime to be running "
"(call start() first)"
)
from framework.agent_loop.agent_loop import AgentLoop
from framework.storage.conversation_store import FileConversationStore
@@ -990,7 +728,9 @@ class ColonyRuntime:
# {colony_session}/conversations/. Workers get their own sub-dirs
# under workers/{worker_id}/; the overseer is the root occupant.
self._storage_path.mkdir(parents=True, exist_ok=True)
overseer_conv_store = FileConversationStore(self._storage_path / "conversations")
overseer_conv_store = FileConversationStore(
self._storage_path / "conversations"
)
agent_loop = AgentLoop(
event_bus=self._scoped_event_bus,
tool_executor=self._tool_executor,
@@ -1128,96 +868,6 @@ class ColonyRuntime:
return True
return False
def watch_batch_timeouts(
self,
worker_ids: list[str],
*,
soft_timeout: float,
hard_timeout: float,
warning_message: str | None = None,
) -> asyncio.Task:
"""Schedule a background task that enforces soft + hard timeouts.
Semantics:
* At ``t = soft_timeout`` every worker in ``worker_ids`` that is
still active AND hasn't already filed an ``_explicit_report``
receives ``warning_message`` via ``send_to_worker`` the inject
appears as a user turn at the next agent-loop boundary, so the
worker's LLM can see it and call ``report_to_parent`` with
partial results.
* At ``t = hard_timeout`` any worker still active is force-stopped
via ``stop_worker``. ``Worker.run`` still emits its
``SUBAGENT_REPORT`` on cancel (the explicit report survives
if the worker reported just before the stop), so the queen
always sees a terminal inject for every spawned worker.
Returns the scheduled task so callers can await or cancel it.
Non-blocking for the caller; the watcher runs on the event loop
independently.
"""
if warning_message is None:
grace = max(0.0, hard_timeout - soft_timeout)
warning_message = (
f"[SOFT TIMEOUT] You've been running for {soft_timeout:.0f}s. "
"Wrap up now: call report_to_parent with whatever partial "
"results you have. You have "
f"~{grace:.0f}s more before a hard stop — anything not "
"reported by then will be lost."
)
async def _watch() -> None:
try:
await asyncio.sleep(soft_timeout)
for wid in worker_ids:
worker = self._workers.get(wid)
if worker is None or not worker.is_active:
continue
if getattr(worker, "_explicit_report", None) is not None:
continue
try:
await self.send_to_worker(wid, warning_message)
except Exception:
logger.warning(
"watch_batch_timeouts: soft-timeout inject failed for %s",
wid,
exc_info=True,
)
remaining = hard_timeout - soft_timeout
if remaining <= 0:
return
await asyncio.sleep(remaining)
for wid in worker_ids:
worker = self._workers.get(wid)
if worker is None or not worker.is_active:
continue
try:
await self.stop_worker(wid)
logger.info(
"watch_batch_timeouts: hard-stopped %s after %ss (no report)",
wid,
hard_timeout,
)
except Exception:
logger.warning(
"watch_batch_timeouts: hard-stop failed for %s",
wid,
exc_info=True,
)
except asyncio.CancelledError:
raise
except Exception:
logger.exception("watch_batch_timeouts: watcher crashed")
task = asyncio.create_task(_watch(), name=f"batch-timeout:{worker_ids[0] if worker_ids else '?'}")
# Hold a strong reference until completion. Without this the
# task can be garbage-collected during `await asyncio.sleep`,
# silently swallowing the soft-timeout inject (the exact bug
# surfaced by workers never seeing [SOFT TIMEOUT]).
self._background_tasks.add(task)
task.add_done_callback(self._background_tasks.discard)
return task
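A hypothetical fan-out call site pairing the batch spawn with this watcher; the timeout values and the wait_for_worker_reports signature are illustrative:

# Illustrative only: spawn a batch, arm timeouts, then fan in reports.
worker_ids = await runtime.spawn_batch(tasks)
runtime.watch_batch_timeouts(
    worker_ids,
    soft_timeout=240.0,  # inject the wrap-up warning at 4 minutes
    hard_timeout=360.0,  # force-stop anything still active at 6 minutes
)
reports = await runtime.wait_for_worker_reports(worker_ids, timeout=360.0)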
# ── Status & Query ──────────────────────────────────────────
def list_workers(self) -> list[WorkerInfo]:
@@ -1241,7 +891,9 @@ class ColonyRuntime:
def get_worker_result(self, worker_id: str) -> WorkerResult | None:
return self._execution_results.get(worker_id)
async def wait_for_worker(self, worker_id: str, timeout: float | None = None) -> WorkerResult | None:
async def wait_for_worker(
self, worker_id: str, timeout: float | None = None
) -> WorkerResult | None:
worker = self._workers.get(worker_id)
if worker is None:
return self._execution_results.get(worker_id)
@@ -1249,7 +901,7 @@ class ColonyRuntime:
return worker.info.result
try:
await asyncio.wait_for(asyncio.shield(worker._task_handle), timeout=timeout)
except TimeoutError:
except asyncio.TimeoutError:
return None
return worker.info.result
@@ -1290,7 +942,9 @@ class ColonyRuntime:
if worker and worker.is_active:
loop = worker._agent_loop
if hasattr(loop, "inject_event"):
await loop.inject_event(content, is_client_input=is_client_input, image_content=image_content)
await loop.inject_event(
content, is_client_input=is_client_input, image_content=image_content
)
return True
return False
@@ -1362,11 +1016,7 @@ class ColonyRuntime:
run_immediately = tc.get("run_immediately", False)
if interval and interval > 0 and self._running:
task = asyncio.create_task(
self._timer_loop(trig_id, interval, run_immediately),
name=f"timer:{trig_id}",
)
task.add_done_callback(self._on_timer_task_done)
task = asyncio.create_task(self._timer_loop(trig_id, interval, run_immediately))
self._timer_tasks.append(task)
async def _timer_loop(
+7 -118
View File
@@ -111,15 +111,6 @@ class EventType(StrEnum):
# Retry tracking
NODE_RETRY = "node_retry"
# Stream-health observability. Split from NODE_RETRY so the UI can
# distinguish "slow TTFT on a huge context" (healthy, just slow) from
# "stream went silent mid-generation" (probable stall) from "we nudged
# the model to continue" (recovery), which NODE_RETRY used to conflate.
STREAM_TTFT_EXCEEDED = "stream_ttft_exceeded"
STREAM_INACTIVE = "stream_inactive"
STREAM_NUDGE_SENT = "stream_nudge_sent"
TOOL_CALL_REPLAY_DETECTED = "tool_call_replay_detected"
# Worker agent lifecycle
WORKER_COMPLETED = "worker_completed"
WORKER_FAILED = "worker_failed"
@@ -455,7 +446,11 @@ class EventBus:
# iteration values. Without this, live SSE would use raw iterations
# while events.jsonl would use offset iterations, causing ID collisions
# on the frontend when replaying after cold resume.
if self._session_log_iteration_offset and isinstance(event.data, dict) and "iteration" in event.data:
if (
self._session_log_iteration_offset
and isinstance(event.data, dict)
and "iteration" in event.data
):
offset = self._session_log_iteration_offset
event.data = {**event.data, "iteration": event.data["iteration"] + offset}
@@ -523,35 +518,17 @@ class EventBus:
return True
# Per-handler wall-clock timeout. A subscriber that deadlocks or
blocks on slow I/O would otherwise freeze the publisher (and, via
``await publish(...)``, any coroutine that emits events) indefinitely.
# 15 s is generous for legitimate handlers and cheap to tune later.
_HANDLER_TIMEOUT_SECONDS: float = 15.0
async def _execute_handlers(
self,
event: AgentEvent,
handlers: list[EventHandler],
) -> None:
"""Execute handlers concurrently with rate limiting + hard timeout."""
"""Execute handlers concurrently with rate limiting."""
async def run_handler(handler: EventHandler) -> None:
async with self._semaphore:
try:
await asyncio.wait_for(
handler(event),
timeout=self._HANDLER_TIMEOUT_SECONDS,
)
except TimeoutError:
handler_name = getattr(handler, "__qualname__", repr(handler))
logger.error(
"EventBus handler %s exceeded %.0fs on event %s — dropping; "
"fix the handler or the publisher will stall",
handler_name,
self._HANDLER_TIMEOUT_SECONDS,
getattr(event.type, "name", event.type),
)
await handler(event)
except Exception:
logger.exception(f"Handler error for {event.type}")
@@ -1070,94 +1047,6 @@ class EventBus:
)
)
async def emit_stream_ttft_exceeded(
self,
stream_id: str,
node_id: str,
ttft_seconds: float,
limit_seconds: float,
execution_id: str | None = None,
) -> None:
"""Emit when a stream stayed silent past the TTFT budget (no first event)."""
await self.publish(
AgentEvent(
type=EventType.STREAM_TTFT_EXCEEDED,
stream_id=stream_id,
node_id=node_id,
execution_id=execution_id,
data={
"ttft_seconds": ttft_seconds,
"limit_seconds": limit_seconds,
},
)
)
async def emit_stream_inactive(
self,
stream_id: str,
node_id: str,
idle_seconds: float,
limit_seconds: float,
execution_id: str | None = None,
) -> None:
"""Emit when a stream that had produced events went silent past budget."""
await self.publish(
AgentEvent(
type=EventType.STREAM_INACTIVE,
stream_id=stream_id,
node_id=node_id,
execution_id=execution_id,
data={
"idle_seconds": idle_seconds,
"limit_seconds": limit_seconds,
},
)
)
async def emit_stream_nudge_sent(
self,
stream_id: str,
node_id: str,
reason: str,
nudge_count: int,
execution_id: str | None = None,
) -> None:
"""Emit when the continue-nudge was injected (recovery, not retry)."""
await self.publish(
AgentEvent(
type=EventType.STREAM_NUDGE_SENT,
stream_id=stream_id,
node_id=node_id,
execution_id=execution_id,
data={
"reason": reason,
"nudge_count": nudge_count,
},
)
)
async def emit_tool_call_replay_detected(
self,
stream_id: str,
node_id: str,
tool_name: str,
prior_seq: int,
execution_id: str | None = None,
) -> None:
"""Emit when the model is about to re-execute a prior successful call."""
await self.publish(
AgentEvent(
type=EventType.TOOL_CALL_REPLAY_DETECTED,
stream_id=stream_id,
node_id=node_id,
execution_id=execution_id,
data={
"tool_name": tool_name,
"prior_seq": prior_seq,
},
)
)
async def emit_worker_completed(
self,
stream_id: str,
+48 -73
View File
@@ -16,7 +16,7 @@ from collections import OrderedDict
from collections.abc import Callable
from dataclasses import dataclass, field
from datetime import datetime
from typing import TYPE_CHECKING, Any, Literal
from typing import TYPE_CHECKING, Any
from framework.host.event_bus import EventBus
from framework.host.shared_state import IsolationLevel, SharedBufferManager
@@ -48,8 +48,6 @@ class ExecutionAlreadyRunningError(RuntimeError):
logger = logging.getLogger(__name__)
CancelExecutionResult = Literal["cancelled", "cancelling", "not_found"]
class GraphScopedEventBus(EventBus):
"""Proxy that stamps ``graph_id`` on every published event.
@@ -132,7 +130,7 @@ class ExecutionContext:
run_id: str | None = None # Unique ID per trigger() invocation
started_at: datetime = field(default_factory=datetime.now)
completed_at: datetime | None = None
status: str = "pending" # pending, running, cancelling, completed, failed, paused, cancelled
status: str = "pending" # pending, running, completed, failed, paused
class ExecutionManager:
@@ -317,22 +315,6 @@ class ExecutionManager:
"""Return IDs of all currently active executions."""
return list(self._active_executions.keys())
def _get_blocking_execution_ids_locked(self) -> list[str]:
"""Return executions that still block a replacement from starting.
An execution continues to block replacement until its task has
terminated and the task's final cleanup has removed its bookkeeping.
This is intentional: a timed-out cancellation does not mean the old
task is harmless. If it is still alive, it can still write shared
session state, so letting a replacement start would guarantee
overlapping mutations on the same session.
"""
blocking_ids: list[str] = list(self._active_executions.keys())
for execution_id, task in self._execution_tasks.items():
if not task.done() and execution_id not in self._active_executions:
blocking_ids.append(execution_id)
return blocking_ids
@property
def agent_idle_seconds(self) -> float:
"""Seconds since the last agent activity (LLM call, tool call, node transition).
@@ -414,22 +396,15 @@ class ExecutionManager:
async def stop(self) -> None:
"""Stop the execution stream and cancel active executions."""
async with self._lock:
if not self._running:
return
if not self._running:
return
self._running = False
self._running = False
# Cancel all active executions, but keep bookkeeping until each
# task reaches its own cleanup path.
tasks_to_wait: list[asyncio.Task] = []
for execution_id, task in self._execution_tasks.items():
if task.done():
continue
ctx = self._active_executions.get(execution_id)
if ctx is not None:
ctx.status = "cancelling"
self._cancel_reasons.setdefault(execution_id, "Execution cancelled")
# Cancel all active executions
tasks_to_wait = []
for _, task in self._execution_tasks.items():
if not task.done():
task.cancel()
tasks_to_wait.append(task)
@@ -443,6 +418,9 @@ class ExecutionManager:
len(pending),
)
self._execution_tasks.clear()
self._active_executions.clear()
logger.info(f"ExecutionStream '{self.stream_id}' stopped")
# Emit stream stopped event
@@ -474,7 +452,9 @@ class ExecutionManager:
for executor in self._active_executors.values():
node = executor.node_registry.get(node_id)
if node is not None and hasattr(node, "inject_event"):
await node.inject_event(content, is_client_input=is_client_input, image_content=image_content)
await node.inject_event(
content, is_client_input=is_client_input, image_content=image_content
)
return True
return False
@@ -591,16 +571,12 @@ class ExecutionManager:
)
async with self._lock:
if not self._running:
raise RuntimeError(f"ExecutionStream '{self.stream_id}' is not running")
blocking_ids = self._get_blocking_execution_ids_locked()
if blocking_ids:
raise ExecutionAlreadyRunningError(self.stream_id, blocking_ids)
self._active_executions[execution_id] = ctx
self._completion_events[execution_id] = asyncio.Event()
self._execution_tasks[execution_id] = asyncio.create_task(self._run_execution(ctx))
# Start execution task
task = asyncio.create_task(self._run_execution(ctx))
self._execution_tasks[execution_id] = task
logger.debug(f"Queued execution {execution_id} for stream {self.stream_id}")
return execution_id
@@ -693,7 +669,9 @@ class ExecutionManager:
if self._runtime_log_store:
from framework.tracker.runtime_logger import RuntimeLogger
runtime_logger = RuntimeLogger(store=self._runtime_log_store, agent_id=self.graph.id)
runtime_logger = RuntimeLogger(
store=self._runtime_log_store, agent_id=self.graph.id
)
# Derive storage from session_store (graph-specific for secondary
# graphs) so that all files — conversations, state, checkpoints,
@@ -909,7 +887,9 @@ class ExecutionManager:
if has_result and result.paused_at:
await self._write_session_state(execution_id, ctx, result=result)
else:
await self._write_session_state(execution_id, ctx, error="Execution cancelled")
await self._write_session_state(
execution_id, ctx, error="Execution cancelled"
)
# Emit SSE event so the frontend knows the execution stopped.
# The executor does NOT emit on CancelledError, so there is no
@@ -1209,7 +1189,7 @@ class ExecutionManager:
"""Get execution context."""
return self._active_executions.get(execution_id)
async def cancel_execution(self, execution_id: str, *, reason: str | None = None) -> CancelExecutionResult:
async def cancel_execution(self, execution_id: str, *, reason: str | None = None) -> bool:
"""
Cancel a running execution.
@@ -1220,38 +1200,33 @@ class ExecutionManager:
provided, defaults to "Execution cancelled".
Returns:
"cancelled" if the task fully exited within the grace period,
"cancelling" if cancellation was requested but the task is still
shutting down, or "not_found" if no active task exists.
True if cancelled, False if not found
"""
async with self._lock:
task = self._execution_tasks.get(execution_id)
if task is None or task.done():
return "not_found"
task = self._execution_tasks.get(execution_id)
if task and not task.done():
# Store the reason so the CancelledError handler can use it
# when emitting the pause/fail event.
self._cancel_reasons[execution_id] = reason or "Execution cancelled"
ctx = self._active_executions.get(execution_id)
if ctx is not None:
ctx.status = "cancelling"
task.cancel()
# Wait briefly for the task to finish. Don't block indefinitely —
# the task may be stuck in a long LLM API call that doesn't
# respond to cancellation quickly.
done, _ = await asyncio.wait({task}, timeout=5.0)
if not done:
# Keep bookkeeping in place until the task's own finally block runs.
# We intentionally do not add deferred cleanup keyed by execution_id
# here because resumed executions reuse the same id; a delayed pop
# could otherwise delete bookkeeping that belongs to the new run.
logger.warning(
"Execution %s did not finish within cancel timeout; leaving bookkeeping in place until task exit",
execution_id,
)
return "cancelling"
return "cancelled"
# Wait briefly for the task to finish. Don't block indefinitely —
# the task may be stuck in a long LLM API call that doesn't
# respond to cancellation quickly.
done, _ = await asyncio.wait({task}, timeout=5.0)
if not done:
# Task didn't finish within timeout — clean up bookkeeping now
# so the session doesn't think it still has running executions.
# The task will continue winding down in the background and its
# finally block will harmlessly pop already-removed keys.
logger.warning(
"Execution %s did not finish within cancel timeout; force-cleaning bookkeeping",
execution_id,
)
async with self._lock:
self._active_executions.pop(execution_id, None)
self._execution_tasks.pop(execution_id, None)
self._active_executors.pop(execution_id, None)
return True
return False
# === STATS AND MONITORING ===
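The two cancel_execution variants above differ in how they treat a task that outlives the 5-second grace period: the tri-state version leaves bookkeeping for the task's own finally block, while the boolean version force-cleans it. A minimal sketch of the tri-state shape, assuming CancelExecutionResult is a string-literal alias (its definition is not shown in this diff):

import asyncio
from typing import Literal

CancelExecutionResult = Literal["cancelled", "cancelling", "not_found"]

async def cancel_with_grace(task: asyncio.Task | None) -> CancelExecutionResult:
    if task is None or task.done():
        return "not_found"
    task.cancel()
    # Bounded wait: the task may be stuck in a slow LLM call that doesn't
    # respond to cancellation quickly.
    done, _ = await asyncio.wait({task}, timeout=5.0)
    # If the task is still winding down, report "cancelling" and let its own
    # finally block clean up; a deferred pop keyed by execution_id could race
    # a resumed run that reuses the same id.
    return "cancelled" if done else "cancelling"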
-487
View File
@@ -1,487 +0,0 @@
"""Per-colony SQLite task queue + progress ledger.
Every colony gets its own ``progress.db`` under ``~/.hive/colonies/{name}/data/``.
The DB holds the colony's task queue plus per-task step and SOP checklist
rows. Workers claim tasks atomically, write progress as they execute, and
verify SOP gates before marking a task done. This gives cross-run memory
that the existing per-iteration stall detectors don't have.
The DB is driven by agents via the ``sqlite3`` CLI through
``execute_command_tool``. This module handles framework-side lifecycle:
creation, migration, queen-side bulk seeding, stale-claim reclamation.
Concurrency model:
- WAL mode on from day one so 100 concurrent workers don't serialize.
- Workers hold NO long-running connection; they shell out to ``sqlite3`` per
call, which naturally releases locks between LLM turns.
- Atomic claim via ``BEGIN IMMEDIATE; UPDATE tasks SET status='claimed'
WHERE id=(SELECT ... LIMIT 1)``. The subquery-form UPDATE runs inside
the immediate transaction so racers either win the row or find zero
affected rows.
- Stale-claim reclaimer runs on host startup: claims older than
``stale_after_minutes`` get returned to ``pending`` and the row's
``retry_count`` increments. When ``retry_count >= max_retries`` the
row is moved to ``failed`` instead.
All writes go through ``BEGIN IMMEDIATE`` so racing readers see
consistent snapshots.
"""
from __future__ import annotations
import json
import logging
import sqlite3
import uuid
from datetime import UTC, datetime
from pathlib import Path
from typing import Any
logger = logging.getLogger(__name__)
SCHEMA_VERSION = 1
_SCHEMA_V1 = """
CREATE TABLE IF NOT EXISTS tasks (
id TEXT PRIMARY KEY,
seq INTEGER,
priority INTEGER NOT NULL DEFAULT 0,
goal TEXT NOT NULL,
payload TEXT,
status TEXT NOT NULL DEFAULT 'pending',
worker_id TEXT,
claim_token TEXT,
claimed_at TEXT,
started_at TEXT,
completed_at TEXT,
created_at TEXT NOT NULL,
updated_at TEXT NOT NULL,
retry_count INTEGER NOT NULL DEFAULT 0,
max_retries INTEGER NOT NULL DEFAULT 3,
last_error TEXT,
parent_task_id TEXT REFERENCES tasks(id) ON DELETE SET NULL,
source TEXT
);
CREATE TABLE IF NOT EXISTS steps (
id TEXT PRIMARY KEY,
task_id TEXT NOT NULL REFERENCES tasks(id) ON DELETE CASCADE,
seq INTEGER NOT NULL,
title TEXT NOT NULL,
detail TEXT,
status TEXT NOT NULL DEFAULT 'pending',
evidence TEXT,
worker_id TEXT,
started_at TEXT,
completed_at TEXT,
UNIQUE (task_id, seq)
);
CREATE TABLE IF NOT EXISTS sop_checklist (
id TEXT PRIMARY KEY,
task_id TEXT NOT NULL REFERENCES tasks(id) ON DELETE CASCADE,
key TEXT NOT NULL,
description TEXT NOT NULL,
required INTEGER NOT NULL DEFAULT 1,
done_at TEXT,
done_by TEXT,
note TEXT,
UNIQUE (task_id, key)
);
CREATE TABLE IF NOT EXISTS colony_meta (
key TEXT PRIMARY KEY,
value TEXT NOT NULL,
updated_at TEXT NOT NULL
);
CREATE INDEX IF NOT EXISTS idx_tasks_claimable
ON tasks(status, priority DESC, seq, created_at)
WHERE status = 'pending';
CREATE INDEX IF NOT EXISTS idx_steps_task_seq
ON steps(task_id, seq);
CREATE INDEX IF NOT EXISTS idx_sop_required_open
ON sop_checklist(task_id, required, done_at);
CREATE INDEX IF NOT EXISTS idx_tasks_status
ON tasks(status, updated_at);
"""
_PRAGMAS = (
"PRAGMA journal_mode = WAL;",
"PRAGMA synchronous = NORMAL;",
"PRAGMA foreign_keys = ON;",
"PRAGMA busy_timeout = 5000;",
)
def _now_iso() -> str:
return datetime.now(UTC).isoformat(timespec="seconds")
def _new_id() -> str:
return str(uuid.uuid4())
def _connect(db_path: Path) -> sqlite3.Connection:
"""Open a connection with the standard pragmas applied.
WAL mode is sticky on the file once set, so re-applying on every
open is cheap. The other pragmas are per-connection and must be
set each time.
"""
con = sqlite3.connect(str(db_path), isolation_level=None, timeout=5.0)
for pragma in _PRAGMAS:
con.execute(pragma)
return con
def ensure_progress_db(colony_dir: Path) -> Path:
"""Create or migrate ``{colony_dir}/data/progress.db``.
Idempotent: safe to call on an already-initialized DB. Returns the
absolute path to the DB file.
Steps:
1. Ensure ``data/`` subdir exists.
2. Open the DB (creates the file if missing).
3. Apply WAL + pragmas.
4. Read ``PRAGMA user_version``; if < SCHEMA_VERSION, run the
schema block and bump user_version.
5. Reclaim any stale claims left from previous runs.
6. Patch every ``*.json`` worker config in the colony dir to
inject ``input_data.db_path`` and ``input_data.colony_id`` so
pre-existing colonies (forked before this feature landed) get
the tracker wiring on their next spawn.
"""
data_dir = Path(colony_dir) / "data"
data_dir.mkdir(parents=True, exist_ok=True)
db_path = data_dir / "progress.db"
con = _connect(db_path)
try:
current_version = con.execute("PRAGMA user_version").fetchone()[0]
if current_version < SCHEMA_VERSION:
con.executescript(_SCHEMA_V1)
con.execute(f"PRAGMA user_version = {SCHEMA_VERSION}")
con.execute(
"INSERT OR REPLACE INTO colony_meta(key, value, updated_at) VALUES (?, ?, ?)",
("schema_version", str(SCHEMA_VERSION), _now_iso()),
)
logger.info("progress_db: initialized schema v%d at %s", SCHEMA_VERSION, db_path)
reclaimed = _reclaim_stale_inner(con, stale_after_minutes=15)
if reclaimed:
logger.info(
"progress_db: reclaimed %d stale claims at startup (%s)",
reclaimed,
db_path,
)
finally:
con.close()
resolved_db_path = db_path.resolve()
_patch_worker_configs(Path(colony_dir), resolved_db_path)
return resolved_db_path
def _patch_worker_configs(colony_dir: Path, db_path: Path) -> int:
"""Inject ``input_data.db_path`` + ``input_data.colony_id`` +
``input_data.colony_data_dir`` into existing ``worker.json`` files
in a colony directory.
Runs on every ``ensure_progress_db`` call so colonies that were
forked before this feature landed get their worker spawn messages
patched in place. Idempotent: if ``input_data`` already contains
all three values, the file is not rewritten.
Returns the number of files that were actually modified (0 on
the common case of already-patched colonies).
Why ``colony_data_dir``? ``db_path`` alone points agents at
``progress.db``; for anything else (custom SQLite stores, JSON
ledgers, scraped artefacts) they need the *directory* so they
stop creating state under ``~/.hive/skills/``, which holds skill
*definitions*, not runtime data. See
``_default_skills/colony-storage-paths/SKILL.md``.
"""
colony_id = colony_dir.name
abs_db = str(db_path)
abs_data_dir = str(db_path.parent)
patched = 0
for worker_cfg in colony_dir.glob("*.json"):
# Only patch files that look like worker configs (have the
# worker_meta shape). ``metadata.json`` and ``triggers.json``
# are colony-level and must not be touched.
if worker_cfg.name in ("metadata.json", "triggers.json"):
continue
try:
data = json.loads(worker_cfg.read_text(encoding="utf-8"))
except (json.JSONDecodeError, OSError):
continue
if not isinstance(data, dict) or "system_prompt" not in data:
# Not a worker config (lacks the worker_meta schema).
continue
input_data = data.get("input_data")
if not isinstance(input_data, dict):
input_data = {}
if (
input_data.get("db_path") == abs_db
and input_data.get("colony_id") == colony_id
and input_data.get("colony_data_dir") == abs_data_dir
):
continue # already patched
input_data["db_path"] = abs_db
input_data["colony_id"] = colony_id
input_data["colony_data_dir"] = abs_data_dir
data["input_data"] = input_data
try:
worker_cfg.write_text(json.dumps(data, indent=2, ensure_ascii=False), encoding="utf-8")
patched += 1
except OSError as e:
logger.warning("progress_db: failed to patch worker config %s: %s", worker_cfg, e)
if patched:
logger.info(
"progress_db: patched %d worker config(s) in colony '%s' with db_path + colony_data_dir",
patched,
colony_id,
)
return patched
def ensure_all_colony_dbs(colonies_root: Path | None = None) -> list[Path]:
"""Idempotently ensure every existing colony has a progress.db.
Called on framework host startup to backfill older colonies and
run the stale-claim reclaimer on all of them in one pass.
"""
if colonies_root is None:
colonies_root = Path.home() / ".hive" / "colonies"
if not colonies_root.is_dir():
return []
initialized: list[Path] = []
for entry in sorted(colonies_root.iterdir()):
if not entry.is_dir():
continue
try:
initialized.append(ensure_progress_db(entry))
except Exception as e:
logger.warning("progress_db: failed to ensure DB for colony '%s': %s", entry.name, e)
return initialized
def seed_tasks(
db_path: Path,
tasks: list[dict[str, Any]],
*,
source: str = "queen_create",
) -> list[str]:
"""Bulk-insert tasks (with optional nested steps + sop_items).
Each task dict accepts:
- goal: str (required)
- seq: int (optional ordering hint)
- priority: int (default 0)
- payload: dict | str | None (stored as JSON text)
- max_retries: int (default 3)
- parent_task_id: str | None
- steps: list[{"title": str, "detail"?: str}] (optional)
- sop_items: list[{"key": str, "description": str, "required"?: bool, "note"?: str}] (optional)
All rows are inserted in a single BEGIN IMMEDIATE transaction so
10k-row seeds finish in one disk flush. Returns the created task ids
in the same order as input.
"""
if not tasks:
return []
created_ids: list[str] = []
now = _now_iso()
con = _connect(Path(db_path))
try:
con.execute("BEGIN IMMEDIATE")
for idx, task in enumerate(tasks):
goal = task.get("goal")
if not goal:
raise ValueError(f"task[{idx}] missing required 'goal' field")
task_id = task.get("id") or _new_id()
payload = task.get("payload")
if payload is not None and not isinstance(payload, str):
payload = json.dumps(payload, ensure_ascii=False)
con.execute(
"""
INSERT INTO tasks (
id, seq, priority, goal, payload, status,
created_at, updated_at, max_retries, parent_task_id, source
) VALUES (?, ?, ?, ?, ?, 'pending', ?, ?, ?, ?, ?)
""",
(
task_id,
task.get("seq"),
int(task.get("priority", 0)),
goal,
payload,
now,
now,
int(task.get("max_retries", 3)),
task.get("parent_task_id"),
source,
),
)
for step_seq, step in enumerate(task.get("steps") or [], start=1):
if not step.get("title"):
raise ValueError(f"task[{idx}].steps[{step_seq - 1}] missing required 'title'")
con.execute(
"""
INSERT INTO steps (id, task_id, seq, title, detail, status)
VALUES (?, ?, ?, ?, ?, 'pending')
""",
(
_new_id(),
task_id,
step.get("seq", step_seq),
step["title"],
step.get("detail"),
),
)
for sop in task.get("sop_items") or []:
key = sop.get("key")
description = sop.get("description")
if not key or not description:
raise ValueError(f"task[{idx}].sop_items missing 'key' or 'description'")
con.execute(
"""
INSERT INTO sop_checklist
(id, task_id, key, description, required, note)
VALUES (?, ?, ?, ?, ?, ?)
""",
(
_new_id(),
task_id,
key,
description,
1 if sop.get("required", True) else 0,
sop.get("note"),
),
)
created_ids.append(task_id)
con.execute("COMMIT")
except Exception:
con.execute("ROLLBACK")
raise
finally:
con.close()
return created_ids
def enqueue_task(
db_path: Path,
goal: str,
*,
steps: list[dict[str, Any]] | None = None,
sop_items: list[dict[str, Any]] | None = None,
payload: Any = None,
priority: int = 0,
parent_task_id: str | None = None,
source: str = "enqueue_tool",
) -> str:
"""Append a single task to an existing queue. Thin wrapper over seed_tasks."""
ids = seed_tasks(
db_path,
[
{
"goal": goal,
"steps": steps,
"sop_items": sop_items,
"payload": payload,
"priority": priority,
"parent_task_id": parent_task_id,
}
],
source=source,
)
return ids[0]
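A hedged usage sketch of the two seeding entry points above; the colony path, goals, and SOP keys are illustrative, and the DB is assumed to already exist via ensure_progress_db:

from pathlib import Path

db = Path.home() / ".hive" / "colonies" / "demo" / "data" / "progress.db"
task_ids = seed_tasks(
    db,
    [
        {
            "goal": "Scrape the pricing page",
            "priority": 5,
            "steps": [{"title": "Fetch HTML"}, {"title": "Extract the table"}],
            "sop_items": [
                {"key": "evidence", "description": "Attach the raw HTML path"}
            ],
        }
    ],
)
# Follow-up work can be chained onto a seeded task.
followup = enqueue_task(db, "Verify extracted prices", parent_task_id=task_ids[0])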
def _reclaim_stale_inner(con: sqlite3.Connection, *, stale_after_minutes: int) -> int:
"""Reclaim stale claims. Runs inside an existing open connection.
Two-step:
1. Tasks past max_retries go to 'failed' with last_error populated.
2. Remaining stale claims return to 'pending', retry_count++.
"""
cutoff_expr = f"datetime('now', '-{int(stale_after_minutes)} minutes')"
con.execute("BEGIN IMMEDIATE")
try:
con.execute(
f"""
UPDATE tasks
SET status = 'failed',
last_error = COALESCE(last_error, 'exceeded max_retries after stale claim'),
completed_at = datetime('now'),
updated_at = datetime('now')
WHERE status IN ('claimed', 'in_progress')
AND claimed_at IS NOT NULL
AND claimed_at < {cutoff_expr}
AND retry_count >= max_retries
"""
)
cur = con.execute(
f"""
UPDATE tasks
SET status = 'pending',
worker_id = NULL,
claim_token = NULL,
claimed_at = NULL,
started_at = NULL,
retry_count = retry_count + 1,
updated_at = datetime('now')
WHERE status IN ('claimed', 'in_progress')
AND claimed_at IS NOT NULL
AND claimed_at < {cutoff_expr}
AND retry_count < max_retries
"""
)
reclaimed = cur.rowcount or 0
con.execute("COMMIT")
return reclaimed
except Exception:
con.execute("ROLLBACK")
raise
def reclaim_stale(db_path: Path, stale_after_minutes: int = 15) -> int:
"""Public wrapper that opens its own connection."""
con = _connect(Path(db_path))
try:
return _reclaim_stale_inner(con, stale_after_minutes=stale_after_minutes)
finally:
con.close()
__all__ = [
"SCHEMA_VERSION",
"ensure_progress_db",
"ensure_all_colony_dbs",
"seed_tasks",
"enqueue_task",
"reclaim_stale",
]
+2
View File
@@ -2,6 +2,8 @@
import asyncio
import logging
import time
from dataclasses import dataclass, field
from enum import StrEnum
from typing import Any
+7 -2
View File
@@ -136,7 +136,9 @@ class StreamDecisionTracker:
self._run_locks[execution_id] = asyncio.Lock()
self._current_nodes[execution_id] = "unknown"
logger.debug(f"Started run {run_id} for execution {execution_id} in stream {self.stream_id}")
logger.debug(
f"Started run {run_id} for execution {execution_id} in stream {self.stream_id}"
)
return run_id
def end_run(
@@ -332,7 +334,10 @@ class StreamDecisionTracker:
"""
run = self._runs.get(execution_id)
if run is None:
logger.warning(f"report_problem called but no run for execution {execution_id}: [{severity}] {description}")
logger.warning(
f"report_problem called but no run for execution {execution_id}: "
f"[{severity}] {description}"
)
return ""
return run.add_problem(
+2 -1
View File
@@ -89,7 +89,8 @@ class WebhookServer:
)
await self._site.start()
logger.info(
f"Webhook server started on {self._config.host}:{self._config.port} with {len(self._routes)} route(s)"
f"Webhook server started on {self._config.host}:{self._config.port} "
f"with {len(self._routes)} route(s)"
)
async def stop(self) -> None:
+29 -63
View File
@@ -92,7 +92,9 @@ class Worker:
# result.json, data). Required when seed_conversation() is used —
# we deliberately do NOT fall back to CWD, which previously caused
# conversation parts to leak into the process working directory.
self._storage_path: Path | None = Path(storage_path) if storage_path is not None else None
self._storage_path: Path | None = (
Path(storage_path) if storage_path is not None else None
)
self._task_handle: asyncio.Task | None = None
self._started_at: float = 0.0
self._result: WorkerResult | None = None
@@ -145,34 +147,20 @@ class Worker:
self.status = WorkerStatus.RUNNING
self._started_at = time.monotonic()
# Scope browser profile (and any other CONTEXT_PARAMS) to this
# worker. asyncio.create_task() copies the parent's contextvars,
# so without this override every spawned worker inherits the
# queen's `profile=<queen_session_id>` and its browser_* tool
# calls end up driving the queen's Chrome tab group. Setting
# it here (inside the new Task's context) shadows the parent
# value without affecting the queen's ongoing calls.
try:
from framework.loader.tool_registry import ToolRegistry
ToolRegistry.set_execution_context(profile=self.id)
except Exception:
logger.debug(
"Worker %s: failed to scope browser profile",
self.id,
exc_info=True,
)
try:
result = await self._agent_loop.execute(self._context)
duration = time.monotonic() - self._started_at
if result.success:
self.status = WorkerStatus.COMPLETED
self._result = self._build_result(result, duration, default_status="success")
self._result = self._build_result(
result, duration, default_status="success"
)
else:
self.status = WorkerStatus.FAILED
self._result = self._build_result(result, duration, default_status="failed")
self._result = self._build_result(
result, duration, default_status="failed"
)
await self._emit_terminal_events(result)
@@ -188,28 +176,13 @@ class Worker:
except asyncio.CancelledError:
self.status = WorkerStatus.STOPPED
duration = time.monotonic() - self._started_at
# Preserve any explicit report the worker's LLM already filed
# via ``report_to_parent`` before being cancelled — the caller
# cares about that payload even on a hard stop. Only fall back
# to the canned "stopped" message when no explicit report exists.
explicit = self._explicit_report
if explicit is not None:
self._result = WorkerResult(
error="Worker stopped by queen after reporting",
duration_seconds=duration,
status=explicit["status"],
summary=explicit["summary"],
data=explicit["data"],
)
await self._emit_terminal_events(None, force_status=explicit["status"])
else:
self._result = WorkerResult(
error="Worker stopped by queen",
duration_seconds=duration,
status="stopped",
summary="Worker was cancelled before completion.",
)
await self._emit_terminal_events(None, force_status="stopped")
self._result = WorkerResult(
error="Worker stopped by queen",
duration_seconds=duration,
status="stopped",
summary="Worker was cancelled before completion.",
)
await self._emit_terminal_events(None, force_status="stopped")
return self._result
except Exception as exc:
@@ -319,7 +292,11 @@ class Worker:
# EXECUTION_COMPLETED / EXECUTION_FAILED (backwards-compat)
if agent_result is not None:
lifecycle_type = EventType.EXECUTION_COMPLETED if agent_result.success else EventType.EXECUTION_FAILED
lifecycle_type = (
EventType.EXECUTION_COMPLETED
if agent_result.success
else EventType.EXECUTION_FAILED
)
await self._event_bus.publish(
AgentEvent(
type=lifecycle_type,
@@ -332,7 +309,11 @@ class Worker:
"task": self.task,
"success": agent_result.success,
"error": agent_result.error,
"output_keys": (list(agent_result.output.keys()) if agent_result.output else []),
"output_keys": (
list(agent_result.output.keys())
if agent_result.output
else []
),
},
)
)
@@ -367,23 +348,7 @@ class Worker:
async def start_background(self) -> None:
"""Spawn the worker's run() as an asyncio background task."""
self._task_handle = asyncio.create_task(self.run(), name=f"worker:{self.id}")
# Surface any exception that escapes run(); without this callback
# a crash here only becomes visible when stop() eventually awaits
# the handle (and is silently lost if stop() is never called).
self._task_handle.add_done_callback(self._on_task_done)
def _on_task_done(self, task: asyncio.Task) -> None:
if task.cancelled():
return
exc = task.exception()
if exc is not None:
logger.error(
"Worker '%s' background task crashed: %s",
self.id,
exc,
exc_info=exc,
)
self._task_handle = asyncio.create_task(self.run())
async def stop(self) -> None:
"""Cancel the worker's background task, if any."""
@@ -423,7 +388,8 @@ class Worker:
"""
if self.status != WorkerStatus.PENDING:
raise RuntimeError(
f"seed_conversation must be called before start_background (worker {self.id} is {self.status})"
f"seed_conversation must be called before start_background "
f"(worker {self.id} is {self.status})"
)
# Write parts directly to the worker's on-disk conversation store
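The profile-scoping comment earlier in this file hinges on asyncio.create_task copying the parent's contextvars at spawn time. A self-contained sketch of that shadowing behavior (the profile variable here is illustrative, not the real ToolRegistry context):

import asyncio
import contextvars

profile: contextvars.ContextVar[str] = contextvars.ContextVar("profile", default="queen")

async def worker(worker_id: str) -> str:
    # Each task received a copy of the parent's context at create_task time;
    # set() shadows the value only inside this task's copy.
    profile.set(worker_id)
    await asyncio.sleep(0)
    return profile.get()

async def main() -> None:
    tasks = [asyncio.create_task(worker(f"w{i}")) for i in range(3)]
    print(await asyncio.gather(*tasks), profile.get())  # ['w0', 'w1', 'w2'] queen

asyncio.run(main())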
+3 -1
View File
@@ -50,7 +50,9 @@ class AnthropicProvider(LLMProvider):
# Delegate to LiteLLMProvider internally.
self.api_key = api_key or _get_api_key_from_credential_store()
if not self.api_key:
raise ValueError("Anthropic API key required. Set ANTHROPIC_API_KEY env var or pass api_key.")
raise ValueError(
"Anthropic API key required. Set ANTHROPIC_API_KEY env var or pass api_key."
)
self.model = model
+29 -8
View File
@@ -53,9 +53,17 @@ _TOKEN_REFRESH_BUFFER_SECS = 60
# Credentials file in ~/.hive/ (native implementation)
_ACCOUNTS_FILE = Path.home() / ".hive" / "antigravity-accounts.json"
_IDE_STATE_DB_MAC = (
Path.home() / "Library" / "Application Support" / "Antigravity" / "User" / "globalStorage" / "state.vscdb"
Path.home()
/ "Library"
/ "Application Support"
/ "Antigravity"
/ "User"
/ "globalStorage"
/ "state.vscdb"
)
_IDE_STATE_DB_LINUX = (
Path.home() / ".config" / "Antigravity" / "User" / "globalStorage" / "state.vscdb"
)
_IDE_STATE_DB_LINUX = Path.home() / ".config" / "Antigravity" / "User" / "globalStorage" / "state.vscdb"
_IDE_STATE_DB_KEY = "antigravityUnifiedStateSync.oauthToken"
_BASE_HEADERS: dict[str, str] = {
@@ -360,7 +368,9 @@ def _to_gemini_contents(
def _map_finish_reason(reason: str) -> str:
return {"STOP": "stop", "MAX_TOKENS": "max_tokens", "OTHER": "tool_use"}.get((reason or "").upper(), "stop")
return {"STOP": "stop", "MAX_TOKENS": "max_tokens", "OTHER": "tool_use"}.get(
(reason or "").upper(), "stop"
)
def _parse_complete_response(raw: dict[str, Any], model: str) -> LLMResponse:
@@ -528,7 +538,8 @@ class AntigravityProvider(LLMProvider):
return self._access_token
raise RuntimeError(
"No valid Antigravity credentials. Run: uv run python core/antigravity_auth.py auth account add"
"No valid Antigravity credentials. "
"Run: uv run python core/antigravity_auth.py auth account add"
)
# --- Request building -------------------------------------------------- #
@@ -582,7 +593,11 @@ class AntigravityProvider(LLMProvider):
token = self._ensure_token()
body_bytes = json.dumps(body).encode("utf-8")
path = "/v1internal:streamGenerateContent?alt=sse" if streaming else "/v1internal:generateContent"
path = (
"/v1internal:streamGenerateContent?alt=sse"
if streaming
else "/v1internal:generateContent"
)
headers = {
**_BASE_HEADERS,
"Authorization": f"Bearer {token}",
@@ -604,7 +619,9 @@ class AntigravityProvider(LLMProvider):
if result:
self._access_token, self._token_expires_at = result
headers["Authorization"] = f"Bearer {self._access_token}"
req2 = urllib.request.Request(url, data=body_bytes, headers=headers, method="POST")
req2 = urllib.request.Request(
url, data=body_bytes, headers=headers, method="POST"
)
try:
return urllib.request.urlopen(req2, timeout=120) # noqa: S310
except urllib.error.HTTPError as exc2:
@@ -625,7 +642,9 @@ class AntigravityProvider(LLMProvider):
last_exc = exc
continue
raise RuntimeError(f"All Antigravity endpoints failed. Last error: {last_exc}") from last_exc
raise RuntimeError(
f"All Antigravity endpoints failed. Last error: {last_exc}"
) from last_exc
# --- LLMProvider interface --------------------------------------------- #
@@ -664,7 +683,9 @@ class AntigravityProvider(LLMProvider):
try:
body = self._build_body(messages, system, tools, max_tokens)
http_resp = self._post(body, streaming=True)
for event in _parse_sse_stream(http_resp, self.model, self._thought_sigs.__setitem__):
for event in _parse_sse_stream(
http_resp, self.model, self._thought_sigs.__setitem__
):
loop.call_soon_threadsafe(queue.put_nowait, event)
except Exception as exc:
logger.error("Antigravity stream error: %s", exc)
-24
View File
@@ -12,11 +12,6 @@ Vision support rules are derived from official vendor documentation:
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from framework.llm.provider import Tool
def _model_name(model: str) -> str:
"""Return the bare model name after stripping any 'provider/' prefix."""
@@ -109,22 +104,3 @@ def supports_image_tool_results(model: str) -> bool:
# 5. Default: assume vision capable
# Covers: OpenAI, Anthropic, Google, Mistral, Kimi, and other hosted providers
return True
def filter_tools_for_model(tools: list[Tool], model: str) -> tuple[list[Tool], list[str]]:
"""Drop image-producing tools for text-only models.
Returns ``(filtered_tools, hidden_names)``. For vision-capable models
(or when *model* is empty) the input list is returned unchanged and
``hidden_names`` is empty. For text-only models any tool with
``produces_image=True`` is removed so the LLM never sees it in its
schema avoids wasted calls and stale "screenshot failed" entries
in agent memory.
"""
if not model or supports_image_tool_results(model):
return list(tools), []
hidden = [t.name for t in tools if t.produces_image]
if not hidden:
return list(tools), []
kept = [t for t in tools if not t.produces_image]
return kept, hidden
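An illustrative call site for the removed helper, assuming supports_image_tool_results classifies deepseek-chat as text-only (per the vendor-derived rules elided above); the tool names are hypothetical:

tools = [
    Tool(name="browser_screenshot", description="Capture the page", produces_image=True),
    Tool(name="read_file", description="Read a file"),
]
kept, hidden = filter_tools_for_model(tools, "deepseek-chat")
# kept  -> only read_file; hidden -> ["browser_screenshot"]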
+76 -69
View File
@@ -100,7 +100,9 @@ def _patch_litellm_anthropic_oauth() -> None:
result["authorization"] = f"Bearer {token}"
# Merge the OAuth beta header with any existing beta headers.
existing_beta = result.get("anthropic-beta", "")
beta_parts = [b.strip() for b in existing_beta.split(",") if b.strip()] if existing_beta else []
beta_parts = (
[b.strip() for b in existing_beta.split(",") if b.strip()] if existing_beta else []
)
if ANTHROPIC_OAUTH_BETA_HEADER not in beta_parts:
beta_parts.append(ANTHROPIC_OAUTH_BETA_HEADER)
result["anthropic-beta"] = ",".join(beta_parts)
@@ -189,14 +191,6 @@ def _ensure_ollama_chat_prefix(model: str) -> str:
RATE_LIMIT_MAX_RETRIES = 10
RATE_LIMIT_BACKOFF_BASE = 2 # seconds
RATE_LIMIT_MAX_DELAY = 120 # seconds - cap to prevent absurd waits
# Separate, much lower cap for "empty response, finish_reason=stop"
# scenarios. Unlike a real 429, these are rarely transient: Gemini
# returns stop+empty on silently-filtered safety blocks, poisoned
# conversation state (dangling tool_result after compaction), or
# malformed tool schemas. Waiting minutes doesn't fix any of those, so
# give up after 3 attempts (2+4+8 = 14s) and surface an actionable
# error instead of burning 12+ minutes on exponential backoff.
EMPTY_RESPONSE_MAX_RETRIES = 3
MINIMAX_API_BASE = "https://api.minimax.io/v1"
OPENROUTER_API_BASE = "https://openrouter.ai/api/v1"
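The retry constants above imply a capped exponential backoff. A sketch of what _compute_retry_delay (not shown in this diff) plausibly does, assuming the schedule matches the removed "2+4+8 = 14s" note:

def _compute_retry_delay(attempt: int) -> int:
    # attempt 0 -> 2s, 1 -> 4s, 2 -> 8s, ... capped at RATE_LIMIT_MAX_DELAY.
    return min(RATE_LIMIT_BACKOFF_BASE ** (attempt + 1), RATE_LIMIT_MAX_DELAY)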
@@ -260,7 +254,9 @@ def _claude_code_billing_header(messages: list[dict[str, Any]]) -> str:
break
sampled = "".join(_sample_js_code_unit(first_text, i) for i in (4, 7, 20))
version_hash = hashlib.sha256(f"{_CLAUDE_CODE_BILLING_SALT}{sampled}{CLAUDE_CODE_VERSION}".encode()).hexdigest()
version_hash = hashlib.sha256(
f"{_CLAUDE_CODE_BILLING_SALT}{sampled}{CLAUDE_CODE_VERSION}".encode()
).hexdigest()
entrypoint = os.environ.get("CLAUDE_CODE_ENTRYPOINT", "").strip() or "cli"
return (
f"x-anthropic-billing-header: cc_version={CLAUDE_CODE_VERSION}.{version_hash[:3]}; "
@@ -332,7 +328,9 @@ def _prune_failed_request_dumps(max_files: int = MAX_FAILED_REQUEST_DUMPS) -> No
def _remember_openrouter_tool_compat_model(model: str) -> None:
"""Cache OpenRouter tool-compat fallback for a bounded time window."""
OPENROUTER_TOOL_COMPAT_MODEL_CACHE[model] = time.monotonic() + OPENROUTER_TOOL_COMPAT_CACHE_TTL_SECONDS
OPENROUTER_TOOL_COMPAT_MODEL_CACHE[model] = (
time.monotonic() + OPENROUTER_TOOL_COMPAT_CACHE_TTL_SECONDS
)
def _is_openrouter_tool_compat_cached(model: str) -> bool:
@@ -740,14 +738,20 @@ class LiteLLMProvider(LLMProvider):
eh.setdefault("user-agent", CLAUDE_CODE_USER_AGENT)
# The Codex ChatGPT backend (chatgpt.com/backend-api/codex) rejects
# several standard OpenAI params: max_output_tokens, stream_options.
self._codex_backend = bool(self.api_base and "chatgpt.com/backend-api/codex" in self.api_base)
self._codex_backend = bool(
self.api_base and "chatgpt.com/backend-api/codex" in self.api_base
)
# Antigravity routes through a local OpenAI-compatible proxy — no patches needed.
self._antigravity = bool(self.api_base and "localhost:8069" in self.api_base)
if litellm is None:
raise ImportError("LiteLLM is not installed. Please install it with: uv pip install litellm")
raise ImportError(
"LiteLLM is not installed. Please install it with: uv pip install litellm"
)
def reconfigure(self, model: str, api_key: str | None = None, api_base: str | None = None) -> None:
def reconfigure(
self, model: str, api_key: str | None = None, api_base: str | None = None
) -> None:
"""Hot-swap the model, API key, and/or base URL on this provider instance.
Since the same LiteLLMProvider object is shared by reference across the
@@ -772,7 +776,9 @@ class LiteLLMProvider(LLMProvider):
if self._claude_code_oauth:
eh = self.extra_kwargs.setdefault("extra_headers", {})
eh.setdefault("user-agent", CLAUDE_CODE_USER_AGENT)
self._codex_backend = bool(self.api_base and "chatgpt.com/backend-api/codex" in self.api_base)
self._codex_backend = bool(
self.api_base and "chatgpt.com/backend-api/codex" in self.api_base
)
self._antigravity = bool(self.api_base and "localhost:8069" in self.api_base)
# Note: The Codex ChatGPT backend is a Responses API endpoint at
@@ -795,7 +801,9 @@ class LiteLLMProvider(LLMProvider):
return HIVE_API_BASE
return None
def _completion_with_rate_limit_retry(self, max_retries: int | None = None, **kwargs: Any) -> Any:
def _completion_with_rate_limit_retry(
self, max_retries: int | None = None, **kwargs: Any
) -> Any:
"""Call litellm.completion with retry on 429 rate limit errors and empty responses.
When a :class:`KeyPool` is configured, rate-limited keys are rotated
@@ -827,10 +835,15 @@ class LiteLLMProvider(LLMProvider):
None,
)
if last_role == "assistant":
logger.debug("[retry] Empty response after assistant message — expected, not retrying.")
logger.debug(
"[retry] Empty response after assistant message — "
"expected, not retrying."
)
return response
finish_reason = response.choices[0].finish_reason if response.choices else "unknown"
finish_reason = (
response.choices[0].finish_reason if response.choices else "unknown"
)
# Dump full request to file for debugging
token_count, token_method = _estimate_tokens(model, messages)
dump_path = _dump_failed_request(
@@ -859,31 +872,22 @@ class LiteLLMProvider(LLMProvider):
)
return response
empty_cap = min(retries, EMPTY_RESPONSE_MAX_RETRIES)
if attempt >= empty_cap:
if attempt == retries:
logger.error(
f"[retry] GAVE UP on {model} after "
f"{attempt + 1} attempts — empty response "
f"[retry] GAVE UP on {model} after {retries + 1} "
f"attempts — empty response "
f"(finish_reason={finish_reason}, "
f"choices={len(response.choices) if response.choices else 0}). "
f"This is almost never a rate limit despite the "
f"earlier log message — check the dumped request "
f"at {dump_path} for poisoned conversation state "
f"(dangling tool_result after compaction), a "
f"safety-filter trigger in the prompt, or a "
f"malformed tool schema."
f"choices={len(response.choices) if response.choices else 0})"
)
return response
wait = _compute_retry_delay(attempt)
logger.warning(
f"[retry] {model} returned empty response "
f"(finish_reason={finish_reason}, "
f"choices={len(response.choices) if response.choices else 0}). "
f"choices={len(response.choices) if response.choices else 0}) "
f"likely rate limited or quota exceeded. "
f"Retrying in {wait}s "
f"(attempt {attempt + 1}/{empty_cap}). "
f"Note: empty-response retries are capped at "
f"{EMPTY_RESPONSE_MAX_RETRIES} because this is rarely "
f"a transient rate limit on small payloads."
f"(attempt {attempt + 1}/{retries})"
)
time.sleep(wait)
continue
@@ -1029,7 +1033,9 @@ class LiteLLMProvider(LLMProvider):
# Async variants — non-blocking on the event loop
# ------------------------------------------------------------------
async def _acompletion_with_rate_limit_retry(self, max_retries: int | None = None, **kwargs: Any) -> Any:
async def _acompletion_with_rate_limit_retry(
self, max_retries: int | None = None, **kwargs: Any
) -> Any:
"""Async version of _completion_with_rate_limit_retry.
Uses litellm.acompletion and asyncio.sleep instead of blocking calls.
@@ -1055,10 +1061,15 @@ class LiteLLMProvider(LLMProvider):
None,
)
if last_role == "assistant":
logger.debug("[async-retry] Empty response after assistant message — expected, not retrying.")
logger.debug(
"[async-retry] Empty response after assistant message — "
"expected, not retrying."
)
return response
finish_reason = response.choices[0].finish_reason if response.choices else "unknown"
finish_reason = (
response.choices[0].finish_reason if response.choices else "unknown"
)
token_count, token_method = _estimate_tokens(model, messages)
dump_path = _dump_failed_request(
model=model,
@@ -1086,35 +1097,22 @@ class LiteLLMProvider(LLMProvider):
)
return response
# Use a much lower retry cap for empty-response
# recoveries than for real exceptions. These are
# almost never transient (see EMPTY_RESPONSE_MAX_RETRIES
# rationale at the top of the file).
empty_cap = min(retries, EMPTY_RESPONSE_MAX_RETRIES)
if attempt >= empty_cap:
if attempt == retries:
logger.error(
f"[async-retry] GAVE UP on {model} after "
f"{attempt + 1} attempts — empty response "
f"[async-retry] GAVE UP on {model} after {retries + 1} "
f"attempts — empty response "
f"(finish_reason={finish_reason}, "
f"choices={len(response.choices) if response.choices else 0}). "
f"This is almost never a rate limit despite the "
f"earlier log message — check the dumped request "
f"at {dump_path} for poisoned conversation state "
f"(dangling tool_result after compaction), a "
f"safety-filter trigger in the prompt, or a "
f"malformed tool schema."
f"choices={len(response.choices) if response.choices else 0})"
)
return response
wait = _compute_retry_delay(attempt)
logger.warning(
f"[async-retry] {model} returned empty response "
f"(finish_reason={finish_reason}, "
f"choices={len(response.choices) if response.choices else 0}). "
f"choices={len(response.choices) if response.choices else 0}) "
f"likely rate limited or quota exceeded. "
f"Retrying in {wait}s "
f"(attempt {attempt + 1}/{empty_cap}). "
f"Note: empty-response retries are capped at "
f"{EMPTY_RESPONSE_MAX_RETRIES} because this is rarely "
f"a transient rate limit on small payloads."
f"(attempt {attempt + 1}/{retries})"
)
await asyncio.sleep(wait)
continue
@@ -1342,7 +1340,8 @@ class LiteLLMProvider(LLMProvider):
)
return text_tool_content, text_tool_calls
logger.info(
"[openrouter-tool-compat] %s returned non-JSON fallback content; treating it as plain text.",
"[openrouter-tool-compat] %s returned non-JSON fallback content; "
"treating it as plain text.",
self.model,
)
return content.strip(), []
@@ -1494,7 +1493,9 @@ class LiteLLMProvider(LLMProvider):
)
return repaired
raise ValueError(f"Failed to parse tool call arguments for '{tool_name}' (likely truncated JSON).")
raise ValueError(
f"Failed to parse tool call arguments for '{tool_name}' (likely truncated JSON)."
)
def _parse_openrouter_text_tool_calls(
self,
@@ -1651,7 +1652,11 @@ class LiteLLMProvider(LLMProvider):
return [
message
for message in full_messages
if not (message.get("role") == "assistant" and not message.get("content") and not message.get("tool_calls"))
if not (
message.get("role") == "assistant"
and not message.get("content")
and not message.get("tool_calls")
)
]
async def _acomplete_via_openrouter_tool_compat(
@@ -1879,8 +1884,8 @@ class LiteLLMProvider(LLMProvider):
if logger.isEnabledFor(logging.DEBUG) and full_messages:
import json as _json
from datetime import datetime as _dt
from pathlib import Path as _Path
from datetime import datetime as _dt
_debug_dir = _Path.home() / ".hive" / "debug_logs"
_debug_dir.mkdir(parents=True, exist_ok=True)
@@ -1904,7 +1909,9 @@ class LiteLLMProvider(LLMProvider):
}
)
try:
_dump_file.write_text(_json.dumps(_summary, indent=2, ensure_ascii=False), encoding="utf-8")
_dump_file.write_text(
_json.dumps(_summary, indent=2, ensure_ascii=False), encoding="utf-8"
)
logger.debug("[LLM-MSG] %d messages dumped to %s", len(full_messages), _dump_file)
except Exception:
pass
@@ -1929,7 +1936,9 @@ class LiteLLMProvider(LLMProvider):
full_messages = [
m
for m in full_messages
if not (m.get("role") == "assistant" and not m.get("content") and not m.get("tool_calls"))
if not (
m.get("role") == "assistant" and not m.get("content") and not m.get("tool_calls")
)
]
kwargs: dict[str, Any] = {
@@ -1959,10 +1968,6 @@ class LiteLLMProvider(LLMProvider):
if self._codex_backend:
kwargs.pop("max_tokens", None)
kwargs.pop("stream_options", None)
# Pass store directly to OpenAI in case litellm drops it as unknown
if "extra_body" not in kwargs:
kwargs["extra_body"] = {}
kwargs["extra_body"]["store"] = False
request_summary = _summarize_request_for_log(kwargs)
logger.debug(
@@ -2119,7 +2124,8 @@ class LiteLLMProvider(LLMProvider):
else getattr(usage, "cache_read_input_tokens", 0) or 0
)
logger.debug(
"[tokens] finish-chunk usage: input=%d output=%d cached=%d model=%s",
"[tokens] finish-chunk usage: "
"input=%d output=%d cached=%d model=%s",
input_tokens,
output_tokens,
cached_tokens,
@@ -2166,7 +2172,8 @@ class LiteLLMProvider(LLMProvider):
else getattr(_usage, "cache_read_input_tokens", 0) or 0
)
logger.debug(
"[tokens] post-loop chunks fallback: input=%d output=%d cached=%d model=%s",
"[tokens] post-loop chunks fallback:"
" input=%d output=%d cached=%d model=%s",
input_tokens,
output_tokens,
cached_tokens,
+25 -46
View File
@@ -61,14 +61,14 @@
"label": "Gemini 3 Flash - Fast",
"recommended": false,
"max_tokens": 32768,
"max_context_tokens": 240000
"max_context_tokens": 900000
},
{
"id": "gemini-3.1-pro-preview-customtools",
"id": "gemini-3.1-pro-preview",
"label": "Gemini 3.1 Pro - Best quality",
"recommended": true,
"max_tokens": 32768,
"max_context_tokens": 240000
"max_context_tokens": 900000
}
]
},
@@ -115,6 +115,13 @@
"max_tokens": 40960,
"max_context_tokens": 131072
},
{
"id": "llama3.1-8b",
"label": "Llama 3.1 8B - Fastest production",
"recommended": false,
"max_tokens": 8192,
"max_context_tokens": 32768
},
{
"id": "zai-glm-4.7",
"label": "Z.ai GLM 4.7 - Strong coding preview",
@@ -138,15 +145,15 @@
"id": "MiniMax-M2.7",
"label": "MiniMax M2.7 - Best coding quality",
"recommended": true,
"max_tokens": 40960,
"max_context_tokens": 180000
"max_tokens": 32768,
"max_context_tokens": 204800
},
{
"id": "MiniMax-M2.5",
"label": "MiniMax M2.5 - Strong value",
"recommended": false,
"max_tokens": 40960,
"max_context_tokens": 180000
"max_tokens": 32768,
"max_context_tokens": 204800
}
]
},
@@ -281,14 +288,14 @@
"label": "GPT-5.4 - Best overall",
"recommended": true,
"max_tokens": 128000,
"max_context_tokens": 872000
"max_context_tokens": 922000
},
{
"id": "anthropic/claude-sonnet-4.6",
"label": "Claude Sonnet 4.6 - Best coding balance",
"recommended": false,
"max_tokens": 64000,
"max_context_tokens": 872000
"max_context_tokens": 936000
},
{
"id": "anthropic/claude-opus-4.6",
@@ -298,46 +305,18 @@
"max_context_tokens": 872000
},
{
"id": "google/gemini-3.1-pro-preview-customtools",
"id": "google/gemini-3.1-pro-preview",
"label": "Gemini 3.1 Pro Preview - Long-context reasoning",
"recommended": false,
"max_tokens": 32768,
"max_context_tokens": 872000
"max_context_tokens": 1048576
},
{
"id": "qwen/qwen3.6-plus",
"label": "Qwen 3.6 Plus - Strong reasoning",
"recommended": true,
"max_tokens": 32768,
"max_context_tokens": 240000
},
{
"id": "z-ai/glm-5v-turbo",
"label": "GLM-5V Turbo - Vision capable",
"recommended": true,
"max_tokens": 32768,
"max_context_tokens": 192000
},
{
"id": "z-ai/glm-5.1",
"label": "GLM-5.1 - Better but Slower",
"recommended": true,
"max_tokens": 40960,
"max_context_tokens": 192000
},
{
"id": "minimax/minimax-m2.7",
"label": "Minimax M2.7 - Minimax flagship",
"id": "deepseek/deepseek-v3.2",
"label": "DeepSeek V3.2 - Best value",
"recommended": false,
"max_tokens": 40960,
"max_context_tokens": 180000
},
{
"id": "xiaomi/mimo-v2-pro",
"label": "MiMo V2 Pro - Xiaomi multimodal",
"recommended": true,
"max_tokens": 64000,
"max_context_tokens": 872000
"max_tokens": 32768,
"max_context_tokens": 163840
}
]
}
@@ -368,8 +347,8 @@
"provider": "minimax",
"api_key_env_var": "MINIMAX_API_KEY",
"model": "MiniMax-M2.7",
"max_tokens": 40960,
"max_context_tokens": 180800,
"max_tokens": 32768,
"max_context_tokens": 204800,
"api_base": "https://api.minimax.io/v1"
},
"kimi_code": {
@@ -418,4 +397,4 @@
"api_base": "http://localhost:11434"
}
}
}
}
+23 -7
View File
@@ -50,7 +50,9 @@ def _validate_model_catalog(data: dict[str, Any]) -> dict[str, Any]:
if not isinstance(model_id, str) or not model_id.strip():
raise ModelCatalogError(f"{model_path}.id must be a non-empty string")
if model_id in seen_model_ids:
raise ModelCatalogError(f"Duplicate model id {model_id!r} in {provider_path}.models")
raise ModelCatalogError(
f"Duplicate model id {model_id!r} in {provider_path}.models"
)
seen_model_ids.add(model_id)
if model_id == default_model:
@@ -89,11 +91,17 @@ def _validate_model_catalog(data: dict[str, Any]) -> dict[str, Any]:
api_base = preset_map.get("api_base")
if api_base is not None and (not isinstance(api_base, str) or not api_base.strip()):
raise ModelCatalogError(f"{preset_path}.api_base must be a non-empty string when present")
raise ModelCatalogError(
f"{preset_path}.api_base must be a non-empty string when present"
)
api_key_env_var = preset_map.get("api_key_env_var")
if api_key_env_var is not None and (not isinstance(api_key_env_var, str) or not api_key_env_var.strip()):
raise ModelCatalogError(f"{preset_path}.api_key_env_var must be a non-empty string when present")
if api_key_env_var is not None and (
not isinstance(api_key_env_var, str) or not api_key_env_var.strip()
):
raise ModelCatalogError(
f"{preset_path}.api_key_env_var must be a non-empty string when present"
)
for key in ("max_tokens", "max_context_tokens"):
value = preset_map.get(key)
@@ -102,7 +110,9 @@ def _validate_model_catalog(data: dict[str, Any]) -> dict[str, Any]:
model_choices = preset_map.get("model_choices")
if model_choices is not None:
for idx, choice in enumerate(_require_list(model_choices, f"{preset_path}.model_choices")):
for idx, choice in enumerate(
_require_list(model_choices, f"{preset_path}.model_choices")
):
choice_path = f"{preset_path}.model_choices[{idx}]"
choice_map = _require_mapping(choice, choice_path)
choice_id = choice_map.get("id")
@@ -134,13 +144,19 @@ def load_model_catalog() -> dict[str, Any]:
def get_models_catalogue() -> dict[str, list[dict[str, Any]]]:
"""Return provider -> model list."""
providers = load_model_catalog()["providers"]
return {provider_id: copy.deepcopy(provider_info["models"]) for provider_id, provider_info in providers.items()}
return {
provider_id: copy.deepcopy(provider_info["models"])
for provider_id, provider_info in providers.items()
}
def get_default_models() -> dict[str, str]:
"""Return provider -> default model id."""
providers = load_model_catalog()["providers"]
return {provider_id: str(provider_info["default_model"]) for provider_id, provider_info in providers.items()}
return {
provider_id: str(provider_info["default_model"])
for provider_id, provider_info in providers.items()
}
def get_provider_models(provider: str) -> list[dict[str, Any]]:
-9
View File
@@ -27,15 +27,6 @@ class Tool:
name: str
description: str
parameters: dict[str, Any] = field(default_factory=dict)
# If True, the tool may return ImageContent in its result. Text-only models
# (e.g. glm-5, deepseek-chat) have this hidden from their schema entirely.
produces_image: bool = False
# If True, this tool performs no filesystem/process/network writes and is
# safe to run concurrently with other safe-flagged tools inside the same
# assistant turn. Unsafe tools (writes, shell, browser actions) are always
# serialized after the safe batch. Default False - the conservative choice
# when a tool's behavior isn't explicitly vetted.
concurrency_safe: bool = False
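A hedged sketch of the scheduling rule the removed concurrency_safe flag encodes; execute() is a hypothetical dispatcher, not framework API:

import asyncio

async def execute(tool, args):  # hypothetical dispatcher
    return f"{tool.name}({args})"

async def run_turn(calls):
    safe = [(t, a) for t, a in calls if t.concurrency_safe]
    unsafe = [(t, a) for t, a in calls if not t.concurrency_safe]
    # Safe tools fan out concurrently inside the same assistant turn...
    results = list(await asyncio.gather(*(execute(t, a) for t, a in safe)))
    # ...unsafe tools (writes, shell, browser actions) run serialized after.
    for t, a in unsafe:
        results.append(await execute(t, a))
    return results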
@dataclass
+38 -32
View File
@@ -9,7 +9,7 @@ from datetime import UTC
from pathlib import Path
from typing import Any
from framework.config import get_hive_config, get_preferred_model
from framework.config import get_hive_config, get_max_context_tokens, get_preferred_model
from framework.credentials.validation import (
ensure_credential_key_env as _ensure_credential_key_env,
)
@@ -20,12 +20,14 @@ from framework.loader.preload_validation import run_preload_validation
from framework.loader.tool_registry import ToolRegistry
from framework.orchestrator import Goal
from framework.orchestrator.edge import (
DEFAULT_MAX_TOKENS,
EdgeCondition,
EdgeSpec,
GraphSpec,
)
from framework.orchestrator.node import NodeSpec
from framework.orchestrator.orchestrator import ExecutionResult
from framework.tools.flowchart_utils import generate_fallback_flowchart
logger = logging.getLogger(__name__)
@@ -553,10 +555,18 @@ def get_kimi_code_token() -> str | None:
# VSCode-style SQLite state database under the key
# "antigravityUnifiedStateSync.oauthToken" as a base64-encoded protobuf blob.
ANTIGRAVITY_IDE_STATE_DB = (
Path.home() / "Library" / "Application Support" / "Antigravity" / "User" / "globalStorage" / "state.vscdb"
Path.home()
/ "Library"
/ "Application Support"
/ "Antigravity"
/ "User"
/ "globalStorage"
/ "state.vscdb"
)
# Linux fallback for the IDE state DB
ANTIGRAVITY_IDE_STATE_DB_LINUX = Path.home() / ".config" / "Antigravity" / "User" / "globalStorage" / "state.vscdb"
ANTIGRAVITY_IDE_STATE_DB_LINUX = (
Path.home() / ".config" / "Antigravity" / "User" / "globalStorage" / "state.vscdb"
)
# Antigravity credentials stored by native OAuth implementation
ANTIGRAVITY_AUTH_FILE = Path.home() / ".hive" / "antigravity-accounts.json"
@@ -700,7 +710,9 @@ def _is_antigravity_token_expired(auth_data: dict) -> bool:
return True
elif isinstance(last_refresh_val, str):
try:
last_refresh_val = datetime.fromisoformat(last_refresh_val.replace("Z", "+00:00")).timestamp()
last_refresh_val = datetime.fromisoformat(
last_refresh_val.replace("Z", "+00:00")
).timestamp()
except (ValueError, TypeError):
return True
@@ -831,7 +843,8 @@ def get_antigravity_token() -> str | None:
return token_data["access_token"]
logger.warning(
"Antigravity token refresh failed. Re-open the Antigravity IDE or run 'antigravity-auth accounts add'."
"Antigravity token refresh failed. "
"Re-open the Antigravity IDE or run 'antigravity-auth accounts add'."
)
return access_token
@@ -1242,16 +1255,10 @@ class AgentLoader:
if tools_path.exists():
self._tool_registry.discover_from_module(tools_path)
# Per-agent env for MCP subprocesses. Stored on the registry so
# parallel workers in the same process don't clobber each other
# via the shared os.environ dict — the registry merges these
# into every MCPServerConfig.env at registration time.
self._tool_registry.set_mcp_extra_env(
{
"HIVE_AGENT_NAME": agent_path.name,
"HIVE_STORAGE_PATH": str(self._storage_path),
}
)
# Set environment variables for MCP subprocesses
# These are inherited by MCP servers (e.g., GCU browser tools)
os.environ["HIVE_AGENT_NAME"] = agent_path.name
os.environ["HIVE_STORAGE_PATH"] = str(self._storage_path)
# MCP tools are loaded by McpRegistryStage in the pipeline during AgentHost.start()
@@ -1284,7 +1291,11 @@ class AgentLoader:
# Evict cached submodules first (e.g. deep_research_agent.nodes,
# deep_research_agent.agent) so the top-level reload picks up
# changes in the entire package — not just __init__.py.
stale = [name for name in sys.modules if name == package_name or name.startswith(f"{package_name}.")]
stale = [
name
for name in sys.modules
if name == package_name or name.startswith(f"{package_name}.")
]
for name in stale:
del sys.modules[name]
@@ -1333,7 +1344,7 @@ class AgentLoader:
if not worker_jsons:
raise FileNotFoundError(f"No worker config found in {agent_path}")
from framework.orchestrator.edge import GraphSpec
from framework.orchestrator.edge import EdgeSpec, GraphSpec
from framework.orchestrator.goal import Constraint, Goal as GoalModel, SuccessCriterion
from framework.orchestrator.node import NodeSpec
@@ -1404,18 +1415,7 @@ class AgentLoader:
credential_store=credential_store,
)
runner._agent_default_skills = None
# Colony workers attached to a SQLite task queue get the
# colony-progress-tracker skill pre-activated so its full
# claim / step / SOP-gate protocol lands in the system prompt
# on turn 0, bypassing the progressive-disclosure catalog
# lookup. Triggered by the presence of ``input_data.db_path``
# in worker.json (written by fork_session_into_colony and
# backfilled by ensure_progress_db for pre-existing colonies).
_preactivate: list[str] = []
_input_data = first_worker.get("input_data") or {}
if isinstance(_input_data, dict) and _input_data.get("db_path"):
_preactivate.append("hive.colony-progress-tracker")
runner._agent_skills = _preactivate or None
runner._agent_skills = None
return runner
def register_tool(
@@ -1549,6 +1549,7 @@ class AgentLoader:
]
# Merge user-configured stages from ~/.hive/configuration.json
from framework.config import get_hive_config
from framework.pipeline.registry import build_pipeline_from_config
hive_config = get_hive_config()
@@ -1561,7 +1562,9 @@ class AgentLoader:
if agent_json.exists():
try:
agent_pipeline = (
_json.loads(agent_json.read_text(encoding="utf-8")).get("pipeline", {}).get("stages", [])
_json.loads(agent_json.read_text(encoding="utf-8"))
.get("pipeline", {})
.get("stages", [])
)
if agent_pipeline:
agent_stages = build_pipeline_from_config(agent_pipeline)
@@ -1977,7 +1980,8 @@ class AgentLoader:
for sc in self.goal.success_criteria
],
constraints=[
{"id": c.id, "description": c.description, "type": c.constraint_type} for c in self.goal.constraints
{"id": c.id, "description": c.description, "type": c.constraint_type}
for c in self.goal.constraints
],
required_tools=sorted(required_tools),
has_tools_module=(self.agent_path / "tools.py").exists(),
@@ -2048,7 +2052,9 @@ class AgentLoader:
if api_key_env and not os.environ.get(api_key_env):
if api_key_env not in missing_credentials:
missing_credentials.append(api_key_env)
warnings.append(f"Agent has LLM nodes but {api_key_env} not set (model: {self.model})")
warnings.append(
f"Agent has LLM nodes but {api_key_env} not set (model: {self.model})"
)
return ValidationResult(
valid=len(errors) == 0,
+38 -92
View File
@@ -17,15 +17,14 @@ from __future__ import annotations
import argparse
import asyncio
import json
import os
import shutil
import subprocess
import sys
import threading
from pathlib import Path
from typing import Any
from urllib import error as urlerror, parse as urlparse, request as urlrequest
# ---------------------------------------------------------------------------
# Public registration
# ---------------------------------------------------------------------------
@@ -86,10 +85,6 @@ def _register_open(subparsers: argparse._SubParsersAction) -> None:
def cmd_serve(args: argparse.Namespace) -> int:
"""Start the HTTP API server (the runtime hub)."""
import atexit
import logging
import signal
from aiohttp import web
_build_frontend()
@@ -99,67 +94,16 @@ def cmd_serve(args: argparse.Namespace) -> int:
if getattr(args, "debug", False):
configure_logging(level="DEBUG")
else:
elif getattr(args, "verbose", False):
configure_logging(level="INFO")
# Last-resort MCP cleanup. Runs on any process exit path, including
# crashes — so hung MCP subprocesses don't outlive the server. The
# graceful shutdown path below also disconnects clients; atexit is
# belt-and-braces and no-ops if already cleaned.
def _atexit_cleanup_mcp() -> None:
try:
from framework.loader.mcp_connection_manager import MCPConnectionManager
MCPConnectionManager.get_instance().cleanup_all()
except Exception as exc: # noqa: BLE001
logging.getLogger(__name__).debug("atexit MCP cleanup failed: %s", exc)
atexit.register(_atexit_cleanup_mcp)
else:
configure_logging(level="WARNING")
model = getattr(args, "model", None)
app = create_app(model=model)
async def run_server() -> None:
manager = app["manager"]
shutdown_event = asyncio.Event()
signal_count = {"n": 0}
def _request_shutdown(signame: str) -> None:
signal_count["n"] += 1
if signal_count["n"] == 1:
print(f"\nReceived {signame}, shutting down gracefully… (press Ctrl+C again to force quit)")
shutdown_event.set()
else:
# Second Ctrl+C (or SIGTERM) — the user is done waiting.
# Skip the graceful teardown and exit immediately. os._exit
# bypasses atexit handlers, so fire the MCP cleanup manually
# first to avoid leaking subprocesses.
print(f"\nReceived {signame} again — force quitting.")
try:
from framework.loader.mcp_connection_manager import (
MCPConnectionManager,
)
MCPConnectionManager.get_instance().cleanup_all()
except Exception: # noqa: BLE001
pass
os._exit(130)
# Register SIGTERM (and explicit SIGINT) so container orchestrators
# and plain Ctrl-C both route through the same graceful path —
# manager.shutdown_all() flushes state and disconnects MCP clients.
loop = asyncio.get_running_loop()
for signame in ("SIGINT", "SIGTERM"):
try:
loop.add_signal_handler(
getattr(signal, signame),
_request_shutdown,
signame,
)
except (NotImplementedError, AttributeError):
# Windows / restricted environments — fall back to default
# handlers (KeyboardInterrupt for SIGINT; SIGTERM kills).
pass
# Preload colonies specified via --colony
for colony_arg in getattr(args, "colony", []) or []:
@@ -168,7 +112,9 @@ def cmd_serve(args: argparse.Namespace) -> int:
print(f"Colony not found: {colony_arg}")
continue
try:
session = await manager.create_session_with_worker_colony(str(colony_path), model=model)
session = await manager.create_session_with_worker_colony(
str(colony_path), model=model
)
info = session.worker_info
name = info.name if info else session.colony_id
print(f"Loaded colony: {session.colony_id} ({name}) → session {session.id}")
@@ -199,7 +145,7 @@ def cmd_serve(args: argparse.Namespace) -> int:
_open_browser(dashboard_url)
try:
await shutdown_event.wait()
await asyncio.Event().wait()
except asyncio.CancelledError:
pass
finally:
@@ -215,13 +161,7 @@ def cmd_serve(args: argparse.Namespace) -> int:
def cmd_open(args: argparse.Namespace) -> int:
"""Start the HTTP server and open the dashboard in the browser."""
# Don't block local startup on a best-effort analytics probe.
threading.Thread(
target=_ping_hive_gateway_availability,
args=("hive-open",),
daemon=True,
name="hive-open-gateway-ping",
).start()
_ping_hive_gateway_availability("hive-open")
args.open = True
return cmd_serve(args)
@@ -320,14 +260,12 @@ def cmd_queen_sessions(args: argparse.Namespace) -> int:
meta = json.loads(meta_path.read_text(encoding="utf-8"))
except Exception:
meta = {}
rows.append(
{
"session_id": session_dir.name,
"phase": meta.get("phase", "?"),
"agent_path": meta.get("agent_path", ""),
"colony_fork": bool(meta.get("colony_fork")),
}
)
rows.append({
"session_id": session_dir.name,
"phase": meta.get("phase", "?"),
"agent_path": meta.get("agent_path", ""),
"colony_fork": bool(meta.get("colony_fork")),
})
if args.json:
print(json.dumps(rows, indent=2))
@@ -401,18 +339,18 @@ def cmd_colony_list(args: argparse.Namespace) -> int:
except Exception:
meta = {}
worker_count = sum(
1 for f in path.iterdir() if f.is_file() and f.suffix == ".json" and f.stem not in _RESERVED_JSON_STEMS
)
rows.append(
{
"name": path.name,
"queen_name": meta.get("queen_name", ""),
"queen_session_id": meta.get("queen_session_id", ""),
"workers": worker_count,
"created_at": meta.get("created_at", ""),
"path": str(path),
}
1
for f in path.iterdir()
if f.is_file() and f.suffix == ".json" and f.stem not in _RESERVED_JSON_STEMS
)
rows.append({
"name": path.name,
"queen_name": meta.get("queen_name", ""),
"queen_session_id": meta.get("queen_session_id", ""),
"workers": worker_count,
"created_at": meta.get("created_at", ""),
"path": str(path),
})
if args.json:
print(json.dumps(rows, indent=2))
@@ -425,7 +363,9 @@ def cmd_colony_list(args: argparse.Namespace) -> int:
print(f"{'NAME':<24} {'QUEEN':<28} {'WORKERS':<8} CREATED")
print("-" * 90)
for r in rows:
print(f"{r['name']:<24} {r['queen_name']:<28} {r['workers']:<8} {r['created_at'][:19]}")
print(
f"{r['name']:<24} {r['queen_name']:<28} {r['workers']:<8} {r['created_at'][:19]}"
)
return 0
@@ -652,7 +592,9 @@ def _http_get(url: str, timeout: float = 10.0) -> dict:
def _http_post(url: str, body: dict, timeout: float = 30.0) -> dict:
data = json.dumps(body).encode("utf-8")
req = urlrequest.Request(url, data=data, method="POST", headers={"Content-Type": "application/json"})
req = urlrequest.Request(
url, data=data, method="POST", headers={"Content-Type": "application/json"}
)
with urlrequest.urlopen(req, timeout=timeout) as r:
return json.loads(r.read().decode("utf-8"))
@@ -708,7 +650,9 @@ def _open_browser(url: str) -> None:
try:
if sys.platform == "darwin":
subprocess.Popen(["open", url], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
subprocess.Popen(
["open", url], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL
)
elif sys.platform == "win32":
subprocess.Popen(
["cmd", "/c", "start", "", url],
@@ -716,7 +660,9 @@ def _open_browser(url: str) -> None:
stderr=subprocess.DEVNULL,
)
elif sys.platform == "linux":
subprocess.Popen(["xdg-open", url], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
subprocess.Popen(
["xdg-open", url], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL
)
except Exception:
pass
+19 -74
View File
@@ -267,7 +267,9 @@ class MCPClient:
try:
response = self._http_client.get("/health")
response.raise_for_status()
logger.info(f"Connected to MCP server '{self.config.name}' via HTTP at {self.config.url}")
logger.info(
f"Connected to MCP server '{self.config.name}' via HTTP at {self.config.url}"
)
except Exception as e:
logger.warning(f"Health check failed for MCP server '{self.config.name}': {e}")
# Continue anyway, server might not have health endpoint
@@ -375,8 +377,9 @@ class MCPClient:
self._tools[tool.name] = tool
tool_names = list(self._tools.keys())
logger.info(f"Discovered {len(self._tools)} tools from '{self.config.name}'")
logger.debug(f"Discovered tools from '{self.config.name}': {tool_names}")
logger.info(
f"Discovered {len(self._tools)} tools from '{self.config.name}': {tool_names}"
)
except Exception as e:
logger.error(f"Failed to discover tools from '{self.config.name}': {e}")
raise
@@ -461,12 +464,8 @@ class MCPClient:
)
if self.config.transport == "stdio":
def _stdio_call() -> Any:
with self._stdio_call_lock:
return self._run_async(self._call_tool_stdio_async(tool_name, arguments))
return self._call_tool_with_retry(_stdio_call)
with self._stdio_call_lock:
return self._run_async(self._call_tool_stdio_async(tool_name, arguments))
elif self.config.transport == "sse":
return self._call_tool_with_retry(
lambda: self._run_async(self._call_tool_stdio_async(tool_name, arguments))
@@ -476,70 +475,10 @@ class MCPClient:
else:
return self._call_tool_http(tool_name, arguments)
# Exceptions that indicate the STDIO session/subprocess is dead and
# needs a fresh connect(). Keep this narrow — we don't want to mask
# tool-level errors as transport errors.
_STDIO_DEAD_SESSION_ERRORS = (
BrokenPipeError,
ConnectionError,
ConnectionResetError,
EOFError,
)
def _is_stdio_dead_session_error(self, exc: BaseException) -> bool:
if isinstance(exc, self._STDIO_DEAD_SESSION_ERRORS):
return True
# mcp SDK frequently wraps transport errors in RuntimeError with a
# readable message — match on the common signals.
if isinstance(exc, RuntimeError):
msg = str(exc).lower()
for needle in (
"broken pipe",
"connection closed",
"connection reset",
"stream closed",
"session not initialized",
"transport closed",
"anyio.closedresourceerror",
"read operation was cancelled",
):
if needle in msg:
return True
return False
def _call_tool_with_retry(self, call: Any) -> Any:
"""Retry once after reconnecting when the transport looks dead.
Applies to all transports:
- **stdio**: if the subprocess died (broken pipe, closed stream,
session not initialized), tear it down and start a fresh one.
- **sse / unix / http** (httpx-backed): same treatment for
``httpx.ConnectError`` / ``httpx.ReadTimeout``.
"""
"""Retry transient MCP transport failures once after reconnecting."""
if self.config.transport == "stdio":
try:
return call()
except BaseException as original_error:
if not self._is_stdio_dead_session_error(original_error):
raise
logger.warning(
"Retrying MCP STDIO tool call after dead-session signal from '%s': %s",
self.config.name,
original_error,
)
try:
self._reconnect()
except Exception as reconnect_error:
logger.warning(
"Reconnect failed for MCP STDIO server '%s': %s",
self.config.name,
reconnect_error,
)
raise original_error from reconnect_error
try:
return call()
except BaseException as retry_error:
raise original_error from retry_error
return call()
if self.config.transport not in {"unix", "sse"}:
return call()
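Reviewer note: the block removed here implements a pattern worth keeping in mind when re-reviewing the new dispatch above: classify dead-transport errors narrowly, reconnect, retry exactly once, and chain exceptions so the original failure stays visible. A minimal sketch of that pattern (simplified names, not the exact MCPClient code):
```python
# Sketch of the retry-once-after-reconnect pattern (assumed
# simplification of the removed _call_tool_with_retry).
import logging

logger = logging.getLogger(__name__)

_DEAD_SESSION_SIGNALS = ("broken pipe", "connection closed", "stream closed")

def call_with_reconnect_retry(call, reconnect, server_name):
    try:
        return call()
    except (BrokenPipeError, ConnectionError, EOFError, RuntimeError) as original:
        if isinstance(original, RuntimeError) and not any(
            s in str(original).lower() for s in _DEAD_SESSION_SIGNALS
        ):
            raise  # tool-level RuntimeError, not a dead transport
        logger.warning("Retrying '%s' after dead-session signal: %s", server_name, original)
        try:
            reconnect()
        except Exception as reconnect_error:
            raise original from reconnect_error  # surface the first failure
        try:
            return call()
        except BaseException as retry_error:
            raise original from retry_error
```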
@@ -664,7 +603,9 @@ class MCPClient:
if self._session:
await self._session.__aexit__(None, None, None)
except asyncio.CancelledError:
logger.warning("MCP session cleanup was cancelled; proceeding with best-effort shutdown")
logger.warning(
"MCP session cleanup was cancelled; proceeding with best-effort shutdown"
)
except Exception as e:
logger.warning(f"Error closing MCP session: {e}")
finally:
@@ -675,7 +616,9 @@ class MCPClient:
if self._stdio_context:
await self._stdio_context.__aexit__(None, None, None)
except asyncio.CancelledError:
logger.debug("STDIO context cleanup was cancelled; proceeding with best-effort shutdown")
logger.debug(
"STDIO context cleanup was cancelled; proceeding with best-effort shutdown"
)
except Exception as e:
msg = str(e).lower()
if "cancel scope" in msg or "different task" in msg:
@@ -716,7 +659,9 @@ class MCPClient:
# any exceptions that may occur if the loop stops between these calls.
if self._loop.is_running():
try:
cleanup_future = asyncio.run_coroutine_threadsafe(self._cleanup_stdio_async(), self._loop)
cleanup_future = asyncio.run_coroutine_threadsafe(
self._cleanup_stdio_async(), self._loop
)
cleanup_future.result(timeout=self._CLEANUP_TIMEOUT)
cleanup_attempted = True
except TimeoutError:
@@ -74,7 +74,8 @@ class MCPConnectionManager:
if not should_connect:
if not transition_event.wait(timeout=_TRANSITION_TIMEOUT):
logger.warning(
"Timed out waiting for transition on MCP server '%s', forcing cleanup and retrying",
"Timed out waiting for transition on MCP server '%s', "
"forcing cleanup and retrying",
server_name,
)
with self._pool_lock:
@@ -98,7 +99,10 @@ class MCPConnectionManager:
current = self._transitions.get(server_name)
if current is transition_event:
self._transitions.pop(server_name, None)
if server_name not in self._pool and self._refcounts.get(server_name, 0) <= 0:
if (
server_name not in self._pool
and self._refcounts.get(server_name, 0) <= 0
):
self._configs.pop(server_name, None)
transition_event.set()
raise
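Reviewer note: both connection-manager hunks touch the same synchronization idea; a per-server `threading.Event` marks an in-flight connect/disconnect so other threads wait instead of racing. A stripped-down sketch of the shape (hypothetical class; the real manager also tracks refcounts and configs):
```python
# Per-name transition guard: one Event per in-flight transition, so
# concurrent acquirers wait for the owner instead of double-connecting.
import threading

class TransitionGuard:
    def __init__(self):
        self._lock = threading.Lock()
        self._transitions = {}  # name -> threading.Event

    def begin(self, name):
        """Return (we_own_it, event). The owner performs the transition."""
        with self._lock:
            evt = self._transitions.get(name)
            if evt is None:
                evt = threading.Event()
                self._transitions[name] = evt
                return True, evt
            return False, evt

    def finish(self, name, evt):
        with self._lock:
            if self._transitions.get(name) is evt:
                self._transitions.pop(name, None)
        evt.set()  # wake waiters whether or not we were still current
```
Non-owners call `evt.wait(timeout=...)` and, as in the hunk above, force cleanup when the wait times out.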
@@ -320,7 +324,8 @@ class MCPConnectionManager:
self._transitions.pop(server_name, None)
transition_event.set()
logger.info(
"Reconnected MCP server '%s' but refcount dropped to 0, discarding new client",
"Reconnected MCP server '%s' but refcount dropped to 0, "
"discarding new client",
server_name,
)
try:
@@ -331,7 +336,9 @@ class MCPConnectionManager:
server_name,
exc_info=True,
)
raise KeyError(f"MCP server '{server_name}' was fully released during reconnect")
raise KeyError(
f"MCP server '{server_name}' was fully released during reconnect"
)
self._pool[server_name] = new_client
self._configs[server_name] = config
@@ -373,7 +380,8 @@ class MCPConnectionManager:
all_resolved = all(event.wait(timeout=_TRANSITION_TIMEOUT) for event in pending)
if not all_resolved:
logger.warning(
"Timed out waiting for pending transitions during cleanup, forcing cleanup of stuck transitions",
"Timed out waiting for pending transitions during cleanup, "
"forcing cleanup of stuck transitions",
)
with self._pool_lock:
for sn, evt in list(self._transitions.items()):
+3 -1
View File
@@ -23,7 +23,9 @@ class MCPError(ValueError):
self.what = what
self.why = why
self.fix = fix
self.message = f"[{self.code.value}]\nWhat failed: {self.what}\nWhy: {self.why}\nFix: {self.fix}"
self.message = (
f"[{self.code.value}]\nWhat failed: {self.what}\nWhy: {self.why}\nFix: {self.fix}"
)
super().__init__(self.message)
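The What/Why/Fix format keeps CLI errors actionable. A hypothetical usage sketch (`ErrorCode` is a stand-in; the real enum lives in mcp_errors):
```python
# Assumed minimal stand-in demonstrating the structured message format.
from enum import Enum

class ErrorCode(Enum):
    SERVER_NOT_FOUND = "MCP-404"

class MCPError(ValueError):
    def __init__(self, code, what, why, fix):
        self.code, self.what, self.why, self.fix = code, what, why, fix
        self.message = (
            f"[{self.code.value}]\nWhat failed: {self.what}\nWhy: {self.why}\nFix: {self.fix}"
        )
        super().__init__(self.message)

# raise MCPError(
#     ErrorCode.SERVER_NOT_FOUND,
#     what="install 'jira'",
#     why="'jira' is not in the registry index",
#     fix="run `hive mcp update` to refresh the index",
# )
```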
+5 -89
View File
@@ -24,7 +24,9 @@ from framework.loader.mcp_errors import (
logger = logging.getLogger(__name__)
DEFAULT_INDEX_URL = "https://raw.githubusercontent.com/aden-hive/hive-mcp-registry/main/registry_index.json"
DEFAULT_INDEX_URL = (
"https://raw.githubusercontent.com/aden-hive/hive-mcp-registry/main/registry_index.json"
)
DEFAULT_REFRESH_INTERVAL_HOURS = 24
_LAST_FETCHED_FILENAME = "last_fetched"
_LEGACY_LAST_FETCHED_FILENAME = "last_fetched.json"
@@ -34,32 +36,6 @@ _DEFAULT_CONFIG = {
"refresh_interval_hours": DEFAULT_REFRESH_INTERVAL_HOURS,
}
# Default local MCP servers that ship with Hive. Seeded on first startup so
# fresh users get working file I/O, browser automation, and the hive tool
# suite without having to run `hive mcp add` manually. ``cwd`` is filled in
# at registration time with the absolute path to the ``tools/`` directory.
_DEFAULT_LOCAL_SERVERS: dict[str, dict[str, Any]] = {
"hive_tools": {
"description": "Hive tools: web search, email, CRM, calendar, and 100+ integrations",
"args": ["run", "python", "mcp_server.py", "--stdio"],
},
"gcu-tools": {
"description": "Browser automation: click, type, navigate, screenshot, snapshot",
"args": ["run", "python", "-m", "gcu.server", "--stdio"],
},
"files-tools": {
"description": "File I/O: read, write, edit, search, list, run commands",
"args": ["run", "python", "files_server.py", "--stdio"],
},
}
# Aliases that earlier versions of ensure_defaults wrote under the wrong name.
# When we see one of these stale entries, drop it before seeding the canonical
# name so the active agents (queen, credential_tester) can find their tools.
_STALE_DEFAULT_ALIASES: dict[str, str] = {
"hive_tools": "hive-tools",
}
class MCPRegistry:
"""Manages local MCP server state in ~/.hive/mcp_registry/."""
@@ -83,67 +59,6 @@ class MCPRegistry:
if not self._installed_path.exists():
self._write_json(self._installed_path, {"servers": {}})
def ensure_defaults(self) -> list[str]:
"""Seed the built-in local MCP servers (hive-tools, gcu-tools, files-tools).
Idempotent: servers already present are left untouched. Skips seeding
entirely when the source-tree ``tools/`` directory cannot be located
(e.g. when Hive is installed from a wheel rather than a checkout).
Returns the list of names that were newly registered.
"""
self.initialize()
# parents: [0]=loader, [1]=framework, [2]=core, [3]=repo root
tools_dir = Path(__file__).resolve().parents[3] / "tools"
if not tools_dir.is_dir():
logger.debug(
"MCPRegistry.ensure_defaults: tools dir %s missing; skipping default seed",
tools_dir,
)
return []
cwd = str(tools_dir)
data = self._read_installed()
existing = data.get("servers", {})
added: list[str] = []
# Drop stale aliases (from earlier versions that wrote the wrong name).
# Only remove the alias when the canonical name isn't already installed,
# so we never clobber a hand-edited entry the user cares about.
mutated = False
for canonical, stale in _STALE_DEFAULT_ALIASES.items():
if stale in existing and canonical not in existing:
logger.info(
"MCPRegistry.ensure_defaults: removing stale alias '%s' (canonical: '%s')",
stale,
canonical,
)
del existing[stale]
mutated = True
if mutated:
self._write_installed(data)
for name, spec in _DEFAULT_LOCAL_SERVERS.items():
if name in existing:
continue
try:
self.add_local(
name=name,
transport="stdio",
command="uv",
args=list(spec["args"]),
cwd=cwd,
description=spec["description"],
)
added.append(name)
except MCPError as exc:
logger.warning("MCPRegistry.ensure_defaults: failed to seed '%s': %s", name, exc)
if added:
logger.info("MCPRegistry: seeded default local servers: %s", added)
return added
# ── Internal I/O ────────────────────────────────────────────────
def _read_installed(self) -> dict:
@@ -705,7 +620,8 @@ class MCPRegistry:
pinned_version = versions[name]
if installed_version != pinned_version:
logger.warning(
"Server '%s' version mismatch: installed=%s, pinned=%s. Run: hive mcp update %s",
"Server '%s' version mismatch: installed=%s, pinned=%s. "
"Run: hive mcp update %s",
name,
installed_version,
pinned_version,
+30 -35
View File
@@ -151,7 +151,10 @@ def _parse_key_value_pairs(values: list[str]) -> dict[str, str]:
result = {}
for item in values:
if "=" not in item:
raise ValueError(f"Invalid format: '{item}'. Expected KEY=VALUE.\nExample: --set JIRA_API_TOKEN=abc123")
raise ValueError(
f"Invalid format: '{item}'. Expected KEY=VALUE.\n"
f"Example: --set JIRA_API_TOKEN=abc123"
)
key, _, value = item.partition("=")
if not key:
raise ValueError(f"Invalid format: '{item}'. Key cannot be empty.")
@@ -297,8 +300,12 @@ def register_mcp_commands(subparsers) -> None:
# ── install ──
install_p = mcp_sub.add_parser("install", help="Install a server from the registry")
install_p.add_argument("name", help="Server name in the registry")
install_p.add_argument("--version", dest="version", default=None, help="Pin to a specific version")
install_p.add_argument("--transport", default=None, help="Override default transport (stdio, http, unix, sse)")
install_p.add_argument(
"--version", dest="version", default=None, help="Pin to a specific version"
)
install_p.add_argument(
"--transport", default=None, help="Override default transport (stdio, http, unix, sse)"
)
install_p.set_defaults(func=cmd_mcp_install)
# ── add ──
@@ -335,7 +342,9 @@ def register_mcp_commands(subparsers) -> None:
# ── list ──
list_p = mcp_sub.add_parser("list", help="List servers")
list_p.add_argument("--available", action="store_true", help="Show available servers from registry")
list_p.add_argument(
"--available", action="store_true", help="Show available servers from registry"
)
list_p.add_argument("--json", dest="output_json", action="store_true", help="Output as JSON")
list_p.set_defaults(func=cmd_mcp_list)
@@ -355,7 +364,9 @@ def register_mcp_commands(subparsers) -> None:
metavar="KEY=VAL",
help="Set environment variable overrides",
)
config_p.add_argument("--set-header", dest="set_header", nargs="+", metavar="KEY=VAL", help="Set header overrides")
config_p.add_argument(
"--set-header", dest="set_header", nargs="+", metavar="KEY=VAL", help="Set header overrides"
)
config_p.set_defaults(func=cmd_mcp_config)
# ── search ──
@@ -370,15 +381,10 @@ def register_mcp_commands(subparsers) -> None:
health_p.add_argument("--json", dest="output_json", action="store_true", help="Output as JSON")
health_p.set_defaults(func=cmd_mcp_health)
# ── init ──
init_p = mcp_sub.add_parser(
"init",
help="Initialize the local MCP registry and seed built-in servers",
)
init_p.set_defaults(func=cmd_mcp_init)
# ── update ──
update_p = mcp_sub.add_parser("update", help="Update installed servers or refresh the registry index")
update_p = mcp_sub.add_parser(
"update", help="Update installed servers or refresh the registry index"
)
update_p.add_argument(
"name",
nargs="?",
@@ -482,7 +488,8 @@ def _cmd_mcp_add_from_manifest(registry, manifest_path: str) -> int:
manifest = json.loads(path.read_text(encoding="utf-8"))
except json.JSONDecodeError as exc:
print(
f"Error: invalid JSON in {manifest_path}: {exc}\nValidate with: python -m json.tool {manifest_path}",
f"Error: invalid JSON in {manifest_path}: {exc}\n"
f"Validate with: python -m json.tool {manifest_path}",
file=sys.stderr,
)
return 1
@@ -681,7 +688,8 @@ def cmd_mcp_config(args) -> int:
server = registry.get_server(args.name)
if server is None:
print(
f"Error: server '{args.name}' is not installed.\nRun 'hive mcp list' to see installed servers.",
f"Error: server '{args.name}' is not installed.\n"
f"Run 'hive mcp list' to see installed servers.",
file=sys.stderr,
)
return 1
@@ -778,23 +786,6 @@ def cmd_mcp_health(args) -> int:
return 0
def cmd_mcp_init(args) -> int:
"""Initialize the local MCP registry and seed built-in local servers."""
registry = _get_registry()
try:
added = registry.ensure_defaults()
except Exception as exc:
print(f"Error: failed to initialize MCP registry: {exc}", file=sys.stderr)
return 1
if added:
for name in added:
print(f"✓ Registered {name}")
else:
print("✓ MCP registry already initialized (no changes)")
return 0
def cmd_mcp_update(args) -> int:
"""Update a single server, or refresh the index and update all registry servers."""
registry = _get_registry()
@@ -807,7 +798,8 @@ def cmd_mcp_update(args) -> int:
count = registry.update_index()
except Exception as exc:
print(
f"Error: failed to update registry index: {exc}\nCheck your network connection and try again.",
f"Error: failed to update registry index: {exc}\n"
f"Check your network connection and try again.",
file=sys.stderr,
)
return 1
@@ -816,7 +808,9 @@ def cmd_mcp_update(args) -> int:
# Step 2: update all installed registry servers (skip local/pinned)
installed = registry.list_installed()
registry_servers = [s for s in installed if s.get("source") == "registry" and not s.get("pinned")]
registry_servers = [
s for s in installed if s.get("source") == "registry" and not s.get("pinned")
]
if not registry_servers:
return 0
@@ -844,7 +838,8 @@ def _cmd_mcp_update_server(name: str, registry=None) -> int:
server = registry.get_server(name)
if server is None:
print(
f"Error: server '{name}' is not installed.\nRun 'hive mcp install {name}' to install it.",
f"Error: server '{name}' is not installed.\n"
f"Run 'hive mcp install {name}' to install it.",
file=sys.stderr,
)
return 1
+3 -1
View File
@@ -98,7 +98,9 @@ def validate_credentials(
if not result.success:
# Preserve the original validation_result so callers can
# inspect which credentials are still missing.
exc = CredentialError("Credential setup incomplete. Run again after configuring the required credentials.")
exc = CredentialError(
"Credential setup incomplete. Run again after configuring the required credentials."
)
if hasattr(e, "validation_result"):
exc.validation_result = e.validation_result # type: ignore[attr-defined]
if hasattr(e, "failed_cred_names"):
+31 -257
View File
@@ -7,7 +7,6 @@ import inspect
import json
import logging
import os
import re
from collections.abc import Callable
from dataclasses import dataclass
from pathlib import Path
@@ -19,16 +18,6 @@ logger = logging.getLogger(__name__)
_INPUT_LOG_MAX_LEN = 500
# Tools whose names match this pattern are assumed to return ImageContent.
# Matched against the bare tool name (case-insensitive). Used to mark MCP
# tools with produces_image=True so they can be filtered out for text-only
# models before the schema is ever shown to the LLM (avoids wasted calls
# and "screenshot failed" entries polluting memory).
_IMAGE_TOOL_NAME_RE = re.compile(
r"(screenshot|screen_capture|capture_image|render_image|get_image|snapshot_image)",
re.IGNORECASE,
)
# Per-execution context overrides. Each asyncio task (and thus each
# concurrent graph execution) gets its own copy, so there are no races
# when multiple ExecutionStreams run in parallel.
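Reviewer note: the comment above is load-bearing; per-task isolation comes from `contextvars`, which asyncio copies into every new task. A self-contained sketch of why a ContextVar dict is race-free where a module-level dict is not (names hypothetical):
```python
# Each asyncio task gets its own snapshot of the ContextVar, so one
# stream's overrides never leak into a concurrently running stream.
import asyncio
import contextvars

_overrides = contextvars.ContextVar("overrides", default={})

async def run_stream(agent_id):
    ctx = dict(_overrides.get())  # copy-on-write: never mutate the shared default
    ctx["agent_id"] = agent_id
    _overrides.set(ctx)
    await asyncio.sleep(0)  # yield to the other stream
    return _overrides.get()["agent_id"]  # still ours, not a neighbor's

async def main():
    results = await asyncio.gather(run_stream("a"), run_stream("b"))
    assert results == ["a", "b"]

asyncio.run(main())
```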
@@ -61,33 +50,6 @@ class ToolRegistry:
# and auto-injected at call time for tools that accept them.
CONTEXT_PARAMS = frozenset({"agent_id", "data_dir", "profile"})
# Tools that perform no filesystem/process/network writes and are safe
# to run concurrently with other safe tools in the same assistant turn.
# Unknown tools default to unsafe (serialized) - adding a name here is
# an explicit promise about that tool's side effects. Keep this list
# conservative: anything that mutates state, writes to disk, issues
# POST/PUT/DELETE requests, or drives a browser MUST NOT be listed.
CONCURRENCY_SAFE_TOOLS = frozenset(
{
# File system reads
"read_file",
"list_directory",
"grep",
"glob",
# Web reads
"web_search",
"web_fetch",
# Browser read-only snapshots (mutate-free observations)
"browser_screenshot",
"browser_snapshot",
"browser_console",
"browser_get_text",
# Background bash polling - reads output buffers only, does
# not touch the subprocess itself.
"bash_output",
}
)
# Credential directory used for change detection
_CREDENTIAL_DIR = Path("~/.hive/credentials/credentials").expanduser()
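Reviewer note: with the allowlist removed in this hunk, it is worth remembering what it fed. A sketch of the kind of gating an executor can build on a safe-tool set (assumed shape, not the framework's actual scheduler):
```python
# Read-only tools run concurrently; anything not on the allowlist is
# serialized behind one lock, matching the "unknown defaults to unsafe"
# rule in the removed comment.
import asyncio

SAFE = {"read_file", "grep", "web_fetch"}

async def run_tool_calls(calls, execute):
    write_lock = asyncio.Lock()

    async def one(name, args):
        if name in SAFE:
            return await execute(name, args)
        async with write_lock:  # unsafe/unknown tools: one at a time
            return await execute(name, args)

    return await asyncio.gather(*(one(n, a) for n, a in calls))
```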
@@ -104,24 +66,9 @@ class ToolRegistry:
self._mcp_cred_snapshot: set[str] = set() # Credential filenames at MCP load time
self._mcp_aden_key_snapshot: str | None = None # ADEN_API_KEY value at MCP load time
self._mcp_server_tools: dict[str, set[str]] = {} # server name -> tool names
# tool name -> owning MCPClient (for force-kill on timeout)
self._mcp_tool_clients: dict[str, Any] = {}
# Per-agent env injected into every MCP server config.env. Kept
# here (not on the process-wide os.environ) so parallel workers
# in the same interpreter don't clobber each other's identity.
self._mcp_extra_env: dict[str, str] = {}
# Agent dir for re-loading registry MCP after credential resync.
self._mcp_registry_agent_path: Path | None = None
def set_mcp_extra_env(self, env: dict[str, str]) -> None:
"""Attach per-agent env vars to every MCPServerConfig this registry builds.
Use this instead of mutating ``os.environ``: the global env dict
is shared across all workers in a single interpreter, so writes
from one worker race with MCP spawns from another.
"""
self._mcp_extra_env = dict(env)
def register(
self,
name: str,
@@ -190,7 +137,6 @@ class ToolRegistry:
"properties": properties,
"required": required,
},
concurrency_safe=tool_name in self.CONCURRENCY_SAFE_TOOLS,
)
def executor(inputs: dict) -> Any:
@@ -257,7 +203,10 @@ class ToolRegistry:
str(e),
)
return {
"error": (f"Invalid JSON response from tool '{tool_name}': {str(e)}"),
"error": (
f"Invalid JSON response from tool '{tool_name}': "
f"{str(e)}"
),
"raw_content": result.content,
}
return result
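The convention this hunk rewraps: a tool's malformed JSON must not raise into the agent loop; it degrades to a structured error carrying the raw payload so the LLM can still inspect it. Self-contained sketch:
```python
# Tolerant decode: structured error + raw content instead of an exception.
import json

def decode_tool_result(tool_name, content):
    try:
        return json.loads(content)
    except json.JSONDecodeError as e:
        return {
            "error": f"Invalid JSON response from tool '{tool_name}': {e}",
            "raw_content": content,
        }

assert decode_tool_result("t", '{"ok": 1}') == {"ok": 1}
assert "raw_content" in decode_tool_result("t", "not json")
```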
@@ -377,9 +326,6 @@ class ToolRegistry:
is_error=True,
)
# Expose force-kill hook so the timeout handler can tear down a
# hung MCP subprocess (asyncio.wait_for alone cannot).
executor.kill_for_tool = registry_ref.kill_mcp_for_tool # type: ignore[attr-defined]
return executor
def get_registered_names(self) -> list[str]:
@@ -426,13 +372,15 @@ class ToolRegistry:
"""Resolve cwd and script paths for MCP stdio config (Windows compatibility).
Use this when building MCPServerConfig from a config file (e.g. in
list_agent_tools, discover_mcp_tools) so hive_tools and other servers
list_agent_tools, discover_mcp_tools) so hive-tools and other servers
work on Windows. Call with base_dir = directory containing the config.
"""
registry = ToolRegistry()
return registry._resolve_mcp_server_config(server_config, base_dir)
def _resolve_mcp_server_config(self, server_config: dict[str, Any], base_dir: Path) -> dict[str, Any]:
def _resolve_mcp_server_config(
self, server_config: dict[str, Any], base_dir: Path
) -> dict[str, Any]:
"""Resolve cwd and script paths for MCP stdio servers (Windows compatibility).
On Windows, passing cwd to subprocess can cause WinError 267. We use cwd=None
@@ -497,22 +445,12 @@ class ToolRegistry:
config["cwd"] = str(resolved_cwd)
return config
# For coder_tools_server, inject --project-root so reads land
# in the expected workspace (hive repo, for framework skills
# and docs), and inject --write-root so writes land under
# ~/.hive/workspace/ instead of polluting the git checkout
# with queen-authored skills, ledgers, and scripts. Without
# the split, every ``write_file`` call from the queen landed
# in the hive repo root.
# For coder_tools_server, inject --project-root so writes go to the expected workspace
if script_name and "coder_tools" in script_name:
project_root = str(resolved_cwd.parent.resolve())
args = list(args)
if "--project-root" not in args:
args.extend(["--project-root", project_root])
if "--write-root" not in args:
_write_root = Path.home() / ".hive" / "workspace"
_write_root.mkdir(parents=True, exist_ok=True)
args.extend(["--write-root", str(_write_root)])
config["args"] = args
if os.name == "nt":
@@ -557,7 +495,8 @@ class ToolRegistry:
server_list = [{"name": name, **cfg} for name, cfg in config.items()]
resolved_server_list = [
self._resolve_mcp_server_config(server_config, base_dir) for server_config in server_list
self._resolve_mcp_server_config(server_config, base_dir)
for server_config in server_list
]
# Ordered first-wins for duplicate tool names across servers; keep tools.py tools.
self.load_registry_servers(
@@ -571,8 +510,6 @@ class ToolRegistry:
self._mcp_cred_snapshot = self._snapshot_credentials()
self._mcp_aden_key_snapshot = os.environ.get("ADEN_API_KEY")
self._log_registry_snapshot("after load_mcp_config")
def _register_mcp_server_with_retry(
self,
server_config: dict[str, Any],
@@ -581,18 +518,8 @@ class ToolRegistry:
tool_cap: int | None = None,
log_collisions: bool = False,
) -> tuple[bool, int, str | None]:
"""Register a single MCP server with one retry for transient failures.
When ``preserve_existing_tools=True`` and the server's tools are
already present from a prior registration, ``register_mcp_server``
returns ``count=0`` because every tool was shadowed. That's a
no-op success, not a failure; don't retry / warn in that case.
Otherwise a duplicate-init path (e.g. a worker spawn re-loading
the MCP servers the queen already registered) spams shadow
warnings, sleeps 2s, and retries for no reason.
"""
"""Register a single MCP server with one retry for transient failures."""
name = server_config.get("name", "unknown")
already_loaded = bool(self._mcp_server_tools.get(name))
last_error: str | None = None
for attempt in range(2):
@@ -605,10 +532,6 @@ class ToolRegistry:
)
if count > 0:
return True, count, None
if already_loaded and preserve_existing_tools:
# All tools shadowed by the prior registration of
# the same server — nothing to do, server is usable.
return True, 0, None
last_error = "registered 0 tools"
except Exception as exc:
last_error = str(exc)
@@ -721,17 +644,13 @@ class ToolRegistry:
from framework.loader.mcp_client import MCPClient, MCPServerConfig
from framework.loader.mcp_connection_manager import MCPConnectionManager
# Build config object. Merge per-agent env on top of the
# server's own env so MCP subprocesses receive the identity
# of the worker that spawned them (instead of whichever
# worker most recently wrote to os.environ).
merged_env = {**self._mcp_extra_env, **(server_config.get("env") or {})}
# Build config object
config = MCPServerConfig(
name=server_config["name"],
transport=server_config["transport"],
command=server_config.get("command"),
args=server_config.get("args", []),
env=merged_env,
env=server_config.get("env", {}),
cwd=server_config.get("cwd"),
url=server_config.get("url"),
headers=server_config.get("headers", {}),
@@ -757,37 +676,22 @@ class ToolRegistry:
server_name = server_config["name"]
if server_name not in self._mcp_server_tools:
self._mcp_server_tools[server_name] = set()
# Build admission gate: only admit MCP tools that are either
# (a) credential-backed *and* have a configured account, or
# (b) credential-less *and* listed in the verified manifest.
# Servers that don't expose `__aden_verified_manifest` (third-party
# MCP servers) bypass the gate entirely — preserves prior behavior.
admit = self._build_mcp_admission_gate(client)
count = 0
admitted_names: list[str] = []
for mcp_tool in client.list_tools():
if not admit(mcp_tool.name):
continue
if tool_cap is not None and count >= tool_cap:
break
if preserve_existing_tools and mcp_tool.name in self._tools:
if log_collisions:
origin_server = self._find_mcp_origin_server_for_tool(mcp_tool.name) or "<existing>"
# Don't warn when a server is being re-registered
# by itself — that's a redundant-init case (e.g.
# the same tool_registry seeing the same server
# twice via pooled reconnect), not a real
# cross-server shadow worth flagging.
if origin_server != server_name:
logger.warning(
"MCP tool '%s' from '%s' shadowed by '%s' (loaded first)",
mcp_tool.name,
server_name,
origin_server,
)
origin_server = (
self._find_mcp_origin_server_for_tool(mcp_tool.name) or "<existing>"
)
logger.warning(
"MCP tool '%s' from '%s' shadowed by '%s' (loaded first)",
mcp_tool.name,
server_name,
origin_server,
)
# Skip registration; do not update MCP tool bookkeeping for this server.
continue
@@ -810,11 +714,17 @@ class ToolRegistry:
base_context.update(exec_ctx)
# Only inject context params the tool accepts
filtered_context = {k: v for k, v in base_context.items() if k in tool_params}
filtered_context = {
k: v for k, v in base_context.items() if k in tool_params
}
# Strip context params from LLM inputs — the framework
# values are authoritative (prevents the LLM from passing
# e.g. data_dir="/data" and overriding the real path).
clean_inputs = {k: v for k, v in inputs.items() if k not in registry_ref.CONTEXT_PARAMS}
clean_inputs = {
k: v
for k, v in inputs.items()
if k not in registry_ref.CONTEXT_PARAMS
}
merged_inputs = {**clean_inputs, **filtered_context}
result = client_ref.call_tool(tool_name, merged_inputs)
# MCP client already extracts content (returns str
@@ -847,9 +757,7 @@ class ToolRegistry:
make_mcp_executor(client, mcp_tool.name, self, tool_params),
)
self._mcp_tool_names.add(mcp_tool.name)
self._mcp_tool_clients[mcp_tool.name] = client
self._mcp_server_tools[server_name].add(mcp_tool.name)
admitted_names.append(mcp_tool.name)
count += 1
logger.info(
@@ -861,12 +769,6 @@ class ToolRegistry:
"skipped_reason": None,
},
)
logger.info(
"MCP server '%s' admitted %d tool(s): %s",
config.name,
len(admitted_names),
sorted(admitted_names),
)
return count
except Exception as e:
@@ -892,104 +794,6 @@ class ToolRegistry:
return server_name
return None
def _log_registry_snapshot(self, context: str) -> None:
"""Emit a one-line summary of the current tool registry.
Called after every tool-list mutation (initial load + resync) so that
operators can correlate "what tools does the queen have right now"
with credential changes and MCP server lifecycle events. Per-server
contents are already logged by `register_mcp_server`; this is just the
rollup so the resync path also gets a single anchor line.
"""
per_server_counts = {server: len(names) for server, names in self._mcp_server_tools.items()}
non_mcp_count = len(self._tools) - len(self._mcp_tool_names)
logger.info(
"ToolRegistry snapshot (%s): total=%d, mcp=%d, non_mcp=%d, per_server=%s",
context,
len(self._tools),
len(self._mcp_tool_names),
non_mcp_count,
per_server_counts,
)
_MCP_VERIFIED_MANIFEST_TOOL = "__aden_verified_manifest"
def _build_mcp_admission_gate(self, client: Any) -> Callable[[str], bool]:
"""Build a per-server predicate that filters MCP tools at registration.
Rules:
* The sentinel manifest tool itself is never admitted.
* Credential-backed tools (provider in `tool_provider_map`) are
admitted only when at least one account exists for that provider.
* Credential-less tools are admitted only when they appear in the
server's verified manifest.
* Servers that don't expose a manifest bypass the verified gate
entirely (third-party MCP servers behave as before).
"""
verified_names: set[str] = set()
manifest_present = False
# Only probe the sentinel when the server actually advertises it.
# Calling ``__aden_verified_manifest`` unconditionally on every
# MCP server at registration time (a) causes a bogus tool call
# round-trip to every third-party server, (b) pollutes any
# call-capturing fakes in tests, and (c) risks side effects on
# servers that eagerly execute unknown tool names. Listing is
# cheap and cached by the client; this keeps the manifest gate
# active for aden-flavoured servers without penalising others.
sentinel_advertised = False
try:
for t in client.list_tools():
if getattr(t, "name", None) == self._MCP_VERIFIED_MANIFEST_TOOL:
sentinel_advertised = True
break
except Exception:
sentinel_advertised = False
if sentinel_advertised:
try:
raw = client.call_tool(self._MCP_VERIFIED_MANIFEST_TOOL, {})
parsed: Any = raw
if isinstance(raw, str):
try:
parsed = json.loads(raw)
except json.JSONDecodeError:
parsed = None
# Only treat the response as a manifest when it's a list
# of strings. A malformed response shouldn't flip the gate
# on and silently hide every real tool from the server.
if isinstance(parsed, list) and all(isinstance(n, str) for n in parsed):
verified_names = set(parsed)
manifest_present = True
except Exception:
# Server advertised the sentinel but errored when called
# — treat as no manifest; fall back to third-party bypass.
pass
tool_provider_map: dict[str, str] = {}
live_providers: set[str] = set()
try:
from aden_tools.credentials.store_adapter import CredentialStoreAdapter
adapter = CredentialStoreAdapter.default()
tool_provider_map = adapter.get_tool_provider_map()
live_providers = {a.get("provider", "") for a in adapter.get_all_account_info() if a.get("provider")}
except Exception:
logger.debug("Credential snapshot unavailable for MCP gate", exc_info=True)
def admit(tool_name: str) -> bool:
if tool_name == self._MCP_VERIFIED_MANIFEST_TOOL:
return False
provider = tool_provider_map.get(tool_name)
if provider:
# Credentialed tool — needs an account.
return provider in live_providers
if not manifest_present:
# Third-party MCP server: preserve legacy "admit everything".
return True
return tool_name in verified_names
return admit
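For review, the gate's decision table distills to a small predicate. A sketch under the rules in the docstring above (simplified: the real code also probes the manifest sentinel tool):
```python
# Distilled admission rules: credentialed tools need a live account;
# credential-less tools need the verified manifest; servers without a
# manifest keep the legacy "admit everything" behavior.
def build_admission_gate(manifest_present, verified_names, tool_provider_map, live_providers):
    def admit(tool_name):
        provider = tool_provider_map.get(tool_name)
        if provider:
            return provider in live_providers  # credentialed: needs an account
        if not manifest_present:
            return True  # third-party server: legacy bypass
        return tool_name in verified_names  # credential-less: must be verified
    return admit

admit = build_admission_gate(True, {"search"}, {"send_email": "gmail"}, set())
assert admit("search") and not admit("send_email") and not admit("rm_rf")
```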
def _convert_mcp_tool_to_framework_tool(self, mcp_tool: Any) -> Tool:
"""
Convert an MCP tool to a framework Tool.
@@ -1019,8 +823,6 @@ class ToolRegistry:
"properties": properties,
"required": required,
},
produces_image=bool(_IMAGE_TOOL_NAME_RE.search(mcp_tool.name or "")),
concurrency_safe=mcp_tool.name in self.CONCURRENCY_SAFE_TOOLS,
)
return tool
@@ -1168,7 +970,6 @@ class ToolRegistry:
self.reload_registry_mcp_servers_after_resync()
logger.info("MCP server resync complete")
self._log_registry_snapshot("after resync_mcp_servers_if_needed")
return True
def cleanup(self) -> None:
@@ -1195,33 +996,6 @@ class ToolRegistry:
self._mcp_clients.clear()
self._mcp_client_servers.clear()
self._mcp_managed_clients.clear()
self._mcp_tool_clients.clear()
def kill_mcp_for_tool(self, tool_name: str) -> bool:
"""Force-disconnect the MCP client that owns *tool_name*.
Called from the timeout handler in ``execute_tool`` when a tool
call hangs. Plain ``asyncio.wait_for`` cancellation cannot stop
a sync executor running inside a thread pool (and therefore
cannot stop the MCP subprocess), so we reach through to the
client here and tear it down. The next ``call_tool`` triggers
an automatic reconnect.
Returns True if a client was found and disconnect was attempted.
"""
client = self._mcp_tool_clients.get(tool_name)
if client is None:
return False
try:
logger.warning(
"Force-disconnecting MCP client for hung tool '%s' on server '%s'",
tool_name,
getattr(client.config, "name", "?"),
)
client.disconnect()
except Exception as exc:
logger.warning("Error force-disconnecting MCP client for '%s': %s", tool_name, exc)
return True
def __del__(self):
"""Destructor to ensure cleanup."""
@@ -50,7 +50,11 @@ class CheckpointConfig:
Returns:
True if should check for old checkpoints and prune them
"""
return self.enabled and self.prune_every_n_nodes > 0 and nodes_executed % self.prune_every_n_nodes == 0
return (
self.enabled
and self.prune_every_n_nodes > 0
and nodes_executed % self.prune_every_n_nodes == 0
)
# Default configuration for most agents
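The cadence check is plain modulo arithmetic; with `prune_every_n_nodes=5`, pruning fires after nodes 5, 10, 15, and so on. A runnable stand-in (the dataclass here is assumed, mirroring only the fields visible in this hunk):
```python
# Assumed minimal stand-in for the prune-cadence check above.
from dataclasses import dataclass

@dataclass
class CheckpointConfig:
    enabled: bool = True
    prune_every_n_nodes: int = 5

    def should_prune(self, nodes_executed):
        return (
            self.enabled
            and self.prune_every_n_nodes > 0
            and nodes_executed % self.prune_every_n_nodes == 0
        )

cfg = CheckpointConfig()
assert [n for n in range(1, 12) if cfg.should_prune(n)] == [5, 10]
```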
+3 -1
View File
@@ -175,7 +175,9 @@ def _resolve_available_tools(
return always_tools
declared = set(node_spec.tools)
declared_tools = [t for t in tools if t.name in declared and t.name not in _ALWAYS_AVAILABLE_TOOLS]
declared_tools = [
t for t in tools if t.name in declared and t.name not in _ALWAYS_AVAILABLE_TOOLS
]
return always_tools + declared_tools
@@ -169,7 +169,11 @@ class ContextHandoff:
key_hint = ""
if output_keys:
key_hint = "\nThe following output keys are especially important: " + ", ".join(output_keys) + ".\n"
key_hint = (
"\nThe following output keys are especially important: "
+ ", ".join(output_keys)
+ ".\n"
)
system_prompt = (
"You are a concise summarizer. Given the conversation below, "
+14 -5
View File
@@ -186,7 +186,8 @@ class EdgeSpec(BaseModel):
expr_vars = {
k: repr(context[k])
for k in context
if k not in ("output", "buffer", "result", "true", "false") and k in self.condition_expr
if k not in ("output", "buffer", "result", "true", "false")
and k in self.condition_expr
}
logger.info(
" Edge %s: condition '%s'%s (vars: %s)",
@@ -332,8 +333,12 @@ class GraphSpec(BaseModel):
default_factory=dict,
description="Named entry points for resuming execution. Format: {name: node_id}",
)
terminal_nodes: list[str] = Field(default_factory=list, description="IDs of nodes that end execution")
pause_nodes: list[str] = Field(default_factory=list, description="IDs of nodes that pause execution for HITL input")
terminal_nodes: list[str] = Field(
default_factory=list, description="IDs of nodes that end execution"
)
pause_nodes: list[str] = Field(
default_factory=list, description="IDs of nodes that pause execution for HITL input"
)
# Components
nodes: list[Any] = Field( # NodeSpec, but avoiding circular import
@@ -342,7 +347,9 @@ class GraphSpec(BaseModel):
edges: list[EdgeSpec] = Field(default_factory=list, description="All edge specifications")
# Data buffer keys
buffer_keys: list[str] = Field(default_factory=list, description="Keys available in data buffer")
buffer_keys: list[str] = Field(
default_factory=list, description="Keys available in data buffer"
)
# Default LLM settings
default_model: str = "claude-haiku-4-5-20251001"
@@ -550,7 +557,9 @@ class GraphSpec(BaseModel):
fan_outs = self.detect_fan_out_nodes()
for source_id, targets in fan_outs.items():
event_loop_targets = [
t for t in targets if self.get_node(t) and getattr(self.get_node(t), "node_type", "") == "event_loop"
t
for t in targets
if self.get_node(t) and getattr(self.get_node(t), "node_type", "") == "event_loop"
]
if len(event_loop_targets) > 1:
seen_keys: dict[str, str] = {}
+155 -141
View File
@@ -1,19 +1,12 @@
"""Browser automation best-practices prompt.
This module provides ``GCU_BROWSER_SYSTEM_PROMPT`` — a canonical set of
This module provides ``GCU_BROWSER_SYSTEM_PROMPT`` -- a canonical set of
browser automation guidelines that can be included in any node's system
prompt that uses browser tools from the gcu-tools MCP server.
Browser tools are registered via the global MCP registry (gcu-tools).
Nodes that need browser access declare ``tools: {policy: "all"}`` in their
agent.json config.
Note: the canonical source of truth for browser automation guidance is
the ``browser-automation`` default skill at
``core/framework/skills/_default_skills/browser-automation/SKILL.md``.
Activate that skill for the full decision tree. This module holds a
compact subset suitable for direct inlining into a node's system prompt
when a skill activation is not desired.
"""
GCU_BROWSER_SYSTEM_PROMPT = """\
@@ -21,151 +14,172 @@ GCU_BROWSER_SYSTEM_PROMPT = """\
Follow these rules for reliable, efficient browser interaction.
## Pick the right reading tool
## Reading Pages
- ALWAYS prefer `browser_snapshot` over `browser_get_text("body")`;
it returns a compact ~1-5 KB accessibility tree vs 100+ KB of raw HTML.
- Interaction tools (`browser_click`, `browser_type`, `browser_fill`,
`browser_scroll`, etc.) return a page snapshot automatically in their
result. Use it to decide your next action; do NOT call
`browser_snapshot` separately after every action.
Only call `browser_snapshot` when you need a fresh view without
performing an action, or after setting `auto_snapshot=false`.
- Do NOT use `browser_screenshot` to read text; use
`browser_snapshot` for that (compact, searchable, fast).
- DO use `browser_screenshot` when you need visual context:
charts, images, canvas elements, layout verification, or when
the snapshot doesn't capture what you need.
- Only fall back to `browser_get_text` for extracting specific
small elements by CSS selector.
- **`browser_snapshot`**: compact accessibility tree. Fast, cheap, good
for static / text-heavy pages where the DOM matches what's visually
rendered (docs, forms, search results, settings pages).
- **`browser_screenshot`**: visual capture + scale metadata. Use when
the snapshot does not show the thing you need, when refs look stale,
or when you need visual position/layout to act. This is common on
complex SPAs (LinkedIn, X / Twitter, Reddit, Gmail, Notion, Slack,
Discord), shadow DOM, and virtual scrolling.
Use snapshot first for structure and ordinary controls; switch to
screenshot when snapshot can't find or verify the target. Interaction
tools (`browser_click`, `browser_type`, `browser_type_focused`,
`browser_fill`, `browser_scroll`) wait 0.5 s for the page to settle
after a successful action, then attach a fresh snapshot under the
`snapshot` key of their result, so don't call `browser_snapshot`
separately after an interaction unless you need a newer view. Tune
with `auto_snapshot_mode`: `"default"` (full tree) is the default;
`"simple"` trims unnamed structural nodes; `"interactive"` returns
only controls (tightest token footprint); `"off"` skips the capture
entirely; use when batching several interactions.
Only fall back to `browser_get_text` for extracting small elements by
CSS selector.
## Coordinates
Every browser tool that takes or returns coordinates operates in
**fractions of the viewport (0..1 for both axes)**. Read a target's
proportional position off `browser_screenshot`: "this button is
~35% from the left, ~20% from the top" → pass `(0.35, 0.20)`.
`browser_get_rect` and `browser_shadow_query` return `rect.cx` /
`rect.cy` as fractions in the same space. The tools handle the
fraction → CSS-px multiplication internally; you do not need to
track image pixels, DPR, or any scale factor.
Why fractions: every vision model (Claude, GPT-4o, Gemini, local
VLMs) resizes or tiles images differently before the model sees the
pixels. Proportions survive every such transform; pixel coordinates
only "work" per-model and break when you swap backends.
Avoid raw `browser_evaluate` + `getBoundingClientRect()` for coord
lookup; that returns CSS px and will be wrong when fed to click
tools. Prefer `browser_get_rect` / `browser_shadow_query`, which
return fractions.
## Rich-text editors (X, LinkedIn DMs, Gmail, Reddit, Slack, Discord)
Click the input area first with `browser_click_coordinate` or
`browser_click(selector)` BEFORE typing. React / Draft.js / Lexical /
ProseMirror only register input as "real" after a native pointer-
sourced focus event; JS `.focus()` is not enough. Without a real click
first, the editor stays empty and the send button stays disabled.
`browser_type` does this automatically when you have a selector: it
clicks the element, then inserts text via CDP `Input.insertText`.
For shadow-DOM inputs where selectors can't reach, use
`browser_click_coordinate` to focus, then `browser_type_focused(text=...)`
to type into the active element. Before clicking send, verify the
submit button's `disabled` / `aria-disabled` state via `browser_evaluate`.
## Shadow DOM
Sites like LinkedIn messaging (`#interop-outlet`), Reddit (faceplate
Web Components), and some X elements live inside shadow roots.
`document.querySelector` and `wait_for_selector` do **not** see into
shadow roots. But `browser_click_coordinate` **does**: CDP hit
testing walks shadow roots natively, so coordinate-based operations
reach shadow elements transparently.
**Shadow-heavy site workflow:**
1. `browser_screenshot()` → visual image
2. Identify target visually → pixel `(x, y)` read straight off the image
3. `browser_click_coordinate(x, y)` → lands via native hit test;
inputs get focused regardless of shadow depth
4. Type via `browser_type_focused` (no selector needed; types into the
already-focused element), or `browser_type` if you have a selector
For selector-style access when you know the shadow path:
`browser_shadow_query("#interop-outlet >>> #msg-overlay >>> p")`
returns a CSS-px rect you can feed directly to click tools.
## Navigation & waiting
- `browser_navigate(wait_until="load")` returns when the page fires
load. On SPAs (LinkedIn especially: 4-5 seconds), add a 2-3 s sleep
after to let React/Vue hydrate before querying for chrome elements.
- Never re-navigate to the same URL after scrolling; it resets scroll.
- Use `timeout_ms=20000` for heavy SPAs.
- `wait_for_selector` / `wait_for_text` resolve in milliseconds when
the element is already in the DOM; no need to sleep if you can
express the wait condition.
## Keyboard shortcuts
`browser_press("a", modifiers=["ctrl"])` for Ctrl+A. Accepted
modifiers: `"alt"`, `"ctrl"`/`"control"`, `"meta"`/`"cmd"`,
`"shift"`. The tool dispatches the modifier key first, then the main
key with `code` and `windowsVirtualKeyCode` populated (Chrome's
shortcut dispatcher requires both), then releases in reverse order.
## Navigation & Waiting
- `browser_navigate` and `browser_open` already wait for the page to
load (`domcontentloaded`). Do NOT call `browser_wait` with no
arguments after navigation; it wastes time.
Only use `browser_wait` when you need a *specific element* or *text*
to appear (pass `selector` or `text`).
- NEVER re-navigate to the same URL after scrolling;
this resets your scroll position and loses loaded content.
## Scrolling
- Use large scroll amounts (~2000) when loading more content;
sites like twitter and linkedin have lazy loading for paging.
- The scroll result includes a snapshot automatically; no need to call
`browser_snapshot` separately.
- Use large amounts (~2000 px) for lazy-loaded sites (X, LinkedIn).
- Scroll result includes a snapshot; don't call `browser_snapshot`
separately.
## Batching Actions
- You can call multiple tools in a single turn; they execute in parallel.
ALWAYS batch independent actions together. Examples:
- Fill multiple form fields in one turn.
- Navigate + snapshot in one turn.
- Click + scroll if targeting different elements.
- When batching, set `auto_snapshot=false` on all but the last action
to avoid redundant snapshots.
- Aim for 3-5 tool calls per turn minimum. One tool call per turn is
wasteful.
## Batching
## Error Recovery
- If a tool fails, retry once with the same approach.
- If it fails a second time, STOP retrying and switch approach.
- If `browser_snapshot` fails → try `browser_get_text` with a
specific small selector as fallback.
- If `browser_open` fails or page seems stale → `browser_stop`,
then `browser_start`, then retry.
- Multiple tool calls per turn execute in parallel. Batch independent
actions together: fill multiple fields, navigate + snapshot,
different-target click + scroll.
- Set `auto_snapshot=false` on all but the last when batching.
- Aim for 3-5 tool calls per turn minimum.
## Tab Management
## Tab management
**Close tabs as soon as you are done with them**, not only at the end of the task.
After reading or extracting data from a tab, close it immediately.
Close tabs as soon as you're done with them — not only at the end of
the task. `browser_close(target_id=...)` for one, `browser_close_finished()`
for a full cleanup. Never accumulate more than 3 open tabs.
`browser_tabs` reports an `origin` field: `"agent"` (you own it, close
when done), `"popup"` (close after extracting), `"startup"`/`"user"`
(leave alone).
**Decision rules:**
- Finished reading/extracting from a tab? → `browser_close(target_id=...)`
- Completed a multi-tab workflow? → `browser_close_finished()` to clean up all your tabs
- More than 3 tabs open? → stop and close finished ones before opening more
- Popup appeared that you didn't need? → close it immediately
## Login & auth walls
**Origin awareness:** `browser_tabs` returns an `origin` field for each tab:
- `"agent"` you opened it; you own it; close it when done
- `"popup"` opened by a link or script; close after extracting what you need
- `"startup"` or `"user"` leave these alone unless the task requires it
Report the auth wall and stop; do NOT attempt to log in. Dismiss
cookie consent banners if they block content.
**Cleanup tools:**
- `browser_close(target_id=...)`: close one specific tab
- `browser_close_finished()`: close all your agent/popup tabs (safe: leaves startup/user tabs)
- `browser_close_all()`: close everything except the active tab (use only for full reset)
## Error recovery
**Multi-tab workflow pattern:**
1. Open background tabs with `browser_open(url=..., background=true)` to stay on current tab
2. Process each tab and close it with `browser_close` when done
3. When the full workflow completes, call `browser_close_finished()` to confirm cleanup
4. Check `browser_tabs` at any point; it shows `origin` and `age_seconds` per tab
- Retry once on failure, then switch approach.
- If `browser_snapshot` fails, try `browser_get_text` with a narrow
selector as fallback.
- If `browser_open` fails or the page seems stale, `browser_stop` →
`browser_start` → retry.
Never accumulate tabs. Treat every tab you open as a resource you must free.
## `browser_evaluate`
## Shadow DOM & Overlays
Use for reading state inside a shadow root that standard tools don't
handle, for one-shot site-specific actions, or to measure layout the
tools don't expose. Do NOT use it on a strict-CSP site (LinkedIn,
some X surfaces) with `innerHTML`; Trusted Types silently drops the
assignment. Always use `createElement` + `appendChild` + `setAttribute`
for DOM injection on those sites. `style.cssText`, `textContent`, and
`.value` assignments are fine.
Some sites (LinkedIn messaging, etc.) render content inside closed shadow roots that are
invisible to regular DOM queries and `browser_snapshot` coordinates.
**Detecting shadow DOM**: `document.elementFromPoint(x, y)` returns a zero-height host element
(e.g. `#interop-outlet`) for the entire overlay area — this is normal, not a bug.
`document.body.innerText` and `document.querySelectorAll` return nothing for shadow content.
`browser_snapshot` CAN read shadow DOM text but cannot return coordinates.
**Querying into shadow DOM:**
```
browser_shadow_query("#interop-outlet >>> #msg-overlay >>> p")
```
Uses `>>>` to pierce shadow roots. Returns `rect` in CSS pixels and `physicalRect` ready for
`browser_click_coordinate` / `browser_hover_coordinate`.
**Getting physical rect for any element (including shadow DOM):**
```
browser_get_rect(selector="#interop-outlet >>> .msg-convo-wrapper", pierce_shadow=true)
```
**Manual JS traversal when selector is dynamic:**
```js
const shadow = document.getElementById('interop-outlet').shadowRoot;
const convo = shadow.querySelector('#ember37');
const rect = convo.querySelector('p').getBoundingClientRect();
// rect is in CSS pixels; multiply by DPR for physical pixels
```
Pass this as a multi-statement script to `browser_evaluate`; it wraps automatically in an IIFE.
Use `JSON.stringify(rect)` to serialize the result.
## Coordinate System
There are THREE coordinate spaces. Using the wrong one causes clicks/hovers to land in the
wrong place.
| Space | Used by | How to get |
|---|---|---|
| Physical pixels | `browser_click_coordinate` | `browser_coords` → `physical_x/y` |
| CSS pixels | `getBoundingClientRect()`, `elementFromPoint` | `browser_coords` → `css_x/y` |
| Screenshot pixels | What you see in the 800px image | Raw position in screenshot |
**Converting screenshot → physical**: `browser_coords(x, y)` → use `physical_x/y`.
**Converting CSS → physical**: multiply by `window.devicePixelRatio` (typically 1.6 on HiDPI).
**Never** pass raw `getBoundingClientRect()` values to `browser_hover_coordinate` without
multiplying by DPR first.
## Screenshots
Screenshot data is base64-encoded PNG. To view it:
```
run_command("echo '<base64_data>' | base64 -d > /tmp/screenshot.png")
```
Then use `read_file("/tmp/screenshot.png")` to view the image.
Always use `full_page=false` (default) unless you specifically need the full scrolled page.
## JavaScript Evaluation
`browser_evaluate` wraps your script in an IIFE automatically:
- Single expression (`document.title`) → wrapped with `return`
- Multi-statement or contains `;`/`\n` → wrapped without return (add explicit `return` yourself)
- Already an IIFE → run as-is
**Avoid**: complex closures with `return` inside `for` loops; Chrome CDP returns `null`.
**Use instead**: `Array.from(...).map(...).join(...)` chains, or build result objects and
`JSON.stringify()` them.
**For shadow DOM traversal with dynamic selectors**, write the full JS path:
```js
const s = document.getElementById('interop-outlet').shadowRoot;
const el = s.querySelector('.msg-convo-wrapper');
return JSON.stringify(el.getBoundingClientRect());
```
## Login & Auth Walls
- If you see a "Log in" or "Sign up" prompt instead of expected
content, report the auth wall immediately; do NOT attempt to log in.
- Check for cookie consent banners and dismiss them if they block content.
## Efficiency
- Minimize tool calls; combine actions where possible.
- When a snapshot result is saved to a spillover file, use
`run_command` with grep to extract specific data rather than
re-reading the full file.
- Call `set_output` in the same turn as your last browser action
when possible; don't waste a turn.
"""
+18 -6
View File
@@ -41,9 +41,13 @@ class SuccessCriterion(BaseModel):
id: str
description: str = Field(description="Human-readable description of what success looks like")
metric: str = Field(description="How to measure: 'output_contains', 'output_equals', 'llm_judge', 'custom'")
metric: str = Field(
description="How to measure: 'output_contains', 'output_equals', 'llm_judge', 'custom'"
)
# NEW: runtime evaluation type (separate from metric)
type: str = Field(default="success_rate", description="Runtime evaluation type, e.g. 'success_rate'")
type: str = Field(
default="success_rate", description="Runtime evaluation type, e.g. 'success_rate'"
)
target: Any = Field(description="The target value or condition")
weight: float = Field(default=1.0, ge=0.0, le=1.0, description="Relative importance (0-1)")
@@ -63,9 +67,15 @@ class Constraint(BaseModel):
id: str
description: str
constraint_type: str = Field(description="Type: 'hard' (must not violate) or 'soft' (prefer not to violate)")
category: str = Field(default="general", description="Category: 'time', 'cost', 'safety', 'scope', 'quality'")
check: str = Field(default="", description="How to check: expression, function name, or 'llm_judge'")
constraint_type: str = Field(
description="Type: 'hard' (must not violate) or 'soft' (prefer not to violate)"
)
category: str = Field(
default="general", description="Category: 'time', 'cost', 'safety', 'scope', 'quality'"
)
check: str = Field(
default="", description="How to check: expression, function name, or 'llm_judge'"
)
model_config = {"extra": "allow"}
@@ -132,7 +142,9 @@ class Goal(BaseModel):
# Input/output schema
input_schema: dict[str, Any] = Field(default_factory=dict, description="Expected input format")
output_schema: dict[str, Any] = Field(default_factory=dict, description="Expected output format")
output_schema: dict[str, Any] = Field(
default_factory=dict, description="Expected output format"
)
# Versioning for evolution
version: str = "1.0.0"
+13 -5
View File
@@ -129,13 +129,15 @@ class NodeSpec(BaseModel):
input_schema: dict[str, dict] = Field(
default_factory=dict,
description=(
"Optional schema for input validation. Format: {key: {type: 'string', required: True, description: '...'}}"
"Optional schema for input validation. "
"Format: {key: {type: 'string', required: True, description: '...'}}"
),
)
output_schema: dict[str, dict] = Field(
default_factory=dict,
description=(
"Optional schema for output validation. Format: {key: {type: 'dict', required: True, description: '...'}}"
"Optional schema for output validation. "
"Format: {key: {type: 'dict', required: True, description: '...'}}"
),
)
@@ -151,13 +153,19 @@ class NodeSpec(BaseModel):
"'none' = no tools at all."
),
)
model: str | None = Field(default=None, description="Specific model to use (defaults to graph default)")
model: str | None = Field(
default=None, description="Specific model to use (defaults to graph default)"
)
# For function nodes
function: str | None = Field(default=None, description="Function name or path for function nodes")
function: str | None = Field(
default=None, description="Function name or path for function nodes"
)
# For router nodes
routes: dict[str, str] = Field(default_factory=dict, description="Condition -> target_node_id mapping for routers")
routes: dict[str, str] = Field(
default_factory=dict, description="Condition -> target_node_id mapping for routers"
)
# Retry behavior
max_retries: int = Field(default=3)
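These hunks are pure wrapping changes around pydantic `Field(...)` declarations; behavior is unchanged because the `Field` metadata is identical either way. A tiny sketch confirming the equivalence (hypothetical model, pydantic v2 style):
```python
# Wrapped vs one-line Field declarations are semantically identical;
# only the source formatting differs. Hypothetical two-field model.
from pydantic import BaseModel, Field

class RouteSpec(BaseModel):
    model: str | None = Field(
        default=None, description="Specific model to use (defaults to graph default)"
    )
    routes: dict[str, str] = Field(default_factory=dict, description="Condition -> target map")

spec = RouteSpec()
assert spec.model is None and spec.routes == {}
assert RouteSpec.model_fields["model"].description.startswith("Specific model")
```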
+20 -7
View File
@@ -379,7 +379,9 @@ class NodeWorker:
# Failure
if attempt + 1 < total_attempts:
gc.retry_counts[self.node_spec.id] = gc.retry_counts.get(self.node_spec.id, 0) + 1
gc.retry_counts[self.node_spec.id] = (
gc.retry_counts.get(self.node_spec.id, 0) + 1
)
gc.nodes_with_retries.add(self.node_spec.id)
delay = 1.0 * (2**attempt)
logger.warning(
@@ -409,7 +411,9 @@ class NodeWorker:
except Exception as exc:
if attempt + 1 < total_attempts:
gc.retry_counts[self.node_spec.id] = gc.retry_counts.get(self.node_spec.id, 0) + 1
gc.retry_counts[self.node_spec.id] = (
gc.retry_counts.get(self.node_spec.id, 0) + 1
)
gc.nodes_with_retries.add(self.node_spec.id)
delay = 1.0 * (2**attempt)
logger.warning(
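The retry delay in both hunks is unchanged exponential backoff; a quick worked check:
```python
# attempt 0 -> 1.0 s, attempt 1 -> 2.0 s, attempt 2 -> 4.0 s
delays = [1.0 * (2 ** attempt) for attempt in range(3)]
assert delays == [1.0, 2.0, 4.0]
```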
@@ -465,7 +469,9 @@ class NodeWorker:
if len(conditionals) > 1:
max_prio = max(e.priority for e in conditionals)
traversable = [
e for e in traversable if e.condition != EdgeCondition.CONDITIONAL or e.priority == max_prio
e
for e in traversable
if e.condition != EdgeCondition.CONDITIONAL or e.priority == max_prio
]
# When parallel execution is disabled, follow first match only (sequential)
@@ -535,7 +541,9 @@ class NodeWorker:
logger.warning("Worker %s output validation warnings: %s", node_spec.id, errors)
# Determine if this worker is a fan-out branch
is_fanout_branch = any(tag.via_branch == node_spec.id for tag in self._inherited_fan_out_tags)
is_fanout_branch = any(
tag.via_branch == node_spec.id for tag in self._inherited_fan_out_tags
)
# Collect keys to write: declared output_keys + any extra output items
# (for fan-out branches, all output items need conflict checking)
@@ -634,7 +642,9 @@ class NodeWorker:
self._node_impl = node
return node
raise RuntimeError(f"No implementation for node '{self.node_spec.id}' (type: {self.node_spec.node_type})")
raise RuntimeError(
f"No implementation for node '{self.node_spec.id}' (type: {self.node_spec.node_type})"
)
def _build_node_context(self) -> NodeContext:
"""Build NodeContext for this worker's execution."""
@@ -739,7 +749,9 @@ class NodeWorker:
inherited_conversation=gc.continuous_conversation,
narrative=narrative,
)
gc.continuous_conversation.update_system_prompt(build_system_prompt_for_node_context(next_ctx))
gc.continuous_conversation.update_system_prompt(
build_system_prompt_for_node_context(next_ctx)
)
gc.continuous_conversation.set_current_phase(next_spec.id)
buffer_items, data_files = self._prepare_transition_payload()
@@ -787,7 +799,8 @@ class NodeWorker:
file_path.write_text(write_content, encoding="utf-8")
file_size = file_path.stat().st_size
buffer_items[key] = (
f"[Saved to '{filename}' ({file_size:,} bytes). Use read_file(path='{filename}') to access.]"
f"[Saved to '{filename}' ({file_size:,} bytes). "
f"Use read_file(path='{filename}') to access.]"
)
continue
except Exception:
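The spillover branch writes oversized values to a session data file and leaves a pointer string in the buffer. A hedged sketch of that write; the filename scheme here is assumed, not taken from the repo:

```python
import json
from pathlib import Path


def spill_to_file(data_path: Path, key: str, value) -> str:
    """Write value to disk; return the pointer marker stored in the buffer."""
    # Assumed naming convention for illustration only.
    filename = f"{key}.json" if isinstance(value, (dict, list)) else f"{key}.txt"
    file_path = data_path / filename
    content = (
        json.dumps(value, indent=2, ensure_ascii=False)
        if isinstance(value, (dict, list))
        else str(value)
    )
    file_path.write_text(content, encoding="utf-8")
    size = file_path.stat().st_size
    return (
        f"[Saved to '{filename}' ({size:,} bytes). "
        f"Use read_file(path='{filename}') to access.]"
    )
```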
+112 -50
@@ -202,7 +202,9 @@ class Orchestrator:
self.validator = OutputValidator()
self.logger = logging.getLogger(__name__)
self.logger.debug(
"[Orchestrator.__init__] Created with stream_id=%s, execution_id=%s, initial node_registry keys: %s",
"[Orchestrator.__init__] Created with"
" stream_id=%s, execution_id=%s,"
" initial node_registry keys: %s",
stream_id,
execution_id,
list(self.node_registry.keys()),
@@ -345,7 +347,8 @@ class Orchestrator:
missing = [t for t in declared if t not in available_tool_names]
if missing:
self.logger.warning(
"Node '%s' (id=%s) declares %d tools not in this runtime; stripping them and continuing: %s",
"Node '%s' (id=%s) declares %d tools not in this runtime; "
"stripping them and continuing: %s",
node.name,
node.id,
len(missing),
@@ -388,7 +391,10 @@ class Orchestrator:
lines.append(f"[tool result]: {c}")
elif m.role == "assistant" and m.tool_calls:
names = [tc.get("function", {}).get("name", "?") for tc in m.tool_calls]
lines.append(f"[assistant (calls: {', '.join(names)})]: {m.content[:200] if m.content else ''}")
lines.append(
f"[assistant (calls: {', '.join(names)})]: "
f"{m.content[:200] if m.content else ''}"
)
else:
lines.append(f"[{m.role}]: {m.content}")
formatted = "\n\n".join(lines)
@@ -559,7 +565,8 @@ class Orchestrator:
# [RESTORED] Type safety check
if not isinstance(buffer_data, dict):
self.logger.warning(
f"⚠️ Invalid data buffer type in session state: {type(buffer_data).__name__}, expected dict"
f"⚠️ Invalid data buffer type in session state: "
f"{type(buffer_data).__name__}, expected dict"
)
else:
# Restore buffer from previous session.
@@ -583,7 +590,8 @@ class Orchestrator:
# contains all state including the original input, and re-writing
# input_data would overwrite intermediate results with stale values.
_is_resuming = bool(
session_state and (session_state.get("paused_at") or session_state.get("resume_from_checkpoint"))
session_state
and (session_state.get("paused_at") or session_state.get("resume_from_checkpoint"))
)
if input_data and not _is_resuming:
for key, value in input_data.items():
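The `_is_resuming` guard keeps a resumed run from overwriting intermediate results with stale `input_data`. A sketch of the predicate against hypothetical session states:

```python
def is_resuming(session_state: dict | None) -> bool:
    # Mirrors the reflowed expression above: resuming iff the state
    # carries a pause marker or a checkpoint reference.
    return bool(
        session_state
        and (session_state.get("paused_at") or session_state.get("resume_from_checkpoint"))
    )


assert not is_resuming(None)                               # fresh run
assert not is_resuming({"data_buffer": {}})                # state, but no pause marker
assert is_resuming({"paused_at": "review_node"})           # paused mid-graph
assert is_resuming({"resume_from_checkpoint": "ckpt_42"})  # checkpoint resume
```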
@@ -608,7 +616,11 @@ class Orchestrator:
# If resuming at a specific node (paused_at), that node was counted
# but never completed, so decrement its count
paused_at = session_state.get("paused_at")
if paused_at and paused_at in node_visit_counts and node_visit_counts[paused_at] > 0:
if (
paused_at
and paused_at in node_visit_counts
and node_visit_counts[paused_at] > 0
):
old_count = node_visit_counts[paused_at]
node_visit_counts[paused_at] -= 1
self.logger.info(
@@ -624,7 +636,10 @@ class Orchestrator:
checkpoint = await checkpoint_store.load_checkpoint(checkpoint_id)
if checkpoint:
self.logger.info(f"🔄 Resuming from checkpoint: {checkpoint_id} (node: {checkpoint.current_node})")
self.logger.info(
f"🔄 Resuming from checkpoint: {checkpoint_id} "
f"(node: {checkpoint.current_node})"
)
checkpoint_run_id = checkpoint.run_id or LEGACY_RUN_ID
self._run_id = checkpoint_run_id
@@ -633,7 +648,9 @@ class Orchestrator:
buffer.write(key, value, validate=False)
# Start from checkpoint's next node or current node
current_node_id = checkpoint.next_node or checkpoint.current_node or graph.entry_node
current_node_id = (
checkpoint.next_node or checkpoint.current_node or graph.entry_node
)
# Restore execution path
path.extend(checkpoint.execution_path)
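Checkpoint resumption picks the first defined node in a fixed precedence chain: `next_node`, then `current_node`, then the graph entry point. A minimal sketch (Checkpoint is a trimmed stand-in):

```python
from dataclasses import dataclass


@dataclass
class Checkpoint:  # minimal stand-in for the framework's checkpoint type
    next_node: str | None = None
    current_node: str | None = None


def resume_node(checkpoint: Checkpoint, entry_node: str) -> str:
    # next_node wins (the node after the checkpointed one), then the
    # checkpointed node itself, then the graph's entry point.
    return checkpoint.next_node or checkpoint.current_node or entry_node


assert resume_node(Checkpoint(next_node="b"), "start") == "b"
assert resume_node(Checkpoint(current_node="a"), "start") == "a"
assert resume_node(Checkpoint(), "start") == "start"
```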
@@ -643,11 +660,16 @@ class Orchestrator:
f"resuming at node: {current_node_id}"
)
else:
self.logger.warning(f"Checkpoint {checkpoint_id} not found, resuming from normal entry point")
self.logger.warning(
f"Checkpoint {checkpoint_id} not found, resuming from normal entry point"
)
current_node_id = graph.get_entry_point(session_state)
except Exception as e:
self.logger.error(f"Failed to load checkpoint {checkpoint_id}: {e}, resuming from normal entry point")
self.logger.error(
f"Failed to load checkpoint {checkpoint_id}: {e}, "
f"resuming from normal entry point"
)
current_node_id = graph.get_entry_point(session_state)
else:
current_node_id = graph.get_entry_point(session_state)
@@ -680,27 +702,14 @@ class Orchestrator:
self.logger.info(f" Goal: {goal.description}")
self.logger.info(f" Entry node: {graph.entry_node}")
# Set per-execution data_dir and agent_id so data tools and
# spillover files share the same session-scoped directory, and
# so MCP tools whose server-side schemas mark agent_id as a
# required field (list_dir, hashline_edit, replace_file_content,
# execute_command_tool, …) get a valid value injected even on
# registry instances where agent_loader.setup() didn't populate
# the session_context. Without this, FastMCP rejects those
# calls with "agent_id is a required property".
# Set per-execution data_dir so data tools and spillover files
# share the same session-scoped directory.
_ctx_token = None
if self._storage_path:
from framework.loader.tool_registry import ToolRegistry
_ctx_token = ToolRegistry.set_execution_context(
data_dir=str(self._storage_path / "data"),
agent_id=graph.id,
)
else:
from framework.loader.tool_registry import ToolRegistry
_ctx_token = ToolRegistry.set_execution_context(
agent_id=graph.id,
)
try:
@@ -735,14 +744,20 @@ class Orchestrator:
"human_input": "event_loop", # Use queen interaction / escalation instead
}
def _get_node_implementation(self, node_spec: NodeSpec, cleanup_llm_model: str | None = None) -> NodeProtocol:
def _get_node_implementation(
self, node_spec: NodeSpec, cleanup_llm_model: str | None = None
) -> NodeProtocol:
"""Get or create a node implementation."""
# Check registry first
if node_spec.id in self.node_registry:
logger.debug("[Orchestrator._get_node_implementation] Found node '%s' in registry", node_spec.id)
logger.debug(
"[Orchestrator._get_node_implementation] Found node '%s' in registry", node_spec.id
)
return self.node_registry[node_spec.id]
logger.debug(
"[Orchestrator._get_node_implementation] Node '%s' not in registry (keys: %s), creating new",
"[Orchestrator._get_node_implementation]"
" Node '%s' not in registry (keys: %s),"
" creating new",
node_spec.id,
list(self.node_registry.keys()),
)
@@ -812,7 +827,9 @@ class Orchestrator:
# Cache so inject_event() is reachable for queen interaction and escalation routing
self.node_registry[node_spec.id] = node
logger.debug(
"[Orchestrator._get_node_implementation] Cached node '%s' in node_registry, registry now has keys: %s",
"[Orchestrator._get_node_implementation]"
" Cached node '%s' in node_registry,"
" registry now has keys: %s",
node_spec.id,
list(self.node_registry.keys()),
)
@@ -895,7 +912,9 @@ class Orchestrator:
if len(conditionals) > 1:
max_prio = max(e.priority for e in conditionals)
traversable = [
e for e in traversable if e.condition != EdgeCondition.CONDITIONAL or e.priority == max_prio
e
for e in traversable
if e.condition != EdgeCondition.CONDITIONAL or e.priority == max_prio
]
return traversable
@@ -1058,7 +1077,9 @@ class Orchestrator:
execution_id=self._execution_id,
)
self.logger.info(f" ▶ Branch {node_spec.name}: executing (attempt {attempt + 1})")
self.logger.info(
f" ▶ Branch {node_spec.name}: executing (attempt {attempt + 1})"
)
result = await node_impl.execute(ctx)
last_result = result
@@ -1119,13 +1140,19 @@ class Orchestrator:
)
return branch, result
self.logger.warning(f" ↻ Branch {node_spec.name}: retry {attempt + 1}/{effective_max_retries}")
self.logger.warning(
f" ↻ Branch {node_spec.name}: "
f"retry {attempt + 1}/{effective_max_retries}"
)
# All retries exhausted
branch.status = "failed"
branch.error = last_result.error if last_result else "Unknown error"
branch.result = last_result
self.logger.error(f" ✗ Branch {node_spec.name}: failed after {effective_max_retries} attempts")
self.logger.error(
f" ✗ Branch {node_spec.name}: "
f"failed after {effective_max_retries} attempts"
)
return branch, last_result
except Exception as e:
@@ -1168,7 +1195,10 @@ class Orchestrator:
# Branch timed out
branch.status = "timed_out"
branch.error = f"Branch timed out after {timeout}s"
self.logger.warning(f" ⏱ Branch {graph.get_node(branch.node_id).name}: timed out after {timeout}s")
self.logger.warning(
f" ⏱ Branch {graph.get_node(branch.node_id).name}: "
f"timed out after {timeout}s"
)
path.append(branch.node_id)
failed_branches.append(branch)
elif isinstance(result, Exception):
@@ -1192,9 +1222,13 @@ class Orchestrator:
if self._parallel_config.on_branch_failure == "fail_all":
raise RuntimeError(f"Parallel execution failed: branches {failed_names} failed")
elif self._parallel_config.on_branch_failure == "continue_others":
self.logger.warning(f"⚠ Some branches failed ({failed_names}), continuing with successful ones")
self.logger.warning(
f"⚠ Some branches failed ({failed_names}), continuing with successful ones"
)
self.logger.info(f" ⑃ Fan-out complete: {len(branch_results)}/{len(branches)} branches succeeded")
self.logger.info(
f" ⑃ Fan-out complete: {len(branch_results)}/{len(branches)} branches succeeded"
)
return branch_results, total_tokens, total_latency
def register_node(self, node_id: str, implementation: NodeProtocol) -> None:
@@ -1385,10 +1419,15 @@ class Orchestrator:
return True
if not terminal_worker_ids:
# No terminals: check if all workers are done
return all(w.lifecycle in (WorkerLifecycle.COMPLETED, WorkerLifecycle.FAILED) for w in workers.values())
return all(
w.lifecycle in (WorkerLifecycle.COMPLETED, WorkerLifecycle.FAILED)
for w in workers.values()
)
if any(w.lifecycle == WorkerLifecycle.RUNNING for w in workers.values()):
return False
return any(tid in completed_terminals or tid in failed_workers for tid in terminal_worker_ids)
return any(
tid in completed_terminals or tid in failed_workers for tid in terminal_worker_ids
)
def _mark_quiescent_terminal_failure() -> bool:
nonlocal execution_error
@@ -1396,15 +1435,22 @@ class Orchestrator:
return False
if any(w.lifecycle == WorkerLifecycle.RUNNING for w in workers.values()):
return False
if any(tid in completed_terminals or tid in failed_workers for tid in terminal_worker_ids):
if any(
tid in completed_terminals or tid in failed_workers for tid in terminal_worker_ids
):
return False
execution_error = f"Worker execution ended before terminal nodes completed: {sorted(terminal_worker_ids)}"
execution_error = (
"Worker execution ended before terminal nodes completed: "
f"{sorted(terminal_worker_ids)}"
)
self.logger.error(execution_error)
return True
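Both helpers above reduce to one question: is anything still running, and did any terminal worker resolve? A distilled sketch of the quiescent-failure check, using plain sets and a stand-in lifecycle enum:

```python
from enum import Enum


class WorkerLifecycle(Enum):  # stand-in for the framework's enum
    RUNNING = "running"
    COMPLETED = "completed"
    FAILED = "failed"


def quiescent_terminal_failure(
    lifecycles: dict[str, WorkerLifecycle],
    terminal_ids: set[str],
    completed_terminals: set[str],
    failed_workers: set[str],
) -> bool:
    """True when nothing runs anymore but no terminal node ever resolved."""
    if not terminal_ids:
        return False
    if any(lc == WorkerLifecycle.RUNNING for lc in lifecycles.values()):
        return False
    return not any(
        tid in completed_terminals or tid in failed_workers for tid in terminal_ids
    )
```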
# Track fan-out branch workers for per-branch timeout enforcement
_fanout_branch_tasks: dict[str, asyncio.Task] = {} # worker_id → timeout-wrapper task
branch_timeout = self._parallel_config.branch_timeout_seconds if self._parallel_config else 300.0
branch_timeout = (
self._parallel_config.branch_timeout_seconds if self._parallel_config else 300.0
)
def _route_activation(
activation: Activation,
@@ -1439,7 +1485,9 @@ class Orchestrator:
target_worker.activate(inherited_tags=activation.fan_out_tags)
if target_worker._task is not None:
# Fan-out branch: wrap with timeout
is_fanout_branch = any(tag.via_branch == activation.target_id for tag in activation.fan_out_tags)
is_fanout_branch = any(
tag.via_branch == activation.target_id for tag in activation.fan_out_tags
)
if is_fanout_branch and branch_timeout > 0:
timed_task = asyncio.ensure_future(
asyncio.wait_for(target_worker._task, timeout=branch_timeout)
@@ -1494,7 +1542,9 @@ class Orchestrator:
if completion.conversation is not None:
gc.continuous_conversation = completion.conversation
self.logger.info(f" ✓ Worker completed: {worker_id} ({len(activations)} outgoing activation(s))")
self.logger.info(
f" ✓ Worker completed: {worker_id} ({len(activations)} outgoing activation(s))"
)
# Route activations to target workers
for activation in activations:
@@ -1535,7 +1585,9 @@ class Orchestrator:
completion_event.set()
# Subscribe to events (only if event bus has subscribe capability)
has_event_subscription = self._event_bus is not None and hasattr(self._event_bus, "subscribe")
has_event_subscription = self._event_bus is not None and hasattr(
self._event_bus, "subscribe"
)
if has_event_subscription:
sub_completed = self._event_bus.subscribe(
event_types=[EventType.WORKER_COMPLETED],
@@ -1577,12 +1629,14 @@ class Orchestrator:
)
if unresolved_terminals:
execution_error = (
f"Worker execution ended before terminal nodes completed: {unresolved_terminals}"
"Worker execution ended before terminal nodes completed: "
f"{unresolved_terminals}"
)
self.logger.error(execution_error)
else:
execution_error = (
"Worker execution ended before all workers reached a terminal lifecycle state"
"Worker execution ended before all workers reached "
"a terminal lifecycle state"
)
self.logger.error(execution_error)
break
@@ -1613,7 +1667,10 @@ class Orchestrator:
task_error = exc
# Check for fan-out branch timeout
if isinstance(task_error, asyncio.TimeoutError) and wid in _fanout_branch_tasks:
if (
isinstance(task_error, asyncio.TimeoutError)
and wid in _fanout_branch_tasks
):
error = f"Branch failed (timed out after {branch_timeout}s)"
failed_workers[wid] = error
worker.lifecycle = WorkerLifecycle.FAILED
@@ -1657,7 +1714,10 @@ class Orchestrator:
src_spec = graph.get_node(wid)
if src_spec and src_spec.tools:
for t in self.tools:
if t.name in src_spec.tools and t.name not in gc.cumulative_tool_names:
if (
t.name in src_spec.tools
and t.name not in gc.cumulative_tool_names
):
gc.cumulative_tools.append(t)
gc.cumulative_tool_names.add(t.name)
if src_spec and src_spec.output_keys:
@@ -1668,7 +1728,8 @@ class Orchestrator:
gc.continuous_conversation = completion_conversation
self.logger.info(
f" ✓ Worker completed: {wid} ({len(outgoing_activations)} outgoing activation(s))"
f" ✓ Worker completed: {wid} "
f"({len(outgoing_activations)} outgoing activation(s))"
)
# Route activations
@@ -1713,7 +1774,8 @@ class Orchestrator:
error = str(task_error)
else:
error = (
f"Worker task completed without publishing a completion (lifecycle={worker.lifecycle})"
"Worker task completed without publishing a completion "
f"(lifecycle={worker.lifecycle})"
)
failed_workers[wid] = error
@@ -97,12 +97,15 @@ def build_transition_marker(
file_path = data_path / filename
try:
write_content = (
json.dumps(value, indent=2, ensure_ascii=False) if isinstance(value, (dict, list)) else str(value)
json.dumps(value, indent=2, ensure_ascii=False)
if isinstance(value, (dict, list))
else str(value)
)
file_path.write_text(write_content, encoding="utf-8")
file_size = file_path.stat().st_size
buffer_items[key] = (
f"[Saved to '{filename}' ({file_size:,} bytes). Use read_file(path='{filename}') to access.]"
f"[Saved to '{filename}' ({file_size:,} bytes). "
f"Use read_file(path='{filename}') to access.]"
)
except Exception:
buffer_items[key] = val_str[:300] + "..."
+6 -12
@@ -162,8 +162,6 @@ def build_prompt_spec_from_node_context(
memory_prompt: str | None = None,
) -> NodePromptSpec:
"""Convert a NodeContext-like object into structured prompt inputs."""
from framework.skills.tool_gating import augment_catalog_for_tools
resolved_memory_prompt = memory_prompt
if resolved_memory_prompt is None:
resolved_memory_prompt = getattr(ctx, "memory_prompt", "") or ""
@@ -173,19 +171,14 @@ def build_prompt_spec_from_node_context(
resolved_memory_prompt = dynamic_memory_provider() or ""
except Exception:
resolved_memory_prompt = getattr(ctx, "memory_prompt", "") or ""
# Tool-gated pre-activation: inject full body of default skills whose
# trigger tools are present in this node's tool list (e.g. browser_*
# pulls in hive.browser-automation).
tool_names = [getattr(t, "name", "") for t in (getattr(ctx, "available_tools", None) or [])]
skills_catalog_prompt = augment_catalog_for_tools(ctx.skills_catalog_prompt or "", tool_names)
return NodePromptSpec(
identity_prompt=ctx.identity_prompt or "",
focus_prompt=focus_prompt if focus_prompt is not None else (ctx.node_spec.system_prompt or ""),
focus_prompt=focus_prompt
if focus_prompt is not None
else (ctx.node_spec.system_prompt or ""),
narrative=narrative if narrative is not None else (ctx.narrative or ""),
accounts_prompt=ctx.accounts_prompt or "",
skills_catalog_prompt=skills_catalog_prompt,
skills_catalog_prompt=ctx.skills_catalog_prompt or "",
protocols_prompt=ctx.protocols_prompt or "",
memory_prompt=resolved_memory_prompt,
node_type=ctx.node_spec.node_type,
@@ -293,7 +286,8 @@ def build_transition_message(spec: TransitionSpec) -> str:
if spec.data_files:
sections.append(
"\nData files (use read_file to access):\n" + "\n".join(f" {entry}" for entry in spec.data_files)
"\nData files (use read_file to access):\n"
+ "\n".join(f" {entry}" for entry in spec.data_files)
)
if spec.cumulative_tool_names:
+6 -4
@@ -12,9 +12,7 @@ MAX_POWER_ABS_EXPONENT = 1_000
MAX_POWER_RESULT_BITS = 4_096
# Typical edge-condition evaluations in this repo complete well under 1ms.
# 100ms leaves ample headroom for legitimate checks while failing fast on abuse.
# On Windows (where SIGALRM is unavailable) the fallback relies on periodic
# perf_counter polling which is less precise, so we use a wider margin.
DEFAULT_TIMEOUT_MS = 100 if hasattr(signal, "SIGALRM") else 500
DEFAULT_TIMEOUT_MS = 100
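For context on the `hasattr(signal, "SIGALRM")` branch being dropped: bounding an in-process evaluation with an interval timer is the usual POSIX approach. A sketch of that pattern, assuming POSIX signals are available; this is not the repo's actual guard:

```python
import signal
from contextlib import contextmanager


@contextmanager
def eval_deadline(timeout_ms: int):
    """Raise TimeoutError if the body runs longer than timeout_ms (POSIX only)."""
    def _on_alarm(signum, frame):
        raise TimeoutError(f"evaluation exceeded {timeout_ms}ms")

    old = signal.signal(signal.SIGALRM, _on_alarm)
    signal.setitimer(signal.ITIMER_REAL, timeout_ms / 1000.0)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0.0)  # cancel the timer
        signal.signal(signal.SIGALRM, old)         # restore prior handler


with eval_deadline(100):
    result = sum(range(10_000))  # stands in for an edge-condition evaluation
```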
def _safe_pow(base: Any, exp: Any) -> Any:
@@ -171,7 +169,11 @@ class SafeEvalVisitor(ast.NodeVisitor):
return tuple(self.visit(elt) for elt in node.elts)
def visit_Dict(self, node: ast.Dict) -> dict:
return {self.visit(k): self.visit(v) for k, v in zip(node.keys, node.values, strict=False) if k is not None}
return {
self.visit(k): self.visit(v)
for k, v in zip(node.keys, node.values, strict=False)
if k is not None
}
# --- Operations ---
def visit_BinOp(self, node: ast.BinOp) -> Any:
+6 -2
@@ -120,7 +120,9 @@ class OutputValidator:
nullable_keys = nullable_keys or []
if not isinstance(output, dict):
return ValidationResult(success=False, errors=[f"Output is not a dict, got {type(output).__name__}"])
return ValidationResult(
success=False, errors=[f"Output is not a dict, got {type(output).__name__}"]
)
for key in expected_keys:
if key not in output:
@@ -235,7 +237,9 @@ class OutputValidator:
# Check for overly long values
if len(value) > max_length:
errors.append(f"Output key '{key}' exceeds max length ({len(value)} > {max_length})")
errors.append(
f"Output key '{key}' exceeds max length ({len(value)} > {max_length})"
)
return ValidationResult(success=len(errors) == 0, errors=errors)
+3 -1
@@ -27,6 +27,8 @@ class CostGuardStage(PipelineStage):
if estimated > self._budget:
return PipelineResult(
action="reject",
rejection_reason=(f"Estimated cost ${estimated:.4f} exceeds budget ${self._budget:.4f}"),
rejection_reason=(
f"Estimated cost ${estimated:.4f} exceeds budget ${self._budget:.4f}"
),
)
return PipelineResult(action="continue")
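A runnable reduction of the stage: estimate, compare against the budget, reject or continue. `PipelineResult` here is a minimal stand-in, and the estimate is passed in directly rather than derived from token counts:

```python
from dataclasses import dataclass


@dataclass
class PipelineResult:  # minimal stand-in for the repo's result type
    action: str
    rejection_reason: str | None = None


def cost_guard(estimated: float, budget: float) -> PipelineResult:
    if estimated > budget:
        return PipelineResult(
            action="reject",
            rejection_reason=f"Estimated cost ${estimated:.4f} exceeds budget ${budget:.4f}",
        )
    return PipelineResult(action="continue")


assert cost_guard(0.0125, 0.01).action == "reject"
assert cost_guard(0.0080, 0.01).action == "continue"
```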
@@ -40,7 +40,8 @@ class InputValidationStage(PipelineStage):
return PipelineResult(
action="reject",
rejection_reason=(
f"Input key '{key}' has type {type(value).__name__}, expected {expected_type.__name__}"
f"Input key '{key}' has type {type(value).__name__}, "
f"expected {expected_type.__name__}"
),
)
return PipelineResult(action="continue")
+3 -1
@@ -35,7 +35,9 @@ class RateLimitStage(PipelineStage):
if len(self._timestamps[key]) >= self._max_rpm:
return PipelineResult(
action="reject",
rejection_reason=(f"Rate limit exceeded: {self._max_rpm} req/min for session '{session_id}'"),
rejection_reason=(
f"Rate limit exceeded: {self._max_rpm} req/min for session '{session_id}'"
),
)
self._timestamps[key].append(now)
return PipelineResult(action="continue")
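The stage reads as a per-session request window capped at `max_rpm`. A standalone sliding-window sketch with the same shape; the 60-second eviction step is an assumption, since the hunk does not show how `_timestamps` is pruned:

```python
import time
from collections import defaultdict, deque


class RateLimiter:
    def __init__(self, max_rpm: int):
        self._max_rpm = max_rpm
        self._timestamps: dict[str, deque[float]] = defaultdict(deque)

    def allow(self, session_id: str) -> bool:
        now = time.monotonic()
        window = self._timestamps[session_id]
        # Drop timestamps older than 60s (assumed eviction policy).
        while window and now - window[0] > 60.0:
            window.popleft()
        if len(window) >= self._max_rpm:
            return False  # maps to the "Rate limit exceeded" rejection above
        window.append(now)
        return True
```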
@@ -1,180 +0,0 @@
"""Regression tests for forced cancellation overlap in ExecutionStream."""
from __future__ import annotations
import asyncio
from types import SimpleNamespace
from unittest.mock import MagicMock
import pytest
from framework.host.event_bus import AgentEvent, EventBus, EventType
from framework.host.execution_manager import (
EntryPointSpec,
ExecutionAlreadyRunningError,
ExecutionManager,
)
from framework.orchestrator.edge import GraphSpec
from framework.orchestrator.goal import Goal
from framework.orchestrator.orchestrator import ExecutionResult
def _build_stream(tmp_path, *, event_bus: EventBus | None = None) -> ExecutionManager:
graph = GraphSpec(
id="test-graph",
goal_id="goal-1",
version="1.0.0",
entry_node="start",
entry_points={"start": "start"},
terminal_nodes=[],
pause_nodes=[],
nodes=[],
edges=[],
)
goal = Goal(id="goal-1", name="goal-1", description="test goal")
entry_spec = EntryPointSpec(
id="webhook",
name="Webhook",
entry_node="start",
trigger_type="webhook",
isolation_level="shared",
max_concurrent=1,
)
storage = SimpleNamespace(base_path=tmp_path)
stream = ExecutionManager(
stream_id="webhook",
entry_spec=entry_spec,
graph=graph,
goal=goal,
state_manager=MagicMock(),
storage=storage,
outcome_aggregator=MagicMock(),
event_bus=event_bus,
)
stream._running = True
return stream
def _install_blocking_executor(monkeypatch, release: asyncio.Event) -> None:
class BlockingExecutor:
def __init__(self, *args, **kwargs):
self.node_registry = {}
async def execute(self, *args, **kwargs):
while True:
try:
await release.wait()
break
except asyncio.CancelledError:
continue
return ExecutionResult(success=True, output={"ok": True})
monkeypatch.setattr("framework.host.execution_manager.Orchestrator", BlockingExecutor)
@pytest.mark.asyncio
async def test_forced_cancel_timeout_keeps_stream_locked_until_task_exit(tmp_path, monkeypatch):
event_bus = EventBus()
stream = _build_stream(tmp_path, event_bus=event_bus)
release = asyncio.Event()
_install_blocking_executor(monkeypatch, release)
started_events: list[AgentEvent] = []
first_started = asyncio.Event()
second_started = asyncio.Event()
async def on_started(event: AgentEvent) -> None:
started_events.append(event)
if len(started_events) == 1:
first_started.set()
elif len(started_events) == 2:
second_started.set()
event_bus.subscribe(
event_types=[EventType.EXECUTION_STARTED],
handler=on_started,
filter_stream="webhook",
)
async def immediate_timeout(_tasks, timeout=None):
return set(), set(_tasks)
execution_id = await stream.execute({}, session_state={"resume_session_id": "session-1"})
await asyncio.wait_for(first_started.wait(), timeout=1)
old_task = stream._execution_tasks[execution_id]
monkeypatch.setattr("framework.host.execution_manager.asyncio.wait", immediate_timeout)
try:
cancelled = await stream.cancel_execution(execution_id, reason="forced timeout")
assert cancelled == "cancelling"
assert execution_id in stream._execution_tasks
assert execution_id in stream._active_executions
assert execution_id in stream._completion_events
assert stream._active_executions[execution_id].status == "cancelling"
assert not old_task.done()
with pytest.raises(ExecutionAlreadyRunningError):
await stream.execute({}, session_state={"resume_session_id": execution_id})
assert len(started_events) == 1
release.set()
await asyncio.wait_for(old_task, timeout=1)
restarted_id = await stream.execute({}, session_state={"resume_session_id": execution_id})
assert restarted_id == execution_id
await asyncio.wait_for(second_started.wait(), timeout=1)
finally:
release.set()
await asyncio.gather(*stream._execution_tasks.values(), return_exceptions=True)
@pytest.mark.asyncio
async def test_repeated_forced_restarts_do_not_accumulate_parallel_tasks(tmp_path, monkeypatch):
event_bus = EventBus()
stream = _build_stream(tmp_path, event_bus=event_bus)
release = asyncio.Event()
_install_blocking_executor(monkeypatch, release)
started_events: list[AgentEvent] = []
first_started = asyncio.Event()
async def on_started(event: AgentEvent) -> None:
started_events.append(event)
first_started.set()
event_bus.subscribe(
event_types=[EventType.EXECUTION_STARTED],
handler=on_started,
filter_stream="webhook",
)
async def immediate_timeout(_tasks, timeout=None):
return set(), set(_tasks)
monkeypatch.setattr("framework.host.execution_manager.asyncio.wait", immediate_timeout)
execution_id = await stream.execute({}, session_state={"resume_session_id": "session-1"})
await asyncio.wait_for(first_started.wait(), timeout=1)
first_task = stream._execution_tasks[execution_id]
try:
assert await stream.cancel_execution(execution_id, reason="restart-1") == "cancelling"
with pytest.raises(ExecutionAlreadyRunningError):
await stream.execute({}, session_state={"resume_session_id": execution_id})
with pytest.raises(ExecutionAlreadyRunningError):
await stream.execute({}, session_state={"resume_session_id": execution_id})
assert len(started_events) == 1
assert list(stream._execution_tasks) == [execution_id]
assert stream._execution_tasks[execution_id] is first_task
assert not first_task.done()
finally:
release.set()
await asyncio.wait_for(first_task, timeout=1)
+12 -4
@@ -25,7 +25,9 @@ class GoalStatus(StrEnum):
class SuccessCriterion(BaseModel):
id: str
description: str = Field(description="Human-readable description of what success looks like")
metric: str = Field(description="How to measure: 'output_contains', 'output_equals', 'llm_judge', 'custom'")
metric: str = Field(
description="How to measure: 'output_contains', 'output_equals', 'llm_judge', 'custom'"
)
type: str = Field(default="success_rate", description="Runtime evaluation type")
target: Any = Field(description="The target value or condition")
weight: float = Field(default=1.0, ge=0.0, le=1.0, description="Relative importance (0-1)")
@@ -37,9 +39,15 @@ class SuccessCriterion(BaseModel):
class Constraint(BaseModel):
id: str
description: str
constraint_type: str = Field(description="Type: 'hard' (must not violate) or 'soft' (prefer not to violate)")
category: str = Field(default="general", description="Category: 'time', 'cost', 'safety', 'scope', 'quality'")
check: str = Field(default="", description="How to check: expression, function name, or 'llm_judge'")
constraint_type: str = Field(
description="Type: 'hard' (must not violate) or 'soft' (prefer not to violate)"
)
category: str = Field(
default="general", description="Category: 'time', 'cost', 'safety', 'scope', 'quality'"
)
check: str = Field(
default="", description="How to check: expression, function name, or 'llm_judge'"
)
model_config = {"extra": "allow"}
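A quick instantiation sketch against a trimmed mirror of `SuccessCriterion`; all field values below are illustrative only:

```python
from typing import Any

from pydantic import BaseModel, Field


class SuccessCriterion(BaseModel):  # trimmed mirror of the model above
    id: str
    description: str
    metric: str
    target: Any
    weight: float = Field(default=1.0, ge=0.0, le=1.0)


crit = SuccessCriterion(
    id="sc-1",
    description="Summary names the winning bid",
    metric="output_contains",   # one of the documented metric kinds
    target="winning_bid",
    weight=0.8,
)
```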
+6 -2
@@ -237,7 +237,9 @@ class SessionState(BaseModel):
progress=SessionProgress(
current_node=result.paused_at or (result.path[-1] if result.path else None),
paused_at=result.paused_at,
resume_from=result.session_state.get("resume_from") if result.session_state else None,
resume_from=result.session_state.get("resume_from")
if result.session_state
else None,
steps_executed=result.steps_executed,
total_tokens=result.total_tokens,
total_latency_ms=result.total_latency_ms,
@@ -254,7 +256,9 @@ class SessionState(BaseModel):
error=result.error,
output=result.output,
),
data_buffer=result.session_state.get("data_buffer", result.session_state.get("memory", {}))
data_buffer=result.session_state.get(
"data_buffer", result.session_state.get("memory", {})
)
if result.session_state
else {},
input_data=input_data or {},
+55 -77
@@ -56,7 +56,8 @@ def validate_agent_path(agent_path: str | Path) -> Path:
if resolved.is_relative_to(root) and resolved != root:
return resolved
raise ValueError(
"agent_path must be inside an allowed directory (~/.hive/colonies/, exports/, examples/, or ~/.hive/agents/)"
"agent_path must be inside an allowed directory "
"(~/.hive/colonies/, exports/, examples/, or ~/.hive/agents/)"
)
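The guard admits only paths strictly inside an allowlisted root (equality with the root itself is rejected). A self-contained sketch with an abbreviated, assumed root list:

```python
from pathlib import Path

ALLOWED_ROOTS = [  # abbreviated; the real list also covers exports/ and examples/
    Path.home() / ".hive" / "agents",
    Path.home() / ".hive" / "colonies",
]


def validate_agent_path(agent_path: str | Path) -> Path:
    resolved = Path(agent_path).expanduser().resolve()
    for root in ALLOWED_ROOTS:
        # Must be inside the root, not the root itself.
        if resolved.is_relative_to(root) and resolved != root:
            return resolved
    raise ValueError(
        "agent_path must be inside an allowed directory "
        "(~/.hive/colonies/, exports/, examples/, or ~/.hive/agents/)"
    )
```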
@@ -173,12 +174,11 @@ async def handle_health(request: web.Request) -> web.Response:
)
async def _probe_browser_bridge() -> dict:
"""Probe the local GCU bridge and return ``{bridge, connected}``.
async def handle_browser_status(request: web.Request) -> web.Response:
"""GET /api/browser/status — proxy the GCU bridge status check server-side.
Shared by the one-shot ``GET /api/browser/status`` handler and the
``/api/browser/status/stream`` SSE feed so both see the same data
source.
Checks http://127.0.0.1:9230/status so the browser never makes a
cross-origin request that would log ERR_CONNECTION_REFUSED in the console.
"""
import asyncio
@@ -186,71 +186,24 @@ async def _probe_browser_bridge() -> dict:
status_port = bridge_port + 1
try:
reader, writer = await asyncio.wait_for(asyncio.open_connection("127.0.0.1", status_port), timeout=0.5)
reader, writer = await asyncio.wait_for(
asyncio.open_connection("127.0.0.1", status_port), timeout=0.5
)
writer.write(b"GET /status HTTP/1.0\r\nHost: 127.0.0.1\r\n\r\n")
await writer.drain()
raw = await asyncio.wait_for(reader.read(512), timeout=0.5)
writer.close()
# Parse JSON body after the blank line
if b"\r\n\r\n" in raw:
body = raw.split(b"\r\n\r\n", 1)[1]
import json as _json
import json
data = _json.loads(body)
return {"bridge": True, "connected": bool(data.get("connected", False))}
data = json.loads(body)
return web.json_response({"bridge": True, "connected": data.get("connected", False)})
except Exception:
pass
return {"bridge": False, "connected": False}
async def handle_browser_status(request: web.Request) -> web.Response:
"""GET /api/browser/status — proxy the GCU bridge status check server-side.
Checks http://127.0.0.1:9230/status so the browser never makes a
cross-origin request that would log ERR_CONNECTION_REFUSED in the console.
"""
return web.json_response(await _probe_browser_bridge())
async def handle_browser_status_stream(request: web.Request) -> web.StreamResponse:
"""GET /api/browser/status/stream — SSE feed of bridge status.
Emits a ``status`` event immediately, then again only when the
probe result changes. Polls the local bridge every 3s; that's the
same cadence the frontend used before, but we absorb it
server-side instead of the browser burning a request.
"""
import asyncio
import json as _json
resp = web.StreamResponse(
status=200,
headers={
"Content-Type": "text/event-stream",
"Cache-Control": "no-cache, no-transform",
"Connection": "keep-alive",
"X-Accel-Buffering": "no",
},
)
await resp.prepare(request)
async def _send(event: str, data: dict) -> None:
payload = f"event: {event}\ndata: {_json.dumps(data)}\n\n"
await resp.write(payload.encode("utf-8"))
last: tuple | None = None
try:
while True:
status = await _probe_browser_bridge()
signature = (status["bridge"], status["connected"])
if signature != last:
await _send("status", status)
last = signature
await asyncio.sleep(3.0)
except (asyncio.CancelledError, ConnectionResetError):
raise
except Exception as exc:
logger.warning("browser status stream error: %s", exc, exc_info=True)
return resp
return web.json_response({"bridge": False, "connected": False})
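The probe speaks raw HTTP/1.0 over an asyncio socket, so a missing bridge fails within roughly half a second and no HTTP client dependency is needed. The same pattern as a standalone coroutine:

```python
import asyncio
import json


async def probe_status(host: str = "127.0.0.1", port: int = 9230) -> dict:
    """One-shot GET /status with hard 0.5s timeouts; {} means unreachable."""
    try:
        reader, writer = await asyncio.wait_for(
            asyncio.open_connection(host, port), timeout=0.5
        )
        writer.write(b"GET /status HTTP/1.0\r\nHost: 127.0.0.1\r\n\r\n")
        await writer.drain()
        raw = await asyncio.wait_for(reader.read(512), timeout=0.5)
        writer.close()
        # JSON body follows the blank line after the response headers.
        if b"\r\n\r\n" in raw:
            return json.loads(raw.split(b"\r\n\r\n", 1)[1])
    except Exception:
        pass
    return {}


# asyncio.run(probe_status())
```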
def create_app(model: str | None = None) -> web.Application:
@@ -279,26 +232,56 @@ def create_app(model: str | None = None) -> web.Application:
from framework.credentials.key_storage import generate_and_save_credential_key
generate_and_save_credential_key()
logger.info("Generated and persisted HIVE_CREDENTIAL_KEY to ~/.hive/secrets/credential_key")
logger.info(
"Generated and persisted HIVE_CREDENTIAL_KEY to ~/.hive/secrets/credential_key"
)
except Exception as exc:
logger.warning("Could not auto-persist HIVE_CREDENTIAL_KEY: %s", exc)
# Local server startup should not wait on an eager Aden sync.
# The store can still fetch/refresh credentials on demand.
credential_store = CredentialStore.with_aden_sync(auto_sync=False)
credential_store = CredentialStore.with_aden_sync()
except Exception:
logger.debug("Encrypted credential store unavailable, using in-memory fallback")
credential_store = CredentialStore.for_testing({})
app["credential_store"] = credential_store
# Let queen sessions build their registry lazily on first use instead of
# paying the MCP discovery cost during `hive open`.
app["queen_tool_registry"] = None
# Pre-load queen MCP tools once at startup (cached for all sessions)
# This avoids rebuilding the tool registry for every queen session
from framework.loader.mcp_registry import MCPRegistry
from framework.loader.tool_registry import ToolRegistry
_queen_tool_registry: ToolRegistry | None = None
try:
_queen_tool_registry = ToolRegistry()
import framework.agents.queen as _queen_pkg
queen_pkg_dir = Path(_queen_pkg.__file__).parent
mcp_config = queen_pkg_dir / "mcp_servers.json"
if mcp_config.exists():
_queen_tool_registry.load_mcp_config(mcp_config)
logger.info("Pre-loaded queen MCP tools from %s", mcp_config)
registry = MCPRegistry()
registry.initialize()
if (queen_pkg_dir / "mcp_registry.json").is_file():
_queen_tool_registry.set_mcp_registry_agent_path(queen_pkg_dir)
registry_configs, selection_max_tools = registry.load_agent_selection(queen_pkg_dir)
if registry_configs:
_queen_tool_registry.load_registry_servers(
registry_configs,
preserve_existing_tools=True,
log_collisions=True,
max_tools=selection_max_tools,
)
logger.info(
"Pre-loaded queen tool registry with %d tools", len(_queen_tool_registry.get_tools())
)
except Exception as e:
logger.warning("Failed to pre-load queen tool registry: %s", e)
app["queen_tool_registry"] = _queen_tool_registry
app["manager"] = SessionManager(
model=model,
credential_store=credential_store,
queen_tool_registry=None,
model=model, credential_store=credential_store, queen_tool_registry=_queen_tool_registry
)
# Register shutdown hook
@@ -307,20 +290,17 @@ def create_app(model: str | None = None) -> web.Application:
# Health check
app.router.add_get("/api/health", handle_health)
app.router.add_get("/api/browser/status", handle_browser_status)
app.router.add_get("/api/browser/status/stream", handle_browser_status_stream)
# Register route modules
from framework.server.routes_colony_workers import register_routes as register_colony_worker_routes
from framework.server.routes_config import register_routes as register_config_routes
from framework.server.routes_credentials import register_routes as register_credential_routes
from framework.server.routes_events import register_routes as register_event_routes
from framework.server.routes_execution import register_routes as register_execution_routes
from framework.server.routes_workers import register_routes as register_worker_routes
from framework.server.routes_logs import register_routes as register_log_routes
from framework.server.routes_messages import register_routes as register_message_routes
from framework.server.routes_prompts import register_routes as register_prompt_routes
from framework.server.routes_queens import register_routes as register_queen_routes
from framework.server.routes_sessions import register_routes as register_session_routes
from framework.server.routes_workers import register_routes as register_worker_routes
register_config_routes(app)
register_credential_routes(app)
@@ -331,8 +311,6 @@ def create_app(model: str | None = None) -> web.Application:
register_worker_routes(app)
register_log_routes(app)
register_queen_routes(app)
register_colony_worker_routes(app)
register_prompt_routes(app)
# Static file serving — Option C production mode
# If frontend/dist/ exists, serve built frontend files on /
+216 -178
@@ -13,8 +13,6 @@ from pathlib import Path
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from framework.agent_loop.internals.types import HookContext, HookResult
from framework.loader.tool_registry import ToolRegistry
from framework.server.session_manager import Session
logger = logging.getLogger(__name__)
@@ -51,7 +49,7 @@ def install_worker_escalation_routing(
# Defensive: ignore any stray non-worker origin (e.g. queen).
if not stream_id.startswith("worker:"):
return
worker_id = stream_id[len("worker:") :]
worker_id = stream_id[len("worker:"):]
data = event.data or {}
request_id = data.get("request_id")
reason = str(data.get("reason", "")).strip()
@@ -66,7 +64,8 @@ def install_worker_escalation_routing(
try:
await runtime.inject_input(
worker_id,
"[QUEEN_REPLY] queue_full — queen inbox saturated; proceed with best judgment or retry later.",
"[QUEEN_REPLY] queue_full — queen inbox saturated; "
"proceed with best judgment or retry later.",
)
except Exception:
logger.warning(
@@ -101,16 +100,24 @@ def install_worker_escalation_routing(
lines.append(context_text)
if request_id:
lines.append(
"Use reply_to_worker(request_id, reply) to unblock, or list_worker_questions() to see all pending."
"Use reply_to_worker(request_id, reply) to unblock, "
"or list_worker_questions() to see all pending."
)
else:
lines.append("No request_id — use inject_message(content=...) to relay guidance manually.")
lines.append(
"No request_id — use inject_message(content=...) to relay "
"guidance manually."
)
handoff = "\n".join(lines)
# Fallback: if the queen loop has gone away, publish a
# CLIENT_INPUT_REQUESTED so the human sees the question and the
# worker does not wedge.
queen_node = session.queen_executor.node_registry.get("queen") if session.queen_executor is not None else None
queen_node = (
session.queen_executor.node_registry.get("queen")
if session.queen_executor is not None
else None
)
if queen_node is None or not hasattr(queen_node, "inject_event"):
if session.event_bus is not None:
await session.event_bus.emit_client_input_requested(
@@ -134,7 +141,9 @@ def install_worker_escalation_routing(
filter_colony=runtime.colony_id,
)
except Exception:
logger.warning("Failed to install colony-scoped escalation sub", exc_info=True)
logger.warning(
"Failed to install colony-scoped escalation sub", exc_info=True
)
# fall through to session bus
if session.event_bus is None:
return None
@@ -165,20 +174,24 @@ def _build_credentials_provider() -> Any:
def _provider() -> str:
now = time.monotonic()
if state["cached"] and (now - state["cached_at"]) < _CREDENTIALS_BLOCK_TTL_SECONDS:
if (
state["cached"]
and (now - state["cached_at"]) < _CREDENTIALS_BLOCK_TTL_SECONDS
):
return state["cached"]
try:
from aden_tools.credentials.store_adapter import CredentialStoreAdapter
from framework.orchestrator.prompting import build_accounts_prompt
adapter = CredentialStoreAdapter.default()
accounts = adapter.get_all_account_info()
# Compact form (no tool_provider_map) — tool schemas already
# surface function names; baking the full per-provider list
# into the system prompt on every turn was ~2 KB of redundancy.
rendered = build_accounts_prompt(accounts)
tool_provider_map = adapter.get_tool_provider_map()
rendered = build_accounts_prompt(
accounts,
tool_provider_map=tool_provider_map,
node_tool_names=None,
)
except Exception:
logger.debug("Failed to render ambient credentials block", exc_info=True)
rendered = ""
@@ -229,7 +242,7 @@ async def materialize_queen_identity(
phase_state.queen_id = queen_id
phase_state.queen_profile = queen_profile
phase_state.queen_identity_prompt = format_queen_identity_prompt(queen_profile, max_examples=1)
phase_state.queen_identity_prompt = format_queen_identity_prompt(queen_profile)
if event_bus is not None:
await event_bus.publish(
@@ -266,23 +279,40 @@ async def create_queen(
queen_loop_config as _base_loop_config,
)
from framework.agents.queen.nodes import (
_QUEEN_BUILDING_TOOLS,
_QUEEN_EDITING_TOOLS,
_QUEEN_INDEPENDENT_TOOLS,
_QUEEN_REVIEWING_TOOLS,
_QUEEN_WORKING_TOOLS,
_QUEEN_PLANNING_TOOLS,
_QUEEN_RUNNING_TOOLS,
_QUEEN_STAGING_TOOLS,
_appendices,
_building_knowledge,
_planning_knowledge,
_queen_behavior_always,
_queen_behavior_building,
_queen_behavior_editing,
_queen_behavior_independent,
_queen_behavior_planning,
_queen_behavior_running,
_queen_behavior_staging,
_queen_character_core,
_queen_identity_editing,
_queen_phase_7,
_queen_role_building,
_queen_role_independent,
_queen_role_reviewing,
_queen_role_working,
_queen_role_planning,
_queen_role_running,
_queen_role_staging,
_queen_style,
_queen_tools_building,
_queen_tools_editing,
_queen_tools_independent,
_queen_tools_reviewing,
_queen_tools_working,
finalize_queen_prompt,
_queen_tools_planning,
_queen_tools_running,
_queen_tools_staging,
_shared_building_knowledge,
)
from framework.host.event_bus import AgentEvent, EventType
from framework.llm.capabilities import supports_image_tool_results
from framework.loader.mcp_registry import MCPRegistry
from framework.loader.tool_registry import ToolRegistry
from framework.tools.queen_lifecycle_tools import (
@@ -294,7 +324,9 @@ async def create_queen(
# Use pre-loaded cached registry if available (fast path)
if tool_registry is not None:
queen_registry = tool_registry
logger.info("Queen: using pre-loaded tool registry with %d tools", len(queen_registry.get_tools()))
logger.info(
"Queen: using pre-loaded tool registry with %d tools", len(queen_registry.get_tools())
)
else:
# Build fresh (slow path - for backwards compatibility)
queen_registry = ToolRegistry()
@@ -327,10 +359,7 @@ async def create_queen(
logger.warning("Queen: MCP registry config failed to load", exc_info=True)
# ---- Phase state --------------------------------------------------
# 3-phase model: caller supplies the phase directly (DM → independent,
# colony bootstrap → working). Fall back to independent when nothing
# is specified — there is no "staging"/"planning" bootstrap anymore.
effective_phase = initial_phase or ("working" if worker_identity else "independent")
effective_phase = initial_phase or ("staging" if worker_identity else "planning")
phase_state = QueenPhaseState(phase=effective_phase, event_bus=session.event_bus)
session.phase_state = phase_state
@@ -342,6 +371,28 @@ async def create_queen(
# when the user adds/removes an integration.
phase_state.credentials_prompt_provider = _build_credentials_provider()
# ---- Track ask rounds during planning ----------------------------
# Increment planning_ask_rounds each time the queen requests user
# input (ask_user or ask_user_multiple) while in the planning phase.
async def _track_planning_asks(event: AgentEvent) -> None:
if phase_state.phase != "planning":
return
# Only count explicit ask_user / ask_user_multiple calls, not
# auto-block (text-only turns emit CLIENT_INPUT_REQUESTED with
# an empty prompt and no options/questions).
data = event.data or {}
has_prompt = bool(data.get("prompt"))
has_questions = bool(data.get("questions"))
has_options = bool(data.get("options"))
if has_prompt or has_questions or has_options:
phase_state.planning_ask_rounds += 1
session.event_bus.subscribe(
[EventType.CLIENT_INPUT_REQUESTED],
_track_planning_asks,
filter_stream="queen",
)
# ---- Lifecycle tools (always registered) --------------------------
register_queen_lifecycle_tools(
queen_registry,
@@ -377,21 +428,39 @@ async def create_queen(
session._queen_tool_executor = queen_tool_executor # type: ignore[attr-defined]
# ---- Partition tools by phase ------------------------------------
planning_names = set(_QUEEN_PLANNING_TOOLS)
building_names = set(_QUEEN_BUILDING_TOOLS)
staging_names = set(_QUEEN_STAGING_TOOLS)
running_names = set(_QUEEN_RUNNING_TOOLS)
editing_names = set(_QUEEN_EDITING_TOOLS)
independent_names = set(_QUEEN_INDEPENDENT_TOOLS)
working_names = set(_QUEEN_WORKING_TOOLS)
reviewing_names = set(_QUEEN_REVIEWING_TOOLS)
registered_names = {t.name for t in queen_tools}
missing_building = building_names - registered_names
if missing_building:
logger.warning(
"Queen: %d/%d building tools NOT registered: %s",
len(missing_building),
len(building_names),
sorted(missing_building),
)
logger.info("Queen: registered tools: %s", sorted(registered_names))
phase_state.working_tools = [t for t in queen_tools if t.name in working_names]
phase_state.reviewing_tools = [t for t in queen_tools if t.name in reviewing_names]
phase_state.planning_tools = [t for t in queen_tools if t.name in planning_names]
phase_state.building_tools = [t for t in queen_tools if t.name in building_names]
phase_state.staging_tools = [t for t in queen_tools if t.name in staging_names]
phase_state.running_tools = [t for t in queen_tools if t.name in running_names]
phase_state.editing_tools = [t for t in queen_tools if t.name in editing_names]
# Independent phase gets core tools + all MCP tools not claimed by any
# other phase (coder-tools file I/O, gcu-tools browser, etc.).
all_phase_names = independent_names | working_names | reviewing_names
all_phase_names = (
planning_names | building_names | staging_names | running_names | editing_names
)
mcp_tools = [t for t in queen_tools if t.name not in all_phase_names]
phase_state.independent_tools = [t for t in queen_tools if t.name in independent_names] + mcp_tools
phase_state.independent_tools = [
t for t in queen_tools if t.name in independent_names
] + mcp_tools
logger.info(
"Queen: independent tools: %s",
sorted(t.name for t in phase_state.independent_tools),
@@ -412,35 +481,74 @@ async def create_queen(
# ---- Compose phase-specific prompts ------------------------------
from framework.agents.queen.nodes import queen_node as _orig_node
# Resolve vision-only prompt sections based on the session's LLM.
# session.llm is immutable for the session's lifetime, so this check
# is stable — prompts never need to be recomposed mid-session.
_has_vision = bool(session.llm and supports_image_tool_results(getattr(session.llm, "model", "")))
if worker_identity is None:
worker_identity = (
"\n\n# Worker Profile\n"
"No worker agent loaded. You are operating independently.\n"
"Design or build the agent to solve the user's problem "
"according to your current phase."
)
phase_state.prompt_independent = finalize_queen_prompt(
(
_queen_character_core
+ _queen_role_independent
+ _queen_style
+ _queen_tools_independent
+ _queen_behavior_always
+ _queen_behavior_independent
),
_has_vision,
_planning_body = (
_queen_character_core
+ _queen_role_planning
+ _queen_style
+ _shared_building_knowledge
+ _queen_tools_planning
+ _queen_behavior_always
+ _queen_behavior_planning
+ _planning_knowledge
+ worker_identity
)
phase_state.prompt_working = finalize_queen_prompt(
(_queen_character_core + _queen_role_working + _queen_style + _queen_tools_working + _queen_behavior_always),
_has_vision,
phase_state.prompt_planning = _planning_body
_building_body = (
_queen_character_core
+ _queen_role_building
+ _queen_style
+ _shared_building_knowledge
+ _queen_tools_building
+ _queen_behavior_always
+ _queen_behavior_building
+ _building_knowledge
+ _queen_phase_7
+ _appendices
+ worker_identity
)
phase_state.prompt_reviewing = finalize_queen_prompt(
(
_queen_character_core
+ _queen_role_reviewing
+ _queen_style
+ _queen_tools_reviewing
+ _queen_behavior_always
),
_has_vision,
phase_state.prompt_building = _building_body
phase_state.prompt_staging = (
_queen_character_core
+ _queen_role_staging
+ _queen_style
+ _queen_tools_staging
+ _queen_behavior_always
+ _queen_behavior_staging
+ worker_identity
)
phase_state.prompt_running = (
_queen_character_core
+ _queen_role_running
+ _queen_style
+ _queen_tools_running
+ _queen_behavior_always
+ _queen_behavior_running
+ worker_identity
)
phase_state.prompt_editing = (
_queen_identity_editing
+ _queen_style
+ _queen_tools_editing
+ _queen_behavior_always
+ _queen_behavior_editing
+ worker_identity
)
phase_state.prompt_independent = (
_queen_character_core
+ _queen_role_independent
+ _queen_style
+ _queen_tools_independent
+ _queen_behavior_always
+ _queen_behavior_independent
)
# ---- Default skill protocols -------------------------------------
@@ -455,10 +563,6 @@ async def create_queen(
_queen_skills_mgr.load()
phase_state.protocols_prompt = _queen_skills_mgr.protocols_prompt
phase_state.skills_catalog_prompt = _queen_skills_mgr.skills_catalog_prompt
# Also store the manager so get_current_prompt() can render a
# phase-filtered catalog on each turn (skills with a `visibility`
# frontmatter that excludes the current phase are dropped).
phase_state.skills_manager = _queen_skills_mgr
_queen_skill_dirs = _queen_skills_mgr.allowlisted_dirs
except Exception:
logger.debug("Queen skill loading failed (non-fatal)", exc_info=True)
@@ -500,14 +604,6 @@ async def create_queen(
)
async def _queen_identity_hook(ctx: HookContext) -> HookResult | None:
from framework.agent_loop.internals.types import HookResult
from framework.agents.queen.queen_profiles import (
ensure_default_queens,
format_queen_identity_prompt,
load_queen_profile,
select_queen,
)
ensure_default_queens()
trigger = ctx.trigger or ""
# If the session was pre-bound to a queen (user clicked a specific
@@ -526,7 +622,7 @@ async def create_queen(
except FileNotFoundError:
logger.warning("Queen profile %s not found after selection", queen_id)
return None
identity_prompt = format_queen_identity_prompt(profile, max_examples=1)
identity_prompt = format_queen_identity_prompt(profile)
# Store on phase_state so identity persists across dynamic prompt refreshes
phase_state.queen_id = queen_id
phase_state.queen_profile = profile
@@ -559,12 +655,18 @@ async def create_queen(
try:
_meta = _json.loads(_meta_path.read_text(encoding="utf-8"))
_meta["queen_id"] = queen_id
_meta_path.write_text(_json.dumps(_meta, ensure_ascii=False), encoding="utf-8")
_meta_path.write_text(
_json.dumps(_meta, ensure_ascii=False), encoding="utf-8"
)
except (OSError, _json.JSONDecodeError):
pass
# Re-point event bus log to new location, preserving offset
_offset = getattr(session.event_bus, "_session_log_iteration_offset", 0)
session.event_bus.set_session_log(_new_dir / "events.jsonl", iteration_offset=_offset)
_offset = getattr(
session.event_bus, "_session_log_iteration_offset", 0
)
session.event_bus.set_session_log(
_new_dir / "events.jsonl", iteration_offset=_offset
)
if _session_event_bus is not None:
await _session_event_bus.publish(
@@ -619,7 +721,7 @@ async def create_queen(
logger.debug("Queen: tools not yet available (registered on worker load): %s", missing)
node_updates["tools"] = available_tools
_orig_node.model_copy(update=node_updates)
adjusted_node = _orig_node.model_copy(update=node_updates)
# Determine session mode:
# - RESTORE: Resume cold session with history, no initial prompt -> wait for user
@@ -637,18 +739,6 @@ async def create_queen(
async def _queen_loop():
logger.debug("[_queen_loop] Starting queen loop for session %s", session.id)
# Scope the browser profile to this session so parallel queens each
# drive their own Chrome tab group instead of fighting over "default".
# Browser tools run in a stdio MCP subprocess, so we can't set a
# contextvar across processes — instead we inject `profile` as a
# CONTEXT_PARAM that ToolRegistry passes into every MCP call. The
# token stays local to this task.
try:
from framework.loader.tool_registry import ToolRegistry
ToolRegistry.set_execution_context(profile=session.id)
except Exception:
logger.debug("Queen: failed to set browser profile for session %s", session.id, exc_info=True)
try:
lc = _queen_loop_config
queen_loop_config = LoopConfig(
@@ -678,15 +768,6 @@ async def create_queen(
system_prompt="",
tools=[t.name for t in queen_tools],
tool_access_policy="all",
# Queen is a forever-alive conversational agent: bypass
# the implicit judge entirely. Without this, a text-only
# turn (greeting, clarifying question, summary) falls
# through to the default ACCEPT verdict in
# judge_pipeline.py, which terminates the loop and
# leaves session.queen_executor=None until the user
# reloads. Mirrors the static queen_node NodeSpec in
# framework.agents.queen.nodes which already sets this.
skip_judge=True,
)
ctx = AgentContext(
@@ -716,71 +797,44 @@ async def create_queen(
phase_state.inject_notification = _inject_phase_notification
async def _on_worker_report(event):
"""Inject [WORKER_REPORT] into queen as each worker finishes.
Subscribes to SUBAGENT_REPORT events which carry the worker's
real summary/data (preferring any explicit ``report_to_parent``
call). Every spawned worker emits exactly one success,
partial, failed, timeout, or stopped. The queen sees the
report as the next user turn and can react (reply to user,
kick off follow-up work, etc.) without being blocked by the
spawn call itself.
"""
async def _on_worker_done(event):
if event.stream_id == "queen":
return
data = event.data or {}
worker_id = data.get("worker_id", event.node_id or "unknown")
status = data.get("status", "unknown")
summary = data.get("summary") or "(no summary)"
err = data.get("error")
payload_data = data.get("data") or {}
duration = data.get("duration_seconds")
lines = ["[WORKER_REPORT]", f"worker_id: {worker_id}", f"status: {status}"]
if duration is not None:
try:
lines.append(f"duration: {float(duration):.1f}s")
except (TypeError, ValueError):
pass
lines.append(f"summary: {summary}")
if err:
lines.append(f"error: {err}")
if payload_data:
# Compact JSON so the queen sees all keys without the
# indentation blowing up the turn's token count.
try:
import json as _json
lines.append("data: " + _json.dumps(payload_data, ensure_ascii=False, default=str))
except Exception:
lines.append(f"data: {payload_data!r}")
notification = "\n".join(lines)
await agent_loop.inject_event(notification)
session.worker_configured = True
# Only transition to reviewing once the batch has quieted —
# if other workers from a parallel spawn are still live, stay
# in working so the queen's tool access (run_parallel_workers,
# inject_message, stop_worker) remains available.
colony_runtime = getattr(session, "colony_runtime", None)
still_active = 0
if colony_runtime is not None:
try:
still_active = sum(
1
for w in colony_runtime._workers.values() # type: ignore[attr-defined]
if getattr(w, "is_active", False)
if phase_state.phase == "running":
if event.type == EventType.EXECUTION_COMPLETED:
session.worker_configured = True
output = event.data.get("output", {})
output_summary = ""
if output:
for key, value in output.items():
val_str = str(value)
if len(val_str) > 200:
val_str = val_str[:200] + "..."
output_summary += f"\n {key}: {val_str}"
_out = output_summary or " (no output keys set)"
notification = (
"[WORKER_TERMINAL] Worker finished successfully.\n"
f"Output:{_out}\n"
"Report this to the user. "
"Ask if they want to re-run with different input "
"or tweak the configuration."
)
except Exception:
still_active = 0
if still_active == 0 and phase_state.phase in ("working", "running"):
await phase_state.switch_to_reviewing(source="auto")
else:
error = event.data.get("error", "Unknown error")
notification = (
"[WORKER_TERMINAL] Worker failed.\n"
f"Error: {error}\n"
"Report this to the user and help them troubleshoot. "
"You can re-run with different input or escalate to "
"building/planning if code changes are needed."
)
await agent_loop.inject_event(notification)
await phase_state.switch_to_editing(source="auto")
session.event_bus.subscribe(
event_types=[EventType.SUBAGENT_REPORT],
handler=_on_worker_report,
event_types=[EventType.EXECUTION_COMPLETED, EventType.EXECUTION_FAILED],
handler=_on_worker_done,
)
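The handler caps each output value at 200 characters before relaying it to the queen. The truncation step distilled into a helper (name and signature are mine, not the repo's):

```python
def summarize_output(output: dict, limit: int = 200) -> str:
    """Render output keys for a [WORKER_TERMINAL] notification, value-capped."""
    if not output:
        return " (no output keys set)"
    lines = []
    for key, value in output.items():
        val_str = str(value)
        if len(val_str) > limit:
            val_str = val_str[:limit] + "..."
        lines.append(f"\n  {key}: {val_str}")
    return "".join(lines)
```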
# ---- Colony-scoped worker escalation routing ----
@@ -813,22 +867,9 @@ async def create_queen(
# bootstrap: if the frontend doesn't pass initial_prompt, we must
# NOT invent a phantom "Hello" — that used to concatenate with the
# real first chat message and confuse the model.
ctx.input_data = {"user_request": None if _is_restore_mode else (initial_prompt or None)}
# Publish the initial prompt as a CLIENT_INPUT_RECEIVED event so
# it appears in the SSE stream and persists to events.jsonl for
# session resume. The /chat endpoint does the same for injected
# messages; this covers the session-creation-with-prompt path.
if initial_prompt and not _is_restore_mode:
await session.event_bus.publish(
AgentEvent(
type=EventType.CLIENT_INPUT_RECEIVED,
stream_id="queen",
node_id="queen",
execution_id=session.id,
data={"content": initial_prompt},
)
)
ctx.input_data = {
"user_request": None if _is_restore_mode else (initial_prompt or None)
}
logger.info(
"Queen %s in %s phase with %d tools: %s",
@@ -841,10 +882,7 @@ async def create_queen(
# Run the queen -- forever-alive conversation loop
result = await agent_loop.execute(ctx)
# AgentResult doesn't have stop_reason — check success/error.
# The queen is expected to be forever-alive; a clean return
# means the loop hit max_iterations or decided to exit.
if result.success:
if result.stop_reason == "complete":
logger.warning("Queen returned (should be forever-alive)")
elif result.error:
logger.error("Queen failed: %s", result.error)
@@ -1,708 +0,0 @@
"""Colony worker inspection routes.
These expose per-spawned-worker data (identified by worker_id) so the
frontend can render a colony-workers sidebar analogous to the queen
profile panel. Distinct from ``routes_workers.py``, which deals with
*graph nodes* inside a worker definition rather than live worker
instances.
Session-scoped (bound to a live session's runtime):
- GET /api/sessions/{session_id}/workers live + completed workers
- GET /api/sessions/{session_id}/colony/skills colony's shared skills catalog
- GET /api/sessions/{session_id}/colony/tools colony's default tools
Colony-scoped (bound to the on-disk colony directory, independent of any
live session; one colony has exactly one progress.db):
- GET /api/colonies/{colony_name}/progress/snapshot progress.db tasks/steps snapshot
- GET /api/colonies/{colony_name}/progress/stream SSE feed of upserts (polled)
- GET /api/colonies/{colony_name}/data/tables list user tables in progress.db
- GET /api/colonies/{colony_name}/data/tables/{table}/rows paginated rows
- PATCH /api/colonies/{colony_name}/data/tables/{table}/rows edit a row
"""
import asyncio
import json
import logging
import re
import sqlite3
from pathlib import Path
from aiohttp import web
from framework.server.app import resolve_session
# Same validation used by create_colony — keep them in sync. Blocks path
# traversal (``..``) and shell-special chars; the endpoint would 400 on
# anything else anyway, but validating early avoids a disk hit.
_COLONY_NAME_RE = re.compile(r"^[a-z0-9_]+$")
logger = logging.getLogger(__name__)
# Poll interval for the progress SSE stream. Progress rows flip on the
# order of seconds as workers finish LLM turns, so 1s feels live without
# hammering the DB.
_PROGRESS_POLL_INTERVAL = 1.0
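# Hedged usage sketch: how a client might hit the read-only endpoints
# this module registers. The base URL, port, and helper name are
# illustrative assumptions, not part of the framework.
async def _example_fetch_progress_snapshot(
    colony_name: str, worker_id: str | None = None
) -> dict:
    import aiohttp  # client side only; the module itself needs just aiohttp.web

    params = {"worker_id": worker_id} if worker_id else {}
    async with aiohttp.ClientSession("http://127.0.0.1:8080") as http:
        async with http.get(
            f"/api/colonies/{colony_name}/progress/snapshot", params=params
        ) as resp:
            return await resp.json()  # {"tasks": [...], "steps": [...]}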
def _worker_info_to_dict(info) -> dict:
"""Serialize a WorkerInfo dataclass to a JSON-friendly dict."""
result_dict = None
if info.result is not None:
r = info.result
result_dict = {
"status": r.status,
"summary": r.summary,
"error": r.error,
"tokens_used": r.tokens_used,
"duration_seconds": r.duration_seconds,
}
return {
"worker_id": info.id,
"task": info.task,
"status": str(info.status),
"started_at": info.started_at,
"result": result_dict,
}
async def handle_list_workers(request: web.Request) -> web.Response:
"""GET /api/sessions/{session_id}/workers -- list workers in a session's colony.
Returns two populations merged:
1. In-memory workers from the session's unified ColonyRuntime
(``session.colony._workers``). Includes live + just-finished
entries since ``_workers`` isn't pruned on termination.
2. Historical worker directories on disk under
``<session_dir>/workers/`` that are not in memory. Populated
from dir name / first user message / dir mtime. These appear
as ``status="historical"`` so the frontend can style them
distinctly from actives.
Falls back to the legacy ``session.colony_runtime`` for the
in-memory half when ``session.colony`` isn't set.
"""
session, err = resolve_session(request)
if err:
return err
runtime = getattr(session, "colony", None) or getattr(session, "colony_runtime", None)
workers: list[dict] = []
known_ids: set[str] = set()
storage_path: Path | None = None
if runtime is not None:
for info in runtime.list_workers():
workers.append(_worker_info_to_dict(info))
known_ids.add(info.id)
raw_storage = getattr(runtime, "_storage_path", None)
if raw_storage is not None:
storage_path = Path(raw_storage)
# Fall back to the session's directory if the runtime didn't expose one.
if storage_path is None:
session_dir = getattr(session, "queen_dir", None) or getattr(session, "session_dir", None)
if session_dir is not None:
storage_path = Path(session_dir)
if storage_path is not None:
workers.extend(await asyncio.to_thread(_walk_historical_workers, storage_path, known_ids))
return web.json_response({"workers": workers})
def _walk_historical_workers(storage_path: Path, known_ids: set[str]) -> list[dict]:
"""Scan ``<storage_path>/workers/`` for worker session dirs not already
in memory and return minimal ``WorkerSummary``-shaped entries.
We don't persist a standalone status file per worker, so the on-disk
entries get ``status="historical"`` and ``result=None``. The task is
reconstructed from the first non-boilerplate user message in the
worker's conversation parts.
"""
workers_dir = storage_path / "workers"
if not workers_dir.exists() or not workers_dir.is_dir():
return []
out: list[dict] = []
try:
entries = list(workers_dir.iterdir())
except OSError:
return []
# Newest dir first so recent runs surface first in the tab.
entries.sort(key=lambda p: _safe_mtime(p), reverse=True)
for entry in entries:
if not entry.is_dir():
continue
wid = entry.name
if wid in known_ids:
continue
out.append(
{
"worker_id": wid,
"task": _extract_historical_task(entry),
"status": "historical",
"started_at": _safe_mtime(entry),
"result": None,
}
)
return out
def _safe_mtime(path: Path) -> float:
try:
return path.stat().st_mtime
except OSError:
return 0.0
def _extract_historical_task(worker_dir: Path) -> str:
"""Pull the worker's initial task from its conversation parts.
seq 0 is a boilerplate "Hello" greeting in most flows; the real
task lands in an early user message (typically seq 1 or 2). Scan
the first few parts and return the first ``role="user"`` content
that isn't the greeting. Bounded at 5 parts to stay cheap on
directory listings containing hundreds of workers.
"""
parts_dir = worker_dir / "conversations" / "parts"
if not parts_dir.exists():
return ""
try:
for i in range(5):
p = parts_dir / f"{i:010d}.json"
if not p.exists():
break
data = json.loads(p.read_text(encoding="utf-8"))
if data.get("role") != "user":
continue
content = data.get("content", "")
if not isinstance(content, str):
continue
text = content.strip()
if not text or text.lower() == "hello":
continue
return text[:400]
except Exception:
return ""
return ""
# ── Skills & tools ─────────────────────────────────────────────────
def _parsed_skill_to_dict(skill) -> dict:
"""Serialize a ParsedSkill for the frontend."""
return {
"name": skill.name,
"description": skill.description,
"location": skill.location,
"base_dir": skill.base_dir,
"source_scope": skill.source_scope,
}
async def handle_list_colony_skills(request: web.Request) -> web.Response:
"""GET /api/sessions/{session_id}/colony/skills -- list skills the colony sees."""
session, err = resolve_session(request)
if err:
return err
runtime = session.colony_runtime
if runtime is None:
return web.json_response({"skills": []})
# Reach into the skills manager's catalog. There is no public
# iterator yet; we touch the private dict directly and defensively
# tolerate either shape (bare SkillsManager, or the
# from_precomputed variant which has no catalog).
catalog = getattr(runtime._skills_manager, "_catalog", None)
skills_dict = getattr(catalog, "_skills", None) if catalog is not None else None
if not isinstance(skills_dict, dict):
return web.json_response({"skills": []})
skills = [_parsed_skill_to_dict(s) for s in skills_dict.values()]
skills.sort(key=lambda s: s["name"])
return web.json_response({"skills": skills})
# Tools that ship with the framework and have no credential provider,
# but still deserve their own logical group. Surfaced to the frontend
# as ``provider="system"`` so the UI treats them exactly like a
# credential-backed group.
_SYSTEM_TOOLS: frozenset[str] = frozenset(
{
"get_account_info",
"get_current_time",
"bash_kill",
"bash_output",
"execute_command_tool",
"example_tool",
}
)
def _tool_to_dict(tool, provider_map: dict[str, str] | None) -> dict:
"""Serialize a Tool dataclass for the frontend.
``provider_map`` is the colony runtime's tool_name → credential
provider map (built by the CredentialResolver pipeline stage from
``CredentialStoreAdapter.get_tool_provider_map()``). Credential-
backed tools get a canonical provider key (e.g. ``"hubspot"``,
``"gmail"``); framework / core tools return ``None``, except for
the hand-picked entries in ``_SYSTEM_TOOLS`` which are tagged
``"system"``.
"""
name = getattr(tool, "name", "")
provider = (provider_map or {}).get(name)
if provider is None and name in _SYSTEM_TOOLS:
provider = "system"
return {
"name": name,
"description": getattr(tool, "description", ""),
"provider": provider,
}
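# Worked example of the tagging rules (tool names and provider map are
# hypothetical); any object exposing ``name``/``description`` works
# because _tool_to_dict reads them via getattr.
def _demo_tool_provider_tagging() -> None:
    from types import SimpleNamespace

    pmap = {"hubspot_create_contact": "hubspot"}
    cred = SimpleNamespace(name="hubspot_create_contact", description="")
    system = SimpleNamespace(name="get_current_time", description="")
    core = SimpleNamespace(name="read_file", description="")
    assert _tool_to_dict(cred, pmap)["provider"] == "hubspot"
    assert _tool_to_dict(system, pmap)["provider"] == "system"
    assert _tool_to_dict(core, pmap)["provider"] is None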
async def handle_list_colony_tools(request: web.Request) -> web.Response:
"""GET /api/sessions/{session_id}/colony/tools -- list the colony's default tools."""
session, err = resolve_session(request)
if err:
return err
runtime = session.colony_runtime
if runtime is None:
return web.json_response({"tools": []})
provider_map = getattr(runtime, "_tool_provider_map", None)
tools = [_tool_to_dict(t, provider_map) for t in (runtime._tools or [])]
tools.sort(key=lambda t: t["name"])
return web.json_response({"tools": tools})
# ── Progress DB (tasks/steps) ──────────────────────────────────────
def _resolve_progress_db_by_name(colony_name: str) -> Path | None:
"""Resolve a colony's progress.db path by directory name.
Returns ``None`` when the name fails validation or the file does not
exist. Both conditions render as an empty Data tab in the UI rather
than a hard error so an operator can open the panel before any
workers have actually run.
"""
if not _COLONY_NAME_RE.match(colony_name):
return None
db_path = Path.home() / ".hive" / "colonies" / colony_name / "data" / "progress.db"
return db_path if db_path.exists() else None
def _read_progress_snapshot(db_path: Path, worker_id: str | None) -> dict:
"""Read tasks + steps from progress.db, optionally filtered by worker_id.
The worker_id filter applies to tasks (claimed by that worker) and
to steps (executed by that worker). If omitted, returns all rows.
"""
con = sqlite3.connect(f"file:{db_path}?mode=ro", uri=True, timeout=5.0)
try:
con.row_factory = sqlite3.Row
if worker_id:
task_rows = con.execute(
"SELECT * FROM tasks WHERE worker_id = ? ORDER BY updated_at DESC",
(worker_id,),
).fetchall()
step_rows = con.execute(
"SELECT * FROM steps WHERE worker_id = ? ORDER BY task_id, seq",
(worker_id,),
).fetchall()
else:
task_rows = con.execute("SELECT * FROM tasks ORDER BY updated_at DESC LIMIT 500").fetchall()
step_rows = con.execute("SELECT * FROM steps ORDER BY task_id, seq LIMIT 2000").fetchall()
return {
"tasks": [dict(r) for r in task_rows],
"steps": [dict(r) for r in step_rows],
}
finally:
con.close()
async def handle_progress_snapshot(request: web.Request) -> web.Response:
"""GET /api/colonies/{colony_name}/progress/snapshot
Optional ?worker_id=... to filter to rows touched by a specific worker.
"""
colony_name = request.match_info["colony_name"]
db_path = _resolve_progress_db_by_name(colony_name)
if db_path is None:
return web.json_response({"tasks": [], "steps": []})
worker_id = request.query.get("worker_id") or None
snapshot = await asyncio.to_thread(_read_progress_snapshot, db_path, worker_id)
return web.json_response(snapshot)
def _read_progress_upserts(
db_path: Path,
worker_id: str | None,
since: str | None,
) -> tuple[list[dict], list[dict], str | None]:
"""Return task/step rows with ``updated_at`` (tasks) or a derived
timestamp (steps) newer than ``since``, plus the new high-water mark.
Steps don't carry an ``updated_at`` column — we use
``COALESCE(completed_at, started_at)`` as the change witness. A step
without either timestamp hasn't changed since the last poll and is
skipped.
``since`` is an ISO8601 string (as produced by progress_db._now_iso).
``None`` means "give me everything" used for the SSE priming frame.
"""
con = sqlite3.connect(f"file:{db_path}?mode=ro", uri=True, timeout=5.0)
try:
con.row_factory = sqlite3.Row
task_sql = "SELECT * FROM tasks"
step_sql = (
"SELECT *, COALESCE(completed_at, started_at) AS _ts "
"FROM steps WHERE COALESCE(completed_at, started_at) IS NOT NULL"
)
task_args: list = []
step_args: list = []
if since is not None:
task_sql += " WHERE updated_at > ?"
step_sql += " AND COALESCE(completed_at, started_at) > ?"
task_args.append(since)
step_args.append(since)
if worker_id:
joiner_t = " AND " if since is not None else " WHERE "
task_sql += joiner_t + "worker_id = ?"
step_sql += " AND worker_id = ?"
task_args.append(worker_id)
step_args.append(worker_id)
task_sql += " ORDER BY updated_at"
step_sql += " ORDER BY _ts"
task_rows = con.execute(task_sql, task_args).fetchall()
step_rows = con.execute(step_sql, step_args).fetchall()
tasks = [dict(r) for r in task_rows]
steps = [dict(r) for r in step_rows]
# High-water mark = max timestamp across both sets. Fall back to
# the previous ``since`` when nothing changed.
ts_values = [t["updated_at"] for t in tasks]
ts_values.extend(s["_ts"] for s in steps if s.get("_ts"))
new_since = max(ts_values) if ts_values else since
return tasks, steps, new_since
finally:
con.close()
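# Sketch of the incremental-poll contract: prime with since=None, then
# feed each returned high-water mark back in. Assumes db_path points at
# an existing progress.db; rendering is elided.
def _demo_poll_once(db_path: Path, since: str | None) -> str | None:
    tasks, steps, new_since = _read_progress_upserts(db_path, None, since)
    if tasks or steps:
        print(f"changed: {len(tasks)} tasks, {len(steps)} steps")
    # Pass new_since into the next call; when nothing changed it equals
    # the mark we passed in, so re-polling stays idempotent.
    return new_since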
async def handle_progress_stream(request: web.Request) -> web.StreamResponse:
"""GET /api/colonies/{colony_name}/progress/stream
SSE feed that emits ``snapshot`` once (current state) followed by
``upsert`` events whenever a task/step row changes. Polls the DB
every ``_PROGRESS_POLL_INTERVAL`` seconds; the sqlite3 CLI path
workers use for writes doesn't fire SQLite's update hook on our
connection, so polling is the robust option.
"""
colony_name = request.match_info["colony_name"]
worker_id = request.query.get("worker_id") or None
resp = web.StreamResponse(
status=200,
headers={
"Content-Type": "text/event-stream",
"Cache-Control": "no-cache, no-transform",
"Connection": "keep-alive",
"X-Accel-Buffering": "no",
},
)
await resp.prepare(request)
async def _send(event: str, data: dict) -> None:
payload = f"event: {event}\ndata: {json.dumps(data)}\n\n"
await resp.write(payload.encode("utf-8"))
db_path = _resolve_progress_db_by_name(colony_name)
if db_path is None:
await _send("snapshot", {"tasks": [], "steps": []})
await _send("end", {"reason": "no_progress_db"})
return resp
try:
snapshot = await asyncio.to_thread(_read_progress_snapshot, db_path, worker_id)
await _send("snapshot", snapshot)
since: str | None = None
# Initialize the high-water mark from the snapshot so we don't
# re-emit every row as "new" on the first poll.
ts_values: list[str] = [t.get("updated_at") for t in snapshot["tasks"] if t.get("updated_at")]
ts_values.extend(
s.get("completed_at") or s.get("started_at")
for s in snapshot["steps"]
if s.get("completed_at") or s.get("started_at")
)
if ts_values:
since = max(v for v in ts_values if v)
# The loop relies on client disconnect surfacing as
# ConnectionResetError from ``_send`` — no explicit alive check
# required.
while True:
await asyncio.sleep(_PROGRESS_POLL_INTERVAL)
tasks, steps, new_since = await asyncio.to_thread(_read_progress_upserts, db_path, worker_id, since)
if tasks or steps:
await _send("upsert", {"tasks": tasks, "steps": steps})
since = new_since
except (asyncio.CancelledError, ConnectionResetError):
# Client disconnected; clean exit.
raise
except Exception as exc:
logger.warning("progress stream error: %s", exc, exc_info=True)
try:
await _send("error", {"message": str(exc)})
except Exception:
pass
return resp
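# Hedged consumer sketch for the stream above (URL and colony name are
# illustrative). aiohttp hands us the raw byte stream, so the
# ``event:``/``data:`` framing is parsed by hand.
async def _demo_consume_progress_stream(base_url: str, colony_name: str) -> None:
    import aiohttp

    url = f"{base_url}/api/colonies/{colony_name}/progress/stream"
    async with aiohttp.ClientSession() as http:
        async with http.get(url) as resp:
            event = None
            async for raw in resp.content:
                line = raw.decode("utf-8").rstrip("\n")
                if line.startswith("event: "):
                    event = line[len("event: "):]
                elif line.startswith("data: "):
                    payload = json.loads(line[len("data: "):])
                    if event == "upsert":
                        print("changed tasks:", len(payload["tasks"]))
                    elif event == "end":
                        break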
# ── Raw data grid (airtable-style view/edit of progress.db tables) ─────
#
# The Data tab lets the operator inspect and hand-edit SQLite rows.
# Identifier-quoting note: SQLite params can only bind values, never
# identifiers, so we have to interpolate table/column names into SQL.
# Every name is *validated against sqlite_master / PRAGMA table_info*
# before use and then wrapped with ``_q()`` which escapes embedded
# quotes. Do NOT accept raw names from the request without running them
# through ``_validate_ident`` first.
def _q(ident: str) -> str:
"""Quote a SQLite identifier (table or column) safely."""
return '"' + ident.replace('"', '""') + '"'
def _list_user_tables(con: sqlite3.Connection) -> list[str]:
return [
r["name"]
for r in con.execute(
"SELECT name FROM sqlite_master WHERE type='table' AND name NOT LIKE 'sqlite_%' ORDER BY name"
)
]
def _table_columns(con: sqlite3.Connection, table: str) -> list[dict]:
"""Return PRAGMA table_info rows as dicts. Empty list if no such table."""
return [
{
"name": r["name"],
"type": r["type"] or "",
"notnull": bool(r["notnull"]),
# pk>0 means the column is part of the primary key (ordinal);
# 0 means non-PK.
"pk": int(r["pk"]),
"dflt_value": r["dflt_value"],
}
for r in con.execute(f"PRAGMA table_info({_q(table)})")
]
def _read_tables_overview(db_path: Path) -> list[dict]:
"""List user tables with columns + row counts."""
con = sqlite3.connect(f"file:{db_path}?mode=ro", uri=True, timeout=5.0)
try:
con.row_factory = sqlite3.Row
out: list[dict] = []
for name in _list_user_tables(con):
cols = _table_columns(con, name)
count_row = con.execute(f"SELECT COUNT(*) AS c FROM {_q(name)}").fetchone()
out.append(
{
"name": name,
"columns": cols,
"row_count": int(count_row["c"]),
"primary_key": [c["name"] for c in cols if c["pk"] > 0],
}
)
return out
finally:
con.close()
def _validate_ident(name: str, known: set[str]) -> str | None:
"""Return ``name`` if present in ``known``, else ``None``."""
return name if name in known else None
def _read_table_rows(
db_path: Path,
table: str,
limit: int,
offset: int,
order_by: str | None,
order_dir: str,
) -> dict:
con = sqlite3.connect(f"file:{db_path}?mode=ro", uri=True, timeout=5.0)
try:
con.row_factory = sqlite3.Row
tables = set(_list_user_tables(con))
if _validate_ident(table, tables) is None:
return {"error": f"unknown table: {table}"}
cols = _table_columns(con, table)
col_names = {c["name"] for c in cols}
sql = f"SELECT * FROM {_q(table)}"
if order_by and order_by in col_names:
direction = "DESC" if order_dir.lower() == "desc" else "ASC"
sql += f" ORDER BY {_q(order_by)} {direction}"
sql += " LIMIT ? OFFSET ?"
rows = con.execute(sql, (int(limit), int(offset))).fetchall()
total = con.execute(f"SELECT COUNT(*) AS c FROM {_q(table)}").fetchone()["c"]
return {
"table": table,
"columns": cols,
"primary_key": [c["name"] for c in cols if c["pk"] > 0],
"rows": [dict(r) for r in rows],
"total": int(total),
"limit": int(limit),
"offset": int(offset),
}
finally:
con.close()
def _update_table_row(
db_path: Path,
table: str,
pk: dict,
updates: dict,
) -> dict:
"""Apply ``updates`` (column->value) to the row matching ``pk``.
Returns ``{"updated": n}`` with the number of rows affected (0 or 1),
or ``{"error": ...}`` on validation failure.
"""
if not updates:
return {"error": "no updates provided"}
con = sqlite3.connect(db_path, timeout=5.0)
try:
con.row_factory = sqlite3.Row
tables = set(_list_user_tables(con))
if _validate_ident(table, tables) is None:
return {"error": f"unknown table: {table}"}
cols = _table_columns(con, table)
col_names = {c["name"] for c in cols}
pk_cols = [c["name"] for c in cols if c["pk"] > 0]
if not pk_cols:
return {"error": f"table {table!r} has no primary key; cannot edit by row"}
# Validate that pk supplies every primary-key column.
missing = [p for p in pk_cols if p not in pk]
if missing:
return {"error": f"missing primary key columns: {missing}"}
# Validate update columns exist and aren't part of the primary key
# (changing a PK column would silently break joins/foreign refs).
bad = [c for c in updates if c not in col_names]
if bad:
return {"error": f"unknown columns: {bad}"}
pk_update = [c for c in updates if c in pk_cols]
if pk_update:
return {"error": f"cannot edit primary key columns: {pk_update}"}
set_sql = ", ".join(f"{_q(c)} = ?" for c in updates)
where_sql = " AND ".join(f"{_q(c)} = ?" for c in pk_cols)
sql = f"UPDATE {_q(table)} SET {set_sql} WHERE {where_sql}"
params = list(updates.values()) + [pk[c] for c in pk_cols]
cur = con.execute(sql, params)
con.commit()
return {"updated": cur.rowcount}
finally:
con.close()
async def handle_list_tables(request: web.Request) -> web.Response:
"""GET /api/colonies/{colony_name}/data/tables"""
colony_name = request.match_info["colony_name"]
db_path = _resolve_progress_db_by_name(colony_name)
if db_path is None:
return web.json_response({"tables": []})
tables = await asyncio.to_thread(_read_tables_overview, db_path)
return web.json_response({"tables": tables})
async def handle_table_rows(request: web.Request) -> web.Response:
"""GET /api/colonies/{colony_name}/data/tables/{table}/rows"""
colony_name = request.match_info["colony_name"]
db_path = _resolve_progress_db_by_name(colony_name)
if db_path is None:
return web.json_response({"error": "no progress.db"}, status=404)
table = request.match_info["table"]
# Clamp limit: 500 is enough for the grid's virtualization window;
# a larger cap would make accidental full-table loads too easy.
try:
limit = max(1, min(500, int(request.query.get("limit", "100"))))
offset = max(0, int(request.query.get("offset", "0")))
except ValueError:
return web.json_response({"error": "invalid limit/offset"}, status=400)
order_by = request.query.get("order_by") or None
order_dir = request.query.get("order_dir", "asc")
result = await asyncio.to_thread(_read_table_rows, db_path, table, limit, offset, order_by, order_dir)
if "error" in result:
return web.json_response(result, status=400)
return web.json_response(result)
async def handle_update_row(request: web.Request) -> web.Response:
"""PATCH /api/colonies/{colony_name}/data/tables/{table}/rows
Body: ``{"pk": {col: value, ...}, "updates": {col: value, ...}}``.
"""
colony_name = request.match_info["colony_name"]
db_path = _resolve_progress_db_by_name(colony_name)
if db_path is None:
return web.json_response({"error": "no progress.db"}, status=404)
try:
body = await request.json()
except Exception:
return web.json_response({"error": "invalid JSON body"}, status=400)
pk = body.get("pk") or {}
updates = body.get("updates") or {}
if not isinstance(pk, dict) or not isinstance(updates, dict):
return web.json_response({"error": "pk and updates must be objects"}, status=400)
table = request.match_info["table"]
result = await asyncio.to_thread(_update_table_row, db_path, table, pk, updates)
if "error" in result:
return web.json_response(result, status=400)
return web.json_response(result)
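# Client-side sketch of the PATCH contract (base URL, table, and column
# names are assumptions for illustration): identify the row by its full
# primary key and put only non-PK columns in "updates".
async def _demo_patch_row(base_url: str, colony_name: str) -> None:
    import aiohttp

    body = {"pk": {"task_id": "t_001"}, "updates": {"status": "done"}}
    async with aiohttp.ClientSession(base_url) as http:
        async with http.patch(
            f"/api/colonies/{colony_name}/data/tables/tasks/rows", json=body
        ) as resp:
            print(await resp.json())  # {"updated": 1} on success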
def register_routes(app: web.Application) -> None:
"""Register colony worker routes."""
# Session-scoped — these read live runtime state from a session.
app.router.add_get("/api/sessions/{session_id}/workers", handle_list_workers)
app.router.add_get("/api/sessions/{session_id}/colony/skills", handle_list_colony_skills)
app.router.add_get("/api/sessions/{session_id}/colony/tools", handle_list_colony_tools)
# Colony-scoped — one progress.db per colony, no session indirection.
app.router.add_get(
"/api/colonies/{colony_name}/progress/snapshot",
handle_progress_snapshot,
)
app.router.add_get(
"/api/colonies/{colony_name}/progress/stream",
handle_progress_stream,
)
app.router.add_get("/api/colonies/{colony_name}/data/tables", handle_list_tables)
app.router.add_get(
"/api/colonies/{colony_name}/data/tables/{table}/rows",
handle_table_rows,
)
app.router.add_patch(
"/api/colonies/{colony_name}/data/tables/{table}/rows",
handle_update_row,
)

Some files were not shown because too many files have changed in this diff.