Compare commits
115 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 019501335d | |||
| 6fae1f04c8 | |||
| 8c4085f5e8 | |||
| 53240eb888 | |||
| de8d6f0946 | |||
| ea707438f2 | |||
| 445c9600ab | |||
| 2ab5e6d784 | |||
| e7f9b7d791 | |||
| 3cb0c69a96 | |||
| 22d75bfb05 | |||
| 357df1bbcb | |||
| 386bbd5780 | |||
| 235022b35d | |||
| 4d8f312c3e | |||
| 4651a6a85a | |||
| ea9c163438 | |||
| 77cc169606 | |||
| 8c6428f445 | |||
| 44cb0c0f4c | |||
| 2621fb88b1 | |||
| a70f92edbe | |||
| b2efa179ea | |||
| 8c6e76d052 | |||
| c7f1fbf19f | |||
| 7047ecbf46 | |||
| b96ee5aaab | |||
| 6744bea01a | |||
| 390038225b | |||
| b55c8fdf86 | |||
| e9aea0bbc4 | |||
| 0ba1fa8262 | |||
| 0fd96d410e | |||
| c658a7c50b | |||
| 56c3659bda | |||
| 14f927996c | |||
| 8a0ec070b8 | |||
| 80cd77ac30 | |||
| c67521a09c | |||
| 8da06f4f90 | |||
| 46e0413eb8 | |||
| 81731587ff | |||
| 4e9d9bf1ea | |||
| 2644ab953d | |||
| e7daa59573 | |||
| 1bec43afad | |||
| 3d1357595d | |||
| 59ccbba810 | |||
| 8b2ae369ac | |||
| 96a667cbd9 | |||
| 17150a53bd | |||
| c1d7b0ee69 | |||
| 16ea9b52d3 | |||
| dcbfd4ab01 | |||
| b762020793 | |||
| 4ffddc53e6 | |||
| 24bcc5aea7 | |||
| 3c91119f67 | |||
| 923e773c14 | |||
| 199c3a235e | |||
| a881fe68da | |||
| 6b9040477f | |||
| c7cc031060 | |||
| 93c0ef672a | |||
| 67d55e6cce | |||
| 0907ff9cec | |||
| ed2e7125ac | |||
| f39c1c87af | |||
| 1229b4ad4d | |||
| 0d11a946a5 | |||
| b007ed753b | |||
| bb39424e99 | |||
| b27c7a029e | |||
| a3433f2c9e | |||
| 24ef2c247d | |||
| a8f9661626 | |||
| 3005bcaa96 | |||
| 40c4591d65 | |||
| e2bfb9d3af | |||
| e55cea97ef | |||
| ddaafe0307 | |||
| c17205a453 | |||
| 8e4468851c | |||
| ccf4216841 | |||
| 82ffcb17ac | |||
| 4da5bcc1e4 | |||
| 3df7194003 | |||
| 6f1f27b6e9 | |||
| 7b52ed9fa7 | |||
| 4d32526a29 | |||
| 656401e199 | |||
| f2e51157dc | |||
| 0d13c805b1 | |||
| b1ec64438c | |||
| 90aadf247a | |||
| 49317ac5f5 | |||
| 7216e9d9f0 | |||
| 91b1070d80 | |||
| 08aeffd977 | |||
| 651b57b928 | |||
| 8c10fc2e1c | |||
| e3154ca0ee | |||
| 84a92af41b | |||
| 78fc62210a | |||
| 2fd7e9172a | |||
| ca63fd9ee9 | |||
| b99f25c8d7 | |||
| e972112074 | |||
| 6e97191f21 | |||
| 023fb9b8d0 | |||
| b7924b1ad0 | |||
| b6640b8592 | |||
| 43a1d5797c | |||
| f52c44821a | |||
| 97432ea08c |
@@ -57,12 +57,16 @@
|
||||
"mcp__gcu-tools__browser_type_focused",
|
||||
"mcp__gcu-tools__browser_wait",
|
||||
"Bash(python3 -c ' *)",
|
||||
"Bash(python3 scripts/debug_queen_prompt.py independent)"
|
||||
"Bash(python3 scripts/debug_queen_prompt.py independent)",
|
||||
"Bash(curl -s --max-time 2 http://127.0.0.1:9230/status)",
|
||||
"Bash(python3 -c \"import json, sys; print\\(json.loads\\(sys.stdin.read\\(\\)\\)['data']['content']\\)\")",
|
||||
"Bash(python3 -c \"import json; json.load\\(open\\('/home/timothy/aden/hive/tools/browser-extension/manifest.json'\\)\\)\")"
|
||||
],
|
||||
"additionalDirectories": [
|
||||
"/home/timothy/.hive/skills/writing-hive-skills",
|
||||
"/tmp",
|
||||
"/home/timothy/.hive/skills"
|
||||
"/home/timothy/.hive/skills",
|
||||
"/home/timothy/aden/hive/core/frontend/src/components"
|
||||
]
|
||||
},
|
||||
"hooks": {
|
||||
|
||||
@@ -1,18 +0,0 @@
|
||||
This project uses ruff for Python linting and formatting.
|
||||
|
||||
Rules:
|
||||
- Line length: 100 characters
|
||||
- Python target: 3.11+
|
||||
- Use double quotes for strings
|
||||
- Sort imports with isort (ruff I rules): stdlib, third-party, first-party (framework), local
|
||||
- Combine as-imports
|
||||
- Use type hints on all function signatures
|
||||
- Use `from __future__ import annotations` for modern type syntax
|
||||
- Raise exceptions with `from` in except blocks (B904)
|
||||
- No unused imports (F401), no unused variables (F841)
|
||||
- Prefer list/dict/set comprehensions over map/filter (C4)
|
||||
|
||||
Run `make lint` to auto-fix, `make check` to verify without modifying files.
|
||||
Run `make format` to apply ruff formatting.
|
||||
|
||||
The ruff config lives in core/pyproject.toml under [tool.ruff].
|
||||
@@ -1,35 +0,0 @@
|
||||
# Git
|
||||
.git/
|
||||
.gitignore
|
||||
|
||||
# Documentation
|
||||
*.md
|
||||
docs/
|
||||
LICENSE
|
||||
|
||||
# IDE
|
||||
.idea/
|
||||
.vscode/
|
||||
|
||||
# Dependencies (rebuilt in container)
|
||||
node_modules/
|
||||
|
||||
# Build artifacts
|
||||
dist/
|
||||
build/
|
||||
coverage/
|
||||
|
||||
# Environment files
|
||||
.env*
|
||||
config.yaml
|
||||
|
||||
# Logs
|
||||
*.log
|
||||
logs/
|
||||
|
||||
# OS
|
||||
.DS_Store
|
||||
Thumbs.db
|
||||
|
||||
# GitHub
|
||||
.github/
|
||||
@@ -22,3 +22,6 @@ indent_size = 2
|
||||
|
||||
[Makefile]
|
||||
indent_style = tab
|
||||
|
||||
[*.{sh,ps1}]
|
||||
end_of_line = lf
|
||||
|
||||
+5
-1
@@ -16,7 +16,6 @@
|
||||
|
||||
# Shell scripts (must use LF)
|
||||
*.sh text eol=lf
|
||||
quickstart.sh text eol=lf
|
||||
|
||||
# PowerShell scripts (Windows-friendly)
|
||||
*.ps1 text eol=lf
|
||||
@@ -122,3 +121,8 @@ CODE_OF_CONDUCT* text
|
||||
*.db binary
|
||||
*.sqlite binary
|
||||
*.sqlite3 binary
|
||||
|
||||
# Lockfiles — mark generated so GitHub collapses them in PR diffs
|
||||
*.lock linguist-generated=true -diff
|
||||
package-lock.json linguist-generated=true -diff
|
||||
uv.lock linguist-generated=true -diff
|
||||
|
||||
@@ -1,10 +0,0 @@
|
||||
{
|
||||
"mcpServers": {
|
||||
"gcu-tools": {
|
||||
"type": "stdio",
|
||||
"command": "uv",
|
||||
"args": ["run", "python", "-m", "gcu.server", "--stdio"],
|
||||
"cwd": "/home/timothy/aden/hive/tools"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,11 +0,0 @@
|
||||
import json
|
||||
|
||||
with open('/home/timothy/aden/hive/x_rapid_ledger.json', 'r') as f:
|
||||
data = json.load(f)
|
||||
|
||||
data['replies'].append({
|
||||
'original_preview': 'Alright, I give in. Here’s my picture with the boss, courtesy of @johnkrausphotos. Oh, and hook ‘em!'
|
||||
})
|
||||
|
||||
with open('/home/timothy/aden/hive/x_rapid_ledger.json', 'w') as f:
|
||||
json.dump(data, f, indent=2)
|
||||
@@ -1,11 +0,0 @@
|
||||
import json, sys
|
||||
|
||||
with open('/home/timothy/aden/hive/x_rapid_ledger.json', 'r') as f:
|
||||
ledger = json.load(f)
|
||||
|
||||
text = sys.argv[1]
|
||||
for r in ledger['replies']:
|
||||
if r.get('original_preview') == text:
|
||||
print("YES")
|
||||
sys.exit(0)
|
||||
print("NO")
|
||||
@@ -66,12 +66,12 @@ from framework.agent_loop.internals.stall_detector import (
|
||||
ngram_similarity,
|
||||
)
|
||||
from framework.agent_loop.internals.synthetic_tools import (
|
||||
build_ask_user_multiple_tool,
|
||||
build_ask_user_tool,
|
||||
build_escalate_tool,
|
||||
build_report_to_parent_tool,
|
||||
handle_report_to_parent,
|
||||
)
|
||||
from framework.agent_loop.internals.tool_input_coercer import coerce_tool_input
|
||||
from framework.agent_loop.internals.tool_result_handler import (
|
||||
build_json_preview,
|
||||
execute_tool,
|
||||
@@ -85,7 +85,12 @@ from framework.agent_loop.internals.types import (
|
||||
JudgeVerdict,
|
||||
TriggerEvent,
|
||||
)
|
||||
from framework.agent_loop.internals.vision_fallback import (
|
||||
caption_tool_image,
|
||||
extract_intent_for_tool,
|
||||
)
|
||||
from framework.agent_loop.types import AgentContext, AgentProtocol, AgentResult
|
||||
from framework.config import get_vision_fallback_model
|
||||
from framework.host.event_bus import EventBus
|
||||
from framework.llm.capabilities import filter_tools_for_model, supports_image_tool_results
|
||||
from framework.llm.provider import Tool, ToolResult, ToolUse
|
||||
@@ -219,6 +224,46 @@ async def _describe_images_as_text(image_content: list[dict[str, Any]]) -> str |
|
||||
return None
|
||||
|
||||
|
||||
def _vision_fallback_active(model: str | None) -> bool:
|
||||
"""Return True if tool-result images for *model* should be routed
|
||||
through the vision-fallback chain rather than sent to the model.
|
||||
|
||||
Trigger: the model's catalog entry has ``supports_vision: false``
|
||||
(resolved via :func:`capabilities.supports_image_tool_results`,
|
||||
which reads ``model_catalog.json``). Unknown models default to
|
||||
vision-capable, so the fallback only fires when the catalog
|
||||
explicitly says the model is text-only.
|
||||
|
||||
The ``vision_fallback`` config block is the *substitution* model —
|
||||
it doesn't widen the trigger. To force fallback for a model that
|
||||
isn't catalogued yet, add an entry to ``model_catalog.json`` with
|
||||
``supports_vision: false`` rather than relying on a runtime config.
|
||||
"""
|
||||
if not model:
|
||||
return False
|
||||
return not supports_image_tool_results(model)
|
||||
|
||||
|
||||
async def _captioning_chain(
|
||||
intent: str,
|
||||
image_content: list[dict[str, Any]],
|
||||
) -> str | None:
|
||||
"""Two-stage caption chain used by the agent-loop tool-result hook.
|
||||
|
||||
Stage 1: configured ``vision_fallback`` model with intent + images.
|
||||
Stage 2: generic-caption rotation (gpt-4o-mini → claude-3-haiku
|
||||
→ gemini-flash) when stage 1 is unconfigured or fails.
|
||||
|
||||
Returns the caption text or None if both stages fail. Caller is
|
||||
responsible for the placeholder-on-None and the splice into the
|
||||
persisted tool-result content.
|
||||
"""
|
||||
caption = await caption_tool_image(intent, image_content)
|
||||
if not caption:
|
||||
caption = await _describe_images_as_text(image_content)
|
||||
return caption
|
||||
|
||||
|
||||
# Pattern for detecting context-window-exceeded errors across LLM providers.
|
||||
_CONTEXT_TOO_LARGE_RE = re.compile(
|
||||
r"context.{0,20}(length|window|limit|size)|"
|
||||
@@ -575,6 +620,7 @@ class AgentLoop(AgentProtocol):
|
||||
store=self._conversation_store,
|
||||
run_id=ctx.effective_run_id,
|
||||
compaction_buffer_tokens=self._config.compaction_buffer_tokens,
|
||||
compaction_buffer_ratio=self._config.compaction_buffer_ratio,
|
||||
compaction_warning_buffer_tokens=(self._config.compaction_warning_buffer_tokens),
|
||||
)
|
||||
accumulator = OutputAccumulator(
|
||||
@@ -587,7 +633,12 @@ class AgentLoop(AgentProtocol):
|
||||
|
||||
initial_message = self._build_initial_message(ctx)
|
||||
if initial_message:
|
||||
await conversation.add_user_message(initial_message)
|
||||
# Stamp with arrival time so the conversation has a
|
||||
# temporal anchor for the first turn, matching the
|
||||
# stamping done by drain_injection_queue for every
|
||||
# subsequent event.
|
||||
_stamp = datetime.now().astimezone().strftime("%Y-%m-%d %H:%M %Z")
|
||||
await conversation.add_user_message(f"[{_stamp}] {initial_message}")
|
||||
|
||||
await self._run_hooks("session_start", conversation, trigger=initial_message)
|
||||
|
||||
@@ -599,7 +650,8 @@ class AgentLoop(AgentProtocol):
|
||||
initial_message = self._build_initial_message(ctx)
|
||||
if not initial_message:
|
||||
initial_message = "Hello"
|
||||
await conversation.add_user_message(initial_message)
|
||||
_stamp = datetime.now().astimezone().strftime("%Y-%m-%d %H:%M %Z")
|
||||
await conversation.add_user_message(f"[{_stamp}] {initial_message}")
|
||||
|
||||
# 2b. Restore spill counter from existing files (resume safety)
|
||||
self._restore_spill_counter()
|
||||
@@ -608,8 +660,6 @@ class AgentLoop(AgentProtocol):
|
||||
tools = list(ctx.available_tools)
|
||||
if ctx.supports_direct_user_io:
|
||||
tools.append(self._build_ask_user_tool())
|
||||
if stream_id == "queen" or stream_id == "overseer":
|
||||
tools.append(self._build_ask_user_multiple_tool())
|
||||
# Workers (parallel ephemeral agents) get escalate + report_to_parent.
|
||||
# The overseer is client-facing like the queen and has neither.
|
||||
if stream_id not in ("queen", "judge", "overseer"):
|
||||
@@ -621,8 +671,23 @@ class AgentLoop(AgentProtocol):
|
||||
# Hide image-producing tools from text-only models so they never try
|
||||
# to call them. Avoids wasted turns + "screenshot failed" lessons
|
||||
# getting saved to memory. See framework.llm.capabilities.
|
||||
# EXCEPTION: when the model IS on the text-only deny list AND
|
||||
# a vision_fallback subagent is configured, leave image tools
|
||||
# visible. The post-execution hook in the inner tool loop
|
||||
# will route each image_content through the fallback VLM and
|
||||
# replace it with a text caption before the main agent sees
|
||||
# the result — so the main agent gets captions instead of
|
||||
# raw images, rather than losing the tool entirely. We DON'T
|
||||
# bypass the filter for vision-capable models (that would be
|
||||
# a no-op anyway — the filter doesn't fire for them) and we
|
||||
# DON'T bypass it without a configured fallback (the agent
|
||||
# would just see raw stripped tool results with no caption).
|
||||
_llm_model = ctx.llm.model if ctx.llm else ""
|
||||
tools, _hidden_image_tools = filter_tools_for_model(tools, _llm_model)
|
||||
_text_only_main = _llm_model and not supports_image_tool_results(_llm_model)
|
||||
if _text_only_main and get_vision_fallback_model() is not None:
|
||||
_hidden_image_tools: list[str] = []
|
||||
else:
|
||||
tools, _hidden_image_tools = filter_tools_for_model(tools, _llm_model)
|
||||
|
||||
logger.info(
|
||||
"[%s] Tools available (%d): %s | direct_user_io=%s | judge=%s | hidden_image_tools=%s",
|
||||
@@ -754,8 +819,6 @@ class AgentLoop(AgentProtocol):
|
||||
)
|
||||
got_input = await self._await_user_input(
|
||||
ctx,
|
||||
prompt=str(pending_input_state.get("prompt", "")),
|
||||
options=pending_input_state.get("options"),
|
||||
questions=pending_input_state.get("questions"),
|
||||
emit_client_request=bool(pending_input_state.get("emit_client_request", True)),
|
||||
)
|
||||
@@ -789,7 +852,6 @@ class AgentLoop(AgentProtocol):
|
||||
if ctx.dynamic_tools_provider is not None:
|
||||
_synthetic_names = {
|
||||
"ask_user",
|
||||
"ask_user_multiple",
|
||||
"escalate",
|
||||
}
|
||||
synthetic = [t for t in tools if t.name in _synthetic_names]
|
||||
@@ -798,14 +860,56 @@ class AgentLoop(AgentProtocol):
|
||||
tools.extend(synthetic)
|
||||
|
||||
# 6b3. Dynamic prompt refresh (phase switching / memory refresh)
|
||||
if ctx.dynamic_prompt_provider is not None or ctx.dynamic_memory_provider is not None:
|
||||
if (
|
||||
ctx.dynamic_prompt_provider is not None
|
||||
or ctx.dynamic_memory_provider is not None
|
||||
or ctx.dynamic_skills_catalog_provider is not None
|
||||
):
|
||||
if ctx.dynamic_prompt_provider is not None:
|
||||
_new_prompt = stamp_prompt_datetime(ctx.dynamic_prompt_provider())
|
||||
_new_prompt = ctx.dynamic_prompt_provider()
|
||||
# When a suffix provider is also wired (Queen's
|
||||
# static/dynamic split), keep the two pieces separate
|
||||
# so the LLM wrapper can emit them as two system
|
||||
# content blocks with a cache breakpoint between them.
|
||||
# The timestamp used to be stamped here via
|
||||
# stamp_prompt_datetime on every iteration — it now
|
||||
# lives inside the frozen dynamic suffix and is only
|
||||
# refreshed at user-turn boundaries, so per-iteration
|
||||
# stamping would both double-stamp and bust the cache.
|
||||
_new_suffix: str | None = None
|
||||
if ctx.dynamic_prompt_suffix_provider is not None:
|
||||
try:
|
||||
_new_suffix = ctx.dynamic_prompt_suffix_provider() or ""
|
||||
except Exception:
|
||||
logger.debug(
|
||||
"[%s] dynamic_prompt_suffix_provider raised — falling back to legacy stamp",
|
||||
node_id,
|
||||
exc_info=True,
|
||||
)
|
||||
_new_suffix = None
|
||||
if _new_suffix is None:
|
||||
# Legacy / fallback path: no split in use (or the
|
||||
# suffix provider raised). Stamp the timestamp at
|
||||
# the end of the single-string prompt so the model
|
||||
# still sees a current "now".
|
||||
_new_prompt = stamp_prompt_datetime(_new_prompt)
|
||||
else:
|
||||
# build_system_prompt_for_context reads dynamic_skills_catalog_provider
|
||||
# directly; no separate branch needed.
|
||||
_new_prompt = build_system_prompt_for_context(ctx)
|
||||
if _new_prompt != conversation.system_prompt:
|
||||
conversation.update_system_prompt(_new_prompt)
|
||||
logger.info("[%s] Dynamic prompt updated", node_id)
|
||||
_new_suffix = None
|
||||
if _new_suffix is not None:
|
||||
_combined_for_compare = f"{_new_prompt}\n\n{_new_suffix}" if _new_suffix else _new_prompt
|
||||
if (
|
||||
_combined_for_compare != conversation.system_prompt
|
||||
or _new_suffix != conversation.system_prompt_dynamic_suffix
|
||||
):
|
||||
conversation.update_system_prompt(_new_prompt, dynamic_suffix=_new_suffix)
|
||||
logger.info("[%s] Dynamic prompt updated (split)", node_id)
|
||||
else:
|
||||
if _new_prompt != conversation.system_prompt:
|
||||
conversation.update_system_prompt(_new_prompt)
|
||||
logger.info("[%s] Dynamic prompt updated", node_id)
|
||||
|
||||
# 6c. Publish iteration event (with per-iteration metadata when available)
|
||||
_iter_meta = None
|
||||
@@ -863,8 +967,6 @@ class AgentLoop(AgentProtocol):
|
||||
turn_tokens,
|
||||
logged_tool_calls,
|
||||
user_input_requested,
|
||||
ask_user_prompt,
|
||||
ask_user_options,
|
||||
queen_input_requested,
|
||||
request_system_prompt,
|
||||
request_messages,
|
||||
@@ -897,6 +999,8 @@ class AgentLoop(AgentProtocol):
|
||||
input_tokens=turn_tokens.get("input", 0),
|
||||
output_tokens=turn_tokens.get("output", 0),
|
||||
cached_tokens=turn_tokens.get("cached", 0),
|
||||
cache_creation_tokens=turn_tokens.get("cache_creation", 0),
|
||||
cost_usd=float(turn_tokens.get("cost", 0.0) or 0.0),
|
||||
execution_id=execution_id,
|
||||
iteration=iteration,
|
||||
)
|
||||
@@ -1080,7 +1184,7 @@ class AgentLoop(AgentProtocol):
|
||||
inner_turn=0,
|
||||
)
|
||||
await conversation.add_assistant_message(visible_error)
|
||||
await self._await_user_input(ctx, prompt="")
|
||||
await self._await_user_input(ctx)
|
||||
_llm_turn_failed_waiting_input = True
|
||||
break # exit retry loop, continue outer iteration
|
||||
|
||||
@@ -1129,7 +1233,7 @@ class AgentLoop(AgentProtocol):
|
||||
if _turn_cancelled:
|
||||
logger.info("[%s] iter=%d: turn cancelled by user", node_id, iteration)
|
||||
if ctx.supports_direct_user_io:
|
||||
await self._await_user_input(ctx, prompt="")
|
||||
await self._await_user_input(ctx)
|
||||
continue # back to top of for-iteration loop
|
||||
|
||||
# Queen non-transient LLM failures wait for user input and then
|
||||
@@ -1260,7 +1364,7 @@ class AgentLoop(AgentProtocol):
|
||||
iteration,
|
||||
_consecutive_empty_turns,
|
||||
)
|
||||
await self._await_user_input(ctx, prompt="")
|
||||
await self._await_user_input(ctx)
|
||||
_consecutive_empty_turns = 0
|
||||
else:
|
||||
await conversation.add_user_message(
|
||||
@@ -1336,7 +1440,6 @@ class AgentLoop(AgentProtocol):
|
||||
if tc.get("tool_name")
|
||||
not in (
|
||||
"ask_user",
|
||||
"ask_user_multiple",
|
||||
"escalate",
|
||||
)
|
||||
]
|
||||
@@ -1384,7 +1487,7 @@ class AgentLoop(AgentProtocol):
|
||||
recent_responses.clear()
|
||||
elif ctx.supports_direct_user_io:
|
||||
await conversation.add_user_message(warning_msg)
|
||||
await self._await_user_input(ctx, prompt=doom_desc)
|
||||
await self._await_user_input(ctx)
|
||||
recent_tool_fingerprints.clear()
|
||||
recent_responses.clear()
|
||||
else:
|
||||
@@ -1507,11 +1610,9 @@ class AgentLoop(AgentProtocol):
|
||||
# conversation — they flow through without blocking.
|
||||
_cf_block = False
|
||||
_cf_auto = False
|
||||
_cf_prompt = ""
|
||||
if ctx.supports_direct_user_io:
|
||||
if user_input_requested:
|
||||
_cf_block = True
|
||||
_cf_prompt = ask_user_prompt
|
||||
elif stream_id == "queen" and not real_tool_results and not outputs_set:
|
||||
# Auto-block: only for the queen (conversational node).
|
||||
# Workers are autonomous — they block only on explicit
|
||||
@@ -1614,13 +1715,13 @@ class AgentLoop(AgentProtocol):
|
||||
iteration,
|
||||
_cf_auto,
|
||||
)
|
||||
# Check for multi-question batch from ask_user_multiple
|
||||
multi_qs = getattr(self, "_pending_multi_questions", None)
|
||||
self._pending_multi_questions = None
|
||||
# Pull the pending questions array set by the ask_user
|
||||
# handler (a 1-item list for a single question, 2-8 for a
|
||||
# batch). None for auto-block turns with no explicit ask.
|
||||
pending_qs = getattr(self, "_pending_questions", None)
|
||||
self._pending_questions = None
|
||||
pending_input_state = {
|
||||
"prompt": _cf_prompt,
|
||||
"options": ask_user_options,
|
||||
"questions": multi_qs,
|
||||
"questions": pending_qs,
|
||||
"emit_client_request": True,
|
||||
}
|
||||
await self._write_cursor(
|
||||
@@ -1634,11 +1735,9 @@ class AgentLoop(AgentProtocol):
|
||||
)
|
||||
got_input = await self._await_user_input(
|
||||
ctx,
|
||||
prompt=_cf_prompt,
|
||||
options=ask_user_options,
|
||||
questions=multi_qs,
|
||||
questions=pending_qs,
|
||||
)
|
||||
# Emit deferred tool_call_completed for ask_user / ask_user_multiple
|
||||
# Emit deferred tool_call_completed for ask_user
|
||||
deferred = getattr(self, "_deferred_tool_complete", None)
|
||||
if deferred:
|
||||
self._deferred_tool_complete = None
|
||||
@@ -1803,7 +1902,7 @@ class AgentLoop(AgentProtocol):
|
||||
recent_tool_fingerprints=recent_tool_fingerprints,
|
||||
pending_input=pending_input_state,
|
||||
)
|
||||
got_input = await self._await_user_input(ctx, prompt="", emit_client_request=False)
|
||||
got_input = await self._await_user_input(ctx, emit_client_request=False)
|
||||
logger.info(
|
||||
"[%s] iter=%d: queen wait unblocked, got_input=%s",
|
||||
node_id,
|
||||
@@ -2195,9 +2294,7 @@ class AgentLoop(AgentProtocol):
|
||||
async def _await_user_input(
|
||||
self,
|
||||
ctx: AgentContext,
|
||||
prompt: str = "",
|
||||
*,
|
||||
options: list[str] | None = None,
|
||||
questions: list[dict] | None = None,
|
||||
emit_client_request: bool = True,
|
||||
) -> bool:
|
||||
@@ -2210,11 +2307,11 @@ class AgentLoop(AgentProtocol):
|
||||
before the judge runs.
|
||||
|
||||
Args:
|
||||
options: Optional predefined choices for the user (from ask_user).
|
||||
Passed through to the CLIENT_INPUT_REQUESTED event so the
|
||||
frontend can render a QuestionWidget with buttons.
|
||||
questions: Optional list of question dicts for ask_user_multiple.
|
||||
Each dict has id, prompt, and optional options.
|
||||
questions: Optional list of question dicts from ask_user. Each
|
||||
dict has id, prompt, and optional options. Passed through to
|
||||
the CLIENT_INPUT_REQUESTED event so the frontend can render
|
||||
the appropriate widget (QuestionWidget for one, else
|
||||
MultiQuestionWidget).
|
||||
emit_client_request: When False, wait silently without publishing
|
||||
CLIENT_INPUT_REQUESTED. Used for worker waits where input is
|
||||
expected from the queen via inject_message().
|
||||
@@ -2243,9 +2340,7 @@ class AgentLoop(AgentProtocol):
|
||||
await self._event_bus.emit_client_input_requested(
|
||||
stream_id=ctx.stream_id or ctx.agent_id,
|
||||
node_id=ctx.agent_id,
|
||||
prompt=prompt,
|
||||
execution_id=ctx.execution_id or "",
|
||||
options=options,
|
||||
questions=questions,
|
||||
)
|
||||
|
||||
@@ -2277,8 +2372,6 @@ class AgentLoop(AgentProtocol):
|
||||
dict[str, int],
|
||||
list[dict],
|
||||
bool,
|
||||
str,
|
||||
list[str] | None,
|
||||
bool,
|
||||
str,
|
||||
list[dict[str, Any]],
|
||||
@@ -2287,8 +2380,7 @@ class AgentLoop(AgentProtocol):
|
||||
"""Run a single LLM turn with streaming and tool execution.
|
||||
|
||||
Returns (assistant_text, real_tool_results, outputs_set, token_counts, logged_tool_calls,
|
||||
user_input_requested, ask_user_prompt, ask_user_options, queen_input_requested,
|
||||
system_prompt, messages, reported_to_parent).
|
||||
user_input_requested, queen_input_requested, system_prompt, messages, reported_to_parent).
|
||||
|
||||
``real_tool_results`` contains only results from actual tools (web_search,
|
||||
etc.), NOT from synthetic framework tools such as ``set_output``,
|
||||
@@ -2309,7 +2401,9 @@ class AgentLoop(AgentProtocol):
|
||||
stream_id = ctx.stream_id or ctx.agent_id
|
||||
node_id = ctx.agent_id
|
||||
execution_id = ctx.execution_id or ""
|
||||
token_counts: dict[str, int] = {"input": 0, "output": 0, "cached": 0}
|
||||
# Mixed-type dict: int token counts + str stop_reason/model + float cost.
|
||||
# Typed loosely to avoid churn in the many call sites that read from it.
|
||||
token_counts: dict[str, Any] = {"input": 0, "output": 0, "cached": 0, "cache_creation": 0, "cost": 0.0}
|
||||
tool_call_count = 0
|
||||
final_text = ""
|
||||
final_system_prompt = conversation.system_prompt
|
||||
@@ -2317,8 +2411,6 @@ class AgentLoop(AgentProtocol):
|
||||
# Track output keys set via set_output across all inner iterations
|
||||
outputs_set_this_turn: list[str] = []
|
||||
user_input_requested = False
|
||||
ask_user_prompt = ""
|
||||
ask_user_options: list[str] | None = None
|
||||
queen_input_requested = False
|
||||
# Accumulate ALL tool calls across inner iterations for L3 logging.
|
||||
# Unlike real_tool_results (reset each inner iteration), this persists.
|
||||
@@ -2452,9 +2544,16 @@ class AgentLoop(AgentProtocol):
|
||||
nonlocal _first_event_at
|
||||
_clean_snapshot = "" # visible-only text for the frontend
|
||||
|
||||
# Split-prompt path: pass STATIC and DYNAMIC tail separately
|
||||
# so the LLM wrapper can emit them as two Anthropic system
|
||||
# content blocks with a cache breakpoint between them. When
|
||||
# no split is in use, ``system_prompt_static`` equals the
|
||||
# full prompt and the suffix is empty — identical to the
|
||||
# legacy single-block request.
|
||||
async for event in ctx.llm.stream(
|
||||
messages=_msgs,
|
||||
system=conversation.system_prompt,
|
||||
system=conversation.system_prompt_static,
|
||||
system_dynamic_suffix=(conversation.system_prompt_dynamic_suffix or None),
|
||||
tools=tools if tools else None,
|
||||
max_tokens=ctx.max_tokens,
|
||||
):
|
||||
@@ -2535,6 +2634,8 @@ class AgentLoop(AgentProtocol):
|
||||
token_counts["input"] += event.input_tokens
|
||||
token_counts["output"] += event.output_tokens
|
||||
token_counts["cached"] += event.cached_tokens
|
||||
token_counts["cache_creation"] += event.cache_creation_tokens
|
||||
token_counts["cost"] = token_counts.get("cost", 0.0) + event.cost_usd
|
||||
token_counts["stop_reason"] = event.stop_reason
|
||||
token_counts["model"] = event.model
|
||||
|
||||
@@ -2557,9 +2658,7 @@ class AgentLoop(AgentProtocol):
|
||||
_legacy = self._config.llm_stream_inactivity_timeout_seconds
|
||||
if _legacy and _legacy > 0 and _legacy < _inter_event_limit:
|
||||
_inter_event_limit = _legacy
|
||||
_watchdog_active = (_ttft_limit and _ttft_limit > 0) or (
|
||||
_inter_event_limit and _inter_event_limit > 0
|
||||
)
|
||||
_watchdog_active = (_ttft_limit and _ttft_limit > 0) or (_inter_event_limit and _inter_event_limit > 0)
|
||||
# Result of the watchdog: "ok" (stream finished), "ttft" (no first
|
||||
# event in budget), "inactive" (silence after first event).
|
||||
_watchdog_verdict: str = "ok"
|
||||
@@ -2578,9 +2677,7 @@ class AgentLoop(AgentProtocol):
|
||||
)
|
||||
_check_interval = max(1.0, min(5.0, _tight / 2))
|
||||
while True:
|
||||
done, _pending = await asyncio.wait(
|
||||
{self._stream_task}, timeout=_check_interval
|
||||
)
|
||||
done, _pending = await asyncio.wait({self._stream_task}, timeout=_check_interval)
|
||||
if self._stream_task in done:
|
||||
break
|
||||
now = time.monotonic()
|
||||
@@ -2598,11 +2695,7 @@ class AgentLoop(AgentProtocol):
|
||||
# Post-first-event silence. A stream that produced
|
||||
# events and then went quiet is a real stall.
|
||||
idle = now - _stream_last_event_at
|
||||
if (
|
||||
_inter_event_limit
|
||||
and _inter_event_limit > 0
|
||||
and idle >= _inter_event_limit
|
||||
):
|
||||
if _inter_event_limit and _inter_event_limit > 0 and idle >= _inter_event_limit:
|
||||
_watchdog_verdict = "inactive"
|
||||
_watchdog_elapsed = idle
|
||||
_watchdog_limit = _inter_event_limit
|
||||
@@ -2816,8 +2909,6 @@ class AgentLoop(AgentProtocol):
|
||||
token_counts,
|
||||
logged_tool_calls,
|
||||
user_input_requested,
|
||||
ask_user_prompt,
|
||||
ask_user_options,
|
||||
queen_input_requested,
|
||||
final_system_prompt,
|
||||
final_messages,
|
||||
@@ -2867,7 +2958,17 @@ class AgentLoop(AgentProtocol):
|
||||
# nudge on its next turn without losing the real execution output.
|
||||
replay_prefixes_by_id: dict[str, str] = {}
|
||||
|
||||
# Schema-driven coercion of tool arguments. Heals the small
|
||||
# handful of drift patterns that non-frontier models emit
|
||||
# (numbers-as-strings, array-of-{label} wrappers, arrays
|
||||
# sent as JSON strings, singleton scalars). Runs once per
|
||||
# tool call before dispatch; see tool_input_coercer module.
|
||||
_tool_by_name = {t.name: t for t in tools}
|
||||
|
||||
for tc in tool_calls:
|
||||
_tool_schema = _tool_by_name.get(tc.tool_name)
|
||||
if _tool_schema is not None:
|
||||
coerce_tool_input(_tool_schema, tc.tool_input)
|
||||
tool_call_count += 1
|
||||
if hard_limit > 0 and tool_call_count > hard_limit:
|
||||
limit_hit = True
|
||||
@@ -2900,54 +3001,89 @@ class AgentLoop(AgentProtocol):
|
||||
|
||||
elif tc.tool_name == "ask_user":
|
||||
# --- Framework-level ask_user handling ---
|
||||
ask_user_prompt = tc.tool_input.get("question", "")
|
||||
raw_options = tc.tool_input.get("options", None)
|
||||
|
||||
# Self-heal: some model families (notably the queen
|
||||
# profile prompt poisoning the output style) cram
|
||||
# the options inside the question string as a
|
||||
# pseudo-XML blob like:
|
||||
#
|
||||
# "What do you want to do?</question>\n_OPTIONS:
|
||||
# [\"De-risk\", \"Add\", \"Short\"]"
|
||||
#
|
||||
# When that happens the question text leaks
|
||||
# </question> and _OPTIONS: into the chat UI and
|
||||
# the buttons never appear. Detect + repair.
|
||||
# The consolidated tool always takes a `questions`
|
||||
# array (1-8 entries). A single-entry array is the
|
||||
# common case; longer arrays batch several questions
|
||||
# into one turn so the user answers them all at once.
|
||||
from framework.agent_loop.internals.synthetic_tools import (
|
||||
sanitize_ask_user_inputs,
|
||||
)
|
||||
|
||||
ask_user_prompt, recovered_options = sanitize_ask_user_inputs(ask_user_prompt, raw_options)
|
||||
if recovered_options is not None and raw_options is None:
|
||||
raw_options = recovered_options
|
||||
# Defensive: ensure options is a list of strings.
|
||||
# Smaller models sometimes send a string instead of
|
||||
# an array — try to recover gracefully.
|
||||
ask_user_options: list[str] | None = None
|
||||
if isinstance(raw_options, list):
|
||||
ask_user_options = [str(o) for o in raw_options if o]
|
||||
elif isinstance(raw_options, str) and raw_options.strip():
|
||||
# Try JSON parse first (e.g. '["a","b"]')
|
||||
try:
|
||||
parsed = json.loads(raw_options)
|
||||
if isinstance(parsed, list):
|
||||
ask_user_options = [str(o) for o in parsed if o]
|
||||
except (json.JSONDecodeError, TypeError):
|
||||
pass
|
||||
if ask_user_options is not None and len(ask_user_options) < 2:
|
||||
ask_user_options = None # fall back to free-text input
|
||||
|
||||
# Workers MUST provide at least 2 options — no free-text
|
||||
# questions allowed. Only the queen may omit options.
|
||||
if ask_user_options is None and stream_id != "queen":
|
||||
raw_questions = tc.tool_input.get("questions", None)
|
||||
if not isinstance(raw_questions, list) or not raw_questions:
|
||||
result = ToolResult(
|
||||
tool_use_id=tc.tool_use_id,
|
||||
content=(
|
||||
"ERROR: options are required. Provide at least "
|
||||
"2 predefined choices in the 'options' array. "
|
||||
'Example: {"question": "...", "options": '
|
||||
'["Yes", "No"]}'
|
||||
"ERROR: ask_user requires a non-empty "
|
||||
"'questions' array. Each entry must have "
|
||||
"{id, prompt, options?}. Example: "
|
||||
'{"questions": [{"id": "q1", "prompt": '
|
||||
'"What now?", "options": ["A", "B"]}]}'
|
||||
),
|
||||
is_error=True,
|
||||
)
|
||||
results_by_id[tc.tool_use_id] = result
|
||||
user_input_requested = False
|
||||
continue
|
||||
|
||||
# Normalize + self-heal each question entry. The
|
||||
# generic tool_input_coercer has already handled
|
||||
# schema-shape drift (array-of-string options, JSON
|
||||
# strings, etc.), so here we only deal with
|
||||
# prompt-style drift: some model families cram
|
||||
# options inside the prompt as a pseudo-XML blob
|
||||
# like "What now?</question>\n_OPTIONS: [\"A\", \"B\"]".
|
||||
# sanitize_ask_user_inputs strips the tag and
|
||||
# recovers the inline options as a fallback.
|
||||
questions: list[dict] = []
|
||||
for i, q in enumerate(raw_questions):
|
||||
if not isinstance(q, dict):
|
||||
continue
|
||||
qid = str(q.get("id", f"q{i + 1}"))
|
||||
raw_prompt = q.get("prompt", q.get("question", ""))
|
||||
raw_opts = q.get("options", None)
|
||||
cleaned_prompt, recovered_opts = sanitize_ask_user_inputs(raw_prompt, raw_opts)
|
||||
|
||||
opts: list[str] | None = None
|
||||
if isinstance(raw_opts, list) and raw_opts:
|
||||
opts = [str(o) for o in raw_opts if o]
|
||||
elif recovered_opts is not None:
|
||||
opts = recovered_opts
|
||||
if opts is not None and len(opts) < 2:
|
||||
opts = None # fall back to free-text
|
||||
|
||||
questions.append(
|
||||
{
|
||||
"id": qid,
|
||||
"prompt": cleaned_prompt,
|
||||
**({"options": opts} if opts else {}),
|
||||
}
|
||||
)
|
||||
|
||||
if not questions:
|
||||
result = ToolResult(
|
||||
tool_use_id=tc.tool_use_id,
|
||||
content=(
|
||||
"ERROR: no valid question objects in "
|
||||
"'questions'. Each entry must be an "
|
||||
"object with 'id' and 'prompt'."
|
||||
),
|
||||
is_error=True,
|
||||
)
|
||||
results_by_id[tc.tool_use_id] = result
|
||||
user_input_requested = False
|
||||
continue
|
||||
|
||||
# Workers MUST provide options on every question —
|
||||
# free-text asks are queen-only.
|
||||
if stream_id != "queen" and any("options" not in q for q in questions):
|
||||
result = ToolResult(
|
||||
tool_use_id=tc.tool_use_id,
|
||||
content=(
|
||||
"ERROR: options are required on every "
|
||||
"question for worker nodes. Provide at "
|
||||
"least 2 predefined choices in the "
|
||||
"'options' array of each question."
|
||||
),
|
||||
is_error=True,
|
||||
)
|
||||
@@ -2957,77 +3093,31 @@ class AgentLoop(AgentProtocol):
|
||||
|
||||
user_input_requested = True
|
||||
|
||||
# Free-form ask_user (no options): stream the question
|
||||
# text as a chat message so the user can see it. When
|
||||
# options are present the QuestionWidget shows the
|
||||
# question, but without options nothing renders it.
|
||||
if ask_user_options is None and ask_user_prompt and ctx.emits_client_io:
|
||||
# Single free-form question: stream the prompt as a
|
||||
# chat message so the user sees it. Widget-rendered
|
||||
# cases (single-with-options, multi) draw their own
|
||||
# question text, so no text delta is needed.
|
||||
if (
|
||||
len(questions) == 1
|
||||
and "options" not in questions[0]
|
||||
and questions[0]["prompt"]
|
||||
and ctx.emits_client_io
|
||||
):
|
||||
_q_text = questions[0]["prompt"]
|
||||
await self._publish_text_delta(
|
||||
stream_id,
|
||||
node_id,
|
||||
content=ask_user_prompt,
|
||||
snapshot=ask_user_prompt,
|
||||
content=_q_text,
|
||||
snapshot=_q_text,
|
||||
ctx=ctx,
|
||||
execution_id=execution_id,
|
||||
iteration=iteration,
|
||||
inner_turn=inner_turn,
|
||||
)
|
||||
|
||||
result = ToolResult(
|
||||
tool_use_id=tc.tool_use_id,
|
||||
content="Waiting for user input...",
|
||||
is_error=False,
|
||||
)
|
||||
results_by_id[tc.tool_use_id] = result
|
||||
|
||||
elif tc.tool_name == "ask_user_multiple":
|
||||
# --- Framework-level ask_user_multiple ---
|
||||
raw_questions = tc.tool_input.get("questions", [])
|
||||
if not isinstance(raw_questions, list) or len(raw_questions) < 2:
|
||||
result = ToolResult(
|
||||
tool_use_id=tc.tool_use_id,
|
||||
content=(
|
||||
"ERROR: questions must be an array of at "
|
||||
"least 2 question objects. Use ask_user "
|
||||
"for single questions."
|
||||
),
|
||||
is_error=True,
|
||||
)
|
||||
results_by_id[tc.tool_use_id] = result
|
||||
user_input_requested = False
|
||||
continue
|
||||
|
||||
# Normalize each question entry
|
||||
questions: list[dict] = []
|
||||
for i, q in enumerate(raw_questions):
|
||||
if not isinstance(q, dict):
|
||||
continue
|
||||
qid = str(q.get("id", f"q{i + 1}"))
|
||||
prompt = str(q.get("prompt", ""))
|
||||
opts = q.get("options", None)
|
||||
if isinstance(opts, list):
|
||||
opts = [str(o) for o in opts if o]
|
||||
if len(opts) < 2:
|
||||
opts = None
|
||||
else:
|
||||
opts = None
|
||||
questions.append(
|
||||
{
|
||||
"id": qid,
|
||||
"prompt": prompt,
|
||||
**({"options": opts} if opts else {}),
|
||||
}
|
||||
)
|
||||
|
||||
user_input_requested = True
|
||||
|
||||
# Store as multi-question prompt/options for
|
||||
# the event emission path
|
||||
ask_user_prompt = ""
|
||||
ask_user_options = None
|
||||
# Pass the full questions list via a special
|
||||
# key that the event emitter picks up
|
||||
self._pending_multi_questions = questions
|
||||
# Stash the normalized questions list for the
|
||||
# blocking path (§1612) + event emission.
|
||||
self._pending_questions = questions
|
||||
|
||||
result = ToolResult(
|
||||
tool_use_id=tc.tool_use_id,
|
||||
@@ -3338,6 +3428,30 @@ class AgentLoop(AgentProtocol):
|
||||
|
||||
# Phase 3: record results into conversation in original order,
|
||||
# build logged/real lists, and publish completed events.
|
||||
#
|
||||
# Vision-fallback prefetch: a single turn may fire several
|
||||
# image-producing tools in parallel (e.g. one screenshot
|
||||
# per tab). Captioning each one takes a vision LLM round
|
||||
# trip (1–30 s). Doing them sequentially in this loop
|
||||
# would serialise that latency per image. Instead, kick
|
||||
# off all caption tasks concurrently NOW, and await each
|
||||
# one just-in-time inside the per-tc body. If only a
|
||||
# single image needs captioning, this collapses to a
|
||||
# single await with no overhead.
|
||||
_model_text_only = ctx.llm and _vision_fallback_active(ctx.llm.model)
|
||||
caption_tasks: dict[str, asyncio.Task[str | None]] = {}
|
||||
if _model_text_only:
|
||||
for tc in tool_calls[:executed_in_batch]:
|
||||
res = results_by_id.get(tc.tool_use_id)
|
||||
if not res or not res.image_content:
|
||||
continue
|
||||
intent = extract_intent_for_tool(
|
||||
conversation,
|
||||
tc.tool_name,
|
||||
tc.tool_input or {},
|
||||
)
|
||||
caption_tasks[tc.tool_use_id] = asyncio.create_task(_captioning_chain(intent, res.image_content))
|
||||
|
||||
for tc in tool_calls[:executed_in_batch]:
|
||||
result = results_by_id.get(tc.tool_use_id)
|
||||
if result is None:
|
||||
@@ -3346,7 +3460,6 @@ class AgentLoop(AgentProtocol):
|
||||
# Build log entries for real tools (exclude synthetic tools)
|
||||
if tc.tool_name not in (
|
||||
"ask_user",
|
||||
"ask_user_multiple",
|
||||
"escalate",
|
||||
):
|
||||
tool_entry = {
|
||||
@@ -3361,11 +3474,30 @@ class AgentLoop(AgentProtocol):
|
||||
logged_tool_calls.append(tool_entry)
|
||||
|
||||
image_content = result.image_content
|
||||
if image_content and ctx.llm and not supports_image_tool_results(ctx.llm.model):
|
||||
logger.info(
|
||||
"Stripping image_content from tool result; model '%s' does not support images in tool results",
|
||||
ctx.llm.model,
|
||||
)
|
||||
# Vision-fallback marker spliced into the persisted text
|
||||
# below. None when no captioning ran (vision-capable
|
||||
# main model, no images, or no fallback chain reached
|
||||
# this tool).
|
||||
vision_fallback_marker: str | None = None
|
||||
if image_content and tc.tool_use_id in caption_tasks:
|
||||
caption = await caption_tasks.pop(tc.tool_use_id)
|
||||
if caption:
|
||||
vision_fallback_marker = f"[vision-fallback caption]\n{caption}"
|
||||
logger.info(
|
||||
"vision_fallback: captioned %d image(s) for tool '%s' (model '%s' routed through fallback)",
|
||||
len(image_content),
|
||||
tc.tool_name,
|
||||
ctx.llm.model if ctx.llm else "?",
|
||||
)
|
||||
else:
|
||||
vision_fallback_marker = "[image stripped — vision fallback exhausted]"
|
||||
logger.info(
|
||||
"vision_fallback: exhausted; stripping %d image(s) from "
|
||||
"tool '%s' result without caption (model '%s')",
|
||||
len(image_content),
|
||||
tc.tool_name,
|
||||
ctx.llm.model if ctx.llm else "?",
|
||||
)
|
||||
image_content = None
|
||||
|
||||
# Apply replay-detector steer prefix if this call matched a
|
||||
@@ -3377,6 +3509,11 @@ class AgentLoop(AgentProtocol):
|
||||
if _prefix:
|
||||
stored_content = f"{_prefix}{stored_content or ''}"
|
||||
|
||||
# Splice the vision-fallback caption / placeholder into
|
||||
# the persisted text after any prefix has been applied.
|
||||
if vision_fallback_marker:
|
||||
stored_content = f"{stored_content or ''}\n\n{vision_fallback_marker}"
|
||||
|
||||
await conversation.add_tool_result(
|
||||
tool_use_id=tc.tool_use_id,
|
||||
content=stored_content,
|
||||
@@ -3384,7 +3521,7 @@ class AgentLoop(AgentProtocol):
|
||||
image_content=image_content,
|
||||
is_skill_content=result.is_skill_content,
|
||||
)
|
||||
if tc.tool_name in ("ask_user", "ask_user_multiple") and user_input_requested and not result.is_error:
|
||||
if tc.tool_name == "ask_user" and user_input_requested and not result.is_error:
|
||||
# Defer tool_call_completed until after user responds
|
||||
self._deferred_tool_complete = {
|
||||
"stream_id": stream_id,
|
||||
@@ -3465,8 +3602,6 @@ class AgentLoop(AgentProtocol):
|
||||
token_counts,
|
||||
logged_tool_calls,
|
||||
user_input_requested,
|
||||
ask_user_prompt,
|
||||
ask_user_options,
|
||||
queen_input_requested,
|
||||
final_system_prompt,
|
||||
final_messages,
|
||||
@@ -3517,8 +3652,6 @@ class AgentLoop(AgentProtocol):
|
||||
token_counts,
|
||||
logged_tool_calls,
|
||||
user_input_requested,
|
||||
ask_user_prompt,
|
||||
ask_user_options,
|
||||
queen_input_requested,
|
||||
final_system_prompt,
|
||||
final_messages,
|
||||
@@ -3538,10 +3671,6 @@ class AgentLoop(AgentProtocol):
|
||||
"""Build the synthetic ask_user tool. Delegates to synthetic_tools module."""
|
||||
return build_ask_user_tool()
|
||||
|
||||
def _build_ask_user_multiple_tool(self) -> Tool:
|
||||
"""Build the synthetic ask_user_multiple tool. Delegates to synthetic_tools module."""
|
||||
return build_ask_user_multiple_tool()
|
||||
|
||||
def _build_escalate_tool(self) -> Tool:
|
||||
"""Build the synthetic escalate tool. Delegates to synthetic_tools module."""
|
||||
return build_escalate_tool()
|
||||
@@ -4136,6 +4265,8 @@ class AgentLoop(AgentProtocol):
|
||||
input_tokens: int,
|
||||
output_tokens: int,
|
||||
cached_tokens: int = 0,
|
||||
cache_creation_tokens: int = 0,
|
||||
cost_usd: float = 0.0,
|
||||
execution_id: str = "",
|
||||
iteration: int | None = None,
|
||||
) -> None:
|
||||
@@ -4148,6 +4279,8 @@ class AgentLoop(AgentProtocol):
|
||||
input_tokens=input_tokens,
|
||||
output_tokens=output_tokens,
|
||||
cached_tokens=cached_tokens,
|
||||
cache_creation_tokens=cache_creation_tokens,
|
||||
cost_usd=cost_usd,
|
||||
execution_id=execution_id,
|
||||
iteration=iteration,
|
||||
)
|
||||
|
||||
@@ -56,6 +56,16 @@ class Message:
|
||||
# from a crashed or watchdog-cancelled stream. Signals that the original
|
||||
# turn never finished — the model may or may not choose to redo it.
|
||||
truncated: bool = False
|
||||
# When non-None, identifies the parent session id this message was
|
||||
# carried over from — used by fork_session_into_colony on the single
|
||||
# compacted-summary message it writes when a colony is born from a
|
||||
# queen DM. Presence of the field IS the "inherited" signal.
|
||||
inherited_from: str | None = None
|
||||
# True when this user message was synthesized from one or more
|
||||
# fired triggers (timer/webhook), not typed by a human. The LLM still
|
||||
# sees the message as a regular user turn; the UI uses this flag to
|
||||
# render it as a trigger banner instead of a speech bubble.
|
||||
is_trigger: bool = False
|
||||
|
||||
def to_llm_dict(self) -> dict[str, Any]:
|
||||
"""Convert to OpenAI-format message dict."""
|
||||
@@ -121,6 +131,10 @@ class Message:
|
||||
d["is_system_nudge"] = self.is_system_nudge
|
||||
if self.truncated:
|
||||
d["truncated"] = self.truncated
|
||||
if self.inherited_from is not None:
|
||||
d["inherited_from"] = self.inherited_from
|
||||
if self.is_trigger:
|
||||
d["is_trigger"] = self.is_trigger
|
||||
return d
|
||||
|
||||
@classmethod
|
||||
@@ -140,6 +154,8 @@ class Message:
|
||||
run_id=data.get("run_id"),
|
||||
is_system_nudge=data.get("is_system_nudge", False),
|
||||
truncated=data.get("truncated", False),
|
||||
inherited_from=data.get("inherited_from"),
|
||||
is_trigger=data.get("is_trigger", False),
|
||||
)
|
||||
|
||||
|
||||
@@ -411,9 +427,20 @@ class NodeConversation:
|
||||
store: ConversationStore | None = None,
|
||||
run_id: str | None = None,
|
||||
compaction_buffer_tokens: int | None = None,
|
||||
compaction_buffer_ratio: float | None = None,
|
||||
compaction_warning_buffer_tokens: int | None = None,
|
||||
) -> None:
|
||||
self._system_prompt = system_prompt
|
||||
# Optional split: when a caller updates the prompt with a
|
||||
# ``dynamic_suffix`` argument, we remember the static prefix and
|
||||
# suffix separately so the LLM wrapper can emit them as two
|
||||
# Anthropic system content blocks with a cache breakpoint between
|
||||
# them. ``_system_prompt`` stays as the concatenated form used for
|
||||
# persistence and for the legacy single-block LLM path.
|
||||
# On restore, these default to the concat/empty pair — the next
|
||||
# AgentLoop iteration's dynamic-prompt refresh step repopulates.
|
||||
self._system_prompt_static: str = system_prompt
|
||||
self._system_prompt_dynamic_suffix: str = ""
|
||||
self._max_context_tokens = max_context_tokens
|
||||
self._compaction_threshold = compaction_threshold
|
||||
# Buffer-based compaction trigger (Gap 7). When set, takes
|
||||
@@ -423,6 +450,11 @@ class NodeConversation:
|
||||
# limit. If left as None the legacy threshold-based rule is
|
||||
# used, keeping old call sites behaving identically.
|
||||
self._compaction_buffer_tokens = compaction_buffer_tokens
|
||||
# Ratio component of the hybrid buffer. Combines additively with
|
||||
# _compaction_buffer_tokens so callers can express "reserve N tokens
|
||||
# plus M% of the window" — the absolute floor matters on tiny
|
||||
# windows, the ratio matters on large ones.
|
||||
self._compaction_buffer_ratio = compaction_buffer_ratio
|
||||
self._compaction_warning_buffer_tokens = compaction_warning_buffer_tokens
|
||||
self._output_keys = output_keys
|
||||
self._store = store
|
||||
@@ -437,15 +469,56 @@ class NodeConversation:
|
||||
|
||||
@property
|
||||
def system_prompt(self) -> str:
|
||||
"""Full concatenated system prompt (static + dynamic suffix, if any).
|
||||
|
||||
This is the canonical form used for persistence and for the legacy
|
||||
single-block LLM path. Split-prompt callers should read
|
||||
``system_prompt_static`` and ``system_prompt_dynamic_suffix`` instead.
|
||||
"""
|
||||
return self._system_prompt
|
||||
|
||||
def update_system_prompt(self, new_prompt: str) -> None:
|
||||
@property
|
||||
def system_prompt_static(self) -> str:
|
||||
"""Static prefix of the system prompt (cache-stable).
|
||||
|
||||
Equals ``system_prompt`` when no split is in use. When the AgentLoop
|
||||
calls ``update_system_prompt(static, dynamic_suffix=...)``, this is
|
||||
the piece sent as the cache-controlled first block.
|
||||
"""
|
||||
return self._system_prompt_static
|
||||
|
||||
@property
|
||||
def system_prompt_dynamic_suffix(self) -> str:
|
||||
"""Dynamic tail of the system prompt (not cached).
|
||||
|
||||
Empty unless the consumer splits its prompt. The LLM wrapper uses a
|
||||
non-empty suffix to emit a two-block system content list with a
|
||||
cache breakpoint between the static prefix and this tail.
|
||||
"""
|
||||
return self._system_prompt_dynamic_suffix
|
||||
|
||||
def update_system_prompt(self, new_prompt: str, dynamic_suffix: str | None = None) -> None:
|
||||
"""Update the system prompt.
|
||||
|
||||
Used in continuous conversation mode at phase transitions to swap
|
||||
Layer 3 (focus) while preserving the conversation history.
|
||||
|
||||
When ``dynamic_suffix`` is provided, ``new_prompt`` is interpreted as
|
||||
the STATIC prefix and ``dynamic_suffix`` as the per-turn tail; they
|
||||
travel to the LLM as two separate cache-controlled blocks but are
|
||||
persisted as a single concatenated string for backward-compat
|
||||
restore. ``new_prompt`` alone (suffix left None) keeps the legacy
|
||||
single-string behavior.
|
||||
"""
|
||||
self._system_prompt = new_prompt
|
||||
if dynamic_suffix is None:
|
||||
# Legacy single-string path — static == full, no suffix split.
|
||||
self._system_prompt = new_prompt
|
||||
self._system_prompt_static = new_prompt
|
||||
self._system_prompt_dynamic_suffix = ""
|
||||
else:
|
||||
self._system_prompt_static = new_prompt
|
||||
self._system_prompt_dynamic_suffix = dynamic_suffix
|
||||
self._system_prompt = f"{new_prompt}\n\n{dynamic_suffix}" if dynamic_suffix else new_prompt
|
||||
self._meta_persisted = False # re-persist with new prompt
|
||||
|
||||
def set_current_phase(self, phase_id: str) -> None:
|
||||
@@ -485,6 +558,7 @@ class NodeConversation:
|
||||
is_client_input: bool = False,
|
||||
image_content: list[dict[str, Any]] | None = None,
|
||||
is_system_nudge: bool = False,
|
||||
is_trigger: bool = False,
|
||||
) -> Message:
|
||||
msg = Message(
|
||||
seq=self._next_seq,
|
||||
@@ -496,6 +570,7 @@ class NodeConversation:
|
||||
is_client_input=is_client_input,
|
||||
image_content=image_content,
|
||||
is_system_nudge=is_system_nudge,
|
||||
is_trigger=is_trigger,
|
||||
)
|
||||
self._messages.append(msg)
|
||||
self._next_seq += 1
|
||||
@@ -829,19 +904,30 @@ class NodeConversation:
|
||||
"""True when the conversation should be compacted before the
|
||||
next LLM call.
|
||||
|
||||
Buffer-based rule (Gap 7): trigger when the current estimate
|
||||
plus the configured buffer would exceed the hard context limit.
|
||||
Prevents compaction from firing only AFTER we're already over
|
||||
the wire and forced into a reactive binary-split pass.
|
||||
Hybrid buffer rule: the headroom reserved before compaction fires
|
||||
is the SUM of an absolute fixed component and a ratio of the hard
|
||||
context limit:
|
||||
|
||||
When no buffer is configured, falls back to the multiplicative
|
||||
threshold the old callers were built around.
|
||||
effective_buffer = compaction_buffer_tokens
|
||||
+ compaction_buffer_ratio * max_context_tokens
|
||||
|
||||
The fixed component gives a floor on tiny windows; the ratio
|
||||
keeps the trigger meaningful on large windows where any constant
|
||||
buffer becomes a rounding error (an 8k buffer is 75% on a 32k
|
||||
window but 96% on a 200k window). Compaction fires when the
|
||||
current estimate would consume more than (limit - effective_buffer).
|
||||
|
||||
When neither component is configured, falls back to the legacy
|
||||
multiplicative threshold so old callers keep behaving identically.
|
||||
"""
|
||||
if self._max_context_tokens <= 0:
|
||||
return False
|
||||
if self._compaction_buffer_tokens is not None:
|
||||
budget = self._max_context_tokens - self._compaction_buffer_tokens
|
||||
return self.estimate_tokens() >= max(0, budget)
|
||||
fixed = self._compaction_buffer_tokens
|
||||
ratio = self._compaction_buffer_ratio
|
||||
if fixed is not None or ratio is not None:
|
||||
effective_buffer = (fixed or 0) + (ratio or 0.0) * self._max_context_tokens
|
||||
budget = self._max_context_tokens - effective_buffer
|
||||
return self.estimate_tokens() >= max(0.0, budget)
|
||||
return self.estimate_tokens() >= self._max_context_tokens * self._compaction_threshold
|
||||
|
||||
def compaction_warning(self) -> bool:
|
||||
@@ -1498,6 +1584,7 @@ class NodeConversation:
|
||||
"max_context_tokens": self._max_context_tokens,
|
||||
"compaction_threshold": self._compaction_threshold,
|
||||
"compaction_buffer_tokens": self._compaction_buffer_tokens,
|
||||
"compaction_buffer_ratio": self._compaction_buffer_ratio,
|
||||
"compaction_warning_buffer_tokens": (self._compaction_warning_buffer_tokens),
|
||||
"output_keys": self._output_keys,
|
||||
}
|
||||
@@ -1547,6 +1634,7 @@ class NodeConversation:
|
||||
store=store,
|
||||
run_id=run_id,
|
||||
compaction_buffer_tokens=meta.get("compaction_buffer_tokens"),
|
||||
compaction_buffer_ratio=meta.get("compaction_buffer_ratio"),
|
||||
compaction_warning_buffer_tokens=meta.get("compaction_warning_buffer_tokens"),
|
||||
)
|
||||
conv._meta_persisted = True
|
||||
|
||||
@@ -371,6 +371,7 @@ async def llm_compact(
|
||||
char_limit: int = LLM_COMPACT_CHAR_LIMIT,
|
||||
max_depth: int = LLM_COMPACT_MAX_DEPTH,
|
||||
max_context_tokens: int = 128_000,
|
||||
preserve_user_messages: bool = False,
|
||||
) -> str:
|
||||
"""Summarise *messages* with LLM, splitting recursively if too large.
|
||||
|
||||
@@ -378,6 +379,11 @@ async def llm_compact(
|
||||
rejects the call with a context-length error, the messages are split
|
||||
in half and each half is summarised independently. Tool history is
|
||||
appended once at the top-level call (``_depth == 0``).
|
||||
|
||||
When ``preserve_user_messages`` is True, the prompt and system message
|
||||
are amplified to instruct the LLM to keep every user message verbatim
|
||||
and in full — used by the manual /compact-and-fork endpoint where the
|
||||
user wants their voice carried into the new session intact.
|
||||
"""
|
||||
from framework.agent_loop.conversation import extract_tool_call_history
|
||||
from framework.agent_loop.internals.tool_result_handler import is_context_too_large_error
|
||||
@@ -401,6 +407,7 @@ async def llm_compact(
|
||||
char_limit=char_limit,
|
||||
max_depth=max_depth,
|
||||
max_context_tokens=max_context_tokens,
|
||||
preserve_user_messages=preserve_user_messages,
|
||||
)
|
||||
else:
|
||||
prompt = build_llm_compaction_prompt(
|
||||
@@ -408,17 +415,30 @@ async def llm_compact(
|
||||
accumulator,
|
||||
formatted,
|
||||
max_context_tokens=max_context_tokens,
|
||||
preserve_user_messages=preserve_user_messages,
|
||||
)
|
||||
if preserve_user_messages:
|
||||
system_msg = (
|
||||
"You are a conversation compactor for an AI agent. "
|
||||
"Write a detailed summary that allows the agent to "
|
||||
"continue its work. CRITICAL: reproduce every user "
|
||||
"message verbatim and in full inside the 'User Messages' "
|
||||
"section — do not paraphrase, truncate, or merge them. "
|
||||
"Assistant turns and tool results may be summarised, but "
|
||||
"user input is sacred."
|
||||
)
|
||||
else:
|
||||
system_msg = (
|
||||
"You are a conversation compactor for an AI agent. "
|
||||
"Write a detailed summary that allows the agent to "
|
||||
"continue its work. Preserve user-stated rules, "
|
||||
"constraints, and account/identity preferences verbatim."
|
||||
)
|
||||
summary_budget = max(1024, max_context_tokens // 2)
|
||||
try:
|
||||
response = await ctx.llm.acomplete(
|
||||
messages=[{"role": "user", "content": prompt}],
|
||||
system=(
|
||||
"You are a conversation compactor for an AI agent. "
|
||||
"Write a detailed summary that allows the agent to "
|
||||
"continue its work. Preserve user-stated rules, "
|
||||
"constraints, and account/identity preferences verbatim."
|
||||
),
|
||||
system=system_msg,
|
||||
max_tokens=summary_budget,
|
||||
)
|
||||
summary = response.content
|
||||
@@ -437,6 +457,7 @@ async def llm_compact(
|
||||
char_limit=char_limit,
|
||||
max_depth=max_depth,
|
||||
max_context_tokens=max_context_tokens,
|
||||
preserve_user_messages=preserve_user_messages,
|
||||
)
|
||||
else:
|
||||
raise
|
||||
@@ -459,6 +480,7 @@ async def _llm_compact_split(
|
||||
char_limit: int = LLM_COMPACT_CHAR_LIMIT,
|
||||
max_depth: int = LLM_COMPACT_MAX_DEPTH,
|
||||
max_context_tokens: int = 128_000,
|
||||
preserve_user_messages: bool = False,
|
||||
) -> str:
|
||||
"""Split messages in half and summarise each half independently."""
|
||||
mid = max(1, len(messages) // 2)
|
||||
@@ -470,6 +492,7 @@ async def _llm_compact_split(
|
||||
char_limit=char_limit,
|
||||
max_depth=max_depth,
|
||||
max_context_tokens=max_context_tokens,
|
||||
preserve_user_messages=preserve_user_messages,
|
||||
)
|
||||
s2 = await llm_compact(
|
||||
ctx,
|
||||
@@ -479,6 +502,7 @@ async def _llm_compact_split(
|
||||
char_limit=char_limit,
|
||||
max_depth=max_depth,
|
||||
max_context_tokens=max_context_tokens,
|
||||
preserve_user_messages=preserve_user_messages,
|
||||
)
|
||||
return s1 + "\n\n" + s2
|
||||
|
||||
@@ -510,6 +534,7 @@ def build_llm_compaction_prompt(
|
||||
formatted_messages: str,
|
||||
*,
|
||||
max_context_tokens: int = 128_000,
|
||||
preserve_user_messages: bool = False,
|
||||
) -> str:
|
||||
"""Build prompt for LLM compaction targeting 50% of token budget.
|
||||
|
||||
@@ -539,6 +564,18 @@ def build_llm_compaction_prompt(
|
||||
target_chars = target_tokens * 4
|
||||
node_ctx = "\n".join(ctx_lines)
|
||||
|
||||
user_messages_section = (
|
||||
"6. **User Messages** — Reproduce EVERY user message verbatim and "
|
||||
"in full, in chronological order, each on its own line prefixed "
|
||||
'with the message index (e.g. "[U1] ..."). Do NOT paraphrase, '
|
||||
"summarise, merge, or omit any user message. Preserve markdown, "
|
||||
"code fences, whitespace, and punctuation exactly as the user "
|
||||
"wrote them.\n"
|
||||
if preserve_user_messages
|
||||
else "6. **User Messages** — Preserve ALL user-stated rules, constraints, "
|
||||
"identity preferences, and account details verbatim.\n"
|
||||
)
|
||||
|
||||
return (
|
||||
"You are compacting an AI agent's conversation history. "
|
||||
"The agent is still working and needs to continue.\n\n"
|
||||
@@ -559,8 +596,7 @@ def build_llm_compaction_prompt(
|
||||
"resolved. Include root causes so the agent doesn't repeat them.\n"
|
||||
"5. **Problem Solving Efforts** — Approaches tried, dead ends hit, "
|
||||
"and reasoning behind the current strategy.\n"
|
||||
"6. **User Messages** — Preserve ALL user-stated rules, constraints, "
|
||||
"identity preferences, and account details verbatim.\n"
|
||||
f"{user_messages_section}"
|
||||
"7. **Pending Tasks** — Work remaining, outputs still needed, and "
|
||||
"any blockers.\n"
|
||||
"8. **Current Work** — The most recent action taken and the immediate "
|
||||
|
||||
@@ -12,6 +12,7 @@ import json
|
||||
import logging
|
||||
from collections.abc import Awaitable, Callable
|
||||
from dataclasses import dataclass
|
||||
from datetime import datetime
|
||||
from typing import Any
|
||||
|
||||
from framework.agent_loop.conversation import ConversationStore, NodeConversation
|
||||
@@ -191,15 +192,21 @@ async def drain_injection_queue(
|
||||
else:
|
||||
logger.info("[drain] no vision fallback available; images dropped")
|
||||
image_content = None
|
||||
# Real user input is stored as-is; external events get a prefix
|
||||
# Stamp every injected event with its arrival time so the model
|
||||
# has a consistent temporal log to reason over (and so the
|
||||
# stamp lives inside byte-stable conversation history instead
|
||||
# of a per-turn system-prompt tail). Minute precision is what
|
||||
# the queen needs for conversational / scheduling context.
|
||||
stamp = datetime.now().astimezone().strftime("%Y-%m-%d %H:%M %Z")
|
||||
if is_client_input:
|
||||
stamped = f"[{stamp}] {content}" if content else f"[{stamp}]"
|
||||
await conversation.add_user_message(
|
||||
content,
|
||||
stamped,
|
||||
is_client_input=True,
|
||||
image_content=image_content,
|
||||
)
|
||||
else:
|
||||
await conversation.add_user_message(f"[External event]: {content}")
|
||||
await conversation.add_user_message(f"[{stamp}] [External event] {content}")
|
||||
count += 1
|
||||
except asyncio.QueueEmpty:
|
||||
break
|
||||
@@ -232,9 +239,12 @@ async def drain_trigger_queue(
|
||||
payload_str = json.dumps(t.payload, default=str)
|
||||
parts.append(f"[TRIGGER: {t.trigger_type}/{t.source_id}]{task_line}\n{payload_str}")
|
||||
|
||||
combined = "\n\n".join(parts)
|
||||
stamp = datetime.now().astimezone().strftime("%Y-%m-%d %H:%M %Z")
|
||||
combined = f"[{stamp}]\n" + "\n\n".join(parts)
|
||||
logger.info("[drain] %d trigger(s): %s", len(triggers), combined[:200])
|
||||
await conversation.add_user_message(combined)
|
||||
# Tag the message so the UI can render a banner instead of the raw
|
||||
# `[TRIGGER: ...]` text. The LLM still sees `combined` verbatim.
|
||||
await conversation.add_user_message(combined, is_trigger=True)
|
||||
return len(triggers)
|
||||
|
||||
|
||||
|
||||
@@ -108,6 +108,8 @@ async def publish_llm_turn_complete(
|
||||
input_tokens: int,
|
||||
output_tokens: int,
|
||||
cached_tokens: int = 0,
|
||||
cache_creation_tokens: int = 0,
|
||||
cost_usd: float = 0.0,
|
||||
execution_id: str = "",
|
||||
iteration: int | None = None,
|
||||
) -> None:
|
||||
@@ -120,6 +122,8 @@ async def publish_llm_turn_complete(
|
||||
input_tokens=input_tokens,
|
||||
output_tokens=output_tokens,
|
||||
cached_tokens=cached_tokens,
|
||||
cache_creation_tokens=cache_creation_tokens,
|
||||
cost_usd=cost_usd,
|
||||
execution_id=execution_id,
|
||||
iteration=iteration,
|
||||
)
|
||||
|
||||
@@ -91,108 +91,66 @@ def sanitize_ask_user_inputs(
|
||||
return q, recovered
|
||||
|
||||
|
||||
ask_user_prompt = """\
|
||||
Use this tool when you need to ask the user questions during execution. Reach for it when:
|
||||
|
||||
- The task is ambiguous and the user needs to choose an approach
|
||||
- You need missing information to continue
|
||||
- You want approval before taking a meaningful action
|
||||
- A decision has real trade-offs the user should weigh in on
|
||||
- You want post-task feedback, or to offer saving a skill or updating memory
|
||||
|
||||
Usage notes:
|
||||
- Users will always be able to select "Other" to provide custom text input, \
|
||||
so do not include catch-all options like "Other" or "Something else" yourself.
|
||||
- Each option is a plain string. Do NOT wrap options in `{"label": "..."}` or \
|
||||
`{"value": "..."}` objects — pass the raw choice text directly, e.g. `"Email"`, \
|
||||
not `{"label": "Email"}`.
|
||||
- If you recommend a specific option, make that the first option in the list \
|
||||
and append " (Recommended)" to the end of its text.
|
||||
- Call this tool whenever you need the user's response.
|
||||
- The prompt field must be plain text only.
|
||||
- Do not include XML, pseudo-tags, or inline option lists inside prompt.
|
||||
- Omit options only when the question truly requires a free-form response the \
|
||||
user must type out, such as describing an idea or pasting an error message.
|
||||
- Do not repeat the questions in your normal text response. The widget renders \
|
||||
them, so keep any surrounding text to a brief intro only.
|
||||
Example — single question with options:
|
||||
{"questions": [{"id": "next", "prompt": "What would you like to do?", \
|
||||
"options": ["Build a new agent (Recommended)", "Modify existing agent", "Run tests"]}]}
|
||||
|
||||
Example — batch:
|
||||
{"questions": [
|
||||
{"id": "scope", "prompt": "What scope?", "options": ["Full", "Partial"]},
|
||||
{"id": "format", "prompt": "Output format?", "options": ["PDF", "CSV", "JSON"]},
|
||||
{"id": "details", "prompt": "Any special requirements?"}
|
||||
]}
|
||||
|
||||
Example — free-form (queen only):
|
||||
{"questions": [{"id": "idea", "prompt": "Describe the agent you want to build."}]}
|
||||
"""
|
||||
|
||||
|
||||
def build_ask_user_tool() -> Tool:
|
||||
"""Build the synthetic ask_user tool for explicit user-input requests.
|
||||
|
||||
The queen calls ask_user() when it needs to pause and wait
|
||||
for user input. Text-only turns WITHOUT ask_user flow through without
|
||||
blocking, allowing progress updates and summaries to stream freely.
|
||||
The queen calls ask_user() when it needs to pause and wait for user
|
||||
input. Accepts an array of 1-8 questions — a single question for the
|
||||
common case, or a batch when several clarifications are needed at once.
|
||||
Text-only turns WITHOUT ask_user flow through without blocking, allowing
|
||||
progress updates and summaries to stream freely.
|
||||
"""
|
||||
return Tool(
|
||||
name="ask_user",
|
||||
description=(
|
||||
"You MUST call this tool whenever you need the user's response. "
|
||||
"Always call it after greeting the user, asking a question, or "
|
||||
"requesting approval. Do NOT call it for status updates or "
|
||||
"summaries that don't require a response.\n\n"
|
||||
"STRUCTURE RULES (CRITICAL):\n"
|
||||
"- The 'question' field is PLAIN TEXT shown to the user. Do NOT "
|
||||
"include XML tags, pseudo-tags like </question>, or option lists "
|
||||
"in the question string. The UI does not parse them — they "
|
||||
"render as raw text and look broken.\n"
|
||||
"- The 'options' parameter is the ONLY way to render buttons. "
|
||||
"If you want buttons, put them in the 'options' array, not in "
|
||||
"the question string. Do NOT write 'OPTIONS: [...]', "
|
||||
"'_options: [...]', or any inline list inside 'question'.\n"
|
||||
"- The question text must read as a single clean prompt with "
|
||||
"no markup. Example: 'What would you like to do?' — not "
|
||||
"'What would you like to do?</question>'.\n\n"
|
||||
"USAGE:\n"
|
||||
"Always include 2-3 predefined options. The UI automatically "
|
||||
"appends an 'Other' free-text input after your options, so NEVER "
|
||||
"include catch-all options like 'Custom idea', 'Something else', "
|
||||
"'Other', or 'None of the above' — the UI handles that. "
|
||||
"When the question primarily needs a typed answer but you must "
|
||||
"include options, make one option signal that typing is expected "
|
||||
"(e.g. 'I\\'ll type my response'). This helps users discover the "
|
||||
"free-text input. "
|
||||
"The ONLY exception: omit options when the question demands a "
|
||||
"free-form answer the user must type out (e.g. 'Describe your "
|
||||
"agent idea', 'Paste the error message').\n\n"
|
||||
"CORRECT EXAMPLE:\n"
|
||||
'{"question": "What would you like to do?", "options": '
|
||||
'["Build a new agent", "Modify existing agent", "Run tests"]}\n\n'
|
||||
"FREE-FORM EXAMPLE:\n"
|
||||
'{"question": "Describe the agent you want to build."}\n\n'
|
||||
"WRONG (do NOT do this — buttons will not render):\n"
|
||||
'{"question": "What now?</question>\\n_OPTIONS: [\\"A\\", \\"B\\"]"}'
|
||||
),
|
||||
parameters={
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"question": {
|
||||
"type": "string",
|
||||
"description": "The question or prompt shown to the user.",
|
||||
},
|
||||
"options": {
|
||||
"type": "array",
|
||||
"items": {"type": "string"},
|
||||
"description": (
|
||||
"2-3 specific predefined choices. Include in most cases. "
|
||||
'Example: ["Option A", "Option B", "Option C"]. '
|
||||
"The UI always appends an 'Other' free-text input, so "
|
||||
"do NOT include catch-alls like 'Custom idea' or 'Other'. "
|
||||
"Omit ONLY when the user must type a free-form answer."
|
||||
),
|
||||
"minItems": 2,
|
||||
"maxItems": 3,
|
||||
},
|
||||
},
|
||||
"required": ["question"],
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
def build_ask_user_multiple_tool() -> Tool:
|
||||
"""Build the synthetic ask_user_multiple tool for batched questions.
|
||||
|
||||
Queen-only tool that presents multiple questions at once so the user
|
||||
can answer them all in a single interaction rather than one at a time.
|
||||
"""
|
||||
return Tool(
|
||||
name="ask_user_multiple",
|
||||
description=(
|
||||
"Ask the user multiple questions at once. Use this instead of "
|
||||
"ask_user when you have 2 or more questions to ask in the same "
|
||||
"turn — it lets the user answer everything in one go rather than "
|
||||
"going back and forth. Each question can have its own predefined "
|
||||
"options (2-3 choices) or be free-form. The UI renders all "
|
||||
"questions together with a single Submit button. "
|
||||
"ALWAYS prefer this over ask_user when you have multiple things "
|
||||
"to clarify. "
|
||||
"IMPORTANT: Do NOT repeat the questions in your text response — "
|
||||
"the widget renders them. Keep your text to a brief intro only. "
|
||||
'{"questions": ['
|
||||
' {"id": "scope", "prompt": "What scope?", "options": ["Full", "Partial"]},'
|
||||
' {"id": "format", "prompt": "Output format?", "options": ["PDF", "CSV", "JSON"]},'
|
||||
' {"id": "details", "prompt": "Any special requirements?"}'
|
||||
"]}"
|
||||
),
|
||||
description=ask_user_prompt,
|
||||
parameters={
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"questions": {
|
||||
"type": "array",
|
||||
"minItems": 1,
|
||||
"maxItems": 8,
|
||||
"description": "List of questions to present to the user.",
|
||||
"items": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -208,8 +166,13 @@ def build_ask_user_multiple_tool() -> Tool:
|
||||
"type": "array",
|
||||
"items": {"type": "string"},
|
||||
"description": (
|
||||
"2-3 predefined choices. The UI appends an "
|
||||
"'Other' free-text input automatically. "
|
||||
"2-3 predefined choices as plain strings "
|
||||
'(e.g. ["Yes", "No", "Maybe"]). Do NOT '
|
||||
'wrap items in {"label": "..."} or '
|
||||
'{"value": "..."} objects — pass the raw '
|
||||
"choice text directly. The UI appends an "
|
||||
"'Other' free-text input automatically, "
|
||||
"so don't include catch-all options. "
|
||||
"Omit only when the user must type a free-form answer."
|
||||
),
|
||||
"minItems": 2,
|
||||
@@ -218,9 +181,6 @@ def build_ask_user_multiple_tool() -> Tool:
|
||||
},
|
||||
"required": ["id", "prompt"],
|
||||
},
|
||||
"minItems": 2,
|
||||
"maxItems": 8,
|
||||
"description": "List of questions to present to the user.",
|
||||
},
|
||||
},
|
||||
"required": ["questions"],
|
||||
|
||||
@@ -0,0 +1,291 @@
|
||||
"""Generic coercion of LLM-emitted tool arguments to match each tool's JSON schema.
|
||||
|
||||
Small/mid-size models drift from tool schemas in predictable, boring ways:
|
||||
|
||||
- A number field comes back as a string (``"42"`` instead of ``42``).
|
||||
- A boolean field comes back as a string (``"true"`` instead of ``True``).
|
||||
- An array-of-string field comes back as an array of objects
|
||||
(``[{"label": "A"}, ...]`` instead of ``["A", ...]``).
|
||||
- An array/object field comes back as a JSON-encoded string
|
||||
(``'["A","B"]'`` instead of ``["A", "B"]``).
|
||||
- A lone scalar arrives where the schema expects an array.
|
||||
|
||||
This module centralizes the healing in one schema-driven pass that runs
|
||||
on every tool call before dispatch. Coercion is conservative:
|
||||
|
||||
- Values that already match the expected type are untouched.
|
||||
- Shapes we don't recognize are returned as-is, so real bugs surface
|
||||
instead of getting silently munged into something plausible.
|
||||
- Every actual coercion is logged with the tool, property, and shape
|
||||
transition so we can see which models/tools are drifting.
|
||||
|
||||
Tool-specific prompt drift (e.g. ``</question>`` tags leaking into an
|
||||
``ask_user`` prompt string) is NOT this module's job — that belongs in
|
||||
per-tool sanitizers, because it's about prompt style, not schema shape.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
from typing import Any
|
||||
|
||||
from framework.llm.provider import Tool
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# When an ``array<string>`` field arrives as an array of objects, look
|
||||
# for a text-carrying field in preference order. Covers the wrappers
|
||||
# small models tend to produce: ``[{"label": "A"}]``, ``[{"value": "A"}]``,
|
||||
# ``[{"text": "A"}]``, etc.
|
||||
_STRING_EXTRACT_KEYS: tuple[str, ...] = (
|
||||
"label",
|
||||
"value",
|
||||
"text",
|
||||
"name",
|
||||
"title",
|
||||
"display",
|
||||
)
|
||||
|
||||
|
||||
def coerce_tool_input(tool: Tool, raw_input: dict[str, Any] | None) -> dict[str, Any]:
|
||||
"""Coerce *raw_input* in place to match *tool*'s JSON schema.
|
||||
|
||||
Returns the mutated input dict (same object as *raw_input* when
|
||||
possible, for callers that assume in-place mutation). Properties
|
||||
not present in the schema are left untouched.
|
||||
"""
|
||||
if not isinstance(raw_input, dict):
|
||||
return raw_input or {}
|
||||
|
||||
schema = tool.parameters or {}
|
||||
props = schema.get("properties")
|
||||
if not isinstance(props, dict):
|
||||
return raw_input
|
||||
|
||||
for key in list(raw_input.keys()):
|
||||
prop_schema = props.get(key)
|
||||
if not isinstance(prop_schema, dict):
|
||||
continue
|
||||
original = raw_input[key]
|
||||
coerced = _coerce(original, prop_schema)
|
||||
if coerced is not original:
|
||||
logger.info(
|
||||
"coerced tool input tool=%s prop=%s from=%s to=%s",
|
||||
tool.name,
|
||||
key,
|
||||
_shape(original),
|
||||
_shape(coerced),
|
||||
)
|
||||
raw_input[key] = coerced
|
||||
|
||||
return raw_input
|
||||
|
||||
|
||||
def _coerce(value: Any, schema: dict[str, Any]) -> Any:
|
||||
"""Dispatch on the schema's ``type`` field.
|
||||
|
||||
Returns the *same object* on passthrough so callers can detect
|
||||
no-ops via identity (``coerced is value``).
|
||||
"""
|
||||
expected = schema.get("type")
|
||||
if not expected:
|
||||
return value
|
||||
|
||||
# Union type: try each in order, return the first coercion that
|
||||
# actually changes the value. Falls back to the original.
|
||||
if isinstance(expected, list):
|
||||
for t in expected:
|
||||
sub_schema = {**schema, "type": t}
|
||||
coerced = _coerce(value, sub_schema)
|
||||
if coerced is not value:
|
||||
return coerced
|
||||
return value
|
||||
|
||||
if expected == "integer":
|
||||
return _coerce_integer(value)
|
||||
if expected == "number":
|
||||
return _coerce_number(value)
|
||||
if expected == "boolean":
|
||||
return _coerce_boolean(value)
|
||||
if expected == "string":
|
||||
return _coerce_string(value)
|
||||
if expected == "array":
|
||||
return _coerce_array(value, schema)
|
||||
if expected == "object":
|
||||
return _coerce_object(value, schema)
|
||||
|
||||
return value
|
||||
|
||||
|
||||
def _coerce_integer(value: Any) -> Any:
|
||||
# bool is a subclass of int in Python; don't mistake True for 1 here.
|
||||
if isinstance(value, bool):
|
||||
return value
|
||||
if isinstance(value, int):
|
||||
return value
|
||||
if isinstance(value, str):
|
||||
parsed = _parse_number(value)
|
||||
if parsed is None:
|
||||
return value
|
||||
if parsed != int(parsed):
|
||||
# Has a fractional part — caller asked for int, don't truncate.
|
||||
return value
|
||||
return int(parsed)
|
||||
return value
|
||||
|
||||
|
||||
def _coerce_number(value: Any) -> Any:
|
||||
if isinstance(value, bool):
|
||||
return value
|
||||
if isinstance(value, (int, float)):
|
||||
return value
|
||||
if isinstance(value, str):
|
||||
parsed = _parse_number(value)
|
||||
if parsed is None:
|
||||
return value
|
||||
if parsed == int(parsed):
|
||||
return int(parsed)
|
||||
return parsed
|
||||
return value
|
||||
|
||||
|
||||
def _coerce_boolean(value: Any) -> Any:
|
||||
if isinstance(value, bool):
|
||||
return value
|
||||
if isinstance(value, str):
|
||||
low = value.strip().lower()
|
||||
if low == "true":
|
||||
return True
|
||||
if low == "false":
|
||||
return False
|
||||
return value
|
||||
|
||||
|
||||
def _coerce_string(value: Any) -> Any:
|
||||
if isinstance(value, str):
|
||||
return value
|
||||
# Common drift: model sent ``{"label": "..."}`` when we wanted "...".
|
||||
if isinstance(value, dict):
|
||||
extracted = _extract_string_from_object(value)
|
||||
if extracted is not None:
|
||||
return extracted
|
||||
return value
|
||||
|
||||
|
||||
def _coerce_array(value: Any, schema: dict[str, Any]) -> Any:
|
||||
# Heal: JSON-encoded array string → array.
|
||||
if isinstance(value, str):
|
||||
parsed = _try_parse_json(value)
|
||||
if isinstance(parsed, list):
|
||||
value = parsed
|
||||
else:
|
||||
# Scalar string where an array is expected — wrap it.
|
||||
return [value]
|
||||
elif not isinstance(value, list):
|
||||
# Any other scalar (int, bool, dict, ...) — wrap.
|
||||
return [value]
|
||||
|
||||
items_schema = schema.get("items")
|
||||
if not isinstance(items_schema, dict):
|
||||
return value
|
||||
|
||||
coerced_items: list[Any] = []
|
||||
changed = False
|
||||
for item in value:
|
||||
c = _coerce(item, items_schema)
|
||||
if c is not item:
|
||||
changed = True
|
||||
coerced_items.append(c)
|
||||
return coerced_items if changed else value
|
||||
|
||||
|
||||
def _coerce_object(value: Any, schema: dict[str, Any]) -> Any:
|
||||
# Heal: JSON-encoded object string → object.
|
||||
if isinstance(value, str):
|
||||
parsed = _try_parse_json(value)
|
||||
if isinstance(parsed, dict):
|
||||
value = parsed
|
||||
else:
|
||||
return value
|
||||
if not isinstance(value, dict):
|
||||
return value
|
||||
|
||||
sub_props = schema.get("properties")
|
||||
if not isinstance(sub_props, dict):
|
||||
return value
|
||||
|
||||
changed = False
|
||||
for k in list(value.keys()):
|
||||
sub_schema = sub_props.get(k)
|
||||
if not isinstance(sub_schema, dict):
|
||||
continue
|
||||
original = value[k]
|
||||
coerced = _coerce(original, sub_schema)
|
||||
if coerced is not original:
|
||||
value[k] = coerced
|
||||
changed = True
|
||||
# Return the same dict on mutation so callers that passed a shared
|
||||
# reference see the updates. ``changed`` is only used to decide
|
||||
# whether we need to log at a coarser level upstream.
|
||||
return value if changed or not sub_props else value
|
||||
|
||||
|
||||
def _extract_string_from_object(obj: dict[str, Any]) -> str | None:
|
||||
"""Pick a likely-text field out of a wrapper object.
|
||||
|
||||
Tries the known keys first, falls back to the sole value if the
|
||||
object has exactly one entry. Returns None when nothing plausible
|
||||
is found — the caller keeps the original.
|
||||
"""
|
||||
for k in _STRING_EXTRACT_KEYS:
|
||||
v = obj.get(k)
|
||||
if isinstance(v, str) and v:
|
||||
return v
|
||||
if len(obj) == 1:
|
||||
(only,) = obj.values()
|
||||
if isinstance(only, str) and only:
|
||||
return only
|
||||
return None
|
||||
|
||||
|
||||
def _try_parse_json(raw: str) -> Any:
|
||||
try:
|
||||
return json.loads(raw)
|
||||
except (ValueError, TypeError):
|
||||
return None
|
||||
|
||||
|
||||
def _parse_number(raw: str) -> float | None:
|
||||
try:
|
||||
f = float(raw)
|
||||
except (ValueError, OverflowError):
|
||||
return None
|
||||
# Reject NaN and inf — they pass float() but aren't useful numeric
|
||||
# values for tool arguments.
|
||||
if f != f or f == float("inf") or f == float("-inf"):
|
||||
return None
|
||||
return f
|
||||
|
||||
|
||||
def _shape(value: Any) -> str:
|
||||
"""Short type/shape description used in coercion log lines."""
|
||||
if value is None:
|
||||
return "None"
|
||||
if isinstance(value, bool):
|
||||
return "bool"
|
||||
if isinstance(value, int):
|
||||
return "int"
|
||||
if isinstance(value, float):
|
||||
return "float"
|
||||
if isinstance(value, str):
|
||||
return f"str[{len(value)}]"
|
||||
if isinstance(value, list):
|
||||
if not value:
|
||||
return "list[0]"
|
||||
return f"list[{len(value)}]<{_shape(value[0])}>"
|
||||
if isinstance(value, dict):
|
||||
keys = sorted(value.keys())[:3]
|
||||
suffix = ",…" if len(value) > 3 else ""
|
||||
return f"dict{{{','.join(keys)}{suffix}}}"
|
||||
return type(value).__name__
|
||||
@@ -69,6 +69,20 @@ class LoopConfig:
|
||||
# and less tight than Anthropic's own counting. Override via
|
||||
# LoopConfig for larger windows.
|
||||
compaction_buffer_tokens: int = 8_000
|
||||
# Ratio-based component of the hybrid compaction buffer. Effective
|
||||
# headroom reserved before compaction fires is
|
||||
# compaction_buffer_tokens + compaction_buffer_ratio * max_context_tokens
|
||||
# The ratio scales with the model's window where the absolute fixed
|
||||
# component does not (an 8k absolute buffer is 75% trigger on a 32k
|
||||
# window but 96% on a 200k window). Combining them gives an absolute
|
||||
# floor sized for the worst-case single tool result (one un-spilled
|
||||
# max_tool_result_chars payload ≈ 30k chars ≈ 7.5k tokens, rounded to
|
||||
# 8k) plus a fractional headroom that keeps the trigger meaningful on
|
||||
# large windows, so the inner tool loop always has room to grow
|
||||
# without tripping the mid-turn pre-send guard. Defaults: 8k + 15%.
|
||||
# On 32k that's a 12.8k buffer (~60% trigger); on 200k it's 38k
|
||||
# (~81% trigger); on 1M it's 158k (~84% trigger).
|
||||
compaction_buffer_ratio: float = 0.15
|
||||
# Warning is emitted one buffer earlier so the user/telemetry gets
|
||||
# a "we're close" signal without triggering a compaction pass.
|
||||
compaction_warning_buffer_tokens: int = 12_000
|
||||
|
||||
@@ -0,0 +1,247 @@
|
||||
"""Vision-fallback subagent for tool-result images on text-only LLMs.
|
||||
|
||||
When a tool returns image content but the main agent's model can't
|
||||
accept image blocks (i.e. its catalog entry has ``supports_vision: false``),
|
||||
the framework strips the images before they ever reach the LLM. Without
|
||||
this module, the agent then sees only the tool's text envelope (URL,
|
||||
dimensions, size) and is blind to whatever the image actually shows.
|
||||
|
||||
This module provides:
|
||||
|
||||
* ``caption_tool_image()`` — direct LiteLLM call to a configured
|
||||
vision model (``vision_fallback`` block in ``~/.hive/configuration.json``)
|
||||
that takes the agent's intent + the image(s) and returns a textual
|
||||
description tailored to that intent.
|
||||
* ``extract_intent_for_tool()`` — pull the most recent assistant text
|
||||
+ the tool call descriptor and concatenate them into a ≤2KB intent
|
||||
string the vision subagent can reason against.
|
||||
|
||||
Both helpers degrade silently — return ``None`` / a placeholder rather
|
||||
than raise — so a vision-fallback failure can never kill the main
|
||||
agent's run. The agent-loop call site is responsible for chaining
|
||||
through to the existing generic-caption rotation
|
||||
(``_describe_images_as_text``) on a None return.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
from datetime import datetime
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
from framework.config import (
|
||||
get_vision_fallback_api_base,
|
||||
get_vision_fallback_api_key,
|
||||
get_vision_fallback_model,
|
||||
)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from ..conversation import NodeConversation
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# Hard cap on the intent string handed to the vision subagent. The
|
||||
# subagent only needs the agent's recent reasoning + the tool descriptor;
|
||||
# anything longer is wasted tokens (and risks pushing past the vision
|
||||
# model's context with the image attached).
|
||||
_INTENT_MAX_CHARS = 4096
|
||||
|
||||
# Cap on the tool args JSON snippet inside the intent. Some tool inputs
|
||||
# (large strings, file contents) would dominate the intent if uncapped.
|
||||
_TOOL_ARGS_MAX_CHARS = 4096
|
||||
|
||||
# Subagent system prompt — kept short so it fits within any provider's
|
||||
# system-prompt budget alongside the user message + image. Tells the
|
||||
# subagent its role and constrains output format.
|
||||
#
|
||||
# Coordinate labeling: the main agent's browser tools
|
||||
# (browser_click_coordinate / browser_hover_coordinate / browser_press_at)
|
||||
# accept VIEWPORT FRACTIONS (x, y) in [0..1] where (0,0) is the top-left
|
||||
# and (1,1) is the bottom-right of the screenshot. Without coordinates
|
||||
# the text-only agent has no way to act on what we describe — it can
|
||||
# read the caption but cannot point. So for every interactive element
|
||||
# we name (button, link, input, icon, tab, menu item, dialog control),
|
||||
# include its approximate viewport-fraction centre as ``(fx, fy)``
|
||||
# right after the element's name, e.g. ``"Submit" button (0.83, 0.92)``.
|
||||
# Three rules: (1) coordinates only for things plausibly clickable /
|
||||
# hoverable / typeable — don't tag pure body text or decorative
|
||||
# graphics. (2) Eyeball to two decimal places; precision beyond that
|
||||
# is false confidence. (3) Never invent — if an element is partly
|
||||
# off-screen or you can't locate it, omit the coordinate rather than
|
||||
# guessing.
|
||||
_VISION_SUBAGENT_SYSTEM = (
|
||||
"You are a vision subagent for a text-only main agent. The main "
|
||||
"agent invoked a tool that returned the image(s) attached. Their "
|
||||
"intent (their reasoning + the tool call) is below. Describe what "
|
||||
"the image shows in service of their intent — concrete, factual, "
|
||||
"no speculation. If their intent asks a yes/no question, answer it "
|
||||
"directly first.\n\n"
|
||||
"Coordinate labeling: the main agent uses fractional viewport "
|
||||
"coordinates (x, y) in [0..1] — (0, 0) is the top-left of the "
|
||||
"image, (1, 1) is the bottom-right — to drive its click / hover / "
|
||||
"key-press tools. For every interactive element you mention "
|
||||
"(button, link, input, checkbox, radio, dropdown, tab, menu item, "
|
||||
"dialog control, icon), append its approximate centre as "
|
||||
"``(fx, fy)`` immediately after the element's name or label, e.g. "
|
||||
'``"Submit" button (0.83, 0.92)`` or ``profile avatar icon '
|
||||
"(0.05, 0.07)``. Use two decimal places — more is false precision. "
|
||||
"Skip coordinates for pure body text and decorative elements that "
|
||||
"aren't clickable. If an element is partially off-screen or you "
|
||||
"cannot reliably locate its centre, omit the coordinate rather "
|
||||
"than guessing.\n\n"
|
||||
"Output plain text, no markdown, ≤ 600 words."
|
||||
)
|
||||
|
||||
|
||||
def extract_intent_for_tool(
|
||||
conversation: NodeConversation,
|
||||
tool_name: str,
|
||||
tool_args: dict[str, Any] | None,
|
||||
) -> str:
|
||||
"""Build the intent string passed to the vision subagent.
|
||||
|
||||
Combines the most recent assistant text (the LLM's reasoning right
|
||||
before invoking the tool) with a structured tool-call descriptor.
|
||||
Truncates to ``_INTENT_MAX_CHARS`` total, favouring the head of the
|
||||
assistant text where goal-stating sentences usually live.
|
||||
|
||||
If no preceding assistant text exists (rare — first turn), falls
|
||||
back to ``"<no preceding reasoning>"`` so the subagent still gets
|
||||
the tool descriptor.
|
||||
"""
|
||||
args_json: str
|
||||
try:
|
||||
args_json = json.dumps(tool_args or {}, default=str)
|
||||
except Exception:
|
||||
args_json = repr(tool_args)
|
||||
if len(args_json) > _TOOL_ARGS_MAX_CHARS:
|
||||
args_json = args_json[:_TOOL_ARGS_MAX_CHARS] + "…"
|
||||
|
||||
tool_line = f"Called: {tool_name}({args_json})"
|
||||
|
||||
# Walk newest → oldest, take the first assistant message with text.
|
||||
assistant_text = ""
|
||||
try:
|
||||
messages = getattr(conversation, "_messages", []) or []
|
||||
for msg in reversed(messages):
|
||||
if getattr(msg, "role", None) != "assistant":
|
||||
continue
|
||||
content = getattr(msg, "content", "") or ""
|
||||
if isinstance(content, str) and content.strip():
|
||||
assistant_text = content.strip()
|
||||
break
|
||||
except Exception:
|
||||
# Defensive — the agent loop must keep running even if the
|
||||
# conversation structure changes shape.
|
||||
assistant_text = ""
|
||||
|
||||
if not assistant_text:
|
||||
assistant_text = "<no preceding reasoning>"
|
||||
|
||||
# Intent = tool descriptor (always intact) + reasoning (truncated).
|
||||
head = f"{tool_line}\n\nReasoning before call:\n"
|
||||
budget = _INTENT_MAX_CHARS - len(head)
|
||||
if budget < 100:
|
||||
# Tool descriptor is huge somehow — truncate it.
|
||||
return head[:_INTENT_MAX_CHARS]
|
||||
if len(assistant_text) > budget:
|
||||
assistant_text = assistant_text[: budget - 1] + "…"
|
||||
return head + assistant_text
|
||||
|
||||
|
||||
async def caption_tool_image(
|
||||
intent: str,
|
||||
image_content: list[dict[str, Any]],
|
||||
*,
|
||||
timeout_s: float = 30.0,
|
||||
) -> str | None:
|
||||
"""Caption the given images using the configured ``vision_fallback`` model.
|
||||
|
||||
Returns the model's text response on success, or ``None`` on any
|
||||
failure (no config, no API key, timeout, exception, empty
|
||||
response). Callers chain to the next stage of the fallback on None.
|
||||
|
||||
Logs each call to ``~/.hive/llm_logs`` via ``log_llm_turn`` so the
|
||||
cost / latency / quality are auditable post-hoc, tagged with
|
||||
``execution_id="vision_fallback_subagent"``.
|
||||
"""
|
||||
model = get_vision_fallback_model()
|
||||
if not model:
|
||||
return None
|
||||
|
||||
api_key = get_vision_fallback_api_key()
|
||||
api_base = get_vision_fallback_api_base()
|
||||
if not api_key:
|
||||
logger.debug("vision_fallback configured but no API key resolved; skipping")
|
||||
return None
|
||||
|
||||
try:
|
||||
import litellm
|
||||
except ImportError:
|
||||
return None
|
||||
|
||||
user_blocks: list[dict[str, Any]] = [{"type": "text", "text": intent}]
|
||||
user_blocks.extend(image_content)
|
||||
messages = [
|
||||
{"role": "system", "content": _VISION_SUBAGENT_SYSTEM},
|
||||
{"role": "user", "content": user_blocks},
|
||||
]
|
||||
|
||||
kwargs: dict[str, Any] = {
|
||||
"model": model,
|
||||
"messages": messages,
|
||||
"max_tokens": 1024,
|
||||
"timeout": timeout_s,
|
||||
"api_key": api_key,
|
||||
}
|
||||
if api_base:
|
||||
kwargs["api_base"] = api_base
|
||||
|
||||
started = datetime.now()
|
||||
caption: str | None = None
|
||||
error_text: str | None = None
|
||||
try:
|
||||
response = await litellm.acompletion(**kwargs)
|
||||
text = (response.choices[0].message.content or "").strip()
|
||||
if text:
|
||||
caption = text
|
||||
except Exception as exc:
|
||||
error_text = f"{type(exc).__name__}: {exc}"
|
||||
logger.debug("vision_fallback model '%s' failed: %s", model, exc)
|
||||
|
||||
# Best-effort audit log so users can grep ~/.hive/llm_logs/ for
|
||||
# vision-fallback subagent calls. Failures here must not bubble.
|
||||
try:
|
||||
from framework.tracker.llm_debug_logger import log_llm_turn
|
||||
|
||||
# Don't dump the base64 image data into the log file — that
|
||||
# would balloon the jsonl with mostly-binary noise.
|
||||
elided_blocks: list[dict[str, Any]] = [{"type": "text", "text": intent}]
|
||||
elided_blocks.extend({"type": "image_url", "image_url": {"url": "<elided>"}} for _ in range(len(image_content)))
|
||||
log_llm_turn(
|
||||
node_id="vision_fallback_subagent",
|
||||
stream_id="vision_fallback",
|
||||
execution_id="vision_fallback_subagent",
|
||||
iteration=0,
|
||||
system_prompt=_VISION_SUBAGENT_SYSTEM,
|
||||
messages=[{"role": "user", "content": elided_blocks}],
|
||||
assistant_text=caption or "",
|
||||
tool_calls=[],
|
||||
tool_results=[],
|
||||
token_counts={
|
||||
"model": model,
|
||||
"elapsed_s": (datetime.now() - started).total_seconds(),
|
||||
"error": error_text,
|
||||
"num_images": len(image_content),
|
||||
"intent_chars": len(intent),
|
||||
},
|
||||
)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
return caption
|
||||
|
||||
|
||||
__all__ = ["caption_tool_image", "extract_intent_for_tool"]
|
||||
@@ -53,7 +53,14 @@ def build_prompt_spec(
|
||||
# trigger tools are present in this agent's tool list (e.g. browser_*
|
||||
# pulls in hive.browser-automation). Keeps non-browser agents lean.
|
||||
tool_names = [getattr(t, "name", "") for t in (getattr(ctx, "available_tools", None) or [])]
|
||||
skills_catalog_prompt = augment_catalog_for_tools(ctx.skills_catalog_prompt or "", tool_names)
|
||||
raw_catalog = ctx.skills_catalog_prompt or ""
|
||||
dynamic_catalog = getattr(ctx, "dynamic_skills_catalog_provider", None)
|
||||
if dynamic_catalog is not None:
|
||||
try:
|
||||
raw_catalog = dynamic_catalog() or ""
|
||||
except Exception:
|
||||
raw_catalog = ctx.skills_catalog_prompt or ""
|
||||
skills_catalog_prompt = augment_catalog_for_tools(raw_catalog, tool_names)
|
||||
|
||||
return PromptSpec(
|
||||
identity_prompt=ctx.identity_prompt or "",
|
||||
|
||||
@@ -182,7 +182,24 @@ class AgentContext:
|
||||
|
||||
dynamic_tools_provider: Any = None
|
||||
dynamic_prompt_provider: Any = None
|
||||
# Optional Callable[[], str]: when set alongside ``dynamic_prompt_provider``,
|
||||
# the AgentLoop sends the system prompt as two pieces — the result of
|
||||
# ``dynamic_prompt_provider`` is the STATIC block (cached), and this
|
||||
# provider returns the DYNAMIC suffix (not cached). The LLM wrapper
|
||||
# emits them as two Anthropic system content blocks with a cache
|
||||
# breakpoint between them for providers that honor ``cache_control``.
|
||||
# For providers that don't, the two strings are concatenated. Used by
|
||||
# the Queen to keep her persona/role/tools block warm across iterations
|
||||
# while the recall + timestamp tail refreshes per user turn.
|
||||
dynamic_prompt_suffix_provider: Any = None
|
||||
dynamic_memory_provider: Any = None
|
||||
# Optional Callable[[], str]: when set, the current skills-catalog
|
||||
# prompt is sourced from this provider each iteration. Lets workers
|
||||
# pick up UI toggles without restarting the run. Queen agents already
|
||||
# rebuild the whole prompt via dynamic_prompt_provider — this field
|
||||
# is a surgical alternative used by colony workers where the rest of
|
||||
# the prompt stays constant and we don't want to thrash the cache.
|
||||
dynamic_skills_catalog_provider: Any = None
|
||||
|
||||
skills_catalog_prompt: str = ""
|
||||
protocols_prompt: str = ""
|
||||
|
||||
@@ -4,6 +4,7 @@ from __future__ import annotations
|
||||
|
||||
import json
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import UTC
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
@@ -47,6 +48,8 @@ class AgentEntry:
|
||||
tool_count: int = 0
|
||||
tags: list[str] = field(default_factory=list)
|
||||
last_active: str | None = None
|
||||
created_at: str | None = None
|
||||
icon: str | None = None
|
||||
workers: list[WorkerEntry] = field(default_factory=list)
|
||||
|
||||
|
||||
@@ -209,13 +212,26 @@ def discover_agents() -> dict[str, list[AgentEntry]]:
|
||||
name = config_fallback_name
|
||||
desc = ""
|
||||
|
||||
# Read colony metadata for queen provenance
|
||||
# Read colony metadata for queen provenance and timestamps
|
||||
colony_queen_name = ""
|
||||
colony_created_at: str | None = None
|
||||
colony_icon: str | None = None
|
||||
metadata_path = path / "metadata.json"
|
||||
if metadata_path.exists():
|
||||
try:
|
||||
mdata = json.loads(metadata_path.read_text(encoding="utf-8"))
|
||||
colony_queen_name = mdata.get("queen_name", "")
|
||||
colony_created_at = mdata.get("created_at")
|
||||
colony_icon = mdata.get("icon")
|
||||
except Exception:
|
||||
pass
|
||||
# Fallback: use directory creation time if metadata lacks created_at
|
||||
if not colony_created_at:
|
||||
try:
|
||||
from datetime import datetime
|
||||
|
||||
stat = path.stat()
|
||||
colony_created_at = datetime.fromtimestamp(stat.st_birthtime, tz=UTC).isoformat()
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
@@ -256,6 +272,8 @@ def discover_agents() -> dict[str, list[AgentEntry]]:
|
||||
tool_count=tool_count,
|
||||
tags=[],
|
||||
last_active=_get_last_active(path),
|
||||
created_at=colony_created_at,
|
||||
icon=colony_icon,
|
||||
workers=worker_entries,
|
||||
)
|
||||
)
|
||||
|
||||
@@ -0,0 +1,240 @@
|
||||
"""One-shot LLM gate that decides if a queen DM is ready to fork a colony.
|
||||
|
||||
The queen's ``start_incubating_colony`` tool calls :func:`evaluate` with
|
||||
the queen's recent conversation, a proposed ``colony_name``, and a
|
||||
one-paragraph ``intended_purpose``. The evaluator returns a structured
|
||||
verdict:
|
||||
|
||||
{
|
||||
"ready": bool,
|
||||
"reasons": [str],
|
||||
"missing_prerequisites": [str],
|
||||
}
|
||||
|
||||
On ``ready=False`` the queen receives the verdict as her tool result and
|
||||
self-corrects (asks the user, refines scope, drops the idea). On
|
||||
``ready=True`` the tool flips the queen's phase to ``incubating``.
|
||||
|
||||
Failure mode is **fail-closed**: any LLM error or unparseable response
|
||||
returns ``ready=False`` with reason ``"evaluation_failed"`` so the queen
|
||||
cannot accidentally proceed past a broken gate.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
import re
|
||||
from typing import Any
|
||||
|
||||
from framework.agent_loop.conversation import Message
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
_INCUBATING_EVALUATOR_SYSTEM_PROMPT = """\
|
||||
You gate whether a queen agent should commit to forking a persistent
|
||||
"colony" (a headless worker spec written to disk). Forking is
|
||||
expensive: it ends the user's chat with this queen and the worker runs
|
||||
unattended afterward, so the spec must be settled before you approve.
|
||||
|
||||
Read the conversation excerpt and the queen's proposed colony_name +
|
||||
intended_purpose, then decide.
|
||||
|
||||
APPROVE (ready=true) only when ALL of the following hold:
|
||||
1. The user has explicitly asked for work that needs to outlive this
|
||||
chat — recurring (cron / interval), monitoring + alert, scheduled
|
||||
batch, or "fire-and-forget background job". A one-shot question
|
||||
that the queen can answer in chat does NOT qualify.
|
||||
2. The scope of the work is concrete enough to write down — what
|
||||
inputs, what outputs, what success looks like. Vague ("help me
|
||||
with my workflow") does NOT qualify.
|
||||
3. The technical approach is at least sketched — what data sources,
|
||||
APIs, or tools the worker will use. The queen does not have to
|
||||
have written the SKILL.md yet, but she must have the operational
|
||||
ingredients available.
|
||||
4. There are no open clarifying questions on the table that the user
|
||||
hasn't answered. If the queen recently asked the user something
|
||||
and is still waiting, do NOT approve.
|
||||
|
||||
REJECT (ready=false) on any of:
|
||||
- Conversation is too short / too generic to support a settled spec.
|
||||
- User is still describing what they want.
|
||||
- User has expressed doubts, change-of-direction, or "let me think".
|
||||
- Work is one-shot and could be done in chat instead.
|
||||
- Open question awaiting user reply.
|
||||
|
||||
Reply with a JSON object exactly matching this shape:
|
||||
|
||||
{
|
||||
"ready": true | false,
|
||||
"reasons": ["short phrase", ...], // at least one entry
|
||||
"missing_prerequisites": ["short phrase", ...] // empty when ready
|
||||
}
|
||||
|
||||
``reasons`` explains the verdict in 1-3 short phrases.
|
||||
``missing_prerequisites`` lists what's missing in queen-actionable
|
||||
form ("user hasn't confirmed schedule", "no API auth flow discussed").
|
||||
Empty list when ``ready=true``.
|
||||
|
||||
Output JSON only. Do not wrap in markdown. Do not add prose.
|
||||
"""
|
||||
|
||||
|
||||
# Bound the formatted excerpt so the eval call stays cheap and fits well
|
||||
# under the LLM's context window even for long DM sessions.
|
||||
_MAX_MESSAGES = 30
|
||||
_MAX_TOOL_CONTENT_CHARS = 400
|
||||
_MAX_USER_CONTENT_CHARS = 2_000
|
||||
_MAX_ASSISTANT_CONTENT_CHARS = 2_000
|
||||
|
||||
|
||||
def format_conversation_excerpt(messages: list[Message]) -> str:
|
||||
"""Format the tail of a queen conversation for the evaluator prompt.
|
||||
|
||||
Keeps the most recent ``_MAX_MESSAGES`` messages. Tool results are
|
||||
truncated hard since they're rarely load-bearing for the readiness
|
||||
decision; user/assistant text is truncated more generously to
|
||||
preserve the actual conversation signal.
|
||||
"""
|
||||
if not messages:
|
||||
return "(no messages)"
|
||||
|
||||
tail = messages[-_MAX_MESSAGES:]
|
||||
parts: list[str] = []
|
||||
for msg in tail:
|
||||
role = msg.role.upper()
|
||||
content = (msg.content or "").strip()
|
||||
if msg.role == "tool":
|
||||
if len(content) > _MAX_TOOL_CONTENT_CHARS:
|
||||
content = content[:_MAX_TOOL_CONTENT_CHARS] + "..."
|
||||
elif msg.role == "assistant":
|
||||
# Surface tool-call intent for empty assistant turns so the
|
||||
# evaluator sees what the queen has been doing.
|
||||
if not content and msg.tool_calls:
|
||||
names = [tc.get("function", {}).get("name", "?") for tc in msg.tool_calls]
|
||||
content = f"(called: {', '.join(names)})"
|
||||
if len(content) > _MAX_ASSISTANT_CONTENT_CHARS:
|
||||
content = content[:_MAX_ASSISTANT_CONTENT_CHARS] + "..."
|
||||
else: # user
|
||||
if len(content) > _MAX_USER_CONTENT_CHARS:
|
||||
content = content[:_MAX_USER_CONTENT_CHARS] + "..."
|
||||
if content:
|
||||
parts.append(f"[{role}]: {content}")
|
||||
|
||||
return "\n\n".join(parts) if parts else "(no messages)"
|
||||
|
||||
|
||||
def _build_user_message(
|
||||
conversation_excerpt: str,
|
||||
colony_name: str,
|
||||
intended_purpose: str,
|
||||
) -> str:
|
||||
return (
|
||||
f"## Proposed colony name\n{colony_name}\n\n"
|
||||
f"## Queen's intended_purpose\n{intended_purpose.strip()}\n\n"
|
||||
f"## Recent conversation (oldest → newest)\n{conversation_excerpt}\n\n"
|
||||
"Decide: should this queen be approved to enter INCUBATING phase?"
|
||||
)
|
||||
|
||||
|
||||
def _parse_verdict(raw: str) -> dict[str, Any] | None:
|
||||
"""Parse the evaluator's JSON. Returns None if parsing fails."""
|
||||
if not raw:
|
||||
return None
|
||||
raw = raw.strip()
|
||||
try:
|
||||
return json.loads(raw)
|
||||
except json.JSONDecodeError:
|
||||
# Some models wrap JSON in markdown fences or add preamble.
|
||||
# Pull the first { ... } block out as a best-effort fallback —
|
||||
# mirrors the same recovery pattern used in recall_selector.py.
|
||||
match = re.search(r"\{.*\}", raw, re.DOTALL)
|
||||
if match:
|
||||
try:
|
||||
return json.loads(match.group())
|
||||
except json.JSONDecodeError:
|
||||
return None
|
||||
return None
|
||||
|
||||
|
||||
def _normalize_verdict(parsed: dict[str, Any]) -> dict[str, Any]:
|
||||
"""Coerce a parsed verdict into the shape the tool returns to the queen."""
|
||||
ready = bool(parsed.get("ready"))
|
||||
reasons = parsed.get("reasons") or []
|
||||
if isinstance(reasons, str):
|
||||
reasons = [reasons]
|
||||
reasons = [str(r).strip() for r in reasons if str(r).strip()]
|
||||
missing = parsed.get("missing_prerequisites") or []
|
||||
if isinstance(missing, str):
|
||||
missing = [missing]
|
||||
missing = [str(m).strip() for m in missing if str(m).strip()]
|
||||
|
||||
if ready:
|
||||
# When approved we don't surface missing prerequisites — the
|
||||
# incubating role prompt opens that floor itself.
|
||||
missing = []
|
||||
elif not reasons:
|
||||
# Always give the queen at least one reason to reflect on.
|
||||
reasons = ["evaluator returned no reasons"]
|
||||
|
||||
return {
|
||||
"ready": ready,
|
||||
"reasons": reasons,
|
||||
"missing_prerequisites": missing,
|
||||
}
|
||||
|
||||
|
||||
async def evaluate(
|
||||
llm: Any,
|
||||
messages: list[Message],
|
||||
colony_name: str,
|
||||
intended_purpose: str,
|
||||
) -> dict[str, Any]:
|
||||
"""Run the incubating evaluator against the queen's conversation.
|
||||
|
||||
Args:
|
||||
llm: An LLM provider exposing ``acomplete(messages, system, ...)``.
|
||||
Pass the queen's own ``ctx.llm`` so the eval uses the same
|
||||
model the user is talking to.
|
||||
messages: The queen's conversation messages, oldest first. The
|
||||
evaluator slices its own tail; pass the full list.
|
||||
colony_name: Validated colony slug.
|
||||
intended_purpose: Queen's one-paragraph brief.
|
||||
|
||||
Returns:
|
||||
``{"ready": bool, "reasons": [str], "missing_prerequisites": [str]}``.
|
||||
Fail-closed on any error.
|
||||
"""
|
||||
excerpt = format_conversation_excerpt(messages)
|
||||
user_msg = _build_user_message(excerpt, colony_name, intended_purpose)
|
||||
|
||||
try:
|
||||
response = await llm.acomplete(
|
||||
messages=[{"role": "user", "content": user_msg}],
|
||||
system=_INCUBATING_EVALUATOR_SYSTEM_PROMPT,
|
||||
max_tokens=1024,
|
||||
response_format={"type": "json_object"},
|
||||
)
|
||||
except Exception as exc: # noqa: BLE001 - fail-closed on any LLM failure
|
||||
logger.warning("incubating_evaluator: LLM call failed (%s)", exc)
|
||||
return {
|
||||
"ready": False,
|
||||
"reasons": ["evaluation_failed"],
|
||||
"missing_prerequisites": ["evaluator LLM call failed; retry once the queen can reach the model again"],
|
||||
}
|
||||
|
||||
raw = (getattr(response, "content", "") or "").strip()
|
||||
parsed = _parse_verdict(raw)
|
||||
if parsed is None:
|
||||
logger.warning(
|
||||
"incubating_evaluator: could not parse JSON verdict (raw=%.200s)",
|
||||
raw,
|
||||
)
|
||||
return {
|
||||
"ready": False,
|
||||
"reasons": ["evaluation_failed"],
|
||||
"missing_prerequisites": ["evaluator returned malformed JSON; retry"],
|
||||
}
|
||||
|
||||
return _normalize_verdict(parsed)
|
||||
@@ -4,7 +4,6 @@ import re
|
||||
|
||||
from framework.orchestrator import NodeSpec
|
||||
|
||||
|
||||
# Wraps prompt sections that should only be shown to vision-capable models.
|
||||
# Content inside `<!-- vision-only -->...<!-- /vision-only -->` is kept for
|
||||
# vision models and stripped for text-only models. Applied once per session
|
||||
@@ -48,9 +47,27 @@ _QUEEN_INDEPENDENT_TOOLS = [
|
||||
"undo_changes",
|
||||
# NOTE (2026-04-16): ``run_parallel_workers`` is not in the DM phase.
|
||||
# Pure DM is for conversation with the user; fan out parallel work via
|
||||
# ``create_colony`` (forks into a persistent colony with its own page
|
||||
# and phase machine).
|
||||
# ``start_incubating_colony`` (which gates the colony fork behind a
|
||||
# readiness eval before exposing create_colony in INCUBATING phase).
|
||||
"start_incubating_colony",
|
||||
]
|
||||
|
||||
# Incubating phase: queen has been approved by the incubating_evaluator to
|
||||
# fork into a colony. Tool surface is intentionally small — the queen's job
|
||||
# in this phase is to nail the operational spec (concurrency, schedule,
|
||||
# result tracking, credentials) and write a tight task + SKILL.md, not to
|
||||
# keep doing work. Read-only file tools are kept so she can confirm details
|
||||
# (e.g. inspect an existing skill) before committing.
|
||||
_QUEEN_INCUBATING_TOOLS = [
|
||||
"read_file",
|
||||
"list_directory",
|
||||
"search_files",
|
||||
"run_command",
|
||||
# Schedule lives on the colony, not on the queen session — pass it
|
||||
# inline as create_colony(triggers=[...]) instead of staging through
|
||||
# set_trigger here.
|
||||
"create_colony",
|
||||
"cancel_incubation",
|
||||
]
|
||||
|
||||
# Working phase: colony workers are running. Queen monitors, replies
|
||||
@@ -71,10 +88,6 @@ _QUEEN_WORKING_TOOLS = [
|
||||
"stop_worker",
|
||||
# Fan out more tasks while workers are still running
|
||||
"run_parallel_workers",
|
||||
# Trigger management
|
||||
"set_trigger",
|
||||
"remove_trigger",
|
||||
"list_triggers",
|
||||
]
|
||||
|
||||
# Reviewing phase: workers have finished. Queen summarises results,
|
||||
@@ -103,8 +116,6 @@ _QUEEN_REVIEWING_TOOLS = [
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
_queen_character_core = """\
|
||||
You are the advisor defined in <core_identity> above. Stay in character.
|
||||
|
||||
Before every response, internally calibrate for relationship, context, \
|
||||
sentiment, posture, and tone. Keep that assessment private. Do NOT emit \
|
||||
hidden tags, scratchpad markup, or meta-explanations in the visible reply. \
|
||||
@@ -126,19 +137,84 @@ You have full coding tools (read/write/edit/search/run) and MCP tools \
|
||||
(file operations via coder-tools, browser automation via gcu-tools). \
|
||||
Execute the user's task directly using conversation and tools. \
|
||||
You are the agent. \
|
||||
If the user opens with a greeting or chat, reply in plain prose in \
|
||||
character first — check recall memory for name and past topics and weave \
|
||||
them in. If you need a structured choice or approval gate, always use \
|
||||
ask_user or ask_user_multiple; otherwise ask in plain prose. \
|
||||
If you need a structured choice or approval gate, always use \
|
||||
``ask_user``; otherwise ask in plain prose. ``ask_user`` takes a \
|
||||
``questions`` array — pass a single entry for one question, or batch \
|
||||
several entries when you have multiple clarifications. \
|
||||
\
|
||||
When the user clearly wants persistent / recurring / headless work that \
|
||||
needs to outlive THIS chat (e.g. "every morning", "monitor X and alert \
|
||||
me", "set up a job that…"), call ``start_incubating_colony`` with a \
|
||||
proposed colony_name and a one-paragraph intended_purpose. A side \
|
||||
evaluator reads the conversation and decides if the spec is settled. If \
|
||||
it returns ``not_ready`` you keep talking with the user — sort out \
|
||||
whatever the evaluator said is missing, then retry. If it returns \
|
||||
``incubating`` your phase flips and a new prompt takes over. Do not \
|
||||
try to write SKILL.md, fork directories, or otherwise build the colony \
|
||||
yourself in this phase.\
|
||||
"""
|
||||
|
||||
_queen_role_incubating = """\
|
||||
You are in INCUBATING mode. The incubating evaluator has approved you to \
|
||||
fork colony ``{colony_name}`` and you are now drafting the spec. Your \
|
||||
ONLY job in this phase: produce a self-contained ``task`` description \
|
||||
and ``SKILL.md`` body that lets a fresh worker, who has zero memory of \
|
||||
this chat, do the work unattended. Do not start doing the work yourself \
|
||||
— the coding toolkit is gone on purpose so you can focus.
|
||||
|
||||
Before you call ``create_colony``, sort out the operational details that \
|
||||
conversation tends to skip. The "Approved → operational checklist" block \
|
||||
in your tools doc lists the kinds of things to think about (concurrency, \
|
||||
schedule, result-tracking, failure handling, credentials). Treat that \
|
||||
list as prompts for YOUR judgement — only ask the user about the items \
|
||||
that actually matter for THIS colony and that the conversation hasn't \
|
||||
already settled. Use ``ask_user`` (pass a ``questions`` array — batch \
|
||||
several entries for multi-question turns) for the gaps; plain prose for \
|
||||
everything else.
|
||||
|
||||
If you realise mid-incubation that the spec isn't ready (user changed \
|
||||
their mind, you're missing more than a couple of details, the work \
|
||||
turned out to be one-shot after all), call ``cancel_incubation`` — \
|
||||
no harm, you go back to INDEPENDENT and can retry later.
|
||||
|
||||
If the user explicitly asks for something UNRELATED to the current \
|
||||
colony being drafted (a side question, a one-shot task, a different \
|
||||
problem), don't try to handle it from this limited tool surface. Call \
|
||||
``cancel_incubation`` first to switch back to INDEPENDENT where you \
|
||||
have the full toolkit, handle their request there, and re-enter \
|
||||
INCUBATING later via ``start_incubating_colony`` when they want to \
|
||||
resume the colony spec.
|
||||
"""
|
||||
|
||||
_queen_role_working = """\
|
||||
You are in WORKING mode. Your colony has workers executing right now. \
|
||||
Your job: monitor progress, answer worker escalations through \
|
||||
reply_to_worker, and fan out more tasks with run_parallel_workers if \
|
||||
the user asks. Keep the user informed when they ask; do NOT poll the \
|
||||
workers just to have something to say. If the user greets you \
|
||||
mid-run, reply in prose and wait for their next message.
|
||||
You are in WORKING mode. The colony's spec was settled during \
|
||||
INCUBATING; workers are executing that spec now. Your role here is \
|
||||
operational presence, not direction — think on-call engineer for a \
|
||||
running deployment, not architect of a new one.
|
||||
|
||||
What you DO in this phase:
|
||||
- Be available for worker escalations (reply_to_worker on items in \
|
||||
list_worker_questions).
|
||||
- Surface progress when the user asks for it (get_worker_status), or \
|
||||
when something concrete is worth flagging (a notable failure, a \
|
||||
worker stuck on a question that needs them).
|
||||
- Intervene when a worker is clearly off course (inject_message) or \
|
||||
needs to stop (stop_worker).
|
||||
- Make SPEC-COMPATIBLE adjustments when the user asks — fan out MORE \
|
||||
of the same work (run_parallel_workers). This is a tweak to the spec \
|
||||
the user already approved, not a redesign. Scheduled / recurring \
|
||||
work belongs to a colony; if the user wants to add or change a \
|
||||
schedule, that's a new colony.
|
||||
|
||||
What you DO NOT do in this phase:
|
||||
- Redesign the colony. If the user asks for something fundamentally \
|
||||
new (different scope, different skill, different problem), say so \
|
||||
plainly: "this colony is for X — for that we'd need a fresh chat \
|
||||
with me, where I can incubate a new colony." A new colony is born \
|
||||
in INDEPENDENT via start_incubating_colony, and you cannot reach \
|
||||
that from inside a colony.
|
||||
- Drive the conversation. Do not poll workers just to have something \
|
||||
to say. If the user greets you mid-run, reply in prose and wait.
|
||||
"""
|
||||
|
||||
_queen_role_reviewing = """\
|
||||
@@ -168,38 +244,137 @@ search_files, run_command, undo_changes
|
||||
browser_tabs, browser_close, browser_evaluate, etc.).
|
||||
- MUST Follow the browser-automation skill protocol before using browser tools.
|
||||
|
||||
## Persistent colony
|
||||
- create_colony(colony_name, task, skill_path) — Fork this session into a \
|
||||
persistent colony for headless / recurring / background work. The colony \
|
||||
has its own chat surface and runs `run_parallel_workers` from there.
|
||||
- `skill_path` must point to a pre-authored skill folder with `SKILL.md`; \
|
||||
author it in a scratch location first, then call `create_colony`.
|
||||
- **Two-step flow:**
|
||||
1. Write a skill folder with `SKILL.md` in a scratch location.
|
||||
2. Call `create_colony(colony_name, task, skill_path)` with a FULL, \
|
||||
self-contained task.
|
||||
- The tool validates and installs the skill, forks this session into a \
|
||||
colony, and stores the task for later. Nothing runs immediately after \
|
||||
the call.
|
||||
- The task must be FULL and self-contained because the future worker run \
|
||||
cannot rely on this live chat turn for missing context.
|
||||
## Hand off to a colony
|
||||
- start_incubating_colony(colony_name, intended_purpose) — Use this when \
|
||||
the user wants persistent / recurring / headless work that needs to \
|
||||
outlive THIS chat. It does NOT fork on its own; it spawns a one-shot \
|
||||
evaluator that reads this conversation and decides whether the spec \
|
||||
is settled enough to proceed. On approval your phase flips to \
|
||||
INCUBATING and a new tool surface (including create_colony itself) \
|
||||
unlocks. On rejection you stay here and keep the conversation going \
|
||||
to fill the gaps the evaluator named.
|
||||
- ``intended_purpose`` is a one-paragraph brief: what the colony will \
|
||||
do, on what cadence, why it must outlive this chat. Don't write a \
|
||||
SKILL.md here — that comes in INCUBATING.
|
||||
"""
|
||||
|
||||
_queen_tools_incubating = """
|
||||
# Tools (INCUBATING mode)
|
||||
|
||||
You've been approved to fork. The full coding toolkit is gone on \
|
||||
purpose — your job in this phase is to nail the spec, not keep doing \
|
||||
work. Available:
|
||||
|
||||
## Read-only inspection (coder-tools MCP)
|
||||
- read_file, list_directory, search_files, run_command — for confirming \
|
||||
details before you commit (e.g. peek at an existing skill in \
|
||||
~/.hive/skills/, sanity-check an API URL).
|
||||
|
||||
## Approved → operational checklist (use your judgement, ask only what's missing)
|
||||
The conversation that got you here probably did NOT cover all of:
|
||||
- Concurrency: how many tasks should run in parallel? Single-fire?
|
||||
- Schedule: cron expression, interval (every N minutes), webhook, \
|
||||
manual-only?
|
||||
- Result tracking: what should the worker write into ``progress.db`` so \
|
||||
the user can review later? Per-task status, summary, raw payload?
|
||||
- Failure handling: retry, alert, mark-failed-and-continue?
|
||||
- Credentials and MCP servers: what does the worker need that you \
|
||||
haven't discussed (API keys, OAuth, browser profile)?
|
||||
- Skills the worker needs beyond the one you'll write inline.
|
||||
|
||||
These are PROMPTS for your judgement, not a required checklist. Cover \
|
||||
the items that actually matter for THIS colony, and only the ones the \
|
||||
user hasn't already implied. Use ``ask_user`` (batch several questions \
|
||||
into one call when you have multiple gaps) for answers you need; skip \
|
||||
the rest.
|
||||
|
||||
## Commit
|
||||
- create_colony(colony_name, task, skill_name, skill_description, \
|
||||
skill_body, skill_files?, tasks?, concurrency_hint?, triggers?) — \
|
||||
Fork this session into the colony. **Atomic call — pass the skill \
|
||||
AND the schedule INLINE.** Do NOT write SKILL.md with write_file \
|
||||
beforehand; this tool materialises the folder for you and then \
|
||||
forks. Reusing an existing skill_name within the colony replaces \
|
||||
that skill with your latest content.
|
||||
- The ``task`` must be FULL and self-contained — the worker has zero \
|
||||
memory of THIS chat at run time.
|
||||
- The ``skill_body`` must be FULL and self-contained — capture the \
|
||||
operational protocol (endpoints, auth, gotchas, pre-baked queries) \
|
||||
so the worker doesn't have to rediscover what you already know.
|
||||
- ``concurrency_hint`` (optional integer ≥ 1) — advisory cap on how \
|
||||
many worker processes typically run in parallel for this colony \
|
||||
(e.g. 1 for "send digest", 5 for a fan-out). Baked into worker.json \
|
||||
for the future colony queen to consult; not enforced.
|
||||
- ``triggers`` (optional array) — the colony's schedule, written \
|
||||
inline to ``triggers.json`` and auto-started on first colony load. \
|
||||
Pass this when the work is recurring / event-driven; omit for \
|
||||
colonies the user will run by clicking start. Each entry: \
|
||||
``{id, trigger_type, trigger_config, task}`` where trigger_type is \
|
||||
"timer" (config ``{cron: "0 9 * * *"}`` or ``{interval_minutes: N}``) \
|
||||
or "webhook" (config ``{path: "/hooks/..."}``). Each entry's \
|
||||
``task`` is what the worker does when THAT trigger fires — separate \
|
||||
from the colony-wide ``task`` argument, which is the worker's \
|
||||
overall purpose. Validated up front — a bad cron, missing task, or \
|
||||
malformed webhook path fails the call before anything is written, \
|
||||
so you can retry with corrected input.
|
||||
- After this returns, the chat is over: the session locks immediately \
|
||||
and the user gets a "compact and start a new session with you" \
|
||||
button. So make your call to create_colony the last thing you do — \
|
||||
one closing message to the user is fine, but expect the next user \
|
||||
input to land in a fresh forked session, not this one.
|
||||
|
||||
## Bail
|
||||
- cancel_incubation() — Call when the spec isn't ready after all (user \
|
||||
changed their mind, you discovered the work is actually one-shot, \
|
||||
more than a couple of details still need to be worked out). Returns \
|
||||
you to INDEPENDENT with the full toolkit; no fork happens.
|
||||
- Also call cancel_incubation() if the user explicitly pivots to \
|
||||
something UNRELATED to this colony (side question, one-shot ask, \
|
||||
different problem). You can't serve that from this narrow toolkit — \
|
||||
drop back to INDEPENDENT, handle it, then re-enter incubation via \
|
||||
start_incubating_colony when they're ready to resume the spec.
|
||||
"""
|
||||
|
||||
_queen_tools_working = """
|
||||
# Tools (WORKING mode)
|
||||
|
||||
Workers are running in your colony. You have:
|
||||
- Read-only: read_file, list_directory, search_files, run_command
|
||||
- get_worker_status(focus?) — Poll latest progress / issues
|
||||
- inject_message(content) — Send guidance to a running worker
|
||||
- list_worker_questions() / reply_to_worker(request_id, reply) — Answer escalations
|
||||
- stop_worker() — Stop a worker early
|
||||
- run_parallel_workers(tasks, timeout?) — Fan out MORE parallel tasks on \
|
||||
top of what's already running (each task string must be fully self-contained)
|
||||
- set_trigger / remove_trigger / list_triggers — Timer management
|
||||
The colony's spec was committed during INCUBATING. Your tools here are \
|
||||
operational, not editorial.
|
||||
|
||||
When every worker has reported (success or failure), the phase auto-moves \
|
||||
to REVIEWING. You do not need to call a transition tool yourself.
|
||||
## Stay informed (only when asked, or when something matters)
|
||||
- get_worker_status(focus?) — Pull progress / issues for the user.
|
||||
- list_worker_questions() — Check the escalation inbox.
|
||||
|
||||
## Respond
|
||||
- reply_to_worker(request_id, reply) — Answer a worker escalation.
|
||||
- inject_message(content) — Course-correct a running worker (e.g. it's \
|
||||
heading the wrong way and the user wants it redirected).
|
||||
|
||||
## Intervene
|
||||
- stop_worker() — Kill switch for a runaway or no-longer-needed worker.
|
||||
|
||||
## Spec-compatible adjustments
|
||||
- run_parallel_workers(tasks, timeout?) — Fan out MORE of the same \
|
||||
work. Use when the user wants additional units of an already-defined \
|
||||
job, NOT for new scope. Each task string must be fully self-contained.
|
||||
- Scheduled / recurring work belongs to a colony, not this session. \
|
||||
If the user wants to add or change a schedule, that's a new colony \
|
||||
born from a fresh chat via start_incubating_colony.
|
||||
|
||||
## Read-only inspection
|
||||
- read_file, list_directory, search_files, run_command
|
||||
|
||||
When every worker has reported (success or failure), the phase \
|
||||
auto-moves to REVIEWING. You do not need to call a transition tool \
|
||||
yourself.
|
||||
|
||||
## What does NOT belong here
|
||||
A request like "actually let's also do X" with X being a new scope, \
|
||||
new skill, or different problem is a NEW COLONY, not an extension of \
|
||||
this one. Tell the user plainly: "this colony is for the work we \
|
||||
already started — for that we'd need a fresh chat with me, where I \
|
||||
can incubate a new colony." You cannot create a new colony from \
|
||||
inside a colony.
|
||||
"""
|
||||
|
||||
_queen_tools_reviewing = """
|
||||
@@ -250,12 +425,19 @@ Check recall memory for name / role / past topics and weave them into \
|
||||
a 1–2 sentence in-character greeting, then wait.
|
||||
- On a clear ask (build, edit, run, investigate, search), call the \
|
||||
appropriate tool on the same turn — don't narrate intent and stop.
|
||||
- Use `ask_user` / `ask_user_multiple` only for structured choices \
|
||||
(approvals, 2–4 concrete options like "Postgres or SQLite?"). \
|
||||
Free-form questions belong in prose; reaching for `ask_user` on \
|
||||
every reply blocks natural conversation.
|
||||
- You are curious to understand the user. Use `ask_user` when the user's \
|
||||
response is needed to continue: to resolve ambiguity, collect missing \
|
||||
information, request approval, compare real trade-offs, gather post-task \
|
||||
feedback, or offer to save a skill or update memory. Pass one or more \
|
||||
questions in the ``questions`` array. Keep each ``prompt`` plain text only; \
|
||||
do not include XML, pseudo-tags, or inline option lists. Provide concrete \
|
||||
``options`` when the user should choose, set ``multiSelect: true`` when \
|
||||
multiple selections are valid, and put the recommended option first with \
|
||||
``(Recommended)`` in its label. Omit ``options`` only when a truly free-form \
|
||||
typed answer is required, such as an idea description or pasted error. Do not \
|
||||
repeat the same questions in normal reply text; the widget renders them.
|
||||
- Images attached by the user are analyzed directly via your vision \
|
||||
capability — no tool call needed.
|
||||
capability and no tool call needed.
|
||||
"""
|
||||
|
||||
_queen_memory_instructions = """
|
||||
@@ -288,9 +470,9 @@ queen_node = NodeSpec(
|
||||
id="queen",
|
||||
name="Queen",
|
||||
description=(
|
||||
"User's primary interactive interface. Operates in DM (independent) "
|
||||
"or colony mode (working / reviewing) depending on whether workers "
|
||||
"have been spawned."
|
||||
"User's primary interactive interface. Operates in DM (independent), "
|
||||
"colony-spec drafting (incubating), or colony mode (working / "
|
||||
"reviewing) depending on whether workers have been spawned."
|
||||
),
|
||||
node_type="event_loop",
|
||||
max_node_visits=0,
|
||||
@@ -299,11 +481,7 @@ queen_node = NodeSpec(
|
||||
nullable_output_keys=[], # Queen should never have this
|
||||
skip_judge=True, # Queen is a conversational agent; suppress tool-use pressure feedback
|
||||
tools=sorted(
|
||||
set(
|
||||
_QUEEN_INDEPENDENT_TOOLS
|
||||
+ _QUEEN_WORKING_TOOLS
|
||||
+ _QUEEN_REVIEWING_TOOLS
|
||||
)
|
||||
set(_QUEEN_INDEPENDENT_TOOLS + _QUEEN_INCUBATING_TOOLS + _QUEEN_WORKING_TOOLS + _QUEEN_REVIEWING_TOOLS)
|
||||
),
|
||||
system_prompt=(
|
||||
_queen_character_core
|
||||
@@ -316,25 +494,24 @@ queen_node = NodeSpec(
|
||||
)
|
||||
|
||||
ALL_QUEEN_TOOLS = sorted(
|
||||
set(
|
||||
_QUEEN_INDEPENDENT_TOOLS
|
||||
+ _QUEEN_WORKING_TOOLS
|
||||
+ _QUEEN_REVIEWING_TOOLS
|
||||
)
|
||||
set(_QUEEN_INDEPENDENT_TOOLS + _QUEEN_INCUBATING_TOOLS + _QUEEN_WORKING_TOOLS + _QUEEN_REVIEWING_TOOLS)
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
"queen_node",
|
||||
"ALL_QUEEN_TOOLS",
|
||||
"_QUEEN_INDEPENDENT_TOOLS",
|
||||
"_QUEEN_INCUBATING_TOOLS",
|
||||
"_QUEEN_WORKING_TOOLS",
|
||||
"_QUEEN_REVIEWING_TOOLS",
|
||||
# Character + phase-specific prompt segments (used by queen_orchestrator for dynamic prompts)
|
||||
"_queen_character_core",
|
||||
"_queen_role_independent",
|
||||
"_queen_role_incubating",
|
||||
"_queen_role_working",
|
||||
"_queen_role_reviewing",
|
||||
"_queen_tools_independent",
|
||||
"_queen_tools_incubating",
|
||||
"_queen_tools_working",
|
||||
"_queen_tools_reviewing",
|
||||
"_queen_behavior_always",
|
||||
|
||||
@@ -1099,12 +1099,17 @@ def ensure_default_queens() -> None:
|
||||
|
||||
Safe to call multiple times — skips any profile that already has a file.
|
||||
"""
|
||||
created = 0
|
||||
for queen_id, profile in DEFAULT_QUEENS.items():
|
||||
queen_dir = QUEENS_DIR / queen_id
|
||||
profile_path = queen_dir / "profile.yaml"
|
||||
if profile_path.exists():
|
||||
continue
|
||||
queen_dir.mkdir(parents=True, exist_ok=True)
|
||||
profile_path.write_text(yaml.safe_dump(profile, sort_keys=False, allow_unicode=True))
|
||||
logger.info("Queen profiles ensured at %s", QUEENS_DIR)
|
||||
created += 1
|
||||
if created:
|
||||
logger.info("Created %d default queen profile(s) at %s", created, QUEENS_DIR)
|
||||
|
||||
|
||||
def list_queens() -> list[dict[str, str]]:
|
||||
@@ -1143,6 +1148,10 @@ def load_queen_profile(queen_id: str) -> dict[str, Any]:
|
||||
def update_queen_profile(queen_id: str, updates: dict[str, Any]) -> dict[str, Any]:
|
||||
"""Merge partial updates into an existing queen profile and persist.
|
||||
|
||||
Performs a shallow merge at the top level, but deep-merges dict values
|
||||
(e.g. world_lore, hidden_background) so partial sub-field updates don't
|
||||
clobber sibling keys.
|
||||
|
||||
Returns the full updated profile.
|
||||
Raises FileNotFoundError if the profile doesn't exist.
|
||||
"""
|
||||
@@ -1150,7 +1159,11 @@ def update_queen_profile(queen_id: str, updates: dict[str, Any]) -> dict[str, An
|
||||
if not profile_path.exists():
|
||||
raise FileNotFoundError(f"Queen profile not found: {queen_id}")
|
||||
data = yaml.safe_load(profile_path.read_text())
|
||||
data.update(updates)
|
||||
for key, value in updates.items():
|
||||
if isinstance(value, dict) and isinstance(data.get(key), dict):
|
||||
data[key].update(value)
|
||||
else:
|
||||
data[key] = value
|
||||
profile_path.write_text(yaml.safe_dump(data, sort_keys=False, allow_unicode=True))
|
||||
return data
|
||||
|
||||
@@ -1160,9 +1173,38 @@ def update_queen_profile(queen_id: str, updates: dict[str, Any]) -> dict[str, An
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def format_queen_identity_prompt(
|
||||
profile: dict[str, Any], *, max_examples: int | None = None
|
||||
) -> str:
|
||||
def _as_clean_text(value: Any) -> str:
|
||||
"""Return a stripped string, or an empty string for non-string values."""
|
||||
return value.strip() if isinstance(value, str) else ""
|
||||
|
||||
|
||||
def _sentence(value: Any) -> str:
|
||||
text = _as_clean_text(value)
|
||||
if not text:
|
||||
return ""
|
||||
return text if text.endswith((".", "!", "?")) else f"{text}."
|
||||
|
||||
|
||||
def _profile_text_to_instruction(text: Any) -> str:
|
||||
instruction = _sentence(text)
|
||||
replacements = {
|
||||
"She thrives": "You thrive",
|
||||
"she thrives": "you thrive",
|
||||
"She's ": "You are ",
|
||||
"she's ": "you are ",
|
||||
"She is ": "You are ",
|
||||
"she is ": "you are ",
|
||||
"She ": "You ",
|
||||
"she ": "you ",
|
||||
"Her ": "Your ",
|
||||
"her ": "your ",
|
||||
}
|
||||
for old, new in replacements.items():
|
||||
instruction = instruction.replace(old, new)
|
||||
return instruction
|
||||
|
||||
|
||||
def format_queen_identity_prompt(profile: dict[str, Any], *, max_examples: int | None = None) -> str:
|
||||
"""Convert a queen profile into a high-dimensional character prompt.
|
||||
|
||||
Uses the 5-pillar character construction system: core identity,
|
||||
@@ -1188,35 +1230,35 @@ def format_queen_identity_prompt(
|
||||
sections: list[str] = []
|
||||
|
||||
# Pillar 1: Core identity
|
||||
sections.append(f"<core_identity>\nName: {name}, Identity: {title}.\n{core}\n</core_identity>")
|
||||
sections.append(f"<core_identity>\nYou are {name}, {title}.\n{core}\n</core_identity>")
|
||||
|
||||
# Pillar 2: Hidden background (behavioral engine, never surfaced)
|
||||
if bg:
|
||||
sections.append(
|
||||
f"<hidden_background>\n"
|
||||
f"(Strictly hidden from users -- acts as your underlying "
|
||||
f"behavioral engine)\n"
|
||||
"<hidden_background>\n"
|
||||
"(Strictly hidden from users -- acts as your underlying "
|
||||
"behavioral engine)\n"
|
||||
f"- Past Wound: {bg.get('past_wound', '')}\n"
|
||||
f"- Deep Motive: {bg.get('deep_motive', '')}\n"
|
||||
f"- Behavioral Mapping: {bg.get('behavioral_mapping', '')}\n"
|
||||
f"</hidden_background>"
|
||||
"</hidden_background>"
|
||||
)
|
||||
|
||||
# Pillar 3: Psychological profile
|
||||
if psych:
|
||||
sections.append(
|
||||
f"<psychological_profile>\n"
|
||||
f"- Social Masks & Boundaries: "
|
||||
"<psychological_profile>\n"
|
||||
"- Social Masks & Boundaries: "
|
||||
f"{psych.get('social_masks', '')}\n"
|
||||
f"- Anti-Stereotype Rules: "
|
||||
f"{psych.get('anti_stereotype', '')}\n"
|
||||
f"</psychological_profile>"
|
||||
"- Anti-Stereotype Rules: "
|
||||
f"{_profile_text_to_instruction(psych.get('anti_stereotype'))}\n"
|
||||
"</psychological_profile>"
|
||||
)
|
||||
|
||||
# Pillar 4: Behavior rules
|
||||
trigger_lines = []
|
||||
for t in triggers:
|
||||
trigger_lines.append(f" - [{t.get('trigger', '')}]: {t.get('reaction', '')}")
|
||||
trigger_lines.append(f" - [{t.get('trigger', '')}]: {_profile_text_to_instruction(t.get('reaction'))}")
|
||||
sections.append(
|
||||
"<behavior_rules>\n"
|
||||
"- Before each response, internally assess:\n"
|
||||
|
||||
@@ -0,0 +1,217 @@
|
||||
"""Per-queen tool configuration sidecar (``tools.json``).
|
||||
|
||||
Lives at ``~/.hive/agents/queens/{queen_id}/tools.json`` alongside
|
||||
``profile.yaml``. Kept separate so identity (name, title, core traits)
|
||||
stays human-authored and lean, while the machine-managed tool allowlist
|
||||
can grow (per-tool overrides, audit timestamps, future per-phase rules)
|
||||
without bloating the profile.
|
||||
|
||||
Schema::
|
||||
|
||||
{
|
||||
"enabled_mcp_tools": ["read_file", ...] | null,
|
||||
"updated_at": "2026-04-21T12:34:56+00:00"
|
||||
}
|
||||
|
||||
- ``null`` / missing file → default "allow every MCP tool".
|
||||
- ``[]`` → explicitly disable every MCP tool.
|
||||
- ``["foo", "bar"]`` → only those MCP tool names pass the filter.
|
||||
|
||||
Atomic writes via ``os.replace`` follow the same pattern as
|
||||
``framework.host.colony_metadata.update_colony_metadata``.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import tempfile
|
||||
from datetime import UTC, datetime
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
import yaml
|
||||
|
||||
from framework.config import QUEENS_DIR
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def tools_config_path(queen_id: str) -> Path:
    """Compute where this queen's ``tools.json`` sidecar lives on disk."""
    queen_dir = QUEENS_DIR / queen_id
    return queen_dir / "tools.json"
|
||||
|
||||
|
||||
def _atomic_write_json(path: Path, data: dict[str, Any]) -> None:
    """Write ``data`` to ``path`` atomically via tempfile + replace."""
    # Make sure the destination directory exists before staging the tempfile.
    path.parent.mkdir(parents=True, exist_ok=True)
    # Stage in the SAME directory as the target so os.replace() below is a
    # same-filesystem rename (cross-filesystem renames are not atomic).
    fd, tmp = tempfile.mkstemp(
        prefix=".tools.",
        suffix=".json.tmp",
        dir=str(path.parent),
    )
    try:
        with os.fdopen(fd, "w", encoding="utf-8") as fh:
            json.dump(data, fh, indent=2)
            fh.flush()
            # fsync before the rename so the payload is durable on disk
            # before it becomes visible at ``path``.
            os.fsync(fh.fileno())
        os.replace(tmp, path)
    except BaseException:
        # Remove the orphaned tempfile on ANY failure (including
        # KeyboardInterrupt / SystemExit), then re-raise for the caller.
        try:
            os.unlink(tmp)
        except OSError:
            pass
        raise
|
||||
|
||||
|
||||
def _migrate_from_profile_if_needed(queen_id: str) -> list[str] | None:
    """Hoist a legacy ``enabled_mcp_tools`` field out of ``profile.yaml``.

    Returns the migrated value (or ``None`` if nothing to migrate). After
    migration the sidecar exists on disk and the profile YAML no longer
    contains ``enabled_mcp_tools``. Safe to call repeatedly.
    """
    profile_path = QUEENS_DIR / queen_id / "profile.yaml"
    if not profile_path.exists():
        # No profile on disk — nothing to migrate.
        return None
    try:
        data = yaml.safe_load(profile_path.read_text(encoding="utf-8"))
    except (yaml.YAMLError, OSError):
        # Best-effort: an unreadable profile is logged, never fatal.
        logger.warning("Could not read profile.yaml during tools migration: %s", queen_id)
        return None
    if not isinstance(data, dict):
        return None
    if "enabled_mcp_tools" not in data:
        # Already migrated, or the legacy field never existed.
        return None

    # pop() removes the legacy key so the profile rewrite below drops it.
    raw = data.pop("enabled_mcp_tools")
    enabled: list[str] | None
    if raw is None:
        enabled = None
    elif isinstance(raw, list) and all(isinstance(x, str) for x in raw):
        enabled = raw
    else:
        # Malformed legacy value: warn and degrade to allow-all (None).
        logger.warning(
            "Legacy enabled_mcp_tools on queen %s had unexpected shape %r; dropping",
            queen_id,
            raw,
        )
        enabled = None

    # Write sidecar first, then rewrite profile — if the second step
    # fails we still have the config available and won't re-migrate.
    _atomic_write_json(
        tools_config_path(queen_id),
        {
            "enabled_mcp_tools": enabled,
            "updated_at": datetime.now(UTC).isoformat(),
        },
    )
    profile_path.write_text(
        yaml.safe_dump(data, sort_keys=False, allow_unicode=True),
        encoding="utf-8",
    )
    logger.info(
        "Migrated enabled_mcp_tools for queen %s from profile.yaml to tools.json",
        queen_id,
    )
    return enabled
|
||||
|
||||
|
||||
def tools_config_exists(queen_id: str) -> bool:
    """Report whether this queen has a persisted ``tools.json`` sidecar.

    Lets callers distinguish an explicit user-saved allowlist from a
    fallthrough to the role-based default, since both can come back as
    the same value from ``load_queen_tools_config``.
    """
    sidecar = tools_config_path(queen_id)
    return sidecar.exists()
|
||||
|
||||
|
||||
def delete_queen_tools_config(queen_id: str) -> bool:
    """Remove the queen's ``tools.json`` sidecar when present.

    Returns ``True`` if a file was deleted, ``False`` otherwise (missing
    file, or an OS error during unlink). Once the sidecar is gone, the
    next ``load_queen_tools_config`` call resolves to the role-based
    default (or allow-all for unknown queens).
    """
    sidecar = tools_config_path(queen_id)
    if not sidecar.exists():
        return False
    try:
        sidecar.unlink()
    except OSError:
        logger.warning("Failed to delete %s", sidecar, exc_info=True)
        return False
    return True
|
||||
|
||||
|
||||
def load_queen_tools_config(
    queen_id: str,
    mcp_catalog: dict[str, list[dict]] | None = None,
) -> list[str] | None:
    """Return the queen's MCP tool allowlist, or ``None`` for default-allow.

    Order of resolution:

    1. ``tools.json`` sidecar (authoritative; user has saved).
    2. Legacy ``profile.yaml`` field (migrated and deleted on first read).
    3. Role-based default from ``queen_tools_defaults`` when the queen
       is in the known persona table. ``mcp_catalog`` lets the helper
       expand ``@server:NAME`` shorthands; without it, shorthand entries
       are dropped.
    4. ``None`` — default "allow every MCP tool".
    """
    path = tools_config_path(queen_id)
    if path.exists():
        try:
            data = json.loads(path.read_text(encoding="utf-8"))
        except (json.JSONDecodeError, OSError):
            # A corrupt sidecar must not lock the queen out of her tools.
            logger.warning("Invalid %s; treating as default-allow", path)
            return None
        if not isinstance(data, dict):
            return None
        raw = data.get("enabled_mcp_tools")
        if raw is None:
            # Explicit null in the sidecar means "allow everything".
            return None
        if isinstance(raw, list) and all(isinstance(x, str) for x in raw):
            return raw
        logger.warning("Unexpected enabled_mcp_tools shape in %s; ignoring", path)
        return None

    migrated = _migrate_from_profile_if_needed(queen_id)
    if migrated is not None:
        return migrated
    # If migration just hoisted an explicit ``null`` out of profile.yaml,
    # a sidecar with allow-all semantics now exists on disk. Honor that
    # over the role default so an explicit user choice wins.
    if tools_config_path(queen_id).exists():
        return None

    # No sidecar, nothing to migrate — fall back to role-based default.
    # NOTE(review): imported lazily, presumably to avoid a circular import
    # between the host and agent packages — confirm.
    from framework.agents.queen.queen_tools_defaults import resolve_queen_default_tools

    return resolve_queen_default_tools(queen_id, mcp_catalog)
|
||||
|
||||
|
||||
def update_queen_tools_config(
    queen_id: str,
    enabled_mcp_tools: list[str] | None,
) -> list[str] | None:
    """Persist the queen's MCP allowlist to ``tools.json``.

    Refuses to silently create a sidecar for a queen that doesn't exist:
    raises ``FileNotFoundError`` when the queen's directory is missing.
    Returns the allowlist that was written (``None`` = allow-all).
    """
    target_dir = QUEENS_DIR / queen_id
    if not target_dir.exists():
        raise FileNotFoundError(f"Queen directory not found: {queen_id}")
    payload = {
        "enabled_mcp_tools": enabled_mcp_tools,
        "updated_at": datetime.now(UTC).isoformat(),
    }
    _atomic_write_json(tools_config_path(queen_id), payload)
    return enabled_mcp_tools
|
||||
@@ -0,0 +1,272 @@
|
||||
"""Role-based default tool allowlists for queens.
|
||||
|
||||
Every queen inherits the same MCP surface (all servers loaded for the
|
||||
queen agent), but exposing 94+ tools to every persona clutters the LLM
|
||||
tool catalog and wastes prompt tokens. This module defines a sensible
|
||||
default allowlist per queen persona so, e.g., Head of Legal doesn't
|
||||
see port scanners and Head of Finance doesn't see ``apply_patch``.
|
||||
|
||||
Defaults apply only when the queen has no ``tools.json`` sidecar — the
|
||||
moment the user saves an allowlist through the Tool Library, the
|
||||
sidecar becomes authoritative. A DELETE on the tools endpoint removes
|
||||
the sidecar and brings the queen back to her role default.
|
||||
|
||||
Category entries support a ``@server:NAME`` shorthand that expands to
|
||||
every tool name registered against that MCP server in the current
|
||||
catalog. This keeps the category table short and drift-free when new
|
||||
tools are added (e.g. browser_* auto-joins the ``browser`` category).
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from typing import Any
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Categories — reusable bundles of MCP tool names.
|
||||
# ---------------------------------------------------------------------------
|
||||
#
|
||||
# Each category is a flat list of either concrete tool names or the
|
||||
# ``@server:NAME`` shorthand. The shorthand expands to every tool the
|
||||
# given MCP server currently exposes (requires a live catalog; when one
|
||||
# is not available the shorthand is silently dropped so we fall back to
|
||||
# the named entries only).
|
||||
|
||||
# NOTE: list order matters — resolve_queen_default_tools preserves
# first-occurrence order when flattening categories into an allowlist.
_TOOL_CATEGORIES: dict[str, list[str]] = {
    # Read-only file operations — safe baseline for every knowledge queen.
    "file_read": [
        "read_file",
        "list_directory",
        "list_dir",
        "list_files",
        "search_files",
        "grep_search",
        "pdf_read",
    ],
    # File mutation — only personas that author or edit artifacts.
    "file_write": [
        "write_file",
        "edit_file",
        "apply_diff",
        "apply_patch",
        "replace_file_content",
        "hashline_edit",
        "undo_changes",
    ],
    # Shell + process control — engineering personas only.
    "shell": [
        "run_command",
        "execute_command_tool",
        "bash_kill",
        "bash_output",
    ],
    # Tabular data. CSV/Excel read/write + DuckDB SQL.
    "data": [
        "csv_read",
        "csv_info",
        "csv_write",
        "csv_append",
        "csv_sql",
        "excel_read",
        "excel_info",
        "excel_write",
        "excel_append",
        "excel_search",
        "excel_sheet_list",
        "excel_sql",
    ],
    # Browser automation — every tool from the gcu-tools MCP server.
    # ``@server:NAME`` expands against the live catalog at resolve time.
    "browser": ["@server:gcu-tools"],
    # External research / information-gathering.
    "research": [
        "search_papers",
        "download_paper",
        "search_wikipedia",
        "web_scrape",
    ],
    # Security scanners — pentest-ish, only for engineering/security roles.
    "security": [
        "dns_security_scan",
        "http_headers_scan",
        "port_scan",
        "ssl_tls_scan",
        "subdomain_enumerate",
        "tech_stack_detect",
        "risk_score",
    ],
    # Lightweight context helpers — good default for every queen.
    "time_context": [
        "get_current_time",
        "get_account_info",
    ],
    # Runtime log inspection — debug/observability for builder personas.
    "runtime_inspection": [
        "query_runtime_logs",
        "query_runtime_log_details",
        "query_runtime_log_raw",
    ],
    # Agent-management tools — building/validating/checking agents.
    "agent_mgmt": [
        "list_agents",
        "list_agent_tools",
        "list_agent_sessions",
        "get_agent_checkpoint",
        "list_agent_checkpoints",
        "run_agent_tests",
        "save_agent_draft",
        "confirm_and_build",
        "validate_agent_package",
        "validate_agent_tools",
        "enqueue_task",
    ],
}
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Per-queen mapping.
|
||||
# ---------------------------------------------------------------------------
|
||||
#
|
||||
# Built from the queen personas in ``queen_profiles.DEFAULT_QUEENS``. The
|
||||
# goal is "just enough" — a queen should see tools she'd plausibly call
|
||||
# for her stated role, nothing more. Users curate further via the Tool
|
||||
# Library if they want.
|
||||
#
|
||||
# A queen whose ID is NOT in this map falls through to "allow every MCP
|
||||
# tool" (the original behavior), which keeps the system compatible with
|
||||
# user-added custom queen IDs that we don't know about.
|
||||
|
||||
# Values are category names that resolve_queen_default_tools() expands
# against _TOOL_CATEGORIES into concrete tool-name allowlists.
QUEEN_DEFAULT_CATEGORIES: dict[str, list[str]] = {
    # Head of Technology — builds and operates systems; full toolkit.
    "queen_technology": [
        "file_read",
        "file_write",
        "shell",
        "data",
        "browser",
        "research",
        "security",
        "time_context",
        "runtime_inspection",
        "agent_mgmt",
    ],
    # Head of Growth — data, experiments, competitor research; no shell/security.
    "queen_growth": [
        "file_read",
        "file_write",
        "data",
        "browser",
        "research",
        "time_context",
    ],
    # Head of Product Strategy — user research + roadmaps; no shell/security.
    "queen_product_strategy": [
        "file_read",
        "file_write",
        "data",
        "browser",
        "research",
        "time_context",
    ],
    # Head of Finance — financial models (CSV/Excel heavy), market research.
    "queen_finance_fundraising": [
        "file_read",
        "file_write",
        "data",
        "browser",
        "research",
        "time_context",
    ],
    # Head of Legal — reads contracts/PDFs, researches; no shell/data/security.
    "queen_legal": [
        "file_read",
        "file_write",
        "browser",
        "research",
        "time_context",
    ],
    # Head of Brand & Design — visual refs, style guides; no shell/data/security.
    "queen_brand_design": [
        "file_read",
        "file_write",
        "browser",
        "research",
        "time_context",
    ],
    # Head of Talent — candidate pipelines, resumes; data + browser heavy.
    "queen_talent": [
        "file_read",
        "file_write",
        "data",
        "browser",
        "research",
        "time_context",
    ],
    # Head of Operations — processes, automation, observability.
    "queen_operations": [
        "file_read",
        "file_write",
        "data",
        "browser",
        "research",
        "time_context",
        "runtime_inspection",
        "agent_mgmt",
    ],
}
|
||||
|
||||
|
||||
def has_role_default(queen_id: str) -> bool:
    """Report whether ``queen_id`` has an entry in the role-default table."""
    known_roles = QUEEN_DEFAULT_CATEGORIES
    return queen_id in known_roles
|
||||
|
||||
|
||||
def resolve_queen_default_tools(
    queen_id: str,
    mcp_catalog: dict[str, list[dict[str, Any]]] | None = None,
) -> list[str] | None:
    """Expand a queen's role categories into a concrete default allowlist.

    Args:
        queen_id: Profile ID such as ``"queen_technology"``.
        mcp_catalog: Optional ``{server_name: [{"name": ...}, ...]}`` mapping
            used to expand ``@server:NAME`` shorthand entries. When absent,
            shorthand entries are skipped and only explicitly-named tools
            survive.

    Returns:
        Tool names deduplicated in first-seen order, or ``None`` when the
        queen has no role entry (callers treat that as "allow every MCP
        tool").
    """
    role_categories = QUEEN_DEFAULT_CATEGORIES.get(queen_id)
    if not role_categories:
        return None

    ordered: list[str] = []
    seen: set[str] = set()

    for category in role_categories:
        for entry in _TOOL_CATEGORIES.get(category, []):
            if not entry.startswith("@server:"):
                # Concrete tool name — keep the first occurrence only.
                if entry and entry not in seen:
                    seen.add(entry)
                    ordered.append(entry)
                continue
            # ``@server:NAME`` shorthand — expand against the live catalog.
            server_name = entry[len("@server:") :]
            if mcp_catalog is None:
                logger.debug(
                    "resolve_queen_default_tools: catalog missing; cannot expand %s",
                    entry,
                )
                continue
            for tool in mcp_catalog.get(server_name, []) or []:
                tool_name = tool.get("name") if isinstance(tool, dict) else None
                if tool_name and tool_name not in seen:
                    seen.add(tool_name)
                    ordered.append(tool_name)

    return ordered
|
||||
@@ -33,9 +33,9 @@ All tools are prefixed with `browser_`:
|
||||
|
||||
**`browser_snapshot`** — compact accessibility tree of interactive elements. Fast, cheap, good for static or form-heavy pages where the DOM matches what's visually rendered (documentation, simple dashboards, search results, settings pages).
|
||||
|
||||
**`browser_screenshot`** — visual capture + metadata (`cssWidth`, `devicePixelRatio`, scale fields). **Use this on any complex SPA** — LinkedIn, Twitter/X, Reddit, Gmail, Notion, Slack, Discord, any site using shadow DOM, virtual scrolling, React reconciliation, or dynamic layout. On these pages, snapshot refs go stale in seconds, shadow contents aren't in the AX tree, and virtual-scrolled elements disappear from the tree entirely. Screenshot is the **only** reliable way to orient yourself.
|
||||
**`browser_screenshot`** — visual capture + metadata (`cssWidth`, `devicePixelRatio`, scale fields). Use this when `browser_snapshot` does not show the thing you need, when refs look stale, or when visual position/layout matters. This often happens on complex SPAs — LinkedIn, Twitter/X, Reddit, Gmail, Notion, Slack, Discord — and on sites using shadow DOM, virtual scrolling, React reconciliation, or dynamic layout.
|
||||
|
||||
Neither tool is "preferred" universally — they're for different jobs. Default to snapshot on text-heavy static pages, screenshot on SPAs and anything shadow-DOM-heavy. Activate the `browser-automation` skill for the full decision tree.
|
||||
Neither tool is "preferred" universally — they're for different jobs. Start with snapshot for page structure and ordinary controls; use screenshot as the fallback when snapshot can't find or verify the visible target. Activate the `browser-automation` skill for the full decision tree.
|
||||
|
||||
## Coordinate rule
|
||||
|
||||
@@ -44,9 +44,9 @@ Every browser tool that takes or returns coordinates operates in **fractions of
|
||||
## System prompt tips for browser nodes
|
||||
|
||||
```
|
||||
1. On LinkedIn / X / Reddit / Gmail / any SPA — use browser_screenshot to orient,
|
||||
not browser_snapshot. Shadow DOM and virtual scrolling make snapshots unreliable.
|
||||
2. For static pages (docs, forms, search results), browser_snapshot is fine.
|
||||
1. Start with browser_snapshot or the snapshot returned by the latest interaction.
|
||||
2. If the target is missing, ambiguous, stale, or visibly present but absent from the tree,
|
||||
use browser_screenshot to orient and then click by fractional coordinates.
|
||||
3. Before typing into a rich-text editor (X compose, LinkedIn DM, Gmail, Reddit),
|
||||
click the input area first with browser_click_coordinate so React / Draft.js /
|
||||
Lexical register a native focus event, then use browser_type_focused(text=...)
|
||||
@@ -66,7 +66,7 @@ Every browser tool that takes or returns coordinates operates in **fractions of
|
||||
"tools": {"policy": "all"},
|
||||
"input_keys": ["search_url"],
|
||||
"output_keys": ["profiles"],
|
||||
"system_prompt": "Navigate to the search URL via browser_navigate(wait_until='load', timeout_ms=20000). Wait 3s for SPA hydration. On LinkedIn, use browser_screenshot to see the page — browser_snapshot misses shadow-DOM and virtual-scrolled content. Paginate through results by scrolling and screenshotting; extract each profile card by reading its visible layout..."
|
||||
"system_prompt": "Navigate to the search URL via browser_navigate(wait_until='load', timeout_ms=20000). Wait 3s for SPA hydration. Use the returned snapshot to look for result cards first. If the cards are missing, stale, or visually present but absent from the tree, use browser_screenshot to orient; paginate through results by scrolling and use screenshots only when the snapshot cannot find or verify the visible cards..."
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
@@ -823,8 +823,8 @@ async def run_shutdown_reflection(
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
_LONG_REFLECT_INTERVAL = 5
|
||||
_SHORT_REFLECT_TURN_INTERVAL = 2
|
||||
_SHORT_REFLECT_COOLDOWN_SEC = 120.0
|
||||
_SHORT_REFLECT_TURN_INTERVAL = 3
|
||||
_SHORT_REFLECT_COOLDOWN_SEC = 300.0
|
||||
|
||||
|
||||
async def subscribe_reflection_triggers(
|
||||
|
||||
@@ -14,6 +14,16 @@ from typing import Any
|
||||
|
||||
DEFAULT_MAX_TOKENS = 8192
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Desktop mode
|
||||
# ---------------------------------------------------------------------------
|
||||
# Set by the Electron shell when it spawns `hive serve` so the runtime knows
|
||||
# it's hosted inside a desktop client rather than served standalone. Today
|
||||
# this only gates the frontend-build step in `hive serve` (the desktop ships
|
||||
# its own renderer), but it's also a useful hook for future single-user /
|
||||
# telemetry-off branching.
|
||||
DESKTOP_MODE: bool = bool(os.environ.get("HIVE_DESKTOP_MODE"))
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Hive home directory structure
|
||||
# ---------------------------------------------------------------------------
|
||||
@@ -155,6 +165,57 @@ def get_preferred_worker_model() -> str | None:
|
||||
return None
|
||||
|
||||
|
||||
def get_vision_fallback_model() -> str | None:
    """Resolve the configured vision-fallback model string, or ``None``.

    Reads the ``vision_fallback`` section of ``~/.hive/configuration.json``.
    The agent-loop hook that captions tool-result images uses this when the
    main agent's model is text-only; a ``None`` result makes the fallback
    chain skip the configured-subagent stage and go straight to the generic
    caption rotation (``_describe_images_as_text``).
    """
    cfg = get_hive_config().get("vision_fallback", {})
    provider_raw = cfg.get("provider")
    model_raw = cfg.get("model")
    if not (provider_raw and model_raw):
        return None
    provider = str(provider_raw)
    model = str(model_raw).strip()
    # OpenRouter model IDs sometimes arrive already prefixed "openrouter/";
    # strip the redundant prefix so the provider isn't doubled up.
    if provider.lower() == "openrouter" and model.lower().startswith("openrouter/"):
        model = model[len("openrouter/") :]
    return f"{provider}/{model}" if model else None
|
||||
|
||||
|
||||
def get_vision_fallback_api_key() -> str | None:
    """Resolve the API key for the vision-fallback model.

    Prefers the environment variable named by
    ``vision_fallback.api_key_env_var``; otherwise defers to the default
    ``get_api_key()``. No subscription-token branches — vision fallback is
    intended for hosted vision models (Anthropic, OpenAI, Google), not for
    the subscription-bearer providers.
    """
    cfg = get_hive_config().get("vision_fallback", {})
    if not cfg:
        return get_api_key()
    env_var = cfg.get("api_key_env_var")
    if not env_var:
        return get_api_key()
    # An explicitly configured env var wins, even if it is currently unset.
    return os.environ.get(env_var)
|
||||
|
||||
|
||||
def get_vision_fallback_api_base() -> str | None:
    """Resolve the API base URL for the vision-fallback model (``None`` if unset)."""
    cfg = get_hive_config().get("vision_fallback", {})
    if not cfg:
        return None
    explicit_base = cfg.get("api_base")
    if explicit_base:
        return explicit_base
    provider = str(cfg.get("provider", "")).lower()
    return OPENROUTER_API_BASE if provider == "openrouter" else None
|
||||
|
||||
|
||||
def get_worker_api_key() -> str | None:
|
||||
"""Return the API key for the worker LLM, falling back to the default key."""
|
||||
worker_llm = get_hive_config().get("worker_llm", {})
|
||||
|
||||
@@ -1672,7 +1672,7 @@ class AgentHost:
|
||||
entry_point_id: str,
|
||||
execution_id: str,
|
||||
graph_id: str | None = None,
|
||||
) -> bool:
|
||||
) -> str:
|
||||
"""
|
||||
Cancel a running execution.
|
||||
|
||||
@@ -1682,11 +1682,11 @@ class AgentHost:
|
||||
graph_id: Graph to search (defaults to active graph)
|
||||
|
||||
Returns:
|
||||
True if cancelled, False if not found
|
||||
Cancellation outcome from the stream.
|
||||
"""
|
||||
stream = self._resolve_stream(entry_point_id, graph_id)
|
||||
if stream is None:
|
||||
return False
|
||||
return "not_found"
|
||||
return await stream.cancel_execution(execution_id)
|
||||
|
||||
# === QUERY OPERATIONS ===
|
||||
|
||||
@@ -0,0 +1,95 @@
|
||||
"""Read/write helpers for per-colony metadata.json.
|
||||
|
||||
A colony's metadata.json lives at ``{COLONIES_DIR}/{colony_name}/metadata.json``
|
||||
and holds immutable provenance: the queen that created it, the forked
|
||||
session id, creation/update timestamps, and the list of workers.
|
||||
|
||||
Mutable user-editable tool configuration lives in a sibling
|
||||
``tools.json`` sidecar — see :mod:`framework.host.colony_tools_config`
|
||||
— so identity and tool gating evolve independently.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
from framework.config import COLONIES_DIR
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def colony_metadata_path(colony_name: str) -> Path:
|
||||
"""Return the on-disk path to a colony's metadata.json."""
|
||||
return COLONIES_DIR / colony_name / "metadata.json"
|
||||
|
||||
|
||||
def load_colony_metadata(colony_name: str) -> dict[str, Any]:
|
||||
"""Load metadata.json for ``colony_name``.
|
||||
|
||||
Returns an empty dict if the file is missing or malformed — callers
|
||||
are expected to treat missing fields as defaults.
|
||||
"""
|
||||
path = colony_metadata_path(colony_name)
|
||||
if not path.exists():
|
||||
return {}
|
||||
try:
|
||||
data = json.loads(path.read_text(encoding="utf-8"))
|
||||
except (json.JSONDecodeError, OSError):
|
||||
logger.warning("Failed to read colony metadata at %s", path)
|
||||
return {}
|
||||
return data if isinstance(data, dict) else {}
|
||||
|
||||
|
||||
def update_colony_metadata(colony_name: str, updates: dict[str, Any]) -> dict[str, Any]:
|
||||
"""Shallow-merge ``updates`` into metadata.json and persist.
|
||||
|
||||
Returns the full updated dict. Raises ``FileNotFoundError`` if the
|
||||
colony does not exist. Writes atomically via ``os.replace`` to
|
||||
minimize the window where a reader could see a half-written file.
|
||||
"""
|
||||
import os
|
||||
import tempfile
|
||||
|
||||
path = colony_metadata_path(colony_name)
|
||||
if not path.parent.exists():
|
||||
raise FileNotFoundError(f"Colony '{colony_name}' not found")
|
||||
|
||||
data = load_colony_metadata(colony_name) if path.exists() else {}
|
||||
for key, value in updates.items():
|
||||
data[key] = value
|
||||
|
||||
path.parent.mkdir(parents=True, exist_ok=True)
|
||||
fd, tmp_path = tempfile.mkstemp(
|
||||
prefix=".metadata.",
|
||||
suffix=".json.tmp",
|
||||
dir=str(path.parent),
|
||||
)
|
||||
try:
|
||||
with os.fdopen(fd, "w", encoding="utf-8") as fh:
|
||||
json.dump(data, fh, indent=2)
|
||||
fh.flush()
|
||||
os.fsync(fh.fileno())
|
||||
os.replace(tmp_path, path)
|
||||
except BaseException:
|
||||
try:
|
||||
os.unlink(tmp_path)
|
||||
except OSError:
|
||||
pass
|
||||
raise
|
||||
return data
|
||||
|
||||
|
||||
def list_colony_names() -> list[str]:
|
||||
"""Return the names of every colony that has a metadata.json on disk."""
|
||||
if not COLONIES_DIR.is_dir():
|
||||
return []
|
||||
names: list[str] = []
|
||||
for entry in sorted(COLONIES_DIR.iterdir()):
|
||||
if not entry.is_dir():
|
||||
continue
|
||||
if (entry / "metadata.json").exists():
|
||||
names.append(entry.name)
|
||||
return names
|
||||
@@ -14,6 +14,7 @@ from __future__ import annotations
|
||||
import asyncio
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import time
|
||||
from collections import OrderedDict
|
||||
from collections.abc import Callable
|
||||
@@ -73,9 +74,28 @@ def _format_spawn_task_message(task: str, input_data: dict[str, Any]) -> str:
|
||||
return "\n".join(lines)
|
||||
|
||||
|
||||
def _env_int(name: str, default: int) -> int:
|
||||
"""Read a positive int from env; fall back to default on missing/invalid."""
|
||||
raw = os.environ.get(name)
|
||||
if not raw:
|
||||
return default
|
||||
try:
|
||||
value = int(raw)
|
||||
except ValueError:
|
||||
logger.warning("Invalid %s=%r; using default %d", name, raw, default)
|
||||
return default
|
||||
return value if value > 0 else default
|
||||
|
||||
|
||||
# Laptop-safe default. Each worker is a full AgentLoop (Claude SDK session +
|
||||
# tool catalog), so ~4 concurrent is the realistic ceiling on a dev machine.
|
||||
# Override via HIVE_MAX_CONCURRENT_WORKERS for servers.
|
||||
_DEFAULT_MAX_CONCURRENT_WORKERS = _env_int("HIVE_MAX_CONCURRENT_WORKERS", 4)
|
||||
|
||||
|
||||
@dataclass
|
||||
class ColonyConfig:
|
||||
max_concurrent_workers: int = 100
|
||||
max_concurrent_workers: int = _DEFAULT_MAX_CONCURRENT_WORKERS
|
||||
cache_ttl: float = 60.0
|
||||
batch_interval: float = 0.1
|
||||
max_history: int = 1000
|
||||
@@ -165,6 +185,8 @@ class ColonyRuntime:
|
||||
protocols_prompt: str = "",
|
||||
skill_dirs: list[str] | None = None,
|
||||
pipeline_stages: list | None = None,
|
||||
queen_id: str | None = None,
|
||||
colony_name: str | None = None,
|
||||
):
|
||||
from framework.pipeline.runner import PipelineRunner
|
||||
from framework.skills.manager import SkillsManager
|
||||
@@ -173,14 +195,27 @@ class ColonyRuntime:
|
||||
self._goal = goal
|
||||
self._config = config or ColonyConfig()
|
||||
self._runtime_log_store = runtime_log_store
|
||||
self._queen_id: str | None = queen_id
|
||||
# ``colony_id`` is the event-bus scope (session.id in DM sessions);
|
||||
# ``colony_name`` is the on-disk identity under ~/.hive/colonies/.
|
||||
# They coincide for forked colonies but diverge for queen DM
|
||||
# sessions, so separate them explicitly.
|
||||
self._colony_name: str | None = colony_name
|
||||
|
||||
if pipeline_stages:
|
||||
self._pipeline = PipelineRunner(pipeline_stages)
|
||||
else:
|
||||
self._pipeline = self._load_pipeline_from_config()
|
||||
|
||||
if skills_manager_config is not None:
|
||||
self._skills_manager = SkillsManager(skills_manager_config)
|
||||
# Resolve per-colony override paths so UI toggles can reach this
|
||||
# runtime. Callers that build their own SkillsManagerConfig stay
|
||||
# in charge; bare construction auto-wires the standard paths.
|
||||
_effective_cfg = skills_manager_config
|
||||
if _effective_cfg is None and not (skills_catalog_prompt or protocols_prompt):
|
||||
_effective_cfg = self._build_default_skills_config(colony_name, queen_id)
|
||||
|
||||
if _effective_cfg is not None:
|
||||
self._skills_manager = SkillsManager(_effective_cfg)
|
||||
self._skills_manager.load()
|
||||
elif skills_catalog_prompt or protocols_prompt:
|
||||
import warnings
|
||||
@@ -222,6 +257,19 @@ class ColonyRuntime:
|
||||
self._tools = tools or []
|
||||
self._tool_executor = tool_executor
|
||||
|
||||
# Per-colony MCP tool allowlist — applied when spawning workers. A
|
||||
# value of ``None`` means "allow every MCP tool" (default), an empty
|
||||
# list disables every MCP tool, and a list of names only enables
|
||||
# those. Lifecycle / synthetic tools always pass through the filter
|
||||
# because their names are absent from ``_mcp_tool_names_all``. The
|
||||
# allowlist is re-read on every ``spawn`` so a PATCH that mutates
|
||||
# this attribute via ``set_tool_allowlist`` takes effect on the
|
||||
# NEXT worker spawn without a runtime restart. In-flight workers
|
||||
# keep the tool list they booted with — workers have no dynamic
|
||||
# tools provider today.
|
||||
self._enabled_mcp_tools: list[str] | None = None
|
||||
self._mcp_tool_names_all: set[str] = set()
|
||||
|
||||
# Worker management
|
||||
self._workers: dict[str, Worker] = {}
|
||||
# The persistent client-facing overseer (optional). Set by
|
||||
@@ -238,6 +286,13 @@ class ColonyRuntime:
|
||||
self._timer_tasks: list[asyncio.Task] = []
|
||||
self._timer_next_fire: dict[str, float] = {}
|
||||
self._webhook_server: Any = None
|
||||
# Background tasks owned by the runtime that aren't timers —
|
||||
# e.g. the per-spawn soft/hard timeout watchers kicked off by
|
||||
# run_parallel_workers. We hold strong references so asyncio
|
||||
# does not garbage-collect them mid-sleep (Python's asyncio
|
||||
# docs explicitly warn that create_task() needs a referenced
|
||||
# handle).
|
||||
self._background_tasks: set[asyncio.Task] = set()
|
||||
|
||||
# Idempotency
|
||||
self._idempotency_keys: OrderedDict[str, str] = OrderedDict()
|
||||
@@ -357,6 +412,128 @@ class ColonyRuntime:
|
||||
return PipelineRunner([])
|
||||
return build_pipeline_from_config(stages_config)
|
||||
|
||||
@staticmethod
|
||||
def _build_default_skills_config(
|
||||
colony_name: str | None,
|
||||
queen_id: str | None,
|
||||
) -> SkillsManagerConfig:
|
||||
"""Assemble a ``SkillsManagerConfig`` that wires in the per-colony /
|
||||
per-queen override files and the ``queen_ui`` / ``colony_ui`` scope
|
||||
dirs based on the standard ``~/.hive`` layout.
|
||||
|
||||
``colony_name`` must be an actual on-disk colony name
|
||||
(``~/.hive/colonies/{name}/``). DM sessions where the ``colony_id``
|
||||
is a session UUID should pass ``None`` so we don't create a stray
|
||||
override file under a session identifier.
|
||||
"""
|
||||
from framework.config import COLONIES_DIR, QUEENS_DIR
|
||||
from framework.skills.discovery import ExtraScope
|
||||
from framework.skills.manager import SkillsManagerConfig
|
||||
|
||||
extras: list[ExtraScope] = []
|
||||
queen_overrides_path: Path | None = None
|
||||
if queen_id:
|
||||
queen_home = QUEENS_DIR / queen_id
|
||||
queen_overrides_path = queen_home / "skills_overrides.json"
|
||||
extras.append(ExtraScope(directory=queen_home / "skills", label="queen_ui", priority=2))
|
||||
|
||||
colony_overrides_path: Path | None = None
|
||||
if colony_name:
|
||||
colony_home = COLONIES_DIR / colony_name
|
||||
colony_overrides_path = colony_home / "skills_overrides.json"
|
||||
# Colony-scope SKILL.md dir is the project-scope from discovery's
|
||||
# point of view (colony_dir is the project_root). Add it also as
|
||||
# a tagged ``colony_ui`` scope so UI-created entries resolve with
|
||||
# correct provenance.
|
||||
extras.append(
|
||||
ExtraScope(
|
||||
directory=colony_home / ".hive" / "skills",
|
||||
label="colony_ui",
|
||||
priority=3,
|
||||
)
|
||||
)
|
||||
|
||||
return SkillsManagerConfig(
|
||||
queen_id=queen_id,
|
||||
queen_overrides_path=queen_overrides_path,
|
||||
colony_name=colony_name,
|
||||
colony_overrides_path=colony_overrides_path,
|
||||
extra_scope_dirs=extras,
|
||||
interactive=False, # HTTP-driven runtimes never prompt for consent
|
||||
)
|
||||
|
||||
@property
|
||||
def queen_id(self) -> str | None:
|
||||
"""The queen that owns this runtime, if known."""
|
||||
return self._queen_id
|
||||
|
||||
@property
|
||||
def colony_name(self) -> str | None:
|
||||
"""The on-disk colony name (distinct from event-bus scope ``colony_id``)."""
|
||||
return self._colony_name
|
||||
|
||||
@property
|
||||
def skills_manager(self):
|
||||
"""Access the live :class:`SkillsManager` (for HTTP handlers)."""
|
||||
return self._skills_manager
|
||||
|
||||
async def reload_skills(self) -> dict[str, Any]:
|
||||
"""Rebuild the catalog after an override change; in-flight workers
|
||||
pick up the new catalog on their next iteration via
|
||||
``dynamic_skills_catalog_provider``.
|
||||
|
||||
Returns a small stats dict that HTTP handlers can echo back to
|
||||
the UI ("applied — N skills now in catalog").
|
||||
"""
|
||||
async with self._skills_manager.mutation_lock:
|
||||
self._skills_manager.reload()
|
||||
self.skill_dirs = self._skills_manager.allowlisted_dirs
|
||||
self.batch_init_nudge = self._skills_manager.batch_init_nudge
|
||||
self.context_warn_ratio = self._skills_manager.context_warn_ratio
|
||||
catalog_prompt = self._skills_manager.skills_catalog_prompt
|
||||
return {
|
||||
"catalog_chars": len(catalog_prompt),
|
||||
"skill_dirs": list(self.skill_dirs),
|
||||
}
|
||||
|
||||
# ── Per-colony tool allowlist ───────────────────────────────
|
||||
|
||||
def set_tool_allowlist(
|
||||
self,
|
||||
enabled_mcp_tools: list[str] | None,
|
||||
mcp_tool_names_all: set[str] | None = None,
|
||||
) -> None:
|
||||
"""Configure the per-colony MCP tool allowlist.
|
||||
|
||||
Called at construction time (from SessionManager) and again from
|
||||
the ``/api/colony/{name}/tools`` PATCH handler when a user edits
|
||||
the allowlist. The change applies to the NEXT worker spawn — we
|
||||
never mutate the tool list of a worker that is already running
|
||||
(workers have no dynamic tools provider, so hot-reloading their
|
||||
tool set would diverge from the list the LLM was already using).
|
||||
"""
|
||||
self._enabled_mcp_tools = list(enabled_mcp_tools) if enabled_mcp_tools is not None else None
|
||||
if mcp_tool_names_all is not None:
|
||||
self._mcp_tool_names_all = set(mcp_tool_names_all)
|
||||
|
||||
def _apply_tool_allowlist(self, tools: list) -> list:
|
||||
"""Filter ``tools`` against the colony's MCP allowlist.
|
||||
|
||||
Lifecycle / synthetic tools (those whose names are NOT in
|
||||
``_mcp_tool_names_all``) are never gated. MCP tools are kept only
|
||||
when ``_enabled_mcp_tools`` is None (default allow) or contains
|
||||
their name. Input list order is preserved so downstream cache
|
||||
keys and logs stay stable.
|
||||
"""
|
||||
if self._enabled_mcp_tools is None:
|
||||
return tools
|
||||
allowed = set(self._enabled_mcp_tools)
|
||||
return [
|
||||
t
|
||||
for t in tools
|
||||
if getattr(t, "name", None) not in self._mcp_tool_names_all or getattr(t, "name", None) in allowed
|
||||
]
|
||||
|
||||
# ── Lifecycle ───────────────────────────────────────────────
|
||||
|
||||
async def start(self) -> None:
|
||||
@@ -631,6 +808,14 @@ class ColonyRuntime:
|
||||
spawn_tools = tools if tools is not None else self._tools
|
||||
spawn_executor = tool_executor or self._tool_executor
|
||||
|
||||
# Apply the per-colony MCP tool allowlist (if any). Done HERE —
|
||||
# after spawn_tools is resolved but before it's frozen into the
|
||||
# worker's AgentContext — so the next spawn reflects any PATCH
|
||||
# that happened since the last spawn. A value of ``None`` on
|
||||
# ``_enabled_mcp_tools`` is a no-op so the default path is
|
||||
# unchanged.
|
||||
spawn_tools = self._apply_tool_allowlist(spawn_tools)
|
||||
|
||||
# Colony progress tracker: when the caller supplied a db_path
|
||||
# in input_data, this worker is part of a SQLite task queue
|
||||
# and must see the hive.colony-progress-tracker skill body in
|
||||
@@ -653,7 +838,9 @@ class ColonyRuntime:
|
||||
)
|
||||
_pre.load()
|
||||
_spawn_catalog = _pre.skills_catalog_prompt
|
||||
_spawn_skill_dirs = list(_pre.allowlisted_dirs) if hasattr(_pre, "allowlisted_dirs") else self.skill_dirs
|
||||
_spawn_skill_dirs = (
|
||||
list(_pre.allowlisted_dirs) if hasattr(_pre, "allowlisted_dirs") else self.skill_dirs
|
||||
)
|
||||
logger.info(
|
||||
"spawn: pre-activated hive.colony-progress-tracker "
|
||||
"(catalog %d → %d chars) for worker with db_path=%s",
|
||||
@@ -663,8 +850,7 @@ class ColonyRuntime:
|
||||
)
|
||||
except Exception as exc:
|
||||
logger.warning(
|
||||
"spawn: failed to pre-activate colony-progress-tracker "
|
||||
"skill, falling back to base catalog: %s",
|
||||
"spawn: failed to pre-activate colony-progress-tracker skill, falling back to base catalog: %s",
|
||||
exc,
|
||||
)
|
||||
|
||||
@@ -712,6 +898,17 @@ class ColonyRuntime:
|
||||
conversation_store=worker_conv_store,
|
||||
)
|
||||
|
||||
# Workers pick up UI-driven override changes via this provider,
|
||||
# which reads the live catalog on each iteration. The db_path
|
||||
# pre-activated catalog stays static because its contents are
|
||||
# built for *this* worker's task (a tombstone toggle from the
|
||||
# UI should not yank it mid-run).
|
||||
_db_path_pre_activated = bool(isinstance(input_data, dict) and input_data.get("db_path"))
|
||||
# Default-bind the manager into the closure so each loop iteration
|
||||
# captures the same manager instance — pyflakes B023 would flag a
|
||||
# free-variable capture here.
|
||||
_provider = None if _db_path_pre_activated else (lambda mgr=self._skills_manager: mgr.skills_catalog_prompt)
|
||||
|
||||
agent_context = AgentContext(
|
||||
runtime=self._make_runtime_adapter(worker_id),
|
||||
agent_id=worker_id,
|
||||
@@ -725,6 +922,7 @@ class ColonyRuntime:
|
||||
skills_catalog_prompt=_spawn_catalog,
|
||||
protocols_prompt=self.protocols_prompt,
|
||||
skill_dirs=_spawn_skill_dirs,
|
||||
dynamic_skills_catalog_provider=_provider,
|
||||
execution_id=worker_id,
|
||||
stream_id=explicit_stream_id or f"worker:{worker_id}",
|
||||
)
|
||||
@@ -969,6 +1167,7 @@ class ColonyRuntime:
|
||||
conversation_store=overseer_conv_store,
|
||||
)
|
||||
|
||||
_overseer_skills_mgr = self._skills_manager
|
||||
overseer_ctx = AgentContext(
|
||||
runtime=self._make_runtime_adapter(overseer_id),
|
||||
agent_id=overseer_id,
|
||||
@@ -982,6 +1181,7 @@ class ColonyRuntime:
|
||||
skills_catalog_prompt=self.skills_catalog_prompt,
|
||||
protocols_prompt=self.protocols_prompt,
|
||||
skill_dirs=self.skill_dirs,
|
||||
dynamic_skills_catalog_provider=lambda: _overseer_skills_mgr.skills_catalog_prompt,
|
||||
execution_id=overseer_id,
|
||||
stream_id="overseer",
|
||||
)
|
||||
@@ -1181,7 +1381,14 @@ class ColonyRuntime:
|
||||
except Exception:
|
||||
logger.exception("watch_batch_timeouts: watcher crashed")
|
||||
|
||||
return asyncio.create_task(_watch(), name=f"batch-timeout:{worker_ids[0] if worker_ids else '?'}")
|
||||
task = asyncio.create_task(_watch(), name=f"batch-timeout:{worker_ids[0] if worker_ids else '?'}")
|
||||
# Hold a strong reference until completion. Without this the
|
||||
# task can be garbage-collected during `await asyncio.sleep`,
|
||||
# silently swallowing the soft-timeout inject (the exact bug
|
||||
# surfaced by workers never seeing [SOFT TIMEOUT]).
|
||||
self._background_tasks.add(task)
|
||||
task.add_done_callback(self._background_tasks.discard)
|
||||
return task
|
||||
|
||||
# ── Status & Query ──────────────────────────────────────────
|
||||
|
||||
|
||||
@@ -0,0 +1,162 @@
|
||||
"""Per-colony tool configuration sidecar (``tools.json``).
|
||||
|
||||
Lives at ``~/.hive/colonies/{colony_name}/tools.json`` alongside
|
||||
``metadata.json``. Kept separate so provenance (queen_name,
|
||||
created_at, workers) stays in metadata while the user-editable tool
|
||||
allowlist gets its own file.
|
||||
|
||||
Schema::
|
||||
|
||||
{
|
||||
"enabled_mcp_tools": ["read_file", ...] | null,
|
||||
"updated_at": "2026-04-21T12:34:56+00:00"
|
||||
}
|
||||
|
||||
- ``null`` / missing file → default "allow every MCP tool".
|
||||
- ``[]`` → explicitly disable every MCP tool.
|
||||
- ``["foo", "bar"]`` → only those MCP tool names pass the filter.
|
||||
|
||||
Atomic writes via ``os.replace`` mirror
|
||||
``framework.host.colony_metadata.update_colony_metadata``.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import tempfile
|
||||
from datetime import UTC, datetime
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
from framework.config import COLONIES_DIR
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def tools_config_path(colony_name: str) -> Path:
|
||||
"""Return the on-disk path to a colony's ``tools.json``."""
|
||||
return COLONIES_DIR / colony_name / "tools.json"
|
||||
|
||||
|
||||
def _metadata_path(colony_name: str) -> Path:
|
||||
return COLONIES_DIR / colony_name / "metadata.json"
|
||||
|
||||
|
||||
def _atomic_write_json(path: Path, data: dict[str, Any]) -> None:
|
||||
path.parent.mkdir(parents=True, exist_ok=True)
|
||||
fd, tmp = tempfile.mkstemp(
|
||||
prefix=".tools.",
|
||||
suffix=".json.tmp",
|
||||
dir=str(path.parent),
|
||||
)
|
||||
try:
|
||||
with os.fdopen(fd, "w", encoding="utf-8") as fh:
|
||||
json.dump(data, fh, indent=2)
|
||||
fh.flush()
|
||||
os.fsync(fh.fileno())
|
||||
os.replace(tmp, path)
|
||||
except BaseException:
|
||||
try:
|
||||
os.unlink(tmp)
|
||||
except OSError:
|
||||
pass
|
||||
raise
|
||||
|
||||
|
||||
def _migrate_from_metadata_if_needed(colony_name: str) -> list[str] | None:
|
||||
"""Hoist a legacy ``enabled_mcp_tools`` field out of ``metadata.json``.
|
||||
|
||||
Returns the migrated value (or ``None`` if nothing to migrate). After
|
||||
migration the sidecar exists and ``metadata.json`` no longer contains
|
||||
``enabled_mcp_tools``. Safe to call repeatedly.
|
||||
"""
|
||||
meta_path = _metadata_path(colony_name)
|
||||
if not meta_path.exists():
|
||||
return None
|
||||
try:
|
||||
data = json.loads(meta_path.read_text(encoding="utf-8"))
|
||||
except (json.JSONDecodeError, OSError):
|
||||
logger.warning("Could not read metadata.json during tools migration: %s", colony_name)
|
||||
return None
|
||||
if not isinstance(data, dict) or "enabled_mcp_tools" not in data:
|
||||
return None
|
||||
|
||||
raw = data.pop("enabled_mcp_tools")
|
||||
enabled: list[str] | None
|
||||
if raw is None:
|
||||
enabled = None
|
||||
elif isinstance(raw, list) and all(isinstance(x, str) for x in raw):
|
||||
enabled = raw
|
||||
else:
|
||||
logger.warning(
|
||||
"Legacy enabled_mcp_tools on colony %s had unexpected shape %r; dropping",
|
||||
colony_name,
|
||||
raw,
|
||||
)
|
||||
enabled = None
|
||||
|
||||
# Sidecar first so a partial failure leaves the config recoverable.
|
||||
_atomic_write_json(
|
||||
tools_config_path(colony_name),
|
||||
{
|
||||
"enabled_mcp_tools": enabled,
|
||||
"updated_at": datetime.now(UTC).isoformat(),
|
||||
},
|
||||
)
|
||||
_atomic_write_json(meta_path, data)
|
||||
logger.info(
|
||||
"Migrated enabled_mcp_tools for colony %s from metadata.json to tools.json",
|
||||
colony_name,
|
||||
)
|
||||
return enabled
|
||||
|
||||
|
||||
def load_colony_tools_config(colony_name: str) -> list[str] | None:
|
||||
"""Return the colony's MCP tool allowlist, or ``None`` for default-allow.
|
||||
|
||||
Order of resolution:
|
||||
1. ``tools.json`` sidecar (authoritative).
|
||||
2. Legacy ``metadata.json`` field (migrated and deleted on first read).
|
||||
3. ``None`` — default "allow every MCP tool".
|
||||
"""
|
||||
path = tools_config_path(colony_name)
|
||||
if path.exists():
|
||||
try:
|
||||
data = json.loads(path.read_text(encoding="utf-8"))
|
||||
except (json.JSONDecodeError, OSError):
|
||||
logger.warning("Invalid %s; treating as default-allow", path)
|
||||
return None
|
||||
if not isinstance(data, dict):
|
||||
return None
|
||||
raw = data.get("enabled_mcp_tools")
|
||||
if raw is None:
|
||||
return None
|
||||
if isinstance(raw, list) and all(isinstance(x, str) for x in raw):
|
||||
return raw
|
||||
logger.warning("Unexpected enabled_mcp_tools shape in %s; ignoring", path)
|
||||
return None
|
||||
|
||||
return _migrate_from_metadata_if_needed(colony_name)
|
||||
|
||||
|
||||
def update_colony_tools_config(
|
||||
colony_name: str,
|
||||
enabled_mcp_tools: list[str] | None,
|
||||
) -> list[str] | None:
|
||||
"""Persist a colony's MCP allowlist to ``tools.json``.
|
||||
|
||||
Raises ``FileNotFoundError`` if the colony's directory is missing.
|
||||
"""
|
||||
colony_dir = COLONIES_DIR / colony_name
|
||||
if not colony_dir.exists():
|
||||
raise FileNotFoundError(f"Colony directory not found: {colony_name}")
|
||||
_atomic_write_json(
|
||||
tools_config_path(colony_name),
|
||||
{
|
||||
"enabled_mcp_tools": enabled_mcp_tools,
|
||||
"updated_at": datetime.now(UTC).isoformat(),
|
||||
},
|
||||
)
|
||||
return enabled_mcp_tools
|
||||
@@ -809,16 +809,28 @@ class EventBus:
|
||||
input_tokens: int,
|
||||
output_tokens: int,
|
||||
cached_tokens: int = 0,
|
||||
cache_creation_tokens: int = 0,
|
||||
cost_usd: float = 0.0,
|
||||
execution_id: str | None = None,
|
||||
iteration: int | None = None,
|
||||
) -> None:
|
||||
"""Emit LLM turn completion with stop reason and model metadata."""
|
||||
"""Emit LLM turn completion with stop reason and model metadata.
|
||||
|
||||
``cached_tokens`` and ``cache_creation_tokens`` are subsets of
|
||||
``input_tokens`` (already inside provider ``prompt_tokens``).
|
||||
Subscribers should display them, not add them to a total.
|
||||
|
||||
``cost_usd`` is the USD cost for this turn when known (Anthropic,
|
||||
OpenAI, OpenRouter). 0.0 means unreported (not free).
|
||||
"""
|
||||
data: dict = {
|
||||
"stop_reason": stop_reason,
|
||||
"model": model,
|
||||
"input_tokens": input_tokens,
|
||||
"output_tokens": output_tokens,
|
||||
"cached_tokens": cached_tokens,
|
||||
"cache_creation_tokens": cache_creation_tokens,
|
||||
"cost_usd": cost_usd,
|
||||
}
|
||||
if iteration is not None:
|
||||
data["iteration"] = iteration
|
||||
@@ -914,24 +926,22 @@ class EventBus:
|
||||
self,
|
||||
stream_id: str,
|
||||
node_id: str,
|
||||
prompt: str = "",
|
||||
execution_id: str | None = None,
|
||||
options: list[str] | None = None,
|
||||
questions: list[dict] | None = None,
|
||||
) -> None:
|
||||
"""Emit a user-input request for interactive queen turns.
|
||||
|
||||
Args:
|
||||
options: Optional predefined choices for the user (1-3 items).
|
||||
The frontend appends an "Other" free-text option
|
||||
automatically.
|
||||
questions: Optional list of question dicts for multi-question
|
||||
batches (from ask_user_multiple). Each dict has id,
|
||||
prompt, and optional options.
|
||||
questions: Optional list of question dicts from ``ask_user``.
|
||||
Each dict has ``id``, ``prompt``, and optional ``options``
|
||||
(2-3 predefined choices). The frontend renders the
|
||||
QuestionWidget for a single-entry list and the
|
||||
MultiQuestionWidget for 2+ entries. Free-text asks (no
|
||||
options) stream the prompt separately as a chat message;
|
||||
auto-block turns have no questions at all and fall back
|
||||
to the normal text input.
|
||||
"""
|
||||
data: dict[str, Any] = {"prompt": prompt}
|
||||
if options:
|
||||
data["options"] = options
|
||||
data: dict[str, Any] = {}
|
||||
if questions:
|
||||
data["questions"] = questions
|
||||
await self.publish(
|
||||
|
||||
@@ -16,7 +16,7 @@ from collections import OrderedDict
|
||||
from collections.abc import Callable
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime
|
||||
from typing import TYPE_CHECKING, Any
|
||||
from typing import TYPE_CHECKING, Any, Literal
|
||||
|
||||
from framework.host.event_bus import EventBus
|
||||
from framework.host.shared_state import IsolationLevel, SharedBufferManager
|
||||
@@ -48,6 +48,8 @@ class ExecutionAlreadyRunningError(RuntimeError):
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
CancelExecutionResult = Literal["cancelled", "cancelling", "not_found"]
|
||||
|
||||
|
||||
class GraphScopedEventBus(EventBus):
|
||||
"""Proxy that stamps ``graph_id`` on every published event.
|
||||
@@ -130,7 +132,7 @@ class ExecutionContext:
|
||||
run_id: str | None = None # Unique ID per trigger() invocation
|
||||
started_at: datetime = field(default_factory=datetime.now)
|
||||
completed_at: datetime | None = None
|
||||
status: str = "pending" # pending, running, completed, failed, paused
|
||||
status: str = "pending" # pending, running, cancelling, completed, failed, paused, cancelled
|
||||
|
||||
|
||||
class ExecutionManager:
|
||||
@@ -315,6 +317,22 @@ class ExecutionManager:
|
||||
"""Return IDs of all currently active executions."""
|
||||
return list(self._active_executions.keys())
|
||||
|
||||
def _get_blocking_execution_ids_locked(self) -> list[str]:
|
||||
"""Return executions that still block a replacement from starting.
|
||||
|
||||
An execution continues to block replacement until its task has
|
||||
terminated and the task's final cleanup has removed its bookkeeping.
|
||||
This is intentional: a timed-out cancellation does not mean the old
|
||||
task is harmless. If it is still alive, it can still write shared
|
||||
session state, so letting a replacement start would guarantee
|
||||
overlapping mutations on the same session.
|
||||
"""
|
||||
blocking_ids: list[str] = list(self._active_executions.keys())
|
||||
for execution_id, task in self._execution_tasks.items():
|
||||
if not task.done() and execution_id not in self._active_executions:
|
||||
blocking_ids.append(execution_id)
|
||||
return blocking_ids
|
||||
|
||||
@property
|
||||
def agent_idle_seconds(self) -> float:
|
||||
"""Seconds since the last agent activity (LLM call, tool call, node transition).
|
||||
@@ -396,15 +414,22 @@ class ExecutionManager:
|
||||
|
||||
async def stop(self) -> None:
|
||||
"""Stop the execution stream and cancel active executions."""
|
||||
if not self._running:
|
||||
return
|
||||
async with self._lock:
|
||||
if not self._running:
|
||||
return
|
||||
|
||||
self._running = False
|
||||
self._running = False
|
||||
|
||||
# Cancel all active executions
|
||||
tasks_to_wait = []
|
||||
for _, task in self._execution_tasks.items():
|
||||
if not task.done():
|
||||
# Cancel all active executions, but keep bookkeeping until each
|
||||
# task reaches its own cleanup path.
|
||||
tasks_to_wait: list[asyncio.Task] = []
|
||||
for execution_id, task in self._execution_tasks.items():
|
||||
if task.done():
|
||||
continue
|
||||
ctx = self._active_executions.get(execution_id)
|
||||
if ctx is not None:
|
||||
ctx.status = "cancelling"
|
||||
self._cancel_reasons.setdefault(execution_id, "Execution cancelled")
|
||||
task.cancel()
|
||||
tasks_to_wait.append(task)
|
||||
|
||||
@@ -418,9 +443,6 @@ class ExecutionManager:
|
||||
len(pending),
|
||||
)
|
||||
|
||||
self._execution_tasks.clear()
|
||||
self._active_executions.clear()
|
||||
|
||||
logger.info(f"ExecutionStream '{self.stream_id}' stopped")
|
||||
|
||||
# Emit stream stopped event
|
||||
@@ -569,12 +591,16 @@ class ExecutionManager:
|
||||
)
|
||||
|
||||
async with self._lock:
|
||||
if not self._running:
|
||||
raise RuntimeError(f"ExecutionStream '{self.stream_id}' is not running")
|
||||
|
||||
blocking_ids = self._get_blocking_execution_ids_locked()
|
||||
if blocking_ids:
|
||||
raise ExecutionAlreadyRunningError(self.stream_id, blocking_ids)
|
||||
|
||||
self._active_executions[execution_id] = ctx
|
||||
self._completion_events[execution_id] = asyncio.Event()
|
||||
|
||||
# Start execution task
|
||||
task = asyncio.create_task(self._run_execution(ctx))
|
||||
self._execution_tasks[execution_id] = task
|
||||
self._execution_tasks[execution_id] = asyncio.create_task(self._run_execution(ctx))
|
||||
|
||||
logger.debug(f"Queued execution {execution_id} for stream {self.stream_id}")
|
||||
return execution_id
|
||||
@@ -1183,7 +1209,7 @@ class ExecutionManager:
|
||||
"""Get execution context."""
|
||||
return self._active_executions.get(execution_id)
|
||||
|
||||
async def cancel_execution(self, execution_id: str, *, reason: str | None = None) -> bool:
|
||||
async def cancel_execution(self, execution_id: str, *, reason: str | None = None) -> CancelExecutionResult:
|
||||
"""
|
||||
Cancel a running execution.
|
||||
|
||||
@@ -1194,33 +1220,38 @@ class ExecutionManager:
|
||||
provided, defaults to "Execution cancelled".
|
||||
|
||||
Returns:
|
||||
True if cancelled, False if not found
|
||||
"cancelled" if the task fully exited within the grace period,
|
||||
"cancelling" if cancellation was requested but the task is still
|
||||
shutting down, or "not_found" if no active task exists.
|
||||
"""
|
||||
task = self._execution_tasks.get(execution_id)
|
||||
if task and not task.done():
|
||||
async with self._lock:
|
||||
task = self._execution_tasks.get(execution_id)
|
||||
if task is None or task.done():
|
||||
return "not_found"
|
||||
|
||||
# Store the reason so the CancelledError handler can use it
|
||||
# when emitting the pause/fail event.
|
||||
self._cancel_reasons[execution_id] = reason or "Execution cancelled"
|
||||
ctx = self._active_executions.get(execution_id)
|
||||
if ctx is not None:
|
||||
ctx.status = "cancelling"
|
||||
task.cancel()
|
||||
# Wait briefly for the task to finish. Don't block indefinitely —
|
||||
# the task may be stuck in a long LLM API call that doesn't
|
||||
# respond to cancellation quickly.
|
||||
done, _ = await asyncio.wait({task}, timeout=5.0)
|
||||
if not done:
|
||||
# Task didn't finish within timeout — clean up bookkeeping now
|
||||
# so the session doesn't think it still has running executions.
|
||||
# The task will continue winding down in the background and its
|
||||
# finally block will harmlessly pop already-removed keys.
|
||||
logger.warning(
|
||||
"Execution %s did not finish within cancel timeout; force-cleaning bookkeeping",
|
||||
execution_id,
|
||||
)
|
||||
async with self._lock:
|
||||
self._active_executions.pop(execution_id, None)
|
||||
self._execution_tasks.pop(execution_id, None)
|
||||
self._active_executors.pop(execution_id, None)
|
||||
return True
|
||||
return False
|
||||
|
||||
# Wait briefly for the task to finish. Don't block indefinitely —
|
||||
# the task may be stuck in a long LLM API call that doesn't
|
||||
# respond to cancellation quickly.
|
||||
done, _ = await asyncio.wait({task}, timeout=5.0)
|
||||
if not done:
|
||||
# Keep bookkeeping in place until the task's own finally block runs.
|
||||
# We intentionally do not add deferred cleanup keyed by execution_id
|
||||
# here because resumed executions reuse the same id; a delayed pop
|
||||
# could otherwise delete bookkeeping that belongs to the new run.
|
||||
logger.warning(
|
||||
"Execution %s did not finish within cancel timeout; leaving bookkeeping in place until task exit",
|
||||
execution_id,
|
||||
)
|
||||
return "cancelling"
|
||||
return "cancelled"
|
||||
|
||||
# === STATS AND MONITORING ===
|
||||
|
||||
|
||||
@@ -33,7 +33,7 @@ import json
|
||||
import logging
|
||||
import sqlite3
|
||||
import uuid
|
||||
from datetime import datetime, timezone
|
||||
from datetime import UTC, datetime
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
@@ -118,7 +118,7 @@ _PRAGMAS = (
|
||||
|
||||
|
||||
def _now_iso() -> str:
|
||||
return datetime.now(timezone.utc).isoformat(timespec="seconds")
|
||||
return datetime.now(UTC).isoformat(timespec="seconds")
|
||||
|
||||
|
||||
def _new_id() -> str:
|
||||
@@ -167,13 +167,10 @@ def ensure_progress_db(colony_dir: Path) -> Path:
|
||||
con.executescript(_SCHEMA_V1)
|
||||
con.execute(f"PRAGMA user_version = {SCHEMA_VERSION}")
|
||||
con.execute(
|
||||
"INSERT OR REPLACE INTO colony_meta(key, value, updated_at) "
|
||||
"VALUES (?, ?, ?)",
|
||||
"INSERT OR REPLACE INTO colony_meta(key, value, updated_at) VALUES (?, ?, ?)",
|
||||
("schema_version", str(SCHEMA_VERSION), _now_iso()),
|
||||
)
|
||||
logger.info(
|
||||
"progress_db: initialized schema v%d at %s", SCHEMA_VERSION, db_path
|
||||
)
|
||||
logger.info("progress_db: initialized schema v%d at %s", SCHEMA_VERSION, db_path)
|
||||
|
||||
reclaimed = _reclaim_stale_inner(con, stale_after_minutes=15)
|
||||
if reclaimed:
|
||||
@@ -191,19 +188,28 @@ def ensure_progress_db(colony_dir: Path) -> Path:
|
||||
|
||||
|
||||
def _patch_worker_configs(colony_dir: Path, db_path: Path) -> int:
|
||||
"""Inject ``input_data.db_path`` + ``input_data.colony_id`` into
|
||||
existing ``worker.json`` files in a colony directory.
|
||||
"""Inject ``input_data.db_path`` + ``input_data.colony_id`` +
|
||||
``input_data.colony_data_dir`` into existing ``worker.json`` files
|
||||
in a colony directory.
|
||||
|
||||
Runs on every ``ensure_progress_db`` call so colonies that were
|
||||
forked before this feature landed get their worker spawn messages
|
||||
patched in place. Idempotent: if ``input_data`` already contains
|
||||
the correct ``db_path``, the file is not rewritten.
|
||||
all three values, the file is not rewritten.
|
||||
|
||||
Returns the number of files that were actually modified (0 on
|
||||
the common case of already-patched colonies).
|
||||
|
||||
Why ``colony_data_dir``? ``db_path`` alone points agents at
|
||||
``progress.db``; for anything else (custom SQLite stores, JSON
|
||||
ledgers, scraped artefacts) they need the *directory* so they
|
||||
stop creating state under ``~/.hive/skills/`` — which holds skill
|
||||
*definitions*, not runtime data. See
|
||||
``_default_skills/colony-storage-paths/SKILL.md``.
|
||||
"""
|
||||
colony_id = colony_dir.name
|
||||
abs_db = str(db_path)
|
||||
abs_data_dir = str(db_path.parent)
|
||||
patched = 0
|
||||
|
||||
for worker_cfg in colony_dir.glob("*.json"):
|
||||
@@ -227,26 +233,24 @@ def _patch_worker_configs(colony_dir: Path, db_path: Path) -> int:
|
||||
if (
|
||||
input_data.get("db_path") == abs_db
|
||||
and input_data.get("colony_id") == colony_id
|
||||
and input_data.get("colony_data_dir") == abs_data_dir
|
||||
):
|
||||
continue # already patched
|
||||
|
||||
input_data["db_path"] = abs_db
|
||||
input_data["colony_id"] = colony_id
|
||||
input_data["colony_data_dir"] = abs_data_dir
|
||||
data["input_data"] = input_data
|
||||
|
||||
try:
|
||||
worker_cfg.write_text(
|
||||
json.dumps(data, indent=2, ensure_ascii=False), encoding="utf-8"
|
||||
)
|
||||
worker_cfg.write_text(json.dumps(data, indent=2, ensure_ascii=False), encoding="utf-8")
|
||||
patched += 1
|
||||
except OSError as e:
|
||||
logger.warning(
|
||||
"progress_db: failed to patch worker config %s: %s", worker_cfg, e
|
||||
)
|
||||
logger.warning("progress_db: failed to patch worker config %s: %s", worker_cfg, e)
|
||||
|
||||
if patched:
|
||||
logger.info(
|
||||
"progress_db: patched %d worker config(s) in colony '%s' with db_path",
|
||||
"progress_db: patched %d worker config(s) in colony '%s' with db_path + colony_data_dir",
|
||||
patched,
|
||||
colony_id,
|
||||
)
|
||||
@@ -271,9 +275,7 @@ def ensure_all_colony_dbs(colonies_root: Path | None = None) -> list[Path]:
|
||||
try:
|
||||
initialized.append(ensure_progress_db(entry))
|
||||
except Exception as e:
|
||||
logger.warning(
|
||||
"progress_db: failed to ensure DB for colony '%s': %s", entry.name, e
|
||||
)
|
||||
logger.warning("progress_db: failed to ensure DB for colony '%s': %s", entry.name, e)
|
||||
return initialized
|
||||
|
||||
|
||||
@@ -340,9 +342,7 @@ def seed_tasks(
|
||||
|
||||
for step_seq, step in enumerate(task.get("steps") or [], start=1):
|
||||
if not step.get("title"):
|
||||
raise ValueError(
|
||||
f"task[{idx}].steps[{step_seq - 1}] missing required 'title'"
|
||||
)
|
||||
raise ValueError(f"task[{idx}].steps[{step_seq - 1}] missing required 'title'")
|
||||
con.execute(
|
||||
"""
|
||||
INSERT INTO steps (id, task_id, seq, title, detail, status)
|
||||
@@ -361,9 +361,7 @@ def seed_tasks(
|
||||
key = sop.get("key")
|
||||
description = sop.get("description")
|
||||
if not key or not description:
|
||||
raise ValueError(
|
||||
f"task[{idx}].sop_items missing 'key' or 'description'"
|
||||
)
|
||||
raise ValueError(f"task[{idx}].sop_items missing 'key' or 'description'")
|
||||
con.execute(
|
||||
"""
|
||||
INSERT INTO sop_checklist
|
||||
@@ -421,9 +419,7 @@ def enqueue_task(
|
||||
return ids[0]
|
||||
|
||||
|
||||
def _reclaim_stale_inner(
|
||||
con: sqlite3.Connection, *, stale_after_minutes: int
|
||||
) -> int:
|
||||
def _reclaim_stale_inner(con: sqlite3.Connection, *, stale_after_minutes: int) -> int:
|
||||
"""Reclaim stale claims. Runs inside an existing open connection.
|
||||
|
||||
Two-step:
|
||||
|
||||
@@ -653,10 +653,17 @@ class AntigravityProvider(LLMProvider):
|
||||
system: str = "",
|
||||
tools: list[Tool] | None = None,
|
||||
max_tokens: int = 4096,
|
||||
system_dynamic_suffix: str | None = None,
|
||||
) -> AsyncIterator[StreamEvent]:
|
||||
import asyncio # noqa: PLC0415
|
||||
import concurrent.futures # noqa: PLC0415
|
||||
|
||||
# Antigravity (Google's proprietary endpoint) doesn't expose a
|
||||
# cache_control hook. Concatenate the dynamic suffix so its shape
|
||||
# matches the legacy single-string call site.
|
||||
if system_dynamic_suffix:
|
||||
system = f"{system}\n\n{system_dynamic_suffix}" if system else system_dynamic_suffix
|
||||
|
||||
loop = asyncio.get_running_loop()
|
||||
queue: asyncio.Queue[StreamEvent | None] = asyncio.Queue()
|
||||
|
||||
|
||||
@@ -1,114 +1,32 @@
|
||||
"""Model capability checks for LLM providers.
|
||||
|
||||
Vision support rules are derived from official vendor documentation:
|
||||
- ZAI (z.ai): docs.z.ai/guides/vlm — GLM-4.6V variants are vision; GLM-5/4.6/4.7 are text-only
|
||||
- MiniMax: platform.minimax.io/docs — minimax-vl-01 is vision; M2.x are text-only
|
||||
- DeepSeek: api-docs.deepseek.com — deepseek-vl2 is vision; chat/reasoner are text-only
|
||||
- Cerebras: inference-docs.cerebras.ai — no vision models at all
|
||||
- Groq: console.groq.com/docs/vision — vision capable; treat as supported by default
|
||||
- Ollama/LM Studio/vLLM/llama.cpp: local runners denied by default; model names
|
||||
don't reliably indicate vision support, so users must configure explicitly
|
||||
Vision support is sourced from the curated ``model_catalog.json``. Each model
|
||||
entry carries an optional ``supports_vision`` boolean; unknown models default
|
||||
to vision-capable so hosted frontier models work out of the box. To toggle
|
||||
support for a model, edit its catalog entry rather than this file.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from framework.llm.model_catalog import model_supports_vision
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from framework.llm.provider import Tool
|
||||
|
||||
|
||||
def _model_name(model: str) -> str:
|
||||
"""Return the bare model name after stripping any 'provider/' prefix."""
|
||||
if "/" in model:
|
||||
return model.split("/", 1)[1]
|
||||
return model
|
||||
|
||||
|
||||
# Step 1: explicit vision allow-list — these always support images regardless
|
||||
# of what the provider-level rules say. Checked first so that e.g. glm-4.6v
|
||||
# is allowed even though glm-4.6 is denied.
|
||||
_VISION_ALLOW_BARE_PREFIXES: tuple[str, ...] = (
|
||||
# ZAI/GLM vision models (docs.z.ai/guides/vlm)
|
||||
"glm-4v", # GLM-4V series (legacy)
|
||||
"glm-4.6v", # GLM-4.6V, GLM-4.6V-flash, GLM-4.6V-flashx
|
||||
# DeepSeek vision models
|
||||
"deepseek-vl", # deepseek-vl2, deepseek-vl2-small, deepseek-vl2-tiny
|
||||
# MiniMax vision model
|
||||
"minimax-vl", # minimax-vl-01
|
||||
)
|
||||
|
||||
# Step 2: provider-level deny — every model from this provider is text-only.
|
||||
_TEXT_ONLY_PROVIDER_PREFIXES: tuple[str, ...] = (
|
||||
# Cerebras: inference-docs.cerebras.ai lists only text models
|
||||
"cerebras/",
|
||||
# Local runners: model names don't reliably indicate vision support
|
||||
"ollama/",
|
||||
"ollama_chat/",
|
||||
"lm_studio/",
|
||||
"vllm/",
|
||||
"llamacpp/",
|
||||
)
|
||||
|
||||
# Step 3: per-model deny — text-only models within otherwise mixed providers.
|
||||
# Matched against the bare model name (provider prefix stripped, lower-cased).
|
||||
# The vision allow-list above is checked first, so vision variants of the same
|
||||
# family are already handled before these deny patterns are reached.
|
||||
_TEXT_ONLY_MODEL_BARE_PREFIXES: tuple[str, ...] = (
|
||||
# --- ZAI / GLM family ---
|
||||
# text-only: glm-5, glm-4.6, glm-4.7, glm-4.5, zai-glm-*
|
||||
# vision: glm-4v, glm-4.6v (caught by allow-list above)
|
||||
"glm-5",
|
||||
"glm-4.6", # bare glm-4.6 is text-only; glm-4.6v is caught by allow-list
|
||||
"glm-4.7",
|
||||
"glm-4.5",
|
||||
"zai-glm",
|
||||
# --- DeepSeek ---
|
||||
# text-only: deepseek-chat, deepseek-coder, deepseek-reasoner
|
||||
# vision: deepseek-vl2 (caught by allow-list above)
|
||||
# Note: LiteLLM's deepseek handler may flatten content lists for some models;
|
||||
# VL models are allowed through and rely on LiteLLM's native VL support.
|
||||
"deepseek-chat",
|
||||
"deepseek-coder",
|
||||
"deepseek-reasoner",
|
||||
# --- MiniMax ---
|
||||
# text-only: minimax-m2.*, minimax-text-*, abab* (legacy)
|
||||
# vision: minimax-vl-01 (caught by allow-list above)
|
||||
"minimax-m2",
|
||||
"minimax-text",
|
||||
"abab",
|
||||
)
|
||||
|
||||
|
||||
def supports_image_tool_results(model: str) -> bool:
|
||||
"""Return whether *model* can receive image content in messages.
|
||||
|
||||
Used to gate both user-message images and tool-result image blocks.
|
||||
|
||||
Logic (checked in order):
|
||||
1. Vision allow-list → True (known vision model, skip all denies)
|
||||
2. Provider deny → False (entire provider is text-only)
|
||||
3. Model deny → False (specific text-only model within a mixed provider)
|
||||
4. Default → True (assume capable; unknown providers and models)
|
||||
Thin wrapper over :func:`model_supports_vision` so existing call sites
|
||||
keep working. Used to gate both user-message images and tool-result
|
||||
image blocks. Empty model strings are treated as capable so the default
|
||||
code path doesn't strip images before a provider is selected.
|
||||
"""
|
||||
model_lower = model.lower()
|
||||
bare = _model_name(model_lower)
|
||||
|
||||
# 1. Explicit vision allow — takes priority over all denies
|
||||
if any(bare.startswith(p) for p in _VISION_ALLOW_BARE_PREFIXES):
|
||||
if not model:
|
||||
return True
|
||||
|
||||
# 2. Provider-level deny (all models from this provider are text-only)
|
||||
if any(model_lower.startswith(p) for p in _TEXT_ONLY_PROVIDER_PREFIXES):
|
||||
return False
|
||||
|
||||
# 3. Per-model deny (text-only variants within mixed-capability families)
|
||||
if any(bare.startswith(p) for p in _TEXT_ONLY_MODEL_BARE_PREFIXES):
|
||||
return False
|
||||
|
||||
# 5. Default: assume vision capable
|
||||
# Covers: OpenAI, Anthropic, Google, Mistral, Kimi, and other hosted providers
|
||||
return True
|
||||
return model_supports_vision(model)
|
||||
|
||||
|
||||
def filter_tools_for_model(tools: list[Tool], model: str) -> tuple[list[Tool], list[str]]:
|
||||
|
||||
+354
-28
@@ -33,6 +33,7 @@ except ImportError:
|
||||
RateLimitError = Exception # type: ignore[assignment, misc]
|
||||
|
||||
from framework.config import HIVE_LLM_ENDPOINT as HIVE_API_BASE
|
||||
from framework.llm.model_catalog import get_model_pricing
|
||||
from framework.llm.provider import LLMProvider, LLMResponse, Tool
|
||||
from framework.llm.stream_events import StreamEvent
|
||||
|
||||
@@ -213,9 +214,72 @@ _CACHE_CONTROL_PREFIXES = (
|
||||
"glm-",
|
||||
)
|
||||
|
||||
# OpenRouter sub-provider prefixes whose upstream API honors `cache_control`.
|
||||
# OpenRouter passes the marker through to the underlying provider for these.
|
||||
# (See https://openrouter.ai/docs/guides/best-practices/prompt-caching.)
|
||||
# OpenAI/DeepSeek/Groq/Grok/Moonshot route through OpenRouter but cache
|
||||
# automatically server-side — sending cache_control there is a no-op, not a
|
||||
# win, and they need a separate prefix-stability fix to actually get hits.
|
||||
_OPENROUTER_CACHE_CONTROL_PREFIXES = (
|
||||
"openrouter/anthropic/",
|
||||
"openrouter/google/gemini-",
|
||||
"openrouter/z-ai/glm",
|
||||
"openrouter/minimax/",
|
||||
)
|
||||
|
||||
|
||||
def _model_supports_cache_control(model: str) -> bool:
|
||||
return any(model.startswith(p) for p in _CACHE_CONTROL_PREFIXES)
|
||||
if any(model.startswith(p) for p in _CACHE_CONTROL_PREFIXES):
|
||||
return True
|
||||
return any(model.startswith(p) for p in _OPENROUTER_CACHE_CONTROL_PREFIXES)
|
||||
|
||||
|
||||
def _build_system_message(
|
||||
system: str,
|
||||
system_dynamic_suffix: str | None,
|
||||
model: str,
|
||||
) -> dict[str, Any] | None:
|
||||
"""Construct the system-role message for the chat completion.
|
||||
|
||||
Returns ``None`` when there is nothing to send.
|
||||
|
||||
Two-block split path — used when the caller supplied a non-empty
|
||||
``system_dynamic_suffix`` AND the provider honors ``cache_control``
|
||||
(Anthropic, MiniMax, Z-AI/GLM). We emit ``content`` as a list of two
|
||||
text blocks with an ephemeral ``cache_control`` marker on the first
|
||||
block only. The prompt cache keeps the static prefix warm across
|
||||
turns and across iterations within a turn; only the small dynamic
|
||||
tail is recomputed on every request.
|
||||
|
||||
Single-string path — used for every other case (no suffix provided,
|
||||
or provider doesn't honor ``cache_control``). We concatenate
|
||||
``system`` + ``\\n\\n`` + ``system_dynamic_suffix`` and attach
|
||||
``cache_control`` to the whole message when the provider supports
|
||||
it. This is byte-identical to the pre-split behavior for all
|
||||
non-cache-control providers (OpenAI, Gemini, Groq, Ollama, etc.).
|
||||
"""
|
||||
if not system and not system_dynamic_suffix:
|
||||
return None
|
||||
if system_dynamic_suffix and _model_supports_cache_control(model):
|
||||
content_blocks: list[dict[str, Any]] = []
|
||||
if system:
|
||||
content_blocks.append(
|
||||
{
|
||||
"type": "text",
|
||||
"text": system,
|
||||
"cache_control": {"type": "ephemeral"},
|
||||
}
|
||||
)
|
||||
content_blocks.append({"type": "text", "text": system_dynamic_suffix})
|
||||
return {"role": "system", "content": content_blocks}
|
||||
# Single-string path (legacy or no-cache-control provider).
|
||||
combined = system
|
||||
if system_dynamic_suffix:
|
||||
combined = f"{system}\n\n{system_dynamic_suffix}" if system else system_dynamic_suffix
|
||||
sys_msg: dict[str, Any] = {"role": "system", "content": combined}
|
||||
if _model_supports_cache_control(model):
|
||||
sys_msg["cache_control"] = {"type": "ephemeral"}
|
||||
return sys_msg
|
||||
|
||||
|
||||
# Kimi For Coding uses an Anthropic-compatible endpoint (no /v1 suffix).
|
||||
@@ -297,6 +361,171 @@ FAILED_REQUESTS_DIR = Path.home() / ".hive" / "failed_requests"
|
||||
MAX_FAILED_REQUEST_DUMPS = 50
|
||||
|
||||
|
||||
def _cost_from_catalog_pricing(
|
||||
model: str,
|
||||
input_tokens: int,
|
||||
output_tokens: int,
|
||||
cached_tokens: int = 0,
|
||||
cache_creation_tokens: int = 0,
|
||||
) -> float:
|
||||
"""Last-resort cost calculation using curated catalog pricing.
|
||||
|
||||
Consulted only when the provider response carries no native cost and
|
||||
LiteLLM's own catalog has no pricing for ``model``. Reads
|
||||
``pricing_usd_per_mtok`` from ``model_catalog.json``. Rates are USD per
|
||||
million tokens.
|
||||
|
||||
``cached_tokens`` and ``cache_creation_tokens`` are subsets of
|
||||
``input_tokens`` (see ``_extract_cache_tokens``), so subtract them from
|
||||
the base input count to avoid double-billing. If a cache rate is absent,
|
||||
fall back to the plain input rate.
|
||||
"""
|
||||
if not model or (input_tokens == 0 and output_tokens == 0):
|
||||
return 0.0
|
||||
pricing = get_model_pricing(model)
|
||||
if pricing is None and "/" in model:
|
||||
# LiteLLM prefixes some ids (e.g. "openrouter/z-ai/glm-5.1"); the
|
||||
# catalog stores the bare form ("z-ai/glm-5.1"). Strip one segment.
|
||||
pricing = get_model_pricing(model.split("/", 1)[1])
|
||||
if pricing is None:
|
||||
return 0.0
|
||||
|
||||
per_mtok_in = pricing.get("input", 0.0)
|
||||
per_mtok_out = pricing.get("output", 0.0)
|
||||
per_mtok_cache_read = pricing.get("cache_read", per_mtok_in)
|
||||
per_mtok_cache_write = pricing.get("cache_creation", per_mtok_in)
|
||||
|
||||
plain_input = max(input_tokens - cached_tokens - cache_creation_tokens, 0)
|
||||
total = (
|
||||
plain_input * per_mtok_in
|
||||
+ cached_tokens * per_mtok_cache_read
|
||||
+ cache_creation_tokens * per_mtok_cache_write
|
||||
+ output_tokens * per_mtok_out
|
||||
) / 1_000_000
|
||||
return float(total) if total > 0 else 0.0
|
||||
|
||||
|
||||
def _extract_cost(response: Any, model: str) -> float:
|
||||
"""Pull the USD cost for a non-streaming completion response.
|
||||
|
||||
Sources checked, in priority order:
|
||||
1. ``usage.cost`` — populated when OpenRouter returns native cost via
|
||||
``usage: {include: true}`` or when ``litellm.include_cost_in_streaming_usage``
|
||||
is on.
|
||||
2. ``response._hidden_params["response_cost"]`` — set by LiteLLM's
|
||||
logging layer after most successful completions.
|
||||
3. ``litellm.completion_cost(...)`` — computes from the model pricing
|
||||
table; works across Anthropic, OpenAI, and OpenRouter as long as the
|
||||
model is in LiteLLM's catalog.
|
||||
4. ``pricing_usd_per_mtok`` from the curated model catalog — covers
|
||||
models (e.g. GLM, Kimi, MiniMax) that LiteLLM doesn't price.
|
||||
|
||||
Returns 0.0 for unpriced models or unexpected response shapes — cost is a
|
||||
display concern, never let it break the hot path. For streaming paths
|
||||
where the aggregate response isn't a full ``ModelResponse``, use
|
||||
:func:`_cost_from_tokens` with the already-extracted token counts.
|
||||
"""
|
||||
if response is None:
|
||||
return 0.0
|
||||
usage = getattr(response, "usage", None)
|
||||
usage_cost = getattr(usage, "cost", None) if usage is not None else None
|
||||
if isinstance(usage_cost, (int, float)) and usage_cost > 0:
|
||||
return float(usage_cost)
|
||||
|
||||
hidden = getattr(response, "_hidden_params", None)
|
||||
if isinstance(hidden, dict):
|
||||
hp_cost = hidden.get("response_cost")
|
||||
if isinstance(hp_cost, (int, float)) and hp_cost > 0:
|
||||
return float(hp_cost)
|
||||
|
||||
try:
|
||||
import litellm as _litellm
|
||||
|
||||
computed = _litellm.completion_cost(completion_response=response, model=model)
|
||||
if isinstance(computed, (int, float)) and computed > 0:
|
||||
return float(computed)
|
||||
except Exception as exc:
|
||||
logger.debug("[cost] completion_cost failed for %s: %s", model, exc)
|
||||
|
||||
if usage is not None:
|
||||
input_tokens = int(getattr(usage, "prompt_tokens", 0) or 0)
|
||||
output_tokens = int(getattr(usage, "completion_tokens", 0) or 0)
|
||||
cache_read, cache_creation = _extract_cache_tokens(usage)
|
||||
fallback = _cost_from_catalog_pricing(model, input_tokens, output_tokens, cache_read, cache_creation)
|
||||
if fallback > 0:
|
||||
return fallback
|
||||
return 0.0
|
||||
|
||||
|
||||
def _cost_from_tokens(
|
||||
model: str,
|
||||
input_tokens: int,
|
||||
output_tokens: int,
|
||||
cached_tokens: int = 0,
|
||||
cache_creation_tokens: int = 0,
|
||||
) -> float:
|
||||
"""Compute USD cost from already-normalized token counts.
|
||||
|
||||
Used on streaming paths where the aggregate ``response`` is the stream
|
||||
wrapper (not a full ``ModelResponse``) and ``litellm.completion_cost`` on
|
||||
it either no-ops or raises. Calls ``litellm.cost_per_token`` directly
|
||||
with the cache-aware inputs so Anthropic's 5-min-write / cache-read
|
||||
multipliers are applied correctly.
|
||||
"""
|
||||
if not model or (input_tokens == 0 and output_tokens == 0):
|
||||
return 0.0
|
||||
try:
|
||||
import litellm as _litellm
|
||||
|
||||
prompt_cost, completion_cost = _litellm.cost_per_token(
|
||||
model=model,
|
||||
prompt_tokens=input_tokens,
|
||||
completion_tokens=output_tokens,
|
||||
cache_read_input_tokens=cached_tokens,
|
||||
cache_creation_input_tokens=cache_creation_tokens,
|
||||
)
|
||||
total = (prompt_cost or 0.0) + (completion_cost or 0.0)
|
||||
if total > 0:
|
||||
return float(total)
|
||||
except Exception as exc:
|
||||
logger.debug("[cost] cost_per_token failed for %s: %s", model, exc)
|
||||
return _cost_from_catalog_pricing(model, input_tokens, output_tokens, cached_tokens, cache_creation_tokens)
|
||||
|
||||
|
||||
def _extract_cache_tokens(usage: Any) -> tuple[int, int]:
|
||||
"""Pull (cache_read, cache_creation) from a LiteLLM usage object.
|
||||
|
||||
Both are subsets of ``prompt_tokens`` already — providers count them
|
||||
inside the input total. Surface separately for visibility, never sum.
|
||||
|
||||
Field names vary by provider/proxy; check the known shapes in priority
|
||||
order and fall back to 0:
|
||||
|
||||
cache_read:
|
||||
- ``prompt_tokens_details.cached_tokens`` — OpenAI-shape; also what
|
||||
LiteLLM normalizes Anthropic and OpenRouter into.
|
||||
- ``cache_read_input_tokens`` — raw Anthropic field name.
|
||||
|
||||
cache_creation:
|
||||
- ``prompt_tokens_details.cache_write_tokens`` — OpenRouter's
|
||||
normalized field for cache writes (verified empirically against
|
||||
``openrouter/anthropic/*`` and ``openrouter/z-ai/*`` responses).
|
||||
- ``cache_creation_input_tokens`` — raw Anthropic top-level field.
|
||||
"""
|
||||
if not usage:
|
||||
return 0, 0
|
||||
_details = getattr(usage, "prompt_tokens_details", None)
|
||||
cache_read = (
|
||||
getattr(_details, "cached_tokens", 0) or 0
|
||||
if _details is not None
|
||||
else getattr(usage, "cache_read_input_tokens", 0) or 0
|
||||
)
|
||||
cache_creation = (getattr(_details, "cache_write_tokens", 0) or 0 if _details is not None else 0) or (
|
||||
getattr(usage, "cache_creation_input_tokens", 0) or 0
|
||||
)
|
||||
return cache_read, cache_creation
|
||||
|
||||
|
||||
def _estimate_tokens(model: str, messages: list[dict]) -> tuple[int, str]:
|
||||
"""Estimate token count for messages. Returns (token_count, method)."""
|
||||
# Try litellm's token counter first
|
||||
@@ -1015,12 +1244,17 @@ class LiteLLMProvider(LLMProvider):
|
||||
usage = response.usage
|
||||
input_tokens = usage.prompt_tokens if usage else 0
|
||||
output_tokens = usage.completion_tokens if usage else 0
|
||||
cached_tokens, cache_creation_tokens = _extract_cache_tokens(usage)
|
||||
cost_usd = _extract_cost(response, self.model)
|
||||
|
||||
return LLMResponse(
|
||||
content=content,
|
||||
model=response.model or self.model,
|
||||
input_tokens=input_tokens,
|
||||
output_tokens=output_tokens,
|
||||
cached_tokens=cached_tokens,
|
||||
cache_creation_tokens=cache_creation_tokens,
|
||||
cost_usd=cost_usd,
|
||||
stop_reason=response.choices[0].finish_reason or "",
|
||||
raw_response=response,
|
||||
)
|
||||
@@ -1169,8 +1403,16 @@ class LiteLLMProvider(LLMProvider):
|
||||
response_format: dict[str, Any] | None = None,
|
||||
json_mode: bool = False,
|
||||
max_retries: int | None = None,
|
||||
system_dynamic_suffix: str | None = None,
|
||||
) -> LLMResponse:
|
||||
"""Async version of complete(). Uses litellm.acompletion — non-blocking."""
|
||||
"""Async version of complete(). Uses litellm.acompletion — non-blocking.
|
||||
|
||||
``system_dynamic_suffix`` is an optional per-turn tail. When set and
|
||||
the provider honors ``cache_control``, ``system`` is sent as the
|
||||
cached prefix and the suffix trails as an uncached second content
|
||||
block. Otherwise the two strings are concatenated into a single
|
||||
system message (legacy behavior).
|
||||
"""
|
||||
# Codex ChatGPT backend requires streaming — route through stream() which
|
||||
# already handles Codex quirks and has proper tool call accumulation.
|
||||
if self._codex_backend:
|
||||
@@ -1181,6 +1423,7 @@ class LiteLLMProvider(LLMProvider):
|
||||
max_tokens=max_tokens,
|
||||
response_format=response_format,
|
||||
json_mode=json_mode,
|
||||
system_dynamic_suffix=system_dynamic_suffix,
|
||||
)
|
||||
return await self._collect_stream_to_response(stream_iter)
|
||||
|
||||
@@ -1188,10 +1431,8 @@ class LiteLLMProvider(LLMProvider):
|
||||
if self._claude_code_oauth:
|
||||
billing = _claude_code_billing_header(messages)
|
||||
full_messages.append({"role": "system", "content": billing})
|
||||
if system:
|
||||
sys_msg: dict[str, Any] = {"role": "system", "content": system}
|
||||
if _model_supports_cache_control(self.model):
|
||||
sys_msg["cache_control"] = {"type": "ephemeral"}
|
||||
sys_msg = _build_system_message(system, system_dynamic_suffix, self.model)
|
||||
if sys_msg is not None:
|
||||
full_messages.append(sys_msg)
|
||||
full_messages.extend(messages)
|
||||
|
||||
@@ -1228,12 +1469,17 @@ class LiteLLMProvider(LLMProvider):
|
||||
usage = response.usage
|
||||
input_tokens = usage.prompt_tokens if usage else 0
|
||||
output_tokens = usage.completion_tokens if usage else 0
|
||||
cached_tokens, cache_creation_tokens = _extract_cache_tokens(usage)
|
||||
cost_usd = _extract_cost(response, self.model)
|
||||
|
||||
return LLMResponse(
|
||||
content=content,
|
||||
model=response.model or self.model,
|
||||
input_tokens=input_tokens,
|
||||
output_tokens=output_tokens,
|
||||
cached_tokens=cached_tokens,
|
||||
cache_creation_tokens=cache_creation_tokens,
|
||||
cost_usd=cost_usd,
|
||||
stop_reason=response.choices[0].finish_reason or "",
|
||||
raw_response=response,
|
||||
)
|
||||
@@ -1619,6 +1865,7 @@ class LiteLLMProvider(LLMProvider):
|
||||
messages: list[dict[str, Any]],
|
||||
system: str,
|
||||
tools: list[Tool],
|
||||
system_dynamic_suffix: str | None = None,
|
||||
) -> list[dict[str, Any]]:
|
||||
"""Build a JSON-only prompt for models without native tool support."""
|
||||
tool_specs = [
|
||||
@@ -1646,7 +1893,19 @@ class LiteLLMProvider(LLMProvider):
|
||||
)
|
||||
compat_system = compat_instruction if not system else f"{system}\n\n{compat_instruction}"
|
||||
|
||||
full_messages: list[dict[str, Any]] = [{"role": "system", "content": compat_system}]
|
||||
# If the routed sub-provider honors cache_control (e.g.
|
||||
# openrouter/anthropic/*), split the static prefix from the dynamic
|
||||
# suffix so the prefix stays cache-warm across turns. Otherwise fall
|
||||
# back to a single concatenated string.
|
||||
system_message = _build_system_message(
|
||||
compat_system,
|
||||
system_dynamic_suffix,
|
||||
self.model,
|
||||
)
|
||||
|
||||
full_messages: list[dict[str, Any]] = []
|
||||
if system_message is not None:
|
||||
full_messages.append(system_message)
|
||||
full_messages.extend(messages)
|
||||
return [
|
||||
message
|
||||
@@ -1660,9 +1919,21 @@ class LiteLLMProvider(LLMProvider):
|
||||
system: str,
|
||||
tools: list[Tool],
|
||||
max_tokens: int,
|
||||
system_dynamic_suffix: str | None = None,
|
||||
) -> LLMResponse:
|
||||
"""Emulate tool calling via JSON when OpenRouter rejects native tools."""
|
||||
full_messages = self._build_openrouter_tool_compat_messages(messages, system, tools)
|
||||
"""Emulate tool calling via JSON when OpenRouter rejects native tools.
|
||||
|
||||
When the routed sub-provider honors ``cache_control`` (e.g.
|
||||
``openrouter/anthropic/*``), the message builder splits the static
|
||||
prefix from the dynamic suffix so the prefix stays cache-warm.
|
||||
Otherwise the suffix is concatenated into a single system string.
|
||||
"""
|
||||
full_messages = self._build_openrouter_tool_compat_messages(
|
||||
messages,
|
||||
system,
|
||||
tools,
|
||||
system_dynamic_suffix=system_dynamic_suffix,
|
||||
)
|
||||
kwargs: dict[str, Any] = {
|
||||
"model": self.model,
|
||||
"messages": full_messages,
|
||||
@@ -1683,6 +1954,8 @@ class LiteLLMProvider(LLMProvider):
|
||||
usage = response.usage
|
||||
input_tokens = usage.prompt_tokens if usage else 0
|
||||
output_tokens = usage.completion_tokens if usage else 0
|
||||
cached_tokens, cache_creation_tokens = _extract_cache_tokens(usage)
|
||||
cost_usd = _extract_cost(response, self.model)
|
||||
stop_reason = "tool_calls" if tool_calls else (response.choices[0].finish_reason or "stop")
|
||||
|
||||
return LLMResponse(
|
||||
@@ -1690,6 +1963,9 @@ class LiteLLMProvider(LLMProvider):
|
||||
model=response.model or self.model,
|
||||
input_tokens=input_tokens,
|
||||
output_tokens=output_tokens,
|
||||
cached_tokens=cached_tokens,
|
||||
cache_creation_tokens=cache_creation_tokens,
|
||||
cost_usd=cost_usd,
|
||||
stop_reason=stop_reason,
|
||||
raw_response={
|
||||
"compat_mode": "openrouter_tool_emulation",
|
||||
@@ -1704,6 +1980,7 @@ class LiteLLMProvider(LLMProvider):
|
||||
system: str,
|
||||
tools: list[Tool],
|
||||
max_tokens: int,
|
||||
system_dynamic_suffix: str | None = None,
|
||||
) -> AsyncIterator[StreamEvent]:
|
||||
"""Fallback stream for OpenRouter models without native tool support."""
|
||||
from framework.llm.stream_events import (
|
||||
@@ -1724,6 +2001,7 @@ class LiteLLMProvider(LLMProvider):
|
||||
system=system,
|
||||
tools=tools,
|
||||
max_tokens=max_tokens,
|
||||
system_dynamic_suffix=system_dynamic_suffix,
|
||||
)
|
||||
except Exception as e:
|
||||
yield StreamErrorEvent(error=str(e), recoverable=False)
|
||||
@@ -1747,6 +2025,9 @@ class LiteLLMProvider(LLMProvider):
|
||||
stop_reason=response.stop_reason,
|
||||
input_tokens=response.input_tokens,
|
||||
output_tokens=response.output_tokens,
|
||||
cached_tokens=response.cached_tokens,
|
||||
cache_creation_tokens=response.cache_creation_tokens,
|
||||
cost_usd=response.cost_usd,
|
||||
model=response.model,
|
||||
)
|
||||
|
||||
@@ -1758,6 +2039,7 @@ class LiteLLMProvider(LLMProvider):
|
||||
max_tokens: int,
|
||||
response_format: dict[str, Any] | None,
|
||||
json_mode: bool,
|
||||
system_dynamic_suffix: str | None = None,
|
||||
) -> AsyncIterator[StreamEvent]:
|
||||
"""Fallback path: convert non-stream completion to stream events.
|
||||
|
||||
@@ -1781,6 +2063,7 @@ class LiteLLMProvider(LLMProvider):
|
||||
max_tokens=max_tokens,
|
||||
response_format=response_format,
|
||||
json_mode=json_mode,
|
||||
system_dynamic_suffix=system_dynamic_suffix,
|
||||
)
|
||||
except Exception as e:
|
||||
yield StreamErrorEvent(error=str(e), recoverable=False)
|
||||
@@ -1812,6 +2095,9 @@ class LiteLLMProvider(LLMProvider):
|
||||
stop_reason=response.stop_reason or "stop",
|
||||
input_tokens=response.input_tokens,
|
||||
output_tokens=response.output_tokens,
|
||||
cached_tokens=response.cached_tokens,
|
||||
cache_creation_tokens=response.cache_creation_tokens,
|
||||
cost_usd=response.cost_usd,
|
||||
model=response.model,
|
||||
)
|
||||
|
||||
@@ -1823,6 +2109,7 @@ class LiteLLMProvider(LLMProvider):
|
||||
max_tokens: int = 4096,
|
||||
response_format: dict[str, Any] | None = None,
|
||||
json_mode: bool = False,
|
||||
system_dynamic_suffix: str | None = None,
|
||||
) -> AsyncIterator[StreamEvent]:
|
||||
"""Stream a completion via litellm.acompletion(stream=True).
|
||||
|
||||
@@ -1833,6 +2120,9 @@ class LiteLLMProvider(LLMProvider):
|
||||
Empty responses (e.g. Gemini stealth rate-limits that return 200
|
||||
with no content) are retried with exponential backoff, mirroring
|
||||
the retry behaviour of ``_completion_with_rate_limit_retry``.
|
||||
|
||||
``system_dynamic_suffix`` is an optional per-turn tail. See
|
||||
``acomplete`` docstring for the two-block split semantics.
|
||||
"""
|
||||
from framework.llm.stream_events import (
|
||||
FinishEvent,
|
||||
@@ -1852,6 +2142,7 @@ class LiteLLMProvider(LLMProvider):
|
||||
max_tokens=max_tokens,
|
||||
response_format=response_format,
|
||||
json_mode=json_mode,
|
||||
system_dynamic_suffix=system_dynamic_suffix,
|
||||
):
|
||||
yield event
|
||||
return
|
||||
@@ -1862,6 +2153,7 @@ class LiteLLMProvider(LLMProvider):
|
||||
system=system,
|
||||
tools=tools,
|
||||
max_tokens=max_tokens,
|
||||
system_dynamic_suffix=system_dynamic_suffix,
|
||||
):
|
||||
yield event
|
||||
return
|
||||
@@ -1870,10 +2162,8 @@ class LiteLLMProvider(LLMProvider):
|
||||
if self._claude_code_oauth:
|
||||
billing = _claude_code_billing_header(messages)
|
||||
full_messages.append({"role": "system", "content": billing})
|
||||
if system:
|
||||
sys_msg: dict[str, Any] = {"role": "system", "content": system}
|
||||
if _model_supports_cache_control(self.model):
|
||||
sys_msg["cache_control"] = {"type": "ephemeral"}
|
||||
sys_msg = _build_system_message(system, system_dynamic_suffix, self.model)
|
||||
if sys_msg is not None:
|
||||
full_messages.append(sys_msg)
|
||||
full_messages.extend(messages)
|
||||
|
||||
@@ -1959,6 +2249,10 @@ class LiteLLMProvider(LLMProvider):
|
||||
if self._codex_backend:
|
||||
kwargs.pop("max_tokens", None)
|
||||
kwargs.pop("stream_options", None)
|
||||
# Pass store directly to OpenAI in case litellm drops it as unknown
|
||||
if "extra_body" not in kwargs:
|
||||
kwargs["extra_body"] = {}
|
||||
kwargs["extra_body"]["store"] = False
|
||||
|
||||
request_summary = _summarize_request_for_log(kwargs)
|
||||
logger.debug(
|
||||
@@ -2105,37 +2399,44 @@ class LiteLLMProvider(LLMProvider):
|
||||
type(usage).__name__,
|
||||
)
|
||||
cached_tokens = 0
|
||||
cache_creation_tokens = 0
|
||||
if usage:
|
||||
input_tokens = getattr(usage, "prompt_tokens", 0) or 0
|
||||
output_tokens = getattr(usage, "completion_tokens", 0) or 0
|
||||
_details = getattr(usage, "prompt_tokens_details", None)
|
||||
cached_tokens = (
|
||||
getattr(_details, "cached_tokens", 0) or 0
|
||||
if _details is not None
|
||||
else getattr(usage, "cache_read_input_tokens", 0) or 0
|
||||
)
|
||||
cached_tokens, cache_creation_tokens = _extract_cache_tokens(usage)
|
||||
logger.debug(
|
||||
"[tokens] finish-chunk usage: input=%d output=%d cached=%d model=%s",
|
||||
"[tokens] finish-chunk usage: input=%d output=%d cached=%d cache_creation=%d model=%s",
|
||||
input_tokens,
|
||||
output_tokens,
|
||||
cached_tokens,
|
||||
cache_creation_tokens,
|
||||
self.model,
|
||||
)
|
||||
|
||||
logger.debug(
|
||||
"[tokens] finish event: input=%d output=%d cached=%d stop=%s model=%s",
|
||||
"[tokens] finish event: input=%d output=%d cached=%d cache_creation=%d stop=%s model=%s",
|
||||
input_tokens,
|
||||
output_tokens,
|
||||
cached_tokens,
|
||||
cache_creation_tokens,
|
||||
choice.finish_reason,
|
||||
self.model,
|
||||
)
|
||||
cost_usd = _cost_from_tokens(
|
||||
self.model,
|
||||
input_tokens,
|
||||
output_tokens,
|
||||
cached_tokens,
|
||||
cache_creation_tokens,
|
||||
)
|
||||
tail_events.append(
|
||||
FinishEvent(
|
||||
stop_reason=choice.finish_reason,
|
||||
input_tokens=input_tokens,
|
||||
output_tokens=output_tokens,
|
||||
cached_tokens=cached_tokens,
|
||||
cache_creation_tokens=cache_creation_tokens,
|
||||
cost_usd=cost_usd,
|
||||
model=self.model,
|
||||
)
|
||||
)
|
||||
@@ -2155,19 +2456,36 @@ class LiteLLMProvider(LLMProvider):
|
||||
_usage = calculate_total_usage(chunks=_chunks)
|
||||
input_tokens = _usage.prompt_tokens or 0
|
||||
output_tokens = _usage.completion_tokens or 0
|
||||
_details = getattr(_usage, "prompt_tokens_details", None)
|
||||
cached_tokens = (
|
||||
getattr(_details, "cached_tokens", 0) or 0
|
||||
if _details is not None
|
||||
else getattr(_usage, "cache_read_input_tokens", 0) or 0
|
||||
)
|
||||
# `calculate_total_usage` aggregates token totals
|
||||
# but discards `prompt_tokens_details` — which is
|
||||
# where OpenRouter puts `cached_tokens` and
|
||||
# `cache_write_tokens`. Recover them directly
|
||||
# from the most recent chunk that carries usage.
|
||||
cached_tokens, cache_creation_tokens = 0, 0
|
||||
for _raw in reversed(_chunks):
|
||||
_raw_usage = getattr(_raw, "usage", None)
|
||||
if _raw_usage is None:
|
||||
continue
|
||||
_cr, _cc = _extract_cache_tokens(_raw_usage)
|
||||
if _cr or _cc:
|
||||
cached_tokens, cache_creation_tokens = _cr, _cc
|
||||
break
|
||||
logger.debug(
|
||||
"[tokens] post-loop chunks fallback: input=%d output=%d cached=%d model=%s",
|
||||
"[tokens] post-loop chunks fallback: input=%d output=%d "
|
||||
"cached=%d cache_creation=%d model=%s",
|
||||
input_tokens,
|
||||
output_tokens,
|
||||
cached_tokens,
|
||||
cache_creation_tokens,
|
||||
self.model,
|
||||
)
|
||||
cost_usd = _cost_from_tokens(
|
||||
self.model,
|
||||
input_tokens,
|
||||
output_tokens,
|
||||
cached_tokens,
|
||||
cache_creation_tokens,
|
||||
)
|
||||
# Patch the FinishEvent already queued with 0 tokens
|
||||
for _i, _ev in enumerate(tail_events):
|
||||
if isinstance(_ev, FinishEvent) and _ev.input_tokens == 0:
|
||||
@@ -2176,6 +2494,8 @@ class LiteLLMProvider(LLMProvider):
|
||||
input_tokens=input_tokens,
|
||||
output_tokens=output_tokens,
|
||||
cached_tokens=cached_tokens,
|
||||
cache_creation_tokens=cache_creation_tokens,
|
||||
cost_usd=cost_usd,
|
||||
model=_ev.model,
|
||||
)
|
||||
break
|
||||
@@ -2386,6 +2706,8 @@ class LiteLLMProvider(LLMProvider):
|
||||
tool_calls: list[dict[str, Any]] = []
|
||||
input_tokens = 0
|
||||
output_tokens = 0
|
||||
cached_tokens = 0
|
||||
cache_creation_tokens = 0
|
||||
stop_reason = ""
|
||||
model = self.model
|
||||
|
||||
@@ -2403,6 +2725,8 @@ class LiteLLMProvider(LLMProvider):
|
||||
elif isinstance(event, FinishEvent):
|
||||
input_tokens = event.input_tokens
|
||||
output_tokens = event.output_tokens
|
||||
cached_tokens = event.cached_tokens
|
||||
cache_creation_tokens = event.cache_creation_tokens
|
||||
stop_reason = event.stop_reason
|
||||
if event.model:
|
||||
model = event.model
|
||||
@@ -2415,6 +2739,8 @@ class LiteLLMProvider(LLMProvider):
|
||||
model=model,
|
||||
input_tokens=input_tokens,
|
||||
output_tokens=output_tokens,
|
||||
cached_tokens=cached_tokens,
|
||||
cache_creation_tokens=cache_creation_tokens,
|
||||
stop_reason=stop_reason,
|
||||
raw_response={"tool_calls": tool_calls} if tool_calls else None,
|
||||
)
|
||||
|
||||
@@ -155,8 +155,11 @@ class MockLLMProvider(LLMProvider):
|
||||
response_format: dict[str, Any] | None = None,
|
||||
json_mode: bool = False,
|
||||
max_retries: int | None = None,
|
||||
system_dynamic_suffix: str | None = None,
|
||||
) -> LLMResponse:
|
||||
"""Async mock completion (no I/O, returns immediately)."""
|
||||
if system_dynamic_suffix:
|
||||
system = f"{system}\n\n{system_dynamic_suffix}" if system else system_dynamic_suffix
|
||||
return self.complete(
|
||||
messages=messages,
|
||||
system=system,
|
||||
@@ -173,6 +176,7 @@ class MockLLMProvider(LLMProvider):
|
||||
system: str = "",
|
||||
tools: list[Tool] | None = None,
|
||||
max_tokens: int = 4096,
|
||||
system_dynamic_suffix: str | None = None,
|
||||
) -> AsyncIterator[StreamEvent]:
|
||||
"""Stream a mock completion as word-level TextDeltaEvents.
|
||||
|
||||
@@ -180,6 +184,8 @@ class MockLLMProvider(LLMProvider):
|
||||
TextDeltaEvent with an accumulating snapshot, exercising the full
|
||||
streaming pipeline without any API calls.
|
||||
"""
|
||||
if system_dynamic_suffix:
|
||||
system = f"{system}\n\n{system_dynamic_suffix}" if system else system_dynamic_suffix
|
||||
content = self._generate_mock_response(system=system, json_mode=False)
|
||||
words = content.split(" ")
|
||||
accumulated = ""
|
||||
|
||||
@@ -9,47 +9,65 @@
|
||||
"label": "Haiku 4.5 - Fast + cheap",
|
||||
"recommended": false,
|
||||
"max_tokens": 64000,
|
||||
"max_context_tokens": 136000
|
||||
"max_context_tokens": 136000,
|
||||
"supports_vision": true
|
||||
},
|
||||
{
|
||||
"id": "claude-sonnet-4-5-20250929",
|
||||
"label": "Sonnet 4.5 - Best balance",
|
||||
"recommended": false,
|
||||
"max_tokens": 64000,
|
||||
"max_context_tokens": 136000
|
||||
"max_context_tokens": 136000,
|
||||
"supports_vision": true
|
||||
},
|
||||
{
|
||||
"id": "claude-opus-4-6",
|
||||
"label": "Opus 4.6 - Most capable",
|
||||
"recommended": true,
|
||||
"max_tokens": 128000,
|
||||
"max_context_tokens": 872000
|
||||
"max_context_tokens": 872000,
|
||||
"supports_vision": true
|
||||
}
|
||||
]
|
||||
},
|
||||
"openai": {
|
||||
"default_model": "gpt-5.4",
|
||||
"default_model": "gpt-5.5",
|
||||
"models": [
|
||||
{
|
||||
"id": "gpt-5.4",
|
||||
"label": "GPT-5.4 - Best intelligence",
|
||||
"id": "gpt-5.5",
|
||||
"label": "GPT-5.5 - Frontier coding + reasoning",
|
||||
"recommended": true,
|
||||
"max_tokens": 128000,
|
||||
"max_context_tokens": 960000
|
||||
"max_context_tokens": 1050000,
|
||||
"pricing_usd_per_mtok": {
|
||||
"input": 5.00,
|
||||
"output": 30.00
|
||||
},
|
||||
"supports_vision": true
|
||||
},
|
||||
{
|
||||
"id": "gpt-5.4",
|
||||
"label": "GPT-5.4 - Previous flagship",
|
||||
"recommended": false,
|
||||
"max_tokens": 128000,
|
||||
"max_context_tokens": 960000,
|
||||
"supports_vision": true
|
||||
},
|
||||
{
|
||||
"id": "gpt-5.4-mini",
|
||||
"label": "GPT-5.4 Mini - Faster + cheaper",
|
||||
"recommended": false,
|
||||
"max_tokens": 128000,
|
||||
"max_context_tokens": 400000
|
||||
"max_context_tokens": 400000,
|
||||
"supports_vision": true
|
||||
},
|
||||
{
|
||||
"id": "gpt-5.4-nano",
|
||||
"label": "GPT-5.4 Nano - Cheapest high-volume",
|
||||
"recommended": false,
|
||||
"max_tokens": 128000,
|
||||
"max_context_tokens": 400000
|
||||
"max_context_tokens": 400000,
|
||||
"supports_vision": true
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -61,14 +79,16 @@
|
||||
"label": "Gemini 3 Flash - Fast",
|
||||
"recommended": false,
|
||||
"max_tokens": 32768,
|
||||
"max_context_tokens": 240000
|
||||
"max_context_tokens": 240000,
|
||||
"supports_vision": true
|
||||
},
|
||||
{
|
||||
"id": "gemini-3.1-pro-preview-customtools",
|
||||
"label": "Gemini 3.1 Pro - Best quality",
|
||||
"recommended": true,
|
||||
"max_tokens": 32768,
|
||||
"max_context_tokens": 240000
|
||||
"max_context_tokens": 240000,
|
||||
"supports_vision": true
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -80,28 +100,32 @@
|
||||
"label": "GPT-OSS 120B - Best reasoning",
|
||||
"recommended": true,
|
||||
"max_tokens": 65536,
|
||||
"max_context_tokens": 131072
|
||||
"max_context_tokens": 131072,
|
||||
"supports_vision": false
|
||||
},
|
||||
{
|
||||
"id": "openai/gpt-oss-20b",
|
||||
"label": "GPT-OSS 20B - Fast + cheaper",
|
||||
"recommended": false,
|
||||
"max_tokens": 65536,
|
||||
"max_context_tokens": 131072
|
||||
"max_context_tokens": 131072,
|
||||
"supports_vision": false
|
||||
},
|
||||
{
|
||||
"id": "llama-3.3-70b-versatile",
|
||||
"label": "Llama 3.3 70B - General purpose",
|
||||
"recommended": false,
|
||||
"max_tokens": 32768,
|
||||
"max_context_tokens": 131072
|
||||
"max_context_tokens": 131072,
|
||||
"supports_vision": false
|
||||
},
|
||||
{
|
||||
"id": "llama-3.1-8b-instant",
|
||||
"label": "Llama 3.1 8B - Fastest",
|
||||
"recommended": false,
|
||||
"max_tokens": 131072,
|
||||
"max_context_tokens": 131072
|
||||
"max_context_tokens": 131072,
|
||||
"supports_vision": false
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -113,28 +137,24 @@
|
||||
"label": "GPT-OSS 120B - Best production reasoning",
|
||||
"recommended": true,
|
||||
"max_tokens": 40960,
|
||||
"max_context_tokens": 131072
|
||||
},
|
||||
{
|
||||
"id": "llama3.1-8b",
|
||||
"label": "Llama 3.1 8B - Fastest production",
|
||||
"recommended": false,
|
||||
"max_tokens": 8192,
|
||||
"max_context_tokens": 32768
|
||||
"max_context_tokens": 131072,
|
||||
"supports_vision": false
|
||||
},
|
||||
{
|
||||
"id": "zai-glm-4.7",
|
||||
"label": "Z.ai GLM 4.7 - Strong coding preview",
|
||||
"recommended": true,
|
||||
"max_tokens": 40960,
|
||||
"max_context_tokens": 131072
|
||||
"max_context_tokens": 131072,
|
||||
"supports_vision": false
|
||||
},
|
||||
{
|
||||
"id": "qwen-3-235b-a22b-instruct-2507",
|
||||
"label": "Qwen 3 235B Instruct - Frontier preview",
|
||||
"recommended": false,
|
||||
"max_tokens": 40960,
|
||||
"max_context_tokens": 131072
|
||||
"max_context_tokens": 131072,
|
||||
"supports_vision": false
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -145,15 +165,21 @@
|
||||
"id": "MiniMax-M2.7",
|
||||
"label": "MiniMax M2.7 - Best coding quality",
|
||||
"recommended": true,
|
||||
"max_tokens": 32768,
|
||||
"max_context_tokens": 204800
|
||||
"max_tokens": 40960,
|
||||
"max_context_tokens": 180000,
|
||||
"pricing_usd_per_mtok": {
|
||||
"input": 0.30,
|
||||
"output": 1.20
|
||||
},
|
||||
"supports_vision": false
|
||||
},
|
||||
{
|
||||
"id": "MiniMax-M2.5",
|
||||
"label": "MiniMax M2.5 - Strong value",
|
||||
"recommended": false,
|
||||
"max_tokens": 32768,
|
||||
"max_context_tokens": 204800
|
||||
"max_tokens": 40960,
|
||||
"max_context_tokens": 180000,
|
||||
"supports_vision": false
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -165,28 +191,32 @@
|
||||
"label": "Mistral Large 3 - Best quality",
|
||||
"recommended": true,
|
||||
"max_tokens": 32768,
|
||||
"max_context_tokens": 256000
|
||||
"max_context_tokens": 256000,
|
||||
"supports_vision": true
|
||||
},
|
||||
{
|
||||
"id": "mistral-medium-2508",
|
||||
"label": "Mistral Medium 3.1 - Balanced",
|
||||
"recommended": false,
|
||||
"max_tokens": 32768,
|
||||
"max_context_tokens": 128000
|
||||
"max_context_tokens": 128000,
|
||||
"supports_vision": true
|
||||
},
|
||||
{
|
||||
"id": "mistral-small-2603",
|
||||
"label": "Mistral Small 4 - Fast + capable",
|
||||
"recommended": false,
|
||||
"max_tokens": 32768,
|
||||
"max_context_tokens": 256000
|
||||
"max_context_tokens": 256000,
|
||||
"supports_vision": true
|
||||
},
|
||||
{
|
||||
"id": "codestral-2508",
|
||||
"label": "Codestral - Coding specialist",
|
||||
"recommended": false,
|
||||
"max_tokens": 32768,
|
||||
"max_context_tokens": 128000
|
||||
"max_context_tokens": 128000,
|
||||
"supports_vision": false
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -198,47 +228,71 @@
|
||||
"label": "DeepSeek V3.1 - Best general coding",
|
||||
"recommended": true,
|
||||
"max_tokens": 32768,
|
||||
"max_context_tokens": 128000
|
||||
"max_context_tokens": 128000,
|
||||
"supports_vision": false
|
||||
},
|
||||
{
|
||||
"id": "Qwen/Qwen3-Coder-480B-A35B-Instruct-FP8",
|
||||
"label": "Qwen3 Coder 480B - Advanced coding",
|
||||
"recommended": false,
|
||||
"max_tokens": 32768,
|
||||
"max_context_tokens": 262144
|
||||
"max_context_tokens": 262144,
|
||||
"supports_vision": false
|
||||
},
|
||||
{
|
||||
"id": "openai/gpt-oss-120b",
|
||||
"label": "GPT-OSS 120B - Strong reasoning",
|
||||
"recommended": false,
|
||||
"max_tokens": 32768,
|
||||
"max_context_tokens": 128000
|
||||
"max_context_tokens": 128000,
|
||||
"supports_vision": false
|
||||
},
|
||||
{
|
||||
"id": "meta-llama/Llama-3.3-70B-Instruct-Turbo",
|
||||
"label": "Llama 3.3 70B Turbo - Fast baseline",
|
||||
"recommended": false,
|
||||
"max_tokens": 32768,
|
||||
"max_context_tokens": 131072
|
||||
"max_context_tokens": 131072,
|
||||
"supports_vision": false
|
||||
}
|
||||
]
|
||||
},
|
||||
"deepseek": {
|
||||
"default_model": "deepseek-chat",
|
||||
"default_model": "deepseek-v4-pro",
|
||||
"models": [
|
||||
{
|
||||
"id": "deepseek-chat",
|
||||
"label": "DeepSeek Chat - Fast default",
|
||||
"id": "deepseek-v4-pro",
|
||||
"label": "DeepSeek V4 Pro - Most capable",
|
||||
"recommended": true,
|
||||
"max_tokens": 8192,
|
||||
"max_context_tokens": 128000
|
||||
"max_tokens": 384000,
|
||||
"max_context_tokens": 1000000,
|
||||
"pricing_usd_per_mtok": {
|
||||
"input": 1.74,
|
||||
"output": 3.48,
|
||||
"cache_read": 0.145
|
||||
},
|
||||
"supports_vision": false
|
||||
},
|
||||
{
|
||||
"id": "deepseek-v4-flash",
|
||||
"label": "DeepSeek V4 Flash - Fast + cheap",
|
||||
"recommended": true,
|
||||
"max_tokens": 384000,
|
||||
"max_context_tokens": 1000000,
|
||||
"pricing_usd_per_mtok": {
|
||||
"input": 0.14,
|
||||
"output": 0.28,
|
||||
"cache_read": 0.028
|
||||
},
|
||||
"supports_vision": false
|
||||
},
|
||||
{
|
||||
"id": "deepseek-reasoner",
|
||||
"label": "DeepSeek Reasoner - Deep thinking",
|
||||
"label": "DeepSeek Reasoner - Legacy (deprecating)",
|
||||
"recommended": false,
|
||||
"max_tokens": 64000,
|
||||
"max_context_tokens": 128000
|
||||
"max_context_tokens": 128000,
|
||||
"supports_vision": false
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -250,7 +304,13 @@
|
||||
"label": "Kimi K2.5 - Best coding",
|
||||
"recommended": true,
|
||||
"max_tokens": 32768,
|
||||
"max_context_tokens": 200000
|
||||
"max_context_tokens": 200000,
|
||||
"pricing_usd_per_mtok": {
|
||||
"input": 0.60,
|
||||
"output": 2.50,
|
||||
"cache_read": 0.15
|
||||
},
|
||||
"supports_vision": true
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -262,21 +322,30 @@
|
||||
"label": "Queen - Hive native",
|
||||
"recommended": true,
|
||||
"max_tokens": 32768,
|
||||
"max_context_tokens": 180000
|
||||
"max_context_tokens": 180000,
|
||||
"supports_vision": false
|
||||
},
|
||||
{
|
||||
"id": "kimi-2.5",
|
||||
"label": "Kimi 2.5 - Via Hive",
|
||||
"recommended": false,
|
||||
"max_tokens": 32768,
|
||||
"max_context_tokens": 240000
|
||||
"max_context_tokens": 240000,
|
||||
"supports_vision": true
|
||||
},
|
||||
{
|
||||
"id": "GLM-5",
|
||||
"label": "GLM-5 - Via Hive",
|
||||
"id": "glm-5.1",
|
||||
"label": "GLM-5.1 - Via Hive",
|
||||
"recommended": false,
|
||||
"max_tokens": 32768,
|
||||
"max_context_tokens": 180000
|
||||
"max_context_tokens": 180000,
|
||||
"pricing_usd_per_mtok": {
|
||||
"input": 1.40,
|
||||
"output": 4.40,
|
||||
"cache_read": 0.26,
|
||||
"cache_creation": 0.0
|
||||
},
|
||||
"supports_vision": false
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -288,35 +357,82 @@
|
||||
"label": "GPT-5.4 - Best overall",
|
||||
"recommended": true,
|
||||
"max_tokens": 128000,
|
||||
"max_context_tokens": 922000
|
||||
"max_context_tokens": 872000,
|
||||
"supports_vision": true
|
||||
},
|
||||
{
|
||||
"id": "anthropic/claude-sonnet-4.6",
|
||||
"label": "Claude Sonnet 4.6 - Best coding balance",
|
||||
"recommended": false,
|
||||
"max_tokens": 64000,
|
||||
"max_context_tokens": 936000
|
||||
"max_context_tokens": 872000,
|
||||
"supports_vision": true
|
||||
},
|
||||
{
|
||||
"id": "anthropic/claude-opus-4.6",
|
||||
"label": "Claude Opus 4.6 - Most capable",
|
||||
"recommended": false,
|
||||
"max_tokens": 128000,
|
||||
"max_context_tokens": 872000
|
||||
"max_context_tokens": 872000,
|
||||
"supports_vision": true
|
||||
},
|
||||
{
|
||||
"id": "google/gemini-3.1-pro-preview-customtools",
|
||||
"label": "Gemini 3.1 Pro Preview - Long-context reasoning",
|
||||
"recommended": false,
|
||||
"max_tokens": 32768,
|
||||
"max_context_tokens": 1048576
|
||||
"max_context_tokens": 872000,
|
||||
"supports_vision": true
|
||||
},
|
||||
{
|
||||
"id": "deepseek/deepseek-v3.2",
|
||||
"label": "DeepSeek V3.2 - Best value",
|
||||
"recommended": false,
|
||||
"id": "qwen/qwen3.6-plus",
|
||||
"label": "Qwen 3.6 Plus - Strong reasoning",
|
||||
"recommended": true,
|
||||
"max_tokens": 32768,
|
||||
"max_context_tokens": 163840
|
||||
"max_context_tokens": 240000,
|
||||
"supports_vision": false
|
||||
},
|
||||
{
|
||||
"id": "z-ai/glm-5v-turbo",
|
||||
"label": "GLM-5V Turbo - Vision capable",
|
||||
"recommended": true,
|
||||
"max_tokens": 32768,
|
||||
"max_context_tokens": 192000,
|
||||
"supports_vision": true
|
||||
},
|
||||
{
|
||||
"id": "z-ai/glm-5.1",
|
||||
"label": "GLM-5.1 - Better but Slower",
|
||||
"recommended": true,
|
||||
"max_tokens": 40960,
|
||||
"max_context_tokens": 192000,
|
||||
"pricing_usd_per_mtok": {
|
||||
"input": 1.40,
|
||||
"output": 4.40,
|
||||
"cache_read": 0.26,
|
||||
"cache_creation": 0.0
|
||||
},
|
||||
"supports_vision": false
|
||||
},
|
||||
{
|
||||
"id": "minimax/minimax-m2.7",
|
||||
"label": "Minimax M2.7 - Minimax flagship",
|
||||
"recommended": false,
|
||||
"max_tokens": 40960,
|
||||
"max_context_tokens": 180000,
|
||||
"pricing_usd_per_mtok": {
|
||||
"input": 0.30,
|
||||
"output": 1.20
|
||||
},
|
||||
"supports_vision": false
|
||||
},
|
||||
{
|
||||
"id": "xiaomi/mimo-v2-pro",
|
||||
"label": "MiMo V2 Pro - Xiaomi multimodal",
|
||||
"recommended": true,
|
||||
"max_tokens": 64000,
|
||||
"max_context_tokens": 240000,
|
||||
"supports_vision": true
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -331,7 +447,7 @@
|
||||
"zai_code": {
|
||||
"provider": "openai",
|
||||
"api_key_env_var": "ZAI_API_KEY",
|
||||
"model": "glm-5",
|
||||
"model": "glm-5.1",
|
||||
"max_tokens": 32768,
|
||||
"max_context_tokens": 180000,
|
||||
"api_base": "https://api.z.ai/api/coding/paas/v4"
|
||||
@@ -347,8 +463,8 @@
|
||||
"provider": "minimax",
|
||||
"api_key_env_var": "MINIMAX_API_KEY",
|
||||
"model": "MiniMax-M2.7",
|
||||
"max_tokens": 32768,
|
||||
"max_context_tokens": 204800,
|
||||
"max_tokens": 40960,
|
||||
"max_context_tokens": 180800,
|
||||
"api_base": "https://api.minimax.io/v1"
|
||||
},
|
||||
"kimi_code": {
|
||||
@@ -378,8 +494,8 @@
|
||||
"recommended": false
|
||||
},
|
||||
{
|
||||
"id": "GLM-5",
|
||||
"label": "GLM-5",
|
||||
"id": "glm-5.1",
|
||||
"label": "glm-5.1",
|
||||
"recommended": false
|
||||
}
|
||||
]
|
||||
@@ -397,4 +513,4 @@
|
||||
"api_base": "http://localhost:11434"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -27,6 +27,28 @@ def _require_list(value: Any, path: str) -> list[Any]:
|
||||
return value
|
||||
|
||||
|
||||
_PRICING_KEYS = ("input", "output", "cache_read", "cache_creation")
|
||||
|
||||
|
||||
def _validate_pricing(value: Any, path: str) -> None:
|
||||
"""Validate an optional ``pricing_usd_per_mtok`` block.
|
||||
|
||||
Keys are USD-per-million-tokens rates. ``input``/``output`` are required;
|
||||
``cache_read``/``cache_creation`` are optional. All values must be
|
||||
non-negative numbers. Used as a last-resort fallback when neither the
|
||||
provider nor LiteLLM's catalog reports a cost.
|
||||
"""
|
||||
pricing = _require_mapping(value, path)
|
||||
for key in ("input", "output"):
|
||||
if key not in pricing:
|
||||
raise ModelCatalogError(f"{path}.{key} is required")
|
||||
for key, rate in pricing.items():
|
||||
if key not in _PRICING_KEYS:
|
||||
raise ModelCatalogError(f"{path}.{key} is not a recognized pricing field")
|
||||
if not isinstance(rate, (int, float)) or isinstance(rate, bool) or rate < 0:
|
||||
raise ModelCatalogError(f"{path}.{key} must be a non-negative number")
|
||||
|
||||
|
||||
def _validate_model_catalog(data: dict[str, Any]) -> dict[str, Any]:
|
||||
providers = _require_mapping(data.get("providers"), "providers")
|
||||
|
||||
@@ -69,6 +91,14 @@ def _validate_model_catalog(data: dict[str, Any]) -> dict[str, Any]:
|
||||
if not isinstance(value, int) or value <= 0:
|
||||
raise ModelCatalogError(f"{model_path}.{key} must be a positive integer")
|
||||
|
||||
pricing = model_map.get("pricing_usd_per_mtok")
|
||||
if pricing is not None:
|
||||
_validate_pricing(pricing, f"{model_path}.pricing_usd_per_mtok")
|
||||
|
||||
supports_vision = model_map.get("supports_vision")
|
||||
if supports_vision is not None and not isinstance(supports_vision, bool):
|
||||
raise ModelCatalogError(f"{model_path}.supports_vision must be a boolean when present")
|
||||
|
||||
if not default_found:
|
||||
raise ModelCatalogError(
|
||||
f"{provider_path}.default_model={default_model!r} is not present in {provider_path}.models"
|
||||
@@ -184,6 +214,53 @@ def get_model_limits(provider: str, model_id: str) -> tuple[int, int] | None:
|
||||
return int(model["max_tokens"]), int(model["max_context_tokens"])
|
||||
|
||||
|
||||
def get_model_pricing(model_id: str) -> dict[str, float] | None:
|
||||
"""Return ``pricing_usd_per_mtok`` for a model id, searching all providers.
|
||||
|
||||
Returns ``None`` when the model is absent from the catalog or has no
|
||||
pricing entry. Used by the cost-extraction fallback in ``litellm.py``
|
||||
when the provider response and LiteLLM's catalog both come up empty.
|
||||
"""
|
||||
if not model_id:
|
||||
return None
|
||||
for provider_info in load_model_catalog()["providers"].values():
|
||||
for model in provider_info["models"]:
|
||||
if model["id"] == model_id:
|
||||
pricing = model.get("pricing_usd_per_mtok")
|
||||
if pricing is None:
|
||||
return None
|
||||
return {key: float(rate) for key, rate in pricing.items()}
|
||||
return None
|
||||
|
||||
|
||||
def model_supports_vision(model_id: str) -> bool:
|
||||
"""Return whether *model_id* supports image inputs per the curated catalog.
|
||||
|
||||
Looks up the bare model id (and the provider-prefix-stripped form) in the
|
||||
catalog. Returns the model's ``supports_vision`` flag when found, defaulting
|
||||
to ``True`` for unknown models or when the flag is absent — assume vision
|
||||
capable for hosted providers, since modern frontier models support images
|
||||
by default and the captioning fallback is more expensive than just letting
|
||||
the provider handle the image.
|
||||
"""
|
||||
if not model_id:
|
||||
return True
|
||||
|
||||
candidates = [model_id]
|
||||
if "/" in model_id:
|
||||
candidates.append(model_id.split("/", 1)[1])
|
||||
|
||||
for candidate in candidates:
|
||||
for provider_info in load_model_catalog()["providers"].values():
|
||||
for model in provider_info["models"]:
|
||||
if model["id"] == candidate:
|
||||
flag = model.get("supports_vision")
|
||||
if isinstance(flag, bool):
|
||||
return flag
|
||||
return True
|
||||
return True
|
||||
|
||||
|
||||
def get_preset(preset_id: str) -> dict[str, Any] | None:
|
||||
"""Return one preset entry."""
|
||||
preset = load_model_catalog()["presets"].get(preset_id)
|
||||
|
||||
@@ -10,12 +10,24 @@ from typing import Any
|
||||
|
||||
@dataclass
|
||||
class LLMResponse:
|
||||
"""Response from an LLM call."""
|
||||
"""Response from an LLM call.
|
||||
|
||||
``cached_tokens`` and ``cache_creation_tokens`` are subsets of
|
||||
``input_tokens`` (providers report them inside ``prompt_tokens``).
|
||||
Surface them for visibility; do not add to a total.
|
||||
|
||||
``cost_usd`` is the per-call USD cost when the provider / pricing table
|
||||
can produce one (Anthropic, OpenAI, OpenRouter are supported). 0.0 when
|
||||
unknown or unpriced — treat as "unreported", not "free".
|
||||
"""
|
||||
|
||||
content: str
|
||||
model: str
|
||||
input_tokens: int = 0
|
||||
output_tokens: int = 0
|
||||
cached_tokens: int = 0
|
||||
cache_creation_tokens: int = 0
|
||||
cost_usd: float = 0.0
|
||||
stop_reason: str = ""
|
||||
raw_response: Any = None
|
||||
|
||||
@@ -110,19 +122,28 @@ class LLMProvider(ABC):
|
||||
response_format: dict[str, Any] | None = None,
|
||||
json_mode: bool = False,
|
||||
max_retries: int | None = None,
|
||||
system_dynamic_suffix: str | None = None,
|
||||
) -> "LLMResponse":
|
||||
"""Async version of complete(). Non-blocking on the event loop.
|
||||
|
||||
Default implementation offloads the sync complete() to a thread pool.
|
||||
Subclasses SHOULD override for native async I/O.
|
||||
|
||||
``system_dynamic_suffix`` is an optional per-turn tail for providers
|
||||
that honor ``cache_control`` (see LiteLLMProvider for semantics).
|
||||
The default implementation concatenates it onto ``system`` since the
|
||||
sync ``complete()`` path does not support the split.
|
||||
"""
|
||||
combined_system = system
|
||||
if system_dynamic_suffix:
|
||||
combined_system = f"{system}\n\n{system_dynamic_suffix}" if system else system_dynamic_suffix
|
||||
loop = asyncio.get_running_loop()
|
||||
return await loop.run_in_executor(
|
||||
None,
|
||||
partial(
|
||||
self.complete,
|
||||
messages=messages,
|
||||
system=system,
|
||||
system=combined_system,
|
||||
tools=tools,
|
||||
max_tokens=max_tokens,
|
||||
response_format=response_format,
|
||||
@@ -137,6 +158,7 @@ class LLMProvider(ABC):
|
||||
system: str = "",
|
||||
tools: list[Tool] | None = None,
|
||||
max_tokens: int = 4096,
|
||||
system_dynamic_suffix: str | None = None,
|
||||
) -> AsyncIterator["StreamEvent"]:
|
||||
"""
|
||||
Stream a completion as an async iterator of StreamEvents.
|
||||
@@ -147,6 +169,9 @@ class LLMProvider(ABC):
|
||||
Tool orchestration is the CALLER's responsibility:
|
||||
- Caller detects ToolCallEvent, executes tool, adds result
|
||||
to messages, calls stream() again.
|
||||
|
||||
``system_dynamic_suffix`` is forwarded to ``acomplete``; see its
|
||||
docstring for the two-block split semantics.
|
||||
"""
|
||||
from framework.llm.stream_events import (
|
||||
FinishEvent,
|
||||
@@ -159,6 +184,7 @@ class LLMProvider(ABC):
|
||||
system=system,
|
||||
tools=tools,
|
||||
max_tokens=max_tokens,
|
||||
system_dynamic_suffix=system_dynamic_suffix,
|
||||
)
|
||||
yield TextDeltaEvent(content=response.content, snapshot=response.content)
|
||||
yield TextEndEvent(full_text=response.content)
|
||||
@@ -166,6 +192,9 @@ class LLMProvider(ABC):
|
||||
stop_reason=response.stop_reason,
|
||||
input_tokens=response.input_tokens,
|
||||
output_tokens=response.output_tokens,
|
||||
cached_tokens=response.cached_tokens,
|
||||
cache_creation_tokens=response.cache_creation_tokens,
|
||||
cost_usd=response.cost_usd,
|
||||
model=response.model,
|
||||
)
|
||||
|
||||
|
||||
@@ -65,13 +65,23 @@ class ReasoningDeltaEvent:
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class FinishEvent:
|
||||
"""The LLM has finished generating."""
|
||||
"""The LLM has finished generating.
|
||||
|
||||
``cached_tokens`` and ``cache_creation_tokens`` are subsets of
|
||||
``input_tokens`` — providers count both inside ``prompt_tokens`` already.
|
||||
Surface them separately for visibility; never add to a total.
|
||||
|
||||
``cost_usd`` is the per-turn USD cost when the provider or LiteLLM's
|
||||
pricing table supplies one; 0.0 means unreported (not free).
|
||||
"""
|
||||
|
||||
type: Literal["finish"] = "finish"
|
||||
stop_reason: str = ""
|
||||
input_tokens: int = 0
|
||||
output_tokens: int = 0
|
||||
cached_tokens: int = 0
|
||||
cache_creation_tokens: int = 0
|
||||
cost_usd: float = 0.0
|
||||
model: str = ""
|
||||
|
||||
|
||||
|
||||
@@ -21,6 +21,7 @@ import os
|
||||
import shutil
|
||||
import subprocess
|
||||
import sys
|
||||
import threading
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
from urllib import error as urlerror, parse as urlparse, request as urlrequest
|
||||
@@ -91,7 +92,13 @@ def cmd_serve(args: argparse.Namespace) -> int:
|
||||
|
||||
from aiohttp import web
|
||||
|
||||
_build_frontend()
|
||||
from framework.config import DESKTOP_MODE
|
||||
|
||||
# Skip frontend build when running inside the desktop shell — Electron
|
||||
# ships its own renderer, and running `npm` from a packaged install
|
||||
# often fails (no toolchain on the user's machine).
|
||||
if not DESKTOP_MODE:
|
||||
_build_frontend()
|
||||
|
||||
from framework.observability import configure_logging
|
||||
from framework.server.app import create_app
|
||||
@@ -214,7 +221,13 @@ def cmd_serve(args: argparse.Namespace) -> int:
|
||||
|
||||
def cmd_open(args: argparse.Namespace) -> int:
|
||||
"""Start the HTTP server and open the dashboard in the browser."""
|
||||
_ping_hive_gateway_availability("hive-open")
|
||||
# Don't block local startup on a best-effort analytics probe.
|
||||
threading.Thread(
|
||||
target=_ping_hive_gateway_availability,
|
||||
args=("hive-open",),
|
||||
daemon=True,
|
||||
name="hive-open-gateway-ping",
|
||||
).start()
|
||||
args.open = True
|
||||
return cmd_serve(args)
|
||||
|
||||
|
||||
@@ -89,10 +89,12 @@ class ActiveNodeClientIO(NodeClientIO):
|
||||
self._input_result = None
|
||||
|
||||
if self._event_bus is not None:
|
||||
# `prompt` is consumed by the caller separately (callers emit
|
||||
# it as a text delta when needed). The event only carries the
|
||||
# structured questions payload for widget rendering.
|
||||
await self._event_bus.emit_client_input_requested(
|
||||
stream_id=self.node_id,
|
||||
node_id=self.node_id,
|
||||
prompt=prompt,
|
||||
execution_id=self._execution_id or None,
|
||||
)
|
||||
|
||||
|
||||
@@ -9,8 +9,8 @@ Nodes that need browser access declare ``tools: {policy: "all"}`` in their
|
||||
agent.json config.
|
||||
|
||||
Note: the canonical source of truth for browser automation guidance is
|
||||
the ``browser-automation`` default skill at
|
||||
``core/framework/skills/_default_skills/browser-automation/SKILL.md``.
|
||||
the ``browser-automation`` preset skill at
|
||||
``core/framework/skills/_preset_skills/browser-automation/SKILL.md``.
|
||||
Activate that skill for the full decision tree. This module holds a
|
||||
compact subset suitable for direct inlining into a node's system prompt
|
||||
when a skill activation is not desired.
|
||||
@@ -26,18 +26,23 @@ Follow these rules for reliable, efficient browser interaction.
|
||||
- **`browser_snapshot`** — compact accessibility tree. Fast, cheap, good
|
||||
for static / text-heavy pages where the DOM matches what's visually
|
||||
rendered (docs, forms, search results, settings pages).
|
||||
- **`browser_screenshot`** — visual capture + scale metadata. Use on any
|
||||
complex SPA (LinkedIn, X / Twitter, Reddit, Gmail, Notion, Slack,
|
||||
Discord) and on any site using shadow DOM or virtual scrolling. On
|
||||
those pages, snapshot refs go stale in seconds, shadow contents
|
||||
aren't in the AX tree, and virtual-scrolled elements disappear from
|
||||
the tree entirely — screenshots are the only reliable way to orient.
|
||||
- **`browser_screenshot`** — visual capture + scale metadata. Use when
|
||||
the snapshot does not show the thing you need, when refs look stale,
|
||||
or when you need visual position/layout to act. This is common on
|
||||
complex SPAs (LinkedIn, X / Twitter, Reddit, Gmail, Notion, Slack,
|
||||
Discord), shadow DOM, and virtual scrolling.
|
||||
|
||||
Neither tool is "preferred" universally — they're for different jobs.
|
||||
Default to snapshot on static pages, screenshot on SPAs and
|
||||
shadow-heavy sites. Interaction tools (click/type/fill/scroll) return
|
||||
a snapshot automatically, so don't call `browser_snapshot` separately
|
||||
after an interaction unless you need a fresh view.
|
||||
Use snapshot first for structure and ordinary controls; switch to
|
||||
screenshot when snapshot can't find or verify the target. Interaction
|
||||
tools (`browser_click`, `browser_type`, `browser_type_focused`,
|
||||
`browser_fill`, `browser_scroll`) wait 0.5 s for the page to settle
|
||||
after a successful action, then attach a fresh snapshot under the
|
||||
`snapshot` key of their result — so don't call `browser_snapshot`
|
||||
separately after an interaction unless you need a newer view. Tune
|
||||
with `auto_snapshot_mode`: `"default"` (full tree) is the default;
|
||||
`"simple"` trims unnamed structural nodes; `"interactive"` returns
|
||||
only controls (tightest token footprint); `"off"` skips the capture
|
||||
entirely — use when batching several interactions.
|
||||
|
||||
Only fall back to `browser_get_text` for extracting small elements by
|
||||
CSS selector.
|
||||
|
||||
@@ -543,6 +543,10 @@ class NodeContext:
|
||||
# Dynamic memory provider — when set, EventLoopNode rebuilds the
|
||||
# system prompt with the latest memory block each iteration.
|
||||
dynamic_memory_provider: Any = None # Callable[[], str] | None
|
||||
# Surgical skills-catalog refresh, same contract as AgentContext's
|
||||
# field of the same name. Lets workers pick up UI-driven skill
|
||||
# toggles without rebuilding the full system prompt each turn.
|
||||
dynamic_skills_catalog_provider: Any = None # Callable[[], str] | None
|
||||
|
||||
# Skill system prompts — injected by the skill discovery pipeline
|
||||
skills_catalog_prompt: str = "" # Available skills XML catalog
|
||||
|
||||
@@ -0,0 +1,180 @@
|
||||
"""Regression tests for forced cancellation overlap in ExecutionStream."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
from types import SimpleNamespace
|
||||
from unittest.mock import MagicMock
|
||||
|
||||
import pytest
|
||||
|
||||
from framework.host.event_bus import AgentEvent, EventBus, EventType
|
||||
from framework.host.execution_manager import (
|
||||
EntryPointSpec,
|
||||
ExecutionAlreadyRunningError,
|
||||
ExecutionManager,
|
||||
)
|
||||
from framework.orchestrator.edge import GraphSpec
|
||||
from framework.orchestrator.goal import Goal
|
||||
from framework.orchestrator.orchestrator import ExecutionResult
|
||||
|
||||
|
||||
def _build_stream(tmp_path, *, event_bus: EventBus | None = None) -> ExecutionManager:
|
||||
graph = GraphSpec(
|
||||
id="test-graph",
|
||||
goal_id="goal-1",
|
||||
version="1.0.0",
|
||||
entry_node="start",
|
||||
entry_points={"start": "start"},
|
||||
terminal_nodes=[],
|
||||
pause_nodes=[],
|
||||
nodes=[],
|
||||
edges=[],
|
||||
)
|
||||
goal = Goal(id="goal-1", name="goal-1", description="test goal")
|
||||
entry_spec = EntryPointSpec(
|
||||
id="webhook",
|
||||
name="Webhook",
|
||||
entry_node="start",
|
||||
trigger_type="webhook",
|
||||
isolation_level="shared",
|
||||
max_concurrent=1,
|
||||
)
|
||||
|
||||
storage = SimpleNamespace(base_path=tmp_path)
|
||||
stream = ExecutionManager(
|
||||
stream_id="webhook",
|
||||
entry_spec=entry_spec,
|
||||
graph=graph,
|
||||
goal=goal,
|
||||
state_manager=MagicMock(),
|
||||
storage=storage,
|
||||
outcome_aggregator=MagicMock(),
|
||||
event_bus=event_bus,
|
||||
)
|
||||
stream._running = True
|
||||
return stream
|
||||
|
||||
|
||||
def _install_blocking_executor(monkeypatch, release: asyncio.Event) -> None:
|
||||
class BlockingExecutor:
|
||||
def __init__(self, *args, **kwargs):
|
||||
self.node_registry = {}
|
||||
|
||||
async def execute(self, *args, **kwargs):
|
||||
while True:
|
||||
try:
|
||||
await release.wait()
|
||||
break
|
||||
except asyncio.CancelledError:
|
||||
continue
|
||||
return ExecutionResult(success=True, output={"ok": True})
|
||||
|
||||
monkeypatch.setattr("framework.host.execution_manager.Orchestrator", BlockingExecutor)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_forced_cancel_timeout_keeps_stream_locked_until_task_exit(tmp_path, monkeypatch):
|
||||
event_bus = EventBus()
|
||||
stream = _build_stream(tmp_path, event_bus=event_bus)
|
||||
release = asyncio.Event()
|
||||
_install_blocking_executor(monkeypatch, release)
|
||||
|
||||
started_events: list[AgentEvent] = []
|
||||
first_started = asyncio.Event()
|
||||
second_started = asyncio.Event()
|
||||
|
||||
async def on_started(event: AgentEvent) -> None:
|
||||
started_events.append(event)
|
||||
if len(started_events) == 1:
|
||||
first_started.set()
|
||||
elif len(started_events) == 2:
|
||||
second_started.set()
|
||||
|
||||
event_bus.subscribe(
|
||||
event_types=[EventType.EXECUTION_STARTED],
|
||||
handler=on_started,
|
||||
filter_stream="webhook",
|
||||
)
|
||||
|
||||
async def immediate_timeout(_tasks, timeout=None):
|
||||
return set(), set(_tasks)
|
||||
|
||||
execution_id = await stream.execute({}, session_state={"resume_session_id": "session-1"})
|
||||
await asyncio.wait_for(first_started.wait(), timeout=1)
|
||||
|
||||
old_task = stream._execution_tasks[execution_id]
|
||||
monkeypatch.setattr("framework.host.execution_manager.asyncio.wait", immediate_timeout)
|
||||
|
||||
try:
|
||||
cancelled = await stream.cancel_execution(execution_id, reason="forced timeout")
|
||||
|
||||
assert cancelled == "cancelling"
|
||||
assert execution_id in stream._execution_tasks
|
||||
assert execution_id in stream._active_executions
|
||||
assert execution_id in stream._completion_events
|
||||
assert stream._active_executions[execution_id].status == "cancelling"
|
||||
assert not old_task.done()
|
||||
|
||||
with pytest.raises(ExecutionAlreadyRunningError):
|
||||
await stream.execute({}, session_state={"resume_session_id": execution_id})
|
||||
|
||||
assert len(started_events) == 1
|
||||
|
||||
release.set()
|
||||
await asyncio.wait_for(old_task, timeout=1)
|
||||
|
||||
restarted_id = await stream.execute({}, session_state={"resume_session_id": execution_id})
|
||||
assert restarted_id == execution_id
|
||||
await asyncio.wait_for(second_started.wait(), timeout=1)
|
||||
finally:
|
||||
release.set()
|
||||
await asyncio.gather(*stream._execution_tasks.values(), return_exceptions=True)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_repeated_forced_restarts_do_not_accumulate_parallel_tasks(tmp_path, monkeypatch):
|
||||
event_bus = EventBus()
|
||||
stream = _build_stream(tmp_path, event_bus=event_bus)
|
||||
release = asyncio.Event()
|
||||
_install_blocking_executor(monkeypatch, release)
|
||||
|
||||
started_events: list[AgentEvent] = []
|
||||
first_started = asyncio.Event()
|
||||
|
||||
async def on_started(event: AgentEvent) -> None:
|
||||
started_events.append(event)
|
||||
first_started.set()
|
||||
|
||||
event_bus.subscribe(
|
||||
event_types=[EventType.EXECUTION_STARTED],
|
||||
handler=on_started,
|
||||
filter_stream="webhook",
|
||||
)
|
||||
|
||||
async def immediate_timeout(_tasks, timeout=None):
|
||||
return set(), set(_tasks)
|
||||
|
||||
monkeypatch.setattr("framework.host.execution_manager.asyncio.wait", immediate_timeout)
|
||||
|
||||
execution_id = await stream.execute({}, session_state={"resume_session_id": "session-1"})
|
||||
await asyncio.wait_for(first_started.wait(), timeout=1)
|
||||
|
||||
first_task = stream._execution_tasks[execution_id]
|
||||
|
||||
try:
|
||||
assert await stream.cancel_execution(execution_id, reason="restart-1") == "cancelling"
|
||||
|
||||
with pytest.raises(ExecutionAlreadyRunningError):
|
||||
await stream.execute({}, session_state={"resume_session_id": execution_id})
|
||||
|
||||
with pytest.raises(ExecutionAlreadyRunningError):
|
||||
await stream.execute({}, session_state={"resume_session_id": execution_id})
|
||||
|
||||
assert len(started_events) == 1
|
||||
assert list(stream._execution_tasks) == [execution_id]
|
||||
assert stream._execution_tasks[execution_id] is first_task
|
||||
assert not first_task.done()
|
||||
finally:
|
||||
release.set()
|
||||
await asyncio.wait_for(first_task, timeout=1)
|
||||
+122
-52
@@ -19,6 +19,12 @@ _REPO_ROOT = Path(__file__).resolve().parent.parent.parent.parent
|
||||
_ALLOWED_AGENT_ROOTS: tuple[Path, ...] | None = None
|
||||
|
||||
|
||||
def _has_encrypted_credentials() -> bool:
|
||||
"""Return True when an encrypted credential store already exists on disk."""
|
||||
cred_dir = Path.home() / ".hive" / "credentials" / "credentials"
|
||||
return cred_dir.is_dir() and any(cred_dir.glob("*.enc"))
|
||||
|
||||
|
||||
def _get_allowed_agent_roots() -> tuple[Path, ...]:
|
||||
"""Return resolved allowed root directories for agent loading.
|
||||
|
||||
@@ -134,6 +140,25 @@ async def cors_middleware(request: web.Request, handler):
|
||||
return response
|
||||
|
||||
|
||||
@web.middleware
|
||||
async def no_cache_api_middleware(request: web.Request, handler):
|
||||
"""Prevent browsers from caching API responses.
|
||||
|
||||
Without this, a one-off bad response (e.g. the SPA catch-all leaking
|
||||
index.html for an /api/* URL before a route was registered) can get
|
||||
pinned in the browser's disk cache and replayed forever, since our
|
||||
JSON handlers don't emit ETag/Last-Modified and browsers fall back
|
||||
to heuristic freshness.
|
||||
"""
|
||||
try:
|
||||
response = await handler(request)
|
||||
except web.HTTPException as exc:
|
||||
response = exc
|
||||
if request.path.startswith("/api/"):
|
||||
response.headers["Cache-Control"] = "no-store"
|
||||
return response
|
||||
|
||||
|
||||
@web.middleware
|
||||
async def error_middleware(request: web.Request, handler):
|
||||
"""Catch exceptions and return JSON error responses.
|
||||
@@ -173,11 +198,12 @@ async def handle_health(request: web.Request) -> web.Response:
|
||||
)
|
||||
|
||||
|
||||
async def handle_browser_status(request: web.Request) -> web.Response:
|
||||
"""GET /api/browser/status — proxy the GCU bridge status check server-side.
|
||||
async def _probe_browser_bridge() -> dict:
|
||||
"""Probe the local GCU bridge and return ``{bridge, connected}``.
|
||||
|
||||
Checks http://127.0.0.1:9230/status so the browser never makes a
|
||||
cross-origin request that would log ERR_CONNECTION_REFUSED in the console.
|
||||
Shared by the one-shot ``GET /api/browser/status`` handler and the
|
||||
``/api/browser/status/stream`` SSE feed so both see the same data
|
||||
source.
|
||||
"""
|
||||
import asyncio
|
||||
|
||||
@@ -190,17 +216,66 @@ async def handle_browser_status(request: web.Request) -> web.Response:
|
||||
await writer.drain()
|
||||
raw = await asyncio.wait_for(reader.read(512), timeout=0.5)
|
||||
writer.close()
|
||||
# Parse JSON body after the blank line
|
||||
if b"\r\n\r\n" in raw:
|
||||
body = raw.split(b"\r\n\r\n", 1)[1]
|
||||
import json
|
||||
import json as _json
|
||||
|
||||
data = json.loads(body)
|
||||
return web.json_response({"bridge": True, "connected": data.get("connected", False)})
|
||||
data = _json.loads(body)
|
||||
return {"bridge": True, "connected": bool(data.get("connected", False))}
|
||||
except Exception:
|
||||
pass
|
||||
return {"bridge": False, "connected": False}
|
||||
|
||||
return web.json_response({"bridge": False, "connected": False})
|
||||
|
||||
async def handle_browser_status(request: web.Request) -> web.Response:
|
||||
"""GET /api/browser/status — proxy the GCU bridge status check server-side.
|
||||
|
||||
Checks http://127.0.0.1:9230/status so the browser never makes a
|
||||
cross-origin request that would log ERR_CONNECTION_REFUSED in the console.
|
||||
"""
|
||||
return web.json_response(await _probe_browser_bridge())
|
||||
|
||||
|
||||
async def handle_browser_status_stream(request: web.Request) -> web.StreamResponse:
|
||||
"""GET /api/browser/status/stream — SSE feed of bridge status.
|
||||
|
||||
Emits a ``status`` event immediately, then again only when the
|
||||
probe result changes. Polls the local bridge every 3s; that's the
|
||||
same cadence the frontend used before, but we absorb it
|
||||
server-side instead of the browser burning a request.
|
||||
"""
|
||||
import asyncio
|
||||
import json as _json
|
||||
|
||||
resp = web.StreamResponse(
|
||||
status=200,
|
||||
headers={
|
||||
"Content-Type": "text/event-stream",
|
||||
"Cache-Control": "no-cache, no-transform",
|
||||
"Connection": "keep-alive",
|
||||
"X-Accel-Buffering": "no",
|
||||
},
|
||||
)
|
||||
await resp.prepare(request)
|
||||
|
||||
async def _send(event: str, data: dict) -> None:
|
||||
payload = f"event: {event}\ndata: {_json.dumps(data)}\n\n"
|
||||
await resp.write(payload.encode("utf-8"))
|
||||
|
||||
last: tuple | None = None
|
||||
try:
|
||||
while True:
|
||||
status = await _probe_browser_bridge()
|
||||
signature = (status["bridge"], status["connected"])
|
||||
if signature != last:
|
||||
await _send("status", status)
|
||||
last = signature
|
||||
await asyncio.sleep(3.0)
|
||||
except (asyncio.CancelledError, ConnectionResetError):
|
||||
raise
|
||||
except Exception as exc:
|
||||
logger.warning("browser status stream error: %s", exc, exc_info=True)
|
||||
return resp
|
||||
|
||||
|
||||
def create_app(model: str | None = None) -> web.Application:
|
||||
@@ -212,7 +287,7 @@ def create_app(model: str | None = None) -> web.Application:
|
||||
Returns:
|
||||
Configured aiohttp Application ready to run.
|
||||
"""
|
||||
app = web.Application(middlewares=[cors_middleware, error_middleware])
|
||||
app = web.Application(middlewares=[cors_middleware, no_cache_api_middleware, error_middleware])
|
||||
|
||||
# Initialize credential store (before SessionManager so it can be shared)
|
||||
from framework.credentials.store import CredentialStore
|
||||
@@ -225,57 +300,39 @@ def create_app(model: str | None = None) -> web.Application:
|
||||
|
||||
# Auto-generate credential key for web-only users who never ran the TUI
|
||||
if not os.environ.get("HIVE_CREDENTIAL_KEY"):
|
||||
try:
|
||||
from framework.credentials.key_storage import generate_and_save_credential_key
|
||||
if _has_encrypted_credentials():
|
||||
logger.warning(
|
||||
"HIVE_CREDENTIAL_KEY is missing but encrypted credentials already exist; "
|
||||
"not generating a replacement key because it would not decrypt existing credentials"
|
||||
)
|
||||
else:
|
||||
try:
|
||||
from framework.credentials.key_storage import generate_and_save_credential_key
|
||||
|
||||
generate_and_save_credential_key()
|
||||
logger.info("Generated and persisted HIVE_CREDENTIAL_KEY to ~/.hive/secrets/credential_key")
|
||||
except Exception as exc:
|
||||
logger.warning("Could not auto-persist HIVE_CREDENTIAL_KEY: %s", exc)
|
||||
generate_and_save_credential_key()
|
||||
logger.info("Generated and persisted HIVE_CREDENTIAL_KEY to ~/.hive/secrets/credential_key")
|
||||
except Exception as exc:
|
||||
logger.warning("Could not auto-persist HIVE_CREDENTIAL_KEY: %s", exc)
|
||||
|
||||
credential_store = CredentialStore.with_aden_sync()
|
||||
# Local server startup should not wait on an eager Aden sync.
|
||||
# The store can still fetch/refresh credentials on demand.
|
||||
if not os.environ.get("HIVE_CREDENTIAL_KEY") and _has_encrypted_credentials():
|
||||
credential_store = CredentialStore.with_env_storage()
|
||||
else:
|
||||
credential_store = CredentialStore.with_aden_sync(auto_sync=False)
|
||||
except Exception:
|
||||
logger.debug("Encrypted credential store unavailable, using in-memory fallback")
|
||||
credential_store = CredentialStore.for_testing({})
|
||||
|
||||
app["credential_store"] = credential_store
|
||||
|
||||
# Pre-load queen MCP tools once at startup (cached for all sessions)
|
||||
# This avoids rebuilding the tool registry for every queen session
|
||||
from framework.loader.mcp_registry import MCPRegistry
|
||||
from framework.loader.tool_registry import ToolRegistry
|
||||
|
||||
_queen_tool_registry: ToolRegistry | None = None
|
||||
try:
|
||||
_queen_tool_registry = ToolRegistry()
|
||||
import framework.agents.queen as _queen_pkg
|
||||
|
||||
queen_pkg_dir = Path(_queen_pkg.__file__).parent
|
||||
mcp_config = queen_pkg_dir / "mcp_servers.json"
|
||||
if mcp_config.exists():
|
||||
_queen_tool_registry.load_mcp_config(mcp_config)
|
||||
logger.info("Pre-loaded queen MCP tools from %s", mcp_config)
|
||||
|
||||
registry = MCPRegistry()
|
||||
registry.initialize()
|
||||
registry.ensure_defaults()
|
||||
if (queen_pkg_dir / "mcp_registry.json").is_file():
|
||||
_queen_tool_registry.set_mcp_registry_agent_path(queen_pkg_dir)
|
||||
registry_configs, selection_max_tools = registry.load_agent_selection(queen_pkg_dir)
|
||||
if registry_configs:
|
||||
_queen_tool_registry.load_registry_servers(
|
||||
registry_configs,
|
||||
preserve_existing_tools=True,
|
||||
log_collisions=True,
|
||||
max_tools=selection_max_tools,
|
||||
)
|
||||
logger.info("Pre-loaded queen tool registry with %d tools", len(_queen_tool_registry.get_tools()))
|
||||
except Exception as e:
|
||||
logger.warning("Failed to pre-load queen tool registry: %s", e)
|
||||
|
||||
app["queen_tool_registry"] = _queen_tool_registry
|
||||
# Let queen sessions build their registry lazily on first use instead of
|
||||
# paying the MCP discovery cost during `hive open`.
|
||||
app["queen_tool_registry"] = None
|
||||
app["manager"] = SessionManager(
|
||||
model=model, credential_store=credential_store, queen_tool_registry=_queen_tool_registry
|
||||
model=model,
|
||||
credential_store=credential_store,
|
||||
queen_tool_registry=None,
|
||||
)
|
||||
|
||||
# Register shutdown hook
|
||||
@@ -284,16 +341,23 @@ def create_app(model: str | None = None) -> web.Application:
|
||||
# Health check
|
||||
app.router.add_get("/api/health", handle_health)
|
||||
app.router.add_get("/api/browser/status", handle_browser_status)
|
||||
app.router.add_get("/api/browser/status/stream", handle_browser_status_stream)
|
||||
|
||||
# Register route modules
|
||||
from framework.server.routes_colony_tools import register_routes as register_colony_tools_routes
|
||||
from framework.server.routes_colony_workers import register_routes as register_colony_worker_routes
|
||||
from framework.server.routes_config import register_routes as register_config_routes
|
||||
from framework.server.routes_credentials import register_routes as register_credential_routes
|
||||
from framework.server.routes_events import register_routes as register_event_routes
|
||||
from framework.server.routes_execution import register_routes as register_execution_routes
|
||||
from framework.server.routes_logs import register_routes as register_log_routes
|
||||
from framework.server.routes_mcp import register_routes as register_mcp_routes
|
||||
from framework.server.routes_messages import register_routes as register_message_routes
|
||||
from framework.server.routes_prompts import register_routes as register_prompt_routes
|
||||
from framework.server.routes_queen_tools import register_routes as register_queen_tools_routes
|
||||
from framework.server.routes_queens import register_routes as register_queen_routes
|
||||
from framework.server.routes_sessions import register_routes as register_session_routes
|
||||
from framework.server.routes_skills import register_routes as register_skills_routes
|
||||
from framework.server.routes_workers import register_routes as register_worker_routes
|
||||
|
||||
register_config_routes(app)
|
||||
@@ -305,6 +369,12 @@ def create_app(model: str | None = None) -> web.Application:
|
||||
register_worker_routes(app)
|
||||
register_log_routes(app)
|
||||
register_queen_routes(app)
|
||||
register_queen_tools_routes(app)
|
||||
register_colony_tools_routes(app)
|
||||
register_mcp_routes(app)
|
||||
register_colony_worker_routes(app)
|
||||
register_prompt_routes(app)
|
||||
register_skills_routes(app)
|
||||
|
||||
# Static file serving — Option C production mode
|
||||
# If frontend/dist/ exists, serve built frontend files on /
|
||||
|
||||
@@ -0,0 +1,149 @@
|
||||
"""Track fork-compaction status for freshly-forked colony queen sessions.
|
||||
|
||||
When ``create_colony`` forks a queen session into a colony, the
|
||||
inherited DM transcript is compacted via an LLM call that can legitimately
|
||||
exceed the default tool-call timeout (60s). To keep ``create_colony``
|
||||
responsive we run that compaction in the background and record its
|
||||
status on disk so a subsequent colony session-load can wait for it to
|
||||
settle before reading the conversation files.
|
||||
|
||||
The status lives at ``<queen_dir>/compaction_status.json``:
|
||||
|
||||
{"status": "in_progress", "started_at": "..."}
|
||||
{"status": "done", "completed_at": "...", "messages_compacted": N, "summary_chars": M}
|
||||
{"status": "failed", "completed_at": "...", "error": "..."}
|
||||
|
||||
Only present when a compaction was scheduled for this queen dir — absent
|
||||
otherwise. All writes are fail-soft; a missing/corrupt file is treated
|
||||
as "no compaction pending".
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
import logging
|
||||
from datetime import UTC, datetime
|
||||
from pathlib import Path
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
_STATUS_FILENAME = "compaction_status.json"
|
||||
|
||||
|
||||
def _status_path(queen_dir: Path) -> Path:
|
||||
return Path(queen_dir) / _STATUS_FILENAME
|
||||
|
||||
|
||||
def mark_in_progress(queen_dir: Path) -> None:
|
||||
path = _status_path(queen_dir)
|
||||
try:
|
||||
path.parent.mkdir(parents=True, exist_ok=True)
|
||||
path.write_text(
|
||||
json.dumps(
|
||||
{
|
||||
"status": "in_progress",
|
||||
"started_at": datetime.now(UTC).isoformat(),
|
||||
},
|
||||
ensure_ascii=False,
|
||||
),
|
||||
encoding="utf-8",
|
||||
)
|
||||
except OSError:
|
||||
logger.warning(
|
||||
"compaction_status: failed to write 'in_progress' at %s",
|
||||
path,
|
||||
exc_info=True,
|
||||
)
|
||||
|
||||
|
||||
def mark_done(
|
||||
queen_dir: Path,
|
||||
*,
|
||||
messages_compacted: int = 0,
|
||||
summary_chars: int = 0,
|
||||
) -> None:
|
||||
path = _status_path(queen_dir)
|
||||
try:
|
||||
path.write_text(
|
||||
json.dumps(
|
||||
{
|
||||
"status": "done",
|
||||
"completed_at": datetime.now(UTC).isoformat(),
|
||||
"messages_compacted": messages_compacted,
|
||||
"summary_chars": summary_chars,
|
||||
},
|
||||
ensure_ascii=False,
|
||||
),
|
||||
encoding="utf-8",
|
||||
)
|
||||
except OSError:
|
||||
logger.warning(
|
||||
"compaction_status: failed to write 'done' at %s",
|
||||
path,
|
||||
exc_info=True,
|
||||
)
|
||||
|
||||
|
||||
def mark_failed(queen_dir: Path, error: str) -> None:
|
||||
path = _status_path(queen_dir)
|
||||
try:
|
||||
path.write_text(
|
||||
json.dumps(
|
||||
{
|
||||
"status": "failed",
|
||||
"completed_at": datetime.now(UTC).isoformat(),
|
||||
"error": (error or "")[:500],
|
||||
},
|
||||
ensure_ascii=False,
|
||||
),
|
||||
encoding="utf-8",
|
||||
)
|
||||
except OSError:
|
||||
logger.warning(
|
||||
"compaction_status: failed to write 'failed' at %s",
|
||||
path,
|
||||
exc_info=True,
|
||||
)
|
||||
|
||||
|
||||
def get_status(queen_dir: Path) -> dict | None:
|
||||
path = _status_path(queen_dir)
|
||||
if not path.exists():
|
||||
return None
|
||||
try:
|
||||
return json.loads(path.read_text(encoding="utf-8"))
|
||||
except (json.JSONDecodeError, OSError):
|
||||
return None
|
||||
|
||||
|
||||
async def await_completion(
|
||||
queen_dir: Path,
|
||||
*,
|
||||
timeout: float = 180.0,
|
||||
poll: float = 0.5,
|
||||
) -> dict | None:
|
||||
"""Block until compaction leaves 'in_progress' state.
|
||||
|
||||
Returns the final status dict, or ``None`` if no compaction marker
|
||||
exists for this dir. On timeout returns the last observed status
|
||||
(still 'in_progress') so the caller can decide whether to proceed
|
||||
with the raw transcript.
|
||||
"""
|
||||
loop = asyncio.get_event_loop()
|
||||
deadline = loop.time() + max(0.0, timeout)
|
||||
last: dict | None = None
|
||||
while True:
|
||||
last = get_status(queen_dir)
|
||||
if last is None:
|
||||
return None
|
||||
if last.get("status") != "in_progress":
|
||||
return last
|
||||
if loop.time() >= deadline:
|
||||
logger.warning(
|
||||
"compaction_status: timed out after %.0fs waiting for %s (proceeding with raw transcript)",
|
||||
timeout,
|
||||
queen_dir,
|
||||
)
|
||||
return last
|
||||
await asyncio.sleep(poll)
|
||||
@@ -113,10 +113,18 @@ def install_worker_escalation_routing(
|
||||
queen_node = session.queen_executor.node_registry.get("queen") if session.queen_executor is not None else None
|
||||
if queen_node is None or not hasattr(queen_node, "inject_event"):
|
||||
if session.event_bus is not None:
|
||||
# Stream the handoff text so the human sees the worker's
|
||||
# question, then request input so the reply input appears.
|
||||
await session.event_bus.emit_client_output_delta(
|
||||
stream_id="queen",
|
||||
node_id="queen",
|
||||
content=handoff,
|
||||
snapshot=handoff,
|
||||
execution_id=session.id,
|
||||
)
|
||||
await session.event_bus.emit_client_input_requested(
|
||||
stream_id="queen",
|
||||
node_id="queen",
|
||||
prompt=handoff,
|
||||
execution_id=session.id,
|
||||
)
|
||||
return
|
||||
@@ -245,6 +253,92 @@ async def materialize_queen_identity(
|
||||
)
|
||||
|
||||
|
||||
def build_queen_tool_registry_bare() -> tuple[Any, dict[str, list[dict[str, Any]]]]:
|
||||
"""Build a Queen ``ToolRegistry`` and a (server_name → tools) catalog.
|
||||
|
||||
Used by the Tool Library GET route to populate the MCP tool surface
|
||||
without needing a live queen session. We DO NOT register queen
|
||||
lifecycle tools here (they require a Session stub); the catalog only
|
||||
covers MCP-origin tools, which is what the allowlist gates.
|
||||
|
||||
Loading MCP servers spawns subprocesses, so call this once per
|
||||
backend process and cache the result.
|
||||
"""
|
||||
from pathlib import Path
|
||||
|
||||
import framework.agents.queen as _queen_pkg
|
||||
from framework.loader.mcp_registry import MCPRegistry
|
||||
from framework.loader.tool_registry import ToolRegistry
|
||||
|
||||
queen_registry = ToolRegistry()
|
||||
queen_pkg_dir = Path(_queen_pkg.__file__).parent
|
||||
|
||||
mcp_config = queen_pkg_dir / "mcp_servers.json"
|
||||
if mcp_config.exists():
|
||||
try:
|
||||
queen_registry.load_mcp_config(mcp_config)
|
||||
except Exception:
|
||||
logger.warning("build_queen_tool_registry_bare: MCP config failed", exc_info=True)
|
||||
|
||||
try:
|
||||
reg = MCPRegistry()
|
||||
reg.initialize()
|
||||
if (queen_pkg_dir / "mcp_registry.json").is_file():
|
||||
queen_registry.set_mcp_registry_agent_path(queen_pkg_dir)
|
||||
registry_configs, selection_max_tools = reg.load_agent_selection(queen_pkg_dir)
|
||||
|
||||
already = {cfg.get("name") for cfg in registry_configs if cfg.get("name")}
|
||||
extra: list[str] = []
|
||||
try:
|
||||
for entry in reg.list_installed():
|
||||
if entry.get("source") != "local":
|
||||
continue
|
||||
if not entry.get("enabled", True):
|
||||
continue
|
||||
name = entry.get("name")
|
||||
if name and name not in already:
|
||||
extra.append(name)
|
||||
except Exception:
|
||||
pass
|
||||
if extra:
|
||||
try:
|
||||
extra_configs = reg.resolve_for_agent(include=extra)
|
||||
registry_configs = list(registry_configs) + [reg._server_config_to_dict(c) for c in extra_configs]
|
||||
except Exception:
|
||||
logger.debug("build_queen_tool_registry_bare: resolve_for_agent(extra) failed", exc_info=True)
|
||||
|
||||
if registry_configs:
|
||||
queen_registry.load_registry_servers(
|
||||
registry_configs,
|
||||
preserve_existing_tools=True,
|
||||
log_collisions=False,
|
||||
max_tools=selection_max_tools,
|
||||
)
|
||||
except Exception:
|
||||
logger.warning("build_queen_tool_registry_bare: MCP registry load failed", exc_info=True)
|
||||
|
||||
# Build the catalog.
|
||||
tools_by_name = queen_registry.get_tools()
|
||||
server_map = dict(getattr(queen_registry, "_mcp_server_tools", {}) or {})
|
||||
catalog: dict[str, list[dict[str, Any]]] = {}
|
||||
for server_name in sorted(server_map):
|
||||
entries: list[dict[str, Any]] = []
|
||||
for tool_name in sorted(server_map[server_name]):
|
||||
tool = tools_by_name.get(tool_name)
|
||||
if tool is None:
|
||||
continue
|
||||
entries.append(
|
||||
{
|
||||
"name": tool.name,
|
||||
"description": tool.description,
|
||||
"input_schema": tool.parameters,
|
||||
}
|
||||
)
|
||||
catalog[server_name] = entries
|
||||
|
||||
return queen_registry, catalog
|
||||
|
||||
|
||||
async def create_queen(
|
||||
session: Session,
|
||||
session_manager: Any,
|
||||
@@ -266,16 +360,19 @@ async def create_queen(
|
||||
queen_loop_config as _base_loop_config,
|
||||
)
|
||||
from framework.agents.queen.nodes import (
|
||||
_QUEEN_INCUBATING_TOOLS,
|
||||
_QUEEN_INDEPENDENT_TOOLS,
|
||||
_QUEEN_REVIEWING_TOOLS,
|
||||
_QUEEN_WORKING_TOOLS,
|
||||
_queen_behavior_always,
|
||||
_queen_behavior_independent,
|
||||
_queen_character_core,
|
||||
_queen_role_incubating,
|
||||
_queen_role_independent,
|
||||
_queen_role_reviewing,
|
||||
_queen_role_working,
|
||||
_queen_style,
|
||||
_queen_tools_incubating,
|
||||
_queen_tools_independent,
|
||||
_queen_tools_reviewing,
|
||||
_queen_tools_working,
|
||||
@@ -315,6 +412,45 @@ async def create_queen(
|
||||
if (queen_pkg_dir / "mcp_registry.json").is_file():
|
||||
queen_registry.set_mcp_registry_agent_path(queen_pkg_dir)
|
||||
registry_configs, selection_max_tools = registry.load_agent_selection(queen_pkg_dir)
|
||||
|
||||
# Auto-include every user-added local MCP server that the repo
|
||||
# selection hasn't already loaded. Users register servers via
|
||||
# the `/api/mcp/servers` route (or `hive mcp add`); they live in
|
||||
# ~/.hive/mcp_registry/installed.json with source == "local".
|
||||
# New servers take effect on the next queen session start; the
|
||||
# prompt cache and ToolRegistry are still loaded once per boot.
|
||||
already_loaded_names = {cfg.get("name") for cfg in registry_configs if cfg.get("name")}
|
||||
extra_names: list[str] = []
|
||||
try:
|
||||
for entry in registry.list_installed():
|
||||
if entry.get("source") != "local":
|
||||
continue
|
||||
if not entry.get("enabled", True):
|
||||
continue
|
||||
name = entry.get("name")
|
||||
if not name or name in already_loaded_names:
|
||||
continue
|
||||
extra_names.append(name)
|
||||
except Exception:
|
||||
logger.debug("Queen: list_installed() failed while auto-including user servers", exc_info=True)
|
||||
|
||||
if extra_names:
|
||||
try:
|
||||
extra_configs = registry.resolve_for_agent(include=extra_names)
|
||||
extra_dicts = [registry._server_config_to_dict(c) for c in extra_configs]
|
||||
registry_configs = list(registry_configs) + extra_dicts
|
||||
logger.info(
|
||||
"Queen: auto-including %d user-added MCP server(s): %s",
|
||||
len(extra_dicts),
|
||||
[c.get("name") for c in extra_dicts],
|
||||
)
|
||||
except Exception:
|
||||
logger.warning(
|
||||
"Queen: failed to resolve user-added MCP servers %s",
|
||||
extra_names,
|
||||
exc_info=True,
|
||||
)
|
||||
|
||||
if registry_configs:
|
||||
results = queen_registry.load_registry_servers(
|
||||
registry_configs,
|
||||
@@ -378,6 +514,7 @@ async def create_queen(
|
||||
|
||||
# ---- Partition tools by phase ------------------------------------
|
||||
independent_names = set(_QUEEN_INDEPENDENT_TOOLS)
|
||||
incubating_names = set(_QUEEN_INCUBATING_TOOLS)
|
||||
working_names = set(_QUEEN_WORKING_TOOLS)
|
||||
reviewing_names = set(_QUEEN_REVIEWING_TOOLS)
|
||||
|
||||
@@ -386,16 +523,89 @@ async def create_queen(
|
||||
|
||||
phase_state.working_tools = [t for t in queen_tools if t.name in working_names]
|
||||
phase_state.reviewing_tools = [t for t in queen_tools if t.name in reviewing_names]
|
||||
# Incubating tool surface is intentionally minimal (read-only inspection
|
||||
# + create_colony + cancel_incubation) — no MCP tools spliced in, so the
|
||||
# queen stays focused on drafting the spec.
|
||||
phase_state.incubating_tools = [t for t in queen_tools if t.name in incubating_names]
|
||||
|
||||
# Independent phase gets core tools + all MCP tools not claimed by any
|
||||
# other phase (coder-tools file I/O, gcu-tools browser, etc.).
|
||||
all_phase_names = independent_names | working_names | reviewing_names
|
||||
all_phase_names = independent_names | incubating_names | working_names | reviewing_names
|
||||
mcp_tools = [t for t in queen_tools if t.name not in all_phase_names]
|
||||
phase_state.independent_tools = [t for t in queen_tools if t.name in independent_names] + mcp_tools
|
||||
logger.info(
|
||||
"Queen: independent tools: %s",
|
||||
sorted(t.name for t in phase_state.independent_tools),
|
||||
)
|
||||
logger.info(
|
||||
"Queen: incubating tools: %s",
|
||||
sorted(t.name for t in phase_state.incubating_tools),
|
||||
)
|
||||
|
||||
# ---- Per-queen MCP tool allowlist --------------------------------
|
||||
# Capture the set of MCP-origin tool names so the allowlist in
|
||||
# ``QueenPhaseState`` only gates MCP tools (lifecycle and synthetic
|
||||
# tools always pass through). Then apply the queen profile's stored
|
||||
# allowlist (if any) and memoize the filtered independent tool list.
|
||||
mcp_server_tools_map: dict[str, set[str]] = dict(getattr(queen_registry, "_mcp_server_tools", {}))
|
||||
phase_state.mcp_tool_names_all = set().union(*mcp_server_tools_map.values()) if mcp_server_tools_map else set()
|
||||
# The queen's MCP tool allowlist now lives in a dedicated
|
||||
# ``tools.json`` sidecar next to ``profile.yaml``. ``load_queen_tools_config``
|
||||
# migrates any legacy ``enabled_mcp_tools`` field out of profile.yaml
|
||||
# on first read, so existing installs upgrade silently.
|
||||
from framework.agents.queen.queen_tools_config import load_queen_tools_config
|
||||
|
||||
# Build a minimal catalog for default-tool resolution. The full
|
||||
# ``session_manager._mcp_tool_catalog`` snapshot is written further
|
||||
# down the flow; a queen booted for the first time needs the catalog
|
||||
# now so ``@server:NAME`` shorthands in the role-default table can
|
||||
# expand against the just-loaded MCP servers.
|
||||
_boot_catalog: dict[str, list[dict]] = {
|
||||
srv: [{"name": name} for name in sorted(names)] for srv, names in mcp_server_tools_map.items()
|
||||
}
|
||||
# ``queen_dir`` is ``queens/<queen_id>/sessions/<session_id>``; the
|
||||
# allowlist sidecar is keyed by queen_id, not session_id.
|
||||
phase_state.enabled_mcp_tools = load_queen_tools_config(session.queen_name, _boot_catalog)
|
||||
phase_state.rebuild_independent_filter()
|
||||
if phase_state.enabled_mcp_tools is not None:
|
||||
total_mcp = len(phase_state.mcp_tool_names_all)
|
||||
allowed_mcp = len(set(phase_state.enabled_mcp_tools) & phase_state.mcp_tool_names_all)
|
||||
logger.info(
|
||||
"Queen: per-queen MCP allowlist active — %d of %d MCP tools enabled",
|
||||
allowed_mcp,
|
||||
total_mcp,
|
||||
)
|
||||
|
||||
# ---- MCP tool catalog for the frontend ---------------------------
|
||||
# Snapshot per-server tool metadata so the Queen Tools API can render
|
||||
# the tool surface without spawning MCP subprocesses. Keyed by server
|
||||
# name so the UI can group tools by origin. Updated every time a
|
||||
# queen boots, so installing a new server and starting a new queen
|
||||
# session refreshes the catalog.
|
||||
mcp_tool_catalog: dict[str, list[dict[str, Any]]] = {}
|
||||
tools_by_name = {t.name: t for t in queen_tools}
|
||||
for server_name, tool_names in mcp_server_tools_map.items():
|
||||
server_entries: list[dict[str, Any]] = []
|
||||
for tool_name in sorted(tool_names):
|
||||
tool = tools_by_name.get(tool_name)
|
||||
if tool is None:
|
||||
continue
|
||||
server_entries.append(
|
||||
{
|
||||
"name": tool.name,
|
||||
"description": tool.description,
|
||||
"input_schema": tool.parameters,
|
||||
}
|
||||
)
|
||||
mcp_tool_catalog[server_name] = server_entries
|
||||
# All queens share one MCP registry, so the catalog is a manager-level
|
||||
# fact; stash it on the SessionManager so the Queen Tools route can
|
||||
# render the tool list even when no queen session is currently live.
|
||||
if session_manager is not None:
|
||||
try:
|
||||
session_manager._mcp_tool_catalog = mcp_tool_catalog # type: ignore[attr-defined]
|
||||
except Exception:
|
||||
logger.debug("Queen: could not attach mcp_tool_catalog to manager", exc_info=True)
|
||||
|
||||
# ---- Global + queen-scoped memory ----------------------------------
|
||||
global_dir, queen_mem_dir = initialize_memory_scopes(session, phase_state)
|
||||
@@ -428,16 +638,20 @@ async def create_queen(
|
||||
),
|
||||
_has_vision,
|
||||
)
|
||||
phase_state.prompt_working = finalize_queen_prompt(
|
||||
phase_state.prompt_incubating = finalize_queen_prompt(
|
||||
(
|
||||
_queen_character_core
|
||||
+ _queen_role_working
|
||||
+ _queen_role_incubating
|
||||
+ _queen_style
|
||||
+ _queen_tools_working
|
||||
+ _queen_tools_incubating
|
||||
+ _queen_behavior_always
|
||||
),
|
||||
_has_vision,
|
||||
)
|
||||
phase_state.prompt_working = finalize_queen_prompt(
|
||||
(_queen_character_core + _queen_role_working + _queen_style + _queen_tools_working + _queen_behavior_always),
|
||||
_has_vision,
|
||||
)
|
||||
phase_state.prompt_reviewing = finalize_queen_prompt(
|
||||
(
|
||||
_queen_character_core
|
||||
@@ -452,12 +666,34 @@ async def create_queen(
|
||||
# ---- Default skill protocols -------------------------------------
|
||||
_queen_skill_dirs: list[str] = []
|
||||
try:
|
||||
from framework.config import QUEENS_DIR
|
||||
from framework.skills.discovery import ExtraScope
|
||||
from framework.skills.manager import SkillsManager, SkillsManagerConfig
|
||||
|
||||
# Pass project_root so user-scope skills (~/.hive/skills/, ~/.agents/skills/)
|
||||
# are discovered. Queen has no agent-specific project root, so we use its
|
||||
# own directory — the value just needs to be non-None to enable user-scope scanning.
|
||||
_queen_skills_mgr = SkillsManager(SkillsManagerConfig(project_root=Path(__file__).parent))
|
||||
# Queen home backs the queen-UI skill scope and the queen's
|
||||
# override store. The directory already exists (or is created on
|
||||
# demand by queen_profiles.py); treat a missing queen_name as the
|
||||
# default queen to preserve backwards compatibility.
|
||||
_queen_id = getattr(session, "queen_name", None) or "default"
|
||||
_queen_home = QUEENS_DIR / _queen_id
|
||||
_queen_skills_mgr = SkillsManager(
|
||||
SkillsManagerConfig(
|
||||
queen_id=_queen_id,
|
||||
queen_overrides_path=_queen_home / "skills_overrides.json",
|
||||
extra_scope_dirs=[
|
||||
ExtraScope(
|
||||
directory=_queen_home / "skills",
|
||||
label="queen_ui",
|
||||
priority=2,
|
||||
)
|
||||
],
|
||||
# No project_root — queen's project is her own identity;
|
||||
# user-scope discovery still runs without one.
|
||||
project_root=None,
|
||||
skip_community_discovery=True,
|
||||
interactive=False,
|
||||
)
|
||||
)
|
||||
_queen_skills_mgr.load()
|
||||
phase_state.protocols_prompt = _queen_skills_mgr.protocols_prompt
|
||||
phase_state.skills_catalog_prompt = _queen_skills_mgr.skills_catalog_prompt
|
||||
@@ -496,8 +732,37 @@ async def create_queen(
|
||||
|
||||
# ---- Recall on each real user turn --------------------------------
|
||||
async def _recall_on_user_input(event: AgentEvent) -> None:
|
||||
"""Re-select memories when real user input arrives."""
|
||||
await _refresh_recall_cache((event.data or {}).get("content", ""))
|
||||
"""On real user input, freeze the dynamic system-prompt suffix and
|
||||
refresh recall memories in the background.
|
||||
|
||||
The EventBus drops handlers that exceed 15s, so we MUST return fast.
|
||||
Recall selection queries the LLM and can take >15s on slow backends;
|
||||
we fire it off as a background task and re-stamp the suffix when it
|
||||
completes. The immediate refresh_dynamic_suffix call stamps a fresh
|
||||
timestamp using the last-known recall blocks so every iteration of
|
||||
THIS user turn sees a byte-stable prompt (prompt cache hits on the
|
||||
static block). Phase-change injections and worker-report injections
|
||||
go through agent_loop.inject_event() and do NOT publish
|
||||
CLIENT_INPUT_RECEIVED, so this runs exactly once per real user turn.
|
||||
"""
|
||||
query = (event.data or {}).get("content", "")
|
||||
# Immediate: stamp "now" into the frozen suffix, using whatever
|
||||
# recall blocks we already cached (from the prior turn or seeding).
|
||||
phase_state.refresh_dynamic_suffix()
|
||||
|
||||
async def _bg_refresh() -> None:
|
||||
try:
|
||||
await _refresh_recall_cache(query)
|
||||
# Re-stamp with the fresh recall blocks. Any iteration that
|
||||
# read the suffix before this point used the older recall
|
||||
# — acceptable; recall was already eventual-consistency.
|
||||
phase_state.refresh_dynamic_suffix()
|
||||
except Exception:
|
||||
logger.debug("background recall refresh failed", exc_info=True)
|
||||
|
||||
import asyncio as _asyncio
|
||||
|
||||
_asyncio.create_task(_bg_refresh())
|
||||
|
||||
session.event_bus.subscribe(
|
||||
[EventType.CLIENT_INPUT_RECEIVED],
|
||||
@@ -607,6 +872,9 @@ async def create_queen(
|
||||
except Exception:
|
||||
logger.debug("recall: initial seeding failed", exc_info=True)
|
||||
|
||||
# Freeze the dynamic suffix once so the first real turn sends a
|
||||
# byte-stable prompt even before CLIENT_INPUT_RECEIVED fires.
|
||||
phase_state.refresh_dynamic_suffix()
|
||||
return HookResult(system_prompt=phase_state.get_current_prompt())
|
||||
|
||||
# ---- Colony preparation -------------------------------------------
|
||||
@@ -706,7 +974,8 @@ async def create_queen(
|
||||
stream_id="queen",
|
||||
execution_id=session.id,
|
||||
dynamic_tools_provider=phase_state.get_current_tools,
|
||||
dynamic_prompt_provider=phase_state.get_current_prompt,
|
||||
dynamic_prompt_provider=phase_state.get_static_prompt,
|
||||
dynamic_prompt_suffix_provider=phase_state.get_dynamic_suffix,
|
||||
iteration_metadata_provider=lambda: {"phase": phase_state.phase},
|
||||
skills_catalog_prompt=phase_state.skills_catalog_prompt,
|
||||
protocols_prompt=phase_state.protocols_prompt,
|
||||
|
||||
@@ -0,0 +1,329 @@
|
||||
"""Per-colony MCP tool allowlist routes.
|
||||
|
||||
- GET /api/colony/{colony_name}/tools -- enumerate colony tool surface
|
||||
- PATCH /api/colony/{colony_name}/tools -- set or clear the allowlist
|
||||
|
||||
A colony's tool set is inherited from the queen that forked it, so the
|
||||
tool surface mirrors the queen's MCP servers. Lifecycle/synthetic tools
|
||||
are included for display only. MCP tools are grouped by origin server
|
||||
with per-tool ``enabled`` flags.
|
||||
|
||||
Semantics:
|
||||
|
||||
- ``enabled_mcp_tools: null`` → allow every MCP tool (default).
|
||||
- ``enabled_mcp_tools: []`` → allow no MCP tools (only lifecycle /
|
||||
synthetic pass through).
|
||||
- ``enabled_mcp_tools: [...]`` → only listed names pass.
|
||||
|
||||
The allowlist is persisted in a dedicated ``tools.json`` sidecar at
|
||||
``~/.hive/colonies/{colony_name}/tools.json``. Changes take effect on
|
||||
the *next* worker spawn. In-flight workers keep the tool list they
|
||||
booted with because workers have no dynamic tools provider today —
|
||||
mutating their tool set mid-turn would diverge from the list the LLM
|
||||
is already using.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from typing import Any
|
||||
|
||||
from aiohttp import web
|
||||
|
||||
from framework.host.colony_metadata import colony_metadata_path
|
||||
from framework.host.colony_tools_config import (
|
||||
load_colony_tools_config,
|
||||
update_colony_tools_config,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
_SYNTHETIC_NAMES = {"ask_user"}
|
||||
|
||||
|
||||
def _synthetic_entries() -> list[dict[str, Any]]:
|
||||
try:
|
||||
from framework.agent_loop.internals.synthetic_tools import build_ask_user_tool
|
||||
|
||||
tool = build_ask_user_tool()
|
||||
return [
|
||||
{
|
||||
"name": tool.name,
|
||||
"description": tool.description,
|
||||
"editable": False,
|
||||
}
|
||||
]
|
||||
except Exception:
|
||||
return [
|
||||
{
|
||||
"name": "ask_user",
|
||||
"description": "Pause and ask the user a structured question.",
|
||||
"editable": False,
|
||||
}
|
||||
]
|
||||
|
||||
|
||||
def _colony_runtimes_for_name(manager: Any, colony_name: str) -> list[Any]:
|
||||
"""Return every live ColonyRuntime whose session is attached to ``colony_name``."""
|
||||
sessions = getattr(manager, "_sessions", None) or {}
|
||||
runtimes: list[Any] = []
|
||||
for session in sessions.values():
|
||||
if getattr(session, "colony_name", None) != colony_name:
|
||||
continue
|
||||
# Both ``session.colony`` (queen-side unified runtime) and
|
||||
# ``session.colony_runtime`` (legacy worker runtime) may carry
|
||||
# tools that need the allowlist applied. We update both.
|
||||
for attr in ("colony", "colony_runtime"):
|
||||
rt = getattr(session, attr, None)
|
||||
if rt is not None and rt not in runtimes:
|
||||
runtimes.append(rt)
|
||||
return runtimes
|
||||
|
||||
|
||||
async def _render_catalog(manager: Any, colony_name: str) -> dict[str, list[dict[str, Any]]]:
|
||||
"""Build a per-server tool catalog for this colony.
|
||||
|
||||
All colonies inherit the queen's MCP surface, so we reuse the
|
||||
manager-level ``_mcp_tool_catalog`` populated during queen boot.
|
||||
"""
|
||||
# If a live runtime exists and carries its own registry, prefer it —
|
||||
# it's authoritative (reflects any post-queen-boot MCP additions).
|
||||
for rt in _colony_runtimes_for_name(manager, colony_name):
|
||||
tools = getattr(rt, "_tools", None)
|
||||
if not tools:
|
||||
continue
|
||||
mcp_names = set(getattr(rt, "_mcp_tool_names_all", set()) or set())
|
||||
if not mcp_names:
|
||||
continue
|
||||
catalog: dict[str, list[dict[str, Any]]] = {"(mcp)": []}
|
||||
for tool in tools:
|
||||
name = getattr(tool, "name", None)
|
||||
if name in mcp_names:
|
||||
catalog["(mcp)"].append(
|
||||
{
|
||||
"name": name,
|
||||
"description": getattr(tool, "description", ""),
|
||||
"input_schema": getattr(tool, "parameters", {}),
|
||||
}
|
||||
)
|
||||
return catalog
|
||||
|
||||
# Otherwise fall back to the queen-level snapshot. Build it on demand
|
||||
# (off the event loop) when empty so the Tool Library works before
|
||||
# any queen has been started in this process.
|
||||
cached = getattr(manager, "_mcp_tool_catalog", None)
|
||||
if isinstance(cached, dict) and cached:
|
||||
return cached
|
||||
try:
|
||||
import asyncio
|
||||
|
||||
from framework.server.queen_orchestrator import build_queen_tool_registry_bare
|
||||
|
||||
registry, built = await asyncio.to_thread(build_queen_tool_registry_bare)
|
||||
if manager is not None:
|
||||
manager._mcp_tool_catalog = built # type: ignore[attr-defined]
|
||||
manager._bootstrap_tool_registry = registry # type: ignore[attr-defined]
|
||||
return built
|
||||
except Exception:
|
||||
logger.warning("Colony tools: catalog bootstrap failed", exc_info=True)
|
||||
return {}
|
||||
|
||||
|
||||
def _lifecycle_entries_from_runtime(manager: Any, colony_name: str) -> list[dict[str, Any]]:
|
||||
"""Non-MCP tools currently registered on the colony runtime (if any).
|
||||
|
||||
When no live runtime is available we fall back to the bootstrap
|
||||
registry stashed on the manager by ``routes_queen_tools`` — it
|
||||
already has queen lifecycle tools registered, which are also the
|
||||
lifecycle tools colonies inherit at spawn time.
|
||||
"""
|
||||
out: list[dict[str, Any]] = []
|
||||
seen: set[str] = set()
|
||||
|
||||
def _push(name: str, description: str) -> None:
|
||||
if not name or name in seen:
|
||||
return
|
||||
if name in _SYNTHETIC_NAMES:
|
||||
return
|
||||
seen.add(name)
|
||||
out.append({"name": name, "description": description, "editable": False})
|
||||
|
||||
runtimes = _colony_runtimes_for_name(manager, colony_name)
|
||||
if runtimes:
|
||||
for rt in runtimes:
|
||||
mcp_names = set(getattr(rt, "_mcp_tool_names_all", set()) or set())
|
||||
for tool in getattr(rt, "_tools", []) or []:
|
||||
name = getattr(tool, "name", None)
|
||||
if name in mcp_names:
|
||||
continue
|
||||
_push(name, getattr(tool, "description", ""))
|
||||
else:
|
||||
# No live runtime — derive from the bootstrap registry.
|
||||
from framework.server.routes_queen_tools import _lifecycle_entries_without_session
|
||||
|
||||
catalog = getattr(manager, "_mcp_tool_catalog", {}) or {}
|
||||
mcp_names: set[str] = set()
|
||||
for entries in catalog.values():
|
||||
for entry in entries:
|
||||
if entry.get("name"):
|
||||
mcp_names.add(entry["name"])
|
||||
out.extend(_lifecycle_entries_without_session(manager, mcp_names))
|
||||
return out
|
||||
return sorted(out, key=lambda e: e["name"])
|
||||
|
||||
|
||||
def _render_servers(
|
||||
catalog: dict[str, list[dict[str, Any]]],
|
||||
enabled_mcp_tools: list[str] | None,
|
||||
) -> list[dict[str, Any]]:
|
||||
allowed: set[str] | None = None if enabled_mcp_tools is None else set(enabled_mcp_tools)
|
||||
servers: list[dict[str, Any]] = []
|
||||
for name in sorted(catalog):
|
||||
tools = []
|
||||
for entry in catalog[name]:
|
||||
tool_name = entry.get("name")
|
||||
tools.append(
|
||||
{
|
||||
"name": tool_name,
|
||||
"description": entry.get("description", ""),
|
||||
"input_schema": entry.get("input_schema", {}),
|
||||
"enabled": True if allowed is None else tool_name in allowed,
|
||||
}
|
||||
)
|
||||
servers.append({"name": name, "tools": tools})
|
||||
return servers
|
||||
|
||||
|
||||
async def handle_get_tools(request: web.Request) -> web.Response:
|
||||
"""GET /api/colony/{colony_name}/tools."""
|
||||
colony_name = request.match_info["colony_name"]
|
||||
if not colony_metadata_path(colony_name).exists():
|
||||
return web.json_response({"error": f"Colony '{colony_name}' not found"}, status=404)
|
||||
|
||||
manager = request.app.get("manager")
|
||||
# Allowlist now lives in a dedicated tools.json sidecar; helper
|
||||
# migrates any legacy metadata.json field on first read.
|
||||
enabled = load_colony_tools_config(colony_name)
|
||||
|
||||
catalog = await _render_catalog(manager, colony_name)
|
||||
stale = not catalog
|
||||
|
||||
return web.json_response(
|
||||
{
|
||||
"colony_name": colony_name,
|
||||
"enabled_mcp_tools": enabled,
|
||||
"stale": stale,
|
||||
"lifecycle": _lifecycle_entries_from_runtime(manager, colony_name),
|
||||
"synthetic": _synthetic_entries(),
|
||||
"mcp_servers": _render_servers(catalog, enabled),
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
async def handle_patch_tools(request: web.Request) -> web.Response:
|
||||
"""PATCH /api/colony/{colony_name}/tools."""
|
||||
colony_name = request.match_info["colony_name"]
|
||||
if not colony_metadata_path(colony_name).exists():
|
||||
return web.json_response({"error": f"Colony '{colony_name}' not found"}, status=404)
|
||||
|
||||
try:
|
||||
body = await request.json()
|
||||
except Exception:
|
||||
return web.json_response({"error": "Invalid JSON body"}, status=400)
|
||||
if not isinstance(body, dict) or "enabled_mcp_tools" not in body:
|
||||
return web.json_response(
|
||||
{"error": "Body must be an object with an 'enabled_mcp_tools' field"},
|
||||
status=400,
|
||||
)
|
||||
|
||||
enabled = body["enabled_mcp_tools"]
|
||||
if enabled is not None:
|
||||
if not isinstance(enabled, list) or not all(isinstance(x, str) for x in enabled):
|
||||
return web.json_response(
|
||||
{"error": "'enabled_mcp_tools' must be null or a list of strings"},
|
||||
status=400,
|
||||
)
|
||||
|
||||
manager = request.app.get("manager")
|
||||
|
||||
# Validate names against the known MCP catalog — lifts the same
|
||||
# typo-catching guarantee we already offer on queen tools.
|
||||
catalog = await _render_catalog(manager, colony_name)
|
||||
known: set[str] = {e.get("name") for entries in catalog.values() for e in entries if e.get("name")}
|
||||
if enabled is not None and known:
|
||||
unknown = sorted(set(enabled) - known)
|
||||
if unknown:
|
||||
return web.json_response(
|
||||
{"error": "Unknown MCP tool name(s)", "unknown": unknown},
|
||||
status=400,
|
||||
)
|
||||
|
||||
# Persist — tools.json sidecar, not metadata.json. Missing directory
|
||||
# is already guarded by the 404 check above.
|
||||
try:
|
||||
update_colony_tools_config(colony_name, enabled)
|
||||
except FileNotFoundError:
|
||||
return web.json_response({"error": f"Colony '{colony_name}' not found"}, status=404)
|
||||
|
||||
# Update any live runtimes so the NEXT worker spawn reflects the change.
|
||||
# We do NOT rebuild in-flight workers' tool lists (see module docstring).
|
||||
refreshed = 0
|
||||
for rt in _colony_runtimes_for_name(manager, colony_name):
|
||||
setter = getattr(rt, "set_tool_allowlist", None)
|
||||
if callable(setter):
|
||||
try:
|
||||
setter(enabled)
|
||||
refreshed += 1
|
||||
except Exception:
|
||||
logger.debug(
|
||||
"Colony tools: set_tool_allowlist failed on runtime for %s",
|
||||
colony_name,
|
||||
exc_info=True,
|
||||
)
|
||||
|
||||
logger.info(
|
||||
"Colony tools: colony=%s allowlist=%s refreshed_runtimes=%d",
|
||||
colony_name,
|
||||
"null" if enabled is None else f"{len(enabled)} tool(s)",
|
||||
refreshed,
|
||||
)
|
||||
return web.json_response(
|
||||
{
|
||||
"colony_name": colony_name,
|
||||
"enabled_mcp_tools": enabled,
|
||||
"refreshed_runtimes": refreshed,
|
||||
"note": "Changes apply to the next worker spawn. Running workers keep their booted tool list.",
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
async def handle_list_colonies(request: web.Request) -> web.Response:
|
||||
"""GET /api/colonies — list colonies with their tool allowlist status.
|
||||
|
||||
Powers the Tool Library page's colony picker.
|
||||
"""
|
||||
from framework.host.colony_metadata import list_colony_names, load_colony_metadata
|
||||
|
||||
colonies: list[dict[str, Any]] = []
|
||||
for name in list_colony_names():
|
||||
meta = load_colony_metadata(name)
|
||||
# Provenance stays in metadata.json; allowlist lives in tools.json.
|
||||
allowlist = load_colony_tools_config(name)
|
||||
colonies.append(
|
||||
{
|
||||
"name": name,
|
||||
"queen_name": meta.get("queen_name"),
|
||||
"created_at": meta.get("created_at"),
|
||||
"has_allowlist": allowlist is not None,
|
||||
"enabled_count": len(allowlist) if isinstance(allowlist, list) else None,
|
||||
}
|
||||
)
|
||||
return web.json_response({"colonies": colonies})
|
||||
|
||||
|
||||
def register_routes(app: web.Application) -> None:
|
||||
"""Register per-colony tool routes."""
|
||||
app.router.add_get("/api/colonies/tools-index", handle_list_colonies)
|
||||
app.router.add_get("/api/colony/{colony_name}/tools", handle_get_tools)
|
||||
app.router.add_patch("/api/colony/{colony_name}/tools", handle_patch_tools)
|
||||
@@ -0,0 +1,708 @@
|
||||
"""Colony worker inspection routes.
|
||||
|
||||
These expose per-spawned-worker data (identified by worker_id) so the
|
||||
frontend can render a colony-workers sidebar analogous to the queen
|
||||
profile panel. Distinct from ``routes_workers.py``, which deals with
|
||||
*graph nodes* inside a worker definition rather than live worker
|
||||
instances.
|
||||
|
||||
Session-scoped (bound to a live session's runtime):
|
||||
- GET /api/sessions/{session_id}/workers — live + completed workers
|
||||
- GET /api/sessions/{session_id}/colony/skills — colony's shared skills catalog
|
||||
- GET /api/sessions/{session_id}/colony/tools — colony's default tools
|
||||
|
||||
Colony-scoped (bound to the on-disk colony directory, independent of any
|
||||
live session — one colony has exactly one progress.db):
|
||||
- GET /api/colonies/{colony_name}/progress/snapshot — progress.db tasks/steps snapshot
|
||||
- GET /api/colonies/{colony_name}/progress/stream — SSE feed of upserts (polled)
|
||||
- GET /api/colonies/{colony_name}/data/tables — list user tables in progress.db
|
||||
- GET /api/colonies/{colony_name}/data/tables/{table}/rows — paginated rows
|
||||
- PATCH /api/colonies/{colony_name}/data/tables/{table}/rows — edit a row
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
import logging
|
||||
import re
|
||||
import sqlite3
|
||||
from pathlib import Path
|
||||
|
||||
from aiohttp import web
|
||||
|
||||
from framework.server.app import resolve_session
|
||||
|
||||
# Same validation used by create_colony — keep them in sync. Blocks path
|
||||
# traversal (``..``) and shell-special chars; the endpoint would 400 on
|
||||
# anything else anyway, but validating early avoids a disk hit.
|
||||
_COLONY_NAME_RE = re.compile(r"^[a-z0-9_]+$")
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Poll interval for the progress SSE stream. Progress rows flip on the
|
||||
# order of seconds as workers finish LLM turns, so 1s feels live without
|
||||
# hammering the DB.
|
||||
_PROGRESS_POLL_INTERVAL = 1.0
|
||||
|
||||
|
||||
def _worker_info_to_dict(info) -> dict:
|
||||
"""Serialize a WorkerInfo dataclass to a JSON-friendly dict."""
|
||||
result_dict = None
|
||||
if info.result is not None:
|
||||
r = info.result
|
||||
result_dict = {
|
||||
"status": r.status,
|
||||
"summary": r.summary,
|
||||
"error": r.error,
|
||||
"tokens_used": r.tokens_used,
|
||||
"duration_seconds": r.duration_seconds,
|
||||
}
|
||||
return {
|
||||
"worker_id": info.id,
|
||||
"task": info.task,
|
||||
"status": str(info.status),
|
||||
"started_at": info.started_at,
|
||||
"result": result_dict,
|
||||
}
|
||||
|
||||
|
||||
async def handle_list_workers(request: web.Request) -> web.Response:
    """GET /api/sessions/{session_id}/workers -- list workers in a session's colony.

    Merges two populations:

    1. In-memory workers from the session's unified ColonyRuntime
       (``session.colony._workers``) — includes live and just-finished
       entries because ``_workers`` isn't pruned on termination.
    2. Historical worker directories on disk under
       ``<session_dir>/workers/`` that are not in memory, populated from
       dir name / first user message / dir mtime. These are reported
       with ``status="historical"`` so the frontend can style them
       distinctly from actives.

    Falls back to the legacy ``session.colony_runtime`` for the
    in-memory half when ``session.colony`` isn't set.
    """
    session, err = resolve_session(request)
    if err:
        return err

    runtime = getattr(session, "colony", None) or getattr(session, "colony_runtime", None)

    merged: list[dict] = []
    seen_ids: set[str] = set()
    storage_path: Path | None = None

    if runtime is not None:
        for info in runtime.list_workers():
            merged.append(_worker_info_to_dict(info))
            seen_ids.add(info.id)
        raw_storage = getattr(runtime, "_storage_path", None)
        if raw_storage is not None:
            storage_path = Path(raw_storage)

    if storage_path is None:
        # Runtime didn't expose a storage path — fall back to the
        # session's own directory.
        session_dir = getattr(session, "queen_dir", None) or getattr(session, "session_dir", None)
        if session_dir is not None:
            storage_path = Path(session_dir)

    if storage_path is not None:
        # Disk walk runs in a thread so the event loop stays responsive.
        merged.extend(await asyncio.to_thread(_walk_historical_workers, storage_path, seen_ids))

    return web.json_response({"workers": merged})
|
||||
|
||||
|
||||
def _walk_historical_workers(storage_path: Path, known_ids: set[str]) -> list[dict]:
    """Scan ``<storage_path>/workers/`` for worker session dirs not already
    in memory and return minimal ``WorkerSummary``-shaped entries.

    No standalone status file is persisted per worker, so on-disk
    entries get ``status="historical"`` and ``result=None``; the task
    text is reconstructed from the first non-boilerplate user message in
    the worker's conversation parts.
    """
    workers_dir = storage_path / "workers"
    if not (workers_dir.exists() and workers_dir.is_dir()):
        return []

    try:
        candidates = list(workers_dir.iterdir())
    except OSError:
        return []

    # Most recently modified first so recent runs surface first in the tab.
    candidates.sort(key=_safe_mtime, reverse=True)

    historical: list[dict] = []
    for path in candidates:
        if not path.is_dir() or path.name in known_ids:
            continue
        historical.append(
            {
                "worker_id": path.name,
                "task": _extract_historical_task(path),
                "status": "historical",
                "started_at": _safe_mtime(path),
                "result": None,
            }
        )
    return historical
|
||||
|
||||
|
||||
def _safe_mtime(path: Path) -> float:
|
||||
try:
|
||||
return path.stat().st_mtime
|
||||
except OSError:
|
||||
return 0.0
|
||||
|
||||
|
||||
def _extract_historical_task(worker_dir: Path) -> str:
|
||||
"""Pull the worker's initial task from its conversation parts.
|
||||
|
||||
seq 0 is a boilerplate "Hello" greeting in most flows; the real
|
||||
task lands in an early user message (typically seq 1 or 2). Scan
|
||||
the first few parts and return the first ``role="user"`` content
|
||||
that isn't the greeting. Bounded at 5 parts to stay cheap on
|
||||
directory listings containing hundreds of workers.
|
||||
"""
|
||||
parts_dir = worker_dir / "conversations" / "parts"
|
||||
if not parts_dir.exists():
|
||||
return ""
|
||||
try:
|
||||
for i in range(5):
|
||||
p = parts_dir / f"{i:010d}.json"
|
||||
if not p.exists():
|
||||
break
|
||||
data = json.loads(p.read_text(encoding="utf-8"))
|
||||
if data.get("role") != "user":
|
||||
continue
|
||||
content = data.get("content", "")
|
||||
if not isinstance(content, str):
|
||||
continue
|
||||
text = content.strip()
|
||||
if not text or text.lower() == "hello":
|
||||
continue
|
||||
return text[:400]
|
||||
except Exception:
|
||||
return ""
|
||||
return ""
|
||||
|
||||
|
||||
# ── Skills & tools ─────────────────────────────────────────────────
|
||||
|
||||
|
||||
def _parsed_skill_to_dict(skill) -> dict:
|
||||
"""Serialize a ParsedSkill for the frontend."""
|
||||
return {
|
||||
"name": skill.name,
|
||||
"description": skill.description,
|
||||
"location": skill.location,
|
||||
"base_dir": skill.base_dir,
|
||||
"source_scope": skill.source_scope,
|
||||
}
|
||||
|
||||
|
||||
async def handle_list_colony_skills(request: web.Request) -> web.Response:
    """GET /api/sessions/{session_id}/colony/skills -- list skills the colony sees."""
    session, err = resolve_session(request)
    if err:
        return err

    runtime = session.colony_runtime
    if runtime is None:
        return web.json_response({"skills": []})

    # The skills manager exposes no public iterator yet, so reach into
    # the private catalog dict directly and tolerate either shape (a
    # bare SkillsManager, or the from_precomputed variant which has no
    # catalog at all).
    catalog = getattr(runtime._skills_manager, "_catalog", None)
    raw_skills = getattr(catalog, "_skills", None) if catalog is not None else None
    if not isinstance(raw_skills, dict):
        return web.json_response({"skills": []})

    serialized = sorted(
        (_parsed_skill_to_dict(s) for s in raw_skills.values()),
        key=lambda entry: entry["name"],
    )
    return web.json_response({"skills": serialized})
|
||||
|
||||
|
||||
# Tools that ship with the framework and have no credential provider,
# but still deserve their own logical group. Surfaced to the frontend
# as ``provider="system"`` so the UI treats them exactly like a
# credential-backed group. NOTE(review): membership is hand-picked —
# keep in sync with the framework's built-in tool registrations when
# new system tools are added (consumed by ``_tool_to_dict``).
_SYSTEM_TOOLS: frozenset[str] = frozenset(
    {
        "get_account_info",
        "get_current_time",
        "bash_kill",
        "bash_output",
        "execute_command_tool",
        "example_tool",
    }
)
|
||||
|
||||
|
||||
def _tool_to_dict(tool, provider_map: dict[str, str] | None) -> dict:
    """Serialize a Tool dataclass for the frontend.

    ``provider_map`` is the colony runtime's tool_name → credential
    provider map (built by the CredentialResolver pipeline stage from
    ``CredentialStoreAdapter.get_tool_provider_map()``). Credential-
    backed tools get a canonical provider key (e.g. ``"hubspot"``,
    ``"gmail"``); framework / core tools get ``None``, except for the
    hand-picked entries in ``_SYSTEM_TOOLS`` which are tagged
    ``"system"``.
    """
    tool_name = getattr(tool, "name", "")
    mapping = provider_map or {}
    provider = mapping.get(tool_name)
    if provider is None and tool_name in _SYSTEM_TOOLS:
        provider = "system"
    return {
        "name": tool_name,
        "description": getattr(tool, "description", ""),
        "provider": provider,
    }
|
||||
|
||||
|
||||
async def handle_list_colony_tools(request: web.Request) -> web.Response:
    """GET /api/sessions/{session_id}/colony/tools -- list the colony's default tools."""
    session, err = resolve_session(request)
    if err:
        return err

    runtime = session.colony_runtime
    if runtime is None:
        return web.json_response({"tools": []})

    provider_map = getattr(runtime, "_tool_provider_map", None)
    serialized = sorted(
        (_tool_to_dict(t, provider_map) for t in (runtime._tools or [])),
        key=lambda entry: entry["name"],
    )
    return web.json_response({"tools": serialized})
|
||||
|
||||
|
||||
# ── Progress DB (tasks/steps) ──────────────────────────────────────
|
||||
|
||||
|
||||
def _resolve_progress_db_by_name(colony_name: str) -> Path | None:
    """Resolve a colony's progress.db path by directory name.

    Returns ``None`` when the name fails validation or the file does not
    exist. Both conditions render as an empty Data tab in the UI rather
    than a hard error, so an operator can open the panel before any
    workers have actually run.
    """
    if _COLONY_NAME_RE.match(colony_name) is None:
        return None
    candidate = Path.home() / ".hive" / "colonies" / colony_name / "data" / "progress.db"
    if candidate.exists():
        return candidate
    return None
|
||||
|
||||
|
||||
def _read_progress_snapshot(db_path: Path, worker_id: str | None) -> dict:
|
||||
"""Read tasks + steps from progress.db, optionally filtered by worker_id.
|
||||
|
||||
The worker_id filter applies to tasks (claimed by that worker) and
|
||||
to steps (executed by that worker). If omitted, returns all rows.
|
||||
"""
|
||||
con = sqlite3.connect(f"file:{db_path}?mode=ro", uri=True, timeout=5.0)
|
||||
try:
|
||||
con.row_factory = sqlite3.Row
|
||||
if worker_id:
|
||||
task_rows = con.execute(
|
||||
"SELECT * FROM tasks WHERE worker_id = ? ORDER BY updated_at DESC",
|
||||
(worker_id,),
|
||||
).fetchall()
|
||||
step_rows = con.execute(
|
||||
"SELECT * FROM steps WHERE worker_id = ? ORDER BY task_id, seq",
|
||||
(worker_id,),
|
||||
).fetchall()
|
||||
else:
|
||||
task_rows = con.execute("SELECT * FROM tasks ORDER BY updated_at DESC LIMIT 500").fetchall()
|
||||
step_rows = con.execute("SELECT * FROM steps ORDER BY task_id, seq LIMIT 2000").fetchall()
|
||||
return {
|
||||
"tasks": [dict(r) for r in task_rows],
|
||||
"steps": [dict(r) for r in step_rows],
|
||||
}
|
||||
finally:
|
||||
con.close()
|
||||
|
||||
|
||||
async def handle_progress_snapshot(request: web.Request) -> web.Response:
    """GET /api/colonies/{colony_name}/progress/snapshot

    Optional ?worker_id=... filters to rows touched by a specific worker.
    """
    db_path = _resolve_progress_db_by_name(request.match_info["colony_name"])
    if db_path is None:
        # Unknown colony or no progress.db yet — empty snapshot, not an error.
        return web.json_response({"tasks": [], "steps": []})

    worker_filter = request.query.get("worker_id") or None
    snapshot = await asyncio.to_thread(_read_progress_snapshot, db_path, worker_filter)
    return web.json_response(snapshot)
|
||||
|
||||
|
||||
def _read_progress_upserts(
|
||||
db_path: Path,
|
||||
worker_id: str | None,
|
||||
since: str | None,
|
||||
) -> tuple[list[dict], list[dict], str | None]:
|
||||
"""Return task/step rows with ``updated_at`` (tasks) or a derived
|
||||
timestamp (steps) newer than ``since``, plus the new high-water mark.
|
||||
|
||||
Steps don't carry an ``updated_at`` column — we use
|
||||
``COALESCE(completed_at, started_at)`` as the change witness. A step
|
||||
without either timestamp hasn't changed since the last poll and is
|
||||
skipped.
|
||||
|
||||
``since`` is an ISO8601 string (as produced by progress_db._now_iso).
|
||||
``None`` means "give me everything" — used for the SSE priming frame.
|
||||
"""
|
||||
con = sqlite3.connect(f"file:{db_path}?mode=ro", uri=True, timeout=5.0)
|
||||
try:
|
||||
con.row_factory = sqlite3.Row
|
||||
task_sql = "SELECT * FROM tasks"
|
||||
step_sql = (
|
||||
"SELECT *, COALESCE(completed_at, started_at) AS _ts "
|
||||
"FROM steps WHERE COALESCE(completed_at, started_at) IS NOT NULL"
|
||||
)
|
||||
task_args: list = []
|
||||
step_args: list = []
|
||||
if since is not None:
|
||||
task_sql += " WHERE updated_at > ?"
|
||||
step_sql += " AND COALESCE(completed_at, started_at) > ?"
|
||||
task_args.append(since)
|
||||
step_args.append(since)
|
||||
if worker_id:
|
||||
joiner_t = " AND " if since is not None else " WHERE "
|
||||
task_sql += joiner_t + "worker_id = ?"
|
||||
step_sql += " AND worker_id = ?"
|
||||
task_args.append(worker_id)
|
||||
step_args.append(worker_id)
|
||||
task_sql += " ORDER BY updated_at"
|
||||
step_sql += " ORDER BY _ts"
|
||||
|
||||
task_rows = con.execute(task_sql, task_args).fetchall()
|
||||
step_rows = con.execute(step_sql, step_args).fetchall()
|
||||
|
||||
tasks = [dict(r) for r in task_rows]
|
||||
steps = [dict(r) for r in step_rows]
|
||||
# High-water mark = max timestamp across both sets. Fall back to
|
||||
# the previous ``since`` when nothing changed.
|
||||
ts_values = [t["updated_at"] for t in tasks]
|
||||
ts_values.extend(s["_ts"] for s in steps if s.get("_ts"))
|
||||
new_since = max(ts_values) if ts_values else since
|
||||
return tasks, steps, new_since
|
||||
finally:
|
||||
con.close()
|
||||
|
||||
|
||||
async def handle_progress_stream(request: web.Request) -> web.StreamResponse:
    """GET /api/colonies/{colony_name}/progress/stream

    SSE feed that emits ``snapshot`` once (current state) followed by
    ``upsert`` events whenever a task/step row changes. Polls the DB
    every ``_PROGRESS_POLL_INTERVAL`` seconds — the sqlite3 CLI path
    workers use for writes doesn't fire SQLite's update hook on our
    connection, so polling is the robust option.
    """
    colony_name = request.match_info["colony_name"]
    worker_id = request.query.get("worker_id") or None

    resp = web.StreamResponse(
        status=200,
        headers={
            "Content-Type": "text/event-stream",
            "Cache-Control": "no-cache, no-transform",
            "Connection": "keep-alive",
            # Disables response buffering in nginx-style reverse proxies
            # so events are flushed to the client immediately.
            "X-Accel-Buffering": "no",
        },
    )
    await resp.prepare(request)

    # Format one SSE frame: "event: <name>\ndata: <json>\n\n".
    async def _send(event: str, data: dict) -> None:
        payload = f"event: {event}\ndata: {json.dumps(data)}\n\n"
        await resp.write(payload.encode("utf-8"))

    db_path = _resolve_progress_db_by_name(colony_name)
    if db_path is None:
        # No progress.db yet — emit an empty snapshot and a terminal
        # "end" frame so the client knows not to wait for upserts.
        await _send("snapshot", {"tasks": [], "steps": []})
        await _send("end", {"reason": "no_progress_db"})
        return resp

    try:
        snapshot = await asyncio.to_thread(_read_progress_snapshot, db_path, worker_id)
        await _send("snapshot", snapshot)

        since: str | None = None
        # Initialize the high-water mark from the snapshot so we don't
        # re-emit every row as "new" on the first poll.
        ts_values: list[str] = [t.get("updated_at") for t in snapshot["tasks"] if t.get("updated_at")]
        ts_values.extend(
            s.get("completed_at") or s.get("started_at")
            for s in snapshot["steps"]
            if s.get("completed_at") or s.get("started_at")
        )
        if ts_values:
            since = max(v for v in ts_values if v)

        # The loop relies on client disconnect surfacing as
        # ConnectionResetError from ``_send`` — no explicit alive check
        # required.
        while True:
            await asyncio.sleep(_PROGRESS_POLL_INTERVAL)
            tasks, steps, new_since = await asyncio.to_thread(_read_progress_upserts, db_path, worker_id, since)
            if tasks or steps:
                await _send("upsert", {"tasks": tasks, "steps": steps})
            since = new_since
    except (asyncio.CancelledError, ConnectionResetError):
        # Client disconnected; clean exit. Re-raise so aiohttp/asyncio
        # can finish cancellation properly.
        raise
    except Exception as exc:
        logger.warning("progress stream error: %s", exc, exc_info=True)
        # Best-effort error frame; the connection may already be gone.
        try:
            await _send("error", {"message": str(exc)})
        except Exception:
            pass
    return resp
|
||||
|
||||
|
||||
# ── Raw data grid (airtable-style view/edit of progress.db tables) ─────
|
||||
#
|
||||
# The Data tab lets the operator inspect and hand-edit SQLite rows.
|
||||
# Identifier-quoting note: SQLite params can only bind values, never
|
||||
# identifiers, so we have to interpolate table/column names into SQL.
|
||||
# Every name is *validated against sqlite_master / PRAGMA table_info*
|
||||
# before use and then wrapped with ``_q()`` which escapes embedded
|
||||
# quotes. Do NOT accept raw names from the request without running them
|
||||
# through ``_validate_ident`` first.
|
||||
|
||||
|
||||
def _q(ident: str) -> str:
|
||||
"""Quote a SQLite identifier (table or column) safely."""
|
||||
return '"' + ident.replace('"', '""') + '"'
|
||||
|
||||
|
||||
def _list_user_tables(con: sqlite3.Connection) -> list[str]:
|
||||
return [
|
||||
r["name"]
|
||||
for r in con.execute(
|
||||
"SELECT name FROM sqlite_master WHERE type='table' AND name NOT LIKE 'sqlite_%' ORDER BY name"
|
||||
)
|
||||
]
|
||||
|
||||
|
||||
def _table_columns(con: sqlite3.Connection, table: str) -> list[dict]:
    """Return PRAGMA table_info rows as dicts. Empty list if no such table."""
    columns: list[dict] = []
    for row in con.execute(f"PRAGMA table_info({_q(table)})"):
        columns.append(
            {
                "name": row["name"],
                "type": row["type"] or "",
                "notnull": bool(row["notnull"]),
                # pk > 0 means the column is part of the primary key
                # (its ordinal); 0 means non-PK.
                "pk": int(row["pk"]),
                "dflt_value": row["dflt_value"],
            }
        )
    return columns
|
||||
|
||||
|
||||
def _read_tables_overview(db_path: Path) -> list[dict]:
    """List user tables with columns + row counts."""
    con = sqlite3.connect(f"file:{db_path}?mode=ro", uri=True, timeout=5.0)
    try:
        con.row_factory = sqlite3.Row
        overview: list[dict] = []
        for table_name in _list_user_tables(con):
            columns = _table_columns(con, table_name)
            count_row = con.execute(f"SELECT COUNT(*) AS c FROM {_q(table_name)}").fetchone()
            overview.append(
                {
                    "name": table_name,
                    "columns": columns,
                    "row_count": int(count_row["c"]),
                    "primary_key": [c["name"] for c in columns if c["pk"] > 0],
                }
            )
        return overview
    finally:
        con.close()
|
||||
|
||||
|
||||
def _validate_ident(name: str, known: set[str]) -> str | None:
|
||||
"""Return ``name`` if present in ``known``, else ``None``."""
|
||||
return name if name in known else None
|
||||
|
||||
|
||||
def _read_table_rows(
    db_path: Path,
    table: str,
    limit: int,
    offset: int,
    order_by: str | None,
    order_dir: str,
) -> dict:
    """Read one page of rows from ``table``, optionally ordered.

    Returns ``{"error": ...}`` when the table doesn't exist. An
    ``order_by`` naming an unknown column is silently ignored rather
    than rejected.
    """
    con = sqlite3.connect(f"file:{db_path}?mode=ro", uri=True, timeout=5.0)
    try:
        con.row_factory = sqlite3.Row
        if _validate_ident(table, set(_list_user_tables(con))) is None:
            return {"error": f"unknown table: {table}"}
        cols = _table_columns(con, table)
        col_names = {c["name"] for c in cols}

        query = f"SELECT * FROM {_q(table)}"
        if order_by and order_by in col_names:
            direction = "DESC" if order_dir.lower() == "desc" else "ASC"
            query += f" ORDER BY {_q(order_by)} {direction}"
        # limit/offset are bound as parameters, never interpolated.
        query += " LIMIT ? OFFSET ?"
        page = con.execute(query, (int(limit), int(offset))).fetchall()
        total = con.execute(f"SELECT COUNT(*) AS c FROM {_q(table)}").fetchone()["c"]
        return {
            "table": table,
            "columns": cols,
            "primary_key": [c["name"] for c in cols if c["pk"] > 0],
            "rows": [dict(r) for r in page],
            "total": int(total),
            "limit": int(limit),
            "offset": int(offset),
        }
    finally:
        con.close()
|
||||
|
||||
|
||||
def _update_table_row(
    db_path: Path,
    table: str,
    pk: dict,
    updates: dict,
) -> dict:
    """Apply ``updates`` (column->value) to the row matching ``pk``.

    Returns ``{"updated": n}`` with the number of rows affected (0 or 1),
    or ``{"error": ...}`` on validation failure.
    """
    if not updates:
        return {"error": "no updates provided"}
    # Read-write connection — this is the only write path in this module.
    con = sqlite3.connect(db_path, timeout=5.0)
    try:
        con.row_factory = sqlite3.Row
        tables = set(_list_user_tables(con))
        if _validate_ident(table, tables) is None:
            return {"error": f"unknown table: {table}"}
        cols = _table_columns(con, table)
        col_names = {c["name"] for c in cols}
        pk_cols = [c["name"] for c in cols if c["pk"] > 0]
        if not pk_cols:
            return {"error": f"table {table!r} has no primary key; cannot edit by row"}

        # Validate pk has every pk column.
        missing = [p for p in pk_cols if p not in pk]
        if missing:
            return {"error": f"missing primary key columns: {missing}"}

        # Validate update columns exist and aren't part of the primary key
        # (changing a PK column would silently break joins/foreign refs).
        bad = [c for c in updates if c not in col_names]
        if bad:
            return {"error": f"unknown columns: {bad}"}
        pk_update = [c for c in updates if c in pk_cols]
        if pk_update:
            return {"error": f"cannot edit primary key columns: {pk_update}"}

        # Validate every bound value is a SQLite-bindable scalar.
        # BUGFIX: previously unchecked — binding a list/dict raised
        # sqlite3.InterfaceError (surfacing to the client as a 500)
        # instead of the validation-error dict this function promises.
        scalar_types = (type(None), int, float, str, bytes)
        non_scalar = [c for c in pk_cols if not isinstance(pk[c], scalar_types)]
        non_scalar += [c for c, v in updates.items() if not isinstance(v, scalar_types)]
        if non_scalar:
            return {"error": f"non-scalar values for columns: {non_scalar}"}

        # Identifiers are validated above, then quoted; values are bound.
        set_sql = ", ".join(f"{_q(c)} = ?" for c in updates)
        where_sql = " AND ".join(f"{_q(c)} = ?" for c in pk_cols)
        sql = f"UPDATE {_q(table)} SET {set_sql} WHERE {where_sql}"
        params = list(updates.values()) + [pk[c] for c in pk_cols]
        cur = con.execute(sql, params)
        con.commit()
        return {"updated": cur.rowcount}
    finally:
        con.close()
|
||||
|
||||
|
||||
async def handle_list_tables(request: web.Request) -> web.Response:
    """GET /api/colonies/{colony_name}/data/tables"""
    db_path = _resolve_progress_db_by_name(request.match_info["colony_name"])
    if db_path is None:
        # Unknown colony or no progress.db yet — empty list, not an error.
        return web.json_response({"tables": []})
    overview = await asyncio.to_thread(_read_tables_overview, db_path)
    return web.json_response({"tables": overview})
|
||||
|
||||
|
||||
async def handle_table_rows(request: web.Request) -> web.Response:
    """GET /api/colonies/{colony_name}/data/tables/{table}/rows"""
    db_path = _resolve_progress_db_by_name(request.match_info["colony_name"])
    if db_path is None:
        return web.json_response({"error": "no progress.db"}, status=404)

    table = request.match_info["table"]
    # Clamp limit: 500 is enough for the grid's virtualization window;
    # a larger cap would make accidental full-table loads cheap.
    try:
        limit = max(1, min(500, int(request.query.get("limit", "100"))))
        offset = max(0, int(request.query.get("offset", "0")))
    except ValueError:
        return web.json_response({"error": "invalid limit/offset"}, status=400)
    order_by = request.query.get("order_by") or None
    order_dir = request.query.get("order_dir", "asc")

    result = await asyncio.to_thread(
        _read_table_rows, db_path, table, limit, offset, order_by, order_dir
    )
    status = 400 if "error" in result else 200
    return web.json_response(result, status=status)
|
||||
|
||||
|
||||
async def handle_update_row(request: web.Request) -> web.Response:
    """PATCH /api/colonies/{colony_name}/data/tables/{table}/rows

    Body: ``{"pk": {col: value, ...}, "updates": {col: value, ...}}``.
    """
    db_path = _resolve_progress_db_by_name(request.match_info["colony_name"])
    if db_path is None:
        return web.json_response({"error": "no progress.db"}, status=404)

    try:
        body = await request.json()
    except Exception:
        return web.json_response({"error": "invalid JSON body"}, status=400)
    pk = body.get("pk") or {}
    updates = body.get("updates") or {}
    if not (isinstance(pk, dict) and isinstance(updates, dict)):
        return web.json_response({"error": "pk and updates must be objects"}, status=400)

    table = request.match_info["table"]
    result = await asyncio.to_thread(_update_table_row, db_path, table, pk, updates)
    status = 400 if "error" in result else 200
    return web.json_response(result, status=status)
|
||||
|
||||
|
||||
def register_routes(app: web.Application) -> None:
    """Register colony worker routes."""
    router = app.router
    # Session-scoped — these read live runtime state from a session.
    router.add_get("/api/sessions/{session_id}/workers", handle_list_workers)
    router.add_get("/api/sessions/{session_id}/colony/skills", handle_list_colony_skills)
    router.add_get("/api/sessions/{session_id}/colony/tools", handle_list_colony_tools)
    # Colony-scoped — one progress.db per colony, no session indirection.
    router.add_get("/api/colonies/{colony_name}/progress/snapshot", handle_progress_snapshot)
    router.add_get("/api/colonies/{colony_name}/progress/stream", handle_progress_stream)
    router.add_get("/api/colonies/{colony_name}/data/tables", handle_list_tables)
    router.add_get("/api/colonies/{colony_name}/data/tables/{table}/rows", handle_table_rows)
    router.add_patch("/api/colonies/{colony_name}/data/tables/{table}/rows", handle_update_row)
|
||||
@@ -6,6 +6,7 @@ Routes:
|
||||
- GET /api/config/models — curated provider→models list
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
@@ -301,6 +302,53 @@ def _hot_swap_sessions(request: web.Request, full_model: str, api_key: str | Non
|
||||
return swapped
|
||||
|
||||
|
||||
async def _validate_provider_key(
|
||||
provider: str,
|
||||
api_key: str,
|
||||
api_base: str | None = None,
|
||||
model: str | None = None,
|
||||
) -> dict:
|
||||
"""Validate an API key against the provider. Returns {"valid": bool, "message": str}.
|
||||
|
||||
Runs the check in a thread pool to avoid blocking the event loop.
|
||||
"""
|
||||
from scripts.check_llm_key import (
|
||||
PROVIDERS as CHECK_PROVIDERS,
|
||||
check_anthropic_compatible,
|
||||
check_minimax,
|
||||
check_openai_compatible,
|
||||
check_openrouter,
|
||||
check_openrouter_model,
|
||||
)
|
||||
|
||||
def _check() -> dict:
|
||||
pid = provider.lower()
|
||||
try:
|
||||
# Subscription providers with custom api_base
|
||||
if pid == "openrouter" and model:
|
||||
return check_openrouter_model(api_key, model=model, api_base=api_base or "https://openrouter.ai/api/v1")
|
||||
if api_base and pid == "minimax":
|
||||
return check_minimax(api_key, api_base)
|
||||
if api_base and pid == "openrouter":
|
||||
return check_openrouter(api_key, api_base)
|
||||
if api_base and pid == "kimi":
|
||||
return check_anthropic_compatible(api_key, api_base.rstrip("/") + "/v1/messages", "Kimi")
|
||||
if api_base and pid == "hive":
|
||||
return check_anthropic_compatible(api_key, api_base.rstrip("/") + "/v1/messages", "Hive")
|
||||
if api_base:
|
||||
endpoint = api_base.rstrip("/") + "/models"
|
||||
name = {"zai": "ZAI"}.get(pid, "Custom provider")
|
||||
return check_openai_compatible(api_key, endpoint, name)
|
||||
if pid in CHECK_PROVIDERS:
|
||||
return CHECK_PROVIDERS[pid](api_key)
|
||||
# No check available — assume valid
|
||||
return {"valid": True, "message": f"No health check for {pid}"}
|
||||
except Exception as exc:
|
||||
return {"valid": None, "message": f"Validation error: {exc}"}
|
||||
|
||||
return await asyncio.get_event_loop().run_in_executor(None, _check)
|
||||
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Handlers
|
||||
# ------------------------------------------------------------------
|
||||
@@ -324,9 +372,9 @@ async def handle_get_llm_config(request: web.Request) -> web.Response:
|
||||
if _resolve_api_key(pid, request) is not None:
|
||||
connected.append(pid)
|
||||
|
||||
# Subscription detection
|
||||
# Subscription detection — only include subscriptions whose tokens exist
|
||||
active_subscription = _get_active_subscription(llm)
|
||||
detected_subscriptions = _detect_subscriptions()
|
||||
detected_subscriptions = [sid for sid in _detect_subscriptions() if _get_subscription_token(sid)]
|
||||
|
||||
return web.json_response(
|
||||
{
|
||||
@@ -369,6 +417,21 @@ async def handle_update_llm_config(request: web.Request) -> web.Response:
|
||||
provider = sub["provider"]
|
||||
api_base = sub.get("api_base")
|
||||
|
||||
# Validate the subscription token before committing
|
||||
token = _get_subscription_token(subscription_id)
|
||||
if not token:
|
||||
return web.json_response(
|
||||
{"error": f"No credential found for {sub['name']}. Please check your subscription or API key."},
|
||||
status=400,
|
||||
)
|
||||
|
||||
check = await _validate_provider_key(provider, token, api_base=api_base)
|
||||
if check.get("valid") is False:
|
||||
return web.json_response(
|
||||
{"error": f"{sub['name']} key validation failed: {check.get('message', 'unknown error')}"},
|
||||
status=400,
|
||||
)
|
||||
|
||||
# Look up token limits from preset
|
||||
max_tokens: int | None = None
|
||||
max_context_tokens: int | None = None
|
||||
@@ -399,8 +462,7 @@ async def handle_update_llm_config(request: web.Request) -> web.Response:
|
||||
|
||||
_write_config_atomic(config)
|
||||
|
||||
# Hot-swap with subscription token
|
||||
token = _get_subscription_token(subscription_id)
|
||||
# Hot-swap with subscription token (already validated above)
|
||||
full_model = f"{provider}/{model}"
|
||||
swapped = _hot_swap_sessions(request, full_model, api_key=token, api_base=api_base)
|
||||
|
||||
@@ -430,15 +492,36 @@ async def handle_update_llm_config(request: web.Request) -> web.Response:
|
||||
if not provider or not model:
|
||||
return web.json_response({"error": "Both 'provider' and 'model' are required"}, status=400)
|
||||
|
||||
# Look up token limits from catalogue
|
||||
# Verify model exists in the catalogue
|
||||
model_info = _find_model_info(provider, model)
|
||||
max_tokens = model_info["max_tokens"] if model_info else 8192
|
||||
max_context_tokens = model_info["max_context_tokens"] if model_info else 120000
|
||||
if not model_info:
|
||||
return web.json_response(
|
||||
{"error": f"Model '{model}' is not available for provider '{provider}'."},
|
||||
status=400,
|
||||
)
|
||||
|
||||
max_tokens = model_info["max_tokens"]
|
||||
max_context_tokens = model_info["max_context_tokens"]
|
||||
|
||||
# Determine env var and api_base
|
||||
env_var = PROVIDER_ENV_VARS.get(provider.lower(), "")
|
||||
api_base = _get_api_base_for_provider(provider)
|
||||
|
||||
# Validate the API key before committing
|
||||
api_key = _resolve_api_key(provider, request)
|
||||
if not api_key:
|
||||
return web.json_response(
|
||||
{"error": f"No API key found for {provider}. Please add one in Manage Keys."},
|
||||
status=400,
|
||||
)
|
||||
|
||||
check = await _validate_provider_key(provider, api_key, api_base=api_base, model=model)
|
||||
if check.get("valid") is False:
|
||||
return web.json_response(
|
||||
{"error": f"API key validation failed for {provider}: {check.get('message', 'unknown error')}"},
|
||||
status=400,
|
||||
)
|
||||
|
||||
# Update ~/.hive/configuration.json
|
||||
config = get_hive_config()
|
||||
llm_section = config.setdefault("llm", {})
|
||||
@@ -458,8 +541,7 @@ async def handle_update_llm_config(request: web.Request) -> web.Response:
|
||||
|
||||
_write_config_atomic(config)
|
||||
|
||||
# Hot-swap all running sessions
|
||||
api_key = _resolve_api_key(provider, request)
|
||||
# Hot-swap all running sessions (api_key already validated above)
|
||||
full_model = f"{provider}/{model}"
|
||||
swapped = _hot_swap_sessions(request, full_model, api_key=api_key, api_base=api_base)
|
||||
|
||||
@@ -594,6 +676,64 @@ async def handle_get_models(request: web.Request) -> web.Response:
|
||||
return web.json_response({"models": MODELS_CATALOGUE})
|
||||
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# User avatar
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
MAX_AVATAR_BYTES = 2 * 1024 * 1024 # 2 MB
|
||||
_ALLOWED_AVATAR_TYPES = {
|
||||
"image/jpeg": ".jpg",
|
||||
"image/png": ".png",
|
||||
"image/webp": ".webp",
|
||||
}
|
||||
|
||||
|
||||
async def handle_upload_user_avatar(request: web.Request) -> web.Response:
|
||||
"""POST /api/config/profile/avatar — upload user profile picture."""
|
||||
reader = await request.multipart()
|
||||
field = await reader.next()
|
||||
if field is None or field.name != "avatar":
|
||||
return web.json_response({"error": "Expected a file field named 'avatar'"}, status=400)
|
||||
|
||||
content_type = getattr(field, "content_type", None) or field.headers.get("Content-Type", "")
|
||||
ext = _ALLOWED_AVATAR_TYPES.get(content_type)
|
||||
if not ext:
|
||||
return web.json_response(
|
||||
{"error": f"Unsupported image type: {content_type}. Use JPEG, PNG, or WebP."},
|
||||
status=400,
|
||||
)
|
||||
|
||||
data = bytearray()
|
||||
while True:
|
||||
chunk = await field.read_chunk(8192)
|
||||
if not chunk:
|
||||
break
|
||||
data.extend(chunk)
|
||||
if len(data) > MAX_AVATAR_BYTES:
|
||||
return web.json_response({"error": "Image too large. Maximum size is 2 MB."}, status=400)
|
||||
|
||||
if not data:
|
||||
return web.json_response({"error": "Empty file"}, status=400)
|
||||
|
||||
# Remove existing avatar files
|
||||
for existing in HIVE_CONFIG_FILE.parent.glob("avatar.*"):
|
||||
existing.unlink(missing_ok=True)
|
||||
|
||||
avatar_path = HIVE_CONFIG_FILE.parent / f"avatar{ext}"
|
||||
avatar_path.write_bytes(data)
|
||||
logger.info("User avatar uploaded: %s (%d bytes)", avatar_path.name, len(data))
|
||||
return web.json_response({"avatar_url": "/api/config/profile/avatar"})
|
||||
|
||||
|
||||
async def handle_get_user_avatar(request: web.Request) -> web.Response:
|
||||
"""GET /api/config/profile/avatar — serve user profile picture."""
|
||||
for ext in _ALLOWED_AVATAR_TYPES.values():
|
||||
avatar_path = HIVE_CONFIG_FILE.parent / f"avatar{ext}"
|
||||
if avatar_path.exists():
|
||||
return web.FileResponse(avatar_path, headers={"Cache-Control": "public, max-age=3600"})
|
||||
return web.json_response({"error": "No avatar found"}, status=404)
|
||||
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Route registration
|
||||
# ------------------------------------------------------------------
|
||||
@@ -606,3 +746,5 @@ def register_routes(app: web.Application) -> None:
|
||||
app.router.add_get("/api/config/models", handle_get_models)
|
||||
app.router.add_get("/api/config/profile", handle_get_profile)
|
||||
app.router.add_put("/api/config/profile", handle_update_profile)
|
||||
app.router.add_post("/api/config/profile/avatar", handle_upload_user_avatar)
|
||||
app.router.add_get("/api/config/profile/avatar", handle_get_user_avatar)
|
||||
|
||||
@@ -7,7 +7,7 @@ import os
|
||||
from aiohttp import web
|
||||
from pydantic import SecretStr
|
||||
|
||||
from framework.credentials.models import CredentialKey, CredentialObject
|
||||
from framework.credentials.models import CredentialDecryptionError, CredentialKey, CredentialObject
|
||||
from framework.credentials.store import CredentialStore
|
||||
from framework.server.app import validate_agent_path
|
||||
|
||||
@@ -84,23 +84,52 @@ def _credential_to_dict(cred: CredentialObject) -> dict:
|
||||
}
|
||||
|
||||
|
||||
def _is_available_for_specs(store: CredentialStore, credential_id: str) -> bool:
|
||||
"""Best-effort availability check for the repair UI.
|
||||
|
||||
The credential settings page must stay reachable even when an encrypted
|
||||
file was written with the wrong key or is otherwise unreadable.
|
||||
"""
|
||||
try:
|
||||
return store.is_available(credential_id)
|
||||
except CredentialDecryptionError as exc:
|
||||
logger.warning("Credential '%s' is unreadable; marking unavailable in specs: %s", credential_id, exc)
|
||||
return False
|
||||
|
||||
|
||||
async def handle_list_credentials(request: web.Request) -> web.Response:
|
||||
"""GET /api/credentials — list all credential metadata (no secrets)."""
|
||||
store = _get_store(request)
|
||||
cred_ids = store.list_credentials()
|
||||
credentials = []
|
||||
unreadable = []
|
||||
for cid in cred_ids:
|
||||
cred = store.get_credential(cid, refresh_if_needed=False)
|
||||
try:
|
||||
cred = store.get_credential(cid, refresh_if_needed=False)
|
||||
except CredentialDecryptionError as exc:
|
||||
logger.warning("Credential '%s' is unreadable while listing credentials: %s", cid, exc)
|
||||
unreadable.append(cid)
|
||||
continue
|
||||
if cred:
|
||||
credentials.append(_credential_to_dict(cred))
|
||||
return web.json_response({"credentials": credentials})
|
||||
return web.json_response({"credentials": credentials, "unreadable_credentials": unreadable})
|
||||
|
||||
|
||||
async def handle_get_credential(request: web.Request) -> web.Response:
|
||||
"""GET /api/credentials/{credential_id} — get single credential metadata."""
|
||||
credential_id = request.match_info["credential_id"]
|
||||
store = _get_store(request)
|
||||
cred = store.get_credential(credential_id, refresh_if_needed=False)
|
||||
try:
|
||||
cred = store.get_credential(credential_id, refresh_if_needed=False)
|
||||
except CredentialDecryptionError:
|
||||
return web.json_response(
|
||||
{
|
||||
"error": f"Credential '{credential_id}' could not be decrypted",
|
||||
"credential_id": credential_id,
|
||||
"recoverable": True,
|
||||
},
|
||||
status=409,
|
||||
)
|
||||
if cred is None:
|
||||
return web.json_response({"error": f"Credential '{credential_id}' not found"}, status=404)
|
||||
return web.json_response(_credential_to_dict(cred))
|
||||
@@ -393,7 +422,7 @@ async def handle_list_specs(request: web.Request) -> web.Response:
|
||||
if spec.aden_supported and not spec.direct_api_key_supported:
|
||||
available = len(accounts) > 0
|
||||
else:
|
||||
available = store.is_available(cred_id)
|
||||
available = _is_available_for_specs(store, cred_id)
|
||||
specs.append(
|
||||
{
|
||||
"credential_name": name,
|
||||
|
||||
@@ -209,6 +209,7 @@ async def handle_events(request: web.Request) -> web.StreamResponse:
|
||||
EventType.TRIGGER_AVAILABLE.value,
|
||||
EventType.TRIGGER_ACTIVATED.value,
|
||||
EventType.TRIGGER_DEACTIVATED.value,
|
||||
EventType.TRIGGER_FIRED.value,
|
||||
EventType.TRIGGER_REMOVED.value,
|
||||
EventType.TRIGGER_UPDATED.value,
|
||||
}
|
||||
|
||||
@@ -10,11 +10,17 @@ from aiohttp import web
|
||||
|
||||
from framework.agent_loop.conversation import LEGACY_RUN_ID
|
||||
from framework.credentials.validation import validate_agent_credentials
|
||||
from framework.host.execution_manager import ExecutionAlreadyRunningError
|
||||
from framework.server.app import resolve_session, safe_path_segment, sessions_dir
|
||||
from framework.server.routes_sessions import _credential_error_response
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Strong refs to background fork-finalize tasks (compaction + worker-conv
|
||||
# copy) so asyncio doesn't GC them mid-run. fork_session_into_colony
|
||||
# schedules into this set and the done-callback evicts on completion.
|
||||
_BACKGROUND_FORK_TASKS: set[asyncio.Task[None]] = set()
|
||||
|
||||
|
||||
def _load_checkpoint_run_id(cp_path) -> str | None:
|
||||
try:
|
||||
@@ -46,7 +52,6 @@ _WORKER_INHERITED_TOOLS: frozenset[str] = frozenset(
|
||||
"set_output",
|
||||
"escalate",
|
||||
"ask_user",
|
||||
"ask_user_multiple",
|
||||
}
|
||||
)
|
||||
|
||||
@@ -101,6 +106,17 @@ def _resolve_queen_only_tools() -> frozenset[str]:
|
||||
return frozenset(derived | _QUEEN_LIFECYCLE_EXTRAS)
|
||||
|
||||
|
||||
def _execution_already_running_response(exc: ExecutionAlreadyRunningError) -> web.Response:
|
||||
return web.json_response(
|
||||
{
|
||||
"error": str(exc),
|
||||
"stream_id": exc.stream_id,
|
||||
"active_execution_ids": exc.active_ids,
|
||||
},
|
||||
status=409,
|
||||
)
|
||||
|
||||
|
||||
async def handle_trigger(request: web.Request) -> web.Response:
|
||||
"""POST /api/sessions/{session_id}/trigger — start an execution.
|
||||
|
||||
@@ -142,11 +158,14 @@ async def handle_trigger(request: web.Request) -> web.Response:
|
||||
if "resume_session_id" not in session_state:
|
||||
session_state["resume_session_id"] = session.id
|
||||
|
||||
execution_id = await session.colony_runtime.trigger(
|
||||
entry_point_id,
|
||||
input_data,
|
||||
session_state=session_state,
|
||||
)
|
||||
try:
|
||||
execution_id = await session.colony_runtime.trigger(
|
||||
entry_point_id,
|
||||
input_data,
|
||||
session_state=session_state,
|
||||
)
|
||||
except ExecutionAlreadyRunningError as exc:
|
||||
return _execution_already_running_response(exc)
|
||||
|
||||
# Cancel queen's in-progress LLM turn so it picks up the phase change cleanly
|
||||
if session.queen_executor:
|
||||
@@ -202,6 +221,25 @@ async def handle_chat(request: web.Request) -> web.Response:
|
||||
logger.debug("[handle_chat] Session resolution failed: %s", err)
|
||||
return err
|
||||
|
||||
# Sessions that have spawned a colony are locked: the user must compact +
|
||||
# fork into a fresh session before continuing the conversation. Frontend
|
||||
# surfaces this as a button instead of the textarea, but enforce server-
|
||||
# side too so the lock can't be bypassed by a stale tab or scripted call.
|
||||
if getattr(session, "colony_spawned", False):
|
||||
return web.json_response(
|
||||
{
|
||||
"error": "session_locked",
|
||||
"reason": "colony_spawned",
|
||||
"spawned_colony_name": getattr(session, "spawned_colony_name", None),
|
||||
"message": (
|
||||
"This session is locked because a colony has been "
|
||||
"spawned from it. Compact and start a new session "
|
||||
"with the same queen to continue."
|
||||
),
|
||||
},
|
||||
status=409,
|
||||
)
|
||||
|
||||
body = await request.json()
|
||||
message = body.get("message", "")
|
||||
display_message = body.get("display_message")
|
||||
@@ -435,11 +473,14 @@ async def handle_resume(request: web.Request) -> web.Response:
|
||||
|
||||
input_data = state.get("input_data", {})
|
||||
|
||||
execution_id = await session.colony_runtime.trigger(
|
||||
entry_points[0].id,
|
||||
input_data=input_data,
|
||||
session_state=resume_session_state,
|
||||
)
|
||||
try:
|
||||
execution_id = await session.colony_runtime.trigger(
|
||||
entry_points[0].id,
|
||||
input_data=input_data,
|
||||
session_state=resume_session_state,
|
||||
)
|
||||
except ExecutionAlreadyRunningError as exc:
|
||||
return _execution_already_running_response(exc)
|
||||
|
||||
return web.json_response(
|
||||
{
|
||||
@@ -466,6 +507,7 @@ async def handle_pause(request: web.Request) -> web.Response:
|
||||
|
||||
runtime = session.colony_runtime
|
||||
cancelled = []
|
||||
cancelling = []
|
||||
|
||||
for colony_id in runtime.list_graphs():
|
||||
reg = runtime.get_graph_registration(colony_id)
|
||||
@@ -482,9 +524,11 @@ async def handle_pause(request: web.Request) -> web.Response:
|
||||
|
||||
for exec_id in list(stream.active_execution_ids):
|
||||
try:
|
||||
ok = await stream.cancel_execution(exec_id, reason="Execution paused by user")
|
||||
if ok:
|
||||
outcome = await stream.cancel_execution(exec_id, reason="Execution paused by user")
|
||||
if outcome == "cancelled":
|
||||
cancelled.append(exec_id)
|
||||
elif outcome == "cancelling":
|
||||
cancelling.append(exec_id)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
@@ -498,8 +542,9 @@ async def handle_pause(request: web.Request) -> web.Response:
|
||||
|
||||
return web.json_response(
|
||||
{
|
||||
"stopped": bool(cancelled),
|
||||
"stopped": bool(cancelled) and not cancelling,
|
||||
"cancelled": cancelled,
|
||||
"cancelling": cancelling,
|
||||
"timers_paused": True,
|
||||
}
|
||||
)
|
||||
@@ -536,8 +581,9 @@ async def handle_stop(request: web.Request) -> web.Response:
|
||||
if hasattr(node, "cancel_current_turn"):
|
||||
node.cancel_current_turn()
|
||||
|
||||
cancelled = await stream.cancel_execution(execution_id, reason="Execution stopped by user")
|
||||
if cancelled:
|
||||
outcome = await stream.cancel_execution(execution_id, reason="Execution stopped by user")
|
||||
|
||||
if outcome == "cancelled":
|
||||
# Cancel queen's in-progress LLM turn
|
||||
if session.queen_executor:
|
||||
node = session.queen_executor.node_registry.get("queen")
|
||||
@@ -552,9 +598,19 @@ async def handle_stop(request: web.Request) -> web.Response:
|
||||
return web.json_response(
|
||||
{
|
||||
"stopped": True,
|
||||
"cancelling": False,
|
||||
"execution_id": execution_id,
|
||||
}
|
||||
)
|
||||
if outcome == "cancelling":
|
||||
return web.json_response(
|
||||
{
|
||||
"stopped": False,
|
||||
"cancelling": True,
|
||||
"execution_id": execution_id,
|
||||
},
|
||||
status=202,
|
||||
)
|
||||
|
||||
return web.json_response({"stopped": False, "error": "Execution not found"}, status=404)
|
||||
|
||||
@@ -597,11 +653,14 @@ async def handle_replay(request: web.Request) -> web.Response:
|
||||
"run_id": _load_checkpoint_run_id(cp_path),
|
||||
}
|
||||
|
||||
execution_id = await session.colony_runtime.trigger(
|
||||
entry_points[0].id,
|
||||
input_data={},
|
||||
session_state=replay_session_state,
|
||||
)
|
||||
try:
|
||||
execution_id = await session.colony_runtime.trigger(
|
||||
entry_points[0].id,
|
||||
input_data={},
|
||||
session_state=replay_session_state,
|
||||
)
|
||||
except ExecutionAlreadyRunningError as exc:
|
||||
return _execution_already_running_response(exc)
|
||||
|
||||
return web.json_response(
|
||||
{
|
||||
@@ -627,6 +686,329 @@ async def handle_cancel_queen(request: web.Request) -> web.Response:
|
||||
return web.json_response({"cancelled": True})
|
||||
|
||||
|
||||
def persist_colony_spawn_lock(session: Any, colony_name: str) -> None:
|
||||
"""Persist the colony-spawned lock on a queen session.
|
||||
|
||||
Writes ``colony_spawned: true`` + ``spawned_colony_name`` + a timestamp
|
||||
into the queen session's ``meta.json`` and mirrors the same fields onto
|
||||
the live ``Session`` object so subsequent ``/chat`` calls in this
|
||||
process are rejected immediately without disk I/O.
|
||||
|
||||
Shared by the HTTP route ``handle_mark_colony_spawned`` (frontend
|
||||
click on the colony-link card) and the in-process ``create_colony``
|
||||
tool path (when the queen forks while in ``incubating`` phase).
|
||||
|
||||
Raises ``OSError`` if the meta.json write fails. Callers should catch
|
||||
and respond/log appropriately.
|
||||
"""
|
||||
from datetime import datetime as _dt
|
||||
|
||||
queen_dir = getattr(session, "queen_dir", None)
|
||||
if queen_dir is None:
|
||||
# Tool-side callers may invoke before the queen dir is available.
|
||||
# Still mirror onto the session so the in-process /chat guard
|
||||
# works; the meta.json write is just deferred until the next
|
||||
# session start writes the file (rare path).
|
||||
session.colony_spawned = True
|
||||
session.spawned_colony_name = colony_name
|
||||
return
|
||||
|
||||
meta_path = queen_dir / "meta.json"
|
||||
meta: dict = {}
|
||||
if meta_path.exists():
|
||||
try:
|
||||
meta = json.loads(meta_path.read_text(encoding="utf-8"))
|
||||
except (json.JSONDecodeError, OSError):
|
||||
meta = {}
|
||||
|
||||
meta["colony_spawned"] = True
|
||||
meta["spawned_colony_name"] = colony_name
|
||||
meta["spawned_colony_at"] = _dt.now(UTC).isoformat()
|
||||
|
||||
meta_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
meta_path.write_text(json.dumps(meta), encoding="utf-8")
|
||||
|
||||
session.colony_spawned = True
|
||||
session.spawned_colony_name = colony_name
|
||||
|
||||
|
||||
async def handle_mark_colony_spawned(request: web.Request) -> web.Response:
|
||||
"""POST /api/sessions/{session_id}/mark-colony-spawned -- lock the queen DM.
|
||||
|
||||
Called by the frontend the first time the user clicks the
|
||||
COLONY_CREATED system message. Thin wrapper around
|
||||
:func:`persist_colony_spawn_lock` — the heavy lifting (meta.json
|
||||
merge + Session cache) lives in the helper so the in-process
|
||||
``create_colony`` path can reuse it without re-issuing an HTTP call.
|
||||
|
||||
Body: ``{"colony_name": "..."}``
|
||||
"""
|
||||
session, err = resolve_session(request)
|
||||
if err:
|
||||
return err
|
||||
|
||||
body = await request.json() if request.can_read_body else {}
|
||||
colony_name = (body.get("colony_name") or "").strip()
|
||||
if not colony_name:
|
||||
return web.json_response({"error": "colony_name is required"}, status=400)
|
||||
|
||||
try:
|
||||
persist_colony_spawn_lock(session, colony_name)
|
||||
except OSError as exc:
|
||||
logger.exception("mark_colony_spawned: failed to persist meta.json")
|
||||
return web.json_response({"error": f"failed to persist: {exc}"}, status=500)
|
||||
|
||||
return web.json_response(
|
||||
{
|
||||
"session_id": session.id,
|
||||
"colony_spawned": True,
|
||||
"spawned_colony_name": colony_name,
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
async def _compact_queen_conversation_in_place(
|
||||
*,
|
||||
queen_dir: Any,
|
||||
queen_ctx: Any,
|
||||
queen_loop: Any,
|
||||
inherited_from: str | None = None,
|
||||
) -> tuple[int, int, str] | None:
|
||||
"""Compact ``queen_dir/conversations`` into one summary message in place.
|
||||
|
||||
Reads ``parts/`` via :class:`FileConversationStore`, runs
|
||||
:func:`llm_compact` with ``preserve_user_messages=True``, wipes
|
||||
``parts/`` + ``partials/`` and writes a single ``user``-role
|
||||
:class:`Message` (seq 0) tagged with ``inherited_from`` when provided,
|
||||
then resets ``cursor.json`` to ``next_seq=1``. ``events.jsonl`` is
|
||||
NOT touched — callers decide whether to wipe it (compact-and-fork)
|
||||
or append a boundary marker (colony fork).
|
||||
|
||||
Returns ``(messages_compacted, summary_chars, summary_text)`` on
|
||||
success, or ``None`` when there is nothing to do (no LLM ctx, no
|
||||
conversation directory, or no messages on disk). Raises on LLM or
|
||||
filesystem failure so the caller can decide between user-facing
|
||||
error response (compact-and-fork) and silent fall-through (colony
|
||||
fork keeps the raw transcript).
|
||||
"""
|
||||
import shutil as _shutil
|
||||
|
||||
from framework.agent_loop.conversation import Message
|
||||
from framework.agent_loop.internals.compaction import llm_compact
|
||||
from framework.storage.conversation_store import FileConversationStore
|
||||
|
||||
if queen_ctx is None or getattr(queen_ctx, "llm", None) is None:
|
||||
return None
|
||||
|
||||
convs_dir = queen_dir / "conversations"
|
||||
if not convs_dir.exists():
|
||||
return None
|
||||
|
||||
src_store = FileConversationStore(convs_dir)
|
||||
raw_parts = await src_store.read_parts()
|
||||
messages: list[Message] = []
|
||||
for part in raw_parts:
|
||||
try:
|
||||
messages.append(Message.from_storage_dict(part))
|
||||
except (KeyError, TypeError):
|
||||
# Skip malformed parts; the summary still covers everything else.
|
||||
logger.warning("compact_in_place: skipping malformed part %r", part)
|
||||
continue
|
||||
if not messages:
|
||||
return None
|
||||
|
||||
max_ctx_tokens = 180_000
|
||||
loop_cfg = getattr(queen_loop, "_config", None)
|
||||
if loop_cfg is not None and getattr(loop_cfg, "max_context_tokens", None):
|
||||
max_ctx_tokens = int(loop_cfg.max_context_tokens)
|
||||
|
||||
summary = await llm_compact(
|
||||
queen_ctx,
|
||||
messages,
|
||||
accumulator=None,
|
||||
max_context_tokens=max_ctx_tokens,
|
||||
preserve_user_messages=True,
|
||||
)
|
||||
|
||||
parts_dir = convs_dir / "parts"
|
||||
partials_dir = convs_dir / "partials"
|
||||
|
||||
def _wipe_stores() -> None:
|
||||
if parts_dir.exists():
|
||||
_shutil.rmtree(parts_dir)
|
||||
if partials_dir.exists():
|
||||
_shutil.rmtree(partials_dir)
|
||||
|
||||
await asyncio.to_thread(_wipe_stores)
|
||||
|
||||
summary_msg = Message(
|
||||
seq=0,
|
||||
role="user",
|
||||
content=summary,
|
||||
inherited_from=inherited_from,
|
||||
)
|
||||
dest_store = FileConversationStore(convs_dir)
|
||||
await dest_store.write_part(0, summary_msg.to_storage_dict())
|
||||
await dest_store.write_cursor({"next_seq": 1})
|
||||
|
||||
return (len(messages), len(summary), summary)
|
||||
|
||||
|
||||
async def handle_compact_and_fork(request: web.Request) -> web.Response:
|
||||
"""POST /api/sessions/{session_id}/compact-and-fork -- compact + new session.
|
||||
|
||||
The locked-by-colony-spawn UI calls this when the user clicks "compact
|
||||
+ start a new session with the same queen". The flow:
|
||||
|
||||
1. Mint a fresh session ID and copy the old queen-session dir to it.
|
||||
2. Run the shared :func:`_compact_queen_conversation_in_place` helper
|
||||
on the copy, which reads the parts, runs the LLM compactor with
|
||||
``preserve_user_messages=True``, and replaces ``parts/`` with a
|
||||
single summary message.
|
||||
3. Wipe ``events.jsonl`` so the new session presents a clean SSE
|
||||
replay (the parent's events would otherwise show up in the new
|
||||
chat as ghost history).
|
||||
4. Update meta.json (clear the parent's lock, record provenance) and
|
||||
spin up the live session.
|
||||
|
||||
The OLD session stays alive but locked; the user navigates to the
|
||||
new session via the response.
|
||||
"""
|
||||
import shutil
|
||||
import time as _time
|
||||
from datetime import datetime as _dt
|
||||
|
||||
from framework.agent_loop.types import AgentContext
|
||||
from framework.server.session_manager import (
|
||||
_generate_session_id,
|
||||
_queen_session_dir,
|
||||
)
|
||||
|
||||
session, err = resolve_session(request)
|
||||
if err:
|
||||
return err
|
||||
|
||||
queen_dir = getattr(session, "queen_dir", None)
|
||||
if queen_dir is None or not queen_dir.exists():
|
||||
return web.json_response(
|
||||
{"error": "queen session directory not found"},
|
||||
status=404,
|
||||
)
|
||||
|
||||
queen_executor = getattr(session, "queen_executor", None)
|
||||
if queen_executor is None:
|
||||
return web.json_response({"error": "queen is not running"}, status=503)
|
||||
queen_node = queen_executor.node_registry.get("queen") if queen_executor else None
|
||||
queen_ctx: AgentContext | None = getattr(queen_node, "_last_ctx", None) if queen_node else None
|
||||
if queen_ctx is None or queen_ctx.llm is None:
|
||||
return web.json_response(
|
||||
{
|
||||
"error": (
|
||||
"queen context not yet stamped (no LLM available for "
|
||||
"compaction). Send a message to the queen and retry."
|
||||
)
|
||||
},
|
||||
status=503,
|
||||
)
|
||||
|
||||
queen_name = session.queen_name or "default"
|
||||
|
||||
new_session_id = _generate_session_id()
|
||||
new_dir = _queen_session_dir(new_session_id, queen_name)
|
||||
if new_dir.exists():
|
||||
# Defensively: same-second collision would clobber another session.
|
||||
return web.json_response(
|
||||
{"error": f"new session dir collision: {new_dir}"},
|
||||
status=500,
|
||||
)
|
||||
|
||||
try:
|
||||
await asyncio.to_thread(shutil.copytree, queen_dir, new_dir)
|
||||
except OSError as exc:
|
||||
logger.exception("compact_and_fork: copytree failed")
|
||||
return web.json_response(
|
||||
{"error": f"failed to fork session dir: {exc}"},
|
||||
status=500,
|
||||
)
|
||||
|
||||
# Compact in place against the COPY so the source DM is untouched.
|
||||
# Failures here are user-visible — the whole point of the action is
|
||||
# the compacted summary.
|
||||
try:
|
||||
result = await _compact_queen_conversation_in_place(
|
||||
queen_dir=new_dir,
|
||||
queen_ctx=queen_ctx,
|
||||
queen_loop=queen_node,
|
||||
inherited_from=None, # this IS the new live session, not an inheritance
|
||||
)
|
||||
except Exception as exc:
|
||||
logger.exception("compact_and_fork: compaction failed")
|
||||
return web.json_response(
|
||||
{"error": f"compaction failed: {exc}"},
|
||||
status=500,
|
||||
)
|
||||
if result is None:
|
||||
return web.json_response(
|
||||
{"error": "queen conversation is empty -- nothing to compact"},
|
||||
status=400,
|
||||
)
|
||||
messages_compacted, summary_chars, _summary_text = result
|
||||
|
||||
# Clean partials are already gone; also wipe events.jsonl so the new
|
||||
# session's SSE replay starts fresh (the helper deliberately leaves
|
||||
# events.jsonl alone so the colony-fork path can append a marker).
|
||||
new_events_path = new_dir / "events.jsonl"
|
||||
try:
|
||||
await asyncio.to_thread(lambda: new_events_path.exists() and new_events_path.unlink())
|
||||
except OSError:
|
||||
logger.warning("compact_and_fork: failed to wipe events.jsonl", exc_info=True)
|
||||
|
||||
# Update meta.json: clear the lock and record provenance.
|
||||
new_meta_path = new_dir / "meta.json"
|
||||
new_meta: dict = {}
|
||||
if new_meta_path.exists():
|
||||
try:
|
||||
new_meta = json.loads(new_meta_path.read_text(encoding="utf-8"))
|
||||
except (json.JSONDecodeError, OSError):
|
||||
new_meta = {}
|
||||
new_meta.pop("colony_spawned", None)
|
||||
new_meta.pop("spawned_colony_name", None)
|
||||
new_meta.pop("spawned_colony_at", None)
|
||||
new_meta["queen_id"] = queen_name
|
||||
new_meta["compacted_from"] = session.id
|
||||
new_meta["compacted_at"] = _dt.now(UTC).isoformat()
|
||||
new_meta["created_at"] = _time.time()
|
||||
try:
|
||||
new_meta_path.write_text(json.dumps(new_meta), encoding="utf-8")
|
||||
except OSError:
|
||||
logger.warning("compact_and_fork: failed to write new meta.json", exc_info=True)
|
||||
|
||||
manager: Any = request.app["manager"]
|
||||
try:
|
||||
new_session = await manager.create_session(
|
||||
session_id=None,
|
||||
queen_resume_from=new_session_id,
|
||||
queen_name=queen_name,
|
||||
initial_phase="independent",
|
||||
)
|
||||
except Exception as exc:
|
||||
logger.exception("compact_and_fork: create_session failed for forked id %s", new_session_id)
|
||||
return web.json_response(
|
||||
{"error": f"failed to start forked session: {exc}"},
|
||||
status=500,
|
||||
)
|
||||
|
||||
return web.json_response(
|
||||
{
|
||||
"new_session_id": new_session.id,
|
||||
"queen_id": queen_name,
|
||||
"compacted_from": session.id,
|
||||
"summary_chars": summary_chars,
|
||||
"messages_compacted": messages_compacted,
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
async def handle_colony_spawn(request: web.Request) -> web.Response:
|
||||
"""POST /api/sessions/{session_id}/colony-spawn -- fork queen session into a colony.
|
||||
|
||||
@@ -674,12 +1056,102 @@ async def handle_colony_spawn(request: web.Request) -> web.Response:
|
||||
return web.json_response(result)
|
||||
|
||||
|
||||
async def _compact_inherited_conversation(
|
||||
*,
|
||||
dest_queen_dir: Any,
|
||||
queen_ctx: Any,
|
||||
queen_loop: Any,
|
||||
source_session_id: str,
|
||||
) -> None:
|
||||
"""Compact a freshly-forked colony's inherited transcript in place.
|
||||
|
||||
Thin wrapper over :func:`_compact_queen_conversation_in_place` that
|
||||
tags the resulting summary message with ``inherited_from`` and
|
||||
appends a ``colony_fork_marker`` event to the colony's
|
||||
``events.jsonl`` so the frontend can group + collapse everything
|
||||
that preceded the fork.
|
||||
|
||||
Called from ``fork_session_into_colony`` after the parent queen
|
||||
session directory has been copied into the colony's queue dir.
|
||||
|
||||
Fail-soft: any exception (compaction, write, marker append) logs a
|
||||
warning and leaves the directory as the raw copytree wrote it. The
|
||||
colony still works; it just inherits the full DM transcript instead
|
||||
of the summary.
|
||||
"""
|
||||
import json as _json
|
||||
from datetime import UTC as _UTC, datetime as _datetime
|
||||
|
||||
try:
|
||||
result = await _compact_queen_conversation_in_place(
|
||||
queen_dir=dest_queen_dir,
|
||||
queen_ctx=queen_ctx,
|
||||
queen_loop=queen_loop,
|
||||
inherited_from=source_session_id,
|
||||
)
|
||||
except Exception:
|
||||
logger.warning(
|
||||
"compact_inherited: compaction failed; leaving raw transcript",
|
||||
exc_info=True,
|
||||
)
|
||||
return
|
||||
|
||||
if result is None:
|
||||
# No queen ctx, no parts on disk, or empty conversation. Nothing
|
||||
# to compact and nothing to mark — the colony will just open with
|
||||
# an empty chat (or whatever raw state was copied).
|
||||
logger.info(
|
||||
"compact_inherited: nothing to compact for colony forked from %s",
|
||||
source_session_id,
|
||||
)
|
||||
return
|
||||
|
||||
messages_compacted, summary_chars, summary_text = result
|
||||
|
||||
# Append the boundary marker to the colony's events.jsonl so the
|
||||
# frontend can group + collapse everything that came before. The
|
||||
# marker carries the parent session id and a short summary preview
|
||||
# so the collapsed widget has something to label itself with even
|
||||
# before the user expands it.
|
||||
fork_iso = _datetime.now(_UTC).isoformat()
|
||||
marker = {
|
||||
"type": "colony_fork_marker",
|
||||
"stream_id": "queen",
|
||||
"data": {
|
||||
"parent_session_id": source_session_id,
|
||||
"fork_time": fork_iso,
|
||||
"summary_preview": summary_text[:240],
|
||||
"inherited_message_count": messages_compacted,
|
||||
},
|
||||
"timestamp": fork_iso,
|
||||
}
|
||||
events_path = dest_queen_dir / "events.jsonl"
|
||||
|
||||
def _append_marker() -> None:
|
||||
events_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
with open(events_path, "a", encoding="utf-8") as f:
|
||||
f.write(_json.dumps(marker) + "\n")
|
||||
|
||||
try:
|
||||
await asyncio.to_thread(_append_marker)
|
||||
except OSError:
|
||||
logger.warning("compact_inherited: failed to append fork marker", exc_info=True)
|
||||
|
||||
logger.info(
|
||||
"compact_inherited: compacted %d parent message(s) -> 1 summary (%d chars) for colony forked from %s",
|
||||
messages_compacted,
|
||||
summary_chars,
|
||||
source_session_id,
|
||||
)
|
||||
|
||||
|
||||
async def fork_session_into_colony(
|
||||
*,
|
||||
session: Any,
|
||||
colony_name: str,
|
||||
task: str,
|
||||
tasks: list[dict] | None = None,
|
||||
concurrency_hint: int | None = None,
|
||||
) -> dict:
|
||||
"""Fork a queen session into a colony directory.
|
||||
|
||||
@@ -712,15 +1184,71 @@ async def fork_session_into_colony(
|
||||
from pathlib import Path
|
||||
|
||||
from framework.agent_loop.agent_loop import AgentLoop, LoopConfig
|
||||
from framework.agent_loop.types import AgentContext, AgentSpec
|
||||
from framework.agent_loop.types import AgentContext
|
||||
from framework.host.progress_db import ensure_progress_db, seed_tasks
|
||||
from framework.server.session_manager import _queen_session_dir
|
||||
|
||||
queen_loop: AgentLoop = session.queen_executor.node_registry["queen"]
|
||||
queen_ctx: AgentContext = getattr(queen_loop, "_last_ctx", None)
|
||||
# Diagnostic capture: when the fork fails here we want to know which
|
||||
# piece of queen state was missing (executor cleared vs. node missing
|
||||
# vs. _last_ctx never stamped). Without this, callers only see
|
||||
# "'NoneType' object has no attribute 'node_registry'" with no hint
|
||||
# whether the queen loop exited, is mid-revive, or ran a different
|
||||
# path that never ran AgentLoop._execute_impl.
|
||||
queen_executor = getattr(session, "queen_executor", None)
|
||||
queen_task = getattr(session, "queen_task", None)
|
||||
phase_state_dbg = getattr(session, "phase_state", None)
|
||||
logger.info(
|
||||
"[fork_session_into_colony] session=%s colony=%s "
|
||||
"queen_executor=%s queen_task=%s queen_task_done=%s "
|
||||
"phase=%s queen_name=%s",
|
||||
session.id,
|
||||
colony_name,
|
||||
queen_executor,
|
||||
queen_task,
|
||||
queen_task.done() if queen_task is not None else None,
|
||||
getattr(phase_state_dbg, "phase", None),
|
||||
getattr(session, "queen_name", None),
|
||||
)
|
||||
|
||||
if queen_executor is None:
|
||||
raise RuntimeError(
|
||||
f"queen_executor is None for session {session.id!r} — the "
|
||||
"queen loop isn't running right now. Wait for the queen to "
|
||||
"come back (or send her a chat message to revive her) and "
|
||||
"retry create_colony. The skill folder is already written, "
|
||||
"so the retry is free."
|
||||
)
|
||||
|
||||
node_registry = getattr(queen_executor, "node_registry", None)
|
||||
if not isinstance(node_registry, dict) or "queen" not in node_registry:
|
||||
raise RuntimeError(
|
||||
f"queen node is missing from the executor's registry for "
|
||||
f"session {session.id!r} (registry keys="
|
||||
f"{list(node_registry.keys()) if isinstance(node_registry, dict) else type(node_registry).__name__}"
|
||||
"). The queen loop is in an initialization or teardown "
|
||||
"window; retry after a moment."
|
||||
)
|
||||
|
||||
queen_loop: AgentLoop = node_registry["queen"]
|
||||
queen_ctx: AgentContext = getattr(queen_loop, "_last_ctx", None)
|
||||
if queen_ctx is None:
|
||||
logger.warning(
|
||||
"[fork_session_into_colony] queen_loop has no _last_ctx yet "
|
||||
"(session=%s) — falling back to empty tool/skill snapshot; "
|
||||
"the forked worker will inherit no tools.",
|
||||
session.id,
|
||||
)
|
||||
|
||||
# "is_new" keys off worker.json, not bare dir existence: the queen's
|
||||
# create_colony tool now pre-creates colony_dir (so it can
|
||||
# materialize the colony-scoped skill folder BEFORE the fork), which
|
||||
# would wrongly flag every fresh colony as "already-exists" if we
|
||||
# used ``not colony_dir.exists()``. A colony is "new" until its
|
||||
# worker config has actually been written.
|
||||
colony_dir = Path.home() / ".hive" / "colonies" / colony_name
|
||||
is_new = not colony_dir.exists()
|
||||
worker_name = "worker"
|
||||
worker_config_path = colony_dir / f"{worker_name}.json"
|
||||
is_new = not worker_config_path.exists()
|
||||
colony_dir.mkdir(parents=True, exist_ok=True)
|
||||
(colony_dir / "data").mkdir(exist_ok=True)
|
||||
|
||||
@@ -730,9 +1258,7 @@ async def fork_session_into_colony(
|
||||
db_path = await asyncio.to_thread(ensure_progress_db, colony_dir)
|
||||
seeded_task_ids: list[str] = []
|
||||
if tasks:
|
||||
seeded_task_ids = await asyncio.to_thread(
|
||||
seed_tasks, db_path, tasks, source="queen_create"
|
||||
)
|
||||
seeded_task_ids = await asyncio.to_thread(seed_tasks, db_path, tasks, source="queen_create")
|
||||
logger.info(
|
||||
"progress_db: seeded %d task(s) into colony '%s'",
|
||||
len(seeded_task_ids),
|
||||
@@ -754,23 +1280,20 @@ async def fork_session_into_colony(
|
||||
source="create_colony_auto",
|
||||
)
|
||||
logger.info(
|
||||
"progress_db: auto-seeded 1 task into colony '%s' "
|
||||
"(task_id=%s, from single-task create_colony form)",
|
||||
"progress_db: auto-seeded 1 task into colony '%s' (task_id=%s, from single-task create_colony form)",
|
||||
colony_name,
|
||||
seeded_task_ids[0] if seeded_task_ids else "?",
|
||||
)
|
||||
except Exception as exc:
|
||||
logger.warning(
|
||||
"progress_db: auto-seed failed for colony '%s' (continuing "
|
||||
"without a pre-seeded row): %s",
|
||||
"progress_db: auto-seed failed for colony '%s' (continuing without a pre-seeded row): %s",
|
||||
colony_name,
|
||||
exc,
|
||||
)
|
||||
|
||||
# Fixed worker name -- sessions are the unit of parallelism, not workers
|
||||
worker_name = "worker"
|
||||
|
||||
worker_config_path = colony_dir / f"{worker_name}.json"
|
||||
# Fixed worker name and config path are already computed above so
|
||||
# ``is_new`` can be derived from worker.json rather than the colony
|
||||
# directory (see comment on the ``is_new`` block).
|
||||
|
||||
# ── 1. Gather queen state ─────────────────────────────────────
|
||||
# Queen-lifecycle + agent-management tools are registered ONLY against
|
||||
@@ -866,6 +1389,13 @@ async def fork_session_into_colony(
|
||||
"spawned_from": session.id,
|
||||
"spawned_at": datetime.now(UTC).isoformat(),
|
||||
}
|
||||
# Concurrency advisory baked in at incubation time. Not enforced — the
|
||||
# progress.db queue is atomic regardless — but the colony queen reads
|
||||
# this when planning fan-outs (run_parallel_workers, trigger-fired
|
||||
# batches) so behavior matches what the user agreed to during
|
||||
# incubation.
|
||||
if isinstance(concurrency_hint, int) and concurrency_hint > 0:
|
||||
worker_meta["concurrency_hint"] = concurrency_hint
|
||||
worker_config_path.write_text(json.dumps(worker_meta, indent=2, ensure_ascii=False), encoding="utf-8")
|
||||
|
||||
# ── 3. Duplicate queen session into colony ───────────────────
|
||||
@@ -906,6 +1436,11 @@ async def fork_session_into_colony(
|
||||
dest_meta["queen_id"] = queen_name
|
||||
dest_meta["forked_from"] = session.id
|
||||
dest_meta["colony_fork"] = True # exclude from queen DM history
|
||||
# Clear any colony_spawned lock that came over from the parent meta —
|
||||
# it was the PARENT session that locked, not this freshly-forked one.
|
||||
dest_meta.pop("colony_spawned", None)
|
||||
dest_meta.pop("spawned_colony_name", None)
|
||||
dest_meta.pop("spawned_colony_at", None)
|
||||
dest_meta_path.write_text(json.dumps(dest_meta, ensure_ascii=False), encoding="utf-8")
|
||||
logger.info(
|
||||
"Duplicated queen session %s -> %s for colony '%s'",
|
||||
@@ -913,15 +1448,108 @@ async def fork_session_into_colony(
|
||||
colony_session_id,
|
||||
colony_name,
|
||||
)
|
||||
# Copy queen conversations into worker storage so the worker
|
||||
# starts with the queen's full context.
|
||||
worker_storage = Path.home() / ".hive" / "agents" / colony_name / worker_name
|
||||
worker_storage.mkdir(parents=True, exist_ok=True)
|
||||
worker_conv_dir = worker_storage / "conversations"
|
||||
source_conv_dir = dest_queen_dir / "conversations"
|
||||
if source_conv_dir.exists():
|
||||
await asyncio.to_thread(shutil.copytree, source_conv_dir, worker_conv_dir, dirs_exist_ok=True)
|
||||
logger.info("Copied queen conversations to worker storage %s", worker_conv_dir)
|
||||
|
||||
# ── 3a. Compact the inherited conversation (fire-and-forget) ──
|
||||
# The colony queen doesn't need the full DM transcript — that
|
||||
# transcript was about REACHING the decision to fork, which is
|
||||
# now settled. Compaction replaces the copied parts with a
|
||||
# single summary message tagged ``inherited_from``.
|
||||
#
|
||||
# Compaction issues an LLM call that can legitimately exceed
|
||||
# the 60s tool-call timeout, so we schedule it (plus the
|
||||
# downstream worker-storage copy) as a background task and
|
||||
# return immediately. A compaction_status.json marker in
|
||||
# dest_queen_dir lets a subsequent colony session-load await
|
||||
# completion before reading the conversation files (see
|
||||
# session_manager.create_session_with_worker_colony).
|
||||
#
|
||||
# Fail-soft: any exception is logged and recorded in the
|
||||
# marker; the colony still works with the raw transcript.
|
||||
from framework.server import compaction_status
|
||||
|
||||
compaction_status.mark_in_progress(dest_queen_dir)
|
||||
|
||||
_worker_storage = Path.home() / ".hive" / "agents" / colony_name / worker_name
|
||||
_dest_queen_dir = dest_queen_dir
|
||||
_queen_ctx = queen_ctx
|
||||
_queen_loop = queen_loop
|
||||
_source_session_id = session.id
|
||||
|
||||
# Wall-clock cap on the background compaction's LLM call.
|
||||
# Without this a hung/misbehaving model (seen with local
|
||||
# endpoints) leaves compaction_status="in_progress" forever and
|
||||
# the colony-open await_completion wastes its full poll window
|
||||
# before giving up. When this fires we still fall through to
|
||||
# the worker-storage copy below so the colony opens with the
|
||||
# raw transcript instead of empty state.
|
||||
_COMPACTION_TIMEOUT_SECONDS = 180.0
|
||||
|
||||
async def _finalize_fork() -> None:
|
||||
compaction_error: str | None = None
|
||||
try:
|
||||
await asyncio.wait_for(
|
||||
_compact_inherited_conversation(
|
||||
dest_queen_dir=_dest_queen_dir,
|
||||
queen_ctx=_queen_ctx,
|
||||
queen_loop=_queen_loop,
|
||||
source_session_id=_source_session_id,
|
||||
),
|
||||
timeout=_COMPACTION_TIMEOUT_SECONDS,
|
||||
)
|
||||
except TimeoutError:
|
||||
compaction_error = (
|
||||
f"compaction timed out after {_COMPACTION_TIMEOUT_SECONDS:.0f}s (falling back to raw transcript)"
|
||||
)
|
||||
logger.warning(
|
||||
"fork_session_into_colony: %s for %s",
|
||||
compaction_error,
|
||||
_dest_queen_dir,
|
||||
)
|
||||
except Exception as exc:
|
||||
compaction_error = f"compaction failed: {exc}"
|
||||
logger.warning(
|
||||
"fork_session_into_colony: %s for %s (falling back to raw transcript)",
|
||||
compaction_error,
|
||||
_dest_queen_dir,
|
||||
exc_info=True,
|
||||
)
|
||||
|
||||
# Worker storage copy runs regardless of the compaction
|
||||
# outcome. If compaction succeeded, the worker gets the
|
||||
# summary; if it failed / timed out, dest_queen_dir still
|
||||
# has the raw transcript from the earlier copytree and the
|
||||
# worker gets that. Without this copy-on-failure the worker
|
||||
# would open to empty state on every compaction hiccup.
|
||||
try:
|
||||
_worker_storage.mkdir(parents=True, exist_ok=True)
|
||||
worker_conv_dir = _worker_storage / "conversations"
|
||||
source_conv_dir = _dest_queen_dir / "conversations"
|
||||
if source_conv_dir.exists():
|
||||
await asyncio.to_thread(
|
||||
shutil.copytree,
|
||||
source_conv_dir,
|
||||
worker_conv_dir,
|
||||
dirs_exist_ok=True,
|
||||
)
|
||||
logger.info(
|
||||
"Copied queen conversations to worker storage %s",
|
||||
worker_conv_dir,
|
||||
)
|
||||
except Exception:
|
||||
logger.warning(
|
||||
"fork_session_into_colony: worker-storage copy failed for %s",
|
||||
_worker_storage,
|
||||
exc_info=True,
|
||||
)
|
||||
|
||||
if compaction_error:
|
||||
compaction_status.mark_failed(_dest_queen_dir, compaction_error)
|
||||
else:
|
||||
compaction_status.mark_done(_dest_queen_dir)
|
||||
|
||||
_bg_task = asyncio.create_task(_finalize_fork())
|
||||
_BACKGROUND_FORK_TASKS.add(_bg_task)
|
||||
_bg_task.add_done_callback(_BACKGROUND_FORK_TASKS.discard)
|
||||
else:
|
||||
logger.warning(
|
||||
"Queen session dir %s not found, colony will start fresh",
|
||||
@@ -949,6 +1577,39 @@ async def fork_session_into_colony(
|
||||
}
|
||||
metadata_path.write_text(json.dumps(metadata, indent=2, ensure_ascii=False), encoding="utf-8")
|
||||
|
||||
# ── 4a. Inherit the queen's tool allowlist into the colony ───
|
||||
# A colony forked from a curated queen should start with the same
|
||||
# tool surface (otherwise the colony silently falls back to its own
|
||||
# "allow every MCP tool" default, undoing the parent's curation).
|
||||
# We copy the queen's LIVE effective allowlist so the snapshot
|
||||
# reflects whatever was in force the moment the user clicked "Create
|
||||
# Colony". Users can further narrow the colony via the Tool Library.
|
||||
# Skip the write when the queen is on allow-all (None) so the colony
|
||||
# keeps the same semantics without creating an inert sidecar.
|
||||
try:
|
||||
queen_enabled = getattr(
|
||||
getattr(session, "phase_state", None),
|
||||
"enabled_mcp_tools",
|
||||
None,
|
||||
)
|
||||
if isinstance(queen_enabled, list):
|
||||
from framework.host.colony_tools_config import update_colony_tools_config
|
||||
|
||||
update_colony_tools_config(colony_name, list(queen_enabled))
|
||||
logger.info(
|
||||
"Inherited queen allowlist into colony '%s' (%d tools)",
|
||||
colony_name,
|
||||
len(queen_enabled),
|
||||
)
|
||||
except Exception:
|
||||
# Inheritance is best-effort — don't let a tools.json hiccup
|
||||
# abort colony creation.
|
||||
logger.warning(
|
||||
"Failed to inherit queen allowlist into colony '%s'",
|
||||
colony_name,
|
||||
exc_info=True,
|
||||
)
|
||||
|
||||
# ── 5. Update source queen session meta.json ─────────────────
|
||||
# Link the originating session back to the colony for discovery.
|
||||
source_meta_path = source_queen_dir / "meta.json"
|
||||
@@ -981,6 +1642,12 @@ async def fork_session_into_colony(
|
||||
"is_new": is_new,
|
||||
"db_path": str(db_path),
|
||||
"task_ids": seeded_task_ids,
|
||||
# "in_progress" when a background compactor was scheduled above,
|
||||
# "skipped" when the source queen dir was missing (nothing to
|
||||
# compact). Frontend uses this to decide whether to display a
|
||||
# "preparing colony…" state while session-load blocks on the
|
||||
# compaction marker.
|
||||
"compaction_status": ("in_progress" if source_queen_dir.exists() else "skipped"),
|
||||
}
|
||||
|
||||
|
||||
@@ -998,3 +1665,11 @@ def register_routes(app: web.Application) -> None:
|
||||
app.router.add_post("/api/sessions/{session_id}/replay", handle_replay)
|
||||
app.router.add_get("/api/sessions/{session_id}/goal-progress", handle_goal_progress)
|
||||
app.router.add_post("/api/sessions/{session_id}/colony-spawn", handle_colony_spawn)
|
||||
app.router.add_post(
|
||||
"/api/sessions/{session_id}/mark-colony-spawned",
|
||||
handle_mark_colony_spawned,
|
||||
)
|
||||
app.router.add_post(
|
||||
"/api/sessions/{session_id}/compact-and-fork",
|
||||
handle_compact_and_fork,
|
||||
)
|
||||
|
||||
@@ -0,0 +1,291 @@
|
||||
"""MCP server registration routes.
|
||||
|
||||
Thin HTTP wrapper around ``MCPRegistry`` so the frontend can add, remove,
|
||||
enable, and health-check user-registered MCP servers. The CLI path
|
||||
(``hive mcp add`` / ``hive mcp remove`` / etc.) is unchanged.
|
||||
|
||||
- GET /api/mcp/servers -- list installed servers
|
||||
- POST /api/mcp/servers -- register a local server
|
||||
- DELETE /api/mcp/servers/{name} -- remove a local server
|
||||
- POST /api/mcp/servers/{name}/enable -- enable a server
|
||||
- POST /api/mcp/servers/{name}/disable -- disable a server
|
||||
- POST /api/mcp/servers/{name}/health -- probe server health
|
||||
|
||||
New servers take effect on the *next* queen session start. Existing live
|
||||
queen sessions keep the tool list they booted with to avoid mid-turn
|
||||
cache invalidation. The ``add`` response hints at this explicitly.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from typing import Any
|
||||
|
||||
from aiohttp import web
|
||||
|
||||
from framework.loader.mcp_errors import MCPError
|
||||
from framework.loader.mcp_registry import MCPRegistry
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
_VALID_TRANSPORTS = {"stdio", "http", "sse", "unix"}
|
||||
|
||||
|
||||
def _registry() -> MCPRegistry:
    """Build a freshly-initialized ``MCPRegistry``.

    ``MCPRegistry`` is a thin wrapper around
    ``~/.hive/mcp_registry/installed.json``, so constructing one per
    request is cheap — there is no need to cache it on ``app["..."]``.
    """
    registry = MCPRegistry()
    registry.initialize()
    return registry
|
||||
|
||||
|
||||
def _package_builtin_servers() -> list[dict[str, Any]]:
    """Return the package-baked queen MCP servers from ``queen/mcp_servers.json``.

    Those servers are loaded directly by ``ToolRegistry.load_mcp_config``
    at queen boot and never go through ``MCPRegistry.list_installed``,
    so the raw registry view shows them as missing. Surface them here so
    the Tool Library reflects what the queen actually talks to.

    Entries carry ``source: "built-in"`` and are NOT removable / toggleable
    — editing them requires changing the repo file.

    Returns:
        One summary dict per built-in server; empty list when the file is
        missing, unreadable, or malformed (fail-soft by design).
    """
    import json
    from pathlib import Path

    import framework.agents.queen as _queen_pkg

    path = Path(_queen_pkg.__file__).parent / "mcp_servers.json"
    if not path.exists():
        return []
    try:
        data = json.loads(path.read_text(encoding="utf-8"))
    except (json.JSONDecodeError, OSError):
        return []
    # Guard the top-level shape: valid JSON can still be a list/scalar,
    # in which case ``.items()`` below would raise AttributeError. Treat
    # a malformed file the same as a missing one.
    if not isinstance(data, dict):
        return []

    out: list[dict[str, Any]] = []
    for name, cfg in data.items():
        # Skip malformed per-server entries rather than failing the list.
        if not isinstance(cfg, dict):
            continue
        out.append(
            {
                "name": name,
                "source": "built-in",
                "transport": cfg.get("transport", "stdio"),
                "description": cfg.get("description", "") or "",
                "enabled": True,
                "last_health_status": None,
                "last_error": None,
                "last_health_check_at": None,
                "tool_count": None,
                "removable": False,
            }
        )
    return out
|
||||
|
||||
|
||||
def _server_to_summary(entry: dict[str, Any]) -> dict[str, Any]:
|
||||
"""Shape an installed.json entry for API responses.
|
||||
|
||||
Strips the full manifest body (which can be large) but keeps the tool
|
||||
list if the manifest already embeds one (happens for registry-installed
|
||||
servers). Users with ``source: "local"`` only get a tool list after
|
||||
running a health check.
|
||||
"""
|
||||
manifest = entry.get("manifest") or {}
|
||||
tools = manifest.get("tools") if isinstance(manifest, dict) else None
|
||||
if not isinstance(tools, list):
|
||||
tools = None
|
||||
return {
|
||||
"name": entry.get("name"),
|
||||
"source": entry.get("source"),
|
||||
"transport": entry.get("transport"),
|
||||
"description": (manifest.get("description") if isinstance(manifest, dict) else None) or "",
|
||||
"enabled": entry.get("enabled", True),
|
||||
"last_health_status": entry.get("last_health_status"),
|
||||
"last_error": entry.get("last_error"),
|
||||
"last_health_check_at": entry.get("last_health_check_at"),
|
||||
"tool_count": (len(tools) if tools is not None else None),
|
||||
}
|
||||
|
||||
|
||||
def _mcp_error_response(exc: MCPError, *, default_status: int = 400) -> web.Response:
    """Render an ``MCPError`` as a structured JSON error response.

    ``error`` duplicates ``what`` so generic frontend handlers that only
    read ``error`` still show something useful; ``code``/``why``/``fix``
    carry the richer MCPError detail.
    """
    payload = {
        "error": exc.what,
        "code": exc.code.value,
        "what": exc.what,
        "why": exc.why,
        "fix": exc.fix,
    }
    return web.json_response(payload, status=default_status)
|
||||
|
||||
|
||||
async def handle_list_servers(request: web.Request) -> web.Response:
    """GET /api/mcp/servers — list every server the queen actually uses.

    Merges two sources:

    - ``MCPRegistry.list_installed()`` — servers registered via
      ``hive mcp add`` / the ``/api/mcp/servers`` POST route, stored in
      ``~/.hive/mcp_registry/installed.json``. These carry
      ``source: "local"`` (user-added) or ``source: "registry"``
      (installed from the remote registry).
    - Repo-baked queen servers from
      ``core/framework/agents/queen/mcp_servers.json``. These are loaded
      directly by the queen's ``ToolRegistry`` at boot and never touch
      ``MCPRegistry``; we surface them here so the UI reflects what the
      queen really talks to. They are not removable from the UI because
      editing them requires changing the repo.

    If a name collides between the two sources, the registry entry wins
    because that's the one the user has customized.
    """
    reg = _registry()
    installed = [_server_to_summary(entry) for entry in reg.list_installed()]
    taken_names = {summary.get("name") for summary in installed}

    # Built-ins go first, minus any name the registry already claims.
    builtins = [b for b in _package_builtin_servers() if b.get("name") not in taken_names]

    return web.json_response({"servers": [*builtins, *installed]})
|
||||
|
||||
|
||||
async def handle_add_server(request: web.Request) -> web.Response:
    """POST /api/mcp/servers — register a local MCP server.

    Body mirrors ``MCPRegistry.add_local`` args:

    ::

        {
            "name": "my-tool",
            "transport": "stdio" | "http" | "sse" | "unix",
            "command": "...", "args": [...], "env": {...}, "cwd": "...",
            "url": "...", "headers": {...},
            "socket_path": "...",
            "description": "..."
        }

    Returns 201 with the server summary on success; 400 on validation
    failure, 409 when the name already exists, 500 on unexpected errors.
    """
    try:
        body = await request.json()
    except Exception:
        return web.json_response({"error": "Invalid JSON body"}, status=400)
    if not isinstance(body, dict):
        return web.json_response({"error": "Body must be a JSON object"}, status=400)

    name = body.get("name")
    transport = body.get("transport")
    if not isinstance(name, str) or not name.strip():
        return web.json_response({"error": "'name' is required"}, status=400)
    if transport not in _VALID_TRANSPORTS:
        return web.json_response(
            {"error": f"'transport' must be one of {sorted(_VALID_TRANSPORTS)}"},
            status=400,
        )

    # Normalize once so registration, logging, and the response summary
    # all agree on the same name. (Previously add_local registered the
    # stripped form while the summary echoed the raw, unstripped one.)
    name = name.strip()

    reg = _registry()
    try:
        entry = reg.add_local(
            name=name,
            transport=transport,
            command=body.get("command"),
            args=body.get("args"),
            env=body.get("env"),
            cwd=body.get("cwd"),
            url=body.get("url"),
            headers=body.get("headers"),
            socket_path=body.get("socket_path"),
            description=body.get("description", ""),
        )
    except MCPError as exc:
        # Duplicate registration maps to 409 Conflict; all other
        # MCP-level validation failures are plain 400s.
        status = 409 if "already exists" in exc.what else 400
        return _mcp_error_response(exc, default_status=status)
    except Exception as exc:
        logger.exception("MCP add_local failed for %r", name)
        return web.json_response({"error": str(exc)}, status=500)

    summary = _server_to_summary({"name": name, **entry})
    return web.json_response(
        {
            "server": summary,
            "hint": "Start a new queen session to use this server's tools.",
        },
        status=201,
    )
|
||||
|
||||
|
||||
async def handle_remove_server(request: web.Request) -> web.Response:
    """DELETE /api/mcp/servers/{name} — remove a local server.

    Only ``source: "local"`` entries are removable from the UI; built-in
    and registry-installed servers are rejected with a 400 that names the
    actual source.
    """
    name = request.match_info["name"]
    reg = _registry()

    existing = reg.get_server(name)
    if existing is None:
        return web.json_response({"error": f"Server '{name}' not installed"}, status=404)
    source = existing.get("source")
    if source != "local":
        # Previously every non-local entry was reported as "a built-in",
        # which was wrong for registry-installed servers. Report the
        # real source so the user knows what they are looking at.
        return web.json_response(
            {
                "error": (
                    f"Server '{name}' has source '{source}'; only locally "
                    "added servers can be removed from the UI."
                ),
            },
            status=400,
        )

    try:
        reg.remove(name)
    except MCPError as exc:
        return _mcp_error_response(exc, default_status=404)
    return web.json_response({"removed": name})
|
||||
|
||||
|
||||
async def handle_set_enabled(request: web.Request, *, enabled: bool) -> web.Response:
    """Flip a server's enabled flag; shared by the enable/disable routes.

    Unknown servers surface as a 404 via ``MCPError``.
    """
    name = request.match_info["name"]
    reg = _registry()
    action = reg.enable if enabled else reg.disable
    try:
        action(name)
    except MCPError as exc:
        return _mcp_error_response(exc, default_status=404)
    return web.json_response({"name": name, "enabled": enabled})
|
||||
|
||||
|
||||
async def handle_enable(request: web.Request) -> web.Response:
    """POST /api/mcp/servers/{name}/enable — turn a registered server on.

    Thin wrapper over ``handle_set_enabled``; unknown servers come back
    as a 404 (via ``MCPError`` handling in the shared helper).
    """
    return await handle_set_enabled(request, enabled=True)
|
||||
|
||||
|
||||
async def handle_disable(request: web.Request) -> web.Response:
    """POST /api/mcp/servers/{name}/disable — turn a registered server off.

    Thin wrapper over ``handle_set_enabled``; unknown servers come back
    as a 404 (via ``MCPError`` handling in the shared helper).
    """
    return await handle_set_enabled(request, enabled=False)
|
||||
|
||||
|
||||
async def handle_health(request: web.Request) -> web.Response:
    """POST /api/mcp/servers/{name}/health — probe one server."""
    import asyncio

    name = request.match_info["name"]
    reg = _registry()
    try:
        # MCPRegistry.health_check blocks on subprocess IO — run it off
        # the event loop so the HTTP worker stays responsive.
        result = await asyncio.to_thread(reg.health_check, name)
    except MCPError as exc:
        return _mcp_error_response(exc, default_status=404)
    except Exception as exc:
        logger.exception("MCP health_check failed for %r", name)
        return web.json_response({"error": str(exc)}, status=500)
    return web.json_response(result)
|
||||
|
||||
|
||||
def register_routes(app: web.Application) -> None:
    """Register MCP server CRUD routes."""
    router = app.router
    router.add_get("/api/mcp/servers", handle_list_servers)
    router.add_post("/api/mcp/servers", handle_add_server)
    router.add_delete("/api/mcp/servers/{name}", handle_remove_server)
    router.add_post("/api/mcp/servers/{name}/enable", handle_enable)
    router.add_post("/api/mcp/servers/{name}/disable", handle_disable)
    router.add_post("/api/mcp/servers/{name}/health", handle_health)
|
||||
@@ -0,0 +1,87 @@
|
||||
"""Custom user prompts — CRUD for user-uploaded prompts.
|
||||
|
||||
- GET /api/prompts — list all custom prompts
|
||||
- POST /api/prompts — add a new custom prompt
|
||||
- DELETE /api/prompts/{id} — delete a custom prompt
|
||||
"""
|
||||
|
||||
import json
|
||||
import logging
|
||||
import time
|
||||
|
||||
from aiohttp import web
|
||||
|
||||
from framework.config import HIVE_HOME
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
CUSTOM_PROMPTS_FILE = HIVE_HOME / "custom_prompts.json"
|
||||
|
||||
|
||||
def _load_custom_prompts() -> list[dict]:
    """Read the custom-prompts file, returning ``[]`` on any problem.

    Missing file, unreadable file, invalid JSON, and a non-list top level
    all collapse to the empty list so callers never see an exception.
    """
    if not CUSTOM_PROMPTS_FILE.exists():
        return []
    try:
        loaded = json.loads(CUSTOM_PROMPTS_FILE.read_text(encoding="utf-8"))
    except Exception:
        return []
    return loaded if isinstance(loaded, list) else []
|
||||
|
||||
|
||||
def _save_custom_prompts(prompts: list[dict]) -> None:
    """Persist the full prompt list as pretty-printed JSON.

    Creates the parent directory on first write; overwrites the file
    wholesale (the list is small, so no incremental update is needed).
    """
    CUSTOM_PROMPTS_FILE.parent.mkdir(parents=True, exist_ok=True)
    serialized = json.dumps(prompts, indent=2, ensure_ascii=False) + "\n"
    CUSTOM_PROMPTS_FILE.write_text(serialized, encoding="utf-8")
|
||||
|
||||
|
||||
async def handle_list_prompts(request: web.Request) -> web.Response:
    """GET /api/prompts — list all custom prompts."""
    prompts = _load_custom_prompts()
    return web.json_response({"prompts": prompts})
|
||||
|
||||
|
||||
async def handle_create_prompt(request: web.Request) -> web.Response:
    """POST /api/prompts — add a new custom prompt.

    Expects a JSON object with required ``title`` and ``content`` fields
    and an optional ``category`` (defaults to ``"custom"``). Returns 201
    with the stored prompt, or 400 on a malformed body.
    """
    try:
        body = await request.json()
    except Exception:
        return web.json_response({"error": "Invalid JSON body"}, status=400)
    # Valid JSON can still be a list/string/number; without this guard
    # body.get(...) raises AttributeError and the client sees a 500.
    # Mirrors the same check in the MCP add-server handler.
    if not isinstance(body, dict):
        return web.json_response({"error": "Body must be a JSON object"}, status=400)

    def _text_field(key: str) -> str:
        # Treat null / non-string values like missing keys instead of
        # crashing on .strip().
        value = body.get(key)
        return value.strip() if isinstance(value, str) else ""

    title = _text_field("title")
    category = _text_field("category")
    content = _text_field("content")

    if not title or not content:
        return web.json_response({"error": "Title and content are required"}, status=400)

    prompts = _load_custom_prompts()
    new_prompt = {
        # Millisecond timestamp keeps ids unique enough for a per-user file.
        "id": f"custom_{int(time.time() * 1000)}",
        "title": title,
        "category": category or "custom",
        "content": content,
        "custom": True,
    }
    prompts.append(new_prompt)
    _save_custom_prompts(prompts)
    logger.info("Custom prompt added: %s", title)
    return web.json_response(new_prompt, status=201)
|
||||
|
||||
|
||||
async def handle_delete_prompt(request: web.Request) -> web.Response:
    """DELETE /api/prompts/{prompt_id} — delete a custom prompt."""
    prompt_id = request.match_info["prompt_id"]
    existing = _load_custom_prompts()
    remaining = [p for p in existing if p.get("id") != prompt_id]
    # Same length means nothing matched the id — report not-found.
    if len(remaining) == len(existing):
        return web.json_response({"error": "Prompt not found"}, status=404)
    _save_custom_prompts(remaining)
    return web.json_response({"deleted": prompt_id})
|
||||
|
||||
|
||||
def register_routes(app: web.Application) -> None:
    """Register the custom-prompt CRUD routes."""
    router = app.router
    router.add_get("/api/prompts", handle_list_prompts)
    router.add_post("/api/prompts", handle_create_prompt)
    router.add_delete("/api/prompts/{prompt_id}", handle_delete_prompt)
|
||||
@@ -0,0 +1,506 @@
|
||||
"""Per-queen MCP tool allowlist routes.
|
||||
|
||||
- GET /api/queen/{queen_id}/tools -- enumerate the queen's tool surface
|
||||
- PATCH /api/queen/{queen_id}/tools -- set or clear the MCP tool allowlist
|
||||
|
||||
Lifecycle and synthetic tools (``ask_user``) are always part of the queen's
|
||||
surface in INDEPENDENT mode and are returned with ``editable: false``. MCP
|
||||
tools are grouped by origin server and carry per-tool ``enabled`` flags.
|
||||
|
||||
The allowlist is persisted in a dedicated ``tools.json`` sidecar at
|
||||
``~/.hive/agents/queens/{queen_id}/tools.json``:
|
||||
|
||||
- ``null`` / missing file -> "allow every MCP tool" (default)
|
||||
- ``[]`` -> explicitly disable every MCP tool
|
||||
- ``["foo", "bar"]`` -> only these MCP tools pass through to the LLM
|
||||
|
||||
Filtering happens in ``QueenPhaseState.rebuild_independent_filter`` so the
|
||||
LLM prompt cache stays warm between saves.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from typing import Any
|
||||
|
||||
from aiohttp import web
|
||||
|
||||
from framework.agents.queen.queen_profiles import (
|
||||
ensure_default_queens,
|
||||
load_queen_profile,
|
||||
)
|
||||
from framework.agents.queen.queen_tools_config import (
|
||||
delete_queen_tools_config,
|
||||
load_queen_tools_config,
|
||||
tools_config_exists,
|
||||
update_queen_tools_config,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
_SYNTHETIC_NAMES = {"ask_user"}
|
||||
|
||||
|
||||
async def _ensure_manager_catalog(manager: Any) -> dict[str, list[dict[str, Any]]]:
    """Return the cached MCP tool catalog, building it on first call.

    ``queen_orchestrator.create_queen`` populates ``_mcp_tool_catalog`` on
    every queen boot. On a fresh backend process the user may open the
    Tool Library before any queen session has started, so the catalog is
    empty. In that case we build one from the shared MCP config; the
    first call pays an MCP-subprocess-spawn cost, subsequent calls are
    cache hits. The build runs off the event loop via asyncio.to_thread
    so the HTTP worker stays responsive while MCP servers initialize.
    """
    if manager is None:
        return {}
    cached = getattr(manager, "_mcp_tool_catalog", None)
    # A non-empty dict means a queen boot (or a prior call here) already
    # built the catalog — reuse it.
    if isinstance(cached, dict) and cached:
        return cached
    try:
        import asyncio

        from framework.server.queen_orchestrator import build_queen_tool_registry_bare

        registry, catalog = await asyncio.to_thread(build_queen_tool_registry_bare)
        manager._mcp_tool_catalog = catalog  # type: ignore[attr-defined]
        manager._bootstrap_tool_registry = registry  # type: ignore[attr-defined]
        return catalog
    except Exception:
        logger.warning("Tool catalog bootstrap failed", exc_info=True)
        return {}
|
||||
|
||||
|
||||
def _lifecycle_entries_without_session(
|
||||
manager: Any,
|
||||
mcp_names: set[str],
|
||||
) -> list[dict[str, Any]]:
|
||||
"""Derive lifecycle tool names from the registry even without a session.
|
||||
|
||||
We register queen lifecycle tools against a temporary registry using a
|
||||
minimal stub, then subtract the MCP-origin set and the synthetic set.
|
||||
The result matches what the queen sees at runtime (minus context-
|
||||
specific variants).
|
||||
"""
|
||||
registry = getattr(manager, "_bootstrap_tool_registry", None)
|
||||
# If the bootstrap registry exists but doesn't carry lifecycle tools
|
||||
# yet, register them now.
|
||||
if registry is not None and not getattr(registry, "_lifecycle_bootstrap_done", False):
|
||||
try:
|
||||
from types import SimpleNamespace
|
||||
|
||||
from framework.tools.queen_lifecycle_tools import register_queen_lifecycle_tools
|
||||
|
||||
stub_session = SimpleNamespace(
|
||||
id="tool-library-bootstrap",
|
||||
colony_runtime=None,
|
||||
event_bus=None,
|
||||
worker_path=None,
|
||||
phase_state=None,
|
||||
llm=None,
|
||||
)
|
||||
register_queen_lifecycle_tools(
|
||||
registry,
|
||||
session=stub_session,
|
||||
session_id=stub_session.id,
|
||||
session_manager=None,
|
||||
manager_session_id=stub_session.id,
|
||||
phase_state=None,
|
||||
)
|
||||
registry._lifecycle_bootstrap_done = True # type: ignore[attr-defined]
|
||||
except Exception:
|
||||
logger.debug("lifecycle bootstrap failed", exc_info=True)
|
||||
|
||||
if registry is None:
|
||||
return []
|
||||
|
||||
out: list[dict[str, Any]] = []
|
||||
for name, tool in sorted(registry.get_tools().items()):
|
||||
if name in mcp_names or name in _SYNTHETIC_NAMES:
|
||||
continue
|
||||
out.append(
|
||||
{
|
||||
"name": tool.name,
|
||||
"description": tool.description,
|
||||
"editable": False,
|
||||
}
|
||||
)
|
||||
return out
|
||||
|
||||
|
||||
def _synthetic_entries() -> list[dict[str, Any]]:
|
||||
"""Return display metadata for synthetic tools injected by the agent loop.
|
||||
|
||||
Kept behind a lazy import so test harnesses that don't wire the agent
|
||||
loop can still hit this route without blowing up.
|
||||
"""
|
||||
try:
|
||||
from framework.agent_loop.internals.synthetic_tools import build_ask_user_tool
|
||||
|
||||
tool = build_ask_user_tool()
|
||||
return [
|
||||
{
|
||||
"name": tool.name,
|
||||
"description": tool.description,
|
||||
"editable": False,
|
||||
}
|
||||
]
|
||||
except Exception:
|
||||
return [
|
||||
{
|
||||
"name": "ask_user",
|
||||
"description": "Pause and ask the user a structured question.",
|
||||
"editable": False,
|
||||
}
|
||||
]
|
||||
|
||||
|
||||
def _live_queen_session(manager: Any, queen_id: str) -> Any:
|
||||
"""Return any live DM session owned by this queen, or ``None``."""
|
||||
sessions = getattr(manager, "_sessions", None) or {}
|
||||
for session in sessions.values():
|
||||
if getattr(session, "queen_name", None) != queen_id:
|
||||
continue
|
||||
# Prefer DM (non-colony) sessions
|
||||
if getattr(session, "colony_runtime", None) is None:
|
||||
return session
|
||||
return None
|
||||
|
||||
|
||||
def _render_mcp_servers(
|
||||
*,
|
||||
mcp_tool_names_by_server: dict[str, list[dict[str, Any]]],
|
||||
enabled_mcp_tools: list[str] | None,
|
||||
) -> list[dict[str, Any]]:
|
||||
"""Shape the mcp_tool_catalog entries for the API response."""
|
||||
allowed: set[str] | None = None if enabled_mcp_tools is None else set(enabled_mcp_tools)
|
||||
servers: list[dict[str, Any]] = []
|
||||
for server_name in sorted(mcp_tool_names_by_server):
|
||||
entries = mcp_tool_names_by_server[server_name]
|
||||
tools = []
|
||||
for entry in entries:
|
||||
name = entry.get("name")
|
||||
enabled = True if allowed is None else name in allowed
|
||||
tools.append(
|
||||
{
|
||||
"name": name,
|
||||
"description": entry.get("description", ""),
|
||||
"input_schema": entry.get("input_schema", {}),
|
||||
"enabled": enabled,
|
||||
}
|
||||
)
|
||||
servers.append({"name": server_name, "tools": tools})
|
||||
return servers
|
||||
|
||||
|
||||
def _catalog_from_live_session(session: Any) -> dict[str, list[dict[str, Any]]]:
|
||||
"""Rebuild a per-server tool catalog from a live queen session.
|
||||
|
||||
The session's registry is authoritative — this reflects any hot-added
|
||||
MCP servers since the manager-level snapshot was cached.
|
||||
"""
|
||||
registry = getattr(session, "_queen_tool_registry", None)
|
||||
if registry is None:
|
||||
# session._queen_tools_by_name is a stash from create_queen; we
|
||||
# only have registry via the tools list, so reconstruct from the
|
||||
# phase state instead.
|
||||
phase_state = getattr(session, "phase_state", None)
|
||||
if phase_state is None:
|
||||
return {}
|
||||
mcp_names = getattr(phase_state, "mcp_tool_names_all", set()) or set()
|
||||
independent_tools = getattr(phase_state, "independent_tools", []) or []
|
||||
result: dict[str, list[dict[str, Any]]] = {"MCP Tools": []}
|
||||
for tool in independent_tools:
|
||||
if tool.name not in mcp_names:
|
||||
continue
|
||||
result["MCP Tools"].append(
|
||||
{
|
||||
"name": tool.name,
|
||||
"description": tool.description,
|
||||
"input_schema": tool.parameters,
|
||||
}
|
||||
)
|
||||
return result if result["MCP Tools"] else {}
|
||||
|
||||
server_map = getattr(registry, "_mcp_server_tools", {}) or {}
|
||||
tools_by_name = {t.name: t for t in registry.get_tools().values()}
|
||||
catalog: dict[str, list[dict[str, Any]]] = {}
|
||||
for server_name, tool_names in server_map.items():
|
||||
entries: list[dict[str, Any]] = []
|
||||
for name in sorted(tool_names):
|
||||
tool = tools_by_name.get(name)
|
||||
if tool is None:
|
||||
continue
|
||||
entries.append(
|
||||
{
|
||||
"name": tool.name,
|
||||
"description": tool.description,
|
||||
"input_schema": tool.parameters,
|
||||
}
|
||||
)
|
||||
catalog[server_name] = entries
|
||||
return catalog
|
||||
|
||||
|
||||
def _lifecycle_entries(
|
||||
*,
|
||||
session: Any,
|
||||
mcp_tool_names_all: set[str],
|
||||
) -> list[dict[str, Any]]:
|
||||
"""Lifecycle tools = independent_tools minus MCP-origin minus synthetic.
|
||||
|
||||
We compute this from a live session when available so the list exactly
|
||||
matches what the queen actually sees on her next turn.
|
||||
"""
|
||||
if session is None:
|
||||
return []
|
||||
phase_state = getattr(session, "phase_state", None)
|
||||
if phase_state is None:
|
||||
return []
|
||||
result: list[dict[str, Any]] = []
|
||||
for tool in getattr(phase_state, "independent_tools", []) or []:
|
||||
if tool.name in mcp_tool_names_all:
|
||||
continue
|
||||
if tool.name in _SYNTHETIC_NAMES:
|
||||
continue
|
||||
result.append(
|
||||
{
|
||||
"name": tool.name,
|
||||
"description": tool.description,
|
||||
"editable": False,
|
||||
}
|
||||
)
|
||||
return sorted(result, key=lambda x: x["name"])
|
||||
|
||||
|
||||
async def handle_get_tools(request: web.Request) -> web.Response:
    """GET /api/queen/{queen_id}/tools — enumerate tool surface for the UI."""
    queen_id = request.match_info["queen_id"]
    ensure_default_queens()
    try:
        load_queen_profile(queen_id)
    except FileNotFoundError:
        return web.json_response({"error": f"Queen '{queen_id}' not found"}, status=404)

    manager = request.app.get("manager")
    session = None if manager is None else _live_queen_session(manager, queen_id)

    # A live session's registry is the freshest source. Otherwise fall
    # back to (or lazily build) the manager-level snapshot so the Tool
    # Library works even before any queen has been started in this
    # process.
    if session is None:
        catalog = await _ensure_manager_catalog(manager)
    else:
        catalog = _catalog_from_live_session(session)
    stale = not catalog

    # Flatten the per-server catalog into the full set of MCP tool names.
    mcp_tool_names_all: set[str] = {
        entry["name"]
        for entries in catalog.values()
        for entry in entries
        if entry.get("name")
    }

    if session is None:
        lifecycle = _lifecycle_entries_without_session(manager, mcp_tool_names_all)
    else:
        lifecycle = _lifecycle_entries(
            session=session,
            mcp_tool_names_all=mcp_tool_names_all,
        )

    # Allowlist lives in the dedicated tools.json sidecar; the helper
    # migrates the legacy profile.yaml field on first read, and falls
    # back to the role-based default when no sidecar exists.
    enabled_mcp_tools = load_queen_tools_config(queen_id, mcp_catalog=catalog)
    is_role_default = not tools_config_exists(queen_id)

    return web.json_response(
        {
            "queen_id": queen_id,
            "enabled_mcp_tools": enabled_mcp_tools,
            "is_role_default": is_role_default,
            "stale": stale,
            "lifecycle": lifecycle,
            "synthetic": _synthetic_entries(),
            "mcp_servers": _render_mcp_servers(
                mcp_tool_names_by_server=catalog,
                enabled_mcp_tools=enabled_mcp_tools,
            ),
        }
    )
|
||||
|
||||
|
||||
async def handle_patch_tools(request: web.Request) -> web.Response:
    """PATCH /api/queen/{queen_id}/tools — persist the MCP tool allowlist.

    Body: ``{"enabled_mcp_tools": null | string[]}``.

    - ``null`` resets to "allow every MCP tool" (default).
    - A list is validated against the known MCP catalog; unknown names
      are rejected with 400 so the frontend catches typos.

    Returns 400 on malformed input or unknown names, 404 when the queen
    does not exist, otherwise 200 with the persisted allowlist and the
    number of live sessions that were hot-reloaded.
    """
    queen_id = request.match_info["queen_id"]
    try:
        body = await request.json()
    except Exception:
        return web.json_response({"error": "Invalid JSON body"}, status=400)
    if not isinstance(body, dict) or "enabled_mcp_tools" not in body:
        return web.json_response(
            {"error": "Body must be an object with an 'enabled_mcp_tools' field"},
            status=400,
        )

    # None means "allow everything"; anything else must be a list of str.
    enabled = body["enabled_mcp_tools"]
    if enabled is not None:
        if not isinstance(enabled, list) or not all(isinstance(x, str) for x in enabled):
            return web.json_response(
                {"error": "'enabled_mcp_tools' must be null or a list of strings"},
                status=400,
            )

    ensure_default_queens()
    try:
        load_queen_profile(queen_id)
    except FileNotFoundError:
        return web.json_response({"error": f"Queen '{queen_id}' not found"}, status=404)

    # Validate names against the known MCP tool catalog. We prefer a live
    # session's registry for the most up-to-date set, then fall back to
    # the manager-level snapshot (building it on demand if absent).
    manager = request.app.get("manager")
    session = _live_queen_session(manager, queen_id) if manager is not None else None
    if session is not None:
        catalog = _catalog_from_live_session(session)
    else:
        catalog = await _ensure_manager_catalog(manager)
    known_names: set[str] = set()
    for entries in catalog.values():
        for entry in entries:
            if entry.get("name"):
                known_names.add(entry["name"])

    # Only validate when we actually know some names: an empty catalog
    # (e.g. MCP bootstrap failed) must not reject every save.
    if enabled is not None and known_names:
        unknown = sorted(set(enabled) - known_names)
        if unknown:
            return web.json_response(
                {"error": "Unknown MCP tool name(s)", "unknown": unknown},
                status=400,
            )

    # Persist — tools.json sidecar, not profile.yaml.
    try:
        update_queen_tools_config(queen_id, enabled)
    except FileNotFoundError:
        return web.json_response({"error": f"Queen '{queen_id}' not found"}, status=404)

    # Hot-reload every live DM session for this queen. The filter memo is
    # rebuilt so the very next turn sees the new allowlist without a
    # session restart, and the prompt cache is invalidated exactly once.
    refreshed = 0
    sessions = getattr(manager, "_sessions", None) or {}
    for sess in sessions.values():
        if getattr(sess, "queen_name", None) != queen_id:
            continue
        phase_state = getattr(sess, "phase_state", None)
        if phase_state is None:
            continue
        phase_state.enabled_mcp_tools = enabled
        rebuild = getattr(phase_state, "rebuild_independent_filter", None)
        if callable(rebuild):
            try:
                rebuild()
                refreshed += 1
            except Exception:
                # Best effort: a failed rebuild must not fail the PATCH —
                # the persisted sidecar is already correct.
                logger.debug(
                    "Queen tools: rebuild_independent_filter failed for session %s",
                    getattr(sess, "id", "?"),
                    exc_info=True,
                )

    logger.info(
        "Queen tools: queen_id=%s allowlist=%s refreshed_sessions=%d",
        queen_id,
        "null" if enabled is None else f"{len(enabled)} tool(s)",
        refreshed,
    )
    return web.json_response(
        {
            "queen_id": queen_id,
            "enabled_mcp_tools": enabled,
            "refreshed_sessions": refreshed,
        }
    )
|
||||
|
||||
|
||||
async def handle_delete_tools(request: web.Request) -> web.Response:
    """DELETE /api/queen/{queen_id}/tools — drop the sidecar, fall back to role defaults.

    Users click "Reset to role default" in the Tool Library. That
    removes ``tools.json`` so the queen's effective allowlist becomes
    the role-based default (or allow-all if the queen has no role
    entry). Live sessions are refreshed so the next turn reflects the
    change without a restart.

    Returns 404 when the queen does not exist, otherwise 200 with the
    recomputed default allowlist and the hot-reload count.
    """
    queen_id = request.match_info["queen_id"]
    ensure_default_queens()
    try:
        load_queen_profile(queen_id)
    except FileNotFoundError:
        return web.json_response({"error": f"Queen '{queen_id}' not found"}, status=404)

    # Whether a sidecar file was actually deleted — presumably False when
    # the queen was already at the role default; confirm against
    # delete_queen_tools_config.
    removed = delete_queen_tools_config(queen_id)

    # Recompute the queen's effective allowlist from the role defaults
    # so we can hot-reload live sessions in one pass (same shape as
    # PATCH).
    manager = request.app.get("manager")
    session = _live_queen_session(manager, queen_id) if manager is not None else None
    if session is not None:
        catalog = _catalog_from_live_session(session)
    else:
        catalog = await _ensure_manager_catalog(manager)
    new_enabled = load_queen_tools_config(queen_id, mcp_catalog=catalog)

    # Push the recomputed allowlist into every live session for this
    # queen and rebuild each session's filter memo.
    refreshed = 0
    sessions = getattr(manager, "_sessions", None) or {}
    for sess in sessions.values():
        if getattr(sess, "queen_name", None) != queen_id:
            continue
        phase_state = getattr(sess, "phase_state", None)
        if phase_state is None:
            continue
        phase_state.enabled_mcp_tools = new_enabled
        rebuild = getattr(phase_state, "rebuild_independent_filter", None)
        if callable(rebuild):
            try:
                rebuild()
                refreshed += 1
            except Exception:
                # Best effort: a failed rebuild must not fail the DELETE —
                # the sidecar is already gone.
                logger.debug(
                    "Queen tools: rebuild_independent_filter failed for session %s",
                    getattr(sess, "id", "?"),
                    exc_info=True,
                )

    logger.info(
        "Queen tools: queen_id=%s reset-to-default removed=%s refreshed_sessions=%d",
        queen_id,
        removed,
        refreshed,
    )
    return web.json_response(
        {
            "queen_id": queen_id,
            "removed": removed,
            "enabled_mcp_tools": new_enabled,
            "is_role_default": True,
            "refreshed_sessions": refreshed,
        }
    )
|
||||
|
||||
|
||||
def register_routes(app: web.Application) -> None:
    """Register the queen Tool Library routes (GET/PATCH/DELETE on one path)."""
    route = "/api/queen/{queen_id}/tools"
    app.router.add_get(route, handle_get_tools)
    app.router.add_patch(route, handle_patch_tools)
    app.router.add_delete(route, handle_delete_tools)
|
||||
@@ -3,6 +3,8 @@
|
||||
- GET /api/queen/profiles -- list all queen profiles (id, name, title)
|
||||
- GET /api/queen/{queen_id}/profile -- get full queen profile
|
||||
- PATCH /api/queen/{queen_id}/profile -- update queen profile fields
|
||||
- POST /api/queen/{queen_id}/avatar -- upload queen avatar image
|
||||
- GET /api/queen/{queen_id}/avatar -- serve queen avatar image
|
||||
- POST /api/queen/{queen_id}/session -- get or create a persistent session for a queen
|
||||
- POST /api/queen/{queen_id}/session/select -- resume a specific session for a queen
|
||||
- POST /api/queen/{queen_id}/session/new -- create a fresh session for a queen
|
||||
@@ -166,6 +168,34 @@ async def handle_get_profile(request: web.Request) -> web.Response:
|
||||
return web.json_response({"id": queen_id, **api_profile})
|
||||
|
||||
|
||||
def _reverse_transform_for_yaml(body: dict) -> dict:
|
||||
"""Map API-format fields back to YAML profile fields.
|
||||
|
||||
The API exposes a simplified view (summary, skills, signature_achievement)
|
||||
that maps onto the underlying YAML structure (core_traits, hidden_background,
|
||||
psychological_profile, world_lore, etc.).
|
||||
"""
|
||||
yaml_updates: dict[str, Any] = {}
|
||||
|
||||
if "name" in body:
|
||||
yaml_updates["name"] = body["name"]
|
||||
if "title" in body:
|
||||
yaml_updates["title"] = body["title"]
|
||||
|
||||
if "summary" in body:
|
||||
# Summary is displayed as core_traits + anti_stereotype joined by \n\n.
|
||||
# Store the full text in core_traits for simplicity.
|
||||
yaml_updates["core_traits"] = body["summary"]
|
||||
|
||||
if "skills" in body:
|
||||
yaml_updates["skills"] = body["skills"]
|
||||
|
||||
if "signature_achievement" in body:
|
||||
yaml_updates.setdefault("world_lore", {})["habitat"] = body["signature_achievement"]
|
||||
|
||||
return yaml_updates
|
||||
|
||||
|
||||
async def handle_update_profile(request: web.Request) -> web.Response:
|
||||
"""PATCH /api/queen/{queen_id}/profile — update queen profile fields."""
|
||||
queen_id = request.match_info["queen_id"]
|
||||
@@ -175,11 +205,18 @@ async def handle_update_profile(request: web.Request) -> web.Response:
|
||||
return web.json_response({"error": "Invalid JSON body"}, status=400)
|
||||
if not isinstance(body, dict):
|
||||
return web.json_response({"error": "Body must be a JSON object"}, status=400)
|
||||
|
||||
yaml_updates = _reverse_transform_for_yaml(body)
|
||||
if not yaml_updates:
|
||||
return web.json_response({"error": "No valid fields to update"}, status=400)
|
||||
|
||||
try:
|
||||
updated = update_queen_profile(queen_id, body)
|
||||
updated = update_queen_profile(queen_id, yaml_updates)
|
||||
except FileNotFoundError:
|
||||
return web.json_response({"error": f"Queen '{queen_id}' not found"}, status=404)
|
||||
return web.json_response({"id": queen_id, **updated})
|
||||
|
||||
api_profile = _transform_profile_for_api(updated)
|
||||
return web.json_response({"id": queen_id, **api_profile})
|
||||
|
||||
|
||||
async def handle_queen_session(request: web.Request) -> web.Response:
|
||||
@@ -207,16 +244,26 @@ async def handle_queen_session(request: web.Request) -> web.Response:
|
||||
initial_prompt = body.get("initial_prompt")
|
||||
initial_phase = body.get("initial_phase")
|
||||
|
||||
# 1. Check for an existing live session bound to this queen.
|
||||
for session in manager.list_sessions():
|
||||
if session.queen_name == queen_id:
|
||||
return web.json_response(
|
||||
{
|
||||
"session_id": session.id,
|
||||
"queen_id": queen_id,
|
||||
"status": "live",
|
||||
}
|
||||
)
|
||||
# 1. Check for an existing live DM session bound to this queen.
|
||||
# Skip colony sessions: a colony forked from this queen also carries
|
||||
# queen_name == queen_id, but it has a worker loaded (colony_id /
|
||||
# worker_path set) and is the colony's chat, not the queen's DM.
|
||||
# When multiple DM sessions for this queen are live at once (e.g. the
|
||||
# user created a new session, then navigated away and back), return
|
||||
# the most recently loaded one so we don't resurrect a stale older
|
||||
# session ahead of a freshly created one.
|
||||
live_matches = [
|
||||
s for s in manager.list_sessions() if s.queen_name == queen_id and s.colony_id is None and s.worker_path is None
|
||||
]
|
||||
if live_matches:
|
||||
latest = max(live_matches, key=lambda s: s.loaded_at)
|
||||
return web.json_response(
|
||||
{
|
||||
"session_id": latest.id,
|
||||
"queen_id": queen_id,
|
||||
"status": "live",
|
||||
}
|
||||
)
|
||||
|
||||
# 2. Find the most recent cold session for this queen and resume it.
|
||||
# IMPORTANT: skip sessions that don't belong in the queen DM:
|
||||
@@ -338,6 +385,8 @@ async def handle_select_queen_session(request: web.Request) -> web.Response:
|
||||
|
||||
async def handle_new_queen_session(request: web.Request) -> web.Response:
|
||||
"""POST /api/queen/{queen_id}/session/new -- create a fresh queen session."""
|
||||
from framework.tools.queen_lifecycle_tools import QUEEN_PHASES
|
||||
|
||||
queen_id = request.match_info["queen_id"]
|
||||
manager = request.app["manager"]
|
||||
|
||||
@@ -347,9 +396,25 @@ async def handle_new_queen_session(request: web.Request) -> web.Response:
|
||||
except FileNotFoundError:
|
||||
return web.json_response({"error": f"Queen '{queen_id}' not found"}, status=404)
|
||||
|
||||
body = await request.json() if request.can_read_body else {}
|
||||
if request.can_read_body:
|
||||
try:
|
||||
body = await request.json()
|
||||
except json.JSONDecodeError:
|
||||
return web.json_response({"error": "Invalid JSON body"}, status=400)
|
||||
if not isinstance(body, dict):
|
||||
return web.json_response({"error": "Request body must be a JSON object"}, status=400)
|
||||
else:
|
||||
body = {}
|
||||
initial_prompt = body.get("initial_prompt")
|
||||
initial_phase = body.get("initial_phase") or "independent"
|
||||
if initial_phase not in QUEEN_PHASES:
|
||||
return web.json_response(
|
||||
{
|
||||
"error": f"Invalid initial_phase '{initial_phase}'",
|
||||
"valid": sorted(QUEEN_PHASES),
|
||||
},
|
||||
status=400,
|
||||
)
|
||||
|
||||
session = await manager.create_session(
|
||||
initial_prompt=initial_prompt,
|
||||
@@ -365,11 +430,98 @@ async def handle_new_queen_session(request: web.Request) -> web.Response:
|
||||
)
|
||||
|
||||
|
||||
MAX_AVATAR_BYTES = 2 * 1024 * 1024  # 2 MB max after compression
# Accepted avatar upload MIME types, mapped to the file extension used
# when the image is written into the queen's profile directory.
_ALLOWED_AVATAR_TYPES = {
    "image/jpeg": ".jpg",
    "image/png": ".png",
    "image/webp": ".webp",
}
|
||||
|
||||
|
||||
async def handle_upload_avatar(request: web.Request) -> web.Response:
    """POST /api/queen/{queen_id}/avatar — upload queen avatar image.

    Accepts multipart/form-data with a single file field named 'avatar'
    (only the first part is examined). Stores as avatar.{ext} in the
    queen's profile directory, replacing any previous avatar regardless
    of extension. Returns 400 for bad field name, unsupported type,
    oversize or empty payloads; 404 for an unknown queen.
    """
    from framework.config import QUEENS_DIR

    queen_id = request.match_info["queen_id"]
    queen_dir = QUEENS_DIR / queen_id
    if not (queen_dir / "profile.yaml").exists():
        return web.json_response({"error": f"Queen '{queen_id}' not found"}, status=404)

    reader = await request.multipart()
    field = await reader.next()
    if field is None or field.name != "avatar":
        return web.json_response({"error": "Expected a file field named 'avatar'"}, status=400)

    # Prefer the part's parsed content_type attribute when present; fall
    # back to the raw Content-Type header (default octet-stream).
    content_type = field.headers.get("Content-Type", "application/octet-stream")
    # Also check by content_type from the field
    if hasattr(field, "content_type"):
        content_type = field.content_type or content_type

    ext = _ALLOWED_AVATAR_TYPES.get(content_type)
    if not ext:
        return web.json_response(
            {"error": f"Unsupported image type: {content_type}. Use JPEG, PNG, or WebP."},
            status=400,
        )

    # Read the file data with size limit, enforced mid-stream so an
    # oversize upload is rejected without buffering the whole body.
    data = bytearray()
    while True:
        chunk = await field.read_chunk(8192)
        if not chunk:
            break
        data.extend(chunk)
        if len(data) > MAX_AVATAR_BYTES:
            return web.json_response(
                {"error": f"Image too large. Maximum size is {MAX_AVATAR_BYTES // 1024 // 1024} MB."},
                status=400,
            )

    if not data:
        return web.json_response({"error": "Empty file"}, status=400)

    # Remove any existing avatar files (the extension may be changing)
    for existing in queen_dir.glob("avatar.*"):
        existing.unlink(missing_ok=True)

    # Write the new avatar
    avatar_path = queen_dir / f"avatar{ext}"
    avatar_path.write_bytes(data)

    logger.info("Avatar uploaded for queen %s: %s (%d bytes)", queen_id, avatar_path.name, len(data))
    return web.json_response({"avatar_url": f"/api/queen/{queen_id}/avatar"})
|
||||
|
||||
|
||||
async def handle_get_avatar(request: web.Request) -> web.Response:
    """GET /api/queen/{queen_id}/avatar — serve queen avatar image."""
    from framework.config import QUEENS_DIR

    queen_id = request.match_info["queen_id"]
    queen_dir = QUEENS_DIR / queen_id

    # Probe each supported extension in declaration order; first hit wins.
    candidates = (queen_dir / f"avatar{ext}" for ext in _ALLOWED_AVATAR_TYPES.values())
    hit = next((path for path in candidates if path.exists()), None)
    if hit is not None:
        return web.FileResponse(
            hit,
            headers={"Cache-Control": "public, max-age=3600"},
        )

    return web.json_response({"error": "No avatar found"}, status=404)
|
||||
|
||||
|
||||
def register_routes(app: web.Application) -> None:
    """Register queen profile, avatar, and session routes."""
    queen = "/api/queen/{queen_id}"
    app.router.add_get("/api/queen/profiles", handle_list_profiles)
    app.router.add_get(queen + "/profile", handle_get_profile)
    app.router.add_patch(queen + "/profile", handle_update_profile)
    app.router.add_post(queen + "/avatar", handle_upload_avatar)
    app.router.add_get(queen + "/avatar", handle_get_avatar)
    app.router.add_post(queen + "/session", handle_queen_session)
    app.router.add_post(queen + "/session/select", handle_select_queen_session)
    app.router.add_post(queen + "/session/new", handle_new_queen_session)
|
||||
|
||||
@@ -10,6 +10,7 @@ Session-primary routes:
|
||||
- GET /api/sessions/{session_id}/stats — runtime statistics
|
||||
- GET /api/sessions/{session_id}/entry-points — list entry points
|
||||
- PATCH /api/sessions/{session_id}/triggers/{id} — update trigger task
|
||||
- POST /api/sessions/{session_id}/triggers/{id}/run — fire trigger once (manual)
|
||||
- GET /api/sessions/{session_id}/colonies — list colony IDs
|
||||
- GET /api/sessions/{session_id}/events/history — persisted eventbus log (for replay)
|
||||
|
||||
@@ -63,6 +64,8 @@ def _session_to_live_dict(session) -> dict:
|
||||
"queen_supports_images": supports_image_tool_results(queen_model) if queen_model else True,
|
||||
"queen_id": getattr(phase_state, "queen_id", None) if phase_state else None,
|
||||
"queen_name": (phase_state.queen_profile or {}).get("name") if phase_state else None,
|
||||
"colony_spawned": getattr(session, "colony_spawned", False),
|
||||
"spawned_colony_name": getattr(session, "spawned_colony_name", None),
|
||||
}
|
||||
|
||||
|
||||
@@ -119,8 +122,19 @@ async def handle_create_session(request: web.Request) -> web.Response:
|
||||
(equivalent to the old POST /api/agents). Otherwise creates a queen-only
|
||||
session that can later have a colony loaded via POST /sessions/{id}/colony.
|
||||
"""
|
||||
from framework.agents.queen.queen_profiles import ensure_default_queens, load_queen_profile
|
||||
from framework.tools.queen_lifecycle_tools import QUEEN_PHASES
|
||||
|
||||
manager = _get_manager(request)
|
||||
body = await request.json() if request.can_read_body else {}
|
||||
if request.can_read_body:
|
||||
try:
|
||||
body = await request.json()
|
||||
except json.JSONDecodeError:
|
||||
return web.json_response({"error": "Invalid JSON body"}, status=400)
|
||||
if not isinstance(body, dict):
|
||||
return web.json_response({"error": "Request body must be a JSON object"}, status=400)
|
||||
else:
|
||||
body = {}
|
||||
agent_path = body.get("agent_path")
|
||||
agent_id = body.get("agent_id")
|
||||
session_id = body.get("session_id")
|
||||
@@ -131,6 +145,21 @@ async def handle_create_session(request: web.Request) -> web.Response:
|
||||
initial_phase = body.get("initial_phase")
|
||||
worker_name = body.get("worker_name")
|
||||
|
||||
if initial_phase is not None and initial_phase not in QUEEN_PHASES:
|
||||
return web.json_response(
|
||||
{
|
||||
"error": f"Invalid initial_phase '{initial_phase}'",
|
||||
"valid": sorted(QUEEN_PHASES),
|
||||
},
|
||||
status=400,
|
||||
)
|
||||
if queen_name:
|
||||
ensure_default_queens()
|
||||
try:
|
||||
load_queen_profile(queen_name)
|
||||
except FileNotFoundError:
|
||||
return web.json_response({"error": f"Queen '{queen_name}' not found"}, status=404)
|
||||
|
||||
if agent_path:
|
||||
try:
|
||||
agent_path = str(validate_agent_path(agent_path))
|
||||
@@ -157,6 +186,7 @@ async def handle_create_session(request: web.Request) -> web.Response:
|
||||
model=model,
|
||||
initial_prompt=initial_prompt,
|
||||
queen_resume_from=queen_resume_from,
|
||||
queen_name=queen_name,
|
||||
initial_phase=initial_phase,
|
||||
)
|
||||
except ValueError as e:
|
||||
@@ -245,7 +275,14 @@ async def handle_get_live_session(request: web.Request) -> web.Response:
|
||||
}
|
||||
mono = getattr(session, "trigger_next_fire", {}).get(t.id)
|
||||
if mono is not None:
|
||||
entry["next_fire_in"] = max(0.0, mono - time.monotonic())
|
||||
remaining = max(0.0, mono - time.monotonic())
|
||||
entry["next_fire_in"] = remaining
|
||||
entry["next_fire_at"] = int((time.time() + remaining) * 1000)
|
||||
stats = getattr(session, "trigger_fire_stats", {}).get(t.id)
|
||||
if stats:
|
||||
entry["fire_count"] = stats.get("fire_count", 0)
|
||||
if stats.get("last_fired_at") is not None:
|
||||
entry["last_fired_at"] = stats["last_fired_at"]
|
||||
data["entry_points"].append(entry)
|
||||
data["colonies"] = session.colony_runtime.list_graphs()
|
||||
|
||||
@@ -395,7 +432,14 @@ async def handle_session_entry_points(request: web.Request) -> web.Response:
|
||||
}
|
||||
mono = getattr(session, "trigger_next_fire", {}).get(t.id)
|
||||
if mono is not None:
|
||||
entry["next_fire_in"] = max(0.0, mono - time.monotonic())
|
||||
remaining = max(0.0, mono - time.monotonic())
|
||||
entry["next_fire_in"] = remaining
|
||||
entry["next_fire_at"] = int((time.time() + remaining) * 1000)
|
||||
stats = getattr(session, "trigger_fire_stats", {}).get(t.id)
|
||||
if stats:
|
||||
entry["fire_count"] = stats.get("fire_count", 0)
|
||||
if stats.get("last_fired_at") is not None:
|
||||
entry["last_fired_at"] = stats["last_fired_at"]
|
||||
entry_points.append(entry)
|
||||
return web.json_response({"entry_points": entry_points})
|
||||
|
||||
@@ -546,6 +590,60 @@ async def handle_update_trigger_task(request: web.Request) -> web.Response:
|
||||
)
|
||||
|
||||
|
||||
async def handle_run_trigger(request: web.Request) -> web.Response:
    """POST /api/sessions/{session_id}/triggers/{trigger_id}/run — fire the trigger once.

    Manual invocation for testing. Works whether the trigger is active or
    inactive; does not change active state and does not reset the scheduled
    next-fire time of an active timer.

    Returns 404 for an unknown trigger, 409 when the colony or queen node
    is not ready, 500 when injection fails, otherwise 200.
    """
    session, err = resolve_session(request)
    if err:
        return err

    trigger_id = request.match_info["trigger_id"]
    tdef = getattr(session, "available_triggers", {}).get(trigger_id)
    if tdef is None:
        return web.json_response(
            {"error": f"Trigger '{trigger_id}' not found"},
            status=404,
        )

    # Triggers are injected into the colony's queen node, so both the
    # colony runtime and the queen node must be up.
    if getattr(session, "colony_runtime", None) is None:
        return web.json_response({"error": "Colony not loaded"}, status=409)

    executor = getattr(session, "queen_executor", None)
    queen_node = getattr(executor, "node_registry", {}).get("queen") if executor else None
    if queen_node is None:
        return web.json_response({"error": "Queen not ready"}, status=409)

    from framework.agent_loop.agent_loop import TriggerEvent

    try:
        # "forced": True marks this as a manual fire — presumably so
        # downstream logic can distinguish it from scheduler-initiated
        # fires; confirm against the TriggerEvent consumers.
        await queen_node.inject_trigger(
            TriggerEvent(
                trigger_type=tdef.trigger_type,
                source_id=trigger_id,
                payload={
                    "task": tdef.task or "",
                    "trigger_config": tdef.trigger_config,
                    "forced": True,
                },
            )
        )
    except Exception as exc:  # noqa: BLE001
        return web.json_response(
            {"error": f"Failed to fire trigger: {exc}"},
            status=500,
        )

    from framework.tools.queen_lifecycle_tools import _emit_trigger_fired

    # Emit the trigger-fired event after a successful injection so
    # listeners observe the manual fire as well.
    await _emit_trigger_fired(session, trigger_id, tdef.trigger_type)

    return web.json_response({"status": "fired", "trigger_id": trigger_id})
|
||||
|
||||
|
||||
async def handle_activate_trigger(request: web.Request) -> web.Response:
|
||||
"""POST /api/sessions/{session_id}/triggers/{trigger_id}/activate — start a trigger."""
|
||||
session, err = resolve_session(request)
|
||||
@@ -597,6 +695,17 @@ async def handle_activate_trigger(request: web.Request) -> web.Response:
|
||||
|
||||
runner = getattr(session, "runner", None)
|
||||
colony_entry = runner.graph.entry_node if runner else None
|
||||
config_out = dict(tdef.trigger_config)
|
||||
mono = getattr(session, "trigger_next_fire", {}).get(trigger_id)
|
||||
if mono is not None:
|
||||
remaining = max(0.0, mono - time.monotonic())
|
||||
config_out["next_fire_in"] = remaining
|
||||
config_out["next_fire_at"] = int((time.time() + remaining) * 1000)
|
||||
stats = getattr(session, "trigger_fire_stats", {}).get(trigger_id)
|
||||
if stats:
|
||||
config_out["fire_count"] = stats.get("fire_count", 0)
|
||||
if stats.get("last_fired_at") is not None:
|
||||
config_out["last_fired_at"] = stats["last_fired_at"]
|
||||
await bus.publish(
|
||||
AgentEvent(
|
||||
type=EventType.TRIGGER_ACTIVATED,
|
||||
@@ -604,7 +713,7 @@ async def handle_activate_trigger(request: web.Request) -> web.Response:
|
||||
data={
|
||||
"trigger_id": trigger_id,
|
||||
"trigger_type": tdef.trigger_type,
|
||||
"trigger_config": tdef.trigger_config,
|
||||
"trigger_config": config_out,
|
||||
"name": tdef.description or trigger_id,
|
||||
**({"entry_node": colony_entry} if colony_entry else {}),
|
||||
},
|
||||
@@ -866,6 +975,8 @@ async def handle_discover(request: web.Request) -> web.Response:
|
||||
"tool_count": entry.tool_count,
|
||||
"tags": entry.tags,
|
||||
"last_active": entry.last_active,
|
||||
"created_at": entry.created_at,
|
||||
"icon": entry.icon,
|
||||
"is_loaded": str(entry.path.resolve()) in loaded_paths,
|
||||
"workers": [w.to_dict() for w in entry.workers],
|
||||
}
|
||||
@@ -946,6 +1057,40 @@ async def handle_reveal_session_folder(request: web.Request) -> web.Response:
|
||||
return web.json_response({"path": str(folder)})
|
||||
|
||||
|
||||
async def handle_update_colony_metadata(request: web.Request) -> web.Response:
|
||||
"""PATCH /api/agents/metadata — update colony metadata (e.g. icon).
|
||||
|
||||
Body: {"agent_path": "...", "icon": "rocket"}
|
||||
"""
|
||||
try:
|
||||
body = await request.json()
|
||||
except Exception:
|
||||
return web.json_response({"error": "Invalid JSON body"}, status=400)
|
||||
|
||||
agent_path = body.get("agent_path")
|
||||
if not agent_path:
|
||||
return web.json_response({"error": "agent_path is required"}, status=400)
|
||||
|
||||
try:
|
||||
resolved = validate_agent_path(agent_path)
|
||||
except ValueError as exc:
|
||||
return web.json_response({"error": str(exc)}, status=400)
|
||||
|
||||
metadata_path = resolved / "metadata.json"
|
||||
metadata: dict = {}
|
||||
if metadata_path.exists():
|
||||
try:
|
||||
metadata = json.loads(metadata_path.read_text(encoding="utf-8"))
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
if "icon" in body:
|
||||
metadata["icon"] = body["icon"]
|
||||
|
||||
metadata_path.write_text(json.dumps(metadata, indent=2, ensure_ascii=False), encoding="utf-8")
|
||||
return web.json_response({"ok": True})
|
||||
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Route registration
|
||||
# ------------------------------------------------------------------
|
||||
@@ -956,6 +1101,7 @@ def register_routes(app: web.Application) -> None:
|
||||
# Discovery & agent management
|
||||
app.router.add_get("/api/discover", handle_discover)
|
||||
app.router.add_delete("/api/agents", handle_delete_agent)
|
||||
app.router.add_patch("/api/agents/metadata", handle_update_colony_metadata)
|
||||
|
||||
# Session lifecycle
|
||||
app.router.add_post("/api/sessions", handle_create_session)
|
||||
@@ -983,6 +1129,10 @@ def register_routes(app: web.Application) -> None:
|
||||
"/api/sessions/{session_id}/triggers/{trigger_id}/deactivate",
|
||||
handle_deactivate_trigger,
|
||||
)
|
||||
app.router.add_post(
|
||||
"/api/sessions/{session_id}/triggers/{trigger_id}/run",
|
||||
handle_run_trigger,
|
||||
)
|
||||
app.router.add_get("/api/sessions/{session_id}/colonies", handle_session_colonies)
|
||||
|
||||
app.router.add_get("/api/sessions/{session_id}/events/history", handle_session_events_history)
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -252,25 +252,17 @@ def _active_colony(session):
|
||||
return getattr(session, "colony", None)
|
||||
|
||||
|
||||
async def handle_list_live_workers(request: web.Request) -> web.Response:
|
||||
"""GET /api/sessions/{session_id}/workers — list live workers.
|
||||
def _build_live_workers_payload(colony) -> list[dict]:
|
||||
"""Serialize the colony's current worker registry.
|
||||
|
||||
Returns an array of ``{worker_id, task, status, started_at, duration_seconds,
|
||||
is_active}`` objects. Active workers come first. The queen overseer
|
||||
(persistent worker) is included because the frontend should know it
|
||||
exists, but the stop action on it is a session-level kill — the UI
|
||||
should treat it differently (not offered here).
|
||||
Extracted so both the one-shot ``GET /workers`` handler and the SSE
|
||||
``/workers/stream`` handler render the exact same shape.
|
||||
"""
|
||||
session, err = resolve_session(request)
|
||||
if err:
|
||||
return err
|
||||
|
||||
colony = _active_colony(session)
|
||||
if colony is None:
|
||||
return web.json_response({"workers": []})
|
||||
return []
|
||||
|
||||
now = time.monotonic()
|
||||
payload = []
|
||||
payload: list[dict] = []
|
||||
try:
|
||||
workers = list(colony._workers.values()) # type: ignore[attr-defined]
|
||||
except Exception:
|
||||
@@ -295,7 +287,75 @@ async def handle_list_live_workers(request: web.Request) -> web.Response:
|
||||
|
||||
# Active workers first, then terminated, newest-started first within group.
|
||||
payload.sort(key=lambda r: (not r["is_active"], -(r["duration_seconds"] or 0)))
|
||||
return web.json_response({"workers": payload})
|
||||
return payload
|
||||
|
||||
|
||||
def _payload_change_signature(payload: list[dict]) -> tuple:
|
||||
"""Cheap fingerprint for change detection on the SSE stream.
|
||||
|
||||
We intentionally exclude ``duration_seconds`` — it ticks every call
|
||||
and would make every poll look like a change, defeating the "only
|
||||
emit on change" optimisation. Everything else (status, result,
|
||||
explicit_report) actually reflects worker state transitions.
|
||||
"""
|
||||
return tuple(
|
||||
(
|
||||
w["worker_id"],
|
||||
w["status"],
|
||||
w["is_active"],
|
||||
w["result_status"],
|
||||
w["result_summary"],
|
||||
bool(w["explicit_report"]),
|
||||
)
|
||||
for w in payload
|
||||
)
|
||||
|
||||
|
||||
async def handle_live_workers_stream(request: web.Request) -> web.StreamResponse:
|
||||
"""GET /api/sessions/{session_id}/workers/stream — SSE feed.
|
||||
|
||||
Emits a ``snapshot`` event immediately, then re-emits every time
|
||||
the worker registry changes (status transitions, new spawns, new
|
||||
reports). Polls the runtime every 2s internally — the colony's
|
||||
``_workers`` dict is not observable otherwise. Clients disconnecting
|
||||
bubbles up as ConnectionResetError from ``resp.write``.
|
||||
"""
|
||||
session, err = resolve_session(request)
|
||||
if err:
|
||||
return err
|
||||
|
||||
import asyncio
|
||||
|
||||
resp = web.StreamResponse(
|
||||
status=200,
|
||||
headers={
|
||||
"Content-Type": "text/event-stream",
|
||||
"Cache-Control": "no-cache, no-transform",
|
||||
"Connection": "keep-alive",
|
||||
"X-Accel-Buffering": "no",
|
||||
},
|
||||
)
|
||||
await resp.prepare(request)
|
||||
|
||||
async def _send(event: str, data) -> None:
|
||||
payload = f"event: {event}\ndata: {json.dumps(data)}\n\n"
|
||||
await resp.write(payload.encode("utf-8"))
|
||||
|
||||
last_signature: tuple | None = None
|
||||
try:
|
||||
while True:
|
||||
colony = _active_colony(session)
|
||||
workers = _build_live_workers_payload(colony)
|
||||
signature = _payload_change_signature(workers)
|
||||
if signature != last_signature:
|
||||
await _send("snapshot", {"workers": workers})
|
||||
last_signature = signature
|
||||
await asyncio.sleep(2.0)
|
||||
except (asyncio.CancelledError, ConnectionResetError):
|
||||
raise
|
||||
except Exception as exc:
|
||||
logger.warning("live workers stream error: %s", exc, exc_info=True)
|
||||
return resp
|
||||
|
||||
|
||||
async def handle_stop_live_worker(request: web.Request) -> web.Response:
|
||||
@@ -399,8 +459,13 @@ def register_routes(app: web.Application) -> None:
|
||||
"/api/sessions/{session_id}/colonies/{colony_id}/nodes/{node_id}/tools",
|
||||
handle_node_tools,
|
||||
)
|
||||
# Live worker control
|
||||
app.router.add_get("/api/sessions/{session_id}/workers", handle_list_live_workers)
|
||||
# Live worker control. The GET /workers list endpoint lives in
|
||||
# routes_colony_workers.py — it reads from session.colony (the
|
||||
# unified ColonyRuntime where run_parallel_workers-spawned workers
|
||||
# actually live) and returns the WorkerSummary shape the frontend
|
||||
# types against. Registering a duplicate here shadowed it in
|
||||
# aiohttp's router and broke the Sessions tab.
|
||||
app.router.add_get("/api/sessions/{session_id}/workers/stream", handle_live_workers_stream)
|
||||
app.router.add_post(
|
||||
"/api/sessions/{session_id}/workers/stop-all",
|
||||
handle_stop_all_live_workers,
|
||||
|
||||
@@ -93,6 +93,9 @@ class Session:
|
||||
worker_configured: bool = False
|
||||
# Monotonic timestamps for next trigger fire (mirrors AgentRuntime._timer_next_fire)
|
||||
trigger_next_fire: dict[str, float] = field(default_factory=dict)
|
||||
# Per-trigger fire stats (session lifetime): {trigger_id: {"fire_count": int, "last_fired_at": epoch_ms}}.
|
||||
# Reset on process restart — good enough as a "since this session started" counter.
|
||||
trigger_fire_stats: dict[str, dict[str, Any]] = field(default_factory=dict)
|
||||
# Session directory resumption:
|
||||
# When set, _start_queen writes queen conversations to this existing session's
|
||||
# directory instead of creating a new one. This lets cold-restores accumulate
|
||||
@@ -111,6 +114,12 @@ class Session:
|
||||
# tool unlocked. The mode is the canonical discriminator for storage
|
||||
# path, tool exposure, and SSE filtering — see the Phase 2 plan.
|
||||
mode: Literal["dm", "colony"] = "dm"
|
||||
# Set to True after the user clicks the COLONY_CREATED system message
|
||||
# in this DM. Locks the chat input — the user must compact+fork into a
|
||||
# fresh session before continuing the conversation. Persisted in
|
||||
# meta.json so the lock survives server restarts.
|
||||
colony_spawned: bool = False
|
||||
spawned_colony_name: str | None = None
|
||||
|
||||
|
||||
class SessionManager:
|
||||
@@ -149,13 +158,9 @@ class SessionManager:
|
||||
try:
|
||||
ensured = ensure_all_colony_dbs()
|
||||
if ensured:
|
||||
logger.info(
|
||||
"progress_db: ensured %d colony DB(s) at startup", len(ensured)
|
||||
)
|
||||
logger.info("progress_db: ensured %d colony DB(s) at startup", len(ensured))
|
||||
except Exception:
|
||||
logger.warning(
|
||||
"progress_db: backfill at startup failed (non-fatal)", exc_info=True
|
||||
)
|
||||
logger.warning("progress_db: backfill at startup failed (non-fatal)", exc_info=True)
|
||||
|
||||
def build_llm(self, model: str | None = None):
|
||||
"""Construct an LLM provider using the server's configured defaults."""
|
||||
@@ -417,6 +422,27 @@ class SessionManager:
|
||||
if existing.worker_path and str(existing.worker_path) == str(agent_path):
|
||||
return existing
|
||||
|
||||
# When the queen forked this colony, the inherited DM transcript
|
||||
# is compacted in the background (see fork_session_into_colony).
|
||||
# Block here until that compactor finishes so _load_worker_core
|
||||
# reads the compacted summary — not the raw transcript (which
|
||||
# would defeat the fork's purpose). Bounded wait: on timeout we
|
||||
# proceed anyway so a stuck compactor can't brick the colony.
|
||||
if queen_resume_from:
|
||||
try:
|
||||
from framework.server import compaction_status
|
||||
|
||||
await compaction_status.await_completion(
|
||||
_find_queen_session_dir(queen_resume_from),
|
||||
timeout=180.0,
|
||||
)
|
||||
except Exception:
|
||||
logger.debug(
|
||||
"await_compaction failed for %s — proceeding",
|
||||
queen_resume_from,
|
||||
exc_info=True,
|
||||
)
|
||||
|
||||
session = await self._create_session_core(
|
||||
session_id=_colony_session_id or queen_resume_from,
|
||||
model=model,
|
||||
@@ -1197,8 +1223,27 @@ class SessionManager:
|
||||
logger.info("Session '%s': shutdown reflection spawned", session_id)
|
||||
self._background_tasks.add(task)
|
||||
task.add_done_callback(self._background_tasks.discard)
|
||||
except Exception:
|
||||
logger.warning("Session '%s': failed to spawn shutdown reflection", session_id, exc_info=True)
|
||||
except RuntimeError as exc:
|
||||
# Most common when a session is stopped after the event loop
|
||||
# has closed (e.g. during server shutdown or from an atexit
|
||||
# handler). The reflection would have had nothing to write
|
||||
# anyway — no new turns since the last periodic reflection.
|
||||
logger.warning(
|
||||
"Session '%s': shutdown reflection skipped — event loop unavailable (%s). "
|
||||
"Normal during server shutdown; anything worth persisting was saved by the "
|
||||
"periodic reflection after the last turn.",
|
||||
session_id,
|
||||
exc,
|
||||
)
|
||||
except Exception as exc:
|
||||
logger.warning(
|
||||
"Session '%s': failed to spawn shutdown reflection: %s: %s. "
|
||||
"Check that queen_dir exists and session.llm is configured; full traceback follows.",
|
||||
session_id,
|
||||
type(exc).__name__,
|
||||
exc,
|
||||
exc_info=True,
|
||||
)
|
||||
|
||||
if session.queen_task is not None:
|
||||
session.queen_task.cancel()
|
||||
@@ -1323,6 +1368,13 @@ class SessionManager:
|
||||
_new_meta["agent_path"] = str(session.worker_path)
|
||||
_existing_meta.update(_new_meta)
|
||||
_meta_path.write_text(json.dumps(_existing_meta), encoding="utf-8")
|
||||
# Hydrate colony-spawned lock state from meta.json so the lock
|
||||
# survives server restart / cold-resume into a live session.
|
||||
if _existing_meta.get("colony_spawned") is True:
|
||||
session.colony_spawned = True
|
||||
_spawned_name = _existing_meta.get("spawned_colony_name")
|
||||
if isinstance(_spawned_name, str):
|
||||
session.spawned_colony_name = _spawned_name
|
||||
except OSError:
|
||||
pass
|
||||
|
||||
@@ -1483,8 +1535,46 @@ class SessionManager:
|
||||
tool_executor=queen_tool_executor,
|
||||
event_bus=session.event_bus,
|
||||
colony_id=session.id,
|
||||
# Wire the on-disk colony name and queen id so
|
||||
# ColonyRuntime auto-derives its override paths. DM sessions
|
||||
# have no colony_name (session.colony_name is None), which
|
||||
# keeps them out of the per-colony JSON store.
|
||||
colony_name=getattr(session, "colony_name", None),
|
||||
queen_id=getattr(session, "queen_name", None) or None,
|
||||
pipeline_stages=[], # queen pipeline runs in queen_orchestrator, not here
|
||||
)
|
||||
|
||||
# Per-colony tool allowlist, loaded from the colony's metadata.json
|
||||
# when this session is attached to a real forked colony. For pure
|
||||
# queen DM sessions (session.colony_name is None) we only capture
|
||||
# the MCP-origin set — the allowlist stays ``None`` so every MCP
|
||||
# tool passes through by default.
|
||||
try:
|
||||
mcp_tool_names_all: set[str] = set()
|
||||
mgr_catalog = getattr(self, "_mcp_tool_catalog", None)
|
||||
if isinstance(mgr_catalog, dict):
|
||||
for entries in mgr_catalog.values():
|
||||
for entry in entries:
|
||||
name = entry.get("name") if isinstance(entry, dict) else None
|
||||
if name:
|
||||
mcp_tool_names_all.add(name)
|
||||
enabled_mcp_tools: list[str] | None = None
|
||||
colony_name = getattr(session, "colony_name", None)
|
||||
if colony_name:
|
||||
# Colony tool allowlist lives in a dedicated tools.json
|
||||
# sidecar next to metadata.json. The helper migrates any
|
||||
# legacy field out of metadata.json on first read.
|
||||
from framework.host.colony_tools_config import load_colony_tools_config
|
||||
|
||||
enabled_mcp_tools = load_colony_tools_config(colony_name)
|
||||
colony.set_tool_allowlist(enabled_mcp_tools, mcp_tool_names_all)
|
||||
except Exception:
|
||||
logger.debug(
|
||||
"Colony allowlist bootstrap failed for session %s",
|
||||
session.id,
|
||||
exc_info=True,
|
||||
)
|
||||
|
||||
await colony.start()
|
||||
session.colony = colony
|
||||
|
||||
@@ -1577,8 +1667,28 @@ class SessionManager:
|
||||
# Resolve entry node for trigger target
|
||||
runner = getattr(session, "runner", None)
|
||||
colony_entry = runner.graph.entry_node if runner else None
|
||||
fire_times = getattr(session, "trigger_next_fire", {})
|
||||
fire_stats = getattr(session, "trigger_fire_stats", {})
|
||||
now_mono = time.monotonic()
|
||||
now_wall = time.time()
|
||||
|
||||
for t in triggers.values():
|
||||
# Merge ephemeral next-fire data + historical fire stats into
|
||||
# trigger_config so the UI can render a live-ticking countdown
|
||||
# and a "fired Nx · last 2m ago" badge. `next_fire_at` is epoch
|
||||
# milliseconds (wall clock) — the frontend anchors its ticker
|
||||
# on this. `next_fire_in` is kept for legacy consumers.
|
||||
config_out = dict(t.trigger_config)
|
||||
mono = fire_times.get(t.id)
|
||||
if mono is not None:
|
||||
remaining = max(0.0, mono - now_mono)
|
||||
config_out["next_fire_in"] = remaining
|
||||
config_out["next_fire_at"] = int((now_wall + remaining) * 1000)
|
||||
stats = fire_stats.get(t.id)
|
||||
if stats:
|
||||
config_out["fire_count"] = stats.get("fire_count", 0)
|
||||
if stats.get("last_fired_at") is not None:
|
||||
config_out["last_fired_at"] = stats["last_fired_at"]
|
||||
await session.event_bus.publish(
|
||||
AgentEvent(
|
||||
type=event_type,
|
||||
@@ -1586,7 +1696,7 @@ class SessionManager:
|
||||
data={
|
||||
"trigger_id": t.id,
|
||||
"trigger_type": t.trigger_type,
|
||||
"trigger_config": t.trigger_config,
|
||||
"trigger_config": config_out,
|
||||
"name": t.description or t.id,
|
||||
**({"entry_node": colony_entry} if colony_entry else {}),
|
||||
},
|
||||
@@ -1654,6 +1764,42 @@ class SessionManager:
|
||||
def list_sessions(self) -> list[Session]:
|
||||
return list(self._sessions.values())
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Skill override helpers — used by routes_skills to find every live
|
||||
# SkillsManager affected by a queen- or colony-scope mutation so a
|
||||
# single HTTP call can reload them all.
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def iter_queen_sessions(self, queen_id: str):
|
||||
"""Yield live sessions whose queen matches ``queen_id``."""
|
||||
for s in self._sessions.values():
|
||||
if getattr(s, "queen_name", None) == queen_id:
|
||||
yield s
|
||||
|
||||
def iter_colony_runtimes(
|
||||
self,
|
||||
*,
|
||||
queen_id: str | None = None,
|
||||
colony_name: str | None = None,
|
||||
):
|
||||
"""Yield live ``ColonyRuntime`` instances matching the filters.
|
||||
|
||||
``queen_id`` alone → every runtime whose ``queen_id`` matches
|
||||
(useful when the user toggles a queen-scope skill — all her
|
||||
colonies must reload). ``colony_name`` alone → the single
|
||||
runtime pinned to that colony. Both → intersection. No filters
|
||||
→ every live runtime (used by global ``/api/skills`` reload).
|
||||
"""
|
||||
for s in self._sessions.values():
|
||||
colony = getattr(s, "colony", None)
|
||||
if colony is None:
|
||||
continue
|
||||
if queen_id is not None and getattr(colony, "queen_id", None) != queen_id:
|
||||
continue
|
||||
if colony_name is not None and getattr(colony, "colony_name", None) != colony_name:
|
||||
continue
|
||||
yield colony
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Cold session helpers (disk-only, no live runtime required)
|
||||
# ------------------------------------------------------------------
|
||||
@@ -1698,6 +1844,8 @@ class SessionManager:
|
||||
# Read extra metadata written at session start
|
||||
agent_name: str | None = None
|
||||
agent_path: str | None = None
|
||||
colony_spawned: bool = False
|
||||
spawned_colony_name: str | None = None
|
||||
meta_path = queen_dir / "meta.json"
|
||||
if meta_path.exists():
|
||||
try:
|
||||
@@ -1705,6 +1853,10 @@ class SessionManager:
|
||||
agent_name = meta.get("agent_name")
|
||||
agent_path = meta.get("agent_path")
|
||||
created_at = meta.get("created_at") or created_at
|
||||
colony_spawned = bool(meta.get("colony_spawned"))
|
||||
_spawned = meta.get("spawned_colony_name")
|
||||
if isinstance(_spawned, str):
|
||||
spawned_colony_name = _spawned
|
||||
except (json.JSONDecodeError, OSError):
|
||||
pass
|
||||
|
||||
@@ -1716,6 +1868,8 @@ class SessionManager:
|
||||
"created_at": created_at,
|
||||
"agent_name": agent_name,
|
||||
"agent_path": agent_path,
|
||||
"colony_spawned": colony_spawned,
|
||||
"spawned_colony_name": spawned_colony_name,
|
||||
}
|
||||
|
||||
@staticmethod
|
||||
|
||||
@@ -14,6 +14,7 @@ from unittest.mock import AsyncMock, MagicMock
|
||||
import pytest
|
||||
from aiohttp.test_utils import TestClient, TestServer
|
||||
|
||||
from framework.host.execution_manager import ExecutionAlreadyRunningError
|
||||
from framework.host.triggers import TriggerDefinition
|
||||
from framework.llm.model_catalog import get_models_catalogue
|
||||
from framework.server import (
|
||||
@@ -89,8 +90,8 @@ class MockStream:
|
||||
_active_executors: dict = field(default_factory=dict)
|
||||
active_execution_ids: set = field(default_factory=set)
|
||||
|
||||
async def cancel_execution(self, execution_id: str, reason: str | None = None) -> bool:
|
||||
return execution_id in self._execution_tasks
|
||||
async def cancel_execution(self, execution_id: str, reason: str | None = None) -> str:
|
||||
return "cancelled" if execution_id in self._execution_tasks else "not_found"
|
||||
|
||||
|
||||
@dataclass
|
||||
@@ -780,6 +781,21 @@ class TestExecution:
|
||||
data = await resp.json()
|
||||
assert data["execution_id"] == "exec_test_123"
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_trigger_returns_409_when_execution_still_running(self):
|
||||
session = _make_session()
|
||||
session.colony_runtime.trigger = AsyncMock(side_effect=ExecutionAlreadyRunningError("default", ["session-1"]))
|
||||
app = _make_app_with_session(session)
|
||||
async with TestClient(TestServer(app)) as client:
|
||||
resp = await client.post(
|
||||
"/api/sessions/test_agent/trigger",
|
||||
json={"entry_point_id": "default", "input_data": {"msg": "hi"}},
|
||||
)
|
||||
assert resp.status == 409
|
||||
data = await resp.json()
|
||||
assert data["stream_id"] == "default"
|
||||
assert data["active_execution_ids"] == ["session-1"]
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_trigger_not_found(self):
|
||||
app = create_app()
|
||||
@@ -918,6 +934,7 @@ class TestExecution:
|
||||
data = await resp.json()
|
||||
assert data["stopped"] is False
|
||||
assert data["cancelled"] == []
|
||||
assert data["cancelling"] == []
|
||||
assert data["timers_paused"] is True
|
||||
|
||||
@pytest.mark.asyncio
|
||||
@@ -1027,6 +1044,22 @@ class TestStop:
|
||||
assert resp.status == 200
|
||||
data = await resp.json()
|
||||
assert data["stopped"] is True
|
||||
assert data["cancelling"] is False
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_stop_returns_accepted_while_execution_is_still_cancelling(self):
|
||||
session = _make_session()
|
||||
session.colony_runtime._mock_streams["default"].cancel_execution = AsyncMock(return_value="cancelling")
|
||||
app = _make_app_with_session(session)
|
||||
async with TestClient(TestServer(app)) as client:
|
||||
resp = await client.post(
|
||||
"/api/sessions/test_agent/stop",
|
||||
json={"execution_id": "exec_abc"},
|
||||
)
|
||||
assert resp.status == 202
|
||||
data = await resp.json()
|
||||
assert data["stopped"] is False
|
||||
assert data["cancelling"] is True
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_stop_not_found(self):
|
||||
@@ -1483,6 +1516,65 @@ class TestCredentials:
|
||||
data = await resp.json()
|
||||
assert data["credentials"] == []
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_list_credentials_skips_unreadable_encrypted_entry(self):
|
||||
from pydantic import SecretStr
|
||||
|
||||
from framework.credentials.models import CredentialDecryptionError, CredentialKey, CredentialObject
|
||||
|
||||
class BrokenStore:
|
||||
def list_credentials(self):
|
||||
return ["good_cred", "bad_cred"]
|
||||
|
||||
def get_credential(self, credential_id, refresh_if_needed=False):
|
||||
if credential_id == "bad_cred":
|
||||
raise CredentialDecryptionError("bad encrypted file")
|
||||
return CredentialObject(
|
||||
id=credential_id,
|
||||
keys={"api_key": CredentialKey(name="api_key", value=SecretStr("secret"))},
|
||||
)
|
||||
|
||||
app = create_app()
|
||||
app["credential_store"] = BrokenStore()
|
||||
|
||||
async with TestClient(TestServer(app)) as client:
|
||||
resp = await client.get("/api/credentials")
|
||||
assert resp.status == 200
|
||||
data = await resp.json()
|
||||
|
||||
assert [c["credential_id"] for c in data["credentials"]] == ["good_cred"]
|
||||
assert data["unreadable_credentials"] == ["bad_cred"]
|
||||
assert "secret" not in json.dumps(data)
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_get_credential_unreadable_returns_recoverable_conflict(self):
|
||||
from framework.credentials.models import CredentialDecryptionError
|
||||
|
||||
class BrokenStore:
|
||||
def get_credential(self, credential_id, refresh_if_needed=False):
|
||||
raise CredentialDecryptionError("bad encrypted file")
|
||||
|
||||
app = create_app()
|
||||
app["credential_store"] = BrokenStore()
|
||||
|
||||
async with TestClient(TestServer(app)) as client:
|
||||
resp = await client.get("/api/credentials/bad_cred")
|
||||
data = await resp.json()
|
||||
|
||||
assert resp.status == 409
|
||||
assert data["credential_id"] == "bad_cred"
|
||||
assert data["recoverable"] is True
|
||||
|
||||
def test_specs_availability_treats_decryption_error_as_unavailable(self):
|
||||
from framework.credentials.models import CredentialDecryptionError
|
||||
from framework.server.routes_credentials import _is_available_for_specs
|
||||
|
||||
class BrokenStore:
|
||||
def is_available(self, credential_id):
|
||||
raise CredentialDecryptionError("bad encrypted file")
|
||||
|
||||
assert _is_available_for_specs(BrokenStore(), "exa_search") is False
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_save_and_list_credential(self):
|
||||
app = self._make_app()
|
||||
|
||||
@@ -0,0 +1,300 @@
|
||||
"""Tests for the per-colony MCP tool allowlist filter + routes.
|
||||
|
||||
Covers:
|
||||
1. ``ColonyRuntime`` filter semantics (default-allow, allowlist, empty,
|
||||
lifecycle passes through).
|
||||
2. routes_colony_tools round trip (GET/PATCH, validation, 404).
|
||||
3. Colony index route for the Tool Library picker.
|
||||
|
||||
Routes never touch the real ``~/.hive/colonies`` tree — we redirect
|
||||
``COLONIES_DIR`` into ``tmp_path`` via monkeypatch.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Any
|
||||
|
||||
import pytest
|
||||
from aiohttp import web
|
||||
from aiohttp.test_utils import TestClient, TestServer
|
||||
|
||||
from framework.host.colony_runtime import ColonyRuntime
|
||||
from framework.llm.provider import Tool
|
||||
from framework.server import routes_colony_tools
|
||||
|
||||
|
||||
def _tool(name: str) -> Tool:
|
||||
return Tool(name=name, description=f"desc of {name}", parameters={"type": "object"})
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# ColonyRuntime filter unit tests
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _bare_runtime() -> ColonyRuntime:
|
||||
rt = ColonyRuntime.__new__(ColonyRuntime)
|
||||
rt._enabled_mcp_tools = None
|
||||
rt._mcp_tool_names_all = set()
|
||||
return rt
|
||||
|
||||
|
||||
class TestColonyFilter:
|
||||
def test_default_is_noop(self):
|
||||
rt = _bare_runtime()
|
||||
tools = [_tool("mcp_a"), _tool("lc_b")]
|
||||
assert rt._apply_tool_allowlist(tools) == tools
|
||||
|
||||
def test_allowlist_gates_mcp_only(self):
|
||||
rt = _bare_runtime()
|
||||
rt._mcp_tool_names_all = {"mcp_a", "mcp_b"}
|
||||
rt._enabled_mcp_tools = ["mcp_a"]
|
||||
tools = [_tool("mcp_a"), _tool("mcp_b"), _tool("lc_c")]
|
||||
names = [t.name for t in rt._apply_tool_allowlist(tools)]
|
||||
assert names == ["mcp_a", "lc_c"]
|
||||
|
||||
def test_empty_allowlist_keeps_lifecycle(self):
|
||||
rt = _bare_runtime()
|
||||
rt._mcp_tool_names_all = {"mcp_a", "mcp_b"}
|
||||
rt._enabled_mcp_tools = []
|
||||
tools = [_tool("mcp_a"), _tool("mcp_b"), _tool("lc_c")]
|
||||
names = [t.name for t in rt._apply_tool_allowlist(tools)]
|
||||
assert names == ["lc_c"]
|
||||
|
||||
def test_setter_mutates_live_state(self):
|
||||
rt = _bare_runtime()
|
||||
rt.set_tool_allowlist(["x"], {"x", "y"})
|
||||
assert rt._enabled_mcp_tools == ["x"]
|
||||
assert rt._mcp_tool_names_all == {"x", "y"}
|
||||
|
||||
# Passing None on allowlist clears gating; mcp_tool_names_all
|
||||
# defaults to "keep current" so a subsequent caller doesn't need
|
||||
# to repeat the set.
|
||||
rt.set_tool_allowlist(None)
|
||||
assert rt._enabled_mcp_tools is None
|
||||
assert rt._mcp_tool_names_all == {"x", "y"}
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Route round-trip tests
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@dataclass
|
||||
class _FakeSession:
|
||||
colony_name: str
|
||||
colony: Any = None
|
||||
colony_runtime: Any = None
|
||||
id: str = "sess-1"
|
||||
|
||||
|
||||
@dataclass
|
||||
class _FakeManager:
|
||||
_sessions: dict = field(default_factory=dict)
|
||||
_mcp_tool_catalog: dict = field(default_factory=dict)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def colony_dir(tmp_path, monkeypatch):
|
||||
"""Point COLONIES_DIR into a tmp tree and seed a colony."""
|
||||
colonies = tmp_path / "colonies"
|
||||
colonies.mkdir()
|
||||
monkeypatch.setattr("framework.host.colony_metadata.COLONIES_DIR", colonies)
|
||||
monkeypatch.setattr("framework.host.colony_tools_config.COLONIES_DIR", colonies)
|
||||
|
||||
name = "my_colony"
|
||||
cdir = colonies / name
|
||||
cdir.mkdir()
|
||||
(cdir / "metadata.json").write_text(
|
||||
json.dumps(
|
||||
{
|
||||
"colony_name": name,
|
||||
"queen_name": "queen_technology",
|
||||
"created_at": "2026-04-20T00:00:00+00:00",
|
||||
}
|
||||
)
|
||||
)
|
||||
return colonies, name
|
||||
|
||||
|
||||
async def _app(manager: _FakeManager) -> web.Application:
    """Build a minimal aiohttp app wired with the colony-tools routes."""
    application = web.Application()
    application["manager"] = manager
    routes_colony_tools.register_routes(application)
    return application
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_get_tools_default_allow(colony_dir):
    """With no allowlist stored, GET reports every catalog tool enabled."""
    _, name = colony_dir
    catalog = {
        "coder-tools": [
            {"name": "read_file", "description": "read", "input_schema": {}},
            {"name": "write_file", "description": "write", "input_schema": {}},
        ],
    }
    manager = _FakeManager(_mcp_tool_catalog=catalog)
    app = await _app(manager)

    async with TestClient(TestServer(app)) as client:
        response = await client.get(f"/api/colony/{name}/tools")
        assert response.status == 200
        payload = await response.json()

        # No allowlist persisted → default-allow, nothing stale.
        assert payload["enabled_mcp_tools"] is None
        assert payload["stale"] is False
        listed = payload["mcp_servers"][0]["tools"]
        assert all(entry["enabled"] for entry in listed)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_patch_persists_and_validates(colony_dir):
    """PATCH writes the allowlist to tools.json and rejects unknown names."""
    colonies_dir, name = colony_dir
    manager = _FakeManager(
        _mcp_tool_catalog={
            "coder-tools": [
                {"name": "read_file", "description": "", "input_schema": {}},
                {"name": "write_file", "description": "", "input_schema": {}},
            ]
        }
    )
    app = await _app(manager)
    sidecar_path = colonies_dir / name / "tools.json"
    meta_path = colonies_dir / name / "metadata.json"

    async with TestClient(TestServer(app)) as client:
        response = await client.patch(f"/api/colony/{name}/tools", json={"enabled_mcp_tools": ["read_file"]})
        assert response.status == 200
        payload = await response.json()
        assert payload["enabled_mcp_tools"] == ["read_file"]

        # The allowlist lands in tools.json; metadata.json stays clean.
        sidecar = json.loads(sidecar_path.read_text())
        assert sidecar["enabled_mcp_tools"] == ["read_file"]
        assert "updated_at" in sidecar
        assert "enabled_mcp_tools" not in json.loads(meta_path.read_text())

        # A follow-up GET reports per-tool enabled flags accordingly.
        response = await client.get(f"/api/colony/{name}/tools")
        payload = await response.json()
        by_name = {t["name"]: t for t in payload["mcp_servers"][0]["tools"]}
        assert by_name["read_file"]["enabled"] is True
        assert by_name["write_file"]["enabled"] is False

        # Names missing from the catalog are rejected with a 400.
        response = await client.patch(f"/api/colony/{name}/tools", json={"enabled_mcp_tools": ["ghost"]})
        assert response.status == 400
        assert "ghost" in (await response.json()).get("unknown", [])
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_patch_refreshes_live_runtime(colony_dir):
    """PATCH pushes the new allowlist into a live colony runtime."""
    _, name = colony_dir

    runtime = _bare_runtime()
    runtime._mcp_tool_names_all = {"read_file", "write_file"}
    runtime.set_tool_allowlist(None)

    live_session = _FakeSession(colony_name=name, colony=runtime)
    manager = _FakeManager(
        _sessions={live_session.id: live_session},
        _mcp_tool_catalog={
            "coder-tools": [
                {"name": "read_file", "description": "", "input_schema": {}},
                {"name": "write_file", "description": "", "input_schema": {}},
            ]
        },
    )

    app = await _app(manager)
    async with TestClient(TestServer(app)) as client:
        response = await client.patch(f"/api/colony/{name}/tools", json={"enabled_mcp_tools": ["read_file"]})
        assert response.status == 200
        assert (await response.json())["refreshed_runtimes"] == 1
        # Live runtime state updated without a restart.
        assert runtime._enabled_mcp_tools == ["read_file"]
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_404_for_unknown_colony(colony_dir):
    """Both GET and PATCH 404 when the colony directory does not exist."""
    app = await _app(_FakeManager())
    async with TestClient(TestServer(app)) as client:
        response = await client.get("/api/colony/unknown/tools")
        assert response.status == 404

        response = await client.patch("/api/colony/unknown/tools", json={"enabled_mcp_tools": None})
        assert response.status == 404
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_tools_index_lists_colonies(colony_dir):
    """The index endpoint enumerates colonies with allowlist metadata."""
    _, name = colony_dir
    app = await _app(_FakeManager())
    async with TestClient(TestServer(app)) as client:
        response = await client.get("/api/colonies/tools-index")
        assert response.status == 200
        payload = await response.json()
        by_name = {c["name"]: c for c in payload["colonies"]}
        assert name in by_name
        assert by_name[name]["queen_name"] == "queen_technology"
        assert by_name[name]["has_allowlist"] is False
|
||||
|
||||
|
||||
def test_queen_allowlist_inherits_into_new_colony(tmp_path, monkeypatch):
    """A colony forked with a curated queen inherits her allowlist.

    Exercises the inheritance hook in
    ``routes_execution.fork_session_into_colony`` without the full fork
    machinery: call ``update_colony_tools_config`` exactly as the hook
    does, then check the colony's ``tools.json`` mirrors the queen's
    live list.
    """
    colonies_root = tmp_path / "colonies"
    colonies_root.mkdir()
    monkeypatch.setattr("framework.host.colony_tools_config.COLONIES_DIR", colonies_root)

    from framework.host.colony_tools_config import (
        load_colony_tools_config,
        update_colony_tools_config,
    )

    child = "forked_child"
    (colonies_root / child).mkdir()

    # The queen's live allowlist (e.g. a role default already resolved
    # to a concrete list); the hook copies it verbatim.
    live_allowlist = ["read_file", "web_scrape", "csv_read"]
    update_colony_tools_config(child, list(live_allowlist))

    assert load_colony_tools_config(child) == live_allowlist
|
||||
|
||||
|
||||
def test_legacy_metadata_field_migrates_to_sidecar(colony_dir):
    """A legacy enabled_mcp_tools field in metadata.json is hoisted to tools.json."""
    colonies_dir, name = colony_dir
    meta_path = colonies_dir / name / "metadata.json"
    sidecar_path = colonies_dir / name / "tools.json"

    # Plant the legacy field inside metadata.json.
    legacy = json.loads(meta_path.read_text())
    legacy["enabled_mcp_tools"] = ["read_file"]
    meta_path.write_text(json.dumps(legacy))

    from framework.host.colony_tools_config import load_colony_tools_config

    # First load performs the migration.
    assert load_colony_tools_config(name) == ["read_file"]
    assert sidecar_path.exists()
    assert json.loads(sidecar_path.read_text())["enabled_mcp_tools"] == ["read_file"]

    # The field is gone from metadata.json; provenance keys survive.
    migrated = json.loads(meta_path.read_text())
    assert "enabled_mcp_tools" not in migrated
    assert migrated["queen_name"] == "queen_technology"

    # Second load reads the sidecar directly.
    assert load_colony_tools_config(name) == ["read_file"]
|
||||
@@ -0,0 +1,239 @@
|
||||
"""Tests for the MCP server CRUD HTTP routes.
|
||||
|
||||
Monkey-patches ``MCPRegistry`` inside ``routes_mcp`` so the HTTP layer is
|
||||
exercised without reading or writing ``~/.hive/mcp_registry/installed.json``
|
||||
or spawning actual subprocesses.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any
|
||||
|
||||
import pytest
|
||||
from aiohttp import web
|
||||
from aiohttp.test_utils import TestClient, TestServer
|
||||
|
||||
from framework.loader.mcp_errors import MCPError, MCPErrorCode
|
||||
from framework.server import routes_mcp
|
||||
|
||||
|
||||
class _FakeRegistry:
|
||||
"""Stand-in for MCPRegistry — just enough surface for the routes."""
|
||||
|
||||
def __init__(self) -> None:
|
||||
self._servers: dict[str, dict[str, Any]] = {
|
||||
"built-in-seed": {
|
||||
"source": "registry",
|
||||
"transport": "stdio",
|
||||
"enabled": True,
|
||||
"manifest": {"description": "Factory-seeded server", "tools": []},
|
||||
"last_health_status": "healthy",
|
||||
"last_error": None,
|
||||
"last_health_check_at": None,
|
||||
}
|
||||
}
|
||||
|
||||
def initialize(self) -> None: # noqa: D401 — registry idempotent init
|
||||
return
|
||||
|
||||
def list_installed(self) -> list[dict[str, Any]]:
|
||||
return [{"name": name, **entry} for name, entry in self._servers.items()]
|
||||
|
||||
def get_server(self, name: str) -> dict | None:
|
||||
if name not in self._servers:
|
||||
return None
|
||||
return {"name": name, **self._servers[name]}
|
||||
|
||||
def add_local(self, *, name: str, transport: str, **kwargs: Any) -> dict:
|
||||
if name in self._servers:
|
||||
raise MCPError(
|
||||
code=MCPErrorCode.MCP_INSTALL_FAILED,
|
||||
what=f"Server '{name}' already exists",
|
||||
why="A server with this name is already registered locally.",
|
||||
fix=f"Run: hive mcp remove {name}",
|
||||
)
|
||||
entry = {
|
||||
"source": "local",
|
||||
"transport": transport,
|
||||
"enabled": True,
|
||||
"manifest": {"description": kwargs.get("description") or ""},
|
||||
"last_health_status": None,
|
||||
"last_error": None,
|
||||
"last_health_check_at": None,
|
||||
}
|
||||
self._servers[name] = entry
|
||||
return entry
|
||||
|
||||
def remove(self, name: str) -> None:
|
||||
if name not in self._servers:
|
||||
raise MCPError(
|
||||
code=MCPErrorCode.MCP_INSTALL_FAILED,
|
||||
what=f"Cannot remove server '{name}'",
|
||||
why="Server is not installed.",
|
||||
fix="Run: hive mcp list",
|
||||
)
|
||||
del self._servers[name]
|
||||
|
||||
def enable(self, name: str) -> None:
|
||||
if name not in self._servers:
|
||||
raise MCPError(
|
||||
code=MCPErrorCode.MCP_INSTALL_FAILED,
|
||||
what="not found",
|
||||
why="not found",
|
||||
fix="x",
|
||||
)
|
||||
self._servers[name]["enabled"] = True
|
||||
|
||||
def disable(self, name: str) -> None:
|
||||
if name not in self._servers:
|
||||
raise MCPError(
|
||||
code=MCPErrorCode.MCP_INSTALL_FAILED,
|
||||
what="not found",
|
||||
why="not found",
|
||||
fix="x",
|
||||
)
|
||||
self._servers[name]["enabled"] = False
|
||||
|
||||
def health_check(self, name: str) -> dict[str, Any]:
|
||||
if name not in self._servers:
|
||||
raise MCPError(
|
||||
code=MCPErrorCode.MCP_HEALTH_FAILED,
|
||||
what="not found",
|
||||
why="not found",
|
||||
fix="x",
|
||||
)
|
||||
return {"name": name, "status": "healthy", "tools": 3, "error": None}
|
||||
|
||||
|
||||
@pytest.fixture
def registry(monkeypatch):
    """Swap routes_mcp's registry factory for a fresh in-memory fake."""
    fake = _FakeRegistry()
    monkeypatch.setattr(routes_mcp, "_registry", lambda: fake)
    return fake
|
||||
|
||||
|
||||
async def _make_app() -> web.Application:
    """aiohttp app with only the MCP CRUD routes registered."""
    application = web.Application()
    routes_mcp.register_routes(application)
    return application
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_list_servers_returns_built_in(registry):
    """GET /servers merges the registry fake with package-baked entries."""
    app = await _make_app()
    async with TestClient(TestServer(app)) as client:
        resp = await client.get("/api/mcp/servers")
        assert resp.status == 200
        servers = (await resp.json())["servers"]

        # The fake registry contributes one entry; the route also merges
        # package-baked entries from core/framework/agents/queen/mcp_servers.json
        # so the UI matches what the queen actually loads.
        names = {s["name"] for s in servers}
        assert "built-in-seed" in names
        source_by_name = {s["name"]: s["source"] for s in servers}
        assert source_by_name.get("built-in-seed") == "registry"

        # Package-baked servers (coder-tools/gcu-tools/hive_tools) carry
        # source == "built-in" and must not be removable.
        baked = [s for s in servers if s["source"] == "built-in"]
        assert baked, "expected at least one package-baked MCP server"
        assert all(s.get("removable") is False for s in baked)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_add_local_server(registry):
    """POST creates a local server and it shows up in the listing."""
    app = await _make_app()
    request_body = {
        "name": "my-tool",
        "transport": "stdio",
        "command": "echo",
        "args": ["hi"],
        "description": "says hi",
    }
    async with TestClient(TestServer(app)) as client:
        resp = await client.post("/api/mcp/servers", json=request_body)
        assert resp.status == 201
        created = (await resp.json())["server"]
        assert created["name"] == "my-tool"
        assert created["source"] == "local"

        # The new server is visible on a subsequent list call.
        resp = await client.get("/api/mcp/servers")
        listed = [s["name"] for s in (await resp.json())["servers"]]
        assert "my-tool" in listed
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_add_rejects_duplicate(registry):
    """A second registration of the same name must 409 with guidance.

    Improvement over the original: the loop only ever asserted on the
    LAST response, so a broken create path (first POST not 201) showed
    up as a confusing 409 mismatch instead of a clear setup failure.
    Assert each step explicitly.
    """
    app = await _make_app()
    payload = {"name": "dup", "transport": "stdio", "command": "x"}
    async with TestClient(TestServer(app)) as client:
        # Precondition: the create path itself works.
        first = await client.post("/api/mcp/servers", json=payload)
        assert first.status == 201

        # Duplicate name → 409 with a human-readable error and a fix hint.
        second = await client.post("/api/mcp/servers", json=payload)
        assert second.status == 409
        body = await second.json()
        assert "already exists" in body["error"].lower()
        assert body["fix"]
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_add_rejects_invalid_transport(registry):
    """An unrecognized transport value is rejected with a 400."""
    app = await _make_app()
    async with TestClient(TestServer(app)) as client:
        response = await client.post(
            "/api/mcp/servers",
            json={"name": "x", "transport": "nope"},
        )
        assert response.status == 400
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_enable_disable_cycle(registry):
    """Disable then re-enable a local server via the toggle endpoints."""
    app = await _make_app()
    # Seed a local server to toggle.
    registry.add_local(name="local-one", transport="stdio", command="x")

    async with TestClient(TestServer(app)) as client:
        resp = await client.post("/api/mcp/servers/local-one/disable")
        assert resp.status == 200
        assert (await resp.json())["enabled"] is False
        # Flag flipped in the backing store, not just the response body.
        assert registry._servers["local-one"]["enabled"] is False

        resp = await client.post("/api/mcp/servers/local-one/enable")
        assert resp.status == 200
        assert (await resp.json())["enabled"] is True
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_remove_local_only(registry):
    """DELETE removes only local servers: built-ins 400, missing 404."""
    app = await _make_app()
    registry.add_local(name="local-two", transport="stdio", command="x")

    async with TestClient(TestServer(app)) as client:
        # Built-ins are protected from deletion.
        resp = await client.delete("/api/mcp/servers/built-in-seed")
        assert resp.status == 400

        # Unknown names yield a 404.
        resp = await client.delete("/api/mcp/servers/ghost")
        assert resp.status == 404

        # Local servers delete cleanly.
        resp = await client.delete("/api/mcp/servers/local-two")
        assert resp.status == 200
        assert "local-two" not in registry._servers
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_health_check(registry, monkeypatch):
    """POST /health relays the registry's health report.

    NOTE(review): the ``monkeypatch`` fixture is requested but unused —
    kept to preserve the signature; confirm it can be dropped.
    """
    app = await _make_app()
    registry.add_local(name="pingable", transport="stdio", command="x")

    async with TestClient(TestServer(app)) as client:
        response = await client.post("/api/mcp/servers/pingable/health")
        assert response.status == 200
        report = await response.json()
        assert report["status"] == "healthy"
        assert report["tools"] == 3
|
||||
@@ -0,0 +1,443 @@
|
||||
"""Tests for the per-queen MCP tool allowlist filter + routes.
|
||||
|
||||
Covers:
|
||||
1. QueenPhaseState filter semantics (default-allow, allowlist, empty, phase-
|
||||
isolation, memo identity for LLM prompt-cache stability).
|
||||
2. routes_queen_tools round trip (GET, PATCH, validation, live-session
|
||||
hot-reload).
|
||||
|
||||
Route tests monkey-patch a tiny queen profile + manager catalog; they never
|
||||
spawn an MCP subprocess.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Any
|
||||
from unittest.mock import MagicMock
|
||||
|
||||
import pytest
|
||||
import yaml
|
||||
from aiohttp import web
|
||||
from aiohttp.test_utils import TestClient, TestServer
|
||||
|
||||
from framework.llm.provider import Tool
|
||||
from framework.server import routes_queen_tools
|
||||
from framework.tools.queen_lifecycle_tools import QueenPhaseState
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# QueenPhaseState filter — pure unit tests
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _tool(name: str) -> Tool:
    """Build a minimal Tool with an object-typed parameter schema."""
    description = f"desc of {name}"
    return Tool(name=name, description=description, parameters={"type": "object"})
|
||||
|
||||
|
||||
class TestPhaseStateFilter:
    """Unit tests for the per-phase MCP allowlist filter."""

    def _state(self, tools, mcp_names, enabled):
        # Helper: independent-phase state with the given tool list,
        # MCP-name universe and allowlist, filter rebuilt.
        state = QueenPhaseState(phase="independent")
        state.independent_tools = tools
        state.mcp_tool_names_all = mcp_names
        state.enabled_mcp_tools = enabled
        state.rebuild_independent_filter()
        return state

    def test_default_allow_returns_every_tool(self):
        state = self._state(
            [_tool("mcp_a"), _tool("mcp_b"), _tool("lc_c")], {"mcp_a", "mcp_b"}, None
        )
        assert [t.name for t in state.get_current_tools()] == ["mcp_a", "mcp_b", "lc_c"]

    def test_allowlist_keeps_listed_mcp_plus_all_lifecycle(self):
        state = self._state(
            [_tool("mcp_a"), _tool("mcp_b"), _tool("lc_c")], {"mcp_a", "mcp_b"}, ["mcp_a"]
        )
        assert [t.name for t in state.get_current_tools()] == ["mcp_a", "lc_c"]

    def test_empty_allowlist_keeps_only_lifecycle(self):
        state = self._state(
            [_tool("mcp_a"), _tool("mcp_b"), _tool("lc_c")], {"mcp_a", "mcp_b"}, []
        )
        assert [t.name for t in state.get_current_tools()] == ["lc_c"]

    def test_filter_isolated_to_independent_phase(self):
        # Built inline (not via _state) to preserve the exact field
        # assignment order of the original test.
        state = QueenPhaseState(phase="independent")
        state.independent_tools = [_tool("mcp_a"), _tool("lc_c")]
        state.working_tools = [_tool("mcp_a"), _tool("lc_c")]
        state.mcp_tool_names_all = {"mcp_a"}
        state.enabled_mcp_tools = []
        state.rebuild_independent_filter()

        # Independent phase: MCP tools gated out, lifecycle retained.
        assert [t.name for t in state.get_current_tools()] == ["lc_c"]

        # Switching phase sidesteps the filter entirely.
        state.phase = "working"
        assert [t.name for t in state.get_current_tools()] == ["mcp_a", "lc_c"]

    def test_memo_returns_stable_identity_for_prompt_cache(self):
        """Same Python list object across turns → LLM prompt cache stays warm."""
        state = self._state([_tool("mcp_a"), _tool("lc_c")], {"mcp_a"}, None)

        first = state.get_current_tools()
        second = state.get_current_tools()
        assert first is second, "memoized list must be the same object across turns"

        # A rebuild must hand back a fresh object so downstream caches
        # correctly invalidate.
        state.enabled_mcp_tools = ["mcp_a"]
        state.rebuild_independent_filter()
        third = state.get_current_tools()
        assert third is not first
        assert [t.name for t in third] == ["mcp_a", "lc_c"]
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Route round-trip tests
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@dataclass
class _FakeSession:
    """Live-session stand-in: only the fields the queen-tools routes
    touch during the hot-reload path."""

    queen_name: str
    phase_state: QueenPhaseState
    colony_runtime: Any = None  # NOTE(review): not set by these tests — confirm routes read it
    id: str = "sess-1"          # key under the manager's _sessions dict
    _queen_tool_registry: Any = None  # set per-test to a fake registry
|
||||
|
||||
|
||||
@dataclass
class _FakeManager:
    """Fake session manager: a session map plus the MCP tool catalog the
    queen-tools routes consult when validating allowlists."""

    _sessions: dict = field(default_factory=dict)
    _mcp_tool_catalog: dict = field(default_factory=dict)
|
||||
|
||||
|
||||
@pytest.fixture
def queen_dir(tmp_path, monkeypatch):
    """Redirect queen profile + tools storage into a tmp dir.

    Returns ``(queens_root, queen_id)`` for the seeded queen.
    """
    queens_root = tmp_path / "queens"
    queens_root.mkdir()
    # Both modules hold their own QUEENS_DIR binding; patch each one.
    for target in (
        "framework.agents.queen.queen_profiles.QUEENS_DIR",
        "framework.agents.queen.queen_tools_config.QUEENS_DIR",
    ):
        monkeypatch.setattr(target, queens_root)

    queen_id = "queen_technology"
    profile_dir = queens_root / queen_id
    profile_dir.mkdir()
    profile = {"name": "Alexandra", "title": "Head of Technology"}
    (profile_dir / "profile.yaml").write_text(yaml.safe_dump(profile))
    return queens_root, queen_id
|
||||
|
||||
|
||||
async def _make_app(*, manager: _FakeManager) -> web.Application:
    """aiohttp app carrying the fake manager + queen-tools routes."""
    application = web.Application()
    application["manager"] = manager
    routes_queen_tools.register_routes(application)
    return application
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_get_tools_default_allows_everything_for_unknown_queen(queen_dir, monkeypatch):
    """Queens NOT in the role-default table fall back to allow-all."""
    monkeypatch.setattr(routes_queen_tools, "ensure_default_queens", lambda: None)

    queens_dir, _ = queen_dir
    # A queen id absent from QUEEN_DEFAULT_CATEGORIES exercises the
    # fallback-to-allow-all path.
    custom_id = "queen_custom_unknown"
    (queens_dir / custom_id).mkdir()
    (queens_dir / custom_id / "profile.yaml").write_text(
        yaml.safe_dump({"name": "Custom", "title": "Custom Role"})
    )

    manager = _FakeManager()
    manager._mcp_tool_catalog = {
        "coder-tools": [
            {"name": "read_file", "description": "read", "input_schema": {}},
            {"name": "write_file", "description": "write", "input_schema": {}},
        ],
    }

    app = await _make_app(manager=manager)
    async with TestClient(TestServer(app)) as client:
        response = await client.get(f"/api/queen/{custom_id}/tools")
        assert response.status == 200
        payload = await response.json()

    assert payload["enabled_mcp_tools"] is None
    assert payload["is_role_default"] is True  # no sidecar → default-allow
    assert payload["stale"] is False
    by_server = {s["name"]: s for s in payload["mcp_servers"]}
    assert set(by_server) == {"coder-tools"}
    assert all(tool["enabled"] is True for tool in by_server["coder-tools"]["tools"])
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_get_tools_applies_role_default(queen_dir, monkeypatch):
    """Known persona queens get their role-based default allowlist."""
    monkeypatch.setattr(routes_queen_tools, "ensure_default_queens", lambda: None)
    _, queen_id = queen_dir  # queen_technology — has a role default

    manager = _FakeManager()
    # Catalog spans the categories the role default references, so the
    # response mirrors what the queen would actually see on boot.
    manager._mcp_tool_catalog = {
        "coder-tools": [
            {"name": "read_file", "description": "", "input_schema": {}},
            {"name": "port_scan", "description": "", "input_schema": {}},  # security
            {"name": "excel_read", "description": "", "input_schema": {}},  # data
            {"name": "fluffy_unknown_tool", "description": "", "input_schema": {}},
        ],
    }

    app = await _make_app(manager=manager)
    async with TestClient(TestServer(app)) as client:
        response = await client.get(f"/api/queen/{queen_id}/tools")
        assert response.status == 200
        payload = await response.json()

    # queen_technology's role default includes file_read, data, security, etc.
    assert payload["is_role_default"] is True
    enabled = set(payload["enabled_mcp_tools"] or [])
    assert "read_file" in enabled
    assert "port_scan" in enabled  # technology role includes security
    assert "excel_read" in enabled
    # Tools outside every category (and every @server: expansion the
    # role references) are NOT part of the default.
    assert "fluffy_unknown_tool" not in enabled
|
||||
|
||||
|
||||
def test_resolve_queen_default_tools_expands_server_shorthand():
    """@server:NAME shorthand expands against the provided catalog."""
    from framework.agents.queen.queen_tools_defaults import resolve_queen_default_tools

    catalog = {
        "gcu-tools": [
            {"name": "browser_navigate"},
            {"name": "browser_click"},
        ],
    }
    # queen_brand_design uses "browser" category → expands via @server:gcu-tools.
    resolved = resolve_queen_default_tools("queen_brand_design", catalog)
    assert resolved is not None
    assert "browser_navigate" in resolved
    assert "browser_click" in resolved
|
||||
|
||||
|
||||
def test_resolve_queen_default_tools_unknown_queen_returns_none():
    """Unrecognized queen ids carry no role default at all."""
    from framework.agents.queen.queen_tools_defaults import resolve_queen_default_tools

    assert resolve_queen_default_tools("queen_made_up", {}) is None
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_patch_persists_and_validates(queen_dir, monkeypatch):
    """PATCH round trip: persist, reset with null, reject unknown names."""
    monkeypatch.setattr(routes_queen_tools, "ensure_default_queens", lambda: None)
    queens_dir, queen_id = queen_dir

    manager = _FakeManager()
    manager._mcp_tool_catalog = {
        "coder-tools": [
            {"name": "read_file", "description": "", "input_schema": {}},
            {"name": "write_file", "description": "", "input_schema": {}},
        ]
    }

    app = await _make_app(manager=manager)
    sidecar_path = queens_dir / queen_id / "tools.json"
    profile_path = queens_dir / queen_id / "profile.yaml"

    async with TestClient(TestServer(app)) as client:
        # Happy path: save an explicit allowlist.
        response = await client.patch(
            f"/api/queen/{queen_id}/tools",
            json={"enabled_mcp_tools": ["read_file"]},
        )
        assert response.status == 200
        payload = await response.json()
        assert payload["enabled_mcp_tools"] == ["read_file"]

        # Sidecar persisted; the profile YAML is untouched by a tools PATCH.
        sidecar = json.loads(sidecar_path.read_text())
        assert sidecar["enabled_mcp_tools"] == ["read_file"]
        assert "updated_at" in sidecar
        assert "enabled_mcp_tools" not in yaml.safe_load(profile_path.read_text())

        # GET now reflects the explicit (non-default) allowlist.
        response = await client.get(f"/api/queen/{queen_id}/tools")
        payload = await response.json()
        assert payload["is_role_default"] is False  # user has explicitly saved
        by_name = {t["name"]: t for t in payload["mcp_servers"][0]["tools"]}
        assert by_name["read_file"]["enabled"] is True
        assert by_name["write_file"]["enabled"] is False

        # A null allowlist resets to default-allow.
        response = await client.patch(f"/api/queen/{queen_id}/tools", json={"enabled_mcp_tools": None})
        assert response.status == 200
        assert (await response.json())["enabled_mcp_tools"] is None
        assert json.loads(sidecar_path.read_text())["enabled_mcp_tools"] is None

        # Unknown tool name → 400; the sidecar stays unchanged.
        response = await client.patch(
            f"/api/queen/{queen_id}/tools",
            json={"enabled_mcp_tools": ["nope_not_a_tool"]},
        )
        assert response.status == 400
        assert "nope_not_a_tool" in (await response.json()).get("unknown", [])
        assert json.loads(sidecar_path.read_text())["enabled_mcp_tools"] is None
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_patch_hot_reloads_live_session(queen_dir, monkeypatch):
    """PATCH on a queen with a live session refreshes her phase state in place."""
    monkeypatch.setattr(routes_queen_tools, "ensure_default_queens", lambda: None)
    _, queen_id = queen_dir

    # Fake live session whose phase state carries a gateable tool list,
    # plus a fake registry so _catalog_from_live_session can enumerate
    # tools. Fix over the original: get_tools returns the real Tool map
    # directly. The original defined get_tools with MagicMock(name=n)
    # — the ``name`` kwarg sets the mock's repr name, not a ``.name``
    # attribute — and then immediately shadowed the method with a
    # lambda, leaving dead, misleading code behind.
    class _FakeRegistry:
        def __init__(self, server_map, tools_by_name):
            self._mcp_server_tools = server_map
            self._tools_by_name = tools_by_name

        def get_tools(self):
            # Real Tool objects so name/description plumbing works.
            return self._tools_by_name

    tools_by_name = {"read_file": _tool("read_file"), "write_file": _tool("write_file")}
    registry = _FakeRegistry(
        server_map={"coder-tools": {"read_file", "write_file"}},
        tools_by_name=tools_by_name,
    )

    phase_state = QueenPhaseState(phase="independent")
    phase_state.independent_tools = [tools_by_name["read_file"], tools_by_name["write_file"]]
    phase_state.mcp_tool_names_all = {"read_file", "write_file"}
    phase_state.enabled_mcp_tools = None
    phase_state.rebuild_independent_filter()

    session = _FakeSession(queen_name=queen_id, phase_state=phase_state)
    session._queen_tool_registry = registry
    manager = _FakeManager(_sessions={"sess-1": session})

    app = await _make_app(manager=manager)
    async with TestClient(TestServer(app)) as client:
        resp = await client.patch(
            f"/api/queen/{queen_id}/tools",
            json={"enabled_mcp_tools": ["read_file"]},
        )
        assert resp.status == 200
        body = await resp.json()
        assert body["refreshed_sessions"] == 1

    # Session's phase state reflects the new allowlist without a restart.
    current = phase_state.get_current_tools()
    assert [t.name for t in current] == ["read_file"]
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_missing_queen_returns_404(queen_dir, monkeypatch):
    """Both GET and PATCH 404 for a queen with no profile directory."""
    monkeypatch.setattr(routes_queen_tools, "ensure_default_queens", lambda: None)
    manager = _FakeManager()

    app = await _make_app(manager=manager)
    async with TestClient(TestServer(app)) as client:
        response = await client.get("/api/queen/queen_nonexistent/tools")
        assert response.status == 404

        response = await client.patch(
            "/api/queen/queen_nonexistent/tools",
            json={"enabled_mcp_tools": None},
        )
        assert response.status == 404
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_delete_restores_role_default(queen_dir, monkeypatch):
    """DELETE removes tools.json so the queen falls back to the role default."""
    monkeypatch.setattr(routes_queen_tools, "ensure_default_queens", lambda: None)
    queens_dir, queen_id = queen_dir
    sidecar_path = queens_dir / queen_id / "tools.json"

    manager = _FakeManager()
    manager._mcp_tool_catalog = {
        "coder-tools": [
            {"name": "read_file", "description": "", "input_schema": {}},
            {"name": "port_scan", "description": "", "input_schema": {}},
        ],
    }

    app = await _make_app(manager=manager)
    async with TestClient(TestServer(app)) as client:
        # Seed a custom allowlist so there is a sidecar to delete.
        response = await client.patch(
            f"/api/queen/{queen_id}/tools",
            json={"enabled_mcp_tools": ["read_file"]},
        )
        assert response.status == 200
        assert sidecar_path.exists()

        response = await client.delete(f"/api/queen/{queen_id}/tools")
        assert response.status == 200
        payload = await response.json()
        assert payload["removed"] is True
        assert payload["is_role_default"] is True
        assert not sidecar_path.exists()

        # The effective list is queen_technology's role default, which
        # covers read_file (file_read) and port_scan (security).
        effective = set(payload["enabled_mcp_tools"] or [])
        assert "read_file" in effective
        assert "port_scan" in effective

        # GET agrees with the DELETE response.
        response = await client.get(f"/api/queen/{queen_id}/tools")
        assert (await response.json())["is_role_default"] is True

        # Deleting again is a harmless no-op.
        response = await client.delete(f"/api/queen/{queen_id}/tools")
        assert response.status == 200
        assert (await response.json())["removed"] is False
|
||||
|
||||
|
||||
def test_legacy_profile_field_migrates_to_sidecar(queen_dir):
    """A legacy enabled_mcp_tools field in profile.yaml is hoisted to tools.json."""
    queens_dir, queen_id = queen_dir
    profile_path = queens_dir / queen_id / "profile.yaml"
    tools_path = queens_dir / queen_id / "tools.json"
    legacy_tools = ["read_file", "write_file"]

    # Plant the legacy field directly in profile.yaml.
    data = yaml.safe_load(profile_path.read_text()) or {}
    data["enabled_mcp_tools"] = legacy_tools
    profile_path.write_text(yaml.safe_dump(data, sort_keys=False))

    from framework.agents.queen.queen_tools_config import load_queen_tools_config

    # The first load performs the migration to the sidecar.
    assert load_queen_tools_config(queen_id) == legacy_tools
    assert tools_path.exists()
    assert json.loads(tools_path.read_text())["enabled_mcp_tools"] == legacy_tools

    # profile.yaml no longer carries the field, while other keys survive.
    migrated = yaml.safe_load(profile_path.read_text())
    assert "enabled_mcp_tools" not in migrated
    assert migrated["name"] == "Alexandra"

    # A second load reads the sidecar directly — no migration work left.
    assert load_queen_tools_config(queen_id) == legacy_tools
|
||||
@@ -21,10 +21,32 @@ Each skill is a directory containing a `SKILL.md`. At startup, only the frontmat
|
||||
|
||||
### Choosing where to put a new skill
|
||||
|
||||
- **Project-scoped**: put under `<project>/.hive/skills/` when the skill is tied to that codebase's APIs, conventions, or infra.
|
||||
- **User-scoped**: put under `~/.hive/skills/` when the skill is reusable across projects for this machine/user.
|
||||
- **Colony-scoped (via `create_colony`)**: when the skill is the operational protocol a single colony needs — its API auth, DOM selectors, DB schema, task-queue conventions — do NOT place it under `~/.hive/skills/` or `<project>/.hive/skills/` yourself. Those roots are SHARED and every colony on the machine will see it. Instead, pass the skill content INLINE to the `create_colony` tool (`skill_name`, `skill_description`, `skill_body`, optional `skill_files`). The tool materializes the folder under `~/.hive/colonies/<colony_name>/.hive/skills/<skill-name>/` where it is discovered as **project scope** by only that colony's workers. See the subsection below.
|
||||
- **Project-scoped**: put under `<project>/.hive/skills/` when the skill is tied to that codebase's APIs, conventions, or infra and multiple agents in the project should share it.
|
||||
- **User-scoped**: put under `~/.hive/skills/` when the skill is reusable across projects for this machine/user and all agents should see it.
|
||||
- **Framework default**: add under `core/framework/skills/_default_skills/` AND register in `framework/skills/defaults.py::SKILL_REGISTRY` only when the skill is a universal operational protocol shipped with Hive. Default skills use the `hive.<name>` naming convention and include `type: default-skill` in metadata.
|
||||
|
||||
### Colony-scoped skills via `create_colony`
|
||||
|
||||
A colony-scoped skill is one that belongs to exactly ONE colony — e.g. it encodes the HoneyComb staging API the `honeycomb_research` colony polls, or the LinkedIn outbound flow the `linkedin_outbound_campaign` colony runs. Writing such a skill at `~/.hive/skills/` or `<project>/.hive/skills/` leaks it to every other colony, which will then see it at selection time.
|
||||
|
||||
**Do not reach for `write_file` to create the folder.** The `create_colony` tool takes the skill content INLINE and places it for you:
|
||||
|
||||
```
|
||||
create_colony(
|
||||
colony_name="honeycomb_research",
|
||||
task="Build a daily honeycomb market report…",
|
||||
skill_name="honeycomb-api-protocol",
|
||||
skill_description="How to query the HoneyComb staging API…",
|
||||
skill_body="## Operational Protocol\n\nAuth: …",
|
||||
skill_files=[{"path": "scripts/fetch_tickers.py", "content": "…"}], # optional
|
||||
)
|
||||
```
|
||||
|
||||
The tool writes `~/.hive/colonies/honeycomb_research/.hive/skills/honeycomb-api-protocol/SKILL.md` (plus any `skill_files`), which `SkillDiscovery` picks up as project scope when that colony's workers start — and ONLY that colony's workers. No cross-colony leakage.
|
||||
|
||||
Do not write colony-bound skill folders by hand under `~/.hive/skills/`. A skill placed there is user-scoped and becomes visible to every colony on the machine — defeating the isolation you wanted.
|
||||
|
||||
### Directory layout
|
||||
|
||||
```
|
||||
@@ -124,8 +146,8 @@ For Python scripts in a Hive project, prefer `uv run scripts/foo.py ...`.
|
||||
### Creating a new skill — workflow
|
||||
|
||||
1. Pick a `<skill-name>` (lowercase-hyphenated).
|
||||
2. Decide scope: project (`<project>/.hive/skills/`), user (`~/.hive/skills/`), or framework default (`core/framework/skills/_default_skills/` + registry entry).
|
||||
3. Create the directory and write `SKILL.md` with frontmatter + body.
|
||||
2. Decide scope: **colony** (pass content INLINE to `create_colony` — STOP here, do not hand-author the folder), project (`<project>/.hive/skills/`), user (`~/.hive/skills/`), or framework default (`core/framework/skills/_default_skills/` + registry entry).
|
||||
3. For the non-colony scopes: create the directory and write `SKILL.md` with frontmatter + body.
|
||||
4. Add `scripts/`, `references/`, `assets/` only if needed.
|
||||
5. Validate the frontmatter: name matches dir, description is specific, no forbidden characters.
|
||||
6. Validate using the Hive CLI:
|
||||
|
||||
+6
-4
@@ -31,7 +31,7 @@ browser_shadow_query(...) → rect → same
|
||||
|
||||
## Screenshot + coordinates is shadow-agnostic — prefer it on shadow-heavy sites
|
||||
|
||||
On sites that use Shadow DOM heavily (Reddit's faceplate Web Components, LinkedIn's `#interop-outlet` messaging overlay, some X custom elements), **coordinate-based operations reach elements that selector-based tools can't see.**
|
||||
Start with `browser_snapshot` when you need to inspect the page structure or find ordinary controls. If the snapshot does not show the thing you need, shows stale or misleading refs, or cannot prove where a visible target is, take `browser_screenshot` and use the screenshot + coordinate path. This is especially useful on sites that use Shadow DOM heavily.
|
||||
|
||||
Why:
|
||||
|
||||
@@ -53,7 +53,7 @@ Whereas `wait_for_selector`, `browser_click(selector=...)`, `browser_type(select
|
||||
|
||||
1. Call `browser_click_coordinate(x, y)` to click the target element.
|
||||
2. Check the `focused_element` field in the response — it tells you what actually received focus (tag, id, role, contenteditable, rect).
|
||||
3. If the focused element is editable, call `browser_type_focused(text="...")` to insert text. use tools to verify the text took effect.
|
||||
3. If the focused element is editable, call `browser_type_focused(text="...")` to insert text. Use tools to verify the text took effect — prefer checking the underlying `.value` / `innerText` via `browser_evaluate` or confirming the submit button enabled. A screenshot alone can mislead: narrow input boxes visually clip long text, so only a portion may appear on screen even though the full string was accepted.
|
||||
4. If it is NOT editable, your click landed on the wrong thing — refine coordinates and retry. Do NOT reach for `browser_evaluate` + `execCommand('insertText')` or shadow-root traversals. The problem is the click target, not the typing method.
|
||||
|
||||
`browser_click` (selector-based) also returns `focused_element`, so the same check works whether you clicked by selector or coordinate.
|
||||
@@ -113,8 +113,8 @@ Even after `wait_until="load"`, React/Vue SPAs often render their real chrome in
|
||||
### Reading pages efficiently
|
||||
|
||||
- **Prefer `browser_snapshot` over `browser_get_text("body")`** — returns a compact ~1–5 KB accessibility tree vs 100+ KB of raw HTML.
|
||||
- Interaction tools (`browser_click`, `browser_type`, `browser_type_focused`, `browser_fill`, `browser_scroll`, etc.) return a page snapshot automatically in their result. Use it to decide your next action — do NOT call `browser_snapshot` separately after every action. Only call `browser_snapshot` when you need a fresh view without performing an action, or after setting `auto_snapshot=false`.
|
||||
- Complex pages (LinkedIn, Twitter/X, SPAs with virtual scrolling) have DOMs that don't match what's visually rendered — snapshot refs may be stale, missing, or misaligned with visible layout. On these pages, `browser_screenshot` is the only reliable way to orient yourself.
|
||||
- Interaction tools `browser_click`, `browser_type`, `browser_type_focused`, `browser_fill`, and `browser_scroll` wait 0.5 s for the page to settle after a successful action, then attach a fresh accessibility snapshot under the `snapshot` key of their result. Use it to decide your next action — do NOT call `browser_snapshot` separately after every action. Tune the capture via `auto_snapshot_mode`: `"default"` (full tree, the default), `"simple"` (trims unnamed structural nodes), `"interactive"` (only controls — tightest token footprint), or `"off"` to skip the capture entirely (useful when batching several interactions and you don't need the intermediate trees). Call `browser_snapshot` explicitly only when you need a newer view or a different mode than what was auto-captured.
|
||||
- Complex pages (LinkedIn, Twitter/X, SPAs with virtual scrolling) can have DOMs that don't match what's visually rendered — snapshot refs may be stale, missing, or misaligned with visible layout. Try the available snapshot first; when the target is not present in that snapshot or visual position matters, switch to `browser_screenshot` to orient yourself.
|
||||
- Only fall back to `browser_get_text` for extracting specific small elements by CSS selector.
|
||||
|
||||
## Typing and keyboard input
|
||||
@@ -139,6 +139,8 @@ The symptom is always the same: **you type, the characters appear visually, and
|
||||
|
||||
3. **Verify** the submit button is enabled before clicking it. Use `browser_evaluate` to check the button's `disabled` or `aria-disabled` attribute. Do NOT trust that typing worked — always check state.
|
||||
|
||||
**Partial visibility is fine.** Small single-line inputs, chat boxes with fixed width, and search fields commonly clip or truncate long text visually — only the tail or head may be shown on screen. Don't treat that as failure. What matters is that the framework accepted the input: the submit button enabled, or `element.value` / `innerText` read via `browser_evaluate` contains the full string. If the visible pixels don't match what you typed but the button is enabled and the underlying value is correct, typing succeeded — proceed.
|
||||
|
||||
4. **Only click send if the button is enabled.** If the button is still disabled, try the recovery dance: click the textarea again, press `End`, press a space, press `Backspace` — this forces React to recompute `hasRealContent`. Then re-check the button state.
|
||||
|
||||
### Why `browser_type` uses `Input.insertText` by default
|
||||
+6
-15
@@ -381,24 +381,15 @@ is_logged_in = browser_evaluate("""
|
||||
|
||||
## Deduplication pattern
|
||||
|
||||
For any daily loop (connection acceptance, profile visits, DMs), maintain a ledger file:
|
||||
Dedup is handled by the colony progress queue, not a separate JSON file. For any daily loop (connection acceptance, profile visits, DMs), the queen enqueues one row in the `tasks` table per `(profile_url, action)` pair; workers claim, act, and mark done. Already-`done` rows are skipped on the next claim — that's your crash-resume and cross-day dedup. See `hive.colony-progress-tracker` for the full claim/update protocol.
|
||||
|
||||
```
|
||||
# data/linkedin_contacts.json
|
||||
{
|
||||
"contacts": [
|
||||
{
|
||||
"profile_url": "https://www.linkedin.com/in/username/",
|
||||
"name": "First Last",
|
||||
"action": "connection_accepted+message_sent",
|
||||
"timestamp": "2026-04-13T09:30:00Z",
|
||||
"message_preview": "first 50 chars of message sent"
|
||||
}
|
||||
]
|
||||
}
|
||||
If you need to check whether a given `(profile_url, action)` has already been handled in a prior run before enqueuing a new row, query the queue directly:
|
||||
|
||||
```bash
|
||||
sqlite3 "<db_path>" "SELECT status FROM tasks WHERE payload LIKE '%\"profile_url\":\"<url>\"%' AND payload LIKE '%\"action\":\"<action>\"%';"
|
||||
```
|
||||
|
||||
Before any action, check if the profile URL already has a recent entry for the same action. Skip if yes. Atomic-write the ledger after each success so crash-resume works.
|
||||
Empty → not yet enqueued, safe to add. Otherwise honor the existing row's status.
|
||||
|
||||
## See also
|
||||
|
||||
+10
-19
@@ -203,7 +203,7 @@ for c in candidates:
|
||||
else:
|
||||
browser_click("[data-testid='tweetButton']")
|
||||
sleep(2)
|
||||
record_sent(c['preview'], reply_text) # append to ledger
|
||||
# Mark the task done in progress.db — see hive.colony-progress-tracker
|
||||
|
||||
# Close the composer (press Escape or click the Close button)
|
||||
browser_press("Escape")
|
||||
@@ -307,24 +307,9 @@ If any of these appear, **stop the run, screenshot the state, and surface the is
|
||||
|
||||
## Deduplication pattern
|
||||
|
||||
Every daily loop should maintain a ledger file. Append after each successful reply/post, atomic-write to survive crashes.
|
||||
Dedup is handled by the colony progress queue, not a separate JSON file. The queen enqueues one row in the `tasks` table per reply target (keyed by tweet URL); workers claim, reply, and mark done. Already-`done` rows are skipped on the next claim — that's your crash-resume and cross-day dedup, for free. See `hive.colony-progress-tracker` for the full claim/update protocol.
|
||||
|
||||
```
|
||||
# data/x_replies_ledger.json
|
||||
{
|
||||
"replies": [
|
||||
{
|
||||
"tweet_url": "https://x.com/<author>/status/<id>",
|
||||
"author": "username",
|
||||
"original_preview": "first 100 chars of the tweet",
|
||||
"reply_text": "what you sent",
|
||||
"timestamp": "2026-04-13T09:30:00Z"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
Extract the tweet URL via `browser_evaluate`:
|
||||
Extract the tweet URL via `browser_evaluate` so the queen can use it as the task key:
|
||||
|
||||
```
|
||||
url = browser_evaluate("""
|
||||
@@ -337,7 +322,13 @@ url = browser_evaluate("""
|
||||
""", article_index)
|
||||
```
|
||||
|
||||
Before each reply, check if the URL already has a ledger entry. If yes, skip. This survives across runs and across days.
|
||||
If you need to check whether a given tweet URL has already been replied to in a prior run (e.g., scanning live search results before enqueuing), query the queue directly:
|
||||
|
||||
```bash
|
||||
sqlite3 "<db_path>" "SELECT status FROM tasks WHERE payload LIKE '%\"tweet_url\":\"<url>\"%';"
|
||||
```
|
||||
|
||||
Empty → not yet enqueued, safe to add. Otherwise honor the existing row's status.
|
||||
|
||||
## Reply style guidelines
|
||||
|
||||
@@ -0,0 +1,203 @@
|
||||
"""Shared skill authoring primitives.
|
||||
|
||||
Validates and materializes a skill folder. Used by three callers:
|
||||
|
||||
1. Queen's ``create_colony`` tool (``queen_lifecycle_tools.py``) — inline
|
||||
content passed by the queen during colony creation.
|
||||
2. HTTP POST / PUT routes under ``/api/**/skills`` — UI-driven creation.
|
||||
3. Future ``create_learned_skill`` tool — runtime learning.
|
||||
|
||||
Keeping the validators and writer here ensures the three paths share one
|
||||
authority; changes to the name regex or frontmatter layout happen in one
|
||||
place.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import re
|
||||
import shutil
|
||||
from dataclasses import dataclass, field
|
||||
from pathlib import Path
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Framework skill names include dots (``hive.note-taking``), so the
|
||||
# validator needs to allow them even though the queen's ``create_colony``
|
||||
# tool historically forbade dots. User-created skills without dots still
|
||||
# pass; the dot cap just prevents us from rejecting existing framework
|
||||
# names when the UI toggles them via ``validate_skill_name``.
|
||||
_SKILL_NAME_RE = re.compile(r"^[a-z0-9.-]+$")
|
||||
_MAX_NAME_LEN = 64
|
||||
_MAX_DESC_LEN = 1024
|
||||
|
||||
|
||||
@dataclass
|
||||
class SkillFile:
|
||||
"""Supporting file bundled with a skill (relative path + content)."""
|
||||
|
||||
rel_path: Path
|
||||
content: str
|
||||
|
||||
|
||||
@dataclass
|
||||
class SkillDraft:
|
||||
"""Validated skill content ready to be written to disk."""
|
||||
|
||||
name: str
|
||||
description: str
|
||||
body: str
|
||||
files: list[SkillFile] = field(default_factory=list)
|
||||
|
||||
@property
|
||||
def skill_md_text(self) -> str:
|
||||
"""Assemble the final SKILL.md text (frontmatter + body)."""
|
||||
body_norm = self.body.rstrip() + "\n"
|
||||
return f"---\nname: {self.name}\ndescription: {self.description}\n---\n\n{body_norm}"
|
||||
|
||||
|
||||
def validate_skill_name(raw: str) -> tuple[str | None, str | None]:
|
||||
"""Return ``(normalized_name, error)``. Either side may be None."""
|
||||
name = (raw or "").strip() if isinstance(raw, str) else ""
|
||||
if not name:
|
||||
return None, "skill_name is required"
|
||||
if not _SKILL_NAME_RE.match(name):
|
||||
return None, f"skill_name '{name}' must match [a-z0-9-] pattern"
|
||||
if name.startswith("-") or name.endswith("-") or "--" in name:
|
||||
return None, f"skill_name '{name}' has leading/trailing/consecutive hyphens"
|
||||
if len(name) > _MAX_NAME_LEN:
|
||||
return None, f"skill_name '{name}' exceeds {_MAX_NAME_LEN} chars"
|
||||
return name, None
|
||||
|
||||
|
||||
def validate_description(raw: str) -> tuple[str | None, str | None]:
|
||||
desc = (raw or "").strip() if isinstance(raw, str) else ""
|
||||
if not desc:
|
||||
return None, "skill_description is required"
|
||||
if len(desc) > _MAX_DESC_LEN:
|
||||
return None, f"skill_description must be 1–{_MAX_DESC_LEN} chars"
|
||||
# Frontmatter descriptions are line-oriented — the parser reads one value.
|
||||
if "\n" in desc or "\r" in desc:
|
||||
return None, "skill_description must be a single line (no newlines)"
|
||||
return desc, None
|
||||
|
||||
|
||||
def validate_files(raw: list[dict] | None) -> tuple[list[SkillFile] | None, str | None]:
|
||||
if not raw:
|
||||
return [], None
|
||||
if not isinstance(raw, list):
|
||||
return None, "skill_files must be an array"
|
||||
out: list[SkillFile] = []
|
||||
for entry in raw:
|
||||
if not isinstance(entry, dict):
|
||||
return None, "each skill_files entry must be an object with 'path' and 'content'"
|
||||
rel_raw = entry.get("path")
|
||||
content = entry.get("content")
|
||||
if not isinstance(rel_raw, str) or not rel_raw.strip():
|
||||
return None, "skill_files entry missing non-empty 'path'"
|
||||
if not isinstance(content, str):
|
||||
return None, f"skill_files entry '{rel_raw}' missing string 'content'"
|
||||
rel_stripped = rel_raw.strip()
|
||||
# Allow './foo' but reject '/foo' — relativizing absolute paths silently
|
||||
# has bitten other tools; make the intent loud instead.
|
||||
if rel_stripped.startswith("./"):
|
||||
rel_stripped = rel_stripped[2:]
|
||||
rel_path = Path(rel_stripped)
|
||||
if rel_stripped.startswith("/") or rel_path.is_absolute() or ".." in rel_path.parts:
|
||||
return None, f"skill_files path '{rel_raw}' must be relative and inside the skill folder"
|
||||
if rel_path.as_posix() == "SKILL.md":
|
||||
return None, "skill_files must not contain SKILL.md — pass skill_body instead"
|
||||
out.append(SkillFile(rel_path=rel_path, content=content))
|
||||
return out, None
|
||||
|
||||
|
||||
def build_draft(
|
||||
*,
|
||||
skill_name: str,
|
||||
skill_description: str,
|
||||
skill_body: str,
|
||||
skill_files: list[dict] | None = None,
|
||||
) -> tuple[SkillDraft | None, str | None]:
|
||||
"""Validate all inputs and return an immutable draft ready for writing."""
|
||||
name, err = validate_skill_name(skill_name)
|
||||
if err or name is None:
|
||||
return None, err
|
||||
desc, err = validate_description(skill_description)
|
||||
if err or desc is None:
|
||||
return None, err
|
||||
body = skill_body if isinstance(skill_body, str) else ""
|
||||
if not body.strip():
|
||||
return None, (
|
||||
"skill_body is required — the operational procedure the colony worker needs to run this job unattended"
|
||||
)
|
||||
files, err = validate_files(skill_files)
|
||||
if err or files is None:
|
||||
return None, err
|
||||
return SkillDraft(name=name, description=desc, body=body, files=list(files)), None
|
||||
|
||||
|
||||
def write_skill(
|
||||
draft: SkillDraft,
|
||||
*,
|
||||
target_root: Path,
|
||||
replace_existing: bool = True,
|
||||
) -> tuple[Path | None, str | None, bool]:
|
||||
"""Write the draft under ``target_root/{draft.name}/``.
|
||||
|
||||
``target_root`` is the parent scope dir (e.g.
|
||||
``~/.hive/agents/queens/{id}/skills`` or
|
||||
``{colony_dir}/.hive/skills``). The function creates it if needed.
|
||||
|
||||
Returns ``(installed_path, error, replaced)``. On success ``error`` is
|
||||
``None``; on failure ``installed_path`` is ``None`` and the target is
|
||||
left as it was before the call (best-effort).
|
||||
|
||||
When ``replace_existing=False`` and the target dir already exists,
|
||||
the write is refused with a non-fatal error (caller decides whether
|
||||
to surface it as a 409 or a warning).
|
||||
"""
|
||||
try:
|
||||
target_root.mkdir(parents=True, exist_ok=True)
|
||||
except OSError as e:
|
||||
return None, f"failed to create skills root: {e}", False
|
||||
|
||||
target = target_root / draft.name
|
||||
replaced = False
|
||||
try:
|
||||
if target.exists():
|
||||
if not replace_existing:
|
||||
return None, f"skill '{draft.name}' already exists", False
|
||||
# Remove the old dir outright so stale files from a prior
|
||||
# version don't linger alongside the new ones.
|
||||
replaced = True
|
||||
shutil.rmtree(target)
|
||||
target.mkdir(parents=True, exist_ok=False)
|
||||
(target / "SKILL.md").write_text(draft.skill_md_text, encoding="utf-8")
|
||||
for sf in draft.files:
|
||||
full_path = target / sf.rel_path
|
||||
full_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
full_path.write_text(sf.content, encoding="utf-8")
|
||||
except OSError as e:
|
||||
return None, f"failed to write skill folder {target}: {e}", replaced
|
||||
return target, None, replaced
|
||||
|
||||
|
||||
def remove_skill(target_root: Path, skill_name: str) -> tuple[bool, str | None]:
|
||||
"""Rm-tree the skill directory under ``target_root/{skill_name}/``.
|
||||
|
||||
Returns ``(removed, error)``. ``removed=False, error=None`` means
|
||||
the directory didn't exist (idempotent). Name is validated on the
|
||||
way in so an attacker with UI access can't traverse out of the
|
||||
scope root.
|
||||
"""
|
||||
name, err = validate_skill_name(skill_name)
|
||||
if err or name is None:
|
||||
return False, err
|
||||
target = target_root / name
|
||||
if not target.exists():
|
||||
return False, None
|
||||
try:
|
||||
shutil.rmtree(target)
|
||||
except OSError as e:
|
||||
return False, f"failed to remove skill folder {target}: {e}"
|
||||
return True, None
|
||||
@@ -34,12 +34,8 @@ Constraints: never read more than one skill up front; only read after selecting.
|
||||
- When a skill drives external API writes (Gmail, Calendar, GitHub, etc.),
|
||||
assume rate limits: prefer fewer larger writes, avoid tight one-item loops,
|
||||
serialize bursts when possible, and respect 429/Retry-After.
|
||||
|
||||
|
||||
The following skills provide specialized instructions for specific tasks.
|
||||
Use `read_file` to load a skill's SKILL.md when the task matches its description.
|
||||
When a skill file references a relative path, resolve it against the
|
||||
skill directory (parent of SKILL.md) and use that absolute path in tool commands."""
|
||||
- When a selected skill references a relative path, resolve it against the
|
||||
skill directory (parent of SKILL.md) and use that absolute path in tool commands."""
|
||||
|
||||
_MANDATORY_HEADER_COMPACT = """## Skills (mandatory)
|
||||
Before replying: scan <available_skills> <name> entries.
|
||||
@@ -50,12 +46,8 @@ Constraints: never read more than one skill up front; only read after selecting.
|
||||
- When a skill drives external API writes (Gmail, Calendar, GitHub, etc.),
|
||||
assume rate limits: prefer fewer larger writes, avoid tight one-item loops,
|
||||
serialize bursts when possible, and respect 429/Retry-After.
|
||||
|
||||
|
||||
The following skills provide specialized instructions for specific tasks.
|
||||
Use `read_file` to load a skill's SKILL.md when the task matches its name.
|
||||
When a skill file references a relative path, resolve it against the
|
||||
skill directory (parent of SKILL.md) and use that absolute path in tool commands."""
|
||||
- When a selected skill references a relative path, resolve it against the
|
||||
skill directory (parent of SKILL.md) and use that absolute path in tool commands."""
|
||||
|
||||
|
||||
class SkillCatalog:
|
||||
|
||||
@@ -7,7 +7,7 @@ locations. Resolves name collisions deterministically.
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from dataclasses import dataclass
|
||||
from dataclasses import dataclass, field
|
||||
from pathlib import Path
|
||||
|
||||
from framework.skills.parser import ParsedSkill, parse_skill_md
|
||||
@@ -30,16 +30,40 @@ _SKIP_DIRS = frozenset(
|
||||
)
|
||||
|
||||
# Scope priority (higher = takes precedence)
|
||||
# ``preset`` sits between framework and user: bundled alongside the
|
||||
# framework distribution, but off by default — capability packs the user
|
||||
# opts into per queen/colony rather than globally-enabled infra.
|
||||
_SCOPE_PRIORITY = {
|
||||
"framework": 0,
|
||||
"user": 1,
|
||||
"project": 2,
|
||||
"preset": 1,
|
||||
"user": 2,
|
||||
"queen_ui": 3,
|
||||
"colony_ui": 4,
|
||||
"project": 5,
|
||||
}
|
||||
|
||||
# Within the same scope, Hive-specific paths override cross-client paths.
|
||||
# We encode this by scanning cross-client first, then Hive-specific (later wins).
|
||||
|
||||
|
||||
@dataclass
|
||||
class ExtraScope:
|
||||
"""Additional scope dir to scan beyond the standard five.
|
||||
|
||||
Used by :class:`framework.skills.manager.SkillsManager` to surface
|
||||
per-queen (``queen_ui``) and per-colony (``colony_ui``) skill
|
||||
directories created through the UI. The ``label`` feeds
|
||||
:attr:`ParsedSkill.source_scope` so downstream consumers (trust
|
||||
gate, UI provenance resolver) can distinguish scope origins.
|
||||
"""
|
||||
|
||||
directory: Path
|
||||
label: str
|
||||
# Kept for forward-compat with the priority table; discovery itself
|
||||
# relies on scan order for last-wins resolution.
|
||||
priority: int = 0
|
||||
|
||||
|
||||
@dataclass
|
||||
class DiscoveryConfig:
|
||||
"""Configuration for skill discovery."""
|
||||
@@ -49,6 +73,10 @@ class DiscoveryConfig:
|
||||
skip_framework_scope: bool = False
|
||||
max_depth: int = 4
|
||||
max_dirs: int = 2000
|
||||
# Additional scope dirs scanned between user and project scopes,
|
||||
# in the order they are provided. Use ``ExtraScope`` to tag each
|
||||
# with its logical label (``queen_ui`` / ``colony_ui``).
|
||||
extra_scopes: list[ExtraScope] = field(default_factory=list)
|
||||
|
||||
|
||||
class SkillDiscovery:
|
||||
@@ -82,13 +110,22 @@ class SkillDiscovery:
|
||||
all_skills: list[ParsedSkill] = []
|
||||
self._scanned_dirs = []
|
||||
|
||||
# Framework scope (lowest precedence)
|
||||
# Framework scope (lowest precedence) — always-on infra skills.
|
||||
if not self._config.skip_framework_scope:
|
||||
framework_dir = Path(__file__).parent / "_default_skills"
|
||||
if framework_dir.is_dir():
|
||||
self._scanned_dirs.append(framework_dir)
|
||||
all_skills.extend(self._scan_scope(framework_dir, "framework"))
|
||||
|
||||
# Preset scope — bundled capability packs that ship with the
|
||||
# framework but default to OFF. User opts in per queen/colony
|
||||
# via the Skills Library. ``skip_framework_scope`` covers both
|
||||
# bundled directories since they live side-by-side on disk.
|
||||
preset_dir = Path(__file__).parent / "_preset_skills"
|
||||
if preset_dir.is_dir():
|
||||
self._scanned_dirs.append(preset_dir)
|
||||
all_skills.extend(self._scan_scope(preset_dir, "preset"))
|
||||
|
||||
# User scope
|
||||
if not self._config.skip_user_scope:
|
||||
home = Path.home()
|
||||
@@ -105,6 +142,13 @@ class SkillDiscovery:
|
||||
self._scanned_dirs.append(user_hive)
|
||||
all_skills.extend(self._scan_scope(user_hive, "user"))
|
||||
|
||||
# Extra scopes (queen_ui / colony_ui), scanned between user and project
|
||||
# so colony overrides beat queen overrides, and both beat user-scope.
|
||||
for extra in self._config.extra_scopes:
|
||||
if extra.directory.is_dir():
|
||||
self._scanned_dirs.append(extra.directory)
|
||||
all_skills.extend(self._scan_scope(extra.directory, extra.label))
|
||||
|
||||
# Project scope (highest precedence)
|
||||
if self._config.project_root:
|
||||
root = self._config.project_root
|
||||
|
||||
@@ -23,6 +23,7 @@ Typical usage — **bare** (exported agents, SDK users)::
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
from dataclasses import dataclass, field
|
||||
from pathlib import Path
|
||||
@@ -44,6 +45,18 @@ class SkillsManagerConfig:
|
||||
even when ``project_root`` is set.
|
||||
interactive: Whether trust gating can prompt the user interactively.
|
||||
When ``False``, untrusted project skills are silently skipped.
|
||||
queen_id: Optional queen identifier. When set, enables the
|
||||
``queen_ui`` scope and per-queen override file.
|
||||
queen_overrides_path: Path to
|
||||
``~/.hive/agents/queens/{queen_id}/skills_overrides.json``.
|
||||
When set, the store is loaded and its entries override
|
||||
discovery results (disable skills, record provenance).
|
||||
colony_name: Optional colony identifier; mirrors ``queen_id`` for
|
||||
the ``colony_ui`` scope.
|
||||
colony_overrides_path: Per-colony override file path.
|
||||
extra_scope_dirs: Extra scope dirs scanned between user and
|
||||
project scopes. Typically populated by the caller with the
|
||||
queen/colony UI skill directories.
|
||||
"""
|
||||
|
||||
skills_config: SkillsConfig = field(default_factory=SkillsConfig)
|
||||
@@ -51,6 +64,15 @@ class SkillsManagerConfig:
|
||||
skip_community_discovery: bool = False
|
||||
interactive: bool = True
|
||||
|
||||
# Override support
|
||||
queen_id: str | None = None
|
||||
queen_overrides_path: Path | None = None
|
||||
colony_name: str | None = None
|
||||
colony_overrides_path: Path | None = None
|
||||
# Typed at the call site as ``list[ExtraScope]`` — not imported here
|
||||
# to keep this module free of discovery-layer dependencies.
|
||||
extra_scope_dirs: list = field(default_factory=list)
|
||||
|
||||
|
||||
class SkillsManager:
|
||||
"""Unified skill lifecycle: discovery → loading → prompt renderation.
|
||||
@@ -65,13 +87,21 @@ class SkillsManager:
|
||||
self._config = config or SkillsManagerConfig()
|
||||
self._loaded = False
|
||||
self._catalog: object = None # SkillCatalog, set after load()
|
||||
self._all_skills: list = [] # list[ParsedSkill], pre-override-filter
|
||||
self._catalog_prompt: str = ""
|
||||
self._protocols_prompt: str = ""
|
||||
self._allowlisted_dirs: list[str] = []
|
||||
self._default_mgr: object = None # DefaultSkillManager, set after load()
|
||||
# Override stores (loaded lazily in _do_load). Queen-scope and
|
||||
# colony-scope are read together; colony entries win on collision.
|
||||
self._queen_overrides: object = None # SkillOverrideStore | None
|
||||
self._colony_overrides: object = None # SkillOverrideStore | None
|
||||
# Hot-reload state
|
||||
self._watched_dirs: list[str] = []
|
||||
self._watched_files: list[str] = []
|
||||
self._watcher_task: object = None # asyncio.Task, set by start_watching()
|
||||
# Serializes in-process mutations (HTTP handlers + create_colony).
|
||||
self._mutation_lock = asyncio.Lock()
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Factory for backwards-compat bridge
|
||||
@@ -119,6 +149,7 @@ class SkillsManager:
|
||||
from framework.skills.catalog import SkillCatalog
|
||||
from framework.skills.defaults import DefaultSkillManager
|
||||
from framework.skills.discovery import DiscoveryConfig, SkillDiscovery
|
||||
from framework.skills.overrides import SkillOverrideStore
|
||||
|
||||
skills_config = self._config.skills_config
|
||||
|
||||
@@ -128,12 +159,13 @@ class SkillsManager:
|
||||
DiscoveryConfig(
|
||||
project_root=self._config.project_root,
|
||||
skip_framework_scope=False,
|
||||
extra_scopes=list(self._config.extra_scope_dirs or []),
|
||||
)
|
||||
)
|
||||
discovered = discovery.discover()
|
||||
self._watched_dirs = discovery.scanned_directories
|
||||
|
||||
# Trust-gate project-scope skills (AS-13)
|
||||
# Trust-gate project-scope skills (AS-13). UI scopes bypass.
|
||||
if self._config.project_root is not None and not self._config.skip_community_discovery:
|
||||
from framework.skills.trust import TrustGate
|
||||
|
||||
@@ -141,6 +173,31 @@ class SkillsManager:
|
||||
discovered, project_dir=self._config.project_root
|
||||
)
|
||||
|
||||
# 1b. Load per-scope override stores. Missing files → empty stores.
|
||||
queen_store = None
|
||||
if self._config.queen_overrides_path is not None:
|
||||
queen_store = SkillOverrideStore.load(
|
||||
self._config.queen_overrides_path,
|
||||
scope_label=f"queen:{self._config.queen_id or ''}",
|
||||
)
|
||||
colony_store = None
|
||||
if self._config.colony_overrides_path is not None:
|
||||
colony_store = SkillOverrideStore.load(
|
||||
self._config.colony_overrides_path,
|
||||
scope_label=f"colony:{self._config.colony_name or ''}",
|
||||
)
|
||||
self._queen_overrides = queen_store
|
||||
self._colony_overrides = colony_store
|
||||
self._watched_files = [
|
||||
str(p) for p in (self._config.queen_overrides_path, self._config.colony_overrides_path) if p is not None
|
||||
]
|
||||
|
||||
# 1c. Apply override filtering. Colony entries take precedence over
|
||||
# queen entries on name collision; the store's ``is_disabled`` keeps
|
||||
# the resolution rule in one place.
|
||||
self._all_skills = list(discovered)
|
||||
discovered = self._apply_overrides(discovered, skills_config, queen_store, colony_store)
|
||||
|
||||
catalog = SkillCatalog(discovered)
|
||||
self._catalog = catalog
|
||||
self._allowlisted_dirs = catalog.allowlisted_dirs
|
||||
@@ -174,6 +231,101 @@ class SkillsManager:
|
||||
len(catalog_prompt),
|
||||
)
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Override application
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
@staticmethod
|
||||
def _apply_overrides(
|
||||
discovered: list,
|
||||
skills_config: SkillsConfig,
|
||||
queen_store: object,
|
||||
colony_store: object,
|
||||
) -> list:
|
||||
"""Filter ``discovered`` per the queen + colony override stores.
|
||||
|
||||
Resolution rule:
|
||||
1. Tombstoned names (``deleted_ui_skills``) drop out.
|
||||
2. An explicit ``enabled=False`` override drops the skill.
|
||||
3. An explicit ``enabled=True`` override keeps it (wins over
|
||||
``all_defaults_disabled`` for framework defaults AND over the
|
||||
preset-scope default-off rule).
|
||||
4. Otherwise: preset-scope skills are off by default; everything
|
||||
else inherits :meth:`SkillsConfig.is_default_enabled`.
|
||||
"""
|
||||
from framework.skills.overrides import SkillOverrideStore
|
||||
|
||||
stores: list[SkillOverrideStore] = [s for s in (queen_store, colony_store) if s is not None]
|
||||
|
||||
tombstones: set[str] = set()
|
||||
for store in stores:
|
||||
tombstones |= set(store.deleted_ui_skills)
|
||||
|
||||
out = []
|
||||
for skill in discovered:
|
||||
if skill.name in tombstones:
|
||||
continue
|
||||
# Check colony first so colony overrides win over queen's.
|
||||
explicit: bool | None = None
|
||||
master_disabled = False
|
||||
for store in reversed(stores): # colony, then queen
|
||||
entry = store.get(skill.name)
|
||||
if entry is not None and entry.enabled is not None:
|
||||
explicit = entry.enabled
|
||||
break
|
||||
if store.all_defaults_disabled:
|
||||
master_disabled = True
|
||||
if explicit is False:
|
||||
continue
|
||||
if explicit is True:
|
||||
out.append(skill)
|
||||
continue
|
||||
# Preset-scope capability packs are bundled but ship OFF; the
|
||||
# user must explicitly enable them per queen or colony. This
|
||||
# runs even when no store is present so bare agents don't
|
||||
# silently load x-automation etc.
|
||||
if skill.source_scope == "preset":
|
||||
continue
|
||||
# No explicit entry — master switch takes effect against framework defaults.
|
||||
default_enabled = skills_config.is_default_enabled(skill.name)
|
||||
if master_disabled and default_enabled and skill.source_scope == "framework":
|
||||
continue
|
||||
if default_enabled:
|
||||
out.append(skill)
|
||||
return out
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Override accessors
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
@property
|
||||
def queen_overrides(self) -> object:
|
||||
"""The queen-scope :class:`SkillOverrideStore` or ``None``."""
|
||||
return self._queen_overrides
|
||||
|
||||
@property
|
||||
def colony_overrides(self) -> object:
|
||||
"""The colony-scope :class:`SkillOverrideStore` or ``None``."""
|
||||
return self._colony_overrides
|
||||
|
||||
@property
|
||||
def mutation_lock(self) -> asyncio.Lock:
|
||||
"""Serializes in-process override mutations (routes + queen tools)."""
|
||||
return self._mutation_lock
|
||||
|
||||
def reload(self) -> None:
|
||||
"""Re-run discovery and rebuild cached prompts. Public wrapper for ``_reload``."""
|
||||
self._reload()
|
||||
|
||||
def enumerate_skills_with_source(self) -> list:
|
||||
"""Return every discovered skill, including ones disabled by overrides.
|
||||
|
||||
The UI relies on this: a disabled framework skill needs to render
|
||||
in the list so the user can toggle it back on. The post-filter
|
||||
catalog omits those entries.
|
||||
"""
|
||||
return list(self._all_skills)
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Hot-reload: watch skill directories for SKILL.md changes.
|
||||
# ------------------------------------------------------------------
|
||||
@@ -181,14 +333,14 @@ class SkillsManager:
|
||||
async def start_watching(self) -> None:
|
||||
"""Start a background task watching skill directories for changes.
|
||||
|
||||
When a ``SKILL.md`` file is added/modified/removed, the cached
|
||||
``skills_catalog_prompt`` is rebuilt. The next node iteration picks
|
||||
up the new prompt automatically via the ``dynamic_prompt_provider``.
|
||||
Triggers a reload when any ``SKILL.md`` changes or an override
|
||||
JSON file is modified. The next node iteration picks up the new
|
||||
prompt via the ``dynamic_prompt_provider`` / per-worker
|
||||
``dynamic_skills_catalog_provider``.
|
||||
|
||||
Silently no-ops when ``watchfiles`` is not installed or when no
|
||||
directories are being watched (e.g. bare mode, no project_root).
|
||||
Silently no-ops when ``watchfiles`` is not installed or there
|
||||
are no paths to watch.
|
||||
"""
|
||||
import asyncio
|
||||
|
||||
try:
|
||||
import watchfiles # noqa: F401 -- optional dep check
|
||||
@@ -196,7 +348,7 @@ class SkillsManager:
|
||||
logger.debug("watchfiles not installed; skill hot-reload disabled")
|
||||
return
|
||||
|
||||
if not self._watched_dirs:
|
||||
if not self._watched_dirs and not self._watched_files:
|
||||
logger.debug("No skill directories to watch; hot-reload skipped")
|
||||
return
|
||||
|
||||
@@ -208,14 +360,13 @@ class SkillsManager:
|
||||
name="skills-hot-reload",
|
||||
)
|
||||
logger.info(
|
||||
"Skill hot-reload enabled (watching %d directories)",
|
||||
"Skill hot-reload enabled (watching %d dirs, %d override files)",
|
||||
len(self._watched_dirs),
|
||||
len(self._watched_files),
|
||||
)
|
||||
|
||||
async def stop_watching(self) -> None:
|
||||
"""Cancel the background watcher task (if running)."""
|
||||
import asyncio
|
||||
|
||||
task = self._watcher_task
|
||||
if task is None:
|
||||
return
|
||||
@@ -228,22 +379,35 @@ class SkillsManager:
|
||||
pass
|
||||
|
||||
async def _watch_loop(self) -> None:
|
||||
"""Background coroutine that watches SKILL.md files and triggers reload."""
|
||||
import asyncio
|
||||
|
||||
"""Watch SKILL.md + override JSON files and trigger reload on change."""
|
||||
import watchfiles
|
||||
|
||||
def _filter(_change: object, path: str) -> bool:
|
||||
return path.endswith("SKILL.md")
|
||||
return path.endswith("SKILL.md") or path.endswith("skills_overrides.json")
|
||||
|
||||
# watchfiles accepts a mix of dirs and files; file watches survive
|
||||
# a tmp+rename (the containing dir sees the event).
|
||||
watch_targets = list(self._watched_dirs)
|
||||
for f in self._watched_files:
|
||||
# watchfiles needs the parent dir for file-level events to fire
|
||||
# reliably through atomic replace; adding the file path directly
|
||||
# works on Linux/macOS inotify/FSEvents but a dir watch is
|
||||
# belt-and-braces.
|
||||
parent = str(Path(f).parent)
|
||||
if parent not in watch_targets:
|
||||
watch_targets.append(parent)
|
||||
|
||||
if not watch_targets:
|
||||
return
|
||||
|
||||
try:
|
||||
async for changes in watchfiles.awatch(
|
||||
*self._watched_dirs,
|
||||
*watch_targets,
|
||||
watch_filter=_filter,
|
||||
debounce=1000,
|
||||
):
|
||||
paths = [p for _, p in changes]
|
||||
logger.info("SKILL.md changes detected: %s", paths)
|
||||
logger.info("Skill state changes detected: %s", paths)
|
||||
try:
|
||||
self._reload()
|
||||
except Exception:
|
||||
|
||||
@@ -0,0 +1,254 @@
|
||||
"""Per-scope skill override store.
|
||||
|
||||
Sits between :mod:`framework.skills.discovery` and
|
||||
:class:`framework.skills.catalog.SkillCatalog`: records the user's
|
||||
per-queen and per-colony decisions about which skills are enabled,
|
||||
who created them (provenance), and any parameter tweaks.
|
||||
|
||||
Two well-known paths back this module:
|
||||
|
||||
* Queen scope: ``~/.hive/agents/queens/{queen_id}/skills_overrides.json``
|
||||
* Colony scope: ``~/.hive/colonies/{colony_name}/skills_overrides.json``
|
||||
|
||||
The schema is intentionally small; see :class:`SkillOverrideStore` for
|
||||
the JSON shape. Atomic writes mirror
|
||||
:class:`framework.skills.trust.TrustedRepoStore` (tmp + rename).
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import UTC, datetime
|
||||
from enum import StrEnum
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
_SCHEMA_VERSION = 1
|
||||
|
||||
|
||||
class Provenance(StrEnum):
|
||||
"""Where a skill came from.
|
||||
|
||||
The override store is the authoritative provenance ledger for anything
|
||||
the UI or the queen tools touched. Framework / user-dropped /
|
||||
project-dropped skills don't need an entry unless they've been
|
||||
explicitly configured.
|
||||
"""
|
||||
|
||||
FRAMEWORK = "framework"
|
||||
PRESET = "preset"
|
||||
USER_DROPPED = "user_dropped"
|
||||
USER_UI_CREATED = "user_ui_created"
|
||||
QUEEN_CREATED = "queen_created"
|
||||
LEARNED_RUNTIME = "learned_runtime"
|
||||
PROJECT_DROPPED = "project_dropped"
|
||||
# Catch-all for skills with no recorded authorship: legacy rows from
|
||||
# before the override store existed, PATCHes that precede any CREATE,
|
||||
# etc. Keeps the ledger honest rather than forcing a guess.
|
||||
OTHER = "other"
|
||||
|
||||
|
||||
@dataclass
|
||||
class OverrideEntry:
|
||||
"""Per-skill override record inside a scope's store."""
|
||||
|
||||
enabled: bool | None = None
|
||||
provenance: Provenance = Provenance.FRAMEWORK
|
||||
trust: str | None = None
|
||||
param_overrides: dict[str, Any] = field(default_factory=dict)
|
||||
notes: str | None = None
|
||||
created_at: datetime | None = None
|
||||
created_by: str | None = None
|
||||
|
||||
def clone(self) -> OverrideEntry:
|
||||
"""Return a deep-enough copy (dict fields are re-allocated)."""
|
||||
return OverrideEntry(
|
||||
enabled=self.enabled,
|
||||
provenance=self.provenance,
|
||||
trust=self.trust,
|
||||
param_overrides=dict(self.param_overrides),
|
||||
notes=self.notes,
|
||||
created_at=self.created_at,
|
||||
created_by=self.created_by,
|
||||
)
|
||||
|
||||
def to_dict(self) -> dict[str, Any]:
|
||||
out: dict[str, Any] = {"provenance": str(self.provenance)}
|
||||
if self.enabled is not None:
|
||||
out["enabled"] = bool(self.enabled)
|
||||
if self.trust is not None:
|
||||
out["trust"] = self.trust
|
||||
if self.param_overrides:
|
||||
out["param_overrides"] = dict(self.param_overrides)
|
||||
if self.notes is not None:
|
||||
out["notes"] = self.notes
|
||||
if self.created_at is not None:
|
||||
out["created_at"] = self.created_at.isoformat()
|
||||
if self.created_by is not None:
|
||||
out["created_by"] = self.created_by
|
||||
return out
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls, raw: dict[str, Any]) -> OverrideEntry:
|
||||
created_at_raw = raw.get("created_at")
|
||||
created_at: datetime | None = None
|
||||
if isinstance(created_at_raw, str):
|
||||
try:
|
||||
created_at = datetime.fromisoformat(created_at_raw)
|
||||
except ValueError:
|
||||
created_at = None
|
||||
provenance_raw = raw.get("provenance") or Provenance.FRAMEWORK
|
||||
try:
|
||||
provenance = Provenance(provenance_raw)
|
||||
except ValueError:
|
||||
logger.warning("override: unknown provenance %r; defaulting to framework", provenance_raw)
|
||||
provenance = Provenance.FRAMEWORK
|
||||
enabled = raw.get("enabled")
|
||||
return cls(
|
||||
enabled=enabled if isinstance(enabled, bool) else None,
|
||||
provenance=provenance,
|
||||
trust=raw.get("trust") if isinstance(raw.get("trust"), str) else None,
|
||||
param_overrides=dict(raw.get("param_overrides") or {}),
|
||||
notes=raw.get("notes") if isinstance(raw.get("notes"), str) else None,
|
||||
created_at=created_at,
|
||||
created_by=raw.get("created_by") if isinstance(raw.get("created_by"), str) else None,
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class SkillOverrideStore:
|
||||
"""Persistent per-scope override file.
|
||||
|
||||
The file is created lazily on first save; a missing file behaves like
|
||||
an empty store (all skills inherit defaults, no metadata recorded).
|
||||
"""
|
||||
|
||||
path: Path
|
||||
scope_label: str = ""
|
||||
version: int = _SCHEMA_VERSION
|
||||
all_defaults_disabled: bool = False
|
||||
overrides: dict[str, OverrideEntry] = field(default_factory=dict)
|
||||
deleted_ui_skills: set[str] = field(default_factory=set)
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Factory
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
@classmethod
|
||||
def load(cls, path: Path, scope_label: str = "") -> SkillOverrideStore:
|
||||
"""Load the store from disk; return an empty store if the file is absent.
|
||||
|
||||
Permissive on parse errors: logs and returns an empty store rather
|
||||
than raising, so a corrupted file never takes down skill loading.
|
||||
"""
|
||||
store = cls(path=path, scope_label=scope_label)
|
||||
try:
|
||||
raw = json.loads(path.read_text(encoding="utf-8"))
|
||||
except FileNotFoundError:
|
||||
return store
|
||||
except Exception as exc:
|
||||
logger.warning("override: failed to read %s (%s); starting empty", path, exc)
|
||||
return store
|
||||
if not isinstance(raw, dict):
|
||||
logger.warning("override: %s is not an object; starting empty", path)
|
||||
return store
|
||||
|
||||
store.version = int(raw.get("version", _SCHEMA_VERSION))
|
||||
store.all_defaults_disabled = bool(raw.get("all_defaults_disabled", False))
|
||||
raw_overrides = raw.get("overrides") or {}
|
||||
if isinstance(raw_overrides, dict):
|
||||
for name, entry_raw in raw_overrides.items():
|
||||
if not isinstance(name, str) or not isinstance(entry_raw, dict):
|
||||
continue
|
||||
store.overrides[name] = OverrideEntry.from_dict(entry_raw)
|
||||
deleted = raw.get("deleted_ui_skills") or []
|
||||
if isinstance(deleted, list):
|
||||
store.deleted_ui_skills = {s for s in deleted if isinstance(s, str)}
|
||||
return store
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Mutations
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def upsert(self, skill_name: str, entry: OverrideEntry) -> None:
|
||||
"""Insert or replace a skill's override entry."""
|
||||
self.overrides[skill_name] = entry
|
||||
# If we're explicitly managing this skill again, lift any tombstone.
|
||||
self.deleted_ui_skills.discard(skill_name)
|
||||
|
||||
def set_enabled(self, skill_name: str, enabled: bool, *, provenance: Provenance | None = None) -> None:
|
||||
"""Convenience: toggle enabled without rewriting other fields."""
|
||||
existing = self.overrides.get(skill_name)
|
||||
if existing is None:
|
||||
existing = OverrideEntry(
|
||||
enabled=enabled,
|
||||
provenance=provenance or Provenance.FRAMEWORK,
|
||||
)
|
||||
else:
|
||||
existing.enabled = enabled
|
||||
if provenance is not None:
|
||||
existing.provenance = provenance
|
||||
self.overrides[skill_name] = existing
|
||||
|
||||
def remove(self, skill_name: str, *, tombstone: bool = True) -> None:
|
||||
"""Drop a skill's override entry; optionally leave a tombstone.
|
||||
|
||||
Tombstones matter for UI-created skills: if the user deletes a
|
||||
queen-scope skill via the UI, we rm-tree its directory, but the
|
||||
file watcher might lag or a background process might have an
|
||||
open handle. A tombstone ensures the loader treats the skill as
|
||||
gone even if a stale SKILL.md lingers.
|
||||
"""
|
||||
self.overrides.pop(skill_name, None)
|
||||
if tombstone:
|
||||
self.deleted_ui_skills.add(skill_name)
|
||||
|
||||
def is_disabled(self, skill_name: str, *, default_enabled: bool) -> bool:
|
||||
"""Return True when this scope's override force-disables the skill."""
|
||||
if self.all_defaults_disabled and default_enabled:
|
||||
# Caller says "default enabled"; master switch flips it off unless
|
||||
# an explicit enabled=True override re-enables.
|
||||
entry = self.overrides.get(skill_name)
|
||||
if entry is not None and entry.enabled is True:
|
||||
return False
|
||||
return True
|
||||
entry = self.overrides.get(skill_name)
|
||||
if entry is None:
|
||||
return not default_enabled
|
||||
if entry.enabled is None:
|
||||
return not default_enabled
|
||||
return not entry.enabled
|
||||
|
||||
def effective_enabled(self, skill_name: str, *, default_enabled: bool) -> bool:
|
||||
"""The inverse of :meth:`is_disabled`, for readability at call sites."""
|
||||
return not self.is_disabled(skill_name, default_enabled=default_enabled)
|
||||
|
||||
def get(self, skill_name: str) -> OverrideEntry | None:
|
||||
return self.overrides.get(skill_name)
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Persistence
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def save(self) -> None:
|
||||
"""Atomic write: tmp + rename. Creates the parent dir if needed."""
|
||||
self.path.parent.mkdir(parents=True, exist_ok=True)
|
||||
payload: dict[str, Any] = {
|
||||
"version": self.version,
|
||||
"all_defaults_disabled": self.all_defaults_disabled,
|
||||
"overrides": {name: entry.to_dict() for name, entry in sorted(self.overrides.items())},
|
||||
}
|
||||
if self.deleted_ui_skills:
|
||||
payload["deleted_ui_skills"] = sorted(self.deleted_ui_skills)
|
||||
tmp = self.path.with_suffix(self.path.suffix + ".tmp")
|
||||
tmp.write_text(json.dumps(payload, indent=2), encoding="utf-8")
|
||||
tmp.replace(self.path)
|
||||
|
||||
|
||||
def utc_now() -> datetime:
|
||||
"""Single source of truth for override timestamps."""
|
||||
return datetime.now(tz=UTC)
|
||||
@@ -20,9 +20,17 @@ from pathlib import Path
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
_DEFAULT_SKILLS_DIR = Path(__file__).parent / "_default_skills"
|
||||
# Bundled skills live in two sibling dirs: ``_default_skills`` (always-on
|
||||
# infra) and ``_preset_skills`` (capability packs, off by default but
|
||||
# still bundled). Tool-gated pre-activation walks both so ``browser_*``
|
||||
# tools still pull in the browser-automation preset even though it isn't
|
||||
# default-enabled in the catalog.
|
||||
_BUNDLED_DIRS: tuple[Path, ...] = (
|
||||
Path(__file__).parent / "_default_skills",
|
||||
Path(__file__).parent / "_preset_skills",
|
||||
)
|
||||
|
||||
# (tool-name prefix, default skill directory name, display name)
|
||||
# (tool-name prefix, skill directory name, display name)
|
||||
_TOOL_GATED_SKILLS: list[tuple[str, str, str]] = [
|
||||
("browser_", "browser-automation", "hive.browser-automation"),
|
||||
]
|
||||
@@ -31,12 +39,23 @@ _BODY_CACHE: dict[str, str] = {}
|
||||
|
||||
|
||||
def _load_body(dir_name: str) -> str:
|
||||
"""Load the markdown body of a framework default skill, cached."""
|
||||
"""Load the markdown body of a bundled skill, cached. Searches every
|
||||
bundled directory (default + preset) so the mapping table doesn't
|
||||
need to know which dir a skill lives in.
|
||||
"""
|
||||
if dir_name in _BODY_CACHE:
|
||||
return _BODY_CACHE[dir_name]
|
||||
|
||||
path = _DEFAULT_SKILLS_DIR / dir_name / "SKILL.md"
|
||||
path: Path | None = None
|
||||
for parent in _BUNDLED_DIRS:
|
||||
candidate = parent / dir_name / "SKILL.md"
|
||||
if candidate.exists():
|
||||
path = candidate
|
||||
break
|
||||
body = ""
|
||||
if path is None:
|
||||
_BODY_CACHE[dir_name] = body
|
||||
return body
|
||||
try:
|
||||
raw = path.read_text(encoding="utf-8")
|
||||
# Strip YAML frontmatter (between the first two '---' fences)
|
||||
|
||||
@@ -318,13 +318,19 @@ class TrustGate:
|
||||
) -> list[ParsedSkill]:
|
||||
"""Return the subset of skills that are trusted for loading.
|
||||
|
||||
- Framework and user-scope skills: always included.
|
||||
- Framework, user, queen_ui, and colony_ui scopes: always included.
|
||||
(UI-created skills are authenticated by the user creating them
|
||||
through the authenticated UI — they do not go through the
|
||||
trusted_repos.json flow.)
|
||||
- Project-scope skills: classified; consent prompt shown if untrusted.
|
||||
"""
|
||||
import os
|
||||
|
||||
# Separate project skills from always-trusted scopes
|
||||
always_trusted = [s for s in skills if s.source_scope != "project"]
|
||||
# UI-authored scopes bypass the trust gate — they're implicitly
|
||||
# trusted because the user authored them through the UI. ``preset``
|
||||
# ships with the framework distribution, so it's trusted too.
|
||||
_bypass_scopes = {"framework", "preset", "user", "queen_ui", "colony_ui"}
|
||||
always_trusted = [s for s in skills if s.source_scope in _bypass_scopes]
|
||||
project_skills = [s for s in skills if s.source_scope == "project"]
|
||||
|
||||
if not project_skills:
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -5,6 +5,8 @@ import ColonyChat from "./pages/colony-chat";
|
||||
import QueenDM from "./pages/queen-dm";
|
||||
import OrgChart from "./pages/org-chart";
|
||||
import PromptLibrary from "./pages/prompt-library";
|
||||
import SkillsLibrary from "./pages/skills-library";
|
||||
import ToolLibrary from "./pages/tool-library";
|
||||
import CredentialsPage from "./pages/credentials";
|
||||
import NotFound from "./pages/not-found";
|
||||
|
||||
@@ -16,7 +18,9 @@ function App() {
|
||||
<Route path="/colony/:colonyId" element={<ColonyChat />} />
|
||||
<Route path="/queen/:queenId" element={<QueenDM />} />
|
||||
<Route path="/org-chart" element={<OrgChart />} />
|
||||
<Route path="/skills-library" element={<SkillsLibrary />} />
|
||||
<Route path="/prompt-library" element={<PromptLibrary />} />
|
||||
<Route path="/tool-library" element={<ToolLibrary />} />
|
||||
<Route path="/credentials" element={<CredentialsPage />} />
|
||||
<Route path="*" element={<NotFound />} />
|
||||
</Route>
|
||||
|
||||
@@ -7,4 +7,8 @@ export const agentsApi = {
|
||||
/** Permanently delete an agent and all its sessions/files. */
|
||||
deleteAgent: (agentPath: string) =>
|
||||
api.delete<{ deleted: string }>("/agents", { agent_path: agentPath }),
|
||||
|
||||
/** Update colony metadata (e.g. icon). */
|
||||
updateMetadata: (agentPath: string, updates: { icon?: string }) =>
|
||||
api.patch<{ ok: boolean }>("/agents/metadata", { agent_path: agentPath, ...updates }),
|
||||
};
|
||||
|
||||
@@ -12,12 +12,13 @@ export class ApiError extends Error {
|
||||
|
||||
async function request<T>(path: string, options: RequestInit = {}): Promise<T> {
|
||||
const url = `${API_BASE}${path}`;
|
||||
const isFormData = options.body instanceof FormData;
|
||||
const headers: Record<string, string> = isFormData
|
||||
? {} // Let browser set Content-Type with boundary for multipart
|
||||
: { "Content-Type": "application/json", ...options.headers as Record<string, string> };
|
||||
const response = await fetch(url, {
|
||||
...options,
|
||||
headers: {
|
||||
"Content-Type": "application/json",
|
||||
...options.headers,
|
||||
},
|
||||
headers,
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
@@ -52,4 +53,6 @@ export const api = {
|
||||
method: "PATCH",
|
||||
body: body ? JSON.stringify(body) : undefined,
|
||||
}),
|
||||
upload: <T>(path: string, formData: FormData) =>
|
||||
request<T>(path, { method: "POST", body: formData }),
|
||||
};
|
||||
|
||||
@@ -0,0 +1,50 @@
|
||||
import { api } from "./client";
|
||||
import type { ToolMeta, McpServerTools } from "./queens";
|
||||
|
||||
export interface ColonySummary {
|
||||
name: string;
|
||||
queen_name: string | null;
|
||||
created_at: string | null;
|
||||
has_allowlist: boolean;
|
||||
enabled_count: number | null;
|
||||
}
|
||||
|
||||
export interface ColonyToolsResponse {
|
||||
colony_name: string;
|
||||
enabled_mcp_tools: string[] | null;
|
||||
stale: boolean;
|
||||
lifecycle: ToolMeta[];
|
||||
synthetic: ToolMeta[];
|
||||
mcp_servers: McpServerTools[];
|
||||
}
|
||||
|
||||
export interface ColonyToolsUpdateResult {
|
||||
colony_name: string;
|
||||
enabled_mcp_tools: string[] | null;
|
||||
refreshed_runtimes: number;
|
||||
note?: string;
|
||||
}
|
||||
|
||||
export const coloniesApi = {
|
||||
/** List every colony on disk with a summary of its tool allowlist. */
|
||||
list: () =>
|
||||
api.get<{ colonies: ColonySummary[] }>(`/colonies/tools-index`),
|
||||
|
||||
/** Enumerate a colony's tool surface (lifecycle + synthetic + MCP). */
|
||||
getTools: (colonyName: string) =>
|
||||
api.get<ColonyToolsResponse>(
|
||||
`/colony/${encodeURIComponent(colonyName)}/tools`,
|
||||
),
|
||||
|
||||
/** Persist a colony's MCP tool allowlist.
|
||||
*
|
||||
* ``null`` resets to "allow every MCP tool". A list of names enables
|
||||
* only those MCP tools. Changes take effect on the next worker spawn;
|
||||
* in-flight workers keep their booted tool list.
|
||||
*/
|
||||
updateTools: (colonyName: string, enabled: string[] | null) =>
|
||||
api.patch<ColonyToolsUpdateResult>(
|
||||
`/colony/${encodeURIComponent(colonyName)}/tools`,
|
||||
{ enabled_mcp_tools: enabled },
|
||||
),
|
||||
};
|
||||
@@ -0,0 +1,80 @@
|
||||
import { api } from "./client";
|
||||
|
||||
/** A SQLite cell value, constrained to JSON-serialisable types that
|
||||
* Python maps into sqlite3 param placeholders without surprises. */
|
||||
export type CellValue = string | number | boolean | null;
|
||||
|
||||
export interface ColumnInfo {
|
||||
name: string;
|
||||
/** SQLite declared type (e.g. "TEXT", "INTEGER"). May be empty string. */
|
||||
type: string;
|
||||
notnull: boolean;
|
||||
/** >0 means part of the primary key (ordinal position). 0 = not PK. */
|
||||
pk: number;
|
||||
dflt_value: string | null;
|
||||
}
|
||||
|
||||
export interface TableOverview {
|
||||
name: string;
|
||||
columns: ColumnInfo[];
|
||||
row_count: number;
|
||||
primary_key: string[];
|
||||
}
|
||||
|
||||
export interface TableRowsResponse {
|
||||
table: string;
|
||||
columns: ColumnInfo[];
|
||||
primary_key: string[];
|
||||
rows: Record<string, CellValue>[];
|
||||
total: number;
|
||||
limit: number;
|
||||
offset: number;
|
||||
}
|
||||
|
||||
export interface UpdateRowRequest {
|
||||
/** Primary key column(s) → value(s). All PK columns must be present. */
|
||||
pk: Record<string, CellValue>;
|
||||
/** Column(s) → new value(s). Cannot include PK columns. */
|
||||
updates: Record<string, CellValue>;
|
||||
}
|
||||
|
||||
export const colonyDataApi = {
|
||||
/** List user tables in the colony's progress.db with row counts.
|
||||
*
|
||||
* Routed by colony directory name (not session) because progress.db
|
||||
* is per-colony — one DB serves every session for that colony, and
|
||||
* the data is reachable even when no session is live. */
|
||||
listTables: (colonyName: string) =>
|
||||
api.get<{ tables: TableOverview[] }>(
|
||||
`/colonies/${encodeURIComponent(colonyName)}/data/tables`,
|
||||
),
|
||||
|
||||
/** Paginated rows for a table. Server enforces limit ≤ 500. */
|
||||
listRows: (
|
||||
colonyName: string,
|
||||
table: string,
|
||||
opts: {
|
||||
limit?: number;
|
||||
offset?: number;
|
||||
orderBy?: string | null;
|
||||
orderDir?: "asc" | "desc";
|
||||
} = {},
|
||||
) => {
|
||||
const params = new URLSearchParams();
|
||||
if (opts.limit != null) params.set("limit", String(opts.limit));
|
||||
if (opts.offset != null) params.set("offset", String(opts.offset));
|
||||
if (opts.orderBy) params.set("order_by", opts.orderBy);
|
||||
if (opts.orderDir) params.set("order_dir", opts.orderDir);
|
||||
const qs = params.toString();
|
||||
return api.get<TableRowsResponse>(
|
||||
`/colonies/${encodeURIComponent(colonyName)}/data/tables/${encodeURIComponent(table)}/rows${qs ? `?${qs}` : ""}`,
|
||||
);
|
||||
},
|
||||
|
||||
/** Update a single row by primary key. Returns {updated: 0|1}. */
|
||||
updateRow: (colonyName: string, table: string, body: UpdateRowRequest) =>
|
||||
api.patch<{ updated: number }>(
|
||||
`/colonies/${encodeURIComponent(colonyName)}/data/tables/${encodeURIComponent(table)}/rows`,
|
||||
body,
|
||||
),
|
||||
};
|
||||
@@ -0,0 +1,105 @@
|
||||
import { api } from "./client";
|
||||
|
||||
/** Final outcome of a single worker run. */
export interface WorkerResult {
  /** Terminal status string (server-defined values). */
  status: string;
  /** Human-readable summary of what the worker did. */
  summary: string;
  /** Error message when one was reported; null otherwise. */
  error: string | null;
  tokens_used: number;
  duration_seconds: number;
}
|
||||
|
||||
/** One entry in a colony session's worker list (live or completed). */
export interface WorkerSummary {
  worker_id: string;
  /** Task text the worker was spawned with. */
  task: string;
  status: string;
  // Numeric timestamp; epoch units (s vs ms) not visible here — confirm with backend.
  started_at: number;
  /** Final result; null presumably means still running — confirm. */
  result: WorkerResult | null;
}
|
||||
|
||||
/** One entry in a colony's shared skills catalog. */
export interface ColonySkill {
  name: string;
  description: string;
  /** Path/location of the installed skill. */
  location: string;
  base_dir: string;
  /** Which scope the skill was sourced from. */
  source_scope: string;
}
|
||||
|
||||
/** One of the colony's default tools (see colonyWorkersApi.listTools). */
export interface ColonyTool {
  name: string;
  description: string;
  /** Canonical credential/provider key (e.g. "hubspot", "gmail") for
   * tools bound to an Aden credential. ``null`` for framework/core
   * tools that don't require a provider credential. */
  provider: string | null;
}
|
||||
|
||||
/** A task row from the colony's per-colony progress.db.
 * Timestamps are transported as strings as stored. */
export interface ProgressTask {
  id: string;
  /** Ordering sequence number; null when unsequenced. */
  seq: number | null;
  priority: number;
  /** Goal text describing what the task should accomplish. */
  goal: string;
  payload: string | null;
  status: string;
  /** Worker assigned to the task; null when unclaimed. */
  worker_id: string | null;
  claim_token: string | null;
  claimed_at: string | null;
  started_at: string | null;
  completed_at: string | null;
  created_at: string;
  updated_at: string;
  retry_count: number;
  max_retries: number;
  /** Most recent error message, if the task has failed before. */
  last_error: string | null;
  /** Parent task id for subtasks; null for top-level tasks — presumably. */
  parent_task_id: string | null;
  source: string | null;
}
|
||||
|
||||
/** A step row from progress.db, belonging to one ProgressTask. */
export interface ProgressStep {
  id: string;
  /** Parent task this step belongs to. */
  task_id: string;
  /** Step order within the task. */
  seq: number;
  title: string;
  detail: string | null;
  /** Step status string (server-defined values). */
  status: string;
  /** Free-form evidence recorded for the step, if any. */
  evidence: string | null;
  worker_id: string | null;
  started_at: string | null;
  completed_at: string | null;
  /** Present only on upsert events; not on snapshot rows. */
  _ts?: string | null;
}
|
||||
|
||||
/** Point-in-time dump of progress.db: tasks plus steps. */
export interface ProgressSnapshot {
  tasks: ProgressTask[];
  steps: ProgressStep[];
}
|
||||
|
||||
export const colonyWorkersApi = {
|
||||
/** List spawned workers (live + completed) for a colony session. */
|
||||
list: (sessionId: string) =>
|
||||
api.get<{ workers: WorkerSummary[] }>(`/sessions/${sessionId}/workers`),
|
||||
|
||||
/** List the colony's shared skills catalog. */
|
||||
listSkills: (sessionId: string) =>
|
||||
api.get<{ skills: ColonySkill[] }>(`/sessions/${sessionId}/colony/skills`),
|
||||
|
||||
/** List the colony's default tools. */
|
||||
listTools: (sessionId: string) =>
|
||||
api.get<{ tools: ColonyTool[] }>(`/sessions/${sessionId}/colony/tools`),
|
||||
|
||||
/** Snapshot of progress.db tasks + steps, optionally filtered by
|
||||
* worker_id. Routed by colony directory name (not session) because
|
||||
* progress.db is per-colony. */
|
||||
progressSnapshot: (colonyName: string, workerId?: string) => {
|
||||
const qs = workerId ? `?worker_id=${encodeURIComponent(workerId)}` : "";
|
||||
return api.get<ProgressSnapshot>(
|
||||
`/colonies/${encodeURIComponent(colonyName)}/progress/snapshot${qs}`,
|
||||
);
|
||||
},
|
||||
|
||||
/** Build the URL for the live progress SSE stream. */
|
||||
progressStreamUrl: (colonyName: string, workerId?: string): string => {
|
||||
const qs = workerId ? `?worker_id=${encodeURIComponent(workerId)}` : "";
|
||||
return `/api/colonies/${encodeURIComponent(colonyName)}/progress/stream${qs}`;
|
||||
},
|
||||
};
|
||||
@@ -64,4 +64,10 @@ export const configApi = {
|
||||
about,
|
||||
...(theme ? { theme } : {}),
|
||||
}),
|
||||
|
||||
uploadAvatar: (file: File) => {
|
||||
const fd = new FormData();
|
||||
fd.append("avatar", file);
|
||||
return api.upload<{ avatar_url: string }>("/config/profile/avatar", fd);
|
||||
},
|
||||
};
|
||||
|
||||
@@ -74,4 +74,28 @@ export const executionApi = {
|
||||
`/sessions/${sessionId}/colony-spawn`,
|
||||
{ colony_name: colonyName, task },
|
||||
),
|
||||
|
||||
/** Lock a queen DM session because the user opened a spawned colony.
|
||||
* After this call /chat returns 409 until compactAndFork creates a new session.
|
||||
*/
|
||||
markColonySpawned: (sessionId: string, colonyName: string) =>
|
||||
api.post<{
|
||||
session_id: string;
|
||||
colony_spawned: boolean;
|
||||
spawned_colony_name: string;
|
||||
}>(`/sessions/${sessionId}/mark-colony-spawned`, {
|
||||
colony_name: colonyName,
|
||||
}),
|
||||
|
||||
/** Compact the locked session and fork into a fresh session under the same queen.
|
||||
* Returns the new session ID; the frontend should navigate the user to it.
|
||||
*/
|
||||
compactAndFork: (sessionId: string) =>
|
||||
api.post<{
|
||||
new_session_id: string;
|
||||
queen_id: string;
|
||||
compacted_from: string;
|
||||
summary_chars: number;
|
||||
messages_compacted: number;
|
||||
}>(`/sessions/${sessionId}/compact-and-fork`),
|
||||
};
|
||||
|
||||
@@ -0,0 +1,66 @@
|
||||
import { api } from "./client";
|
||||
|
||||
/** Supported MCP server transports. */
export type McpTransport = "stdio" | "http" | "sse" | "unix";

/** A configured MCP server as reported by the backend. */
export interface McpServer {
  name: string;
  /** "local": added via UI/CLI (user-editable). "registry": installed from
   * the remote MCP registry. "built-in": baked into the queen package —
   * visible but not removable from the UI. */
  source: "local" | "registry" | "built-in";
  /** Transport in use; widened with `string` to tolerate unknown values. */
  transport: McpTransport | string;
  description: string;
  enabled: boolean;
  /** Outcome of the last health check; null if never checked. */
  last_health_status: "healthy" | "unhealthy" | null;
  last_error: string | null;
  last_health_check_at: string | null;
  /** Tool count from the last probe; null when unknown. */
  tool_count: number | null;
  /** Servers flagged removable:false cannot be deleted from the UI. */
  removable?: boolean;
}
|
||||
|
||||
/** Request body for registering an MCP server. Which optional fields
 * apply depends on the chosen transport (see the section comments). */
export interface AddMcpServerBody {
  name: string;
  transport: McpTransport;
  /** stdio */
  command?: string;
  args?: string[];
  env?: Record<string, string>;
  cwd?: string;
  /** http / sse */
  url?: string;
  headers?: Record<string, string>;
  /** unix */
  socket_path?: string;
  description?: string;
}
|
||||
|
||||
/** Result of a health probe against a single MCP server. */
export interface McpHealthResult {
  name: string;
  status: "healthy" | "unhealthy" | "unknown";
  // Presumably the number of tools the server exposes — confirm with backend.
  tools: number;
  error: string | null;
}

/** Backend MCPError shape when an operation fails. */
export interface McpErrorBody {
  error: string;
  code?: string;
  // what/why/fix: structured troubleshooting hints, when provided.
  what?: string;
  why?: string;
  fix?: string;
}
|
||||
|
||||
export const mcpApi = {
|
||||
listServers: () => api.get<{ servers: McpServer[] }>("/mcp/servers"),
|
||||
addServer: (body: AddMcpServerBody) =>
|
||||
api.post<{ server: McpServer; hint: string }>("/mcp/servers", body),
|
||||
removeServer: (name: string) =>
|
||||
api.delete<{ removed: string }>(`/mcp/servers/${encodeURIComponent(name)}`),
|
||||
setEnabled: (name: string, enabled: boolean) =>
|
||||
api.post<{ name: string; enabled: boolean }>(
|
||||
`/mcp/servers/${encodeURIComponent(name)}/${enabled ? "enable" : "disable"}`,
|
||||
),
|
||||
checkHealth: (name: string) =>
|
||||
api.post<McpHealthResult>(`/mcp/servers/${encodeURIComponent(name)}/health`),
|
||||
};
|
||||
@@ -0,0 +1,19 @@
|
||||
import { api } from "./client";
|
||||
|
||||
/** A user-created prompt. The literal `custom: true` marks these rows. */
export interface CustomPrompt {
  id: string;
  title: string;
  category: string;
  /** The prompt text itself. */
  content: string;
  custom: true;
}
|
||||
|
||||
export const promptsApi = {
|
||||
list: () => api.get<{ prompts: CustomPrompt[] }>("/prompts"),
|
||||
|
||||
create: (title: string, category: string, content: string) =>
|
||||
api.post<CustomPrompt>("/prompts", { title, category, content }),
|
||||
|
||||
delete: (promptId: string) =>
|
||||
api.delete<{ deleted: string }>(`/prompts/${promptId}`),
|
||||
};
|
||||
@@ -16,6 +16,45 @@ export interface QueenSessionResult {
|
||||
status: "live" | "resumed" | "created";
|
||||
}
|
||||
|
||||
/** Metadata describing one tool (lifecycle, synthetic, or MCP). */
export interface ToolMeta {
  name: string;
  description: string;
  /** JSON-schema-style input description, when available. */
  input_schema?: Record<string, unknown>;
  editable?: boolean;
}

/** One MCP server with its tools, each carrying an enabled flag. */
export interface McpServerTools {
  name: string;
  tools: Array<ToolMeta & { enabled: boolean }>;
}

/** A queen's full tool surface, grouped by origin. */
export interface QueenToolsResponse {
  queen_id: string;
  /** MCP allowlist; null means every MCP tool is allowed. */
  enabled_mcp_tools: string[] | null;
  /** True when the effective allowlist comes from the role-based default
   * (no tools.json sidecar saved for this queen). False means the user
   * has explicitly saved an allowlist. */
  is_role_default: boolean;
  stale: boolean;
  lifecycle: ToolMeta[];
  synthetic: ToolMeta[];
  mcp_servers: McpServerTools[];
}

/** Response after persisting a queen's MCP tool allowlist. */
export interface QueenToolsUpdateResult {
  queen_id: string;
  enabled_mcp_tools: string[] | null;
  refreshed_sessions: number;
}

/** Response after dropping the tools.json sidecar (reset to role default). */
export interface QueenToolsResetResult {
  queen_id: string;
  removed: boolean;
  enabled_mcp_tools: string[] | null;
  /** Always true here: the queen is back on the role-based default. */
  is_role_default: true;
  refreshed_sessions: number;
}
|
||||
|
||||
export const queensApi = {
|
||||
/** List all queen profiles (id, name, title). */
|
||||
list: () =>
|
||||
@@ -31,6 +70,13 @@ export const queensApi = {
|
||||
updateProfile: (queenId: string, updates: Partial<QueenProfile>) =>
|
||||
api.patch<QueenProfile>(`/queen/${queenId}/profile`, updates),
|
||||
|
||||
/** Upload queen avatar image. */
|
||||
uploadAvatar: (queenId: string, file: File) => {
|
||||
const fd = new FormData();
|
||||
fd.append("avatar", file);
|
||||
return api.upload<{ avatar_url: string }>(`/queen/${queenId}/avatar`, fd);
|
||||
},
|
||||
|
||||
/** Get or create a persistent session for a queen. */
|
||||
getOrCreateSession: (queenId: string, initialPrompt?: string, initialPhase?: string) =>
|
||||
api.post<QueenSessionResult>(`/queen/${queenId}/session`, {
|
||||
@@ -50,4 +96,24 @@ export const queensApi = {
|
||||
initial_prompt: initialPrompt,
|
||||
initial_phase: initialPhase || undefined,
|
||||
}),
|
||||
|
||||
/** Enumerate the queen's tool surface (lifecycle + synthetic + MCP). */
|
||||
getTools: (queenId: string) =>
|
||||
api.get<QueenToolsResponse>(`/queen/${queenId}/tools`),
|
||||
|
||||
/** Persist the MCP tool allowlist for a queen.
|
||||
*
|
||||
* Pass ``null`` to explicitly allow every MCP tool, or a list to
|
||||
* restrict the queen's tool surface. Lifecycle and synthetic tools
|
||||
* are always enabled and cannot be listed here.
|
||||
*/
|
||||
updateTools: (queenId: string, enabled: string[] | null) =>
|
||||
api.patch<QueenToolsUpdateResult>(`/queen/${queenId}/tools`, {
|
||||
enabled_mcp_tools: enabled,
|
||||
}),
|
||||
|
||||
/** Drop the queen's tools.json sidecar so she falls back to the
|
||||
* role-based default (or allow-all for queens without a role entry). */
|
||||
resetTools: (queenId: string) =>
|
||||
api.delete<QueenToolsResetResult>(`/queen/${queenId}/tools`),
|
||||
};
|
||||
|
||||
@@ -84,6 +84,11 @@ export const sessionsApi = {
|
||||
`/sessions/${sessionId}/triggers/${triggerId}/deactivate`,
|
||||
),
|
||||
|
||||
runTrigger: (sessionId: string, triggerId: string) =>
|
||||
api.post<{ status: string; trigger_id: string }>(
|
||||
`/sessions/${sessionId}/triggers/${triggerId}/run`,
|
||||
),
|
||||
|
||||
colonies: (sessionId: string) =>
|
||||
api.get<{ colonies: string[] }>(`/sessions/${sessionId}/colonies`),
|
||||
|
||||
|
||||
@@ -0,0 +1,154 @@
|
||||
import { api } from "./client";
|
||||
|
||||
/** Where a skill is installed: a single queen, a colony, or user-wide. */
export type SkillScopeKind = "queen" | "colony" | "user";

/** How a skill came to exist (who or what created/installed it). */
export type SkillProvenance =
  | "framework"
  | "preset"
  | "user_dropped"
  | "user_ui_created"
  | "queen_created"
  | "learned_runtime"
  | "project_dropped"
  | "other";
|
||||
|
||||
/** The queen or colony that owns a skill. */
export interface SkillOwner {
  type: "queen" | "colony";
  id: string;
  name: string;
}

/** One skill as listed by the skills endpoints. */
export interface SkillRow {
  name: string;
  description: string;
  /** Which scope the skill was sourced from. */
  source_scope: string;
  provenance: SkillProvenance;
  enabled: boolean;
  /** Whether the UI may edit the skill body. */
  editable: boolean;
  /** Whether the UI may delete the skill. */
  deletable: boolean;
  location: string;
  base_dir?: string;
  /** Visibility restriction list; null presumably means unrestricted — confirm. */
  visibility: string[] | null;
  trust: string | null;
  created_at: string | null;
  created_by: string | null;
  notes: string | null;
  param_overrides?: Record<string, unknown>;
  owner?: SkillOwner | null;
  /** Which queens/colonies can see this skill, when reported. */
  visible_to?: { queens: string[]; colonies: string[] };
  enabled_by_default?: boolean;
}
|
||||
|
||||
/** Skills listing for a single scope (one queen or one colony). */
export interface ScopeSkillsResponse {
  queen_id?: string;
  colony_name?: string;
  all_defaults_disabled: boolean;
  skills: SkillRow[];
  /** Skill names a colony inherits from its queen, when applicable. */
  inherited_from_queen?: string[];
}

/** Aggregated library view across all queens and colonies. */
export interface AggregatedSkillsResponse {
  skills: SkillRow[];
  queens: Array<{ id: string; name: string }>;
  colonies: Array<{ name: string; queen_id: string | null }>;
}

/** Available scope targets (queens and colonies). */
export interface SkillScopesResponse {
  queens: Array<{ id: string; name: string }>;
  colonies: Array<{ name: string; queen_id: string | null }>;
}

/** Full detail for one skill, including its body text. */
export interface SkillDetailResponse {
  name: string;
  description: string;
  source_scope: string;
  location: string;
  base_dir: string;
  /** The skill's markdown/body content. */
  body: string;
  visibility: string[] | null;
}

/** Payload for creating a new skill in a scope. */
export interface SkillCreatePayload {
  name: string;
  description: string;
  body: string;
  /** Extra files to install alongside the skill body. */
  files?: Array<{ path: string; content: string }>;
  enabled?: boolean;
  notes?: string | null;
  /** Overwrite an existing skill with the same name. */
  replace_existing?: boolean;
}

/** Partial update for an installed skill. */
export interface SkillPatchPayload {
  enabled?: boolean;
  param_overrides?: Record<string, unknown>;
  notes?: string | null;
  all_defaults_disabled?: boolean;
}
|
||||
|
||||
const scopePath = (scope: "queen" | "colony", targetId: string) =>
|
||||
scope === "queen"
|
||||
? `/queen/${encodeURIComponent(targetId)}/skills`
|
||||
: `/colonies/${encodeURIComponent(targetId)}/skills`;
|
||||
|
||||
export const skillsApi = {
|
||||
// Aggregated library
|
||||
listAll: () => api.get<AggregatedSkillsResponse>("/skills"),
|
||||
listScopes: () => api.get<SkillScopesResponse>("/skills/scopes"),
|
||||
getDetail: (name: string) =>
|
||||
api.get<SkillDetailResponse>(`/skills/${encodeURIComponent(name)}`),
|
||||
|
||||
// Per-scope
|
||||
listForQueen: (queenId: string) =>
|
||||
api.get<ScopeSkillsResponse>(`/queen/${encodeURIComponent(queenId)}/skills`),
|
||||
listForColony: (colonyName: string) =>
|
||||
api.get<ScopeSkillsResponse>(
|
||||
`/colonies/${encodeURIComponent(colonyName)}/skills`,
|
||||
),
|
||||
|
||||
create: (
|
||||
scope: "queen" | "colony",
|
||||
targetId: string,
|
||||
payload: SkillCreatePayload,
|
||||
) => api.post<SkillRow>(scopePath(scope, targetId), payload),
|
||||
|
||||
patch: (
|
||||
scope: "queen" | "colony",
|
||||
targetId: string,
|
||||
skillName: string,
|
||||
payload: SkillPatchPayload,
|
||||
) =>
|
||||
api.patch<{ name: string; enabled: boolean | null; ok: boolean }>(
|
||||
`${scopePath(scope, targetId)}/${encodeURIComponent(skillName)}`,
|
||||
payload,
|
||||
),
|
||||
|
||||
putBody: (
|
||||
scope: "queen" | "colony",
|
||||
targetId: string,
|
||||
skillName: string,
|
||||
payload: { body: string; description?: string },
|
||||
) =>
|
||||
api.put<{ name: string; installed_path: string }>(
|
||||
`${scopePath(scope, targetId)}/${encodeURIComponent(skillName)}/body`,
|
||||
payload,
|
||||
),
|
||||
|
||||
remove: (scope: "queen" | "colony", targetId: string, skillName: string) =>
|
||||
api.delete<{ name: string; removed: boolean }>(
|
||||
`${scopePath(scope, targetId)}/${encodeURIComponent(skillName)}`,
|
||||
),
|
||||
|
||||
reload: (scope: "queen" | "colony", targetId: string) =>
|
||||
api.post<{ ok: boolean }>(`${scopePath(scope, targetId)}/reload`),
|
||||
|
||||
// Multipart upload. File may be a SKILL.md or a .zip bundle.
|
||||
upload: (formData: FormData) =>
|
||||
api.upload<{
|
||||
name: string;
|
||||
installed_path: string;
|
||||
replaced: boolean;
|
||||
scope: SkillScopeKind;
|
||||
target_id: string | null;
|
||||
enabled: boolean;
|
||||
}>("/skills/upload", formData),
|
||||
};
|
||||
@@ -12,14 +12,21 @@ export interface LiveSession {
|
||||
loaded_at: number;
|
||||
uptime_seconds: number;
|
||||
intro_message?: string;
|
||||
/** Queen operating phase — "independent" (DM), "working" (workers running), or "reviewing" (workers done) */
|
||||
queen_phase?: "independent" | "working" | "reviewing";
|
||||
/** Queen operating phase — "independent" (DM), "incubating" (drafting a
|
||||
* colony spec after the readiness eval approved), "working" (workers
|
||||
* running), or "reviewing" (workers done) */
|
||||
queen_phase?: "independent" | "incubating" | "working" | "reviewing";
|
||||
/** Whether the queen's LLM supports image content in messages */
|
||||
queen_supports_images?: boolean;
|
||||
/** Selected queen identity ID (e.g. "queen_technology") */
|
||||
queen_id?: string | null;
|
||||
/** Selected queen display name (e.g. "Alexandra") */
|
||||
queen_name?: string | null;
|
||||
/** True after the user has clicked into a colony spawned from this DM —
|
||||
* /chat returns 409 until the user compacts and forks into a new session. */
|
||||
colony_spawned?: boolean;
|
||||
/** Name of the colony that locked this session (set with colony_spawned). */
|
||||
spawned_colony_name?: string | null;
|
||||
/** Present in 409 conflict responses when worker is still loading */
|
||||
loading?: boolean;
|
||||
}
|
||||
@@ -55,6 +62,12 @@ export interface EntryPoint {
|
||||
task?: string;
|
||||
/** Seconds until the next timer fire (only present for timer entry points). */
|
||||
next_fire_in?: number;
|
||||
/** Absolute wall-clock time of the next timer fire (epoch ms). */
|
||||
next_fire_at?: number;
|
||||
/** Number of times this trigger has fired during the session's lifetime. */
|
||||
fire_count?: number;
|
||||
/** Wall-clock time of the most recent fire (epoch ms). */
|
||||
last_fired_at?: number;
|
||||
}
|
||||
|
||||
export interface WorkerEntry {
|
||||
@@ -73,6 +86,7 @@ export interface DiscoverEntry {
|
||||
name: string;
|
||||
description: string;
|
||||
category: string;
|
||||
created_at?: string | null;
|
||||
session_count: number;
|
||||
run_count: number;
|
||||
node_count: number;
|
||||
@@ -80,6 +94,7 @@ export interface DiscoverEntry {
|
||||
tags: string[];
|
||||
last_active: string | null;
|
||||
is_loaded: boolean;
|
||||
icon?: string | null;
|
||||
workers: WorkerEntry[];
|
||||
}
|
||||
|
||||
@@ -264,6 +279,7 @@ export type EventTypeName =
|
||||
| "colony_created"
|
||||
| "credentials_required"
|
||||
| "queen_phase_changed"
|
||||
| "colony_fork_marker"
|
||||
| "subagent_report"
|
||||
| "trigger_available"
|
||||
| "trigger_activated"
|
||||
|
||||
@@ -1,11 +1,31 @@
|
||||
import { useState } from "react";
|
||||
import { useState, useEffect } from "react";
|
||||
import { useLocation } from "react-router-dom";
|
||||
import { useColony } from "@/context/ColonyContext";
|
||||
import { useHeaderActions } from "@/context/HeaderActionsContext";
|
||||
import { useModel } from "@/context/ModelContext";
|
||||
import { getQueenForAgent } from "@/lib/colony-registry";
|
||||
import { Crown, KeyRound, Network } from "lucide-react";
|
||||
import { Crown, Component, KeyRound, Network, ChevronDown } from "lucide-react";
|
||||
import SettingsModal from "@/components/SettingsModal";
|
||||
import ModelSwitcher from "@/components/ModelSwitcher";
|
||||
|
||||
/** Round header button showing the user's avatar image; falls back to
 * text initials when the avatar image fails to load. */
function UserAvatarButton({ initials, onClick, avatarVersion }: { initials: string; onClick: () => void; avatarVersion: number }) {
  // Optimistically assume an avatar exists; the <img onError> flips this off.
  const [hasAvatar, setHasAvatar] = useState(true);
  // avatarVersion works as a cache-buster so a fresh upload is re-fetched.
  const url = `/api/config/profile/avatar?v=${avatarVersion}`;
  // Reset hasAvatar when version changes (new upload)
  useEffect(() => setHasAvatar(true), [avatarVersion]);
  return (
    <button
      onClick={onClick}
      className="w-7 h-7 rounded-full bg-primary/15 flex items-center justify-center hover:bg-primary/25 transition-colors overflow-hidden"
      title="Profile settings"
    >
      {hasAvatar ? (
        <img src={url} alt="" className="w-full h-full object-cover" onError={() => setHasAvatar(false)} />
      ) : (
        <span className="text-[10px] font-bold text-primary">{initials || "U"}</span>
      )}
    </button>
  );
}
|
||||
|
||||
interface AppHeaderProps {
|
||||
onOpenQueenProfile?: (queenId: string) => void;
|
||||
@@ -13,11 +33,23 @@ interface AppHeaderProps {
|
||||
|
||||
export default function AppHeader({ onOpenQueenProfile }: AppHeaderProps) {
|
||||
const location = useLocation();
|
||||
const { colonies, queens, queenProfiles, userProfile } = useColony();
|
||||
const { colonies, queens, queenProfiles, userProfile, userAvatarVersion } = useColony();
|
||||
const { actions } = useHeaderActions();
|
||||
const { currentModel, currentProvider, availableModels, activeSubscription, subscriptions } = useModel();
|
||||
const [settingsOpen, setSettingsOpen] = useState(false);
|
||||
const [settingsSection, setSettingsSection] = useState<"profile" | "byok">("profile");
|
||||
|
||||
// Derive active model display label
|
||||
const activeSubInfo = activeSubscription
|
||||
? subscriptions.find((s) => s.id === activeSubscription)
|
||||
: null;
|
||||
const modelsProvider = activeSubInfo?.provider || currentProvider;
|
||||
const models = availableModels[modelsProvider] || [];
|
||||
const currentModelInfo = models.find((m) => m.id === currentModel);
|
||||
const modelLabel = currentModelInfo
|
||||
? currentModelInfo.label.split(" - ")[0]
|
||||
: currentModel || "No model";
|
||||
|
||||
// Derive page title + icon from current route
|
||||
const colonyMatch = location.pathname.match(/^\/colony\/(.+)/);
|
||||
const queenMatch = location.pathname.match(/^\/queen\/(.+)/);
|
||||
@@ -31,15 +63,14 @@ export default function AppHeader({ onOpenQueenProfile }: AppHeaderProps) {
|
||||
const colonyId = colonyMatch[1];
|
||||
const colony = colonies.find((c) => c.id === colonyId);
|
||||
title = colony?.name ?? colonyId;
|
||||
// Show queen profile button when the colony has a linked queen profile
|
||||
if (colony?.queenProfileId) {
|
||||
const profile = queenProfiles.find((q) => q.id === colony.queenProfileId);
|
||||
if (profile) {
|
||||
queenIdForProfile = profile.id;
|
||||
queenTitle = profile.title ?? null;
|
||||
icon = <Crown className="w-4 h-4 text-primary" />;
|
||||
}
|
||||
}
|
||||
icon = <Component className="w-4 h-4 text-primary" />;
|
||||
} else if (queenMatch) {
|
||||
const queenId = queenMatch[1];
|
||||
const profile = queenProfiles.find((q) => q.id === queenId);
|
||||
@@ -95,24 +126,24 @@ export default function AppHeader({ onOpenQueenProfile }: AppHeaderProps) {
|
||||
)}
|
||||
<div className="flex items-center gap-2">
|
||||
{actions}
|
||||
<ModelSwitcher
|
||||
onOpenSettings={() => {
|
||||
<button
|
||||
onClick={() => {
|
||||
setSettingsSection("byok");
|
||||
setSettingsOpen(true);
|
||||
}}
|
||||
/>
|
||||
<button
|
||||
className="flex items-center gap-1.5 px-2.5 py-1 rounded-md text-xs font-medium text-muted-foreground hover:text-foreground hover:bg-muted/40 transition-colors border border-transparent hover:border-border/40"
|
||||
>
|
||||
<span className="max-w-[120px] truncate">{modelLabel}</span>
|
||||
<ChevronDown className="w-3 h-3" />
|
||||
</button>
|
||||
<UserAvatarButton
|
||||
initials={initials}
|
||||
avatarVersion={userAvatarVersion}
|
||||
onClick={() => {
|
||||
setSettingsSection("profile");
|
||||
setSettingsOpen(true);
|
||||
}}
|
||||
className="w-7 h-7 rounded-full bg-primary/15 flex items-center justify-center hover:bg-primary/25 transition-colors"
|
||||
title="Profile settings"
|
||||
>
|
||||
<span className="text-[10px] font-bold text-primary">
|
||||
{initials || "U"}
|
||||
</span>
|
||||
</button>
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
||||
@@ -2,38 +2,34 @@ import { useState, useEffect } from "react";
|
||||
|
||||
type BridgeStatus = "checking" | "connected" | "disconnected" | "offline";
|
||||
|
||||
const BRIDGE_STATUS_URL = "/api/browser/status";
|
||||
const POLL_INTERVAL_MS = 3000;
|
||||
const BRIDGE_STATUS_STREAM_URL = "/api/browser/status/stream";
|
||||
|
||||
export default function BrowserStatusBadge() {
|
||||
const [status, setStatus] = useState<BridgeStatus>("checking");
|
||||
|
||||
useEffect(() => {
|
||||
let cancelled = false;
|
||||
const es = new EventSource(BRIDGE_STATUS_STREAM_URL);
|
||||
|
||||
const check = async () => {
|
||||
es.addEventListener("status", (e) => {
|
||||
try {
|
||||
const res = await fetch(BRIDGE_STATUS_URL, {
|
||||
signal: AbortSignal.timeout(2000),
|
||||
});
|
||||
if (cancelled) return;
|
||||
if (res.ok) {
|
||||
const data = await res.json();
|
||||
setStatus(data.connected ? "connected" : "disconnected");
|
||||
} else {
|
||||
setStatus("offline");
|
||||
}
|
||||
const data = JSON.parse((e as MessageEvent).data) as {
|
||||
bridge: boolean;
|
||||
connected: boolean;
|
||||
};
|
||||
if (!data.bridge) setStatus("offline");
|
||||
else setStatus(data.connected ? "connected" : "disconnected");
|
||||
} catch {
|
||||
if (!cancelled) setStatus("offline");
|
||||
setStatus("offline");
|
||||
}
|
||||
};
|
||||
});
|
||||
|
||||
check();
|
||||
const timer = setInterval(check, POLL_INTERVAL_MS);
|
||||
return () => {
|
||||
cancelled = true;
|
||||
clearInterval(timer);
|
||||
};
|
||||
// EventSource auto-reconnects on transient errors; the next
|
||||
// successful ``status`` event will overwrite this. We only flip
|
||||
// to "offline" so the badge doesn't get stuck on "connected"
|
||||
// after a backend restart.
|
||||
es.onerror = () => setStatus("offline");
|
||||
|
||||
return () => es.close();
|
||||
}, []);
|
||||
|
||||
if (status === "checking") return null;
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,27 @@
|
||||
import { useCallback } from "react";
|
||||
import { coloniesApi } from "@/api/colonies";
|
||||
import ToolsEditor from "./ToolsEditor";
|
||||
|
||||
/** Settings section wiring ToolsEditor to a colony's tool allowlist
 * endpoints (coloniesApi.getTools / updateTools). */
export default function ColonyToolsSection({
  colonyName,
}: {
  colonyName: string;
}) {
  // Memoized so ToolsEditor receives stable callbacks across re-renders.
  const fetchSnapshot = useCallback(
    () => coloniesApi.getTools(colonyName),
    [colonyName],
  );
  const saveAllowlist = useCallback(
    (enabled: string[] | null) => coloniesApi.updateTools(colonyName, enabled),
    [colonyName],
  );
  return (
    <ToolsEditor
      subjectKey={`colony:${colonyName}`}
      title="Tools"
      caveat="Changes apply to the next worker spawn. Running workers keep the tool list they booted with."
      fetchSnapshot={fetchSnapshot}
      saveAllowlist={saveAllowlist}
    />
  );
}
|
||||
File diff suppressed because it is too large
Load Diff
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user