Compare commits
424 Commits
| SHA1 | Author | Date | |
|---|---|---|---|
| 3a94f52009 | |||
| 522e0f511e | |||
| e6310f1243 | |||
| 12ffacccab | |||
| 8c36b1575c | |||
| 6540f7b31e | |||
| a09eac06f1 | |||
| b939a875a7 | |||
| b826e70d8c | |||
| 6f2f037c9c | |||
| c147364d8c | |||
| 35bd497750 | |||
| 574c4bbe33 | |||
| d22a01682a | |||
| 0c6f0f8aef | |||
| 0e8efa7bcc | |||
| 7b1dda7bf3 | |||
| 725dd1f410 | |||
| de4b2dc151 | |||
| 0784cea314 | |||
| 20bbf08278 | |||
| f8233bda56 | |||
| 76a7dd4bd5 | |||
| 73511a3c59 | |||
| a0817fcde4 | |||
| 628ce9ca12 | |||
| cc4213a942 | |||
| d12d5b7e8b | |||
| 038c5fd807 | |||
| 3d5f2595c9 | |||
| 7881177f1f | |||
| 2cfea915f4 | |||
| ac46a1be72 | |||
| 7b0b472167 | |||
| 697aae33fe | |||
| d26e7f33d2 | |||
| 6357597e88 | |||
| 579f1d7512 | |||
| 965264c973 | |||
| e80d275321 | |||
| 5b45fac435 | |||
| 4794c8b816 | |||
| 5492366c31 | |||
| ae2aa30edf | |||
| dd69a53de1 | |||
| 062a4e3166 | |||
| fe9a903928 | |||
| 7c3bada70c | |||
| 4ef951447d | |||
| ccb6556a41 | |||
| 5ca5021fc1 | |||
| 9eeba74851 | |||
| facd919371 | |||
| cb1484be85 | |||
| 82ce6bed68 | |||
| efdb404655 | |||
| da361f735d | |||
| eea0429f93 | |||
| 833aa4bc7a | |||
| 0af597881f | |||
| 6fae1f04c8 | |||
| 8c4085f5e8 | |||
| 53240eb888 | |||
| de8d6f0946 | |||
| ea707438f2 | |||
| 445c9600ab | |||
| 2ab5e6d784 | |||
| e7f9b7d791 | |||
| 3cb0c69a96 | |||
| 22d75bfb05 | |||
| 357df1bbcb | |||
| 386bbd5780 | |||
| 235022b35d | |||
| 4d8f312c3e | |||
| 4651a6a85a | |||
| ea9c163438 | |||
| 77cc169606 | |||
| 8c6428f445 | |||
| 44cb0c0f4c | |||
| 2621fb88b1 | |||
| a70f92edbe | |||
| b2efa179ea | |||
| 8c6e76d052 | |||
| c7f1fbf19f | |||
| 7047ecbf46 | |||
| b96ee5aaab | |||
| 6744bea01a | |||
| 390038225b | |||
| b55c8fdf86 | |||
| e9aea0bbc4 | |||
| 0ba1fa8262 | |||
| 0fd96d410e | |||
| c658a7c50b | |||
| 56c3659bda | |||
| 14f927996c | |||
| 8a0ec070b8 | |||
| 80cd77ac30 | |||
| c67521a09c | |||
| 8da06f4f90 | |||
| 46e0413eb8 | |||
| 81731587ff | |||
| 4e9d9bf1ea | |||
| 2644ab953d | |||
| e7daa59573 | |||
| 1bec43afad | |||
| 3d1357595d | |||
| 59ccbba810 | |||
| 8b2ae369ac | |||
| 96a667cbd9 | |||
| 17150a53bd | |||
| c1d7b0ee69 | |||
| 16ea9b52d3 | |||
| dcbfd4ab01 | |||
| b762020793 | |||
| 4ffddc53e6 | |||
| 24bcc5aea7 | |||
| 3c91119f67 | |||
| 923e773c14 | |||
| 199c3a235e | |||
| a881fe68da | |||
| 6b9040477f | |||
| c7cc031060 | |||
| 93c0ef672a | |||
| 67d55e6cce | |||
| 0907ff9cec | |||
| ed2e7125ac | |||
| f39c1c87af | |||
| 1229b4ad4d | |||
| 0d11a946a5 | |||
| b007ed753b | |||
| bb39424e99 | |||
| b27c7a029e | |||
| a3433f2c9e | |||
| 24ef2c247d | |||
| a8f9661626 | |||
| 3005bcaa96 | |||
| 40c4591d65 | |||
| e2bfb9d3af | |||
| e55cea97ef | |||
| ddaafe0307 | |||
| c17205a453 | |||
| 8e4468851c | |||
| ccf4216841 | |||
| 82ffcb17ac | |||
| 4da5bcc1e4 | |||
| 3df7194003 | |||
| 6f1f27b6e9 | |||
| 7b52ed9fa7 | |||
| 4d32526a29 | |||
| 656401e199 | |||
| f2e51157dc | |||
| 0d13c805b1 | |||
| b1ec64438c | |||
| 90aadf247a | |||
| 49317ac5f5 | |||
| 7216e9d9f0 | |||
| 91b1070d80 | |||
| 08aeffd977 | |||
| 651b57b928 | |||
| 8c10fc2e1c | |||
| e3154ca0ee | |||
| 84a92af41b | |||
| 78fc62210a | |||
| 2fd7e9172a | |||
| ca63fd9ee9 | |||
| b99f25c8d7 | |||
| e972112074 | |||
| 6e97191f21 | |||
| 023fb9b8d0 | |||
| b7924b1ad0 | |||
| b6640b8592 | |||
| 43a1d5797c | |||
| 5cb814f2dc | |||
| f52c44821a | |||
| 97432ea08c | |||
| 0abd1125b7 | |||
| 803337ec74 | |||
| 2b055d4d42 | |||
| dde4dfaec9 | |||
| 6be026fcb1 | |||
| 3c2161aad5 | |||
| e74ebe6835 | |||
| d788e5b2f7 | |||
| 583a5b41b4 | |||
| 83cc44bdef | |||
| 558813e7fa | |||
| aba0ff07ba | |||
| 4303a36df0 | |||
| e68d8ef10b | |||
| c6b6a5a2f7 | |||
| 18f5f078fc | |||
| cc6ec97a75 | |||
| 44d114f0d0 | |||
| 9e71f16d15 | |||
| 28cad2376c | |||
| 8222cd306e | |||
| b50f237506 | |||
| 916803889f | |||
| 59b1bc9338 | |||
| 37672c5581 | |||
| 7b0948cd62 | |||
| 4aa5fd7a90 | |||
| d20b617008 | |||
| c4ee12532f | |||
| 36ebf27e3e | |||
| ae1599c66a | |||
| 810cf5a6d3 | |||
| 1ee0d5a2e8 | |||
| 9051c443fb | |||
| e5a93b059f | |||
| 589c5b06fe | |||
| be94c611bd | |||
| 45df68c146 | |||
| 4fdbc438f9 | |||
| 2231dc5742 | |||
| 446844b2ad | |||
| 78301274cd | |||
| e719523434 | |||
| 451a5d55d2 | |||
| e2a21b3613 | |||
| 5c251645d3 | |||
| 8783f372fc | |||
| 2790d13bb6 | |||
| 900d94e49f | |||
| 70e3eb539b | |||
| deeb7de800 | |||
| 57ad98005d | |||
| 79c5d43006 | |||
| 252710fb41 | |||
| 22df99ef51 | |||
| edc3135797 | |||
| 27b15789fb | |||
| 5ba5933edc | |||
| 50eb4b0e8f | |||
| 3e4a4c9924 | |||
| c47987e73c | |||
| 256b52b818 | |||
| 8f5daf0569 | |||
| af5c72e785 | |||
| 958bafea29 | |||
| 5cdc01cb8c | |||
| 6979ea825d | |||
| d6093a560f | |||
| 2f58cce781 | |||
| ab76a66646 | |||
| c575ff3fe7 | |||
| 8668d103a8 | |||
| 133f393f8b | |||
| fd3ef36a15 | |||
| aa281aad34 | |||
| a3d0c7e0cb | |||
| de3042ba3f | |||
| 326d7f201c | |||
| db30ef3094 | |||
| e3d1cb6739 | |||
| 846f3f2470 | |||
| 913437ea0b | |||
| 520bd635e2 | |||
| b7d850ddd0 | |||
| 0a251278f1 | |||
| 857af8e6a3 | |||
| 273d4ec66e | |||
| eeb46a2b3e | |||
| b5e05fefae | |||
| bdfbb7698a | |||
| 35b1eadb7f | |||
| 38036eb7bd | |||
| 70d90fda19 | |||
| 9dc214cfd2 | |||
| 1e3dcbbbc2 | |||
| 53b095cdcb | |||
| d04862053f | |||
| df0e0ea082 | |||
| b1724ee360 | |||
| a59493835d | |||
| 334af2b74e | |||
| 81c72949ce | |||
| 97fd45d36a | |||
| caebbea1aa | |||
| 574a3a284e | |||
| 8ea3fb8cfe | |||
| 69d16a8f6c | |||
| f16cb0ea1f | |||
| e0f1e9d494 | |||
| 7fb0da26fc | |||
| f5f72c1c9c | |||
| 06d0a16201 | |||
| 0964758b12 | |||
| c25abdfd84 | |||
| af720bb569 | |||
| b763226a64 | |||
| 9b7580d22b | |||
| c23c274ac7 | |||
| 1335a15341 | |||
| 2a1cbaa582 | |||
| 74cba57cce | |||
| 7616de2417 | |||
| d96875932a | |||
| 238d90871a | |||
| e38e1563ba | |||
| e3d8b89b69 | |||
| ec64c14d37 | |||
| fb5b7ed9de | |||
| da0aa65c31 | |||
| cbf7cc0a37 | |||
| 802f64f4a7 | |||
| 9ad95fde59 | |||
| b812f6a03a | |||
| 0299a87d0c | |||
| 4aa2358211 | |||
| bc8a97079e | |||
| 6eaa609f63 | |||
| 8f0101b273 | |||
| 5ee98ac7cf | |||
| c058029ac0 | |||
| 6a79728d99 | |||
| 200c202465 | |||
| 791da46f59 | |||
| 6377c5b094 | |||
| 8f4e901c3c | |||
| 4be61ebfc7 | |||
| ac46ce7bfb | |||
| 110d7e0075 | |||
| 749185e760 | |||
| 5cb75d1822 | |||
| 3febef106d | |||
| db18186825 | |||
| 87918b5263 | |||
| 01f258c4c4 | |||
| 3d992bbda3 | |||
| df43f36385 | |||
| bdd099bb78 | |||
| acca008772 | |||
| 0bf4d8b9fa | |||
| 7a2752eb42 | |||
| c65b43c21b | |||
| 90f376136e | |||
| d5ea28f8f3 | |||
| 1ccfc7aefa | |||
| 64830a6720 | |||
| 514d2828fa | |||
| 5705647364 | |||
| 8a3e1e68a9 | |||
| 4c900e9ab2 | |||
| fa0518b249 | |||
| 6a5bc0d484 | |||
| d288c865d0 | |||
| 81051a11fc | |||
| c4a8c73b24 | |||
| 2b8ed0eb05 | |||
| 40c530603b | |||
| dee3980dbe | |||
| d19cb2843e | |||
| ea31b037b8 | |||
| 5fe924318d | |||
| 8e6a812ce6 | |||
| 1565fd52e1 | |||
| 53f5f93deb | |||
| 21afac2b59 | |||
| c03f1caa58 | |||
| a5e928ac95 | |||
| 648e3cd52a | |||
| b216df76a0 | |||
| ddee82eaef | |||
| 6e88bb0205 | |||
| 0aa19721c3 | |||
| cf1e26b012 | |||
| 47e02c0821 | |||
| 7e1ebf1c26 | |||
| ecbf543e4c | |||
| 7daca39bb2 | |||
| d8712ceb72 | |||
| 5a90a4ba42 | |||
| e69c381331 | |||
| 8f608048f9 | |||
| df29c49bd0 | |||
| b3759db83b | |||
| 8308207be8 | |||
| 6b86c602c7 | |||
| d9644eaa39 | |||
| 3976ea6934 | |||
| cc00ae8999 | |||
| 70bf337c03 | |||
| 6ecdbf47b0 | |||
| e0e1abbb64 | |||
| cb8c26ee18 | |||
| 3d6beca577 | |||
| bed9670395 | |||
| 61bb0b6594 | |||
| e7506fcd25 | |||
| 7cc92eb8c3 | |||
| 3a70243b82 | |||
| a5b17a293b | |||
| c92662bdb1 | |||
| 19469ff404 | |||
| 7fcb51985d | |||
| 3dbd20040a | |||
| c9d62139af | |||
| 6637bc8d96 | |||
| 30ad3edfbf | |||
| d9037172d8 | |||
| df41732e95 | |||
| cd9a625041 | |||
| 420d703138 | |||
| 66866e524d | |||
| 33e6c018a3 | |||
| 1ac50ab532 | |||
| 4df924d3d7 | |||
| 8f2d87cc5d | |||
| 4b795584f6 | |||
| 6024ae4241 | |||
| aaa5d661c3 | |||
| 2e5670ace6 | |||
| 634658e829 | |||
| dc64cc68a1 | |||
| e8d56c815d | |||
| 6022f6c911 | |||
| dacda3337f | |||
| 267f797abc | |||
| 42fd1ec8d1 | |||
| 81774d5d0e | |||
| d1cbfd1e54 | |||
| fd71501215 | |||
| 406bfb23b9 |
@@ -1,4 +1,73 @@
{
  "permissions": {
    "allow": [
      "Bash(grep -n \"_is_context_too_large_error\" core/framework/agent_loop/agent_loop.py core/framework/agent_loop/internals/*.py)",
      "Read(//^class/ {cls=$3} /def test_/**)",
      "Read(//^ @pytest.mark.asyncio/{getline n; print NR\": \"n} /^ def test_/**)",
      "Bash(python3)",
      "Bash(grep -nE 'Tool\\\\\\(\\\\s*$|name=\"[a-z_]+\",' core/framework/tools/queen_lifecycle_tools.py)",
      "Bash(awk -F'\"' '{print $2}')",
      "Bash(grep -n \"create_colony\\\\|colony-spawn\\\\|colony_spawn\" /home/timothy/aden/hive/core/framework/agents/queen/nodes/__init__.py /home/timothy/aden/hive/core/framework/tools/*.py)",
      "Bash(git stash:*)",
      "Bash(python3 -c \"import sys,json; d=json.loads\\(sys.stdin.read\\(\\)\\); print\\('keys:', list\\(d.keys\\(\\)\\)[:10]\\)\")",
      "Bash(python3 -c ':*)",
      "Bash(uv run:*)",
      "Read(//tmp/**)",
      "Bash(grep -n \"useColony\\\\|const { queens, queenProfiles\" /home/timothy/aden/hive/core/frontend/src/pages/queen-dm.tsx)",
      "Bash(awk 'NR==385,/\\\\}, \\\\[/' /home/timothy/aden/hive/core/frontend/src/pages/queen-dm.tsx)",
      "Bash(xargs -I{} sh -c 'if ! grep -q \"^import base64\\\\|^from base64\" \"{}\"; then echo \"MISSING: {}\"; fi')",
      "Bash(find /home/timothy/aden/hive/core/framework -name \"*.py\" -type f -exec grep -l \"FileConversationStore\\\\|class.*ConversationStore\" {} \\\\;)",
      "Bash(find /home/timothy/aden/hive/core/framework -name \"*.py\" -exec grep -l \"run_parallel_workers\\\\|create_colony\" {} \\\\;)",
      "Bash(awk '/^ async def execute\\\\\\(self, ctx: AgentContext\\\\\\)/,/^ async def [a-z_]+/ {print NR\": \"$0}' /home/timothy/aden/hive/core/framework/agent_loop/agent_loop.py)",
      "Bash(grep -r \"max_concurrent_workers\\\\|max_depth\\\\|recursion\\\\|spawn.*bomb\" /home/timothy/aden/hive/core/framework/host/*.py)",
      "Bash(wc -l /home/timothy/aden/hive/tools/src/gcu/browser/*.py /home/timothy/aden/hive/tools/src/gcu/browser/tools/*.py)",
      "Bash(file /tmp/gcu_verify/*.png)",
      "Bash(ps -eo pid,cmd)",
      "Bash(ps -o pid,lstart,cmd -p 746640)",
      "Bash(kill 746636)",
      "Bash(ps -eo pid,lstart,cmd)",
      "Bash(grep -E \"^d|\\\\.py$\")",
      "Bash(grep -E \"\\\\.\\(ts|tsx\\)$\")",
      "Bash(xargs cat:*)",
      "Bash(find /home/timothy/aden/hive -path \"*/.venv\" -prune -o -name \"*.py\" -type f -exec grep -l \"frontend\\\\|UI\\\\|terminal\\\\|interactive\\\\|TUI\" {} \\\\;)",
      "Bash(wc -l /home/timothy/.hive/backup/*/SKILL.md)",
      "Bash(awk -F'::' '{print $1}')",
      "Bash(wait)",
      "Bash(pkill -f \"pytest.*test_event_loop_node\")",
      "Bash(pkill -f \"pytest.*TestToolConcurrency\")",
      "Bash(grep -n \"def.*discover\\\\|/api/agents\\\\|agents_discover\" /home/timothy/aden/hive/core/framework/server/*.py)",
      "Bash(bun run:*)",
      "Bash(npx eslint:*)",
      "Bash(npm run:*)",
      "Bash(npm test:*)",
      "Bash(grep -n \"PIL\\\\|Image\\\\|to_thread\\\\|run_in_executor\" /home/timothy/aden/hive/tools/src/gcu/browser/*.py /home/timothy/aden/hive/tools/src/gcu/browser/tools/*.py)",
      "WebFetch(domain:docs.litellm.ai)",
      "Bash(cat /home/timothy/aden/hive/.venv/lib/python3.11/site-packages/litellm-*.dist-info/METADATA)",
      "Bash(find \"/home/timothy/.hive/agents/queens/queen_brand_design/sessions/session_20260415_100751_d49f4c28/\" -type f -name \"*.json*\" -exec grep -l \"协日\" {} \\\\;)",
      "Bash(grep -v ':0$')",
      "Bash(curl -s -m 2 http://127.0.0.1:4002/sse -o /dev/null -w 'status=%{http_code} time=%{time_total}s\\\\n')",
      "mcp__gcu-tools__browser_status",
      "mcp__gcu-tools__browser_navigate",
      "mcp__gcu-tools__browser_evaluate",
      "mcp__gcu-tools__browser_screenshot",
      "mcp__gcu-tools__browser_open",
      "mcp__gcu-tools__browser_click_coordinate",
      "mcp__gcu-tools__browser_get_rect",
      "mcp__gcu-tools__browser_type_focused",
      "mcp__gcu-tools__browser_wait",
      "Bash(python3 -c ' *)",
      "Bash(python3 scripts/debug_queen_prompt.py independent)",
      "Bash(curl -s --max-time 2 http://127.0.0.1:9230/status)",
      "Bash(python3 -c \"import json, sys; print\\(json.loads\\(sys.stdin.read\\(\\)\\)['data']['content']\\)\")",
      "Bash(python3 -c \"import json; json.load\\(open\\('/home/timothy/aden/hive/tools/browser-extension/manifest.json'\\)\\)\")"
    ],
    "additionalDirectories": [
      "/home/timothy/.hive/skills/writing-hive-skills",
      "/tmp",
      "/home/timothy/.hive/skills",
      "/home/timothy/aden/hive/core/frontend/src/components"
    ]
  },
  "hooks": {
    "PostToolUse": [
      {
@@ -64,7 +64,7 @@ snapshot = await browser_snapshot(tab_id)

|---------|--------------|-------|
| Scroll doesn't move | Nested scroll container | Look for `overflow: scroll` divs |
| Click no effect | Element covered | Check `getBoundingClientRect` vs viewport |
| Type clears | Autocomplete/React | Check for event listeners on input |
| Type clears | Autocomplete/React | Check for event listeners on input; try `browser_type_focused` |
| Snapshot hangs | Huge DOM | Check node count in snapshot |
| Snapshot stale | SPA hydration | Wait after navigation |

@@ -229,7 +229,7 @@ function queryShadow(selector) {

|-------|-------------|----------|
| Scroll not working | Find scrollable container | Mouse wheel at container center |
| Click no effect | JavaScript click() | CDP mouse events |
| Type clears | Add delay_ms | Use execCommand |
| Type clears | Add delay_ms | Use `browser_type_focused` (Input.insertText) |
| Snapshot hangs | Add timeout_s | DOM snapshot fallback |
| Stale content | Wait for selector | Increase wait_until timeout |
| Shadow DOM | Pierce selector | JavaScript traversal |

@@ -214,7 +214,7 @@ Curated list of known browser automation edge cases with symptoms, causes, and f

| **Symptom** | `browser_open()` returns `"No group with id: XXXXXXX"` even though `browser_status` shows `running: true` |
| **Root Cause** | In-memory `_contexts` dict has a stale `groupId` from a Chrome tab group that was closed outside the tool (e.g. user closed the tab group) |
| **Detection** | `browser_status` returns `running: true` but `browser_open` fails with "No group with id" |
| **Fix** | Call `browser_stop()` to clear stale context from `_contexts`, then `browser_start()` again |
| **Fix** | Call `browser_stop()` to clear stale context from `_contexts`, then `browser_open(url)` to lazy-create a fresh one |
| **Code** | `tools/lifecycle.py:144-160` - `already_running` check uses cached dict without validating against Chrome |
| **Verified** | 2026-04-03 ✓ |
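A minimal recovery sketch for the stale-group case above. This is an illustration under assumptions, not the tool's actual code: the import path is hypothetical, and the error string and stop-then-open sequence come from the table rows.

```python
from gcu.browser.tools import browser_open, browser_stop  # hypothetical import path


async def open_with_recovery(url: str) -> dict:
    """Open a URL, retrying once after clearing a stale tab-group context."""
    result = await browser_open(url)
    if "No group with id" in str(result.get("error") or ""):
        await browser_stop()               # drops the stale groupId from _contexts
        result = await browser_open(url)   # lazy-creates a fresh tab group
    return result
```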
@@ -57,8 +57,7 @@ async def test_twitter_lazy_scroll():
    # Count initial tweets
    initial_count = await bridge.evaluate(
        tab_id,
        "(function() { return document.querySelectorAll("
        "'[data-testid=\"tweet\"]').length; })()",
        "(function() { return document.querySelectorAll('[data-testid=\"tweet\"]').length; })()",
    )
    print(f"Initial tweet count: {initial_count.get('result', 0)}")

@@ -78,8 +77,7 @@ async def test_twitter_lazy_scroll():
    # Count tweets after scroll
    count_result = await bridge.evaluate(
        tab_id,
        "(function() { return document.querySelectorAll("
        "'[data-testid=\"tweet\"]').length; })()",
        "(function() { return document.querySelectorAll('[data-testid=\"tweet\"]').length; })()",
    )
    count = count_result.get("result", 0)
    print(f" Tweet count after scroll: {count}")

@@ -87,8 +85,7 @@ async def test_twitter_lazy_scroll():
    # Final count
    final_count = await bridge.evaluate(
        tab_id,
        "(function() { return document.querySelectorAll("
        "'[data-testid=\"tweet\"]').length; })()",
        "(function() { return document.querySelectorAll('[data-testid=\"tweet\"]').length; })()",
    )
    final = final_count.get("result", 0)
    initial = initial_count.get("result", 0)
@@ -130,9 +130,7 @@ async def test_shadow_dom():
    print(f"JS click result: {click_result.get('result', {})}")

    # Verify click was registered
    count_result = await bridge.evaluate(
        tab_id, "(function() { return window.shadowClickCount || 0; })()"
    )
    count_result = await bridge.evaluate(tab_id, "(function() { return window.shadowClickCount || 0; })()")
    count = count_result.get("result") or 0
    print(f"Shadow click count: {count}")
@@ -200,9 +200,7 @@ async def test_autocomplete():
    print(f"Value after fast typing: '{fast_value}'")

    # Check events
    events_result = await bridge.evaluate(
        tab_id, "(function() { return window.inputEvents; })()"
    )
    events_result = await bridge.evaluate(tab_id, "(function() { return window.inputEvents; })()")
    print(f"Events logged: {events_result.get('result', [])}")

    # Test 2: Slow typing (with delay) - should work

@@ -220,8 +218,7 @@ async def test_autocomplete():
    # Check if dropdown appeared
    dropdown_result = await bridge.evaluate(
        tab_id,
        "(function() { return document.querySelectorAll("
        "'.autocomplete-items div').length; })()",
        "(function() { return document.querySelectorAll('.autocomplete-items div').length; })()",
    )
    dropdown_count = dropdown_result.get("result", 0)
    print(f"Dropdown items: {dropdown_count}")
@@ -87,9 +87,7 @@ async def test_huge_dom():
    await bridge.navigate(tab_id, data_url, wait_until="load")

    # Count elements
    count_result = await bridge.evaluate(
        tab_id, "(function() { return document.querySelectorAll('*').length; })()"
    )
    count_result = await bridge.evaluate(tab_id, "(function() { return document.querySelectorAll('*').length; })()")
    elem_count = count_result.get("result", 0)
    print(f"DOM elements: {elem_count}")

@@ -122,14 +120,10 @@ async def test_huge_dom():

    # Test 3: Real LinkedIn
    print("\n--- Test 3: Real LinkedIn Feed ---")
    await bridge.navigate(
        tab_id, "https://www.linkedin.com/feed", wait_until="load", timeout_ms=30000
    )
    await bridge.navigate(tab_id, "https://www.linkedin.com/feed", wait_until="load", timeout_ms=30000)
    await asyncio.sleep(2)

    count_result = await bridge.evaluate(
        tab_id, "(function() { return document.querySelectorAll('*').length; })()"
    )
    count_result = await bridge.evaluate(tab_id, "(function() { return document.querySelectorAll('*').length; })()")
    elem_count = count_result.get("result", 0)
    print(f"LinkedIn DOM elements: {elem_count}")
@@ -136,10 +136,7 @@ async def test_selector_screenshot(bridge: BeelineBridge, tab_id: int, data_url:
        print(" ⚠ WARNING: Selector screenshot not smaller (may be full page)")
        return False
    else:
        print(
            " ⚠ NOT IMPLEMENTED: selector param ignored"
            f" (returns full page) - error={result.get('error')}"
        )
        print(f" ⚠ NOT IMPLEMENTED: selector param ignored (returns full page) - error={result.get('error')}")
        print(" NOTE: selector parameter exists in signature but is not used in implementation")
        return False

@@ -181,9 +178,7 @@ async def test_screenshot_timeout(bridge: BeelineBridge, tab_id: int, data_url:
        print(f" ⚠ Fast enough to beat timeout: {err!r} in {elapsed:.3f}s")
        return True  # Not a failure, just fast
    else:
        print(
            f" ⚠ Screenshot completed before timeout ({elapsed:.3f}s) - too fast to test timeout"
        )
        print(f" ⚠ Screenshot completed before timeout ({elapsed:.3f}s) - too fast to test timeout")
        return True  # Still ok, just very fast
@@ -137,14 +137,8 @@ async def test_problematic_site(bridge: BeelineBridge, tab_id: int) -> dict:
    changed = False
    for key in after_data:
        if key in before_data:
            b_val = (
                before_data[key].get("scrollTop", 0)
                if isinstance(before_data[key], dict)
                else 0
            )
            a_val = (
                after_data[key].get("scrollTop", 0) if isinstance(after_data[key], dict) else 0
            )
            b_val = before_data[key].get("scrollTop", 0) if isinstance(before_data[key], dict) else 0
            a_val = after_data[key].get("scrollTop", 0) if isinstance(after_data[key], dict) else 0
            if a_val != b_val:
                print(f" ✓ CHANGE DETECTED: {key} scrolled from {b_val} to {a_val}")
                changed = True
@@ -1,18 +0,0 @@
This project uses ruff for Python linting and formatting.

Rules:
- Line length: 100 characters
- Python target: 3.11+
- Use double quotes for strings
- Sort imports with isort (ruff I rules): stdlib, third-party, first-party (framework), local
- Combine as-imports
- Use type hints on all function signatures
- Use `from __future__ import annotations` for modern type syntax
- Raise exceptions with `from` in except blocks (B904)
- No unused imports (F401), no unused variables (F841)
- Prefer list/dict/set comprehensions over map/filter (C4)

Run `make lint` to auto-fix, `make check` to verify without modifying files.
Run `make format` to apply ruff formatting.

The ruff config lives in core/pyproject.toml under [tool.ruff].
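For reference, the rules in the deleted document map onto a `[tool.ruff]` block roughly like the sketch below. This is reconstructed from the bullet list above using ruff's published option names, not copied from the project's actual `core/pyproject.toml`.

```toml
[tool.ruff]
line-length = 100
target-version = "py311"

[tool.ruff.lint]
# F covers F401/F841; I is import sorting; B includes B904; C4 is comprehensions.
select = ["E", "F", "I", "B", "C4"]

[tool.ruff.lint.isort]
combine-as-imports = true
known-first-party = ["framework"]

[tool.ruff.format]
quote-style = "double"
```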
@@ -1,35 +0,0 @@
# Git
.git/
.gitignore

# Documentation
*.md
docs/
LICENSE

# IDE
.idea/
.vscode/

# Dependencies (rebuilt in container)
node_modules/

# Build artifacts
dist/
build/
coverage/

# Environment files
.env*
config.yaml

# Logs
*.log
logs/

# OS
.DS_Store
Thumbs.db

# GitHub
.github/
@@ -22,3 +22,6 @@ indent_size = 2

[Makefile]
indent_style = tab

[*.{sh,ps1}]
end_of_line = lf
+5 -1
@@ -16,7 +16,6 @@

# Shell scripts (must use LF)
*.sh text eol=lf
quickstart.sh text eol=lf

# PowerShell scripts (Windows-friendly)
*.ps1 text eol=lf

@@ -122,3 +121,8 @@ CODE_OF_CONDUCT* text
*.db binary
*.sqlite binary
*.sqlite3 binary

# Lockfiles — mark generated so GitHub collapses them in PR diffs
*.lock linguist-generated=true -diff
package-lock.json linguist-generated=true -diff
uv.lock linguist-generated=true -diff

@@ -81,3 +81,4 @@ core/tests/*dumps/*
screenshots/*

.gemini/*
.coverage
+19 -3
@@ -333,6 +333,22 @@ make test-live # Run live API integration tests (requires credentials)
- **WebSocket** for real-time updates
- **Tailwind CSS** for styling

### Frontend Dev Workflow

> **Note:** `./quickstart.sh` handles the full setup including the web UI.
> The commands below are for contributors iterating on the frontend code after
> initial setup is complete.

```bash
# Start the backend server
hive serve

# In a separate terminal, run the frontend dev server with hot-reload
cd core/frontend
npm install   # only needed after dependency changes
npm run dev
```

### Useful Development Commands

```bash
@@ -943,7 +959,7 @@ uv run pytest -m "not live"
**Unit Test**
```python
import pytest
from framework.graph.node import Node
from framework.orchestrator import NodeSpec as Node

def test_node_creation():
    node = Node(id="test", name="Test Node", node_type="event_loop")

@@ -961,8 +977,8 @@ async def test_node_execution():
**Integration Test**
```python
import pytest
from framework.graph.executor import GraphExecutor
from framework.graph.node import Node
from framework.orchestrator.orchestrator import Orchestrator as GraphExecutor
from framework.orchestrator import NodeSpec as Node

@pytest.mark.asyncio
async def test_graph_execution_with_multiple_nodes():
@@ -1,5 +1,5 @@
<p align="center">
  <img width="100%" alt="Hive Banner" src="https://github.com/user-attachments/assets/a027429b-5d3c-4d34-88e4-0feaeaabbab3" />
  <img width="100%" alt="Hive Banner" src="https://asset.acho.io/github/img/banner.gif" />
</p>

<p align="center">
@@ -40,7 +40,16 @@

## Overview

Hive is a runtime harness for AI agents in production. You describe your goal in natural language; a coding agent (the queen) generates the agent graph and connection code to achieve it. During execution, the harness manages state isolation, checkpoint-based crash recovery, cost enforcement, and real-time observability. When agents fail, the framework captures failure data, evolves the graph through the coding agent, and redeploys automatically. Built-in human-in-the-loop nodes, browser control, credential management, and parallel execution give you production reliability without sacrificing adaptability.
OpenHive is a zero-setup, model-agnostic execution harness that dynamically generates multi-agent topologies for complex, long-running business workflows, with no orchestration boilerplate. You define your objective; the runtime compiles it into a strict graph-based execution DAG that safely coordinates specialized agents running tasks in parallel. Backed by persistent, role-based memory that evolves with your project's context, OpenHive provides deterministic fault tolerance, deep state observability, and seamless asynchronous execution across whichever underlying LLMs you plug in.

## Features

- ✅ Multi-Agent Coordination for parallel task execution
- ✅ Graph-based execution for recurring and complex processes
- ✅ Role-based memory that evolves with your projects
- ✅ Zero Setup - No technical configuration required
- ✅ General Compute Use and Browser Use with Native Extension
- ✅ Custom Model Support

Visit [adenhq.com](https://adenhq.com) for complete documentation, examples, and guides.
@@ -51,7 +60,7 @@ https://github.com/user-attachments/assets/bf10edc3-06ba-48b6-98ba-d069b15fb69d

## Who Is Hive For?

Hive is the harness layer for teams moving AI agents from prototype to production. Models are getting better on their own — the bottleneck is the infrastructure around them: state management, failure recovery, cost control, and observability.
Hive is the multi-agent harness layer for teams moving AI agents from prototype to production. Single agents like Openclaw and Cowork handle personal tasks well, but they lack the rigor that business processes demand.

Hive is a good fit if you:
@@ -139,17 +148,6 @@ Now you can run an agent by selecting the agent (either an existing agent or exa

<img width="2549" height="1174" alt="Screenshot 2026-03-12 at 9 27 36 PM" src="https://github.com/user-attachments/assets/7c7d30fa-9ceb-4c23-95af-b1caa405547d" />

## Features

- **Browser-Use** - Control the browser on your computer to achieve hard tasks
- **Parallel Execution** - Execute the generated graph in parallel. This way you can have multiple agents completing the jobs for you
- **[Goal-Driven Generation](docs/key_concepts/goals_outcome.md)** - Define objectives in natural language; the coding agent generates the agent graph and connection code to achieve them
- **[Adaptiveness](docs/key_concepts/evolution.md)** - Framework captures failures, calibrates according to the objectives, and evolves the agent graph
- **[Dynamic Node Connections](docs/key_concepts/graph.md)** - No predefined edges; connection code is generated by any capable LLM based on your goals
- **SDK-Wrapped Nodes** - Every node gets a shared data buffer, local RLM memory, monitoring, tools, and LLM access out of the box
- **[Human-in-the-Loop](docs/key_concepts/graph.md#human-in-the-loop)** - Intervention nodes that pause execution for human input with configurable timeouts and escalation
- **Real-time Observability** - WebSocket streaming for live monitoring of agent execution, decisions, and node-to-node communication

## Integration

<a href="https://github.com/aden-hive/hive/tree/main/tools/src/aden_tools/tools"><img width="100%" alt="Integration" src="https://github.com/user-attachments/assets/a1573f93-cf02-4bb8-b3d5-b305b05b1e51" /></a>
@@ -194,18 +192,6 @@ flowchart LR
    style V6 fill:#fff,stroke:#ed8c00,stroke-width:1px,color:#cc5d00
```

### The Hive Advantage

| Typical Agent Frameworks | Hive |
| -------------------------- | -------------------------------------- |
| Focus on model orchestration | **Production harness**: state, recovery, observability |
| Hardcode agent workflows | Describe goals in natural language |
| Manual graph definition | Auto-generated agent graphs |
| Reactive error handling | Outcome-evaluation and adaptiveness |
| Static tool configurations | Dynamic SDK-wrapped nodes |
| Separate monitoring setup | Built-in real-time observability |
| DIY budget management | Integrated cost controls & degradation |

### How It Works

1. **[Define Your Goal](docs/key_concepts/goals_outcome.md)** → Describe what you want to achieve in plain English
@@ -221,131 +207,6 @@ flowchart LR
- [Configuration Guide](docs/configuration.md) - All configuration options
- [Architecture Overview](docs/architecture/README.md) - System design and structure

## Roadmap

Aden Hive Agent Framework aims to help developers build outcome-oriented, self-adaptive agents. See [roadmap.md](docs/roadmap.md) for details.

```mermaid
flowchart TB
    %% Main Entity
    User([User])

    %% =========================================
    %% EXTERNAL EVENT SOURCES
    %% =========================================
    subgraph ExtEventSource [External Event Source]
        E_Sch["Schedulers"]
        E_WH["Webhook"]
        E_SSE["SSE"]
    end

    %% =========================================
    %% SYSTEM NODES
    %% =========================================
    subgraph WorkerBees [Worker Bees]
        WB_C["Conversation"]
        WB_SP["System prompt"]

        subgraph Graph [Graph]
            direction TB
            N1["Node"] --> N2["Node"] --> N3["Node"]
            N1 -.-> AN["Active Node"]
            N2 -.-> AN
            N3 -.-> AN

            %% Nested Event Loop Node
            subgraph EventLoopNode [Event Loop Node]
                ELN_L["listener"]
                ELN_SP["System Prompt<br/>(Task)"]
                ELN_EL["Event loop"]
                ELN_C["Conversation"]
            end
        end
    end

    subgraph JudgeNode [Judge]
        J_C["Criteria"]
        J_P["Principles"]
        J_EL["Event loop"] <--> J_S["Scheduler"]
    end

    subgraph QueenBee [Queen Bee]
        QB_SP["System prompt"]
        QB_EL["Event loop"]
        QB_C["Conversation"]
    end

    subgraph Infra [Infra]
        SA["Sub Agent"]
        TR["Tool Registry"]
        WTM["Write through Conversation Memory<br/>(Logs/RAM/Harddrive)"]
        SM["Shared Memory<br/>(State/Harddrive)"]
        EB["Event Bus<br/>(RAM)"]
        CS["Credential Store<br/>(Harddrive/Cloud)"]
    end

    subgraph PC [PC]
        B["Browser"]
        CB["Codebase<br/>v 0.0.x ... v n.n.n"]
    end

    %% =========================================
    %% CONNECTIONS & DATA FLOW
    %% =========================================

    %% External Event Routing
    E_Sch --> ELN_L
    E_WH --> ELN_L
    E_SSE --> ELN_L
    ELN_L -->|"triggers"| ELN_EL

    %% User Interactions
    User -->|"Talk"| WB_C
    User -->|"Talk"| QB_C
    User -->|"Read/Write Access"| CS

    %% Inter-System Logic
    ELN_C <-->|"Mirror"| WB_C
    WB_C -->|"Focus"| AN

    WorkerBees -->|"Inquire"| JudgeNode
    JudgeNode -->|"Approve"| WorkerBees

    %% Judge Alignments
    J_C <-.->|"aligns"| WB_SP
    J_P <-.->|"aligns"| QB_SP

    %% Escalate path
    J_EL -->|"Report (Escalate)"| QB_EL

    %% Pub/Sub Logic
    AN -->|"publish"| EB
    EB -->|"subscribe"| QB_C

    %% Infra and Process Spawning
    ELN_EL -->|"Spawn"| SA
    SA -->|"Inform"| ELN_EL
    SA -->|"Starts"| B
    B -->|"Report"| ELN_EL
    TR -->|"Assigned"| ELN_EL
    CB -->|"Modify Worker Bee"| WB_C

    %% =========================================
    %% SHARED MEMORY & LOGS ACCESS
    %% =========================================

    %% Worker Bees Access (link to node inside Graph subgraph)
    AN <-->|"Read/Write"| WTM
    AN <-->|"Read/Write"| SM

    %% Queen Bee Access
    QB_C <-->|"Read/Write"| WTM
    QB_EL <-->|"Read/Write"| SM

    %% Credentials Access
    CS -->|"Read Access"| QB_C
```

## Contributing

We welcome contributions from the community! We’re especially looking for help building tools, integrations, and example agents for the framework ([check #2805](https://github.com/aden-hive/hive/issues/2805)). If you’re interested in extending its functionality, this is the perfect place to start. Please see [CONTRIBUTING.md](CONTRIBUTING.md) for guidelines.
@@ -72,17 +72,16 @@ Register an MCP server as a tool source for your agent.
    "cwd": "../tools",
    "description": "Aden tools..."
  },
  "tools_discovered": 6,
  "tools_discovered": 5,
  "tools": [
    "web_search",
    "web_scrape",
    "file_read",
    "file_write",
    "pdf_read",
    "example_tool"
    "pdf_read"
  ],
  "total_mcp_servers": 1,
  "note": "MCP server 'tools' registered with 6 tools. These tools can now be used in event_loop nodes."
  "note": "MCP server 'tools' registered with 5 tools. These tools can now be used in event_loop nodes."
}
```
@@ -1,6 +1,6 @@
# MCP Server Guide - Agent Building Tools

> **Note:** The standalone `agent-builder` MCP server (`framework.mcp.agent_builder_server`) has been replaced. Agent building is now done via the `coder-tools` server's `initialize_and_build_agent` tool, with underlying logic in `tools/coder_tools_server.py`.
> **Note:** This document is stale. The previous `coder-tools` MCP server has been replaced by `files-tools` (`tools/files_server.py`), which only exposes file I/O (`read_file`, `write_file`, `edit_file`, `hashline_edit`, `search_files`). The agent-building, shell, and snapshot tools that used to live here have been removed.

This guide covers the MCP tools available for building goal-driven agents.

@@ -20,9 +20,9 @@ Add to your MCP client configuration (e.g., Claude Desktop):
```json
{
  "mcpServers": {
    "coder-tools": {
    "files-tools": {
      "command": "uv",
      "args": ["run", "coder_tools_server.py", "--stdio"],
      "args": ["run", "files_server.py", "--stdio"],
      "cwd": "/path/to/hive/tools"
    }
  }
@@ -19,8 +19,6 @@ uv pip install -e .

## Agent Building

Agent scaffolding is handled by the `coder-tools` MCP server (in `tools/coder_tools_server.py`), which provides the `initialize_and_build_agent` tool and related utilities. The package generation logic lives directly in `tools/coder_tools_server.py`.

See the [Getting Started Guide](../docs/getting-started.md) for building agents.

## Quick Start
@@ -52,9 +52,7 @@ _DEFAULT_REDIRECT_PORT = 51121
# This project reverse-engineered and published the public OAuth credentials
# for Google's Antigravity/Cloud Code Assist API.
# Source: https://github.com/NoeFabris/opencode-antigravity-auth
_CREDENTIALS_URL = (
    "https://raw.githubusercontent.com/NoeFabris/opencode-antigravity-auth/dev/src/constants.ts"
)
_CREDENTIALS_URL = "https://raw.githubusercontent.com/NoeFabris/opencode-antigravity-auth/dev/src/constants.ts"

# Cached credentials fetched from public source
_cached_client_id: str | None = None

@@ -68,9 +66,7 @@ def _fetch_credentials_from_public_source() -> tuple[str | None, str | None]:
        return _cached_client_id, _cached_client_secret

    try:
        req = urllib.request.Request(
            _CREDENTIALS_URL, headers={"User-Agent": "Hive-Antigravity-Auth/1.0"}
        )
        req = urllib.request.Request(_CREDENTIALS_URL, headers={"User-Agent": "Hive-Antigravity-Auth/1.0"})
        with urllib.request.urlopen(req, timeout=10) as resp:
            content = resp.read().decode("utf-8")
            import re

@@ -168,10 +164,7 @@ class OAuthCallbackHandler(BaseHTTPRequestHandler):
        if "code" in query and "state" in query:
            OAuthCallbackHandler.auth_code = query["code"][0]
            OAuthCallbackHandler.state = query["state"][0]
            self._send_response(
                "Authentication successful! You can close this window "
                "and return to the terminal."
            )
            self._send_response("Authentication successful! You can close this window and return to the terminal.")
            return

        self._send_response("Waiting for authentication...")

@@ -296,8 +289,7 @@ def validate_credentials(access_token: str, project_id: str = _DEFAULT_PROJECT_I
        "Authorization": f"Bearer {access_token}",
        "Content-Type": "application/json",
        "User-Agent": (
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) "
            "AppleWebKit/537.36 (KHTML, like Gecko) Antigravity/1.18.3"
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Antigravity/1.18.3"
        ),
        "X-Goog-Api-Client": "google-cloud-sdk vscode_cloudshelleditor/0.1",
    }

@@ -316,9 +308,7 @@ def validate_credentials(access_token: str, project_id: str = _DEFAULT_PROJECT_I
        return False


def refresh_access_token(
    refresh_token: str, client_id: str, client_secret: str | None
) -> dict | None:
def refresh_access_token(refresh_token: str, client_id: str, client_secret: str | None) -> dict | None:
    """Refresh the access token using the refresh token."""
    data = {
        "grant_type": "refresh_token",

@@ -361,9 +351,7 @@ def cmd_account_add(args: argparse.Namespace) -> int:
    access_token = account.get("access")
    refresh_token_str = account.get("refresh", "")
    refresh_token = refresh_token_str.split("|")[0] if refresh_token_str else None
    project_id = (
        refresh_token_str.split("|")[1] if "|" in refresh_token_str else _DEFAULT_PROJECT_ID
    )
    project_id = refresh_token_str.split("|")[1] if "|" in refresh_token_str else _DEFAULT_PROJECT_ID
    email = account.get("email", "unknown")
    expires_ms = account.get("expires", 0)
    expires_at = expires_ms / 1000.0 if expires_ms else 0.0

@@ -390,9 +378,7 @@ def cmd_account_add(args: argparse.Namespace) -> int:
    # Update the account
    account["access"] = new_access
    account["expires"] = int((time.time() + expires_in) * 1000)
    accounts_data["last_refresh"] = time.strftime(
        "%Y-%m-%dT%H:%M:%SZ", time.gmtime()
    )
    accounts_data["last_refresh"] = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
    save_accounts(accounts_data)

    # Validate the refreshed token
@@ -1,132 +0,0 @@
"""
Minimal Manual Agent Example
----------------------------
This example demonstrates how to build and run an agent programmatically
without using the Claude Code CLI or external LLM APIs.

It uses custom NodeProtocol implementations to define logic in pure Python,
making it perfect for understanding the core runtime loop:
Setup -> Graph definition -> Execution -> Result

Run with:
    uv run python core/examples/manual_agent.py
"""

import asyncio

from framework.graph import EdgeCondition, EdgeSpec, Goal, GraphSpec, NodeSpec
from framework.graph.executor import GraphExecutor
from framework.graph.node import NodeContext, NodeProtocol, NodeResult
from framework.runtime.core import Runtime


# 1. Define Node Logic (Custom NodeProtocol implementations)
class GreeterNode(NodeProtocol):
    """Generate a simple greeting."""

    async def execute(self, ctx: NodeContext) -> NodeResult:
        name = ctx.input_data.get("name", "World")
        greeting = f"Hello, {name}!"
        ctx.buffer.write("greeting", greeting)
        return NodeResult(success=True, output={"greeting": greeting})


class UppercaserNode(NodeProtocol):
    """Convert text to uppercase."""

    async def execute(self, ctx: NodeContext) -> NodeResult:
        greeting = ctx.input_data.get("greeting") or ctx.buffer.read("greeting") or ""
        result = greeting.upper()
        ctx.buffer.write("final_greeting", result)
        return NodeResult(success=True, output={"final_greeting": result})


async def main():
    print("Setting up Manual Agent...")

    # 2. Define the Goal
    # Every agent needs a goal with success criteria
    goal = Goal(
        id="greet-user",
        name="Greet User",
        description="Generate a friendly uppercase greeting",
        success_criteria=[
            {
                "id": "greeting_generated",
                "description": "Greeting produced",
                "metric": "custom",
                "target": "any",
            }
        ],
    )

    # 3. Define Nodes
    # Nodes describe steps in the process
    node1 = NodeSpec(
        id="greeter",
        name="Greeter",
        description="Generates a simple greeting",
        node_type="event_loop",
        input_keys=["name"],
        output_keys=["greeting"],
    )

    node2 = NodeSpec(
        id="uppercaser",
        name="Uppercaser",
        description="Converts greeting to uppercase",
        node_type="event_loop",
        input_keys=["greeting"],
        output_keys=["final_greeting"],
    )

    # 4. Define Edges
    # Edges define the flow between nodes
    edge1 = EdgeSpec(
        id="greet-to-upper",
        source="greeter",
        target="uppercaser",
        condition=EdgeCondition.ON_SUCCESS,
    )

    # 5. Create Graph
    # The graph works like a blueprint connecting nodes and edges
    graph = GraphSpec(
        id="greeting-agent",
        goal_id="greet-user",
        entry_node="greeter",
        terminal_nodes=["uppercaser"],
        nodes=[node1, node2],
        edges=[edge1],
    )

    # 6. Initialize Runtime & Executor
    # Runtime handles state/memory; Executor runs the graph
    from pathlib import Path

    runtime = Runtime(storage_path=Path("./agent_logs"))
    executor = GraphExecutor(runtime=runtime)

    # 7. Register Node Implementations
    # Connect node IDs in the graph to actual Python implementations
    executor.register_node("greeter", GreeterNode())
    executor.register_node("uppercaser", UppercaserNode())

    # 8. Execute Agent
    print("Executing agent with input: name='Alice'...")

    result = await executor.execute(graph=graph, goal=goal, input_data={"name": "Alice"})

    # 9. Verify Results
    if result.success:
        print("\nSuccess!")
        print(f"Path taken: {' -> '.join(result.path)}")
        print(f"Final output: {result.output.get('final_greeting')}")
    else:
        print(f"\nFailed: {result.error}")


if __name__ == "__main__":
    # Optional: Enable logging to see internal decision flow
    # logging.basicConfig(level=logging.INFO)
    asyncio.run(main())
@@ -1,119 +0,0 @@
#!/usr/bin/env python3
"""
Example: Integrating MCP Servers with the Core Framework

This example demonstrates how to:
1. Register MCP servers programmatically
2. Use MCP tools in agents
3. Load MCP servers from configuration files
"""

import asyncio
from pathlib import Path

from framework.runner.runner import AgentRunner


async def example_1_programmatic_registration():
    """Example 1: Register MCP server programmatically"""
    print("\n=== Example 1: Programmatic MCP Server Registration ===\n")

    # Load an existing agent
    runner = AgentRunner.load("exports/task-planner")

    # Register tools MCP server via STDIO
    num_tools = runner.register_mcp_server(
        name="tools",
        transport="stdio",
        command="python",
        args=["-m", "aden_tools.mcp_server", "--stdio"],
        cwd="../tools",
    )

    print(f"Registered {num_tools} tools from tools MCP server")

    # List all available tools
    tools = runner._tool_registry.get_tools()
    print(f"\nAvailable tools: {list(tools.keys())}")

    # Run the agent with MCP tools available
    result = await runner.run(
        {"objective": "Search for 'Claude AI' and summarize the top 3 results"}
    )

    print(f"\nAgent result: {result}")

    # Cleanup
    runner.cleanup()


async def example_2_http_transport():
    """Example 2: Connect to MCP server via HTTP"""
    print("\n=== Example 2: HTTP MCP Server Connection ===\n")

    # First, start the tools MCP server in HTTP mode:
    # cd tools && python mcp_server.py --port 4001

    runner = AgentRunner.load("exports/task-planner")

    # Register tools via HTTP
    num_tools = runner.register_mcp_server(
        name="tools-http",
        transport="http",
        url="http://localhost:4001",
    )

    print(f"Registered {num_tools} tools from HTTP MCP server")

    # Cleanup
    runner.cleanup()


async def example_3_config_file():
    """Example 3: Load MCP servers from configuration file"""
    print("\n=== Example 3: Load from Configuration File ===\n")

    # Create a test agent folder with mcp_servers.json
    test_agent_path = Path("exports/task-planner")

    # Copy example config (in practice, you'd place this in your agent folder)
    import shutil

    shutil.copy(Path(__file__).parent / "mcp_servers.json", test_agent_path / "mcp_servers.json")

    # Load agent - MCP servers will be auto-discovered
    runner = AgentRunner.load(test_agent_path)

    # Tools are automatically available
    tools = runner._tool_registry.get_tools()
    print(f"Available tools: {list(tools.keys())}")

    # Cleanup
    runner.cleanup()

    # Clean up the test config
    (test_agent_path / "mcp_servers.json").unlink()


async def main():
    """Run all examples"""
    print("=" * 60)
    print("MCP Integration Examples")
    print("=" * 60)

    try:
        # Run examples
        await example_1_programmatic_registration()
        # await example_2_http_transport()  # Requires HTTP server running
        # await example_3_config_file()
        # await example_4_custom_agent_with_mcp_tools()

    except Exception as e:
        print(f"\nError running example: {e}")
        import traceback

        traceback.print_exc()


if __name__ == "__main__":
    asyncio.run(main())
+12 -63
@@ -1,71 +1,20 @@
"""
Aden Hive Framework: A goal-driven agent runtime optimized for Builder observability.
"""Hive Agent Framework.

The runtime is designed around DECISIONS, not just actions. Every significant
choice the agent makes is captured with:
- What it was trying to do (intent)
- What options it considered
- What it chose and why
- What happened as a result
- Whether that was good or bad (evaluated post-hoc)

This gives the Builder LLM the information it needs to improve agent behavior.

## Testing Framework

The framework includes a Goal-Based Testing system (Goal → Agent → Eval):
- Generate tests from Goal success_criteria and constraints
- Mandatory user approval before tests are stored
- Parallel test execution with error categorization
- Debug tools with fix suggestions

See `framework.testing` for details.
Core classes:
    ColonyRuntime -- orchestrates parallel worker clones in a colony
    AgentLoop -- the LLM + tool execution loop (one per worker)
    AgentLoader -- loads agent config from disk, builds pipeline
    DecisionTracker -- records decisions for post-hoc analysis
"""

from framework.llm import LLMProvider

try:
    from framework.llm import AnthropicProvider  # noqa: F401
except ImportError:
    pass
from framework.agent_loop import AgentLoop
from framework.host import ColonyRuntime
from framework.loader import AgentLoader
from framework.tracker.decision_tracker import DecisionTracker  # noqa: F401
from framework.schemas.decision import Decision, DecisionEvaluation, Option, Outcome
from framework.schemas.run import Problem, Run, RunSummary

# Testing framework
from framework.testing import (
    ApprovalStatus,
    DebugTool,
    ErrorCategory,
    Test,
    TestResult,
    TestStorage,
    TestSuiteResult,
)
from framework.tracker import DecisionTracker

__all__ = [
    # Schemas
    "Decision",
    "Option",
    "Outcome",
    "DecisionEvaluation",
    "Run",
    "RunSummary",
    "Problem",
    # Runtime
    "Runtime",
    # LLM
    "LLMProvider",
    "AnthropicProvider",
    # Runner
    "ColonyRuntime",
    "AgentLoader",
    # Testing
    "Test",
    "TestResult",
    "TestSuiteResult",
    "TestStorage",
    "ApprovalStatus",
    "ErrorCategory",
    "DebugTool",
    "AgentLoop",
    "DecisionTracker",
]
@@ -5,11 +5,12 @@ from framework.agent_loop.conversation import (  # noqa: F401
    Message,
    NodeConversation,
)

# Lazy import to avoid circular dependency with graph/event_loop/
# (graph/event_loop/* imports framework.graph.conversation which is a shim
# pointing here, which would trigger agent_loop.py loading, which imports
# graph/event_loop/* again)
from framework.agent_loop.types import (  # noqa: F401
    AgentContext,
    AgentProtocol,
    AgentResult,
    AgentSpec,
)


def __getattr__(name: str):

@@ -21,6 +22,7 @@ def __getattr__(name: str):
        LoopConfig,
        OutputAccumulator,
    )

    _exports = {
        "AgentLoop": AgentLoop,
        "JudgeProtocol": JudgeProtocol,
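The `__getattr__` hook in this hunk is the standard module-level lazy-export pattern from PEP 562, which is also how the comment's circular-import problem is broken. A minimal standalone sketch, with an illustrative package name rather than the framework's real one:

```python
# mypackage/__init__.py -- resolve heavy exports only on first attribute access,
# deferring the import until after the module graph has finished loading.
def __getattr__(name: str):
    if name == "AgentLoop":
        from mypackage.agent_loop import AgentLoop  # deferred import
        return AgentLoop
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```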
+1683 -656
File diff suppressed because it is too large
@@ -3,12 +3,14 @@
from __future__ import annotations

import json
import logging
import re
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Literal, Protocol, runtime_checkable

LEGACY_RUN_ID = "__legacy_run__"
logger = logging.getLogger(__name__)


def is_legacy_run_id(run_id: str | None) -> bool:
@@ -46,6 +48,24 @@ class Message:
    is_skill_content: bool = False
    # Logical worker run identifier for shared-session persistence
    run_id: str | None = None
    # True when this is a framework-injected continuation hint (continue-nudge
    # on stream stall). Stored as a user message for API compatibility, but
    # the UI should render it as a compact system notice, not user speech.
    is_system_nudge: bool = False
    # True when this message is a partial/truncated assistant turn reconstructed
    # from a crashed or watchdog-cancelled stream. Signals that the original
    # turn never finished — the model may or may not choose to redo it.
    truncated: bool = False
    # When non-None, identifies the parent session id this message was
    # carried over from — used by fork_session_into_colony on the single
    # compacted-summary message it writes when a colony is born from a
    # queen DM. Presence of the field IS the "inherited" signal.
    inherited_from: str | None = None
    # True when this user message was synthesized from one or more
    # fired triggers (timer/webhook), not typed by a human. The LLM still
    # sees the message as a regular user turn; the UI uses this flag to
    # render it as a trigger banner instead of a speech bubble.
    is_trigger: bool = False

    def to_llm_dict(self) -> dict[str, Any]:
        """Convert to OpenAI-format message dict."""
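A sketch of how these new flags round-trip through persistence. The field names come from the hunk above; the constructor shape is assumed from the dataclass definition, and the serializer/deserializer names (`to_persist_dict`, `from_dict`) are inferred from the persistence hunks below rather than confirmed:

```python
# Hypothetical usage -- flags are written only when set, and default to
# False/None when absent from older persisted conversations.
nudge = Message(role="user", content="Please continue.", is_system_nudge=True)
data = nudge.to_persist_dict()
assert data.get("is_system_nudge") is True

restored = Message.from_dict(data)
assert restored.is_system_nudge and not restored.is_trigger
```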
@@ -59,9 +79,12 @@ class Message:
        return {"role": "user", "content": self.content}

        if self.role == "assistant":
            d: dict[str, Any] = {"role": "assistant", "content": self.content}
            d: dict[str, Any] = {"role": "assistant"}
            if self.tool_calls:
                d["tool_calls"] = self.tool_calls
                d["content"] = self.content if self.content else None
            else:
                d["content"] = self.content or ""
            return d

        # role == "tool"
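The assistant-branch change matters for providers that reject an empty `content` string alongside `tool_calls`; in sketch form (same constructor assumptions as above):

```python
call = [{"id": "t1", "type": "function",
         "function": {"name": "web_search", "arguments": "{}"}}]
msg = Message(role="assistant", content="", tool_calls=call)
d = msg.to_llm_dict()
# With tool_calls present, empty content now serializes as None rather than "";
# a plain assistant text turn still falls back to "".
assert d["tool_calls"] == call and d["content"] is None
```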
@@ -104,6 +127,14 @@ class Message:
            d["image_content"] = self.image_content
        if self.run_id is not None:
            d["run_id"] = self.run_id
        if self.is_system_nudge:
            d["is_system_nudge"] = self.is_system_nudge
        if self.truncated:
            d["truncated"] = self.truncated
        if self.inherited_from is not None:
            d["inherited_from"] = self.inherited_from
        if self.is_trigger:
            d["is_trigger"] = self.is_trigger
        return d

    @classmethod
@@ -121,6 +152,10 @@ class Message:
            is_client_input=data.get("is_client_input", False),
            image_content=data.get("image_content"),
            run_id=data.get("run_id"),
            is_system_nudge=data.get("is_system_nudge", False),
            truncated=data.get("truncated", False),
            inherited_from=data.get("inherited_from"),
            is_trigger=data.get("is_trigger", False),
        )
@@ -157,10 +192,17 @@ def update_run_cursor(
def _extract_spillover_filename(content: str) -> str | None:
    """Extract spillover filename from a tool result annotation.

-    Matches patterns produced by EventLoopNode._truncate_tool_result():
-    - Large result: "saved to 'web_search_1.txt'"
-    - Small result: "[Saved to 'web_search_1.txt']"
+    Matches patterns produced by ``truncate_tool_result``:
+    - New large-result header: "Full result saved at: /abs/path/file.txt"
+    - Legacy bracketed trailer: "[Saved to 'file.txt']" (pre-2026-04-15,
+      retained here so cold conversations still resolve)
    """
+    # New prose format — ``saved at: <absolute path>``, terminated by
+    # newline or end-of-string.
+    match = re.search(r"[Ss]aved at:\s*(\S+)", content)
+    if match:
+        return match.group(1)
+    # Legacy format.
    match = re.search(r"[Ss]aved to '([^']+)'", content)
    return match.group(1) if match else None
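Both regexes key only on the "saved" phrase, so the helper resolves spillover paths written before and after the format change. A quick standalone check of the two branches (strings are illustrative):

```python
import re

def extract_spillover(content: str) -> str | None:
    # Mirror of _extract_spillover_filename above, for a self-contained check.
    match = re.search(r"[Ss]aved at:\s*(\S+)", content)
    if match:
        return match.group(1)
    match = re.search(r"[Ss]aved to '([^']+)'", content)
    return match.group(1) if match else None

assert extract_spillover("Full result saved at: /tmp/web_search_1.txt") == "/tmp/web_search_1.txt"
assert extract_spillover("[Saved to 'web_search_1.txt']") == "web_search_1.txt"
assert extract_spillover("no annotation here") is None
```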

@@ -233,8 +275,8 @@ def extract_tool_call_history(messages: list[Message], max_entries: int = 30) ->
            return args.get("query", "")
        if name == "web_scrape":
            return args.get("url", "")
-        if name in ("load_data", "save_data"):
-            return args.get("filename", "")
+        if name == "read_file":
+            return args.get("path", "")
        return ""

    for msg in messages:
@@ -250,8 +292,8 @@ def extract_tool_call_history(messages: list[Message], max_entries: int = 30) ->
            summary = _summarize_input(name, args)
            tool_calls_detail.setdefault(name, []).append(summary)

-            if name == "save_data" and args.get("filename"):
-                files_saved.append(args["filename"])
+            if name == "read_file" and args.get("path"):
+                files_saved.append(args["path"])
            if name == "set_output" and args.get("key"):
                outputs_set.append(args["key"])
@@ -305,6 +347,14 @@ class ConversationStore(Protocol):

    async def delete_parts_before(self, seq: int, run_id: str | None = None) -> None: ...

+    async def write_partial(self, seq: int, data: dict[str, Any]) -> None: ...
+
+    async def read_partial(self, seq: int) -> dict[str, Any] | None: ...
+
+    async def read_all_partials(self) -> list[dict[str, Any]]: ...
+
+    async def clear_partial(self, seq: int) -> None: ...
+
    async def close(self) -> None: ...

    async def destroy(self) -> None: ...
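The four ``*_partial`` methods carve out a side channel that ``read_parts()`` never touches, which is what lets a partial checkpoint coexist with the real part for the same seq. A dict-backed sketch of that contract (illustrative only; the stores in this changeset are persistent):

```python
from typing import Any

class InMemoryStore:
    """Minimal sketch of the partial-checkpoint contract."""

    def __init__(self) -> None:
        self._parts: dict[int, dict[str, Any]] = {}
        self._partials: dict[int, dict[str, Any]] = {}  # invisible to read_parts()

    async def write_part(self, seq: int, data: dict[str, Any]) -> None:
        self._parts[seq] = data

    async def read_parts(self) -> list[dict[str, Any]]:
        return [self._parts[k] for k in sorted(self._parts)]

    async def write_partial(self, seq: int, data: dict[str, Any]) -> None:
        self._partials[seq] = data  # overwrite is fine: last checkpoint wins

    async def read_partial(self, seq: int) -> dict[str, Any] | None:
        return self._partials.get(seq)

    async def read_all_partials(self) -> list[dict[str, Any]]:
        return [self._partials[k] for k in sorted(self._partials)]

    async def clear_partial(self, seq: int) -> None:
        self._partials.pop(seq, None)
```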
@@ -376,10 +426,36 @@ class NodeConversation:
        output_keys: list[str] | None = None,
        store: ConversationStore | None = None,
        run_id: str | None = None,
+        compaction_buffer_tokens: int | None = None,
+        compaction_buffer_ratio: float | None = None,
+        compaction_warning_buffer_tokens: int | None = None,
    ) -> None:
        self._system_prompt = system_prompt
+        # Optional split: when a caller updates the prompt with a
+        # ``dynamic_suffix`` argument, we remember the static prefix and
+        # suffix separately so the LLM wrapper can emit them as two
+        # Anthropic system content blocks with a cache breakpoint between
+        # them. ``_system_prompt`` stays as the concatenated form used for
+        # persistence and for the legacy single-block LLM path.
+        # On restore, these default to the concat/empty pair — the next
+        # AgentLoop iteration's dynamic-prompt refresh step repopulates.
+        self._system_prompt_static: str = system_prompt
+        self._system_prompt_dynamic_suffix: str = ""
        self._max_context_tokens = max_context_tokens
        self._compaction_threshold = compaction_threshold
+        # Buffer-based compaction trigger (Gap 7). When set, takes
+        # precedence over the multiplicative compaction_threshold so the
+        # loop reserves a fixed headroom for the next turn's input+output
+        # instead of trying to get exactly X% of the way to the hard
+        # limit. If left as None the legacy threshold-based rule is
+        # used, keeping old call sites behaving identically.
+        self._compaction_buffer_tokens = compaction_buffer_tokens
+        # Ratio component of the hybrid buffer. Combines additively with
+        # _compaction_buffer_tokens so callers can express "reserve N tokens
+        # plus M% of the window" — the absolute floor matters on tiny
+        # windows, the ratio matters on large ones.
+        self._compaction_buffer_ratio = compaction_buffer_ratio
+        self._compaction_warning_buffer_tokens = compaction_warning_buffer_tokens
        self._output_keys = output_keys
        self._store = store
        self._messages: list[Message] = []
@@ -393,15 +469,56 @@ class NodeConversation:

    @property
    def system_prompt(self) -> str:
+        """Full concatenated system prompt (static + dynamic suffix, if any).
+
+        This is the canonical form used for persistence and for the legacy
+        single-block LLM path. Split-prompt callers should read
+        ``system_prompt_static`` and ``system_prompt_dynamic_suffix`` instead.
+        """
        return self._system_prompt

-    def update_system_prompt(self, new_prompt: str) -> None:
+    @property
+    def system_prompt_static(self) -> str:
+        """Static prefix of the system prompt (cache-stable).
+
+        Equals ``system_prompt`` when no split is in use. When the AgentLoop
+        calls ``update_system_prompt(static, dynamic_suffix=...)``, this is
+        the piece sent as the cache-controlled first block.
+        """
+        return self._system_prompt_static
+
+    @property
+    def system_prompt_dynamic_suffix(self) -> str:
+        """Dynamic tail of the system prompt (not cached).
+
+        Empty unless the consumer splits its prompt. The LLM wrapper uses a
+        non-empty suffix to emit a two-block system content list with a
+        cache breakpoint between the static prefix and this tail.
+        """
+        return self._system_prompt_dynamic_suffix
+
+    def update_system_prompt(self, new_prompt: str, dynamic_suffix: str | None = None) -> None:
        """Update the system prompt.

        Used in continuous conversation mode at phase transitions to swap
        Layer 3 (focus) while preserving the conversation history.
+
+        When ``dynamic_suffix`` is provided, ``new_prompt`` is interpreted as
+        the STATIC prefix and ``dynamic_suffix`` as the per-turn tail; they
+        travel to the LLM as two separate cache-controlled blocks but are
+        persisted as a single concatenated string for backward-compat
+        restore. ``new_prompt`` alone (suffix left None) keeps the legacy
+        single-string behavior.
        """
-        self._system_prompt = new_prompt
+        if dynamic_suffix is None:
+            # Legacy single-string path — static == full, no suffix split.
+            self._system_prompt = new_prompt
+            self._system_prompt_static = new_prompt
+            self._system_prompt_dynamic_suffix = ""
+        else:
+            self._system_prompt_static = new_prompt
+            self._system_prompt_dynamic_suffix = dynamic_suffix
+            self._system_prompt = f"{new_prompt}\n\n{dynamic_suffix}" if dynamic_suffix else new_prompt
        self._meta_persisted = False  # re-persist with new prompt
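A short sketch of the intended call pattern (assuming the remaining constructor arguments are defaulted; the prompt strings are invented):

```python
conv = NodeConversation(system_prompt="You are the research agent.")

# Legacy path: one opaque string, no split.
conv.update_system_prompt("You are the research agent. Focus: literature review.")
assert conv.system_prompt_dynamic_suffix == ""

# Split path: the stable prefix becomes the cacheable first block,
# the per-turn tail rides outside the cache breakpoint.
conv.update_system_prompt(
    "You are the research agent.",
    dynamic_suffix="Current time: 2026-04-15 09:30",
)
assert conv.system_prompt_static == "You are the research agent."
assert conv.system_prompt == "You are the research agent.\n\nCurrent time: 2026-04-15 09:30"
```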

    def set_current_phase(self, phase_id: str) -> None:
@@ -440,6 +557,8 @@ class NodeConversation:
        is_transition_marker: bool = False,
        is_client_input: bool = False,
        image_content: list[dict[str, Any]] | None = None,
+        is_system_nudge: bool = False,
+        is_trigger: bool = False,
    ) -> Message:
        msg = Message(
            seq=self._next_seq,
@@ -450,6 +569,8 @@ class NodeConversation:
            is_transition_marker=is_transition_marker,
            is_client_input=is_client_input,
            image_content=image_content,
+            is_system_nudge=is_system_nudge,
+            is_trigger=is_trigger,
        )
        self._messages.append(msg)
        self._next_seq += 1
@@ -463,6 +584,8 @@ class NodeConversation:
        self,
        content: str,
        tool_calls: list[dict[str, Any]] | None = None,
+        *,
+        truncated: bool = False,
    ) -> Message:
        msg = Message(
            seq=self._next_seq,
@@ -471,6 +594,7 @@ class NodeConversation:
            tool_calls=tool_calls,
            phase_id=self._current_phase,
            run_id=self._run_id,
+            truncated=truncated,
        )
        self._messages.append(msg)
        self._next_seq += 1
@@ -486,6 +610,27 @@ class NodeConversation:
        image_content: list[dict[str, Any]] | None = None,
        is_skill_content: bool = False,
    ) -> Message:
+        # Dedup guard: reject a second tool_result for the same tool_use_id.
+        # Anthropic's API only accepts one result per tool_call, and a duplicate
+        # causes a hard 400 two turns later ("messages with role 'tool' must
+        # be a response to a preceding message with 'tool_calls'"). Duplicates
+        # can arise when a tool_call_timeout fires and records a placeholder
+        # error, then the real executor thread eventually delivers the actual
+        # result (the thread kept running inside run_in_executor — see
+        # tool_result_handler.execute_tool). We keep the FIRST result to
+        # preserve whatever state the agent already reasoned about.
+        for existing in reversed(self._messages):
+            if existing.role == "tool" and existing.tool_use_id == tool_use_id:
+                import logging as _logging
+
+                _logging.getLogger(__name__).warning(
+                    "add_tool_result: dropping duplicate result for tool_use_id=%s "
+                    "(first result preserved, %d chars; new result ignored, %d chars)",
+                    tool_use_id,
+                    len(existing.content),
+                    len(content),
+                )
+                return existing
        msg = Message(
            seq=self._next_seq,
            role="tool",
@@ -505,6 +650,59 @@ class NodeConversation:

    # --- Query -------------------------------------------------------------

+    def find_completed_tool_call(
+        self,
+        name: str,
+        tool_input: dict[str, Any],
+        within_last_turns: int = 3,
+    ) -> Message | None:
+        """Return the most recent assistant message that issued a tool call
+        with the same (name + canonical-json args) AND received a non-error
+        tool result, within the last ``within_last_turns`` assistant turns.
+
+        Used by the replay detector to flag when the model is about to redo
+        a successful call — we prepend a steer onto the upcoming result but
+        still execute, so tools like browser_screenshot that are legitimately
+        repeated are not silently skipped.
+        """
+        try:
+            target_canonical = json.dumps(tool_input, sort_keys=True, default=str)
+        except (TypeError, ValueError):
+            target_canonical = str(tool_input)
+
+        # Walk backwards over recent assistant messages
+        assistant_turns_seen = 0
+        for idx in range(len(self._messages) - 1, -1, -1):
+            m = self._messages[idx]
+            if m.role != "assistant":
+                continue
+            assistant_turns_seen += 1
+            if assistant_turns_seen > within_last_turns:
+                break
+            if not m.tool_calls:
+                continue
+            for tc in m.tool_calls:
+                func = tc.get("function", {}) if isinstance(tc, dict) else {}
+                tc_name = func.get("name")
+                if tc_name != name:
+                    continue
+                args_str = func.get("arguments", "")
+                try:
+                    parsed = json.loads(args_str) if isinstance(args_str, str) else args_str
+                    canonical = json.dumps(parsed, sort_keys=True, default=str)
+                except (TypeError, ValueError):
+                    canonical = str(args_str)
+                if canonical != target_canonical:
+                    continue
+                # Found a match — now verify its result was not an error.
+                tc_id = tc.get("id")
+                for later in self._messages[idx + 1 :]:
+                    if later.role == "tool" and later.tool_use_id == tc_id:
+                        if not later.is_error:
+                            return m
+                        break
+        return None
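The replay check hinges on canonical JSON: dumping both the stored `arguments` string and the incoming input with `sort_keys=True` makes the comparison insensitive to key order. A standalone illustration:

```python
import json

stored_args = '{"url": "https://example.com", "selector": "h1"}'  # as recorded on the tool_call
incoming = {"selector": "h1", "url": "https://example.com"}       # same call, different key order

canonical_stored = json.dumps(json.loads(stored_args), sort_keys=True, default=str)
canonical_incoming = json.dumps(incoming, sort_keys=True, default=str)
assert canonical_stored == canonical_incoming  # detected as a replay
```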

    def to_llm_messages(self) -> list[dict[str, Any]]:
        """Return messages as OpenAI-format dicts (system prompt excluded).

@@ -513,7 +711,48 @@ class NodeConversation:
        can happen when a loop is cancelled mid-tool-execution.
        """
        msgs = [m.to_llm_dict() for m in self._messages]
-        return self._repair_orphaned_tool_calls(msgs)
+        msgs = self._repair_orphaned_tool_calls(msgs)
+        msgs = self._sanitize_for_api(msgs)
+        return msgs
+
+    @staticmethod
+    def _sanitize_for_api(msgs: list[dict[str, Any]]) -> list[dict[str, Any]]:
+        """Final pass: ensure message sequence is valid for strict APIs.
+
+        Rules:
+        1. No two consecutive messages with the same role (merge or drop)
+        2. Tool messages must have a tool_call_id
+        3. Assistant messages with tool_calls must have content=null, not ""
+        4. First message must not be 'tool' or 'assistant' (without prior context)
+        """
+        cleaned: list[dict[str, Any]] = []
+        for m in msgs:
+            role = m.get("role")
+
+            # Fix assistant content when tool_calls present
+            if role == "assistant" and m.get("tool_calls"):
+                if m.get("content") == "":
+                    m["content"] = None
+
+            # Drop tool messages without tool_call_id
+            if role == "tool" and not m.get("tool_call_id"):
+                continue
+
+            # Drop consecutive duplicate roles (merge user messages)
+            if cleaned and cleaned[-1].get("role") == role == "user":
+                prev_content = cleaned[-1].get("content", "")
+                curr_content = m.get("content", "")
+                if isinstance(prev_content, str) and isinstance(curr_content, str):
+                    cleaned[-1]["content"] = f"{prev_content}\n{curr_content}"
+                    continue
+
+            cleaned.append(m)
+
+        # Drop leading assistant/tool messages (no prior context)
+        while cleaned and cleaned[0].get("role") in ("assistant", "tool"):
+            cleaned.pop(0)

+        return cleaned
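A worked example of the four rules on a deliberately broken sequence (data invented for illustration):

```python
raw = [
    {"role": "assistant", "content": "stale leading turn"},             # rule 4: dropped
    {"role": "user", "content": "first"},
    {"role": "user", "content": "second"},                              # rule 1: merged into "first"
    {"role": "assistant", "content": "", "tool_calls": [{"id": "t1"}]}, # rule 3: content -> None
    {"role": "tool", "content": "orphan"},                              # rule 2: no tool_call_id, dropped
    {"role": "tool", "tool_call_id": "t1", "content": "ok"},
]

cleaned = NodeConversation._sanitize_for_api(raw)
assert [m["role"] for m in cleaned] == ["user", "assistant", "tool"]
assert cleaned[0]["content"] == "first\nsecond"
assert cleaned[1]["content"] is None
```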

    @staticmethod
    def _repair_orphaned_tool_calls(
@@ -521,11 +760,18 @@ class NodeConversation:
    ) -> list[dict[str, Any]]:
        """Ensure tool_call / tool_result pairs are consistent.

-        1. **Orphaned tool results** (tool_result with no preceding tool_use)
-           are dropped. This happens when compaction removes an assistant
-           message but leaves its tool-result messages behind.
-        2. **Orphaned tool calls** (tool_use with no following tool_result)
-           get a synthetic error result appended. This happens when a loop
+        1. **Orphaned tool results** (tool_result with no matching tool_use
+           anywhere) are dropped. Happens after compaction removes the
+           parent assistant message.
+        2. **Positionally orphaned tool results** (tool_result separated
+           from its parent by a non-tool message, e.g. a user injection)
+           are dropped. The Anthropic API requires tool messages to
+           follow immediately after the assistant message that issued
+           the matching tool_call.
+        3. **Duplicate tool results** (same tool_call_id appearing more
+           than once) are dropped; only the first is kept.
+        4. **Orphaned tool calls** (tool_use with no following tool_result)
+           get a synthetic error result appended. Happens when the loop
           is cancelled mid-tool-execution.
        """
        # Pass 1: collect all tool_call IDs from assistant messages so we
@@ -538,41 +784,75 @@ class NodeConversation:
            if tc_id:
                all_tool_call_ids.add(tc_id)

-        # Pass 2: build repaired list — drop orphaned tool results, patch
-        # missing tool results.
+        # Pass 2: build repaired list — drop orphaned tool results, drop
+        # positional orphans and duplicates, patch missing tool results.
+        #
+        # ``open_tool_calls`` holds the tool_call IDs we're still expecting
+        # results for: it's populated when we emit an assistant-with-tool_calls
+        # and drained as matching tool messages follow. Any tool message
+        # whose id is not currently open is positionally invalid and gets
+        # dropped — that closes the gap that caused the tool-after-user
+        # 400 errors.
        repaired: list[dict[str, Any]] = []
-        for i, m in enumerate(msgs):
-            # Drop tool-result messages whose tool_call_id has no matching
-            # tool_use in any assistant message (orphaned by compaction).
-            if m.get("role") == "tool":
-                tid = m.get("tool_call_id")
-                if tid and tid not in all_tool_call_ids:
-                    continue  # skip orphaned result
+        open_tool_calls: set[str] = set()
+        seen_tool_ids: set[str] = set()
+        for m in msgs:
+            role = m.get("role")

-            repaired.append(m)
-            tool_calls = m.get("tool_calls")
-            if m.get("role") != "assistant" or not tool_calls:
+            if role == "tool":
+                tid = m.get("tool_call_id")
+                # Drop tool results with no matching tool_use anywhere.
+                if not tid or tid not in all_tool_call_ids:
+                    continue
+                # Drop duplicates (same id appearing twice) — keep first.
+                if tid in seen_tool_ids:
+                    continue
+                # Drop positional orphans — tool messages whose parent
+                # assistant isn't the still-open assistant block.
+                if tid not in open_tool_calls:
+                    continue
+                open_tool_calls.discard(tid)
+                seen_tool_ids.add(tid)
+                repaired.append(m)
                continue
-            # Collect IDs of tool results that follow this assistant message
-            answered: set[str] = set()
-            for j in range(i + 1, len(msgs)):
-                if msgs[j].get("role") == "tool":
-                    tid = msgs[j].get("tool_call_id")
-                    if tid:
-                        answered.add(tid)
-                else:
-                    break  # stop at first non-tool message
-            # Patch any missing results
-            for tc in tool_calls:
-                tc_id = tc.get("id")
-                if tc_id and tc_id not in answered:
+
+            # Any non-tool message closes the current assistant tool block.
+            # If the previous assistant left tool_calls unanswered, patch
+            # synthetic error results before emitting this message so the
+            # API sees a complete pairing.
+            if open_tool_calls:
+                for stale_id in list(open_tool_calls):
                    repaired.append(
                        {
                            "role": "tool",
-                            "tool_call_id": tc_id,
+                            "tool_call_id": stale_id,
                            "content": "ERROR: Tool execution was interrupted.",
                        }
                    )
+                    seen_tool_ids.add(stale_id)
+                open_tool_calls.clear()
+
+            repaired.append(m)
+
+            if role == "assistant":
+                for tc in m.get("tool_calls") or []:
+                    tc_id = tc.get("id")
+                    if tc_id and tc_id not in seen_tool_ids:
+                        open_tool_calls.add(tc_id)
+
+        # Tail: if the conversation ends with an assistant that issued
+        # tool_calls and no results followed, patch them so the next
+        # turn's first message can be a valid assistant/user response.
+        if open_tool_calls:
+            for stale_id in list(open_tool_calls):
+                repaired.append(
+                    {
+                        "role": "tool",
+                        "tool_call_id": stale_id,
+                        "content": "ERROR: Tool execution was interrupted.",
+                    }
+                )

        return repaired
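End-to-end effect of the open/seen bookkeeping on a stream that was cancelled mid-tool-execution (ids and contents invented):

```python
msgs = [
    {"role": "user", "content": "go"},
    {"role": "assistant", "tool_calls": [{"id": "call_1"}], "content": None},
    # stream cancelled here: no tool result for call_1 ever landed
    {"role": "user", "content": "status?"},
]

repaired = NodeConversation._repair_orphaned_tool_calls(msgs)
# A synthetic error result is patched in BEFORE the user injection, so the
# tool message still follows its parent assistant directly.
assert repaired[2] == {
    "role": "tool",
    "tool_call_id": "call_1",
    "content": "ERROR: Tool execution was interrupted.",
}
assert repaired[3]["content"] == "status?"
```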

    def estimate_tokens(self) -> int:
@@ -621,8 +901,48 @@ class NodeConversation:
        return self.estimate_tokens() / self._max_context_tokens

    def needs_compaction(self) -> bool:
        """True when the conversation should be compacted before the
        next LLM call.
+
+        Hybrid buffer rule: the headroom reserved before compaction fires
+        is the SUM of an absolute fixed component and a ratio of the hard
+        context limit:
+
+            effective_buffer = compaction_buffer_tokens
+                               + compaction_buffer_ratio * max_context_tokens
+
+        The fixed component gives a floor on tiny windows; the ratio
+        keeps the trigger meaningful on large windows where any constant
+        buffer becomes a rounding error (an 8k buffer is 75% on a 32k
+        window but 96% on a 200k window). Compaction fires when the
+        current estimate would consume more than (limit - effective_buffer).
+
+        When neither component is configured, falls back to the legacy
+        multiplicative threshold so old callers keep behaving identically.
        """
+        if self._max_context_tokens <= 0:
+            return False
+        fixed = self._compaction_buffer_tokens
+        ratio = self._compaction_buffer_ratio
+        if fixed is not None or ratio is not None:
+            effective_buffer = (fixed or 0) + (ratio or 0.0) * self._max_context_tokens
+            budget = self._max_context_tokens - effective_buffer
+            return self.estimate_tokens() >= max(0.0, budget)
        return self.estimate_tokens() >= self._max_context_tokens * self._compaction_threshold
+
+    def compaction_warning(self) -> bool:
+        """True when the conversation has crossed the warning threshold
+        but not yet the hard compaction trigger.
+
+        Used by telemetry / UI to show a "context getting tight" hint
+        before a compaction pass actually runs. Returns False when no
+        warning buffer is configured (legacy behaviour).
+        """
+        if self._max_context_tokens <= 0 or self._compaction_warning_buffer_tokens is None:
+            return False
+        warn_at = self._max_context_tokens - self._compaction_warning_buffer_tokens
+        return self.estimate_tokens() >= max(0, warn_at)
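Concrete numbers for the hybrid rule, assuming a conversation configured with `compaction_buffer_tokens=8_000` and `compaction_buffer_ratio=0.10`:

```python
# 32k window: the fixed floor dominates.
#   effective_buffer = 8_000 + 0.10 * 32_000  = 11_200
#   compaction fires at 32_000 - 11_200       = 20_800 tokens (~65% of the window)
# 200k window: the ratio keeps the trigger meaningful.
#   effective_buffer = 8_000 + 0.10 * 200_000 = 28_000
#   compaction fires at 200_000 - 28_000      = 172_000 tokens (86%, not 96%)
for window in (32_000, 200_000):
    effective_buffer = 8_000 + 0.10 * window
    budget = window - effective_buffer
    print(f"{window=}: compact at >= {budget:,.0f} tokens")
```

With the fixed component alone, the same 8k buffer would fire at 75% of a 32k window but 96% of a 200k window, which is the rounding-error problem the ratio term exists to fix.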

    # --- Output-key extraction ---------------------------------------------

    def _extract_protected_values(self, messages: list[Message]) -> dict[str, str]:
@@ -699,7 +1019,7 @@ class NodeConversation:
                continue  # never prune errors
            if msg.is_skill_content:
                continue  # never prune activated skill instructions (AS-10)
-            if msg.content.startswith("[Pruned tool result"):
+            if msg.content.startswith(("Pruned tool result", "[Pruned tool result")):
                continue  # already pruned
            # Tiny results (set_output acks, confirmations) — pruning
            # saves negligible space but makes the LLM think the call
@@ -731,12 +1051,12 @@ class NodeConversation:

            if spillover:
                placeholder = (
-                    f"[Pruned tool result: {orig_len} chars. "
-                    f"Full data in '{spillover}'. "
-                    f"Use load_data('{spillover}') to retrieve.]"
+                    f"Pruned tool result ({orig_len:,} chars) cleared from context. "
+                    f"Full data saved at: {spillover}\n"
+                    f"Read the complete data with read_file(path='{spillover}')."
                )
            else:
-                placeholder = f"[Pruned tool result: {orig_len} chars cleared from context.]"
+                placeholder = f"Pruned tool result ({orig_len:,} chars) cleared from context."

            self._messages[i] = Message(
                seq=msg.seq,
@@ -758,6 +1078,78 @@ class NodeConversation:
        self._last_api_input_tokens = None
        return count

+    async def evict_old_images(self, keep_latest: int = 2) -> int:
+        """Strip ``image_content`` from older messages, keeping the most recent.
+
+        Screenshots from ``browser_screenshot`` are inlined into the
+        message's ``image_content`` as base64 data URLs. Each screenshot
+        costs ~250k tokens when the provider counts the base64 as
+        text — four screenshots push a conversation over gemini's 1M
+        context limit and trigger out-of-context garbage output (see
+        ``session_20260415_104727_5c4ed7ff`` for the terminal case
+        where the model emitted ``协日`` as its final text then stopped).
+
+        This method walks backward through messages and keeps
+        ``image_content`` intact on the most recent ``keep_latest``
+        messages that have images. Older messages get their
+        ``image_content`` nulled out — the text content (metadata
+        like url, dimensions, scale hints) stays, but the raw bytes
+        are dropped. Storage is updated too so cold-restore sees the
+        same evicted state.
+
+        Run this right after every tool result is recorded so image
+        context stays bounded even within a single iteration (the
+        compaction pipeline only fires at iteration boundaries, too
+        late for a single turn that takes 4 screenshots).
+
+        Returns the number of messages whose image_content was evicted.
+        """
+        if not self._messages or keep_latest < 0:
+            return 0
+
+        # Find messages carrying images, walking newest → oldest.
+        image_indices: list[int] = []
+        for i in range(len(self._messages) - 1, -1, -1):
+            if self._messages[i].image_content:
+                image_indices.append(i)
+
+        # Nothing to evict if we have ≤ keep_latest images total.
+        if len(image_indices) <= keep_latest:
+            return 0
+
+        # Evict everything past the first keep_latest (newest) entries.
+        to_evict = image_indices[keep_latest:]
+        evicted = 0
+        for idx in to_evict:
+            msg = self._messages[idx]
+            self._messages[idx] = Message(
+                seq=msg.seq,
+                role=msg.role,
+                content=msg.content,
+                tool_use_id=msg.tool_use_id,
+                tool_calls=msg.tool_calls,
+                is_error=msg.is_error,
+                phase_id=msg.phase_id,
+                is_transition_marker=msg.is_transition_marker,
+                is_client_input=msg.is_client_input,
+                image_content=None,  # ← dropped
+                is_skill_content=msg.is_skill_content,
+                run_id=msg.run_id,
+            )
+            evicted += 1
+            if self._store:
+                await self._store.write_part(msg.seq, self._messages[idx].to_storage_dict())
+
+        if evicted:
+            # Reset token estimate — image blocks no longer contribute.
+            self._last_api_input_tokens = None
+            logger.info(
+                "evict_old_images: dropped image_content from %d message(s), kept %d most recent",
+                evicted,
+                keep_latest,
+            )
+        return evicted
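The keep-window arithmetic in miniature (five image-bearing messages, `keep_latest=2`; indices are illustrative):

```python
# Indices of image-bearing messages as the method collects them,
# walking newest → oldest:
image_indices = [14, 11, 7, 5, 2]       # newest first
keep_latest = 2
to_evict = image_indices[keep_latest:]  # [7, 5, 2] → three evictions
assert to_evict == [7, 5, 2]
# Messages 14 and 11 keep their image bytes; 7, 5, and 2 keep only their
# text metadata (url, dimensions, scale hints).
```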

    async def compact(
        self,
        summary: str,
@@ -910,9 +1302,7 @@ class NodeConversation:
        for msg in old_messages:
            if msg.role != "assistant" or not msg.tool_calls:
                continue
-            has_protected = any(
-                tc.get("function", {}).get("name") == "set_output" for tc in msg.tool_calls
-            )
+            has_protected = any(tc.get("function", {}).get("name") == "set_output" for tc in msg.tool_calls)
            tc_ids = {tc.get("id", "") for tc in msg.tool_calls}
            if has_protected:
                protected_tc_ids |= tc_ids
@@ -1018,16 +1408,18 @@ class NodeConversation:
            # Nothing to save — skip file creation
            conv_filename = ""

-        # Build reference message
+        # Build reference message. Prose format (no brackets) — see the
+        # poison-pattern note on truncate_tool_result. Frontier models
+        # autocomplete `[...']` trailers into their own text turns.
        ref_parts: list[str] = []
        if conv_filename:
            full_path = str((spill_path / conv_filename).resolve())
            ref_parts.append(
-                f"[Previous conversation saved to '{full_path}'. "
-                f"Use load_data('{conv_filename}') to review if needed.]"
+                f"Previous conversation saved at: {full_path}\n"
+                f"Read the full transcript with read_file('{conv_filename}')."
            )
        elif not collapsed_msgs:
-            ref_parts.append("[Previous freeform messages compacted.]")
+            ref_parts.append("(Previous freeform messages compacted.)")

        # Aggressive: add collapsed tool-call history to the reference
        if collapsed_msgs:
@@ -1106,11 +1498,7 @@ class NodeConversation:

    def export_summary(self) -> str:
        """Structured summary with [STATS], [CONFIG], [RECENT_MESSAGES] sections."""
-        prompt_preview = (
-            self._system_prompt[:80] + "..."
-            if len(self._system_prompt) > 80
-            else self._system_prompt
-        )
+        prompt_preview = self._system_prompt[:80] + "..." if len(self._system_prompt) > 80 else self._system_prompt

        lines = [
            "[STATS]",
@@ -1143,6 +1531,45 @@ class NodeConversation:
        await self._persist_meta()
        await self._store.write_part(message.seq, message.to_storage_dict())
        await self._write_next_seq()
+        # Any partial checkpoint for this seq is now superseded by the real
+        # part — clear it so a future restore doesn't resurrect stale text.
+        try:
+            await self._store.clear_partial(message.seq)
+        except AttributeError:
+            # Older stores may not implement partials; ignore.
+            pass

+    async def checkpoint_partial_assistant(
+        self,
+        accumulated_text: str,
+        tool_calls: list[dict[str, Any]] | None = None,
+    ) -> None:
+        """Write an in-flight assistant turn's state to disk under the next seq.
+
+        Called from the stream event loop. Safe to call repeatedly — each call
+        overwrites the prior checkpoint. Persisted via ``write_partial`` so it
+        does NOT appear in ``read_parts()`` and cannot be double-loaded. Cleared
+        automatically when ``add_assistant_message`` for this seq lands.
+        """
+        if self._store is None:
+            return
+        if not self._meta_persisted:
+            await self._persist_meta()
+        payload: dict[str, Any] = {
+            "seq": self._next_seq,
+            "role": "assistant",
+            "content": accumulated_text,
+            "phase_id": self._current_phase,
+            "run_id": self._run_id,
+            "truncated": True,
+        }
+        if tool_calls:
+            payload["tool_calls"] = tool_calls
+        try:
+            await self._store.write_partial(self._next_seq, payload)
+        except AttributeError:
+            # Older stores may not implement partials; ignore.
+            pass
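A sketch of the intended call pattern from a streaming loop. The `stream` iterable and its `text_delta` event attribute are invented for illustration; the checkpoint and finalize calls are the conversation API shown above:

```python
async def consume_stream(conv: NodeConversation, stream) -> None:
    accumulated = ""
    async for event in stream:          # hypothetical stream of text deltas
        accumulated += event.text_delta
        # Cheap to call repeatedly: each call overwrites the prior
        # checkpoint under the same (next) seq.
        await conv.checkpoint_partial_assistant(accumulated)
    # The real message supersedes the checkpoint and clears it.
    await conv.add_assistant_message(accumulated)
```

If the process dies between checkpoints, the restore path below resurrects the last checkpoint as a `truncated=True` assistant message instead of losing the turn entirely.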

    async def _persist_meta(self) -> None:
        """Lazily write conversation metadata to the store (called once).
@@ -1156,6 +1583,9 @@ class NodeConversation:
            "system_prompt": self._system_prompt,
            "max_context_tokens": self._max_context_tokens,
            "compaction_threshold": self._compaction_threshold,
+            "compaction_buffer_tokens": self._compaction_buffer_tokens,
+            "compaction_buffer_ratio": self._compaction_buffer_ratio,
+            "compaction_warning_buffer_tokens": self._compaction_warning_buffer_tokens,
            "output_keys": self._output_keys,
        }
        await self._store.write_meta(run_meta)
@@ -1203,12 +1633,28 @@ class NodeConversation:
            output_keys=meta.get("output_keys"),
            store=store,
            run_id=run_id,
+            compaction_buffer_tokens=meta.get("compaction_buffer_tokens"),
+            compaction_buffer_ratio=meta.get("compaction_buffer_ratio"),
+            compaction_warning_buffer_tokens=meta.get("compaction_warning_buffer_tokens"),
        )
        conv._meta_persisted = True

        parts = await store.read_parts()
        if phase_id:
-            parts = [p for p in parts if p.get("phase_id") == phase_id]
+            filtered_parts = [p for p in parts if p.get("phase_id") == phase_id]
+            if filtered_parts:
+                parts = filtered_parts
+            elif parts and all(p.get("phase_id") is None for p in parts):
+                # Backward compatibility: older isolated stores (including queen
+                # sessions) persisted parts without phase_id. In that case, the
+                # phase filter would incorrectly hide the entire conversation.
+                logger.info(
+                    "Restoring legacy unphased conversation without applying phase filter (phase_id=%s, parts=%d)",
+                    phase_id,
+                    len(parts),
+                )
+            else:
+                parts = filtered_parts
        # Filter by run_id so intentional restarts (new run_id) start fresh
        # while crash recovery (same run_id) loads prior parts.
        if run_id and not is_legacy_run_id(run_id):
@@ -1222,4 +1668,45 @@ class NodeConversation:
        elif conv._messages:
            conv._next_seq = conv._messages[-1].seq + 1

+        # Surface any leftover partial checkpoints as truncated messages so
+        # the next turn sees what the interrupted stream was in the middle
+        # of producing. Only partials whose seq is >= next_seq are meaningful;
+        # anything lower was already superseded by a real part.
+        try:
+            partials = await store.read_all_partials()
+        except AttributeError:
+            partials = []
+        for p in partials:
+            pseq = p.get("seq", -1)
+            if pseq < conv._next_seq:
+                # Stale — clean it up.
+                try:
+                    await store.clear_partial(pseq)
+                except AttributeError:
+                    pass
+                continue
+            # Only resurrect partials relevant to this run / phase.
+            if run_id and not is_legacy_run_id(run_id) and p.get("run_id") != run_id:
+                continue
+            if phase_id and p.get("phase_id") is not None and p.get("phase_id") != phase_id:
+                continue
+            # Reconstruct as a truncated assistant message.
+            msg = Message(
+                seq=pseq,
+                role="assistant",
+                content=p.get("content", "") or "",
+                tool_calls=p.get("tool_calls"),
+                phase_id=p.get("phase_id"),
+                run_id=p.get("run_id"),
+                truncated=True,
+            )
+            conv._messages.append(msg)
+            conv._next_seq = max(conv._next_seq, pseq + 1)
+            logger.info(
+                "restore: resurrected truncated partial seq=%d (text=%d chars, tool_calls=%d)",
+                pseq,
+                len(msg.content),
+                len(msg.tool_calls or []),
+            )

        return conv

@@ -16,14 +16,13 @@ import os
import re
import time
from datetime import UTC, datetime
from pathlib import Path
from typing import Any

from framework.agent_loop.conversation import Message, NodeConversation
from framework.agent_loop.internals.event_publishing import publish_context_usage
from framework.agent_loop.internals.types import LoopConfig, OutputAccumulator
-from framework.orchestrator.node import NodeContext
from framework.host.event_bus import EventBus
+from framework.orchestrator.node import NodeContext

logger = logging.getLogger(__name__)

@@ -31,19 +30,38 @@ logger = logging.getLogger(__name__)
LLM_COMPACT_CHAR_LIMIT: int = 240_000
LLM_COMPACT_MAX_DEPTH: int = 10

-# Microcompaction: tools whose results can be safely cleared
+# Microcompaction: tools whose results can be safely cleared from context
+# because the agent can re-derive them on demand. The bar for inclusion is
+# "old result has no irreversible value": file content can be re-read, a
+# search can be re-run, a screenshot can be re-captured, terminal output can
+# be re-fetched, etc. Write / edit results are short confirmations whose
+# value is in the side effect, not the message — also fair game.
COMPACTABLE_TOOLS: frozenset[str] = frozenset(
    {
+        # File ops — content lives on disk, re-readable.
        "read_file",
        "run_command",
        "web_search",
        "web_fetch",
        "grep_search",
        "glob_search",
        "search_files",
        "write_file",
        "edit_file",
        "pdf_read",
+        # Terminal — re-runnable; advanced job/output tools produce verbose
+        # logs whose recent state is what matters.
+        "terminal_exec",
+        "terminal_rg",
+        "terminal_find",
+        "terminal_output_get",
+        "terminal_job_logs",
+        # Web / research — pages and queries can be re-fetched.
+        "web_scrape",
+        "search_papers",
+        "download_paper",
+        "search_wikipedia",
+        # Browser read-only inspection — current page state is what matters,
+        # old snapshots are stale by definition.
+        "browser_screenshot",
+        "list_directory",
+        "browser_snapshot",
+        "browser_html",
+        "browser_get_text",
    }
)
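The set is consulted as a simple membership gate before a tool result is considered for clearing; a minimal sketch of the checks `microcompact()` applies per result (the length floor comes from the loop below):

```python
def is_microcompactable(tool_name: str, content: str, is_error: bool) -> bool:
    # Sketch of the per-result gate: the tool must be re-derivable, the
    # result must not be an error, and tiny results are never worth clearing.
    return (
        tool_name in COMPACTABLE_TOOLS
        and not is_error
        and len(content) >= 100
    )

assert is_microcompactable("web_search", "x" * 500, is_error=False)
assert not is_microcompactable("set_output", "x" * 500, is_error=False)  # irreversible value
```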

@@ -80,7 +98,7 @@ def microcompact(
        msg = messages[i]
        if msg.role != "tool" or msg.is_error or msg.is_skill_content:
            continue
-        if msg.content.startswith(("[Pruned tool result", "[Old tool result")):
+        if msg.content.startswith(("Pruned tool result", "[Pruned tool result", "[Old tool result")):
            continue
        if len(msg.content) < 100:
            continue
@@ -102,12 +120,12 @@ def microcompact(
        orig_len = len(msg.content)
        if spillover:
            placeholder = (
-                f"[Old tool result cleared: {orig_len} chars. "
-                f"Full data in '{spillover}'. "
-                f"Use load_data('{spillover}') to retrieve.]"
+                f"Old tool result ({orig_len:,} chars) cleared from context. "
+                f"Full data saved at: {spillover}\n"
+                f"Read the complete data with read_file(path='{spillover}')."
            )
        else:
-            placeholder = f"[Old tool result cleared: {orig_len} chars.]"
+            placeholder = f"Old tool result ({orig_len:,} chars) cleared from context."

        # Mutate in-place (microcompact is synchronous, no store writes)
        conversation._messages[i] = Message(
@@ -142,7 +160,14 @@ def _find_tool_name_for_result(messages: list[Message], tool_msg: Message) -> str


def _extract_spillover_filename_inline(content: str) -> str | None:
-    """Quick inline check for spillover filename in tool result content."""
+    """Quick inline check for spillover filename in tool result content.
+
+    Matches both the new prose format ("saved at: /path") and the
+    legacy bracketed trailer ("saved to '/path'").
+    """
+    match = re.search(r"saved at:\s*(\S+)", content, re.IGNORECASE)
+    if match:
+        return match.group(1)
    match = re.search(r"saved to '([^']+)'", content, re.IGNORECASE)
    return match.group(1) if match else None

@@ -168,13 +193,17 @@ async def compact(
    """
    conv_id = id(conversation)

-    # Circuit breaker: stop auto-compacting after repeated failures
-    if _failure_counts.get(conv_id, 0) >= MAX_CONSECUTIVE_FAILURES:
+    # Circuit breaker: stop LLM-based compaction after repeated failures,
+    # but still fall through to the emergency deterministic summary so
+    # the conversation doesn't silently grow past the context window.
+    # Without this, a persistent LLM outage during compaction would
+    # leave the agent stuck sending oversized prompts until the API 400s.
+    _llm_compaction_skipped = _failure_counts.get(conv_id, 0) >= MAX_CONSECUTIVE_FAILURES
+    if _llm_compaction_skipped:
        logger.warning(
-            "Circuit breaker: skipping compaction after %d consecutive failures",
+            "Circuit breaker: LLM compaction disabled after %d failures — skipping straight to emergency summary",
            _failure_counts[conv_id],
        )
-        return

    # Recompaction detection
    now = time.monotonic()
@@ -256,7 +285,7 @@ async def compact(
        return

    # --- Step 3: LLM summary compaction ---
-    if ctx.llm is not None:
+    if ctx.llm is not None and not _llm_compaction_skipped:
        logger.info(
            "LLM summary compaction triggered (%.0f%% usage)",
            conversation.usage_ratio() * 100,
@@ -360,6 +389,7 @@ async def llm_compact(
    char_limit: int = LLM_COMPACT_CHAR_LIMIT,
    max_depth: int = LLM_COMPACT_MAX_DEPTH,
    max_context_tokens: int = 128_000,
+    preserve_user_messages: bool = False,
) -> str:
    """Summarise *messages* with LLM, splitting recursively if too large.

@@ -367,6 +397,11 @@ async def llm_compact(
    rejects the call with a context-length error, the messages are split
    in half and each half is summarised independently. Tool history is
    appended once at the top-level call (``_depth == 0``).
+
+    When ``preserve_user_messages`` is True, the prompt and system message
+    are amplified to instruct the LLM to keep every user message verbatim
+    and in full — used by the manual /compact-and-fork endpoint where the
+    user wants their voice carried into the new session intact.
    """
    from framework.agent_loop.conversation import extract_tool_call_history
    from framework.agent_loop.internals.tool_result_handler import is_context_too_large_error
@@ -390,6 +425,7 @@ async def llm_compact(
            char_limit=char_limit,
            max_depth=max_depth,
            max_context_tokens=max_context_tokens,
+            preserve_user_messages=preserve_user_messages,
        )
    else:
        prompt = build_llm_compaction_prompt(
@@ -397,17 +433,30 @@ async def llm_compact(
            accumulator,
            formatted,
            max_context_tokens=max_context_tokens,
+            preserve_user_messages=preserve_user_messages,
        )
+    if preserve_user_messages:
+        system_msg = (
+            "You are a conversation compactor for an AI agent. "
+            "Write a detailed summary that allows the agent to "
+            "continue its work. CRITICAL: reproduce every user "
+            "message verbatim and in full inside the 'User Messages' "
+            "section — do not paraphrase, truncate, or merge them. "
+            "Assistant turns and tool results may be summarised, but "
+            "user input is sacred."
+        )
+    else:
+        system_msg = (
+            "You are a conversation compactor for an AI agent. "
+            "Write a detailed summary that allows the agent to "
+            "continue its work. Preserve user-stated rules, "
+            "constraints, and account/identity preferences verbatim."
+        )
    summary_budget = max(1024, max_context_tokens // 2)
    try:
        response = await ctx.llm.acomplete(
            messages=[{"role": "user", "content": prompt}],
-            system=(
-                "You are a conversation compactor for an AI agent. "
-                "Write a detailed summary that allows the agent to "
-                "continue its work. Preserve user-stated rules, "
-                "constraints, and account/identity preferences verbatim."
-            ),
+            system=system_msg,
            max_tokens=summary_budget,
        )
        summary = response.content
@@ -426,6 +475,7 @@ async def llm_compact(
                char_limit=char_limit,
                max_depth=max_depth,
                max_context_tokens=max_context_tokens,
+                preserve_user_messages=preserve_user_messages,
            )
        else:
            raise
@@ -448,6 +498,7 @@ async def _llm_compact_split(
    char_limit: int = LLM_COMPACT_CHAR_LIMIT,
    max_depth: int = LLM_COMPACT_MAX_DEPTH,
    max_context_tokens: int = 128_000,
+    preserve_user_messages: bool = False,
) -> str:
    """Split messages in half and summarise each half independently."""
    mid = max(1, len(messages) // 2)
@@ -459,6 +510,7 @@ async def _llm_compact_split(
        char_limit=char_limit,
        max_depth=max_depth,
        max_context_tokens=max_context_tokens,
+        preserve_user_messages=preserve_user_messages,
    )
    s2 = await llm_compact(
        ctx,
@@ -468,6 +520,7 @@ async def _llm_compact_split(
        char_limit=char_limit,
        max_depth=max_depth,
        max_context_tokens=max_context_tokens,
+        preserve_user_messages=preserve_user_messages,
    )
    return s1 + "\n\n" + s2

@@ -499,6 +552,7 @@ def build_llm_compaction_prompt(
    formatted_messages: str,
    *,
    max_context_tokens: int = 128_000,
+    preserve_user_messages: bool = False,
) -> str:
    """Build prompt for LLM compaction targeting 50% of token budget.

@@ -506,7 +560,7 @@ def build_llm_compaction_prompt(
    service. Each section focuses on a different aspect of the conversation
    so the summariser produces consistently useful, well-organised output.
    """
-    spec = ctx.node_spec
+    spec = ctx.agent_spec
    ctx_lines = [f"NODE: {spec.name} (id={spec.id})"]
    if spec.description:
        ctx_lines.append(f"PURPOSE: {spec.description}")
@@ -518,10 +572,7 @@ def build_llm_compaction_prompt(
    done = {k: v for k, v in acc.items() if v is not None}
    todo = [k for k, v in acc.items() if v is None]
    if done:
-        ctx_lines.append(
-            "OUTPUTS ALREADY SET:\n"
-            + "\n".join(f" {k}: {str(v)[:150]}" for k, v in done.items())
-        )
+        ctx_lines.append("OUTPUTS ALREADY SET:\n" + "\n".join(f" {k}: {str(v)[:150]}" for k, v in done.items()))
    if todo:
        ctx_lines.append(f"OUTPUTS STILL NEEDED: {', '.join(todo)}")
    elif spec.output_keys:
@@ -531,6 +582,18 @@ def build_llm_compaction_prompt(
    target_chars = target_tokens * 4
    node_ctx = "\n".join(ctx_lines)

+    user_messages_section = (
+        "6. **User Messages** — Reproduce EVERY user message verbatim and "
+        "in full, in chronological order, each on its own line prefixed "
+        'with the message index (e.g. "[U1] ..."). Do NOT paraphrase, '
+        "summarise, merge, or omit any user message. Preserve markdown, "
+        "code fences, whitespace, and punctuation exactly as the user "
+        "wrote them.\n"
+        if preserve_user_messages
+        else "6. **User Messages** — Preserve ALL user-stated rules, constraints, "
+        "identity preferences, and account details verbatim.\n"
+    )
+
    return (
        "You are compacting an AI agent's conversation history. "
        "The agent is still working and needs to continue.\n\n"
@@ -551,8 +614,7 @@ def build_llm_compaction_prompt(
        "resolved. Include root causes so the agent doesn't repeat them.\n"
        "5. **Problem Solving Efforts** — Approaches tried, dead ends hit, "
        "and reasoning behind the current strategy.\n"
-        "6. **User Messages** — Preserve ALL user-stated rules, constraints, "
-        "identity preferences, and account details verbatim.\n"
+        f"{user_messages_section}"
        "7. **Pending Tasks** — Work remaining, outputs still needed, and "
        "any blockers.\n"
        "8. **Current Work** — The most recent action taken and the immediate "
@@ -575,12 +637,8 @@ def build_message_inventory(conversation: NodeConversation) -> list[dict[str, Any]]:
        if message.tool_calls:
            for tool_call in message.tool_calls:
                args = tool_call.get("function", {}).get("arguments", "")
-                tool_call_args_chars += (
-                    len(args) if isinstance(args, str) else len(json.dumps(args))
-                )
-                names = [
-                    tool_call.get("function", {}).get("name", "?") for tool_call in message.tool_calls
-                ]
+                tool_call_args_chars += len(args) if isinstance(args, str) else len(json.dumps(args))
+                names = [tool_call.get("function", {}).get("name", "?") for tool_call in message.tool_calls]
            tool_name = ", ".join(names)
        elif message.role == "tool" and message.tool_use_id:
            for previous in conversation.messages:
@@ -617,18 +675,20 @@ def write_compaction_debug_log(
    level: str,
    inventory: list[dict[str, Any]] | None,
) -> None:
-    """Write detailed compaction analysis to ~/.hive/compaction_log/."""
-    log_dir = Path.home() / ".hive" / "compaction_log"
+    """Write detailed compaction analysis to $HIVE_HOME/compaction_log/."""
+    from framework.config import HIVE_HOME
+
+    log_dir = HIVE_HOME / "compaction_log"
    log_dir.mkdir(parents=True, exist_ok=True)

    ts = datetime.now(UTC).strftime("%Y%m%dT%H%M%S_%f")
-    node_label = ctx.node_id.replace("/", "_")
+    node_label = ctx.agent_id.replace("/", "_")
    log_path = log_dir / f"{ts}_{node_label}.md"

    lines: list[str] = [
-        f"# Compaction Debug — {ctx.node_id}",
+        f"# Compaction Debug — {ctx.agent_id}",
        f"**Time:** {datetime.now(UTC).isoformat()}",
-        f"**Node:** {ctx.node_spec.name} (`{ctx.node_id}`)",
+        f"**Node:** {ctx.agent_spec.name} (`{ctx.agent_id}`)",
    ]
    if ctx.stream_id:
        lines.append(f"**Stream:** {ctx.stream_id}")
@@ -637,14 +697,8 @@ def write_compaction_debug_log(
    lines.append("")

    if inventory:
-        total_chars = sum(
-            entry.get("content_chars", 0) + entry.get("tool_call_args_chars", 0)
-            for entry in inventory
-        )
-        lines.append(
-            "## Pre-Compaction Message Inventory "
-            f"({len(inventory)} messages, {total_chars:,} total chars)"
-        )
+        total_chars = sum(entry.get("content_chars", 0) + entry.get("tool_call_args_chars", 0) for entry in inventory)
+        lines.append(f"## Pre-Compaction Message Inventory ({len(inventory)} messages, {total_chars:,} total chars)")
        lines.append("")
        ranked = sorted(
            inventory,
@@ -663,8 +717,7 @@ def write_compaction_debug_log(
            if entry.get("phase"):
                flags.append(f"phase={entry['phase']}")
            lines.append(
-                f"| {i} | {entry['seq']} | {entry['role']} | {tool} "
-                f"| {chars:,} | {pct:.1f}% | {', '.join(flags)} |"
+                f"| {i} | {entry['seq']} | {entry['role']} | {tool} | {chars:,} | {pct:.1f}% | {', '.join(flags)} |"
            )

        large = [entry for entry in ranked if entry.get("preview")]
@@ -672,9 +725,7 @@ def write_compaction_debug_log(
            lines.append("")
            lines.append("### Large message previews")
            for entry in large:
-                lines.append(
-                    f"\n**seq={entry['seq']}** ({entry['role']}, {entry.get('tool', '')}):"
-                )
+                lines.append(f"\n**seq={entry['seq']}** ({entry['role']}, {entry.get('tool', '')}):")
                lines.append(f"```\n{entry['preview']}\n```")
            lines.append("")

@@ -715,7 +766,7 @@ async def log_compaction(

    if ctx.runtime_logger:
        ctx.runtime_logger.log_step(
-            node_id=ctx.node_id,
+            node_id=ctx.agent_id,
            node_type="event_loop",
            step_index=-1,
            llm_text=f"Context compacted ({level}): {before_pct}% \u2192 {after_pct}%",
@@ -736,8 +787,8 @@ async def log_compaction(
        await event_bus.publish(
            AgentEvent(
                type=EventType.CONTEXT_COMPACTED,
-                stream_id=ctx.stream_id or ctx.node_id,
-                node_id=ctx.node_id,
+                stream_id=ctx.stream_id or ctx.agent_id,
+                node_id=ctx.agent_id,
                data=event_data,
            )
        )
@@ -762,13 +813,10 @@ def build_emergency_summary(
    node's known state so the LLM can continue working after
    compaction without losing track of its task and inputs.
    """
-    parts = [
-        "EMERGENCY COMPACTION — previous conversation was too large "
-        "and has been replaced with this summary.\n"
-    ]
+    parts = ["EMERGENCY COMPACTION — previous conversation was too large and has been replaced with this summary.\n"]

    # 1. Node identity
-    spec = ctx.node_spec
+    spec = ctx.agent_spec
    parts.append(f"NODE: {spec.name} (id={spec.id})")
    if spec.description:
        parts.append(f"PURPOSE: {spec.description}")
@@ -776,7 +824,7 @@ def build_emergency_summary(
    # 2. Inputs the node received
    input_lines = []
    for key in spec.input_keys:
-        value = ctx.input_data.get(key) or ctx.buffer.read(key)
+        value = ctx.input_data.get(key)
        if value is not None:
            # Truncate long values but keep them recognisable
            v_str = str(value)
@@ -818,28 +866,21 @@ def build_emergency_summary(
        data_files = [f for f in all_files if f not in conv_files]

        if conv_files:
-            conv_list = "\n".join(
-                f" - {f} (full path: {data_dir / f})" for f in conv_files
-            )
+            conv_list = "\n".join(f" - {f} (full path: {data_dir / f})" for f in conv_files)
            parts.append(
                "CONVERSATION HISTORY (freeform messages saved during compaction — "
-                "use load_data('<filename>') to review earlier dialogue):\n" + conv_list
+                "use read_file('<filename>') to review earlier dialogue):\n" + conv_list
            )
        if data_files:
-            file_list = "\n".join(
-                f" - {f} (full path: {data_dir / f})" for f in data_files[:30]
-            )
-            parts.append("DATA FILES (use load_data('<filename>') to read):\n" + file_list)
+            file_list = "\n".join(f" - {f} (full path: {data_dir / f})" for f in data_files[:30])
+            parts.append("DATA FILES (use read_file('<filename>') to read):\n" + file_list)
        if not all_files:
            parts.append(
                "NOTE: Large tool results may have been saved to files. "
-                "Use list_directory to check the data directory."
+                "Use search_files(target='files', path='.') to check the data directory."
            )
    except Exception:
-        parts.append(
-            "NOTE: Large tool results were saved to files. "
-            "Use read_file(path='<path>') to read them."
-        )
+        parts.append("NOTE: Large tool results were saved to files. Use read_file(path='<path>') to read them.")

    # 6. Tool call history (prevent re-calling tools)
    if conversation is not None:
@@ -847,10 +888,7 @@ def build_emergency_summary(
        if tool_history:
            parts.append(tool_history)

-        parts.append(
-            "\nContinue working towards setting the remaining outputs. "
-            "Use your tools and the inputs above."
-        )
+        parts.append("\nContinue working towards setting the remaining outputs. Use your tools and the inputs above.")
    return "\n\n".join(parts)


@@ -12,12 +12,13 @@ import json
import logging
from collections.abc import Awaitable, Callable
from dataclasses import dataclass
+from datetime import datetime
from typing import Any

from framework.agent_loop.conversation import ConversationStore, NodeConversation
from framework.agent_loop.internals.types import LoopConfig, OutputAccumulator, TriggerEvent
-from framework.orchestrator.node import NodeContext
+from framework.llm.capabilities import supports_image_tool_results
from framework.orchestrator.node import NodeContext

logger = logging.getLogger(__name__)

@@ -53,15 +54,31 @@ async def restore(
    # continuous mode (or when _restore is called for timer-resume)
    # load all parts — the full conversation threads across nodes.
    _is_continuous = getattr(ctx, "continuous_mode", False)
-    phase_filter = None if _is_continuous else ctx.node_id
+    # The queen has agent_id="queen" but messages are stored with phase_id=None.
+    # Only apply phase filtering for non-queen workers in a multi-agent setup.
+    phase_filter = None if (_is_continuous or ctx.agent_id == "queen") else ctx.agent_id
    conversation = await NodeConversation.restore(
        conversation_store,
        phase_id=phase_filter,
        run_id=ctx.effective_run_id,
    )
    if conversation is None:
+        logger.info(
+            "[restore] No conversation found for agent_id=%s phase_filter=%s run_id=%s",
+            ctx.agent_id,
+            phase_filter,
+            ctx.effective_run_id,
+        )
        return None

+    logger.info(
+        "[restore] Restored %d messages for agent_id=%s phase_filter=%s run_id=%s",
+        conversation.message_count,
+        ctx.agent_id,
+        phase_filter,
+        ctx.effective_run_id,
+    )
+
    # If run_id filtering removed all messages, this is an intentional
    # restart (new run), not a crash recovery. Return None so the caller
    # falls through to the fresh-conversation path.
@@ -124,7 +141,7 @@ async def write_cursor(
    cursor.update(
        {
            "iteration": iteration,
-            "node_id": ctx.node_id,
+            "node_id": ctx.agent_id,
            "outputs": accumulator.to_dict(),
        }
    )
@@ -133,9 +150,7 @@ async def write_cursor(
    cursor["recent_responses"] = recent_responses
    if recent_tool_fingerprints is not None:
        # Convert list[list[tuple]] → list[list[list]] for JSON
-        cursor["recent_tool_fingerprints"] = [
-            [list(pair) for pair in fps] for fps in recent_tool_fingerprints
-        ]
+        cursor["recent_tool_fingerprints"] = [[list(pair) for pair in fps] for fps in recent_tool_fingerprints]
    # Persist blocked-input state so restored runs re-block instead of
    # manufacturing a synthetic continuation turn.
    cursor["pending_input"] = pending_input
@@ -147,11 +162,18 @@ async def drain_injection_queue(
    conversation: NodeConversation,
    *,
    ctx: NodeContext,
-    describe_images_as_text_fn: (
-        Callable[[list[dict[str, Any]]], Awaitable[str | None]] | None
-    ) = None,
+    caption_image_fn: (Callable[[str, list[dict[str, Any]]], Awaitable[tuple[str, str] | None]] | None) = None,
) -> int:
-    """Drain all pending injected events as user messages. Returns count."""
+    """Drain all pending injected events as user messages. Returns count.
+
+    ``caption_image_fn`` is the unified vision fallback hook. It takes
+    ``(intent, image_content)`` and returns ``(caption, model)`` on
+    success — the model id is logged so the destination is observable.
+    The user's typed ``content`` (the injected message body) is passed
+    as the intent so the captioner can answer the user's specific
+    question about the image rather than producing a generic
+    description; an empty content falls back to a generic intent.
+    """
|
||||
count = 0
|
||||
logger.debug(
|
||||
"[drain_injection_queue] Starting to drain queue, initial queue size: %s",
|
||||
@@ -171,23 +193,34 @@ async def drain_injection_queue(
|
||||
"Model '%s' does not support images; attempting vision fallback",
|
||||
ctx.llm.model,
|
||||
)
|
||||
if describe_images_as_text_fn is not None:
|
||||
description = await describe_images_as_text_fn(image_content)
|
||||
if description:
|
||||
if caption_image_fn is not None:
|
||||
intent = content or ("Describe these user-injected images for a text-only agent.")
|
||||
caption_result = await caption_image_fn(intent, image_content)
|
||||
if caption_result:
|
||||
description, vision_model = caption_result
|
||||
content = f"{content}\n\n{description}" if content else description
|
||||
logger.info("[drain] image described as text via vision fallback")
|
||||
logger.info(
|
||||
"[drain] image described as text via vision fallback (model '%s')",
|
||||
vision_model,
|
||||
)
|
||||
else:
|
||||
logger.info("[drain] no vision fallback available; images dropped")
|
||||
image_content = None
|
||||
# Real user input is stored as-is; external events get a prefix
|
||||
# Stamp every injected event with its arrival time so the model
|
||||
# has a consistent temporal log to reason over (and so the
|
||||
# stamp lives inside byte-stable conversation history instead
|
||||
# of a per-turn system-prompt tail). Minute precision is what
|
||||
# the queen needs for conversational / scheduling context.
|
||||
stamp = datetime.now().astimezone().strftime("%Y-%m-%d %H:%M %Z")
|
||||
if is_client_input:
|
||||
stamped = f"[{stamp}] {content}" if content else f"[{stamp}]"
|
||||
await conversation.add_user_message(
|
||||
content,
|
||||
stamped,
|
||||
is_client_input=True,
|
||||
image_content=image_content,
|
||||
)
|
||||
else:
|
||||
await conversation.add_user_message(f"[External event]: {content}")
|
||||
await conversation.add_user_message(f"[{stamp}] [External event] {content}")
|
||||
count += 1
|
||||
except asyncio.QueueEmpty:
|
||||
break
|
||||
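# Illustrative sketch (not from the diff): the shape of a caption_image_fn
# hook matching the new signature above, (intent, image_content) ->
# (caption, model) or None on failure. The captioning call itself is
# stubbed; the real captioner is the vision-fallback module's
# caption_tool_image().
from typing import Any

VISION_MODEL = "gemini/gemini-3-flash-preview"  # assumed fallback model id


async def example_caption_image_fn(
    intent: str, image_content: list[dict[str, Any]]
) -> tuple[str, str] | None:
    try:
        # Stub: a real hook would send `intent` plus the image blocks to the
        # vision model and return its textual description.
        caption = f"(stub) {len(image_content)} image(s), intent: {intent[:60]}"
    except Exception:
        return None  # degrade silently; a captioning failure must not kill the drain loop
    return (caption, VISION_MODEL)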
@@ -220,9 +253,12 @@ async def drain_trigger_queue(
         payload_str = json.dumps(t.payload, default=str)
         parts.append(f"[TRIGGER: {t.trigger_type}/{t.source_id}]{task_line}\n{payload_str}")

-    combined = "\n\n".join(parts)
+    stamp = datetime.now().astimezone().strftime("%Y-%m-%d %H:%M %Z")
+    combined = f"[{stamp}]\n" + "\n\n".join(parts)
     logger.info("[drain] %d trigger(s): %s", len(triggers), combined[:200])
-    await conversation.add_user_message(combined)
+    # Tag the message so the UI can render a banner instead of the raw
+    # `[TRIGGER: ...]` text. The LLM still sees `combined` verbatim.
+    await conversation.add_user_message(combined, is_trigger=True)
     return len(triggers)

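# Illustrative example (not from the diff): what `combined` looks like for a
# single drained trigger after the stamping change. A timestamp line first,
# then one [TRIGGER: ...] block per trigger. All values below are made up.
#
#   [2026-04-15 10:07 PDT]
#   [TRIGGER: cron/daily_digest]
#   {"schedule": "0 9 * * *", "run": "2026-04-15"}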
@@ -245,11 +281,6 @@ async def check_pause(

     # Check context-level pause flags (legacy/alternative methods)
     pause_requested = ctx.input_data.get("pause_requested", False)
-    if not pause_requested:
-        try:
-            pause_requested = ctx.buffer.read("pause_requested") or False
-        except (PermissionError, KeyError):
-            pause_requested = False
     if pause_requested:
         completed = iteration
         logger.info(f"⏸ Pausing after {completed} iteration(s) completed (context-level)")

@@ -11,8 +11,8 @@ import time

 from framework.agent_loop.conversation import NodeConversation
 from framework.agent_loop.internals.types import HookContext
-from framework.orchestrator.node import NodeContext
 from framework.host.event_bus import EventBus
+from framework.orchestrator.node import NodeContext

 logger = logging.getLogger(__name__)

@@ -45,14 +45,14 @@ async def generate_action_plan(
     Runs as a fire-and-forget task so it never blocks the main loop.
     """
     try:
-        system_prompt = ctx.node_spec.system_prompt or ""
+        system_prompt = ctx.agent_spec.system_prompt or ""
         # Trim to keep the prompt small
         prompt_summary = system_prompt[:500]
         if len(system_prompt) > 500:
             prompt_summary += "..."

         tool_names = [t.name for t in ctx.available_tools]
-        output_keys = ctx.node_spec.output_keys or []
+        output_keys = ctx.agent_spec.output_keys or []

         prompt = (
             f'You are about to work on a task as node "{node_id}".\n\n'
@@ -108,6 +108,8 @@ async def publish_llm_turn_complete(
     input_tokens: int,
     output_tokens: int,
     cached_tokens: int = 0,
+    cache_creation_tokens: int = 0,
     cost_usd: float = 0.0,
     execution_id: str = "",
+    iteration: int | None = None,
 ) -> None:
@@ -120,6 +122,8 @@ async def publish_llm_turn_complete(
         input_tokens=input_tokens,
         output_tokens=output_tokens,
         cached_tokens=cached_tokens,
+        cache_creation_tokens=cache_creation_tokens,
         cost_usd=cost_usd,
         execution_id=execution_id,
+        iteration=iteration,
     )
@@ -185,8 +189,8 @@ async def publish_context_usage(
     await event_bus.publish(
         AgentEvent(
             type=EventType.CONTEXT_USAGE_UPDATED,
-            stream_id=ctx.stream_id or ctx.node_id,
-            node_id=ctx.node_id,
+            stream_id=ctx.stream_id or ctx.agent_id,
+            node_id=ctx.agent_id,
             data={
                 "usage_ratio": round(ratio, 4),
                 "usage_pct": round(ratio * 100),
@@ -319,9 +323,7 @@ async def publish_output_key_set(
     execution_id: str = "",
 ) -> None:
-    if event_bus:
-        await event_bus.emit_output_key_set(
-            stream_id=stream_id, node_id=node_id, key=key, execution_id=execution_id
-        )
+    pass


 async def run_hooks(

@@ -31,14 +31,10 @@ class SubagentJudge:

         if remaining <= 3:
-            urgency = (
-                f"URGENT: Only {remaining} iterations left. "
-                f"Stop all other work and call set_output NOW for: {missing}"
-            )
+            urgency = f"URGENT: Only {remaining} iterations left. Stop all other work and call set_output NOW for: {missing}"
         elif remaining <= self._max_iterations // 2:
-            urgency = (
-                f"WARNING: {remaining} iterations remaining. "
-                f"You must call set_output for: {missing}"
-            )
+            urgency = f"WARNING: {remaining} iterations remaining. You must call set_output for: {missing}"
         else:
             urgency = f"Missing output keys: {missing}. Use set_output to provide them."

@@ -79,7 +75,7 @@ async def judge_turn(
     if mark_complete_flag:
         return JudgeVerdict(action="ACCEPT")

-    if ctx.node_spec.skip_judge:
+    if ctx.agent_spec.skip_judge:
         return JudgeVerdict(action="RETRY")  # feedback=None → not logged

     # --- Level 1: custom judge -----------------------------------------
@@ -92,9 +88,9 @@ async def judge_turn(
             "accumulator": accumulator,
             "iteration": iteration,
             "conversation_summary": conversation.export_summary(),
-            "output_keys": ctx.node_spec.output_keys,
+            "output_keys": ctx.agent_spec.output_keys,
             "missing_keys": get_missing_output_keys_fn(
-                accumulator, ctx.node_spec.output_keys, ctx.node_spec.nullable_output_keys
+                accumulator, ctx.agent_spec.output_keys, ctx.agent_spec.nullable_output_keys
             ),
         }
         verdict = await judge.evaluate(context)
@@ -109,9 +105,7 @@ async def judge_turn(
     if tool_results:
         return JudgeVerdict(action="RETRY")  # feedback=None → not logged

-    missing = get_missing_output_keys_fn(
-        accumulator, ctx.node_spec.output_keys, ctx.node_spec.nullable_output_keys
-    )
+    missing = get_missing_output_keys_fn(accumulator, ctx.agent_spec.output_keys, ctx.agent_spec.nullable_output_keys)

     if missing:
         return JudgeVerdict(
@@ -124,8 +118,8 @@ async def judge_turn(

     # All output keys present — run safety checks before accepting.

-    output_keys = ctx.node_spec.output_keys or []
-    nullable_keys = set(ctx.node_spec.nullable_output_keys or [])
+    output_keys = ctx.agent_spec.output_keys or []
+    nullable_keys = set(ctx.agent_spec.nullable_output_keys or [])

     # All-nullable with nothing set → node produced nothing useful.
     all_nullable = output_keys and nullable_keys >= set(output_keys)
@@ -133,36 +127,19 @@ async def judge_turn(
     if all_nullable and none_set:
         return JudgeVerdict(
             action="RETRY",
-            feedback=(
-                f"No output keys have been set yet. "
-                f"Use set_output to set at least one of: {output_keys}"
-            ),
-        )
-
-    # Queen with no output keys → continuous interaction node.
-    # Inject tool-use pressure instead of auto-accepting.
-    if not output_keys and ctx.supports_direct_user_io:
-        return JudgeVerdict(
-            action="RETRY",
-            feedback=(
-                "STOP describing what you will do. "
-                "You have FULL access to all tools — file creation, "
-                "shell commands, MCP tools — and you CAN call them "
-                "directly in your response. Respond ONLY with tool "
-                "calls, no prose. Execute the task now."
-            ),
+            feedback=(f"No output keys have been set yet. Use set_output to set at least one of: {output_keys}"),
         )

     # Level 2b: conversation-aware quality check (if success_criteria set)
-    if ctx.node_spec.success_criteria and ctx.llm:
+    if ctx.agent_spec.success_criteria and ctx.llm:
         from framework.orchestrator.conversation_judge import evaluate_phase_completion

         verdict = await evaluate_phase_completion(
             llm=ctx.llm,
             conversation=conversation,
-            phase_name=ctx.node_spec.name,
-            phase_description=ctx.node_spec.description,
-            success_criteria=ctx.node_spec.success_criteria,
+            phase_name=ctx.agent_spec.name,
+            phase_description=ctx.agent_spec.description,
+            success_criteria=ctx.agent_spec.success_criteria,
             accumulator_state=accumulator.to_dict(),
             max_context_tokens=max_context_tokens,
         )

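# Illustrative sketch (not from the diff): the semantics the judge assumes
# for the injected get_missing_output_keys_fn callable: required keys not
# yet set, with nullable keys excused. This is a plausible reference
# implementation, not the framework's own.
def example_get_missing_output_keys(
    accumulator, output_keys: list[str] | None, nullable_output_keys: list[str] | None
) -> list[str]:
    required = output_keys or []
    nullable = set(nullable_output_keys or [])
    # A key is "missing" if it is required, not excused as nullable, and
    # the accumulator has no value for it yet.
    return [k for k in required if k not in nullable and accumulator.get(k) is None]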
@@ -15,100 +15,148 @@ from typing import Any
 from framework.llm.provider import Tool, ToolResult


 def sanitize_ask_user_inputs(
     raw_question: Any,
     raw_options: Any,
 ) -> tuple[str, list[str] | None]:
     """Self-heal a malformed ``ask_user`` tool call.

     Some model families (notably when the system prompt teaches them
     XML-ish scratchpad tags like ``<relationship>...</relationship>``)
     carry that style into tool arguments and produce calls like::

         ask_user({
             "question": "What now?</question>\\n_OPTIONS: [\\"A\\", \\"B\\"]"
         })

     Symptoms:
     - The chat UI renders ``</question>`` and ``_OPTIONS: [...]`` as
       literal text in the question bubble.
     - No buttons appear because the real ``options`` parameter is
       empty.

     This function:
     - Strips leading/trailing whitespace.
     - Removes a trailing ``</question>`` (with optional preceding
       whitespace) from the question text.
     - Detects an inline ``_OPTIONS:``, ``OPTIONS:``, or ``options:``
       line followed by a JSON array, parses it, and returns the
       recovered list as the second element.
     - Removes the parsed line from the returned question text.

     Returns ``(cleaned_question, recovered_options_or_None)``. The
     caller should treat the recovered list as a fallback only when
     the model did not also supply a real ``options`` array.
     """
     import json as _json
     import re as _re

     if raw_question is None:
         return "", None
     q = str(raw_question)

     # Strip a stray </question> tag (case-insensitive, with optional
     # preceding whitespace) anywhere in the string. This is the most
     # common failure mode and never represents valid content.
     q = _re.sub(r"\s*</\s*question\s*>\s*", "\n", q, flags=_re.IGNORECASE)

     # Look for an inline options line. Match _OPTIONS, OPTIONS, options
     # (with or without leading underscore), followed by ':' or '=', then
     # a JSON array on the same line OR on the next line.
     inline_options_re = _re.compile(
         r"(?im)^\s*_?options\s*[:=]\s*(\[.*?\])\s*$",
         _re.DOTALL,
     )

     recovered: list[str] | None = None
     match = inline_options_re.search(q)
     if match is not None:
         try:
             parsed = _json.loads(match.group(1))
             if isinstance(parsed, list):
                 cleaned = [str(o).strip() for o in parsed if str(o).strip()]
                 if 1 <= len(cleaned) <= 8:
                     recovered = cleaned
         except (ValueError, TypeError):
             pass
         if recovered is not None:
             # Remove the parsed line so it doesn't leak into the
             # rendered question text.
             q = inline_options_re.sub("", q, count=1)

     # Strip any final whitespace / leftover blank lines from the
     # question after removals.
     q = _re.sub(r"\n{3,}", "\n\n", q).strip()

     return q, recovered

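# Illustrative check (not from the diff): exercising the sanitizer on the
# exact failure mode described in its docstring. The expected outputs
# follow from the regexes in the function body.
if __name__ == "__main__":
    q, opts = sanitize_ask_user_inputs('What now?</question>\n_OPTIONS: ["A", "B"]', None)
    assert q == "What now?"          # stray tag and options line stripped
    assert opts == ["A", "B"]        # inline options recovered as a real list

    q, opts = sanitize_ask_user_inputs("Plain question, nothing to heal", None)
    assert q == "Plain question, nothing to heal"
    assert opts is None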
ask_user_prompt = """\
Use this tool when you need to ask the user questions during execution. Reach for it when:

- The task is ambiguous and the user needs to choose an approach
- You need missing information to continue
- You want approval before taking a meaningful action
- A decision has real trade-offs the user should weigh in on
- You want post-task feedback, or to offer saving a skill or updating memory

Usage notes:
- Users will always be able to select "Other" to provide custom text input, \
so do not include catch-all options like "Other" or "Something else" yourself.
- Each option is a plain string. Do NOT wrap options in `{"label": "..."}` or \
`{"value": "..."}` objects — pass the raw choice text directly, e.g. `"Email"`, \
not `{"label": "Email"}`.
- If you recommend a specific option, make that the first option in the list \
and append " (Recommended)" to the end of its text.
- Call this tool whenever you need the user's response.
- The prompt field must be plain text only.
- Do not include XML, pseudo-tags, or inline option lists inside prompt.
- Omit options only when the question truly requires a free-form response the \
user must type out, such as describing an idea or pasting an error message.
- Do not repeat the questions in your normal text response. The widget renders \
them, so keep any surrounding text to a brief intro only.
Example — single question with options:
{"questions": [{"id": "next", "prompt": "What would you like to do?", \
"options": ["Build a new agent (Recommended)", "Modify existing agent", "Run tests"]}]}

Example — batch:
{"questions": [
  {"id": "scope", "prompt": "What scope?", "options": ["Full", "Partial"]},
  {"id": "format", "prompt": "Output format?", "options": ["PDF", "CSV", "JSON"]},
  {"id": "details", "prompt": "Any special requirements?"}
]}

Example — free-form (queen only):
{"questions": [{"id": "idea", "prompt": "Describe the agent you want to build."}]}
"""

def build_ask_user_tool() -> Tool:
    """Build the synthetic ask_user tool for explicit user-input requests.

-    The queen calls ask_user() when it needs to pause and wait
-    for user input. Text-only turns WITHOUT ask_user flow through without
-    blocking, allowing progress updates and summaries to stream freely.
+    The queen calls ask_user() when it needs to pause and wait for user
+    input. Accepts an array of 1-8 questions — a single question for the
+    common case, or a batch when several clarifications are needed at once.
+    Text-only turns WITHOUT ask_user flow through without blocking, allowing
+    progress updates and summaries to stream freely.
    """
    return Tool(
        name="ask_user",
        description=(
            "You MUST call this tool whenever you need the user's response. "
            "Always call it after greeting the user, asking a question, or "
            "requesting approval. Do NOT call it for status updates or "
            "summaries that don't require a response. "
            "Always include 2-3 predefined options. The UI automatically "
            "appends an 'Other' free-text input after your options, so NEVER "
            "include catch-all options like 'Custom idea', 'Something else', "
            "'Other', or 'None of the above' — the UI handles that. "
            "When the question primarily needs a typed answer but you must "
            "include options, make one option signal that typing is expected "
            "(e.g. 'I\\'ll type my response'). This helps users discover the "
            "free-text input. "
            "The ONLY exception: omit options when the question demands a "
            "free-form answer the user must type out (e.g. 'Describe your "
            "agent idea', 'Paste the error message'). "
            '{"question": "What would you like to do?", "options": '
            '["Build a new agent", "Modify existing agent", "Run tests"]} '
            "Free-form example: "
            '{"question": "Describe the agent you want to build."}'
        ),
        parameters={
            "type": "object",
            "properties": {
                "question": {
                    "type": "string",
                    "description": "The question or prompt shown to the user.",
                },
                "options": {
                    "type": "array",
                    "items": {"type": "string"},
                    "description": (
                        "2-3 specific predefined choices. Include in most cases. "
                        'Example: ["Option A", "Option B", "Option C"]. '
                        "The UI always appends an 'Other' free-text input, so "
                        "do NOT include catch-alls like 'Custom idea' or 'Other'. "
                        "Omit ONLY when the user must type a free-form answer."
                    ),
                    "minItems": 2,
                    "maxItems": 3,
                },
            },
            "required": ["question"],
        },
    )

def build_ask_user_multiple_tool() -> Tool:
    """Build the synthetic ask_user_multiple tool for batched questions.

    Queen-only tool that presents multiple questions at once so the user
    can answer them all in a single interaction rather than one at a time.
    """
    return Tool(
        name="ask_user_multiple",
-        description=(
-            "Ask the user multiple questions at once. Use this instead of "
-            "ask_user when you have 2 or more questions to ask in the same "
-            "turn — it lets the user answer everything in one go rather than "
-            "going back and forth. Each question can have its own predefined "
-            "options (2-3 choices) or be free-form. The UI renders all "
-            "questions together with a single Submit button. "
-            "ALWAYS prefer this over ask_user when you have multiple things "
-            "to clarify. "
-            "IMPORTANT: Do NOT repeat the questions in your text response — "
-            "the widget renders them. Keep your text to a brief intro only. "
-            '{"questions": ['
-            ' {"id": "scope", "prompt": "What scope?", "options": ["Full", "Partial"]},'
-            ' {"id": "format", "prompt": "Output format?", "options": ["PDF", "CSV", "JSON"]},'
-            ' {"id": "details", "prompt": "Any special requirements?"}'
-            "]}"
-        ),
+        description=ask_user_prompt,
        parameters={
            "type": "object",
            "properties": {
                "questions": {
                    "type": "array",
+                    "minItems": 1,
+                    "maxItems": 8,
+                    "description": "List of questions to present to the user.",
                    "items": {
                        "type": "object",
                        "properties": {
                            "id": {
                                "type": "string",
-                                "description": (
-                                    "Short identifier for this question (used in the response)."
-                                ),
+                                "description": ("Short identifier for this question (used in the response)."),
                            },
                            "prompt": {
                                "type": "string",
@@ -118,8 +166,13 @@ def build_ask_user_multiple_tool() -> Tool:
                                "type": "array",
                                "items": {"type": "string"},
                                "description": (
-                                    "2-3 predefined choices. The UI appends an "
-                                    "'Other' free-text input automatically. "
+                                    "2-3 predefined choices as plain strings "
+                                    '(e.g. ["Yes", "No", "Maybe"]). Do NOT '
+                                    'wrap items in {"label": "..."} or '
+                                    '{"value": "..."} objects — pass the raw '
+                                    "choice text directly. The UI appends an "
+                                    "'Other' free-text input automatically, "
+                                    "so don't include catch-all options. "
                                    "Omit only when the user must type a free-form answer."
                                ),
                                "minItems": 2,
@@ -128,9 +181,6 @@ def build_ask_user_multiple_tool() -> Tool:
                        },
                        "required": ["id", "prompt"],
                    },
-                    "minItems": 2,
-                    "maxItems": 8,
-                    "description": "List of questions to present to the user.",
                },
            },
            "required": ["questions"],
@@ -164,10 +214,7 @@ def build_set_output_tool(output_keys: list[str] | None) -> Tool | None:
             },
             "value": {
                 "type": "string",
-                "description": (
-                    "The output value — a brief note, count, status, "
-                    "or data filename reference."
-                ),
+                "description": ("The output value — a brief note, count, status, or data filename reference."),
             },
         },
         "required": ["key", "value"],
@@ -191,9 +238,7 @@ def build_escalate_tool() -> Tool:
         "properties": {
             "reason": {
                 "type": "string",
-                "description": (
-                    "Short reason for escalation (e.g. 'Tool repeatedly failing')."
-                ),
+                "description": ("Short reason for escalation (e.g. 'Tool repeatedly failing')."),
             },
             "context": {
                 "type": "string",
@@ -206,64 +251,89 @@ def build_escalate_tool() -> Tool:
|
||||
|
||||
|
||||
def build_report_to_parent_tool() -> Tool:
|
||||
"""Build the synthetic report_to_parent tool for sub-agent progress reports.
|
||||
"""Build the synthetic ``report_to_parent`` tool.
|
||||
|
||||
Sub-agents call this to send one-way progress updates, partial findings,
|
||||
or status reports to the parent node (and external observers via event bus)
|
||||
without blocking execution.
|
||||
Parallel workers (those spawned by the overseer via
|
||||
``run_parallel_workers``) call this to send a structured report back
|
||||
to the overseer queen when they have finished their task. Calling
|
||||
``report_to_parent`` terminates the worker's loop cleanly -- do not
|
||||
call other tools after it.
|
||||
|
||||
When ``wait_for_response`` is True, the sub-agent blocks until the parent
|
||||
relays the user's response — used for escalation (e.g. login pages, CAPTCHAs).
|
||||
|
||||
When ``mark_complete`` is True, the sub-agent terminates immediately after
|
||||
sending the report — no need to call set_output for each output key.
|
||||
The overseer receives these as ``SUBAGENT_REPORT`` events and
|
||||
aggregates them into a single summary for the user.
|
||||
"""
|
||||
return Tool(
|
||||
name="report_to_parent",
|
||||
description=(
|
||||
"Send a report to the parent agent. By default this is fire-and-forget: "
|
||||
"the parent receives the report but does not respond. "
|
||||
"Set wait_for_response=true to BLOCK until the user replies — use this "
|
||||
"when you need human intervention (e.g. login pages, CAPTCHAs, "
|
||||
"authentication walls). The user's response is returned as the tool result. "
|
||||
"Set mark_complete=true to finish your task and terminate immediately "
|
||||
"after sending the report — use this when your findings are in the "
|
||||
"message/data fields and you don't need to call set_output."
|
||||
"Send a structured report back to the parent overseer and "
|
||||
"terminate. Call this when you have finished your task "
|
||||
"(success, partial, or failed) or cannot make further "
|
||||
"progress. Your loop ends after this call -- do not call any "
|
||||
"other tool afterwards. The overseer reads the summary + "
|
||||
"data fields and aggregates them into a user-facing response."
|
||||
),
|
||||
parameters={
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"message": {
|
||||
"status": {
|
||||
"type": "string",
|
||||
"description": "A human-readable status or progress message.",
|
||||
"enum": ["success", "partial", "failed"],
|
||||
"description": (
|
||||
"Overall outcome. 'success' = task complete. "
|
||||
"'partial' = some progress but incomplete. "
|
||||
"'failed' = could not make progress."
|
||||
),
|
||||
},
|
||||
"summary": {
|
||||
"type": "string",
|
||||
"description": (
|
||||
"One-paragraph narrative for the overseer. What "
|
||||
"you did, what you found, and any notable issues."
|
||||
),
|
||||
},
|
||||
"data": {
|
||||
"type": "object",
|
||||
"description": "Optional structured data to include with the report.",
|
||||
},
|
||||
"wait_for_response": {
|
||||
"type": "boolean",
|
||||
"description": (
|
||||
"If true, block execution until the user responds. "
|
||||
"Use for escalation scenarios requiring human intervention."
|
||||
"Optional structured payload (rows fetched, IDs "
|
||||
"processed, files written, etc.) that the "
|
||||
"overseer can merge into its final summary."
|
||||
),
|
||||
"default": False,
|
||||
},
|
||||
"mark_complete": {
|
||||
"type": "boolean",
|
||||
"description": (
|
||||
"If true, terminate the sub-agent immediately after sending "
|
||||
"this report. The report message and data are delivered to the "
|
||||
"parent as the final result. No set_output calls are needed."
|
||||
),
|
||||
"default": False,
|
||||
},
|
||||
},
|
||||
"required": ["message"],
|
||||
"required": ["status", "summary"],
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
def handle_report_to_parent(tool_input: dict[str, Any]) -> ToolResult:
|
||||
"""Normalise + validate a ``report_to_parent`` tool call.
|
||||
|
||||
Returns a ``ToolResult`` with the acknowledgement text the LLM sees;
|
||||
the side effects (record on Worker, emit SUBAGENT_REPORT, terminate
|
||||
loop) are performed by ``AgentLoop`` after this helper returns.
|
||||
"""
|
||||
status = str(tool_input.get("status", "success")).strip().lower()
|
||||
if status not in ("success", "partial", "failed"):
|
||||
status = "success"
|
||||
summary = str(tool_input.get("summary", "")).strip()
|
||||
if not summary:
|
||||
summary = f"(worker returned {status} with no summary)"
|
||||
data = tool_input.get("data") or {}
|
||||
if not isinstance(data, dict):
|
||||
data = {"value": data}
|
||||
# Store the normalised payload back on the input dict so the caller
|
||||
# can pick it up without re-parsing.
|
||||
tool_input["_normalised"] = {
|
||||
"status": status,
|
||||
"summary": summary,
|
||||
"data": data,
|
||||
}
|
||||
return ToolResult(
|
||||
tool_use_id=tool_input.get("tool_use_id", ""),
|
||||
content=(f"Report delivered to overseer (status={status}). This worker will terminate now."),
|
||||
)
|
||||
|
||||
|
||||
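# Illustrative usage (not from the diff): what a worker's call and the
# normalised payload look like after handle_report_to_parent runs.
# All values below are made up.
if __name__ == "__main__":
    call = {
        "tool_use_id": "tu_123",
        "status": "Partial",  # case is normalised to lower
        "summary": "Fetched 40 of 120 rows before hitting a rate limit.",
        "data": {"rows_fetched": 40},
    }
    result = handle_report_to_parent(call)
    assert call["_normalised"]["status"] == "partial"
    assert "terminate" in result.content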
def handle_set_output(
    tool_input: dict[str, Any],
    output_keys: list[str] | None,

@@ -0,0 +1,291 @@
"""Generic coercion of LLM-emitted tool arguments to match each tool's JSON schema.

Small/mid-size models drift from tool schemas in predictable, boring ways:

- A number field comes back as a string (``"42"`` instead of ``42``).
- A boolean field comes back as a string (``"true"`` instead of ``True``).
- An array-of-string field comes back as an array of objects
  (``[{"label": "A"}, ...]`` instead of ``["A", ...]``).
- An array/object field comes back as a JSON-encoded string
  (``'["A","B"]'`` instead of ``["A", "B"]``).
- A lone scalar arrives where the schema expects an array.

This module centralizes the healing in one schema-driven pass that runs
on every tool call before dispatch. Coercion is conservative:

- Values that already match the expected type are untouched.
- Shapes we don't recognize are returned as-is, so real bugs surface
  instead of getting silently munged into something plausible.
- Every actual coercion is logged with the tool, property, and shape
  transition so we can see which models/tools are drifting.

Tool-specific prompt drift (e.g. ``</question>`` tags leaking into an
``ask_user`` prompt string) is NOT this module's job — that belongs in
per-tool sanitizers, because it's about prompt style, not schema shape.
"""

from __future__ import annotations

import json
import logging
from typing import Any

from framework.llm.provider import Tool

logger = logging.getLogger(__name__)

# When an ``array<string>`` field arrives as an array of objects, look
# for a text-carrying field in preference order. Covers the wrappers
# small models tend to produce: ``[{"label": "A"}]``, ``[{"value": "A"}]``,
# ``[{"text": "A"}]``, etc.
_STRING_EXTRACT_KEYS: tuple[str, ...] = (
    "label",
    "value",
    "text",
    "name",
    "title",
    "display",
)


def coerce_tool_input(tool: Tool, raw_input: dict[str, Any] | None) -> dict[str, Any]:
    """Coerce *raw_input* in place to match *tool*'s JSON schema.

    Returns the mutated input dict (same object as *raw_input* when
    possible, for callers that assume in-place mutation). Properties
    not present in the schema are left untouched.
    """
    if not isinstance(raw_input, dict):
        return raw_input or {}

    schema = tool.parameters or {}
    props = schema.get("properties")
    if not isinstance(props, dict):
        return raw_input

    for key in list(raw_input.keys()):
        prop_schema = props.get(key)
        if not isinstance(prop_schema, dict):
            continue
        original = raw_input[key]
        coerced = _coerce(original, prop_schema)
        if coerced is not original:
            logger.info(
                "coerced tool input tool=%s prop=%s from=%s to=%s",
                tool.name,
                key,
                _shape(original),
                _shape(coerced),
            )
            raw_input[key] = coerced

    return raw_input

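# Illustrative usage (not from the diff): healing a drifted ask_user call.
# Assumes build_ask_user_tool() from the synthetic-tools module shown
# earlier is importable; the drifted input values are made up.
if __name__ == "__main__":
    tool = build_ask_user_tool()
    drifted = {
        "question": {"label": "Pick one"},  # wrapper object where a string is expected
        "options": '["A", "B"]',            # JSON-encoded string where an array is expected
    }
    healed = coerce_tool_input(tool, drifted)
    assert healed["question"] == "Pick one"
    assert healed["options"] == ["A", "B"]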
def _coerce(value: Any, schema: dict[str, Any]) -> Any:
    """Dispatch on the schema's ``type`` field.

    Returns the *same object* on passthrough so callers can detect
    no-ops via identity (``coerced is value``).
    """
    expected = schema.get("type")
    if not expected:
        return value

    # Union type: try each in order, return the first coercion that
    # actually changes the value. Falls back to the original.
    if isinstance(expected, list):
        for t in expected:
            sub_schema = {**schema, "type": t}
            coerced = _coerce(value, sub_schema)
            if coerced is not value:
                return coerced
        return value

    if expected == "integer":
        return _coerce_integer(value)
    if expected == "number":
        return _coerce_number(value)
    if expected == "boolean":
        return _coerce_boolean(value)
    if expected == "string":
        return _coerce_string(value)
    if expected == "array":
        return _coerce_array(value, schema)
    if expected == "object":
        return _coerce_object(value, schema)

    return value


def _coerce_integer(value: Any) -> Any:
    # bool is a subclass of int in Python; don't mistake True for 1 here.
    if isinstance(value, bool):
        return value
    if isinstance(value, int):
        return value
    if isinstance(value, str):
        parsed = _parse_number(value)
        if parsed is None:
            return value
        if parsed != int(parsed):
            # Has a fractional part — caller asked for int, don't truncate.
            return value
        return int(parsed)
    return value


def _coerce_number(value: Any) -> Any:
    if isinstance(value, bool):
        return value
    if isinstance(value, (int, float)):
        return value
    if isinstance(value, str):
        parsed = _parse_number(value)
        if parsed is None:
            return value
        if parsed == int(parsed):
            return int(parsed)
        return parsed
    return value


def _coerce_boolean(value: Any) -> Any:
    if isinstance(value, bool):
        return value
    if isinstance(value, str):
        low = value.strip().lower()
        if low == "true":
            return True
        if low == "false":
            return False
    return value


def _coerce_string(value: Any) -> Any:
    if isinstance(value, str):
        return value
    # Common drift: model sent ``{"label": "..."}`` when we wanted "...".
    if isinstance(value, dict):
        extracted = _extract_string_from_object(value)
        if extracted is not None:
            return extracted
    return value


def _coerce_array(value: Any, schema: dict[str, Any]) -> Any:
    # Heal: JSON-encoded array string → array.
    if isinstance(value, str):
        parsed = _try_parse_json(value)
        if isinstance(parsed, list):
            value = parsed
        else:
            # Scalar string where an array is expected — wrap it.
            return [value]
    elif not isinstance(value, list):
        # Any other scalar (int, bool, dict, ...) — wrap.
        return [value]

    items_schema = schema.get("items")
    if not isinstance(items_schema, dict):
        return value

    coerced_items: list[Any] = []
    changed = False
    for item in value:
        c = _coerce(item, items_schema)
        if c is not item:
            changed = True
        coerced_items.append(c)
    return coerced_items if changed else value


def _coerce_object(value: Any, schema: dict[str, Any]) -> Any:
    # Heal: JSON-encoded object string → object.
    if isinstance(value, str):
        parsed = _try_parse_json(value)
        if isinstance(parsed, dict):
            value = parsed
        else:
            return value
    if not isinstance(value, dict):
        return value

    sub_props = schema.get("properties")
    if not isinstance(sub_props, dict):
        return value

    for k in list(value.keys()):
        sub_schema = sub_props.get(k)
        if not isinstance(sub_schema, dict):
            continue
        original = value[k]
        coerced = _coerce(original, sub_schema)
        if coerced is not original:
            value[k] = coerced
    # Return the same dict (mutated in place) so callers that passed a
    # shared reference see the updates.
    return value


def _extract_string_from_object(obj: dict[str, Any]) -> str | None:
    """Pick a likely-text field out of a wrapper object.

    Tries the known keys first, falls back to the sole value if the
    object has exactly one entry. Returns None when nothing plausible
    is found — the caller keeps the original.
    """
    for k in _STRING_EXTRACT_KEYS:
        v = obj.get(k)
        if isinstance(v, str) and v:
            return v
    if len(obj) == 1:
        (only,) = obj.values()
        if isinstance(only, str) and only:
            return only
    return None


def _try_parse_json(raw: str) -> Any:
    try:
        return json.loads(raw)
    except (ValueError, TypeError):
        return None


def _parse_number(raw: str) -> float | None:
    try:
        f = float(raw)
    except (ValueError, OverflowError):
        return None
    # Reject NaN and inf — they pass float() but aren't useful numeric
    # values for tool arguments.
    if f != f or f == float("inf") or f == float("-inf"):
        return None
    return f


def _shape(value: Any) -> str:
    """Short type/shape description used in coercion log lines."""
    if value is None:
        return "None"
    if isinstance(value, bool):
        return "bool"
    if isinstance(value, int):
        return "int"
    if isinstance(value, float):
        return "float"
    if isinstance(value, str):
        return f"str[{len(value)}]"
    if isinstance(value, list):
        if not value:
            return "list[0]"
        return f"list[{len(value)}]<{_shape(value[0])}>"
    if isinstance(value, dict):
        keys = sorted(value.keys())[:3]
        suffix = ",…" if len(value) > 3 else ""
        return f"dict{{{','.join(keys)}{suffix}}}"
    return type(value).__name__
@@ -215,14 +215,30 @@ def truncate_tool_result(
     """Persist tool result to file and optionally truncate for context.

     When *spillover_dir* is configured, EVERY non-error tool result is
-    saved to a file (short filename like ``web_search_1.txt``). A
-    ``[Saved to '...']`` annotation is appended so the reference
-    survives pruning and compaction.
+    written to disk for debugging. The LLM-visible content is then
+    shaped to avoid a **poison pattern** that we traced on 2026-04-15
+    through a gemini-3.1-pro-preview-customtools queen session: the prior format
+    appended ``\\n\\n[Saved to '/abs/path/file.txt']`` after every
+    small result, and frontier pattern-matching models (gemini 3.x in
+    particular) learned to autocomplete the `[Saved to '...']` trailer
+    in their own assistant turns, eventually degenerating into echoing
+    the whole tool result instead of deciding what to do next. See
+    ``session_20260415_100751_d49f4c28/conversations/parts/0000000056.json``
+    for the terminal case where the model's "text" output was the full
+    tool_result JSON.

-    - Small results (≤ limit): full content kept + file annotation
-    - Large results (> limit): preview + file reference
-    - Errors: pass through unchanged
-    - read_file/load_data results: truncate with pagination hint (no re-spill)
+    Rules after the fix:
+    - **Small results (≤ limit):** pass content through unchanged. No
+      trailer. No annotation. The full content is already in the
+      message; the disk copy is for debugging only.
+    - **Large results (> limit):** preview + file reference, but
+      formatted as plain prose instead of a bracketed ``[...]``
+      pattern. Structured JSON metadata ("_saved_to") is embedded
+      inside the JSON body when the preview is JSON-shaped so the
+      model can locate the full file without seeing a mimicry-prone
+      bracket token outside the body.
+    - **Errors:** pass through unchanged.
+    - **read_file results:** truncate with pagination hint (no re-spill).
    """
    limit = max_tool_result_chars

@@ -230,9 +246,9 @@ def truncate_tool_result(
    if result.is_error:
        return result

-    # read_file/load_data reads FROM spilled files — never re-spill (circular).
+    # read_file reads FROM spilled files — never re-spill (circular).
    # Just truncate with a pagination hint if the result is too large.
-    if tool_name in ("load_data", "read_file"):
+    if tool_name == "read_file":
        if limit <= 0 or len(result.content) <= limit:
            return result  # Small result — pass through as-is
        # Large result — truncate with smart preview
@@ -252,18 +268,19 @@ def truncate_tool_result(
        else:
            preview_block = result.content[:PREVIEW_CAP] + "…"

+        # Prose header (no brackets).
        header = (
-            f"[{tool_name} result: {len(result.content):,} chars — "
-            f"too large for context. Use offset_bytes/limit_bytes "
-            f"parameters to read smaller chunks.]"
+            f"Tool `{tool_name}` returned {len(result.content):,} characters "
+            f"(too large for context). Use offset_bytes / limit_bytes "
+            f"parameters to paginate smaller chunks."
        )
        if metadata_str:
            header += f"\n\nData structure:\n{metadata_str}"
        header += (
-            "\n\nWARNING: This is an INCOMPLETE preview. Do NOT draw conclusions or counts from it."
+            "\n\nWARNING: the preview below is a SAMPLE only — do NOT draw counts, totals, or conclusions from it."
        )

-        truncated = f"{header}\n\nPreview (small sample only):\n{preview_block}"
+        truncated = f"{header}\n\nPreview (truncated):\n{preview_block}"
        logger.info(
            "%s result truncated: %d → %d chars (use offset/limit to paginate)",
            tool_name,
@@ -301,7 +318,10 @@ def truncate_tool_result(

    if limit > 0 and len(result.content) > limit:
        # Large result: build a small, metadata-rich preview so the
-        # LLM cannot mistake it for the complete dataset.
+        # LLM cannot mistake it for the complete dataset. The
+        # preview is introduced as plain prose (no bracketed
+        # ``[Result from …]`` token) so it doesn't prime the model
+        # to autocomplete the same pattern in its next turn.
        PREVIEW_CAP = 5000

        # Extract structural metadata (array lengths, key names)
@@ -316,21 +336,21 @@ def truncate_tool_result(
        else:
            preview_block = result.content[:PREVIEW_CAP] + "…"

-        # Assemble header with structural info + warning
+        # Prose header (no brackets). Absolute path still surfaced
+        # so the agent can read the full file, but it's framed as
+        # a sentence, not a bracketed trailer.
        header = (
-            f"[Result from {tool_name}: {len(result.content):,} chars — "
-            f"too large for context, saved to '{abs_path}'.]\n"
+            f"Tool `{tool_name}` returned {len(result.content):,} characters "
+            f"(too large for context). Full result saved at: {abs_path}\n"
+            f"Read the complete data with read_file(path='{abs_path}').\n"
        )
        if metadata_str:
-            header += f"\nData structure:\n{metadata_str}"
+            header += f"\nData structure:\n{metadata_str}\n"
        header += (
-            f"\n\nWARNING: The preview below is INCOMPLETE. "
-            f"Do NOT draw conclusions or counts from it. "
-            f"Use read_file(path='{abs_path}') to read the "
-            f"full data before analysis."
+            "\nWARNING: the preview below is a SAMPLE only — do NOT draw counts, totals, or conclusions from it."
        )

-        content = f"{header}\n\nPreview (small sample only):\n{preview_block}"
+        content = f"{header}\n\nPreview (truncated):\n{preview_block}"
        logger.info(
            "Tool result spilled to file: %s (%d chars → %s)",
            tool_name,
@@ -338,10 +358,22 @@ def truncate_tool_result(
            abs_path,
        )
    else:
-        # Small result: keep full content + annotation with absolute path
-        content = f"{result.content}\n\n[Saved to '{abs_path}']"
+        # Small result: pass content through UNCHANGED.
+        #
+        # The prior design appended `\n\n[Saved to '/abs/path']`
+        # after every small result so the agent could re-read the
+        # file later. But (a) the full content is already in the
+        # message, so there's nothing to re-read; (b) the
+        # `[Saved to '…']` trailer is a repeating token pattern
+        # that frontier pattern-matching models autocomplete into
+        # their own assistant turns, eventually echoing whole tool
+        # results as "text" instead of making decisions. Dropping
+        # the trailer entirely kills the poison pattern. Spilled
+        # files on disk still exist for debugging — they just
+        # aren't advertised in the LLM-visible message.
+        content = result.content
        logger.info(
-            "Tool result saved to file: %s (%d chars → %s)",
+            "Tool result saved to file: %s (%d chars → %s, no trailer)",
            tool_name,
            len(result.content),
            filename,
@@ -373,15 +405,16 @@ def truncate_tool_result(
    else:
        preview_block = result.content[:PREVIEW_CAP] + "…"

+    # Prose header (no brackets) — see docstring for the poison
+    # pattern that the bracket format triggered.
    header = (
-        f"[Result from {tool_name}: {len(result.content):,} chars — "
-        f"truncated to fit context budget.]"
+        f"Tool `{tool_name}` returned {len(result.content):,} characters "
+        f"(truncated to fit context budget — no spillover dir configured)."
    )
    if metadata_str:
        header += f"\n\nData structure:\n{metadata_str}"
    header += (
-        "\n\nWARNING: This is an INCOMPLETE preview. "
-        "Do NOT draw conclusions or counts from the preview alone."
+        "\n\nWARNING: the preview below is a SAMPLE only — do NOT draw counts, totals, or conclusions from it."
    )

    truncated = f"{header}\n\n{preview_block}"
@@ -423,7 +456,7 @@ async def execute_tool(
    )

    skill_dirs = skill_dirs or []
-    skill_read_tools = {"view_file", "load_data", "read_file"}
+    skill_read_tools = {"view_file", "read_file"}
    if tc.tool_name in skill_read_tools and skill_dirs:
        raw_path = tc.tool_input.get("path", "")
        if raw_path:
@@ -467,6 +500,22 @@ async def execute_tool(
        result = await _run()
    except TimeoutError:
        logger.warning("Tool '%s' timed out after %.0fs", tc.tool_name, timeout)
+        # asyncio.wait_for cancels the awaiting coroutine, but the sync
+        # executor running inside run_in_executor keeps going — and so
+        # does any MCP subprocess it is blocked on. Reach through to the
+        # owning MCPClient and force-disconnect it so the subprocess is
+        # torn down. Next call_tool triggers a reconnect. Without this
+        # the executor thread and MCP child leak on every timeout.
+        kill_for_tool = getattr(tool_executor, "kill_for_tool", None)
+        if callable(kill_for_tool):
+            try:
+                await asyncio.to_thread(kill_for_tool, tc.tool_name)
+            except Exception as exc:  # defensive — never let cleanup crash the loop
+                logger.warning(
+                    "kill_for_tool('%s') raised during timeout handling: %s",
+                    tc.tool_name,
+                    exc,
+                )
        return ToolResult(
            tool_use_id=tc.tool_use_id,
            content=(

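# Illustrative sketch (not from the diff): the duck-typed hook the timeout
# handler above probes for. Any executor that owns MCP subprocesses can
# expose kill_for_tool(tool_name); the getattr lookup means executors
# without it are unaffected. Class and method names here are hypothetical.
class ExampleMCPToolExecutor:
    def __init__(self, clients_by_tool: dict[str, "MCPClient"]) -> None:
        self._clients_by_tool = clients_by_tool

    def kill_for_tool(self, tool_name: str) -> None:
        # Synchronous on purpose: the caller wraps it in asyncio.to_thread.
        client = self._clients_by_tool.get(tool_name)
        if client is not None:
            client.force_disconnect()  # hypothetical: tears down the MCP subprocess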
@@ -2,6 +2,7 @@

 from __future__ import annotations

+import asyncio
 import json
 import logging
 import time
@@ -49,28 +50,85 @@ class LoopConfig:
     """Configuration for the event loop."""

     max_iterations: int = 50
-    max_tool_calls_per_turn: int = 30
+    # 0 (or any non-positive value) disables the per-turn hard limit,
+    # letting a single assistant turn fan out arbitrarily many tool
+    # calls. Models like Gemini 3.1 Pro routinely emit 40-80 tool
+    # calls in one turn during browser exploration; capping them
+    # strands work half-finished and makes the next turn repeat the
+    # discarded calls, which is worse than just running them.
+    max_tool_calls_per_turn: int = 0
     judge_every_n_turns: int = 1
     stall_detection_threshold: int = 3
     stall_similarity_threshold: float = 0.85
     max_context_tokens: int = 32_000
     # Headroom reserved for the NEXT turn's input + output so that
     # proactive compaction always finishes before the hard context limit
     # is hit mid-stream. Scaled to match Claude Code's 13k-buffer-on-
     # 200k-window ratio (~6.5%) applied to hive's default 32k window,
     # with extra margin because hive's token estimator is char-based
     # and less tight than Anthropic's own counting. Override via
     # LoopConfig for larger windows.
     compaction_buffer_tokens: int = 8_000
+    # Ratio-based component of the hybrid compaction buffer. Effective
+    # headroom reserved before compaction fires is
+    #   compaction_buffer_tokens + compaction_buffer_ratio * max_context_tokens
+    # The ratio scales with the model's window where the absolute fixed
+    # component does not (an 8k absolute buffer is a 75% trigger on a 32k
+    # window but 96% on a 200k window). Combining them gives an absolute
+    # floor sized for the worst-case single tool result (one un-spilled
+    # max_tool_result_chars payload ≈ 30k chars ≈ 7.5k tokens, rounded to
+    # 8k) plus a fractional headroom that keeps the trigger meaningful on
+    # large windows, so the inner tool loop always has room to grow
+    # without tripping the mid-turn pre-send guard. Defaults: 8k + 15%.
+    # On 32k that's a 12.8k buffer (~60% trigger); on 200k it's 38k
+    # (~81% trigger); on 1M it's 158k (~84% trigger).
+    compaction_buffer_ratio: float = 0.15
     # Warning is emitted one buffer earlier so the user/telemetry gets
     # a "we're close" signal without triggering a compaction pass.
     compaction_warning_buffer_tokens: int = 12_000
     store_prefix: str = ""

-    # Overflow margin for max_tool_calls_per_turn. Tool calls are only
-    # discarded when the count exceeds max_tool_calls_per_turn * (1 + margin).
+    # Overflow margin for max_tool_calls_per_turn. When the limit is
+    # enabled (>0), tool calls are only discarded when the count
+    # exceeds max_tool_calls_per_turn * (1 + margin). Ignored when
+    # max_tool_calls_per_turn is 0.
     tool_call_overflow_margin: float = 0.5

     # Tool result context management.
     max_tool_result_chars: int = 30_000
     spillover_dir: str | None = None

+    # Image retention in conversation history.
+    # Screenshots from ``browser_screenshot`` are inlined as base64
+    # data URLs inside message ``image_content``. Each full-page
+    # screenshot costs ~250k tokens when the provider counts the
+    # base64 as text (gemini, most non-Anthropic providers). Four
+    # screenshots in one conversation push gemini's 1M context over
+    # the limit and the model starts emitting garbage.
+    #
+    # The framework strips image_content from older messages after
+    # every tool-result batch, keeping only the most recent N
+    # screenshots. The text metadata on evicted messages (url, size,
+    # scale hints) is preserved so the agent can still reason about
+    # "I took a screenshot at step N that showed the compose modal".
+    # Raise this only if you genuinely need longer visual history AND
+    # you know your provider is using native image tokenization.
+    max_retained_screenshots: int = 2

     # set_output value spilling.
     max_output_value_chars: int = 2_000

     # Stream retry.
-    max_stream_retries: int = 3
+    max_stream_retries: int = 5
     stream_retry_backoff_base: float = 2.0
     stream_retry_max_delay: float = 60.0
+    # Persistent retry for capacity-class errors (429, 529, overloaded).
+    # Unlike the bounded retry above, these keep trying until the wall-clock
+    # budget below is exhausted — modelled after claude-code's withRetry.
+    # The loop still publishes a retry event each attempt so the UI can
+    # see progress. Set to 0 to disable and fall back to bounded retry.
+    capacity_retry_max_seconds: float = 600.0
+    capacity_retry_max_delay: float = 60.0

     # Tool doom loop detection.
     tool_doom_loop_threshold: int = 3
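# Worked example (not from the diff): the hybrid-buffer arithmetic from the
# comments above, checked in code. Numbers match the stated defaults of
# 8k fixed + 15% of the window.
def effective_compaction_buffer(max_context_tokens: int, fixed: int = 8_000, ratio: float = 0.15) -> int:
    return int(fixed + ratio * max_context_tokens)

assert effective_compaction_buffer(32_000) == 12_800       # compaction triggers at ~60% of 32k
assert effective_compaction_buffer(200_000) == 38_000      # ~81% of 200k
assert effective_compaction_buffer(1_000_000) == 158_000   # ~84% of 1M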
@@ -80,10 +138,46 @@ class LoopConfig:
|
||||
# Worker auto-escalation: text-only turns before escalating to queen.
|
||||
worker_escalation_grace_turns: int = 1
|
||||
tool_doom_loop_enabled: bool = True
|
||||
# Silent worker: consecutive tool-only turns (no user-facing text)
|
||||
# before injecting a nudge to communicate progress.
|
||||
silent_tool_streak_threshold: int = 5
|
||||
|
||||
# Per-tool-call timeout.
|
||||
tool_call_timeout_seconds: float = 60.0
|
||||
|
||||
# LLM stream inactivity watchdog. Split into two budgets so legitimate
|
||||
# slow TTFT on large contexts doesn't get mistaken for a dead connection.
|
||||
# - ttft: stream open -> first event. Large-context local models can
|
||||
# legitimately take minutes before the first token arrives.
|
||||
# - inter_event: last event -> now, ONLY after the first event. A stream
|
||||
# that started producing and then went silent is a real stall.
|
||||
# Whichever fires first cancels the stream. Set to 0 to disable that
|
||||
# individual budget; set both to 0 to fully disable the watchdog.
|
||||
llm_stream_ttft_timeout_seconds: float = 600.0
|
||||
llm_stream_inter_event_idle_seconds: float = 120.0
|
||||
# Deprecated alias — kept so existing configs keep working. If set to a
|
||||
# non-default value it overrides inter_event_idle (historical behavior).
|
||||
llm_stream_inactivity_timeout_seconds: float = 120.0
|
||||
|
||||
# Continue-nudge recovery. When the idle watchdog fires on a live but
|
||||
# stuck stream, cancel the stream and append a short continuation
|
||||
# hint to the conversation instead of raising a ConnectionError and
|
||||
# re-running the whole turn. Preserves any partial text/tool-calls the
|
||||
# stream emitted before the stall.
|
||||
continue_nudge_enabled: bool = True
|
||||
# Cap so a truly dead endpoint eventually falls back to the error path
|
||||
# instead of nudging forever.
|
||||
continue_nudge_max_per_turn: int = 3
|
||||
|
||||
# Tool-call replay detector. When the model emits a tool call whose
|
||||
# (name + canonical-args) matches a prior successful call in the last
|
||||
# K assistant turns, emit telemetry and prepend a short steer onto the
|
||||
# tool result — but still execute. Weaker models legitimately repeat
|
||||
# read-only calls (screenshot, evaluate), so silent skipping would
|
||||
# cause surprising behavior.
|
||||
replay_detector_enabled: bool = True
|
||||
replay_detector_within_last_turns: int = 3
|
||||
|
||||
# Subagent delegation timeout (wall-clock max).
|
||||
subagent_timeout_seconds: float = 3600.0
|
||||
|
||||
@@ -129,7 +223,7 @@ class OutputAccumulator:
|
||||
|
||||
async def set(self, key: str, value: Any) -> None:
|
||||
"""Set a key-value pair, auto-spilling large values to files."""
|
||||
value = self._auto_spill(key, value)
|
||||
value = await self._auto_spill(key, value)
|
||||
self.values[key] = value
|
||||
if self.store:
|
||||
cursor = await self.store.read_cursor() or {}
|
||||
@@ -138,41 +232,65 @@ class OutputAccumulator:
            cursor["outputs"] = outputs
            await self.store.write_cursor(cursor)

-    def _auto_spill(self, key: str, value: Any) -> Any:
-        """Save large values to a file and return a reference string."""
+    async def _auto_spill(self, key: str, value: Any) -> Any:
+        """Save large values to a file and return a reference string.
+
+        Runs the JSON serialization and file write on a worker thread
+        so they don't block the asyncio event loop. For a 100k-char
+        dict this used to freeze every concurrent tool call for ~50ms
+        of ``json.dumps(indent=2)`` + a sync disk write; for bigger
+        payloads or slow storage (NFS, networked FS) the freeze was
+        proportionally worse.
+        """
        if self.max_value_chars <= 0 or not self.spillover_dir:
            return value

-        val_str = json.dumps(value, ensure_ascii=False) if not isinstance(value, str) else value
-        if len(val_str) <= self.max_value_chars:
+        # Cheap size probe first — if the value is already a short
+        # string we can skip both the JSON round-trip and the thread
+        # hop entirely.
+        if isinstance(value, str) and len(value) <= self.max_value_chars:
            return value

-        spill_path = Path(self.spillover_dir)
-        spill_path.mkdir(parents=True, exist_ok=True)
-        ext = ".json" if isinstance(value, (dict, list)) else ".txt"
-        filename = f"output_{key}{ext}"
-        write_content = (
-            json.dumps(value, indent=2, ensure_ascii=False)
-            if isinstance(value, (dict, list))
-            else str(value)
-        )
-        file_path = spill_path / filename
-        file_path.write_text(write_content, encoding="utf-8")
-        file_size = file_path.stat().st_size
-        logger.info(
-            "set_output value auto-spilled: key=%s, %d chars -> %s (%d bytes)",
-            key,
-            len(val_str),
-            filename,
-            file_size,
-        )
-        # Use absolute path so parent agents can find files from subagents
-        abs_path = str(file_path.resolve())
-        return (
-            f"[Saved to '{abs_path}' ({file_size:,} bytes). "
-            f"Use read_file(path='{abs_path}') "
-            f"to access full data.]"
-        )
+        def _spill_sync() -> Any:
+            # JSON serialization for size check (only for non-strings).
+            if isinstance(value, str):
+                val_str = value
+            else:
+                val_str = json.dumps(value, ensure_ascii=False)
+            if len(val_str) <= self.max_value_chars:
+                return value
+
+            spill_path = Path(self.spillover_dir)
+            spill_path.mkdir(parents=True, exist_ok=True)
+            ext = ".json" if isinstance(value, (dict, list)) else ".txt"
+            filename = f"output_{key}{ext}"
+            write_content = (
+                json.dumps(value, indent=2, ensure_ascii=False) if isinstance(value, (dict, list)) else str(value)
+            )
+            file_path = spill_path / filename
+            file_path.write_text(write_content, encoding="utf-8")
+            file_size = file_path.stat().st_size
+            logger.info(
+                "set_output value auto-spilled: key=%s, %d chars -> %s (%d bytes)",
+                key,
+                len(val_str),
+                filename,
+                file_size,
+            )
+            # Use absolute path so parent agents can find files from subagents.
+            #
+            # Prose format (no brackets) — same fix as tool_result_handler:
+            # frontier pattern-matching models autocomplete bracketed
+            # `[Saved to '...']` trailers into their own assistant turns,
+            # eventually degenerating into echoing the file path as text.
+            # Keep the path accessible but frame it as plain prose.
+            abs_path = str(file_path.resolve())
+            return (
+                f"Output saved at: {abs_path} ({file_size:,} bytes). "
+                f"Read the full data with read_file(path='{abs_path}')."
+            )
+
+        return await asyncio.to_thread(_spill_sync)

    def get(self, key: str) -> Any | None:
        return self.values.get(key)
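
A standalone sketch of the stall this change removes, for anyone who wants to reproduce it locally. The payload size and the heartbeat interval are assumptions; only the shape of the experiment matters — a heartbeat task ticks every 10ms while a large ``json.dumps`` runs once inline (blocking the loop) and once via ``asyncio.to_thread`` (not blocking it):

import asyncio
import json
import time

async def heartbeat(gaps: list[float]) -> None:
    # Record the gap between consecutive ticks; a blocked event loop
    # shows up as one abnormally large gap.
    last = time.perf_counter()
    for _ in range(50):
        await asyncio.sleep(0.01)
        now = time.perf_counter()
        gaps.append(now - last)
        last = now

async def main() -> None:
    payload = {"k": ["x" * 100] * 20_000}  # a few MB once serialized
    gaps: list[float] = []
    hb = asyncio.create_task(heartbeat(gaps))
    json.dumps(payload, indent=2)  # inline: freezes every other task
    await asyncio.to_thread(json.dumps, payload, indent=2)  # offloaded
    await hb
    print(f"worst heartbeat gap: {max(gaps) * 1000:.1f} ms")

asyncio.run(main())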

@@ -0,0 +1,306 @@
"""Vision-fallback subagent for tool-result images on text-only LLMs.

When a tool returns image content but the main agent's model can't
accept image blocks (i.e. its catalog entry has ``supports_vision: false``),
the framework strips the images before they ever reach the LLM. Without
this module, the agent then sees only the tool's text envelope (URL,
dimensions, size) and is blind to whatever the image actually shows.

This module provides:

* ``caption_tool_image()`` — direct LiteLLM call to a configured
  vision model (``vision_fallback`` block in ``~/.hive/configuration.json``)
  that takes the agent's intent + the image(s) and returns a textual
  description tailored to that intent.
* ``extract_intent_for_tool()`` — pull the most recent assistant text
  + the tool call descriptor and concatenate them into a ≤4KB intent
  string the vision subagent can reason against.

Both helpers degrade silently — returning ``None`` / a placeholder rather
than raising — so a vision-fallback failure can never kill the main
agent's run. The agent-loop call site retries the configured model
once on a None return, then falls back to
``gemini/gemini-3-flash-preview`` via the ``model_override`` parameter
of :func:`caption_tool_image`.
"""

from __future__ import annotations

import json
import logging
from datetime import datetime
from typing import TYPE_CHECKING, Any

from framework.config import (
    get_vision_fallback_api_base,
    get_vision_fallback_api_key,
    get_vision_fallback_model,
)

if TYPE_CHECKING:
    from ..conversation import NodeConversation

logger = logging.getLogger(__name__)


# Hard cap on the intent string handed to the vision subagent. The
# subagent only needs the agent's recent reasoning + the tool descriptor;
# anything longer is wasted tokens (and risks pushing past the vision
# model's context with the image attached).
_INTENT_MAX_CHARS = 4096

# Cap on the tool-args JSON snippet inside the intent. Some tool inputs
# (large strings, file contents) would dominate the intent if uncapped.
_TOOL_ARGS_MAX_CHARS = 4096

# Subagent system prompt — kept short so it fits within any provider's
# system-prompt budget alongside the user message + image. Tells the
# subagent its role and constrains output format.
#
# Coordinate labeling: the main agent's browser tools
# (browser_click_coordinate / browser_hover_coordinate / browser_press_at)
# accept VIEWPORT FRACTIONS (x, y) in [0..1] where (0,0) is the top-left
# and (1,1) is the bottom-right of the screenshot. Without coordinates
# the text-only agent has no way to act on what we describe — it can
# read the caption but cannot point. So for every interactive element
# we name (button, link, input, icon, tab, menu item, dialog control),
# include its approximate viewport-fraction centre as ``(fx, fy)``
# right after the element's name, e.g. ``"Submit" button (0.83, 0.92)``.
# Three rules: (1) coordinates only for things plausibly clickable /
# hoverable / typeable — don't tag pure body text or decorative
# graphics. (2) Eyeball to two decimal places; precision beyond that
# is false confidence. (3) Never invent — if an element is partly
# off-screen or you can't locate it, omit the coordinate rather than
# guessing.
_VISION_SUBAGENT_SYSTEM = (
    "You are a vision subagent for a text-only main agent. The main "
    "agent invoked a tool that returned the image(s) attached. Their "
    "intent (their reasoning + the tool call) is below. Describe what "
    "the image shows in service of their intent — concrete, factual, "
    "no speculation. If their intent asks a yes/no question, answer it "
    "directly first.\n\n"
    "Coordinate labeling: the main agent uses fractional viewport "
    "coordinates (x, y) in [0..1] — (0, 0) is the top-left of the "
    "image, (1, 1) is the bottom-right — to drive its click / hover / "
    "key-press tools. For every interactive element you mention "
    "(button, link, input, checkbox, radio, dropdown, tab, menu item, "
    "dialog control, icon), append its approximate centre as "
    "``(fx, fy)`` immediately after the element's name or label, e.g. "
    '``"Submit" button (0.83, 0.92)`` or ``profile avatar icon '
    "(0.05, 0.07)``. Use two decimal places — more is false precision. "
    "Skip coordinates for pure body text and decorative elements that "
    "aren't clickable. If an element is partially off-screen or you "
    "cannot reliably locate its centre, omit the coordinate rather "
    "than guessing.\n\n"
    "Output plain text, no markdown, ≤ 600 words."
)
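
The fractional-coordinate contract above implies a simple conversion at the click site. A minimal sketch, assuming a Playwright-style ``page`` object — the framework's real ``browser_click_coordinate`` implementation may differ:

# Hypothetical consumer of the caption's fractional coordinates.
# `page` is assumed to be a Playwright async Page.
async def click_fraction(page, fx: float, fy: float) -> None:
    # Clamp defensively — captions are eyeballed to two decimals.
    fx = min(max(fx, 0.0), 1.0)
    fy = min(max(fy, 0.0), 1.0)
    size = page.viewport_size or {"width": 1280, "height": 720}
    # (0, 0) is top-left and (1, 1) is bottom-right, so scale linearly.
    await page.mouse.click(fx * size["width"], fy * size["height"])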


def extract_intent_for_tool(
    conversation: NodeConversation,
    tool_name: str,
    tool_args: dict[str, Any] | None,
) -> str:
    """Build the intent string passed to the vision subagent.

    Combines the most recent assistant text (the LLM's reasoning right
    before invoking the tool) with a structured tool-call descriptor.
    Truncates to ``_INTENT_MAX_CHARS`` total, favouring the head of the
    assistant text where goal-stating sentences usually live.

    If no preceding assistant text exists (rare — first turn), falls
    back to ``"<no preceding reasoning>"`` so the subagent still gets
    the tool descriptor.
    """
    args_json: str
    try:
        args_json = json.dumps(tool_args or {}, default=str)
    except Exception:
        args_json = repr(tool_args)
    if len(args_json) > _TOOL_ARGS_MAX_CHARS:
        args_json = args_json[:_TOOL_ARGS_MAX_CHARS] + "…"

    tool_line = f"Called: {tool_name}({args_json})"

    # Walk newest → oldest, take the first assistant message with text.
    assistant_text = ""
    try:
        messages = getattr(conversation, "_messages", []) or []
        for msg in reversed(messages):
            if getattr(msg, "role", None) != "assistant":
                continue
            content = getattr(msg, "content", "") or ""
            if isinstance(content, str) and content.strip():
                assistant_text = content.strip()
                break
    except Exception:
        # Defensive — the agent loop must keep running even if the
        # conversation structure changes shape.
        assistant_text = ""

    if not assistant_text:
        assistant_text = "<no preceding reasoning>"

    # Intent = tool descriptor (always intact) + reasoning (truncated).
    head = f"{tool_line}\n\nReasoning before call:\n"
    budget = _INTENT_MAX_CHARS - len(head)
    if budget < 100:
        # Tool descriptor is huge somehow — truncate it.
        return head[:_INTENT_MAX_CHARS]
    if len(assistant_text) > budget:
        assistant_text = assistant_text[: budget - 1] + "…"
    return head + assistant_text
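
For the shape of the output, an illustrative call with a stub conversation object standing in for ``NodeConversation`` (the stub and values are invented):

from types import SimpleNamespace

conv = SimpleNamespace(_messages=[
    SimpleNamespace(role="assistant", content="I'll check the login form first."),
])
intent = extract_intent_for_tool(conv, "browser_screenshot", {"full_page": False})
# intent ==
#   'Called: browser_screenshot({"full_page": false})\n\n'
#   "Reasoning before call:\nI'll check the login form first."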


async def caption_tool_image(
    intent: str,
    image_content: list[dict[str, Any]],
    *,
    timeout_s: float = 30.0,
    model_override: str | None = None,
) -> tuple[str, str] | None:
    """Caption the given images using the configured ``vision_fallback`` model.

    Returns ``(caption, model)`` on success or ``None`` on any failure
    (no config, no API key, timeout, exception, empty response).

    ``model_override`` swaps in a different litellm model id while
    keeping the configured ``vision_fallback`` ``api_key`` / ``api_base``
    untouched. That's deliberate: Hive subscribers configure
    ``vision_fallback`` to point at the Hive proxy, which routes to
    multiple models including Gemini — so reusing the credentials lets
    a Gemini-3-flash override still work without a separate
    ``GEMINI_API_KEY``. When no creds are configured, litellm falls
    back to env-var resolution.

    Logs each call to ``~/.hive/llm_logs`` via ``log_llm_turn``.
    """
    model = model_override or get_vision_fallback_model()
    if not model:
        return None
    api_key = get_vision_fallback_api_key()
    api_base = get_vision_fallback_api_base()
    if not api_key and not model_override:
        logger.debug("vision_fallback configured but no API key resolved; skipping")
        return None

    try:
        import litellm
    except ImportError:
        return None

    user_blocks: list[dict[str, Any]] = [{"type": "text", "text": intent}]
    user_blocks.extend(image_content)
    messages = [
        {"role": "system", "content": _VISION_SUBAGENT_SYSTEM},
        {"role": "user", "content": user_blocks},
    ]

    # Apply the same proxy rewrites the main LLM provider uses so a
    # `hive/...` / `kimi/...` model resolves to the right Anthropic-
    # compatible endpoint with the right auth header. Without this,
    # litellm doesn't know what `hive/kimi-k2.5` is and rejects the call
    # with "LLM Provider NOT provided."
    from framework.llm.litellm import rewrite_proxy_model

    rewritten_model, rewritten_base, extra_headers = rewrite_proxy_model(model, api_key, api_base)

    kwargs: dict[str, Any] = {
        "model": rewritten_model,
        "messages": messages,
        "max_tokens": 8192,
        "timeout": timeout_s,
    }
    # Always pass api_key when we have one, even alongside proxy-rewritten
    # extra_headers. litellm's anthropic handler refuses to dispatch
    # without an api_key (it sends it as x-api-key); the proxy itself
    # authenticates via the Authorization: Bearer header in
    # extra_headers. Both are needed — matches LiteLLMProvider's path.
    if api_key:
        kwargs["api_key"] = api_key
    if rewritten_base:
        kwargs["api_base"] = rewritten_base
    if extra_headers:
        kwargs["extra_headers"] = extra_headers

    # Surface where the request is going so the user can verify the
    # vision fallback is hitting the expected proxy / model. Redacts
    # the API key to a length+head+tail digest so it can be cross-
    # correlated with other auth-related log lines.
    key_digest = (
        f"len={len(api_key)} {api_key[:8]}…{api_key[-4:]}"
        if api_key and len(api_key) >= 12
        else f"len={len(api_key) if api_key else 0}"
    )
    logger.info(
        "[vision_fallback] dispatching: configured_model=%s rewritten_model=%s "
        "api_base=%s api_key=%s images=%d intent_chars=%d timeout_s=%.1f",
        model,
        rewritten_model,
        rewritten_base or "<litellm-default>",
        key_digest,
        len(image_content),
        len(intent),
        timeout_s,
    )

    started = datetime.now()
    caption: str | None = None
    error_text: str | None = None
    try:
        response = await litellm.acompletion(**kwargs)
        text = (response.choices[0].message.content or "").strip()
        if text:
            caption = text
        logger.info(
            "[vision_fallback] response: model=%s api_base=%s elapsed_s=%.2f chars=%d",
            rewritten_model,
            rewritten_base or "<litellm-default>",
            (datetime.now() - started).total_seconds(),
            len(text),
        )
    except Exception as exc:
        error_text = f"{type(exc).__name__}: {exc}"
        logger.warning(
            "[vision_fallback] failed: model=%s api_base=%s error=%s",
            rewritten_model,
            rewritten_base or "<litellm-default>",
            error_text,
        )

    # Best-effort audit log so users can grep ~/.hive/llm_logs/ for
    # vision-fallback subagent calls. Failures here must not bubble.
    try:
        from framework.tracker.llm_debug_logger import log_llm_turn

        # Don't dump the base64 image data into the log file — that
        # would balloon the jsonl with mostly-binary noise.
        elided_blocks: list[dict[str, Any]] = [{"type": "text", "text": intent}]
        elided_blocks.extend({"type": "image_url", "image_url": {"url": "<elided>"}} for _ in range(len(image_content)))
        log_llm_turn(
            node_id="vision_fallback_subagent",
            stream_id="vision_fallback",
            execution_id="vision_fallback_subagent",
            iteration=0,
            system_prompt=_VISION_SUBAGENT_SYSTEM,
            messages=[{"role": "user", "content": elided_blocks}],
            assistant_text=caption or "",
            tool_calls=[],
            tool_results=[],
            token_counts={
                "model": model,
                "elapsed_s": (datetime.now() - started).total_seconds(),
                "error": error_text,
                "num_images": len(image_content),
                "intent_chars": len(intent),
            },
        )
    except Exception:
        pass

    if caption is None:
        return None
    return caption, model


__all__ = ["caption_tool_image", "extract_intent_for_tool"]

@@ -0,0 +1,105 @@
"""Prompt composition for agent loops.

Builds canonical system prompts from AgentContext fields.
Extracted from the former orchestrator/prompting module.
"""

from __future__ import annotations

from dataclasses import dataclass
from datetime import datetime
from typing import Any


@dataclass(frozen=True)
class PromptSpec:
    identity_prompt: str = ""
    focus_prompt: str = ""
    narrative: str = ""
    accounts_prompt: str = ""
    skills_catalog_prompt: str = ""
    protocols_prompt: str = ""
    memory_prompt: str = ""
    agent_type: str = "event_loop"
    output_keys: tuple[str, ...] = ()


def stamp_prompt_datetime(prompt: str) -> str:
    local = datetime.now().astimezone()
    stamp = f"Current date and time: {local.strftime('%Y-%m-%d %H:%M %Z (UTC%z)')}"
    return f"{prompt}\n\n{stamp}" if prompt else stamp


def build_prompt_spec(
    ctx: Any,
    *,
    focus_prompt: str | None = None,
    narrative: str | None = None,
    memory_prompt: str | None = None,
) -> PromptSpec:
    from framework.skills.tool_gating import augment_catalog_for_tools

    resolved_memory = memory_prompt
    if resolved_memory is None:
        resolved_memory = getattr(ctx, "memory_prompt", "") or ""
        dynamic = getattr(ctx, "dynamic_memory_provider", None)
        if dynamic is not None:
            try:
                resolved_memory = dynamic() or ""
            except Exception:
                resolved_memory = getattr(ctx, "memory_prompt", "") or ""

    # Tool-gated pre-activation: inject full body of default skills whose
    # trigger tools are present in this agent's tool list (e.g. browser_*
    # pulls in hive.browser-automation). Keeps non-browser agents lean.
    tool_names = [getattr(t, "name", "") for t in (getattr(ctx, "available_tools", None) or [])]
    raw_catalog = ctx.skills_catalog_prompt or ""
    dynamic_catalog = getattr(ctx, "dynamic_skills_catalog_provider", None)
    if dynamic_catalog is not None:
        try:
            raw_catalog = dynamic_catalog() or ""
        except Exception:
            raw_catalog = ctx.skills_catalog_prompt or ""
    skills_catalog_prompt = augment_catalog_for_tools(raw_catalog, tool_names)

    return PromptSpec(
        identity_prompt=ctx.identity_prompt or "",
        focus_prompt=focus_prompt if focus_prompt is not None else (ctx.agent_spec.system_prompt or ""),
        narrative=narrative if narrative is not None else (ctx.narrative or ""),
        accounts_prompt=ctx.accounts_prompt or "",
        skills_catalog_prompt=skills_catalog_prompt,
        protocols_prompt=ctx.protocols_prompt or "",
        memory_prompt=resolved_memory,
        agent_type=ctx.agent_spec.agent_type,
        output_keys=tuple(ctx.agent_spec.output_keys or ()),
    )


def build_system_prompt(spec: PromptSpec) -> str:
    parts: list[str] = []
    if spec.identity_prompt:
        parts.append(spec.identity_prompt)
    if spec.accounts_prompt:
        parts.append(f"\n{spec.accounts_prompt}")
    if spec.skills_catalog_prompt:
        parts.append(f"\n{spec.skills_catalog_prompt}")
    if spec.protocols_prompt:
        parts.append(f"\n{spec.protocols_prompt}")
    if spec.memory_prompt:
        parts.append(f"\n{spec.memory_prompt}")
    if spec.focus_prompt:
        parts.append(f"\n{spec.focus_prompt}")
    if spec.narrative:
        parts.append(f"\n{spec.narrative}")
    return "\n".join(parts)


def build_system_prompt_for_context(
    ctx: Any,
    *,
    focus_prompt: str | None = None,
    narrative: str | None = None,
    memory_prompt: str | None = None,
) -> str:
    spec = build_prompt_spec(ctx, focus_prompt=focus_prompt, narrative=narrative, memory_prompt=memory_prompt)
    return build_system_prompt(spec)

@@ -0,0 +1,294 @@
"""Core types for the agent loop — the execution primitive of the colony.

AgentSpec: Declarative definition of what an agent does.
AgentContext: Everything an agent loop needs to execute.
AgentResult: What comes out of an agent loop execution.
AgentProtocol: Interface that all agent implementations must satisfy.
"""

from __future__ import annotations

from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from typing import Any

from pydantic import BaseModel, Field

from framework.llm.provider import LLMProvider, Tool
from framework.tracker.decision_tracker import DecisionTracker


class AgentSpec(BaseModel):
    """Declarative definition of an agent's capabilities and configuration.

    This is the blueprint from which AgentLoop instances are created.
    Workers in a colony are exact copies of the queen's AgentSpec.
    """

    id: str
    name: str
    description: str

    agent_type: str = Field(
        default="event_loop",
        description="Type: 'event_loop' (recommended), 'gcu' (browser automation).",
    )

    input_keys: list[str] = Field(
        default_factory=list,
        description="Keys this agent reads from input data",
    )
    output_keys: list[str] = Field(
        default_factory=list,
        description="Keys this agent produces as output",
    )
    nullable_output_keys: list[str] = Field(
        default_factory=list,
        description="Output keys that can be None without triggering validation errors",
    )

    input_schema: dict[str, dict] = Field(
        default_factory=dict,
        description="Optional schema for input validation.",
    )
    output_schema: dict[str, dict] = Field(
        default_factory=dict,
        description="Optional schema for output validation.",
    )

    system_prompt: str | None = Field(default=None, description="System prompt for the LLM")
    tools: list[str] = Field(default_factory=list, description="Tool names this agent can use")
    tool_access_policy: str = Field(
        default="explicit",
        description=(
            "'all' = all tools from registry, "
            "'explicit' = only tools listed in `tools` (default), "
            "'none' = no tools at all."
        ),
    )
    model: str | None = Field(default=None, description="Specific model override")

    function: str | None = Field(default=None, description="Function name or path")
    routes: dict[str, str] = Field(default_factory=dict, description="Condition -> target mapping")

    max_retries: int = Field(default=3)
    retry_on: list[str] = Field(default_factory=list, description="Error types to retry on")

    max_visits: int = Field(
        default=0,
        description=("Max times this agent executes in one colony run. 0 = unlimited. Set to 1 for one-shot agents."),
    )

    output_model: type[BaseModel] | None = Field(
        default=None,
        description="Optional Pydantic model for validating LLM output.",
    )
    max_validation_retries: int = Field(
        default=2,
        description="Maximum retries when Pydantic validation fails",
    )

    client_facing: bool = Field(
        default=False,
        description="Deprecated — the queen is intrinsically interactive.",
    )

    success_criteria: str | None = Field(
        default=None,
        description="Natural-language criteria for phase completion.",
    )

    skip_judge: bool = Field(
        default=False,
        description="When True, the implicit judge is bypassed entirely.",
    )

    model_config = {"extra": "allow", "arbitrary_types_allowed": True}

    def is_queen(self) -> bool:
        return self.id == "queen"

    def supports_direct_user_io(self) -> bool:
        return self.is_queen()


def deprecated_client_facing_warning(spec: AgentSpec) -> str | None:
    if spec.client_facing and not spec.is_queen():
        return (
            f"Agent '{spec.id}' sets deprecated client_facing=True. "
            "Non-queen direct human I/O is no longer supported; route worker "
            "questions and approvals through queen escalation instead."
        )
    return None


def warn_if_deprecated_client_facing(spec: AgentSpec) -> None:
    import logging

    warning = deprecated_client_facing_warning(spec)
    if warning:
        logging.getLogger(__name__).warning(warning)


@dataclass
class AgentContext:
    """Everything an agent loop needs to execute.

    Passed to every agent implementation and provides:
    - Runtime (for decision logging)
    - LLM access
    - Tools
    - Goal context
    - Execution metadata
    """

    runtime: DecisionTracker

    agent_id: str
    agent_spec: AgentSpec

    input_data: dict[str, Any] = field(default_factory=dict)

    llm: LLMProvider | None = None
    available_tools: list[Tool] = field(default_factory=list)

    goal_context: str = ""
    goal: Any = None

    max_tokens: int = 4096

    attempt: int = 1
    max_attempts: int = 3

    runtime_logger: Any = None
    pause_event: Any = None

    accounts_prompt: str = ""

    identity_prompt: str = ""
    narrative: str = ""
    memory_prompt: str = ""

    event_triggered: bool = False

    execution_id: str = ""
    run_id: str = ""

    @property
    def effective_run_id(self) -> str | None:
        return self.run_id or None

    stream_id: str = ""

    # ----- Task system fields (see framework/tasks) -------------------
    # task_list_id: this agent's own session-scoped list, e.g.
    # session:{agent_id}:{session_id}. Set by the runner / ColonyRuntime
    # before the loop starts; immutable after first task_create.
    task_list_id: str | None = None
    # colony_id: set on the queen of a colony AND on every spawned worker
    # so workers can render the "picked up" chip and the queen can address
    # her colony template via colony_template_* tools.
    colony_id: str | None = None
    # picked_up_from: for workers, the (colony_task_list_id, template_task_id)
    # pair their session was spawned for. None for the queen and queen-DM.
    picked_up_from: tuple[str, int] | None = None

    dynamic_tools_provider: Any = None
    dynamic_prompt_provider: Any = None
    # Optional Callable[[], str]: when set alongside ``dynamic_prompt_provider``,
    # the AgentLoop sends the system prompt as two pieces — the result of
    # ``dynamic_prompt_provider`` is the STATIC block (cached), and this
    # provider returns the DYNAMIC suffix (not cached). The LLM wrapper
    # emits them as two Anthropic system content blocks with a cache
    # breakpoint between them for providers that honor ``cache_control``.
    # For providers that don't, the two strings are concatenated. Used by
    # the Queen to keep her persona/role/tools block warm across iterations
    # while the recall + timestamp tail refreshes per user turn.
    dynamic_prompt_suffix_provider: Any = None
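
    # A minimal sketch of the two-block emission described above, in raw
    # Anthropic Messages API terms. The framework's LLM wrapper is assumed
    # to do something equivalent; the function below is illustrative, not
    # the wrapper's actual code:
    #
    #     def build_system_blocks(static_provider, suffix_provider) -> list[dict]:
    #         blocks = [
    #             {
    #                 "type": "text",
    #                 "text": static_provider(),
    #                 # Cache breakpoint: everything up to and including
    #                 # this block is reusable across iterations.
    #                 "cache_control": {"type": "ephemeral"},
    #             }
    #         ]
    #         if suffix_provider is not None:
    #             # Dynamic tail (recall + timestamp) — intentionally uncached.
    #             blocks.append({"type": "text", "text": suffix_provider()})
    #         return blocks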
    dynamic_memory_provider: Any = None
    # Optional Callable[[], str]: when set, the current skills-catalog
    # prompt is sourced from this provider each iteration. Lets workers
    # pick up UI toggles without restarting the run. Queen agents already
    # rebuild the whole prompt via dynamic_prompt_provider — this field
    # is a surgical alternative used by colony workers where the rest of
    # the prompt stays constant and we don't want to thrash the cache.
    dynamic_skills_catalog_provider: Any = None

    skills_catalog_prompt: str = ""
    protocols_prompt: str = ""
    skill_dirs: list[str] = field(default_factory=list)
    default_skill_batch_nudge: str | None = None
    default_skill_warn_ratio: float | None = None

    iteration_metadata_provider: Any = None

    @property
    def is_queen_stream(self) -> bool:
        return self.stream_id == "queen" or self.agent_spec.is_queen()

    @property
    def emits_client_io(self) -> bool:
        return self.is_queen_stream

    @property
    def supports_direct_user_io(self) -> bool:
        return self.is_queen_stream and not self.event_triggered


@dataclass
class AgentResult:
    """Output of an agent loop execution."""

    success: bool
    output: dict[str, Any] = field(default_factory=dict)
    error: str | None = None

    next_agent: str | None = None
    route_reason: str | None = None

    tokens_used: int = 0
    latency_ms: int = 0

    validation_errors: list[str] = field(default_factory=list)

    conversation: Any = None

    # Machine-readable reason the loop stopped (see LoopExitReason in
    # agent_loop/internals/types.py). "?" means the loop didn't set one,
    # which should itself be treated as a diagnostic.
    exit_reason: str = "?"
    # Counters for reliability events surfaced during this execution.
    # Populated from the loop's TaskRegistry-style counters at return
    # time so callers can spot recurring failure modes without tailing
    # logs. Keys are stable strings; missing keys mean "zero".
    reliability_stats: dict[str, int] = field(default_factory=dict)

    def to_summary(self, spec: Any = None) -> str:
        if not self.success:
            return f"Failed: {self.error}"

        if not self.output:
            return "Completed (no output)"

        parts = [f"Completed with {len(self.output)} outputs:"]
        for key, value in list(self.output.items())[:5]:
            value_str = str(value)[:100]
            if len(str(value)) > 100:
                value_str += "..."
            parts.append(f" - {key}: {value_str}")
        return "\n".join(parts)


class AgentProtocol(ABC):
    """Interface all agent implementations must satisfy."""

    @abstractmethod
    async def execute(self, ctx: AgentContext) -> AgentResult:
        pass

    def validate_input(self, ctx: AgentContext) -> list[str]:
        errors = []
        for key in ctx.agent_spec.input_keys:
            if key not in ctx.input_data:
                errors.append(f"Missing required input: {key}")
        return errors
@@ -11,11 +11,7 @@ def list_framework_agents() -> list[Path]:
        [
            p
            for p in FRAMEWORK_AGENTS_DIR.iterdir()
-            if p.is_dir()
-            and (
-                (p / "agent.json").exists()
-                or (p / "agent.py").exists()
-            )
+            if p.is_dir() and ((p / "agent.json").exists() or (p / "agent.py").exists())
        ],
        key=lambda p: p.name,
    )

@@ -21,15 +21,15 @@ from pathlib import Path
from typing import TYPE_CHECKING

from framework.config import get_max_context_tokens
-from framework.host.agent_host import AgentHost
-from framework.host.execution_manager import EntryPointSpec
-from framework.llm import LiteLLMProvider
-from framework.loader.mcp_registry import MCPRegistry
-from framework.loader.tool_registry import ToolRegistry
from framework.orchestrator import Goal, NodeSpec, SuccessCriterion
from framework.orchestrator.checkpoint_config import CheckpointConfig
from framework.orchestrator.edge import GraphSpec
from framework.orchestrator.orchestrator import ExecutionResult
+from framework.llm import LiteLLMProvider
+from framework.loader.mcp_registry import MCPRegistry
+from framework.loader.tool_registry import ToolRegistry
+from framework.host.agent_host import AgentHost, create_agent_runtime
+from framework.host.execution_manager import EntryPointSpec

from .config import default_config
from .nodes import build_tester_node

@@ -126,9 +126,7 @@ def _list_local_accounts() -> list[dict]:
    try:
        from framework.credentials.local.registry import LocalCredentialRegistry

-        return [
-            info.to_account_dict() for info in LocalCredentialRegistry.default().list_accounts()
-        ]
+        return [info.to_account_dict() for info in LocalCredentialRegistry.default().list_accounts()]
    except ImportError as exc:
        logger.debug("Local credential registry unavailable: %s", exc)
        return []

@@ -181,9 +179,7 @@ def _list_env_fallback_accounts() -> list[dict]:
        if spec.credential_group in seen_groups:
            continue
        group_available = all(
-            _is_configured(n, s)
-            for n, s in CREDENTIAL_SPECS.items()
-            if s.credential_group == spec.credential_group
+            _is_configured(n, s) for n, s in CREDENTIAL_SPECS.items() if s.credential_group == spec.credential_group
        )
        if not group_available:
            continue

@@ -215,9 +211,7 @@ def list_connected_accounts() -> list[dict]:

    # Show env-var fallbacks only for credentials not already in the named registry
    local_providers = {a["provider"] for a in local}
-    env_fallbacks = [
-        a for a in _list_env_fallback_accounts() if a["provider"] not in local_providers
-    ]
+    env_fallbacks = [a for a in _list_env_fallback_accounts() if a["provider"] not in local_providers]

    return aden + local + env_fallbacks

@@ -272,9 +266,7 @@ def _activate_local_account(credential_id: str, alias: str) -> None:
    group_specs = [
        (cred_name, spec)
        for cred_name, spec in CREDENTIAL_SPECS.items()
-        if spec.credential_group == credential_id
-        or spec.credential_id == credential_id
-        or cred_name == credential_id
+        if spec.credential_group == credential_id or spec.credential_id == credential_id or cred_name == credential_id
    ]
    # Deduplicate — credential_id and credential_group may both match the same spec
    seen_env_vars: set[str] = set()

@@ -419,10 +411,7 @@ nodes = [
    NodeSpec(
        id="tester",
        name="Credential Tester",
-        description=(
-            "Interactive credential testing — lets the user pick an account "
-            "and verify it via API calls."
-        ),
+        description=("Interactive credential testing — lets the user pick an account and verify it via API calls."),
        node_type="event_loop",
        client_facing=True,
        max_node_visits=0,

@@ -469,10 +458,7 @@ pause_nodes = []
terminal_nodes = ["tester"]  # Tester node can terminate

conversation_mode = "continuous"
-identity_prompt = (
-    "You are a credential tester that verifies connected accounts and API keys "
-    "can make real API calls."
-)
+identity_prompt = "You are a credential tester that verifies connected accounts and API keys can make real API calls."
loop_config = {
    "max_iterations": 50,
    "max_tool_calls_per_turn": 30,

@@ -574,7 +560,9 @@ class CredentialTesterAgent:
        if self._selected_account is None:
            raise RuntimeError("No account selected. Call select_account() first.")

-        self._storage_path = Path.home() / ".hive" / "agents" / "credential_tester"
+        from framework.config import HIVE_HOME
+
+        self._storage_path = HIVE_HOME / "agents" / "credential_tester"
        self._storage_path.mkdir(parents=True, exist_ok=True)

        self._tool_registry = ToolRegistry()

@@ -613,7 +601,7 @@ class CredentialTesterAgent:

        graph = self._build_graph()

-        self._agent_runtime = create_agent_runtime(
+        self._agent_runtime = AgentHost(
            graph=graph,
            goal=goal,
            storage_path=self._storage_path,

@@ -1,9 +1,9 @@
{
-  "hive-tools": {
+  "hive_tools": {
    "transport": "stdio",
    "command": "uv",
    "args": ["run", "python", "mcp_server.py", "--stdio"],
    "cwd": "../../../../tools",
-    "description": "Hive tools MCP server with provider-specific tools"
+    "description": "hive_tools MCP server with provider-specific tools"
  }
}
@@ -4,9 +4,36 @@ from __future__ import annotations

import json
from dataclasses import dataclass, field
+from datetime import UTC
from pathlib import Path


+@dataclass
+class WorkerEntry:
+    """A single worker within a colony."""
+
+    name: str
+    config_path: Path
+    description: str = ""
+    tool_count: int = 0
+    task: str = ""
+    spawned_at: str = ""
+    queen_name: str = ""
+    colony_name: str = ""
+
+    def to_dict(self) -> dict:
+        return {
+            "name": self.name,
+            "config_path": str(self.config_path),
+            "description": self.description,
+            "tool_count": self.tool_count,
+            "task": self.task,
+            "spawned_at": self.spawned_at,
+            "queen_name": self.queen_name,
+            "colony_name": self.colony_name,
+        }
+
+
@dataclass
class AgentEntry:
    """Lightweight agent metadata for the picker / API discover endpoint."""

@@ -21,14 +48,17 @@ class AgentEntry:
    tool_count: int = 0
    tags: list[str] = field(default_factory=list)
    last_active: str | None = None
+    created_at: str | None = None
+    icon: str | None = None
+    workers: list[WorkerEntry] = field(default_factory=list)


def _get_last_active(agent_path: Path) -> str | None:
    """Return the most recent updated_at timestamp across all sessions.

    Checks both worker sessions (``~/.hive/agents/{name}/sessions/``) and
-    queen sessions (``~/.hive/queen/session/``) whose ``meta.json`` references
-    the same *agent_path*.
+    queen sessions (``~/.hive/agents/queens/default/sessions/``) whose
+    ``meta.json`` references the same *agent_path*.
    """
    from datetime import datetime

@@ -36,7 +66,9 @@ def _get_last_active(agent_path: Path) -> str | None:
    latest: str | None = None

    # 1. Worker sessions
-    sessions_dir = Path.home() / ".hive" / "agents" / agent_name / "sessions"
+    from framework.config import HIVE_HOME
+
+    sessions_dir = HIVE_HOME / "agents" / agent_name / "sessions"
    if sessions_dir.exists():
        for session_dir in sessions_dir.iterdir():
            if not session_dir.is_dir() or not session_dir.name.startswith("session_"):

@@ -52,33 +84,42 @@ def _get_last_active(agent_path: Path) -> str | None:
            except Exception:
                continue

-    # 2. Queen sessions
-    queen_sessions_dir = Path.home() / ".hive" / "queen" / "session"
-    if queen_sessions_dir.exists():
+    # 2. Queen sessions (scan all queen identity directories)
+    from framework.config import QUEENS_DIR
+
+    if QUEENS_DIR.exists():
        resolved = agent_path.resolve()
-        for d in queen_sessions_dir.iterdir():
-            if not d.is_dir():
+        for queen_dir in QUEENS_DIR.iterdir():
+            if not queen_dir.is_dir():
                continue
-            meta_file = d / "meta.json"
-            if not meta_file.exists():
+            sessions_dir = queen_dir / "sessions"
+            if not sessions_dir.exists():
                continue
-            try:
-                meta = json.loads(meta_file.read_text(encoding="utf-8"))
-                stored = meta.get("agent_path")
-                if not stored or Path(stored).resolve() != resolved:
+            for d in sessions_dir.iterdir():
+                if not d.is_dir():
                    continue
+                meta_file = d / "meta.json"
+                if not meta_file.exists():
+                    continue
+                try:
+                    meta = json.loads(meta_file.read_text(encoding="utf-8"))
+                    stored = meta.get("agent_path")
+                    if not stored or Path(stored).resolve() != resolved:
+                        continue
+                    ts = datetime.fromtimestamp(d.stat().st_mtime).isoformat()
+                    if latest is None or ts > latest:
+                        latest = ts
+                except Exception:
+                    continue
-            ts = datetime.fromtimestamp(d.stat().st_mtime).isoformat()
-            if latest is None or ts > latest:
-                latest = ts
-            except Exception:
-                continue

    return latest


def _count_sessions(agent_name: str) -> int:
    """Count session directories under ~/.hive/agents/{agent_name}/sessions/."""
-    sessions_dir = Path.home() / ".hive" / "agents" / agent_name / "sessions"
+    from framework.config import HIVE_HOME
+
+    sessions_dir = HIVE_HOME / "agents" / agent_name / "sessions"
    if not sessions_dir.exists():
        return 0
    return sum(1 for d in sessions_dir.iterdir() if d.is_dir() and d.name.startswith("session_"))

@@ -86,7 +127,9 @@ def _count_sessions(agent_name: str) -> int:

def _count_runs(agent_name: str) -> int:
    """Count unique run_ids across all sessions for an agent."""
-    sessions_dir = Path.home() / ".hive" / "agents" / agent_name / "sessions"
+    from framework.config import HIVE_HOME
+
+    sessions_dir = HIVE_HOME / "agents" / agent_name / "sessions"
    if not sessions_dir.exists():
        return 0
    run_ids: set[str] = set()

@@ -109,96 +152,119 @@ def _count_runs(agent_name: str) -> int:
    return len(run_ids)


_EXCLUDED_JSON_STEMS = {"agent", "flowchart", "triggers", "configuration", "metadata", "tasks"}


def _is_colony_dir(path: Path) -> bool:
    """Check if a directory is a colony with worker config files."""
    if not path.is_dir():
        return False
    return any(f.suffix == ".json" and f.stem not in _EXCLUDED_JSON_STEMS for f in path.iterdir() if f.is_file())


def _find_worker_configs(colony_dir: Path) -> list[Path]:
    """Find all worker config JSON files in a colony directory."""
    return sorted(
        p for p in colony_dir.iterdir() if p.is_file() and p.suffix == ".json" and p.stem not in _EXCLUDED_JSON_STEMS
    )


def _extract_agent_stats(agent_path: Path) -> tuple[int, int, list[str]]:
-    """Extract node count, tool count, and tags from an agent directory.
+    """Extract worker count, tool count, and tags from a colony directory."""
+    tags: list[str] = []

-    Checks agent.json (declarative) first, then agent.py (legacy).
-    """
-    import ast
+    worker_configs = _find_worker_configs(agent_path)
+    if worker_configs:
+        all_tools: set[str] = set()
+        for wc_path in worker_configs:
+            try:
+                data = json.loads(wc_path.read_text(encoding="utf-8"))
+                if isinstance(data, dict):
+                    tools = data.get("tools", [])
+                    if isinstance(tools, list):
+                        all_tools.update(tools)
+            except Exception:
+                pass
+        return len(worker_configs), len(all_tools), tags

-    node_count, tool_count, tags = 0, 0, []
-
-    # Declarative JSON agents (preferred)
-    agent_json = agent_path / "agent.json"
-    if agent_json.exists():
-        try:
-            data = json.loads(agent_json.read_text(encoding="utf-8"))
-            if isinstance(data, dict):
-                json_nodes = data.get("nodes", [])
-                node_count = len(json_nodes)
-                tools: set[str] = set()
-                for n in json_nodes:
-                    node_tools = n.get("tools", {})
-                    if isinstance(node_tools, dict):
-                        tools.update(node_tools.get("allowed", []))
-                    elif isinstance(node_tools, list):
-                        tools.update(node_tools)
-                tool_count = len(tools)
-                return node_count, tool_count, tags
-        except Exception:
-            pass
-
-    # Legacy: agent.py (AST-parsed)
-    agent_py = agent_path / "agent.py"
-    if agent_py.exists():
-        try:
-            tree = ast.parse(agent_py.read_text(encoding="utf-8"))
-            for node in ast.walk(tree):
-                if isinstance(node, ast.Assign):
-                    for target in node.targets:
-                        if isinstance(target, ast.Name) and target.id == "nodes":
-                            if isinstance(node.value, ast.List):
-                                node_count = len(node.value.elts)
-        except Exception:
-            pass
-
-    return node_count, tool_count, tags
+    return 0, 0, tags


def discover_agents() -> dict[str, list[AgentEntry]]:
    """Discover agents from all known sources grouped by category."""
-    from framework.loader.cli import (
-        _extract_python_agent_metadata,
-        _get_framework_agents_dir,
-        _is_valid_agent_dir,
-    )
+    from framework.config import COLONIES_DIR

    groups: dict[str, list[AgentEntry]] = {}
    sources = [
-        ("Your Agents", Path("exports")),
-        ("Framework", _get_framework_agents_dir()),
-        ("Examples", Path("examples/templates")),
+        ("Your Agents", COLONIES_DIR),
    ]

+    # Track seen agent directory names to avoid duplicates when the same
+    # agent exists in both colonies/ and exports/ (colonies takes priority).
+    _seen_agent_names: set[str] = set()
+
    for category, base_dir in sources:
        if not base_dir.exists():
            continue
        entries: list[AgentEntry] = []
        for path in sorted(base_dir.iterdir(), key=lambda p: p.name):
-            if not _is_valid_agent_dir(path):
+            if not _is_colony_dir(path):
                continue
+            if path.name in _seen_agent_names:
+                continue
+            _seen_agent_names.add(path.name)

-            name, desc = _extract_python_agent_metadata(path)
            config_fallback_name = path.name.replace("_", " ").title()
-            used_config = name != config_fallback_name
+            name = config_fallback_name
+            desc = ""

-            node_count, tool_count, tags = _extract_agent_stats(path)
-            if not used_config:
-                # Try agent.json (declarative) for metadata
-                agent_json_path = path / "agent.json"
-                if agent_json_path.exists():
-                    try:
-                        data = json.loads(
-                            agent_json_path.read_text(encoding="utf-8"),
+            # Read colony metadata for queen provenance and timestamps
+            colony_queen_name = ""
+            colony_created_at: str | None = None
+            colony_icon: str | None = None
+            metadata_path = path / "metadata.json"
+            if metadata_path.exists():
+                try:
+                    mdata = json.loads(metadata_path.read_text(encoding="utf-8"))
+                    colony_queen_name = mdata.get("queen_name", "")
+                    colony_created_at = mdata.get("created_at")
+                    colony_icon = mdata.get("icon")
+                except Exception:
+                    pass
+            # Fallback: use directory creation time if metadata lacks created_at
+            if not colony_created_at:
+                try:
+                    from datetime import datetime
+
+                    stat = path.stat()
+                    colony_created_at = datetime.fromtimestamp(stat.st_birthtime, tz=UTC).isoformat()
+                except Exception:
+                    pass
+
+            worker_entries: list[WorkerEntry] = []
+            worker_configs = _find_worker_configs(path)
+            for wc_path in worker_configs:
+                try:
+                    data = json.loads(wc_path.read_text(encoding="utf-8"))
+                    if isinstance(data, dict):
+                        w = WorkerEntry(
+                            name=data.get("name", wc_path.stem),
+                            config_path=wc_path,
+                            description=data.get("description", ""),
+                            tool_count=len(data.get("tools", [])),
+                            task=data.get("goal", {}).get("description", ""),
+                            spawned_at=data.get("spawned_at", ""),
+                            queen_name=colony_queen_name,
+                            colony_name=path.name,
+                        )
-                        if isinstance(data, dict):
-                            raw_name = data.get("name", name)
-                            if "-" in raw_name and " " not in raw_name:
-                                raw_name = raw_name.replace("-", " ").title()
-                            name = raw_name
-                            desc = data.get("description", desc)
-                    except Exception:
-                        pass
+                        worker_entries.append(w)
+                        if not desc:
+                            desc = data.get("description", "")
+                except Exception:
+                    pass

+            node_count = len(worker_entries)
+            tool_count = max((w.tool_count for w in worker_entries), default=0)

            entries.append(
                AgentEntry(

@@ -210,11 +276,16 @@ def discover_agents() -> dict[str, list[AgentEntry]]:
                    run_count=_count_runs(path.name),
                    node_count=node_count,
                    tool_count=tool_count,
-                    tags=tags,
+                    tags=[],
                    last_active=_get_last_active(path),
+                    created_at=colony_created_at,
+                    icon=colony_icon,
+                    workers=worker_entries,
                )
            )
        if entries:
-            groups[category] = entries
+            existing = groups.get(category, [])
+            existing.extend(entries)
+            groups[category] = existing

    return groups
@@ -1,19 +1,13 @@
-"""
-Queen — Native agent builder for the Hive framework.
+"""Queen -- the agent builder for the Hive framework."""

-Deeply understands the agent framework and produces complete Python packages
-with goals, nodes, edges, system prompts, MCP configuration, and tests
-from natural language specifications.
-"""

-from .agent import queen_goal, queen_graph
+from .agent import queen_goal, queen_loop_config
from .config import AgentMetadata, RuntimeConfig, default_config, metadata

__version__ = "1.0.0"

__all__ = [
    "queen_goal",
-    "queen_graph",
+    "queen_loop_config",
    "RuntimeConfig",
    "AgentMetadata",
    "default_config",

@@ -1,38 +1,26 @@
-"""Queen graph definition."""
+"""Queen agent definition.

-from framework.orchestrator import Goal
-from framework.orchestrator.edge import GraphSpec
+The queen is a single AgentLoop — no orchestrator dependency.
+Loaded by queen_orchestrator.create_queen().
+"""

+from framework.schemas.goal import Goal

from .nodes import queen_node

-# ---------------------------------------------------------------------------
-# Queen graph — the primary persistent conversation.
-# Loaded by queen_orchestrator.create_queen(), NOT by AgentRunner.
-# ---------------------------------------------------------------------------

queen_goal = Goal(
    id="queen-manager",
    name="Queen Manager",
-    description=(
-        "Manage the worker agent lifecycle and serve as the user's primary interactive interface."
-    ),
+    description=("Manage the worker agent lifecycle and serve as the user's primary interactive interface."),
    success_criteria=[],
    constraints=[],
)

-queen_graph = GraphSpec(
-    id="queen-graph",
-    goal_id=queen_goal.id,
-    version="1.0.0",
-    entry_node="queen",
-    entry_points={"start": "queen"},
-    terminal_nodes=[],
-    pause_nodes=[],
-    nodes=[queen_node],
-    edges=[],
-    conversation_mode="continuous",
-    loop_config={
-        "max_iterations": 999_999,
-        "max_tool_calls_per_turn": 30,
-    },
-)
+# Loop config -- used by queen_orchestrator to build LoopConfig
+queen_loop_config = {
+    "max_iterations": 999_999,
+    "max_tool_calls_per_turn": 30,
+    "max_context_tokens": 180_000,
+}

+__all__ = ["queen_goal", "queen_loop_config", "queen_node"]

@@ -2,12 +2,13 @@

import json
from dataclasses import dataclass, field
from pathlib import Path


def _load_preferred_model() -> str:
-    """Load preferred model from ~/.hive/configuration.json."""
-    config_path = Path.home() / ".hive" / "configuration.json"
+    """Load preferred model from $HIVE_HOME/configuration.json."""
+    from framework.config import HIVE_HOME
+
+    config_path = HIVE_HOME / "configuration.json"
    if config_path.exists():
        try:
            with open(config_path, encoding="utf-8") as f:

@@ -0,0 +1,235 @@
|
||||
"""One-shot LLM gate that decides if a queen DM is ready to fork a colony.
|
||||
|
||||
The queen's ``start_incubating_colony`` tool calls :func:`evaluate` with
|
||||
the queen's recent conversation and a proposed ``colony_name``. The
|
||||
evaluator returns a structured verdict:
|
||||
|
||||
{
|
||||
"ready": bool,
|
||||
"reasons": [str],
|
||||
"missing_prerequisites": [str],
|
||||
}
|
||||
|
||||
On ``ready=False`` the queen receives the verdict as her tool result and
|
||||
self-corrects (asks the user, refines scope, drops the idea). On
|
||||
``ready=True`` the tool flips the queen's phase to ``incubating``.
|
||||
|
||||
Failure mode is **fail-closed**: any LLM error or unparseable response
|
||||
returns ``ready=False`` with reason ``"evaluation_failed"`` so the queen
|
||||
cannot accidentally proceed past a broken gate.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
import re
|
||||
from typing import Any
|
||||
|
||||
from framework.agent_loop.conversation import Message
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
_INCUBATING_EVALUATOR_SYSTEM_PROMPT = """\
|
||||
You gate whether a queen agent should commit to forking a persistent
|
||||
"colony" (a headless worker spec written to disk). Forking is
|
||||
expensive: it ends the user's chat with this queen and the worker runs
|
||||
unattended afterward, so the spec must be settled before you approve.
|
||||
|
||||
Read the conversation excerpt and the queen's proposed colony_name,
|
||||
then decide.
|
||||
|
||||
APPROVE (ready=true) only when ALL of the following hold:
|
||||
1. The user has explicitly asked for work that needs to outlive this
|
||||
chat — recurring (cron / interval), monitoring + alert, scheduled
|
||||
batch, or "fire-and-forget background job". A one-shot question
|
||||
that the queen can answer in chat does NOT qualify.
|
||||
2. The scope of the work is concrete enough to write down — what
|
||||
inputs, what outputs, what success looks like. Vague ("help me
|
||||
with my workflow") does NOT qualify.
|
||||
3. The technical approach is at least sketched — what data sources,
|
||||
APIs, or tools the worker will use. The queen does not have to
|
||||
have written the SKILL.md yet, but she must have the operational
|
||||
ingredients available.
|
||||
4. There are no open clarifying questions on the table that the user
|
||||
hasn't answered. If the queen recently asked the user something
|
||||
and is still waiting, do NOT approve.
|
||||
|
||||
REJECT (ready=false) on any of:
|
||||
- Conversation is too short / too generic to support a settled spec.
|
||||
- User is still describing what they want.
|
||||
- User has expressed doubts, change-of-direction, or "let me think".
|
||||
- Work is one-shot and could be done in chat instead.
|
||||
- Open question awaiting user reply.
|
||||
|
||||
Reply with a JSON object exactly matching this shape:
|
||||
|
||||
{
|
||||
"ready": true | false,
|
||||
"reasons": ["short phrase", ...], // at least one entry
|
||||
"missing_prerequisites": ["short phrase", ...] // empty when ready
|
||||
}
|
||||
|
||||
``reasons`` explains the verdict in 1-3 short phrases.
|
||||
``missing_prerequisites`` lists what's missing in queen-actionable
|
||||
form ("user hasn't confirmed schedule", "no API auth flow discussed").
|
||||
Empty list when ``ready=true``.
|
||||
|
||||
Output JSON only. Do not wrap in markdown. Do not add prose.
|
||||
"""
|
||||
|
||||
|
||||
# Bound the formatted excerpt so the eval call stays cheap and fits well
|
||||
# under the LLM's context window even for long DM sessions.
|
||||
_MAX_MESSAGES = 30
|
||||
_MAX_TOOL_CONTENT_CHARS = 400
|
||||
_MAX_USER_CONTENT_CHARS = 2_000
|
||||
_MAX_ASSISTANT_CONTENT_CHARS = 2_000
|
||||
|
||||
|
||||
def format_conversation_excerpt(messages: list[Message]) -> str:
|
||||
"""Format the tail of a queen conversation for the evaluator prompt.
|
||||
|
||||
Keeps the most recent ``_MAX_MESSAGES`` messages. Tool results are
|
||||
truncated hard since they're rarely load-bearing for the readiness
|
||||
decision; user/assistant text is truncated more generously to
|
||||
preserve the actual conversation signal.
|
||||
"""
|
||||
if not messages:
|
||||
return "(no messages)"
|
||||
|
||||
tail = messages[-_MAX_MESSAGES:]
|
||||
parts: list[str] = []
|
||||
for msg in tail:
|
||||
role = msg.role.upper()
|
||||
content = (msg.content or "").strip()
|
||||
if msg.role == "tool":
|
||||
if len(content) > _MAX_TOOL_CONTENT_CHARS:
|
||||
content = content[:_MAX_TOOL_CONTENT_CHARS] + "..."
|
||||
elif msg.role == "assistant":
|
||||
# Surface tool-call intent for empty assistant turns so the
|
||||
# evaluator sees what the queen has been doing.
|
||||
if not content and msg.tool_calls:
|
||||
names = [tc.get("function", {}).get("name", "?") for tc in msg.tool_calls]
|
||||
content = f"(called: {', '.join(names)})"
|
||||
if len(content) > _MAX_ASSISTANT_CONTENT_CHARS:
|
||||
content = content[:_MAX_ASSISTANT_CONTENT_CHARS] + "..."
|
||||
else: # user
|
||||
if len(content) > _MAX_USER_CONTENT_CHARS:
|
||||
content = content[:_MAX_USER_CONTENT_CHARS] + "..."
|
||||
if content:
|
||||
parts.append(f"[{role}]: {content}")
|
||||
|
||||
return "\n\n".join(parts) if parts else "(no messages)"
|
||||
|
||||
|
||||
def _build_user_message(
    conversation_excerpt: str,
    colony_name: str,
) -> str:
    return (
        f"## Proposed colony name\n{colony_name}\n\n"
        f"## Recent conversation (oldest → newest)\n{conversation_excerpt}\n\n"
        "Decide: should this queen be approved to enter INCUBATING phase?"
    )


def _parse_verdict(raw: str) -> dict[str, Any] | None:
    """Parse the evaluator's JSON. Returns None if parsing fails."""
    if not raw:
        return None
    raw = raw.strip()
    try:
        return json.loads(raw)
    except json.JSONDecodeError:
        # Some models wrap JSON in markdown fences or add preamble.
        # Pull the first { ... } block out as a best-effort fallback —
        # mirrors the same recovery pattern used in recall_selector.py.
        match = re.search(r"\{.*\}", raw, re.DOTALL)
        if match:
            try:
                return json.loads(match.group())
            except json.JSONDecodeError:
                return None
        return None


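# Illustrative only — the fence-recovery path in action (hypothetical
# model output, not a real transcript):
#
#   >>> _parse_verdict('```json\n{"ready": false, "reasons": ["still scoping"]}\n```')
#   {'ready': False, 'reasons': ['still scoping']}
#
# The regex is greedy across the whole string, so a response with a
# preamble plus one JSON object is recovered; two separate objects
# would be captured together, fail the inner json.loads, and return None.

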
def _normalize_verdict(parsed: dict[str, Any]) -> dict[str, Any]:
    """Coerce a parsed verdict into the shape the tool returns to the queen."""
    ready = bool(parsed.get("ready"))
    reasons = parsed.get("reasons") or []
    if isinstance(reasons, str):
        reasons = [reasons]
    reasons = [str(r).strip() for r in reasons if str(r).strip()]
    missing = parsed.get("missing_prerequisites") or []
    if isinstance(missing, str):
        missing = [missing]
    missing = [str(m).strip() for m in missing if str(m).strip()]

    if ready:
        # When approved we don't surface missing prerequisites — the
        # incubating role prompt opens that floor itself.
        missing = []
    elif not reasons:
        # Always give the queen at least one reason to reflect on.
        reasons = ["evaluator returned no reasons"]

    return {
        "ready": ready,
        "reasons": reasons,
        "missing_prerequisites": missing,
    }


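# Illustrative only — a string "reasons" field is coerced into a list,
# and missing_prerequisites is cleared once ready is true:
#
#   >>> _normalize_verdict({"ready": True, "reasons": "goal is clear",
#   ...                     "missing_prerequisites": ["stale item"]})
#   {'ready': True, 'reasons': ['goal is clear'], 'missing_prerequisites': []}

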
async def evaluate(
    llm: Any,
    messages: list[Message],
    colony_name: str,
) -> dict[str, Any]:
    """Run the incubating evaluator against the queen's conversation.

    Args:
        llm: An LLM provider exposing ``acomplete(messages, system, ...)``.
            Pass the queen's own ``ctx.llm`` so the eval uses the same
            model the user is talking to.
        messages: The queen's conversation messages, oldest first. The
            evaluator slices its own tail; pass the full list.
        colony_name: Validated colony slug.

    Returns:
        ``{"ready": bool, "reasons": [str], "missing_prerequisites": [str]}``.
        Fail-closed on any error.
    """
    excerpt = format_conversation_excerpt(messages)
    user_msg = _build_user_message(excerpt, colony_name)

    try:
        response = await llm.acomplete(
            messages=[{"role": "user", "content": user_msg}],
            system=_INCUBATING_EVALUATOR_SYSTEM_PROMPT,
            max_tokens=1024,
            response_format={"type": "json_object"},
        )
    except Exception as exc:  # noqa: BLE001 - fail-closed on any LLM failure
        logger.warning("incubating_evaluator: LLM call failed (%s)", exc)
        return {
            "ready": False,
            "reasons": ["evaluation_failed"],
            "missing_prerequisites": ["evaluator LLM call failed; retry once the queen can reach the model again"],
        }

    raw = (getattr(response, "content", "") or "").strip()
    parsed = _parse_verdict(raw)
    if parsed is None:
        logger.warning(
            "incubating_evaluator: could not parse JSON verdict (raw=%.200s)",
            raw,
        )
        return {
            "ready": False,
            "reasons": ["evaluation_failed"],
            "missing_prerequisites": ["evaluator returned malformed JSON; retry"],
        }

    return _normalize_verdict(parsed)
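# Illustrative only — wiring the evaluator into a tool handler, with a
# hypothetical ctx object standing in for the queen's runtime context:
#
#   async def check_readiness(ctx, colony_name: str) -> dict:
#       verdict = await evaluate(ctx.llm, ctx.conversation.messages, colony_name)
#       if not verdict["ready"]:
#           # Surface the gaps back to the queen so she can close them.
#           return {"approved": False, "todo": verdict["missing_prerequisites"]}
#       return {"approved": True, "reasons": verdict["reasons"]}
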
@@ -0,0 +1,3 @@
{
  "include": ["gcu-tools", "hive_tools", "terminal-tools", "chart-tools"]
}
@@ -1,9 +1,23 @@
{
  "coder-tools": {
  "files-tools": {
    "transport": "stdio",
    "command": "uv",
    "args": ["run", "python", "coder_tools_server.py", "--stdio"],
    "args": ["run", "python", "files_server.py", "--stdio"],
    "cwd": "../../../../tools",
    "description": "Unsandboxed file system tools for code generation and validation"
    "description": "File system tools (read/write/edit/search) for code generation"
  },
  "gcu-tools": {
    "transport": "stdio",
    "command": "uv",
    "args": ["run", "python", "-m", "gcu.server", "--stdio", "--capabilities", "browser"],
    "cwd": "../../../../tools",
    "description": "Browser automation tools (Playwright-based)"
  },
  "hive_tools": {
    "transport": "stdio",
    "command": "uv",
    "args": ["run", "python", "mcp_server.py", "--stdio"],
    "cwd": "../../../../tools",
    "description": "Aden integration tools (gmail, calendar, hubspot, etc.) — gated by credentials and the verified manifest"
  }
}

File diff suppressed because it is too large
@@ -1,420 +0,0 @@
"""Queen global cross-session memory.

Three-tier memory architecture:
    ~/.hive/queen/MEMORY.md                      — semantic (who, what, why)
    ~/.hive/queen/memories/MEMORY-YYYY-MM-DD.md  — episodic (daily journals)
    ~/.hive/queen/session/{id}/data/adapt.md     — working (session-scoped)

Semantic and episodic files are injected at queen session start.

Semantic memory (MEMORY.md) is updated automatically at session end via
consolidate_queen_memory() — the queen never rewrites this herself.

Episodic memory (MEMORY-date.md) can be written by the queen during a session
via the write_to_diary tool, and is also appended to at session end by
consolidate_queen_memory().
"""

from __future__ import annotations

import asyncio
import json
import logging
import traceback
from datetime import date, datetime
from pathlib import Path

logger = logging.getLogger(__name__)


def _queen_dir() -> Path:
    return Path.home() / ".hive" / "queen"


def format_memory_date(d: date) -> str:
    """Return a cross-platform long date label without a zero-padded day."""
    return f"{d.strftime('%B')} {d.day}, {d.year}"


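# Illustrative only — the manual f-string sidesteps the platform split
# between %-d (glibc/BSD) and %#d (Windows) for non-padded day numbers:
#
#   >>> format_memory_date(date(2026, 3, 8))
#   'March 8, 2026'

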
def semantic_memory_path() -> Path:
    return _queen_dir() / "MEMORY.md"


def episodic_memory_path(d: date | None = None) -> Path:
    d = d or date.today()
    return _queen_dir() / "memories" / f"MEMORY-{d.strftime('%Y-%m-%d')}.md"


def read_semantic_memory() -> str:
    path = semantic_memory_path()
    return path.read_text(encoding="utf-8").strip() if path.exists() else ""


def read_episodic_memory(d: date | None = None) -> str:
    path = episodic_memory_path(d)
    return path.read_text(encoding="utf-8").strip() if path.exists() else ""


def _find_recent_episodic(lookback: int = 7) -> tuple[date, str] | None:
    """Find the most recent non-empty episodic memory within *lookback* days."""
    from datetime import timedelta

    today = date.today()
    for offset in range(lookback):
        d = today - timedelta(days=offset)
        content = read_episodic_memory(d)
        if content:
            return d, content
    return None


# Budget (in characters) for episodic memory in the system prompt.
_EPISODIC_CHAR_BUDGET = 6_000


def format_for_injection() -> str:
    """Format cross-session memory for system prompt injection.

    Returns an empty string if no meaningful content exists yet (e.g. first
    session with only the seed template).
    """
    semantic = read_semantic_memory()
    recent = _find_recent_episodic()

    # Suppress injection if semantic is still just the seed template
    if semantic and semantic.startswith("# My Understanding of the User\n\n*No sessions"):
        semantic = ""

    parts: list[str] = []
    if semantic:
        parts.append(semantic)

    if recent:
        d, content = recent
        # Trim oversized episodic entries to keep the prompt manageable
        if len(content) > _EPISODIC_CHAR_BUDGET:
            content = content[:_EPISODIC_CHAR_BUDGET] + "\n\n…(truncated)"
        today = date.today()
        if d == today:
            label = f"## Today — {format_memory_date(d)}"
        else:
            label = f"## {format_memory_date(d)}"
        parts.append(f"{label}\n\n{content}")

    if not parts:
        return ""

    body = "\n\n---\n\n".join(parts)
    return "--- Your Cross-Session Memory ---\n\n" + body + "\n\n--- End Cross-Session Memory ---"


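# Illustrative only — when both tiers have content, the injected block
# produced above looks roughly like:
#
#   --- Your Cross-Session Memory ---
#
#   # My Understanding of the User
#   ...semantic sections...
#
#   ---
#
#   ## Today — March 8, 2026
#
#   ...diary prose...
#
#   --- End Cross-Session Memory ---

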
_SEED_TEMPLATE = """\
# My Understanding of the User

*No sessions recorded yet.*

## Who They Are

## How They Communicate

## What They're Trying to Achieve

## What's Working

## What I've Learned
"""


def append_episodic_entry(content: str) -> None:
    """Append a timestamped prose entry to today's episodic memory file.

    Creates the file (with a date heading) if it doesn't exist yet.
    Used both by the queen's diary tool and by the consolidation hook.
    """
    ep_path = episodic_memory_path()
    ep_path.parent.mkdir(parents=True, exist_ok=True)
    today = date.today()
    today_str = format_memory_date(today)
    timestamp = datetime.now().strftime("%H:%M")
    if not ep_path.exists():
        header = f"# {today_str}\n\n"
        block = f"{header}### {timestamp}\n\n{content.strip()}\n"
    else:
        block = f"\n\n### {timestamp}\n\n{content.strip()}\n"
    with ep_path.open("a", encoding="utf-8") as f:
        f.write(block)


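# Illustrative only — after two diary writes on the same day the file
# reads roughly:
#
#   # March 8, 2026
#
#   ### 09:15
#
#   First reflection of the day...
#
#   ### 14:40
#
#   A later entry appended below it...

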
def seed_if_missing() -> None:
    """Create MEMORY.md with a blank template if it doesn't exist yet."""
    path = semantic_memory_path()
    if path.exists():
        return
    path.parent.mkdir(parents=True, exist_ok=True)
    path.write_text(_SEED_TEMPLATE, encoding="utf-8")


# ---------------------------------------------------------------------------
# Consolidation prompt
# ---------------------------------------------------------------------------

_SEMANTIC_SYSTEM = """\
You maintain the persistent cross-session memory of an AI assistant called the Queen.
Review the session notes and rewrite MEMORY.md — the Queen's durable understanding of the
person she works with across all sessions.

Write entirely in the Queen's voice — first person, reflective, honest.
Not a log of events, but genuine understanding of who this person is over time.

Rules:
- Update and synthesise: incorporate new understanding, update facts that have changed, remove
  details that are stale, superseded, or no longer say anything meaningful about the person.
- Keep it as structured markdown with named sections about the PERSON, not about today.
- Do NOT include diary sections, daily logs, or session summaries. Those belong elsewhere.
  MEMORY.md is about who they are, what they want, what works — not what happened today.
- Maintain a "How They Communicate" section: technical depth, preferred pace
  (fast/exploratory/thorough), what communication approaches have worked or not,
  tone preferences. Update based on diary reflections about communication.
  This section should evolve — "prefers direct answers" is useful on day 1;
  "prefers direct answers for technical questions but wants more context when
  discussing architecture trade-offs" is better by day 5.
- Reference dates only when noting a lasting milestone (e.g. "since March 8th they prefer X").
- If the session had no meaningful new information about the person,
  return the existing text unchanged.
- Do not add fictional details. Only reflect what is evidenced in the notes.
- Stay concise. Prune rather than accumulate. A lean, accurate file is more useful than a
  dense one. If something was true once but has been resolved or superseded, remove it.
- Output only the raw markdown content of MEMORY.md. No preamble, no code fences.
"""

_DIARY_SYSTEM = """\
You maintain the daily episodic diary of an AI assistant called the Queen.
You receive: (1) today's existing diary so far, and (2) notes from the latest session.

Rewrite the complete diary for today as a single unified narrative —
first person, reflective, honest.
Merge and deduplicate: if the same story (e.g. a research agent stalling) recurred several times,
describe it once with appropriate weight rather than retelling it. Weave in new developments from
the session notes. Preserve important milestones, emotional texture, and session path references.
Preserve reflections about communication effectiveness — these are important inputs for the
Queen's evolving understanding of the user. A reflection like "they responded much better when
I led with the recommendation instead of listing options" is as important as
"we built a Gmail agent."

If today's diary is empty, write the initial entry based on the session notes alone.

Output only the full diary prose — no date heading, no timestamp headers,
no preamble, no code fences.
"""


def read_session_context(session_dir: Path, max_messages: int = 80) -> str:
    """Extract a readable transcript from conversation parts + adapt.md.

    Reads the last ``max_messages`` conversation parts and the session's
    adapt.md (working memory). Tool results are omitted — only user and
    assistant turns (with tool-call names noted) are included.
    """
    parts: list[str] = []

    # Working notes
    adapt_path = session_dir / "data" / "adapt.md"
    if adapt_path.exists():
        text = adapt_path.read_text(encoding="utf-8").strip()
        if text:
            parts.append(f"## Session Working Notes (adapt.md)\n\n{text}")

    # Conversation transcript
    parts_dir = session_dir / "conversations" / "parts"
    if parts_dir.exists():
        part_files = sorted(parts_dir.glob("*.json"))[-max_messages:]
        lines: list[str] = []
        for pf in part_files:
            try:
                data = json.loads(pf.read_text(encoding="utf-8"))
                role = data.get("role", "")
                content = str(data.get("content", "")).strip()
                tool_calls = data.get("tool_calls") or []
                if role == "tool":
                    continue  # skip verbose tool results
                if role == "assistant" and tool_calls and not content:
                    names = [tc.get("function", {}).get("name", "?") for tc in tool_calls]
                    lines.append(f"[queen calls: {', '.join(names)}]")
                elif content:
                    label = "user" if role == "user" else "queen"
                    lines.append(f"[{label}]: {content[:600]}")
            except (KeyError, TypeError) as exc:
                logger.debug("Skipping malformed conversation message: %s", exc)
                continue
            except Exception:
                logger.warning("Unexpected error parsing conversation message", exc_info=True)
                continue
        if lines:
            parts.append("## Conversation\n\n" + "\n".join(lines))

    return "\n\n".join(parts)


# ---------------------------------------------------------------------------
# Context compaction (binary-split LLM summarisation)
# ---------------------------------------------------------------------------

# If the raw session context exceeds this many characters, compact it first
# before sending to the consolidation LLM. ~200 k chars ≈ 50 k tokens.
_CTX_COMPACT_CHAR_LIMIT = 200_000
_CTX_COMPACT_MAX_DEPTH = 8

_COMPACT_SYSTEM = (
    "Summarise this conversation segment. Preserve: user goals, key decisions, "
    "what was built or changed, emotional tone, and important outcomes. "
    "Write concisely in third person past tense. Omit routine tool invocations "
    "unless the result matters."
)


async def _compact_context(text: str, llm: object, *, _depth: int = 0) -> str:
    """Binary-split and LLM-summarise *text* until it fits within the char limit.

    Mirrors the recursive binary-splitting strategy used by the main agent
    compaction pipeline (EventLoopNode._llm_compact).
    """
    if len(text) <= _CTX_COMPACT_CHAR_LIMIT or _depth >= _CTX_COMPACT_MAX_DEPTH:
        return text

    # Split near the midpoint on a line boundary so we don't cut mid-message
    mid = len(text) // 2
    split_at = text.rfind("\n", 0, mid) + 1
    if split_at <= 0:
        split_at = mid

    half1, half2 = text[:split_at], text[split_at:]

    async def _summarise(chunk: str) -> str:
        try:
            resp = await llm.acomplete(
                messages=[{"role": "user", "content": chunk}],
                system=_COMPACT_SYSTEM,
                max_tokens=2048,
            )
            return resp.content.strip()
        except Exception:
            logger.warning(
                "queen_memory: context compaction LLM call failed (depth=%d), truncating",
                _depth,
            )
            return chunk[: _CTX_COMPACT_CHAR_LIMIT // 4]

    s1, s2 = await asyncio.gather(_summarise(half1), _summarise(half2))
    combined = s1 + "\n\n" + s2
    if len(combined) > _CTX_COMPACT_CHAR_LIMIT:
        return await _compact_context(combined, llm, _depth=_depth + 1)
    return combined


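# Illustrative only — the recursion bottoms out either when the text
# fits or at depth 8; compaction is best-effort, so callers should not
# assume the result is strictly under the limit. A hypothetical call
# from the consolidation path (sizes invented for illustration):
#
#   transcript = read_session_context(session_dir)       # say ~500 k chars
#   compacted = await _compact_context(transcript, llm)  # two parallel summaries per level

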
async def consolidate_queen_memory(
    session_id: str,
    session_dir: Path,
    llm: object,
) -> None:
    """Update MEMORY.md and append a diary entry based on the current session.

    Reads conversation parts and adapt.md from session_dir. Called
    periodically in the background and once at session end. Failures are
    logged and silently swallowed so they never block teardown.

    Args:
        session_id: The session ID (used for the adapt.md path reference).
        session_dir: Path to the session directory (~/.hive/queen/session/{id}).
        llm: LLMProvider instance (must support acomplete()).
    """
    try:
        session_context = read_session_context(session_dir)
        if not session_context:
            logger.debug("queen_memory: no session context, skipping consolidation")
            return

        logger.info("queen_memory: consolidating memory for session %s ...", session_id)

        # If the transcript is very large, compact it with recursive binary LLM
        # summarisation before sending to the consolidation model.
        if len(session_context) > _CTX_COMPACT_CHAR_LIMIT:
            logger.info(
                "queen_memory: session context is %d chars — compacting first",
                len(session_context),
            )
            session_context = await _compact_context(session_context, llm)
            logger.info("queen_memory: compacted to %d chars", len(session_context))

        existing_semantic = read_semantic_memory()
        today_journal = read_episodic_memory()
        today = date.today()
        today_str = format_memory_date(today)
        adapt_path = session_dir / "data" / "adapt.md"

        user_msg = (
            f"## Existing Semantic Memory (MEMORY.md)\n\n"
            f"{existing_semantic or '(none yet)'}\n\n"
            f"## Today's Diary So Far ({today_str})\n\n"
            f"{today_journal or '(none yet)'}\n\n"
            f"{session_context}\n\n"
            f"## Session Reference\n\n"
            f"Session ID: {session_id}\n"
            f"Session path: {adapt_path}\n"
        )

        logger.debug(
            "queen_memory: calling LLM (%d chars of context, ~%d tokens est.)",
            len(user_msg),
            len(user_msg) // 4,
        )

        from framework.agents.queen.config import default_config

        semantic_resp, diary_resp = await asyncio.gather(
            llm.acomplete(
                messages=[{"role": "user", "content": user_msg}],
                system=_SEMANTIC_SYSTEM,
                max_tokens=default_config.max_tokens,
            ),
            llm.acomplete(
                messages=[{"role": "user", "content": user_msg}],
                system=_DIARY_SYSTEM,
                max_tokens=default_config.max_tokens,
            ),
        )

        new_semantic = semantic_resp.content.strip()
        diary_entry = diary_resp.content.strip()

        if new_semantic:
            path = semantic_memory_path()
            path.parent.mkdir(parents=True, exist_ok=True)
            path.write_text(new_semantic, encoding="utf-8")
            logger.info("queen_memory: semantic memory updated (%d chars)", len(new_semantic))

        if diary_entry:
            # Rewrite today's episodic file in-place — the LLM has merged and
            # deduplicated the full day's content, so we replace rather than append.
            ep_path = episodic_memory_path()
            ep_path.parent.mkdir(parents=True, exist_ok=True)
            heading = f"# {today_str}"
            ep_path.write_text(f"{heading}\n\n{diary_entry}\n", encoding="utf-8")
            logger.info(
                "queen_memory: episodic diary rewritten for %s (%d chars)",
                today_str,
                len(diary_entry),
            )

    except Exception:
        tb = traceback.format_exc()
        logger.exception("queen_memory: consolidation failed")
        # Write to file so the cause is findable regardless of log verbosity.
        error_path = _queen_dir() / "consolidation_error.txt"
        try:
            error_path.parent.mkdir(parents=True, exist_ok=True)
            error_path.write_text(
                f"session: {session_id}\ntime: {datetime.now().isoformat()}\n\n{tb}",
                encoding="utf-8",
            )
        except OSError:
            pass  # Cannot write error file; original exception already logged
@@ -1,24 +1,25 @@
"""Shared memory helpers for queen/worker recall and reflection.
"""Queen global memory helpers.

Each memory is an individual ``.md`` file in ``~/.hive/queen/memories/``
with optional YAML frontmatter (name, type, description). Frontmatter
is a convention enforced by prompt instructions — parsing is lenient and
malformed files degrade gracefully (appear in scans with ``None`` metadata).
Memory hierarchy::

Cursor-based incremental processing tracks which conversation messages
have already been processed by the reflection agent.
    ~/.hive/memories/
        global/                # shared across all queens and colonies
        colonies/{name}/       # colony-scoped memories
        agents/queens/{name}/  # queen-specific memories
        agents/{name}/         # per-worker-agent memories

Each memory is an individual ``.md`` file with optional YAML frontmatter
(name, type, description).
"""

from __future__ import annotations

import logging
import re
import shutil
import time
from dataclasses import dataclass, field
from datetime import date
from pathlib import Path
from typing import Any

from framework.config import MEMORIES_DIR

logger = logging.getLogger(__name__)

@@ -26,54 +27,33 @@ logger = logging.getLogger(__name__)
# Constants
# ---------------------------------------------------------------------------

MEMORY_TYPES: tuple[str, ...] = ("goal", "environment", "technique", "reference", "diary")
GLOBAL_MEMORY_CATEGORIES: tuple[str, ...] = ("profile", "preference", "environment", "feedback")

_HIVE_QUEEN_DIR = Path.home() / ".hive" / "queen"
# Legacy shared v2 root. Colony memory now lives under queen sessions.
MEMORY_DIR: Path = _HIVE_QUEEN_DIR / "memories"

MAX_FILES: int = 200
MAX_FILE_SIZE_BYTES: int = 4096  # 4 KB hard limit per memory file

# How many lines of a memory file to read for header scanning.
_HEADER_LINE_LIMIT: int = 30
_MIGRATION_MARKER = ".migrated-from-shared-memory"
_GLOBAL_MEMORY_CODE_PATTERN = re.compile(
    r"(/Users/|~/.hive|\.py\b|\.ts\b|\.tsx\b|\.js\b|"
    r"\b(graph|node|runtime|session|execution|worker|queen|checkpoint|flowchart)\b)",
    re.IGNORECASE,
)

# Frontmatter example provided to the reflection agent via prompt.
MEMORY_FRONTMATTER_EXAMPLE: list[str] = [
    "```markdown",
    "---",
    "name: {{memory name}}",
    (
        "description: {{one-line description — used to decide "
        "relevance in future conversations, so be specific}}"
    ),
    f"type: {{{{{', '.join(MEMORY_TYPES)}}}}}",
    "---",
    "",
    (
        "{{memory content — for feedback/project types, "
        "structure as: rule/fact, then **Why:** "
        "and **How to apply:** lines}}"
    ),
    "```",
]


def colony_memory_dir(colony_id: str) -> Path:
    """Return the colony memory directory for a queen session."""
    return _HIVE_QUEEN_DIR / "session" / colony_id / "memory" / "colony"


def global_memory_dir() -> Path:
    """Return the queen-global memory directory."""
    return _HIVE_QUEEN_DIR / "global_memory"
    """Return the global memory directory (shared across all queens/colonies)."""
    return MEMORIES_DIR / "global"


def colony_memory_dir(colony_name: str) -> Path:
    """Return the memory directory for a named colony."""
    return MEMORIES_DIR / "colonies" / colony_name


def queen_memory_dir(queen_name: str = "default") -> Path:
    """Return the memory directory for a named queen."""
    return MEMORIES_DIR / "agents" / "queens" / queen_name


def agent_memory_dir(agent_name: str) -> Path:
    """Return the memory directory for a worker agent."""
    return MEMORIES_DIR / "agents" / agent_name


# ---------------------------------------------------------------------------
@@ -107,15 +87,6 @@ def parse_frontmatter(text: str) -> dict[str, str]:
    return result


def parse_memory_type(raw: str | None) -> str | None:
    """Validate *raw* against supported memory categories."""
    if raw is None:
        return None
    normalized = raw.strip().lower()
    allowed = set(MEMORY_TYPES) | set(GLOBAL_MEMORY_CATEGORIES)
    return normalized if normalized in allowed else None


def parse_global_memory_category(raw: str | None) -> str | None:
    """Validate *raw* against ``GLOBAL_MEMORY_CATEGORIES``."""
    if raw is None:
@@ -164,7 +135,7 @@ class MemoryFile:
            filename=path.name,
            path=path,
            name=fm.get("name"),
            type=parse_memory_type(fm.get("type")),
            type=parse_global_memory_category(fm.get("type")),
            description=fm.get("description"),
            header_lines=lines,
            mtime=mtime,
@@ -182,7 +153,7 @@ def scan_memory_files(memory_dir: Path | None = None) -> list[MemoryFile]:
    Files are sorted by modification time (newest first). Dotfiles and
    subdirectories are ignored.
    """
    d = memory_dir or MEMORY_DIR
    d = memory_dir or global_memory_dir()
    if not d.is_dir():
        return []

@@ -235,307 +206,30 @@ def build_memory_document(
    )


def diary_filename(d: date | None = None) -> str:
    """Return the diary memory filename for date *d* (default: today)."""
    d = d or date.today()
    return f"MEMORY-{d.strftime('%Y-%m-%d')}.md"


def build_diary_document(*, date_str: str, body: str) -> str:
    """Build a diary memory file with frontmatter."""
    return build_memory_document(
        name=f"diary-{date_str}",
        description=f"Daily session narrative for {date_str}",
        mem_type="diary",
        body=body,
    )


def validate_global_memory_payload(
    *,
    category: str,
    description: str,
    content: str,
) -> str:
    """Validate a queen-global memory save request."""
    parsed = parse_global_memory_category(category)
    if parsed is None:
        raise ValueError(
            "Invalid global memory category. Use one of: " + ", ".join(GLOBAL_MEMORY_CATEGORIES)
        )
    if not description.strip():
        raise ValueError("Global memory description cannot be empty.")
    if not content.strip():
        raise ValueError("Global memory content cannot be empty.")

    probe = f"{description}\n{content}"
    if _GLOBAL_MEMORY_CODE_PATTERN.search(probe):
        raise ValueError(
            "Global memory is only for durable user profile, preferences, "
            "environment, or feedback — not task/code/runtime details."
        )
    return parsed


def save_global_memory(
    *,
    category: str,
    description: str,
    content: str,
    name: str | None = None,
    memory_dir: Path | None = None,
) -> tuple[str, Path]:
    """Persist one queen-global memory entry."""
    parsed = validate_global_memory_payload(
        category=category,
        description=description,
        content=content,
    )
    target_dir = memory_dir or global_memory_dir()
    target_dir.mkdir(parents=True, exist_ok=True)
    memory_name = (name or description).strip()
    filename = allocate_memory_filename(target_dir, memory_name)
    doc = build_memory_document(
        name=memory_name,
        description=description,
        mem_type=parsed,
        body=content,
    )
    if len(doc.encode("utf-8")) > MAX_FILE_SIZE_BYTES:
        raise ValueError(f"Global memory entry exceeds the {MAX_FILE_SIZE_BYTES} byte limit.")
    path = target_dir / filename
    path.write_text(doc, encoding="utf-8")
    return filename, path


# ---------------------------------------------------------------------------
# Manifest formatting
# ---------------------------------------------------------------------------


def _age_label(mtime: float) -> str:
    """Human-readable age string from an mtime."""
    age_days = memory_age_days(mtime)
    if age_days <= 0:
        return "today"
    if age_days == 1:
        return "1 day ago"
    return f"{age_days} days ago"


def format_memory_manifest(files: list[MemoryFile]) -> str:
    """One-line-per-file text manifest for the recall selector / reflection agent.
    """One-line-per-file text manifest.

    Format: ``[type] filename (age): description``
    Format: ``[type] filename: description``
    """
    lines: list[str] = []
    for mf in files:
        t = mf.type or "unknown"
        desc = mf.description or "(no description)"
        age = _age_label(mf.mtime)
        lines.append(f"[{t}] {mf.filename} ({age}): {desc}")
        lines.append(f"[{t}] {mf.filename}: {desc}")
    return "\n".join(lines)


# ---------------------------------------------------------------------------
# Freshness / staleness
# ---------------------------------------------------------------------------

_SECONDS_PER_DAY = 86_400


def memory_age_days(mtime: float) -> int:
    """Return the age of a memory file in whole days."""
    if mtime <= 0:
        return 0
    return int((time.time() - mtime) / _SECONDS_PER_DAY)


def memory_freshness_text(mtime: float) -> str:
    """Return a staleness warning for injection, or empty string if fresh."""
    d = memory_age_days(mtime)
    if d <= 1:
        return ""
    return (
        f"This memory is {d} days old. "
        "Memories are point-in-time observations, not live state — "
        "claims about code behavior or file:line citations may be outdated. "
        "Verify against current code before asserting as fact."
    )


# ---------------------------------------------------------------------------
# Cursor-based incremental processing
# Initialisation
# ---------------------------------------------------------------------------


async def read_conversation_parts(session_dir: Path) -> list[dict[str, Any]]:
    """Read all conversation parts for a session using FileConversationStore.

    Returns a list of raw message dicts in sequence order.
    """
    from framework.storage.conversation_store import FileConversationStore

    store = FileConversationStore(session_dir / "conversations")
    return await store.read_parts()


# ---------------------------------------------------------------------------
# Initialisation and legacy migration
# ---------------------------------------------------------------------------


def init_memory_dir(
    memory_dir: Path | None = None,
    *,
    migrate_legacy: bool = False,
) -> None:
    """Create the memory directory if missing.

    When ``migrate_legacy`` is true, migrate both v1 memory files and the
    previous shared v2 queen memory store into this directory.
    """
    d = memory_dir or MEMORY_DIR
    first_run = not d.exists()
def init_memory_dir(memory_dir: Path | None = None) -> None:
    """Create the memory directory if missing."""
    d = memory_dir or global_memory_dir()
    d.mkdir(parents=True, exist_ok=True)
    if migrate_legacy:
        migrate_legacy_memories(d)
        migrate_shared_v2_memories(d)
    elif first_run and d == MEMORY_DIR:
        migrate_legacy_memories(d)


def migrate_legacy_memories(memory_dir: Path | None = None) -> None:
    """Convert old MEMORY.md + MEMORY-YYYY-MM-DD.md files to individual memory files.

    Originals are moved to ``{memory_dir}/.legacy/``.
    """
    d = memory_dir or MEMORY_DIR
    queen_dir = _HIVE_QUEEN_DIR
    legacy_archive = d / ".legacy"

    migrated_any = False

    # --- Semantic memory (MEMORY.md) ---
    semantic = queen_dir / "MEMORY.md"
    if semantic.exists():
        content = semantic.read_text(encoding="utf-8").strip()
        # Skip the blank seed template.
        if content and not content.startswith("# My Understanding of the User\n\n*No sessions"):
            _write_migration_file(
                d,
                filename="legacy-semantic-memory.md",
                name="legacy-semantic-memory",
                mem_type="reference",
                description="Migrated semantic memory from previous memory system",
                body=content,
            )
            migrated_any = True
        # Archive original.
        legacy_archive.mkdir(parents=True, exist_ok=True)
        semantic.rename(legacy_archive / "MEMORY.md")

    # --- Episodic memories (MEMORY-YYYY-MM-DD.md) ---
    old_memories_dir = queen_dir / "memories"
    if old_memories_dir.is_dir():
        for ep_file in sorted(old_memories_dir.glob("MEMORY-*.md")):
            content = ep_file.read_text(encoding="utf-8").strip()
            if not content:
                continue
            date_part = ep_file.stem.replace("MEMORY-", "")
            slug = f"legacy-diary-{date_part}.md"
            _write_migration_file(
                d,
                filename=slug,
                name=f"legacy-diary-{date_part}",
                mem_type="diary",
                description=f"Migrated diary entry from {date_part}",
                body=content,
            )
            migrated_any = True
            # Archive original.
            legacy_archive.mkdir(parents=True, exist_ok=True)
            ep_file.rename(legacy_archive / ep_file.name)

    if migrated_any:
        logger.info("queen_memory_v2: migrated legacy memory files to %s", d)


def migrate_shared_v2_memories(
    memory_dir: Path | None = None,
    *,
    source_dir: Path | None = None,
) -> None:
    """Move shared queen v2 memory files into a colony directory once."""
    d = memory_dir or MEMORY_DIR
    d.mkdir(parents=True, exist_ok=True)
    src = source_dir or MEMORY_DIR
    if d.resolve() == src.resolve():
        return

    marker = d / _MIGRATION_MARKER
    if marker.exists():
        return

    if not src.is_dir():
        return

    md_files = sorted(f for f in src.glob("*.md") if f.is_file() and not f.name.startswith("."))
    if not md_files:
        marker.write_text("no shared memories found\n", encoding="utf-8")
        return

    archive = src / ".legacy_colony_migration"
    archive.mkdir(parents=True, exist_ok=True)
    migrated_any = False

    for src_file in md_files:
        target = d / src_file.name
        if not target.exists():
            try:
                shutil.copy2(src_file, target)
                migrated_any = True
            except OSError:
                logger.debug("shared memory migration copy failed for %s", src_file, exc_info=True)
                continue

        archived = archive / src_file.name
        counter = 2
        while archived.exists():
            archived = archive / f"{src_file.stem}-{counter}{src_file.suffix}"
            counter += 1
        try:
            src_file.rename(archived)
        except OSError:
            logger.debug("shared memory migration archive failed for %s", src_file, exc_info=True)

    if migrated_any:
        logger.info("queen_memory_v2: migrated shared queen memories to %s", d)
    marker.write_text(
        f"migrated_at={int(time.time())}\nsource={src}\n",
        encoding="utf-8",
    )


def _write_migration_file(
    memory_dir: Path,
    filename: str,
    name: str,
    mem_type: str,
    description: str,
    body: str,
) -> None:
    """Write a single migrated memory file with frontmatter."""
    # Truncate body to respect file size limit (leave room for frontmatter).
    header = f"---\nname: {name}\ndescription: {description}\ntype: {mem_type}\n---\n\n"
    max_body = MAX_FILE_SIZE_BYTES - len(header.encode("utf-8"))
    if len(body.encode("utf-8")) > max_body:
        # Rough truncation — cut at character level then trim to last newline.
        body = body[: max_body - 20]
        nl = body.rfind("\n")
        if nl > 0:
            body = body[:nl]
        body += "\n\n...(truncated during migration)"

    path = memory_dir / filename
    path.write_text(header + body + "\n", encoding="utf-8")

File diff suppressed because it is too large
@@ -0,0 +1,217 @@
"""Per-queen tool configuration sidecar (``tools.json``).

Lives at ``~/.hive/agents/queens/{queen_id}/tools.json`` alongside
``profile.yaml``. Kept separate so identity (name, title, core traits)
stays human-authored and lean, while the machine-managed tool allowlist
can grow (per-tool overrides, audit timestamps, future per-phase rules)
without bloating the profile.

Schema::

    {
      "enabled_mcp_tools": ["read_file", ...] | null,
      "updated_at": "2026-04-21T12:34:56+00:00"
    }

- ``null`` / missing file → default "allow every MCP tool".
- ``[]`` → explicitly disable every MCP tool.
- ``["foo", "bar"]`` → only those MCP tool names pass the filter.

Atomic writes via ``os.replace`` follow the same pattern as
``framework.host.colony_metadata.update_colony_metadata``.
"""

from __future__ import annotations

import json
import logging
import os
import tempfile
from datetime import UTC, datetime
from pathlib import Path
from typing import Any

import yaml

from framework.config import QUEENS_DIR

logger = logging.getLogger(__name__)


def tools_config_path(queen_id: str) -> Path:
    """Return the on-disk path to a queen's ``tools.json``."""
    return QUEENS_DIR / queen_id / "tools.json"


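# Illustrative only — the sidecar's three states, expressed as a
# hypothetical filter over an incoming MCP tool list (not part of this
# module):
#
#   def filter_tools(all_tools: list[str], allowlist: list[str] | None) -> list[str]:
#       if allowlist is None:        # no file / explicit null -> allow everything
#           return all_tools
#       allowed = set(allowlist)     # [] -> empty set -> everything filtered out
#       return [t for t in all_tools if t in allowed]

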
def _atomic_write_json(path: Path, data: dict[str, Any]) -> None:
    """Write ``data`` to ``path`` atomically via tempfile + replace."""
    path.parent.mkdir(parents=True, exist_ok=True)
    fd, tmp = tempfile.mkstemp(
        prefix=".tools.",
        suffix=".json.tmp",
        dir=str(path.parent),
    )
    try:
        with os.fdopen(fd, "w", encoding="utf-8") as fh:
            json.dump(data, fh, indent=2)
            fh.flush()
            os.fsync(fh.fileno())
        os.replace(tmp, path)
    except BaseException:
        try:
            os.unlink(tmp)
        except OSError:
            pass
        raise


def _migrate_from_profile_if_needed(queen_id: str) -> list[str] | None:
    """Hoist a legacy ``enabled_mcp_tools`` field out of ``profile.yaml``.

    Returns the migrated value (or ``None`` if nothing to migrate). After
    migration the sidecar exists on disk and the profile YAML no longer
    contains ``enabled_mcp_tools``. Safe to call repeatedly.
    """
    profile_path = QUEENS_DIR / queen_id / "profile.yaml"
    if not profile_path.exists():
        return None
    try:
        data = yaml.safe_load(profile_path.read_text(encoding="utf-8"))
    except (yaml.YAMLError, OSError):
        logger.warning("Could not read profile.yaml during tools migration: %s", queen_id)
        return None
    if not isinstance(data, dict):
        return None
    if "enabled_mcp_tools" not in data:
        return None

    raw = data.pop("enabled_mcp_tools")
    enabled: list[str] | None
    if raw is None:
        enabled = None
    elif isinstance(raw, list) and all(isinstance(x, str) for x in raw):
        enabled = raw
    else:
        logger.warning(
            "Legacy enabled_mcp_tools on queen %s had unexpected shape %r; dropping",
            queen_id,
            raw,
        )
        enabled = None

    # Write sidecar first, then rewrite profile — if the second step
    # fails we still have the config available and won't re-migrate.
    _atomic_write_json(
        tools_config_path(queen_id),
        {
            "enabled_mcp_tools": enabled,
            "updated_at": datetime.now(UTC).isoformat(),
        },
    )
    profile_path.write_text(
        yaml.safe_dump(data, sort_keys=False, allow_unicode=True),
        encoding="utf-8",
    )
    logger.info(
        "Migrated enabled_mcp_tools for queen %s from profile.yaml to tools.json",
        queen_id,
    )
    return enabled


def tools_config_exists(queen_id: str) -> bool:
    """Return True when the queen has a persisted ``tools.json`` sidecar.

    Used by callers that need to tell an explicit user save apart from a
    fallthrough to the role-based default (both can return the same
    value from ``load_queen_tools_config``).
    """
    return tools_config_path(queen_id).exists()


def delete_queen_tools_config(queen_id: str) -> bool:
    """Delete the queen's ``tools.json`` sidecar if present.

    Returns ``True`` if a file was removed, ``False`` if none existed.
    The next ``load_queen_tools_config`` call falls through to the
    role-based default (or allow-all for unknown queens).
    """
    path = tools_config_path(queen_id)
    if not path.exists():
        return False
    try:
        path.unlink()
        return True
    except OSError:
        logger.warning("Failed to delete %s", path, exc_info=True)
        return False


def load_queen_tools_config(
    queen_id: str,
    mcp_catalog: dict[str, list[dict]] | None = None,
) -> list[str] | None:
    """Return the queen's MCP tool allowlist, or ``None`` for default-allow.

    Order of resolution:
    1. ``tools.json`` sidecar (authoritative; user has saved).
    2. Legacy ``profile.yaml`` field (migrated and deleted on first read).
    3. Role-based default from ``queen_tools_defaults`` when the queen
       is in the known persona table. ``mcp_catalog`` lets the helper
       expand ``@server:NAME`` shorthands; without it, shorthand entries
       are dropped.
    4. ``None`` — default "allow every MCP tool".
    """
    path = tools_config_path(queen_id)
    if path.exists():
        try:
            data = json.loads(path.read_text(encoding="utf-8"))
        except (json.JSONDecodeError, OSError):
            logger.warning("Invalid %s; treating as default-allow", path)
            return None
        if not isinstance(data, dict):
            return None
        raw = data.get("enabled_mcp_tools")
        if raw is None:
            return None
        if isinstance(raw, list) and all(isinstance(x, str) for x in raw):
            return raw
        logger.warning("Unexpected enabled_mcp_tools shape in %s; ignoring", path)
        return None

    migrated = _migrate_from_profile_if_needed(queen_id)
    if migrated is not None:
        return migrated
    # If migration just hoisted an explicit ``null`` out of profile.yaml,
    # a sidecar with allow-all semantics now exists on disk. Honor that
    # over the role default so an explicit user choice wins.
    if tools_config_path(queen_id).exists():
        return None

    # No sidecar, nothing to migrate — fall back to role-based default.
    from framework.agents.queen.queen_tools_defaults import resolve_queen_default_tools

    return resolve_queen_default_tools(queen_id, mcp_catalog)


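# Illustrative only — exercising the resolution order from a caller's
# point of view ("my_custom_queen" is a hypothetical user-added ID):
#
#   load_queen_tools_config("queen_technology")
#     -> sidecar contents if the user saved one, else the role default
#   load_queen_tools_config("my_custom_queen")
#     -> None (unknown persona, no sidecar): allow every MCP tool

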
def update_queen_tools_config(
    queen_id: str,
    enabled_mcp_tools: list[str] | None,
) -> list[str] | None:
    """Persist the queen's MCP allowlist to ``tools.json``.

    Raises ``FileNotFoundError`` if the queen's directory is missing —
    we refuse to silently create a sidecar for a queen that doesn't
    exist.
    """
    queen_dir = QUEENS_DIR / queen_id
    if not queen_dir.exists():
        raise FileNotFoundError(f"Queen directory not found: {queen_id}")
    _atomic_write_json(
        tools_config_path(queen_id),
        {
            "enabled_mcp_tools": enabled_mcp_tools,
            "updated_at": datetime.now(UTC).isoformat(),
        },
    )
    return enabled_mcp_tools
@@ -0,0 +1,352 @@
"""Role-based default tool allowlists for queens.

Every queen inherits the same MCP surface (all servers loaded for the
queen agent), but exposing 94+ tools to every persona clutters the LLM
tool catalog and wastes prompt tokens. This module defines a sensible
default allowlist per queen persona so, e.g., Head of Legal doesn't
see port scanners and Head of Brand & Design doesn't see CSV/SQL tools.

Defaults apply only when the queen has no ``tools.json`` sidecar — the
moment the user saves an allowlist through the Tool Library, the
sidecar becomes authoritative. A DELETE on the tools endpoint removes
the sidecar and brings the queen back to her role default.

Category entries support a ``@server:NAME`` shorthand that expands to
every tool name registered against that MCP server in the current
catalog. This keeps the category table short and drift-free when new
tools are added (e.g. browser_* auto-joins the ``browser`` category).
"""

from __future__ import annotations

import logging
from typing import Any

logger = logging.getLogger(__name__)


# ---------------------------------------------------------------------------
# Categories — reusable bundles of MCP tool names.
# ---------------------------------------------------------------------------
#
# Each category is a flat list of either concrete tool names or the
# ``@server:NAME`` shorthand. The shorthand expands to every tool the
# given MCP server currently exposes (requires a live catalog; when one
# is not available the shorthand is silently dropped so we fall back to
# the named entries only).

_TOOL_CATEGORIES: dict[str, list[str]] = {
    # Unified file ops — read, write, edit, search across the files-tools
    # MCP server (read_file, write_file, edit_file, search_files). pdf_read
    # lives in hive_tools so it's listed explicitly; without it queens
    # cannot read PDF documents by default.
    "file_ops": [
        "@server:files-tools",
        "pdf_read",
    ],
    # Terminal basic — the 3-tool subset queens get out of the box.
    #   terminal_exec — foreground command execution (Bash equivalent)
    #   terminal_rg   — ripgrep content search (Grep equivalent)
    #   terminal_find — glob/find file listing (Glob equivalent)
    "terminal_basic": [
        "terminal_exec",
        "terminal_rg",
        "terminal_find",
    ],
    # Terminal advanced — the power-user tools beyond the basics. Not in
    # any role default; opt in explicitly per-queen via the Tool Library.
    #   terminal_job_*      — background job lifecycle (start/manage/logs)
    #   terminal_output_get — fetch captured output from foreground exec
    #   terminal_pty_*      — persistent PTY sessions (open/run/close)
    "terminal_advanced": [
        "terminal_job_start",
        "terminal_job_manage",
        "terminal_job_logs",
        "terminal_output_get",
        "terminal_pty_open",
        "terminal_pty_run",
        "terminal_pty_close",
    ],
    # Tabular data. CSV/Excel read/write + DuckDB SQL.
    "spreadsheet_advanced": [
        "csv_read",
        "csv_info",
        "csv_write",
        "csv_append",
        "csv_sql",
        "excel_read",
        "excel_info",
        "excel_write",
        "excel_append",
        "excel_search",
        "excel_sheet_list",
        "excel_sql",
    ],
    # Browser lifecycle + read-only inspection (navigation, snapshots, query).
    # Split out from interaction so personas that only need to *observe* pages
    # (e.g. research, status checks) don't pull in click/type/drag/etc.
    "browser_basic": [
        "browser_setup",
        "browser_status",
        "browser_stop",
        "browser_tabs",
        "browser_open",
        "browser_close",
        "browser_activate_tab",
        "browser_navigate",
        "browser_go_back",
        "browser_go_forward",
        "browser_reload",
        "browser_screenshot",
        "browser_snapshot",
        "browser_html",
        "browser_console",
        "browser_evaluate",
        "browser_get_text",
        "browser_get_attribute",
        "browser_get_rect",
        "browser_shadow_query",
    ],
    # Browser interaction — anything that mutates page state (clicks, typing,
    # drag, scrolling, dialogs, file uploads). Pair with browser_basic for
    # full automation; omit for read-only personas.
    "browser_interaction": [
        "browser_click",
        "browser_click_coordinate",
        "browser_type",
        "browser_type_focused",
        "browser_press",
        "browser_press_at",
        "browser_hover",
        "browser_hover_coordinate",
        "browser_select",
        "browser_scroll",
        "browser_drag",
        "browser_wait",
        "browser_resize",
        "browser_upload",
    ],
    # Research — paper search, Wikipedia, ad-hoc web scrape. Pair with
    # browser_basic for richer site-by-site research; this category is the
    # lightweight always-available fallback.
    "research": [
        "web_scrape",
        "pdf_read",
    ],
    # Security — defensive scanning and reconnaissance. Engineering-only
    # surface; the rest of the queens shouldn't see port scanners.
    "security": [
        "port_scan",
        "dns_security_scan",
        "http_headers_scan",
        "ssl_tls_scan",
        "subdomain_enumerate",
        "tech_stack_detect",
        "risk_score",
    ],
    # Lightweight context helpers — good default for every queen.
    "context_awareness": [
        "get_current_time",
        "get_account_info",
    ],
    # BI / financial chart + diagram rendering. Calling chart_render
    # both embeds the chart live in chat and produces a downloadable PNG.
    "charts": [
        "@server:chart-tools",
    ],
}


# ---------------------------------------------------------------------------
# Per-queen mapping.
# ---------------------------------------------------------------------------
#
# Built from the queen personas in ``queen_profiles.DEFAULT_QUEENS``. The
# goal is "just enough" — a queen should see tools she'd plausibly call
# for her stated role, nothing more. Users curate further via the Tool
# Library if they want.
#
# A queen whose ID is NOT in this map falls through to "allow every MCP
# tool" (the original behavior), which keeps the system compatible with
# user-added custom queen IDs that we don't know about.

QUEEN_DEFAULT_CATEGORIES: dict[str, list[str]] = {
    # Head of Technology — builds and operates systems. Security tools
    # (port_scan, subdomain_enumerate, etc.) are intentionally NOT in the
    # default — users opt in via the Tool Library when an engagement
    # actually needs reconnaissance.
    "queen_technology": [
        "file_ops",
        "terminal_basic",
        "browser_basic",
        "browser_interaction",
        "research",
        "context_awareness",
        "charts",
    ],
    # Head of Growth — data, experiments, competitor research; no security.
    "queen_growth": [
        "file_ops",
        "terminal_basic",
        "browser_basic",
        "browser_interaction",
        "research",
        "context_awareness",
        "charts",
    ],
    # Head of Product Strategy — user research + roadmaps; no security.
    "queen_product_strategy": [
        "file_ops",
        "terminal_basic",
        "browser_basic",
        "browser_interaction",
        "research",
        "context_awareness",
        "charts",
    ],
    # Head of Finance — financial models (CSV/Excel heavy), market research.
    "queen_finance_fundraising": [
        "file_ops",
        "terminal_basic",
        "spreadsheet_advanced",
        "browser_basic",
        "browser_interaction",
        "research",
        "context_awareness",
        "charts",
    ],
    # Head of Legal — reads contracts/PDFs, researches; no data/security.
    "queen_legal": [
        "file_ops",
        "terminal_basic",
        "browser_basic",
        "browser_interaction",
        "research",
        "context_awareness",
    ],
    # Head of Brand & Design — visual refs, style guides; no data/security.
    "queen_brand_design": [
        "file_ops",
        "terminal_basic",
        "browser_basic",
        "browser_interaction",
        "research",
        "context_awareness",
    ],
    # Head of Talent — candidate pipelines, resumes; data + browser heavy.
    "queen_talent": [
        "file_ops",
        "terminal_basic",
        "browser_basic",
        "browser_interaction",
        "research",
        "context_awareness",
    ],
    # Head of Operations — processes, automation, observability.
    "queen_operations": [
        "file_ops",
        "terminal_basic",
        "spreadsheet_advanced",
        "browser_basic",
        "browser_interaction",
        "context_awareness",
        "charts",
    ],
}


def has_role_default(queen_id: str) -> bool:
    """Return True when ``queen_id`` is known to the category table."""
    return queen_id in QUEEN_DEFAULT_CATEGORIES


def list_category_names() -> list[str]:
    """Return every category name defined in the table, in declaration order."""
    return list(_TOOL_CATEGORIES.keys())


def queen_role_categories(queen_id: str) -> list[str]:
    """Return the category names assigned to ``queen_id`` by role default.

    Returns an empty list for queens not in the persona table (they fall
    through to allow-all and have no implicit category membership).
    """
    return list(QUEEN_DEFAULT_CATEGORIES.get(queen_id, []))


def resolve_category_tools(
|
||||
category: str,
|
||||
mcp_catalog: dict[str, list[dict[str, Any]]] | None = None,
|
||||
) -> list[str]:
|
||||
"""Expand a single category to its concrete tool names.
|
||||
|
||||
Mirrors ``resolve_queen_default_tools`` but for a single category, so
|
||||
callers (e.g. the Tool Library API) can present per-category tool
|
||||
membership without re-implementing the ``@server:NAME`` shorthand
|
||||
expansion.
|
||||
"""
|
||||
names: list[str] = []
|
||||
seen: set[str] = set()
|
||||
for entry in _TOOL_CATEGORIES.get(category, []):
|
||||
if entry.startswith("@server:"):
|
||||
server_name = entry[len("@server:") :]
|
||||
if mcp_catalog is None:
|
||||
continue
|
||||
for tool in mcp_catalog.get(server_name, []) or []:
|
||||
tname = tool.get("name") if isinstance(tool, dict) else None
|
||||
if tname and tname not in seen:
|
||||
seen.add(tname)
|
||||
names.append(tname)
|
||||
elif entry not in seen:
|
||||
seen.add(entry)
|
||||
names.append(entry)
|
||||
return names
|
||||
|
||||
|
||||
def resolve_queen_default_tools(
|
||||
queen_id: str,
|
||||
mcp_catalog: dict[str, list[dict[str, Any]]] | None = None,
|
||||
) -> list[str] | None:
|
||||
"""Return the role-based default allowlist for ``queen_id``.
|
||||
|
||||
Arguments:
|
||||
queen_id: Profile ID (e.g. ``"queen_technology"``).
|
||||
mcp_catalog: Optional mapping of ``{server_name: [{"name": ...}, ...]}``
|
||||
used to expand ``@server:NAME`` shorthands in categories.
|
||||
When absent, shorthand entries are dropped and the result
|
||||
contains only the explicitly-named tools.
|
||||
|
||||
Returns:
|
||||
A deduplicated list of tool names, or ``None`` if the queen has
|
||||
no role entry (caller should treat as "allow every MCP tool").
|
||||
"""
|
||||
categories = QUEEN_DEFAULT_CATEGORIES.get(queen_id)
|
||||
if not categories:
|
||||
return None
|
||||
|
||||
names: list[str] = []
|
||||
seen: set[str] = set()
|
||||
|
||||
def _add(name: str) -> None:
|
||||
if name and name not in seen:
|
||||
seen.add(name)
|
||||
names.append(name)
|
||||
|
||||
for cat in categories:
|
||||
for entry in _TOOL_CATEGORIES.get(cat, []):
|
||||
if entry.startswith("@server:"):
|
||||
server_name = entry[len("@server:") :]
|
||||
if mcp_catalog is None:
|
||||
logger.debug(
|
||||
"resolve_queen_default_tools: catalog missing; cannot expand %s",
|
||||
entry,
|
||||
)
|
||||
continue
|
||||
for tool in mcp_catalog.get(server_name, []) or []:
|
||||
tname = tool.get("name") if isinstance(tool, dict) else None
|
||||
if tname:
|
||||
_add(tname)
|
||||
else:
|
||||
_add(entry)
|
||||
|
||||
return names
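
A quick sketch of how the two resolvers behave, using a hypothetical category table and MCP catalog (the values below are illustrative, not the real `_TOOL_CATEGORIES` contents):

```python
# Hypothetical data; the real _TOOL_CATEGORIES table is defined above.
mock_categories = {"file_ops": ["@server:hive_tools", "grep"]}
mock_catalog = {"hive_tools": [{"name": "file_read"}, {"name": "file_write"}]}

# resolve_category_tools("file_ops", mock_catalog)
#   -> ["file_read", "file_write", "grep"]   (shorthand expanded, order kept)
# resolve_category_tools("file_ops", None)
#   -> ["grep"]                              (shorthand dropped without a catalog)
# resolve_queen_default_tools("queen_legal", mock_catalog)
#   -> union of every category listed for that queen, deduplicated first-seen
# resolve_queen_default_tools("unknown_queen", mock_catalog)
#   -> None                                  (caller treats this as allow-all)
```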
@@ -1,11 +1,11 @@
-"""Recall selector — pre-turn memory selection for queen and worker memory.
+"""Recall selector — pre-turn memory selection for the queen.

 Before each conversation turn the system:
-1. Scans the memory directory for ``.md`` files (cap: 200).
+1. Scans one or more memory directories for ``.md`` files (cap: 200 each).
 2. Reads headers (frontmatter + first 30 lines).
-3. Uses a single LLM call with structured JSON output to pick the ~5
-   most relevant memories.
-4. Injects them into context with staleness warnings for older ones.
+3. Uses an LLM call with structured JSON output to pick the most relevant
+   memories for each scope.
+4. Injects them into the system prompt.

 The selector only sees the user's query string — no full conversation
 context. This keeps it cheap and fast. Errors are caught and return
@@ -20,9 +20,8 @@ from pathlib import Path
 from typing import Any

 from framework.agents.queen.queen_memory_v2 import (
-    MEMORY_DIR,
     format_memory_manifest,
-    memory_freshness_text,
+    global_memory_dir as _default_global_memory_dir,
     scan_memory_files,
 )

@@ -32,29 +31,6 @@ logger = logging.getLogger(__name__)

-# Structured output schema
-# ---------------------------------------------------------------------------
-
-RECALL_SCHEMA: dict[str, Any] = {
-    "type": "json_schema",
-    "json_schema": {
-        "name": "memory_selection",
-        "strict": True,
-        "schema": {
-            "type": "object",
-            "properties": {
-                "selected_memories": {
-                    "type": "array",
-                    "items": {"type": "string"},
-                },
-            },
-            "required": ["selected_memories"],
-            "additionalProperties": False,
-        },
-    },
-}
-
 # ---------------------------------------------------------------------------
 # System prompt
 # ---------------------------------------------------------------------------

 SELECT_MEMORIES_SYSTEM_PROMPT = """\
 You are selecting memories that will be useful to the Queen agent as it \
 processes a user's query.
@@ -72,9 +48,6 @@ name and description.
 query, then do not include it in your list. Be selective and discerning.
 - If there are no memories in the list that would clearly be useful, \
 return an empty list.
-- If a list of recently-used tools is provided, do not select memories \
-that are usage reference or API documentation for those tools (the Queen \
-is already exercising them). Still select warnings or gotchas about them.
 """

 # ---------------------------------------------------------------------------
@@ -86,7 +59,6 @@ async def select_memories(
     query: str,
     llm: Any,
     memory_dir: Path | None = None,
-    active_tools: list[str] | None = None,
     *,
     max_results: int = 5,
 ) -> list[str]:
@@ -94,51 +66,83 @@ async def select_memories(

     Returns a list of filenames. Best-effort: on any error returns ``[]``.
     """
-    mem_dir = memory_dir or MEMORY_DIR
+    mem_dir = memory_dir or _default_global_memory_dir()
     files = scan_memory_files(mem_dir)
     if not files:
         logger.debug("recall: no memory files found, skipping selection")
         return []

-    logger.debug("recall: selecting from %d memory files for query: %.80s", len(files), query)
+    logger.debug("recall: selecting from %d memories for query: %.100s", len(files), query)
     manifest = format_memory_manifest(files)

-    user_msg_parts = [f"## User query\n\n{query}\n\n## Available memories\n\n{manifest}"]
-    if active_tools:
-        user_msg_parts.append(f"\n\n## Recently-used tools\n\n{', '.join(active_tools)}")
-
-    user_msg = "".join(user_msg_parts)
+    user_msg = f"## User query\n\n{query}\n\n## Available memories\n\n{manifest}"

     try:
         resp = await llm.acomplete(
             messages=[{"role": "user", "content": user_msg}],
             system=SELECT_MEMORIES_SYSTEM_PROMPT,
-            max_tokens=512,
-            response_format=RECALL_SCHEMA,
+            max_tokens=1024,
+            response_format={"type": "json_object"},
         )
-        data = json.loads(resp.content)
+        raw = (resp.content or "").strip()
+        if not raw:
+            logger.warning(
+                "recall: LLM returned empty response (model=%s, stop=%s)",
+                resp.model,
+                resp.stop_reason,
+            )
+            return []
+        # Some models wrap JSON in markdown fences or add preamble text.
+        # Try to extract the JSON object if raw parse fails.
+        try:
+            data = json.loads(raw)
+        except json.JSONDecodeError:
+            import re
+
+            m = re.search(r"\{.*\}", raw, re.DOTALL)
+            if m:
+                data = json.loads(m.group())
+            else:
+                logger.warning("recall: LLM returned non-JSON: %.200s", raw)
+                return []
         selected = data.get("selected_memories", [])
         # Validate: only return filenames that actually exist.
         valid_names = {f.filename for f in files}
         result = [s for s in selected if s in valid_names][:max_results]
         logger.debug("recall: selected %d memories: %s", len(result), result)
         return result
-    except Exception:
-        logger.debug("recall: memory selection failed, returning []", exc_info=True)
+    except Exception as exc:
+        logger.warning("recall: memory selection failed (%s), returning []", exc)
         return []


+def _format_relative_age(mtime: float) -> str | None:
+    """Return age description if memory is older than 48 hours.
+
+    Returns None if 48 hours or newer, otherwise returns "X days old".
+    """
+    import time
+
+    age_seconds = time.time() - mtime
+    hours = age_seconds / 3600
+    if hours <= 48:
+        return None
+    days = int(age_seconds / 86400)
+    if days == 1:
+        return "1 day old"
+    return f"{days} days old"
+
+
 def format_recall_injection(
     filenames: list[str],
     memory_dir: Path | None = None,
     *,
-    heading: str = "Selected Memories",
+    label: str = "Global Memories",
 ) -> str:
     """Read selected memory files and format for system prompt injection.

-    Prepends a staleness warning for memories older than 1 day.
+    Includes relative timestamp (e.g., "3 days old") for memories older than 48 hours.
     """
-    mem_dir = memory_dir or MEMORY_DIR
-
+    mem_dir = memory_dir or _default_global_memory_dir()
     if not filenames:
         return ""

@@ -149,88 +153,63 @@ def format_recall_injection(
             continue
         try:
             content = path.read_text(encoding="utf-8").strip()
+            # Get file modification time for age calculation
+            mtime = path.stat().st_mtime
+            age_note = _format_relative_age(mtime)
         except OSError:
             continue

-        try:
-            mtime = path.stat().st_mtime
-        except OSError:
-            mtime = 0.0
-
-        freshness = memory_freshness_text(mtime)
-        header = f"### {fname}"
-        if freshness:
-            header += f"\n\n> {freshness}"
+        # Build header with optional age note
+        if age_note:
+            header = f"### {fname} ({age_note})"
+        else:
+            header = f"### {fname}"
         blocks.append(f"{header}\n\n{content}")

     if not blocks:
         return ""

     body = "\n\n---\n\n".join(blocks)
     logger.debug("recall: injecting %d memory blocks into context", len(blocks))
-    return f"--- {heading} ---\n\n{body}\n\n--- End {heading} ---"
+    return f"--- {label} ---\n\n{body}\n\n--- End {label} ---"


 # ---------------------------------------------------------------------------
 # Cache update (called after each queen turn)
 # ---------------------------------------------------------------------------


-async def update_recall_cache(
-    session_dir: Path,
+async def build_scoped_recall_blocks(
     query: str,
     llm: Any,
-    phase_state: Any | None = None,
-    memory_dir: Path | None = None,
     *,
-    cache_setter: Any = None,
-    heading: str = "Selected Memories",
-    active_tools: list[str] | None = None,
-) -> None:
-    """Update the recall cache on *phase_state* for the next turn.
+    global_memory_dir: Path | None = None,
+    queen_memory_dir: Path | None = None,
+    queen_id: str | None = None,
+    global_max_results: int = 3,
+    queen_max_results: int = 3,
+) -> tuple[str, str]:
+    """Build separate recall blocks for global and queen-scoped memory."""
+    global_dir = global_memory_dir or _default_global_memory_dir()
+    global_selected = await select_memories(
+        query,
+        llm,
+        memory_dir=global_dir,
+        max_results=global_max_results,
+    )
+    global_block = format_recall_injection(
+        global_selected,
+        memory_dir=global_dir,
+        label="Global Memories",
+    )

-    Reads the latest user message from conversation parts to use as the
-    query for memory selection.
-    """
-    mem_dir = memory_dir or MEMORY_DIR
-
-    # Extract latest user message as the query.
-    query = _extract_latest_user_query(session_dir)
-    if not query:
-        logger.debug("recall: no user query found, skipping cache update")
-        return
-    logger.debug("recall: updating cache for query: %.80s", query)
-
-    try:
-        selected = await select_memories(
+    queen_block = ""
+    if queen_memory_dir is not None:
+        queen_selected = await select_memories(
             query,
             llm,
-            mem_dir,
-            active_tools=active_tools,
+            memory_dir=queen_memory_dir,
+            max_results=queen_max_results,
         )
+        queen_label = f"Queen Memories: {queen_id}" if queen_id else "Queen Memories"
+        queen_block = format_recall_injection(
+            queen_selected,
+            memory_dir=queen_memory_dir,
+            label=queen_label,
+        )
-        injection = format_recall_injection(selected, mem_dir, heading=heading)
-        if cache_setter is not None:
-            cache_setter(injection)
-        elif phase_state is not None:
-            phase_state._cached_recall_block = injection
-    except Exception:
-        logger.debug("recall: cache update failed", exc_info=True)
-
-
-def _extract_latest_user_query(session_dir: Path) -> str:
-    """Read the most recent user message from conversation parts."""
-    parts_dir = session_dir / "conversations" / "parts"
-    if not parts_dir.is_dir():
-        return ""
-
-    part_files = sorted(parts_dir.glob("*.json"), reverse=True)
-    for f in part_files[:20]:  # Look back at most 20 messages.
-        try:
-            data = json.loads(f.read_text(encoding="utf-8"))
-            if data.get("role") == "user":
-                content = str(data.get("content", "")).strip()
-                if content:
-                    # Truncate very long queries.
-                    return content[:1000] if len(content) > 1000 else content
-        except (json.JSONDecodeError, OSError):
-            continue
-    return ""
+    return global_block, queen_block
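
Read as a whole, the new entry point replaces the old cache-update flow: the caller now passes the query explicitly and injects the two returned blocks itself. A hypothetical call site (paths, prompt variable names, and the queen ID are illustrative):

```python
global_block, queen_block = await build_scoped_recall_blocks(
    query="summarize yesterday's standup notes",
    llm=llm,
    queen_memory_dir=Path.home() / ".hive" / "memories" / "agents" / "queens" / "default",
    queen_id="queen_operations",
)

# Either block may be "" when nothing relevant was selected.
system_prompt = "\n\n".join(
    part for part in (base_prompt, global_block, queen_block) if part
)
```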
@@ -13,7 +13,7 @@
 6. **Calling set_output in the same turn as tool calls** — Call set_output in a SEPARATE turn.

 ## File Template Errors
-7. **Wrong import paths** — Use `from framework.graph import ...`, NOT `from core.framework.graph import ...`.
+7. **Wrong import paths** — Use `from framework.orchestrator import ...`, NOT `from framework.graph import ...` or `from core.framework...`.
 8. **Missing storage path** — Agent class must set `self._storage_path = Path.home() / ".hive" / "agents" / "agent_name"`.
 9. **Missing mcp_servers.json** — Without this, the agent has no tools at runtime.
 10. **Bare `python` command** — Use `"command": "uv"` with args `["run", "python", ...]`.

@@ -55,7 +55,7 @@ metadata = AgentMetadata()
 ```python
 """Node definitions for My Agent."""

-from framework.graph import NodeSpec
+from framework.orchestrator import NodeSpec

 # Node 1: Process (autonomous entry node)
 # The queen handles intake and passes structured input via
@@ -123,14 +123,15 @@ __all__ = ["process_node", "handoff_node"]

 from pathlib import Path

-from framework.graph import EdgeSpec, EdgeCondition, Goal, SuccessCriterion, Constraint
-from framework.graph.edge import GraphSpec
-from framework.graph.executor import ExecutionResult
-from framework.graph.checkpoint_config import CheckpointConfig
+from framework.orchestrator import EdgeSpec, EdgeCondition, Goal, SuccessCriterion, Constraint
+from framework.orchestrator.edge import GraphSpec
+from framework.orchestrator.orchestrator import ExecutionResult
+from framework.orchestrator.checkpoint_config import CheckpointConfig
 from framework.llm import LiteLLMProvider
-from framework.runner.tool_registry import ToolRegistry
-from framework.runtime.agent_runtime import AgentRuntime, create_agent_runtime
-from framework.runtime.execution_stream import EntryPointSpec
+from framework.loader.tool_registry import ToolRegistry
+from framework.host.agent_host import AgentHost
+from framework.host.execution_manager import EntryPointSpec

 from .config import default_config, metadata
 from .nodes import process_node, handoff_node
@@ -227,7 +228,7 @@ class MyAgent:
         tools = list(self._tool_registry.get_tools().values())
         tool_executor = self._tool_registry.get_executor()
         self._graph = self._build_graph()
-        self._agent_runtime = create_agent_runtime(
+        self._agent_runtime = AgentHost(
             graph=self._graph, goal=self.goal, storage_path=self._storage_path,
             entry_points=[EntryPointSpec(id="default", name="Default", entry_node=self.entry_node,
                                          trigger_type="manual", isolation_level="shared")],
@@ -460,8 +461,8 @@ def tui():
     from framework.tui.app import AdenTUI
     from framework.llm import LiteLLMProvider
     from framework.runner.tool_registry import ToolRegistry
-    from framework.runtime.agent_runtime import create_agent_runtime
-    from framework.runtime.execution_stream import EntryPointSpec
+    from framework.host.agent_host import AgentHost
+    from framework.host.execution_manager import EntryPointSpec

     async def run_tui():
         agent = MyAgent()
@@ -471,7 +472,7 @@ def tui():
         mcp_cfg = Path(__file__).parent / "mcp_servers.json"
         if mcp_cfg.exists(): agent._tool_registry.load_mcp_config(mcp_cfg)
         llm = LiteLLMProvider(model=agent.config.model, api_key=agent.config.api_key, api_base=agent.config.api_base)
-        runtime = create_agent_runtime(
+        runtime = AgentHost(
             graph=agent._build_graph(), goal=agent.goal, storage_path=storage,
             entry_points=[EntryPointSpec(id="start", name="Start", entry_node="process", trigger_type="manual", isolation_level="isolated")],
             llm=llm, tools=list(agent._tool_registry.get_tools().values()), tool_executor=agent._tool_registry.get_executor())
@@ -509,17 +510,17 @@ if __name__ == "__main__":

 ## mcp_servers.json

-> **Auto-generated.** `initialize_and_build_agent` creates this file with hive-tools
+> **Auto-generated.** `initialize_and_build_agent` creates this file with hive_tools
 > as the default. Only edit manually to add additional MCP servers.

 ```json
 {
-  "hive-tools": {
+  "hive_tools": {
     "transport": "stdio",
     "command": "uv",
     "args": ["run", "python", "mcp_server.py", "--stdio"],
     "cwd": "../../tools",
-    "description": "Hive tools MCP server"
+    "description": "hive_tools MCP server"
   }
 }
 ```

@@ -41,7 +41,7 @@ loop_config:

 # MCP servers to connect (resolved by name from ~/.hive/mcp_registry/)
 mcp_servers:
-  - name: hive-tools
+  - name: hive_tools
   - name: gcu-tools

 nodes:
@@ -200,7 +200,7 @@ The `mcp_servers.json` file is still loaded automatically if present alongside

 ```yaml
 mcp_servers:
-  - name: hive-tools
+  - name: hive_tools
   - name: gcu-tools
 ```

@@ -36,7 +36,7 @@ If `agent.py` exists (legacy), it's loaded as a Python module instead.
     "max_context_tokens": 32000
   },
   "mcp_servers": [
-    {"name": "hive-tools"},
+    {"name": "hive_tools"},
     {"name": "gcu-tools"}
   ],
   "variables": {

@@ -17,20 +17,43 @@ Use browser nodes (with `tools: {policy: "all"}`) when:
 ## Available Browser Tools

 All tools are prefixed with `browser_`:
-- `browser_start`, `browser_open` -- launch/navigate
-- `browser_click`, `browser_fill`, `browser_type` -- interact
-- `browser_snapshot` -- read page content (preferred over screenshot)
-- `browser_screenshot` -- visual capture
-- `browser_scroll`, `browser_wait` -- navigation helpers
-- `browser_evaluate` -- run JavaScript
+- `browser_open`, `browser_navigate` — both lazy-create the browser context, so a single `browser_open(url)` covers the cold path. To recover from a stale context, call `browser_stop` then `browser_open(url)` again.
+- `browser_click`, `browser_click_coordinate`, `browser_type`, `browser_type_focused` — interact
+- `browser_press` (with optional `modifiers=["ctrl"]` etc.) — keyboard shortcuts
+- `browser_snapshot` — compact accessibility-tree read (structured)
+<!-- vision-only -->
+- `browser_screenshot` — visual capture (annotated PNG)
+<!-- /vision-only -->
+- `browser_shadow_query`, `browser_get_rect` — locate elements (shadow-piercing via `>>>`)
+- `browser_scroll`, `browser_wait` — navigation helpers
+- `browser_evaluate` — run JavaScript
+- `browser_close` — tab cleanup (call per tab; closes the active tab when `tab_id` is omitted)

-## System Prompt Tips for Browser Nodes
+## Pick the right reading tool

+**`browser_snapshot`** — compact accessibility tree of interactive elements. Fast, cheap, good for static or form-heavy pages where the DOM matches what's visually rendered (documentation, simple dashboards, search results, settings pages).
+
+**`browser_screenshot`** — visual capture + metadata (`cssWidth`, `devicePixelRatio`, scale fields). Use this when `browser_snapshot` does not show the thing you need, when refs look stale, or when visual position/layout matters. This often happens on complex SPAs — LinkedIn, Twitter/X, Reddit, Gmail, Notion, Slack, Discord — and on sites using shadow DOM, virtual scrolling, React reconciliation, or dynamic layout.
+
+Neither tool is "preferred" universally — they're for different jobs. Start with snapshot for page structure and ordinary controls; use screenshot as the fallback when snapshot can't find or verify the visible target. Activate the `browser-automation` skill for the full decision tree.
+
+## Coordinate rule
+
+Every browser tool that takes or returns coordinates operates in **fractions of the viewport (0..1 for both axes)**. Read a target's proportional position off `browser_screenshot` ("~35% from the left, ~20% from the top" → `(0.35, 0.20)`) and pass that to `browser_click_coordinate` / `browser_hover_coordinate` / `browser_press_at`. `browser_get_rect` and `browser_shadow_query` return `rect.cx` / `rect.cy` as fractions. The tools multiply by `cssWidth` / `cssHeight` internally — no scale awareness required. Fractions are used because every vision model (Claude, GPT-4o, Gemini, local VLMs) resizes/tiles images differently; proportions are invariant. Avoid raw `getBoundingClientRect()` via `browser_evaluate` for coord lookup; use `browser_get_rect` instead.
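
A worked example of the fraction convention (the selector, argument names, and numbers are invented for illustration):

```
browser_screenshot()                       # target sits ~35% from left, ~20% from top
browser_click_coordinate(x=0.35, y=0.20)   # pass fractions, never pixels

rect = browser_get_rect(selector="div.composer")
browser_click_coordinate(x=rect.cx, y=rect.cy)   # rect.cx / rect.cy are already fractions
```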
+## System prompt tips for browser nodes

 ```
-1. Use browser_snapshot() to read page content (NOT browser_get_text)
-2. Use browser_wait(seconds=2-3) after navigation for page load
-3. If you hit an auth wall, call set_output with an error and move on
-4. Keep tool calls per turn <= 10 for reliability
+1. Start with browser_snapshot or the snapshot returned by the latest interaction.
+2. If the target is missing, ambiguous, stale, or visibly present but absent from the tree,
+   use browser_screenshot to orient and then click by fractional coordinates.
+3. Before typing into a rich-text editor (X compose, LinkedIn DM, Gmail, Reddit),
+   click the input area first with browser_click_coordinate so React / Draft.js /
+   Lexical register a native focus event, then use browser_type_focused(text=...)
+   for shadow-DOM inputs or browser_type(selector, text) for light-DOM inputs.
+4. Use browser_wait(seconds=2-3) after navigation for SPA hydration.
+5. If you hit an auth wall, call set_output with an error and move on.
+6. Keep tool calls per turn <= 10 for reliability.
 ```

 ## Example
@@ -43,7 +66,7 @@ All tools are prefixed with `browser_`:
     "tools": {"policy": "all"},
     "input_keys": ["search_url"],
     "output_keys": ["profiles"],
-    "system_prompt": "Navigate to the search URL, paginate through results..."
+    "system_prompt": "Navigate to the search URL via browser_navigate(wait_until='load', timeout_ms=20000). Wait 3s for SPA hydration. Use the returned snapshot to look for result cards first. If the cards are missing, stale, or visually present but absent from the tree, use browser_screenshot to orient; paginate through results by scrolling and use screenshots only when the snapshot cannot find or verify the visible cards..."
   }
 ```
@@ -51,3 +74,7 @@ Connected via regular edges:
 ```
 search-setup -> scan-profiles -> process-results
 ```

+## Further detail
+
+For rich-text editor quirks (Lexical, Draft.js, ProseMirror), shadow-DOM shortcuts, `beforeunload` dialog neutralization, Trusted Types CSP on LinkedIn, keyboard shortcut dispatch, and per-site selector tables — **activate the `browser-automation` skill**. That skill has the full verified guidance and is refreshed against real production sites.
File diff suppressed because it is too large
@@ -2,17 +2,22 @@
 Command-line interface for Aden Hive.

 Usage:
-    hive run exports/my-agent --input '{"key": "value"}'
-    hive info exports/my-agent
-    hive validate exports/my-agent
-    hive list exports/
-    hive shell exports/my-agent
+    hive serve                          Start the HTTP API server
+    hive open                           Start the server and open the dashboard
+    hive queen list                     List queen profiles
+    hive queen show <queen_id>          Inspect a queen profile
+    hive queen sessions <queen_id>      List a queen's sessions
+    hive colony list                    List colonies on disk
+    hive colony info <name>             Inspect a colony
+    hive colony delete <name>           Delete a colony
+    hive session list                   List live sessions (use --cold for on-disk)
+    hive session stop <session_id>      Stop a live session
+    hive chat <session_id> "msg"        Send a message to a live queen

-Testing commands:
-    hive test-run <agent_path> --goal <goal_id>
-    hive test-debug <agent_path> <test_name>
-    hive test-list <agent_path>
-    hive test-stats <agent_path>
+Subsystems:
+    hive skill ...                      Manage skills (~/.hive/skills/)
+    hive mcp ...                        Manage MCP servers
+    hive debugger                       LLM debug log viewer
 """

 import argparse
@@ -20,85 +25,56 @@ import sys
 from pathlib import Path


-def _configure_paths():
-    """Auto-configure sys.path so agents in exports/ are discoverable.
+def _configure_paths() -> None:
+    """Auto-configure sys.path so the framework is importable from any cwd.

-    Resolves the project root by walking up from this file (framework/cli.py lives
-    inside core/framework/) or from CWD, then adds the exports/ directory to sys.path
-    if it exists. This eliminates the need for manual PYTHONPATH configuration.
+    Walks up from this file to find the project root, then ensures
+    `core/` is on sys.path so `framework.*` imports resolve when the
+    package isn't installed via `pip install -e .`.
     """
-    # Strategy 1: resolve relative to this file (works when installed via pip install -e core/)
     framework_dir = Path(__file__).resolve().parent  # core/framework/
     core_dir = framework_dir.parent  # core/
     project_root = core_dir.parent  # project root

-    # Strategy 2: if project_root doesn't look right, fall back to CWD
-    if not (project_root / "exports").is_dir() and not (project_root / "core").is_dir():
+    if not (project_root / "core").is_dir():
         project_root = Path.cwd()

-    # Add exports/ to sys.path so agents are importable as top-level packages
-    exports_dir = project_root / "exports"
-    if exports_dir.is_dir():
-        exports_str = str(exports_dir)
-        if exports_str not in sys.path:
-            sys.path.insert(0, exports_str)
-
-    # Add examples/templates/ to sys.path so template agents are importable
-    templates_dir = project_root / "examples" / "templates"
-    if templates_dir.is_dir():
-        templates_str = str(templates_dir)
-        if templates_str not in sys.path:
-            sys.path.insert(0, templates_str)
-
-    # Ensure core/ is also in sys.path (for non-editable-install scenarios)
     core_str = str(project_root / "core")
     if (project_root / "core").is_dir() and core_str not in sys.path:
         sys.path.insert(0, core_str)

-    # Add core/framework/agents/ so framework agents are importable as top-level packages
-    framework_agents_dir = project_root / "core" / "framework" / "agents"
-    if framework_agents_dir.is_dir():
-        fa_str = str(framework_agents_dir)
-        if fa_str not in sys.path:
-            sys.path.insert(0, fa_str)


-def main():
+def main() -> None:
     _configure_paths()

     parser = argparse.ArgumentParser(
         prog="hive",
-        description="Aden Hive - Build and run goal-driven agents",
+        description="Aden Hive — Queens, colonies, and live agent sessions",
     )
     parser.add_argument(
         "--model",
         default="claude-haiku-4-5-20251001",
-        help="Anthropic model to use",
+        help="Default LLM model (Anthropic ID)",
     )

     subparsers = parser.add_subparsers(dest="command", required=True)

-    # Register runner commands (run, info, validate, list, shell)
+    # Core commands: serve, open, queen, colony, session, chat
     from framework.loader.cli import register_commands

     register_commands(subparsers)

     # Register testing commands (test-run, test-debug, test-list, test-stats)
     from framework.testing.cli import register_testing_commands

     register_testing_commands(subparsers)

-    # Register skill commands (skill list, skill trust, ...)
+    # Skill management (~/.hive/skills/)
     from framework.skills.cli import register_skill_commands

     register_skill_commands(subparsers)

-    # Register debugger commands (debugger)
+    # LLM debug log viewer
     from framework.debugger.cli import register_debugger_commands

     register_debugger_commands(subparsers)

-    # Register MCP registry commands (mcp install, mcp add, ...)
+    # MCP server registry
    from framework.loader.mcp_registry_cli import register_mcp_commands

     register_mcp_commands(subparsers)
@@ -12,13 +12,47 @@ from dataclasses import dataclass, field
 from pathlib import Path
 from typing import Any

-from framework.orchestrator.edge import DEFAULT_MAX_TOKENS
+DEFAULT_MAX_TOKENS = 8192
+
+# ---------------------------------------------------------------------------
+# Hive home directory structure
+# ---------------------------------------------------------------------------
+
+HIVE_HOME = Path.home() / ".hive"
+QUEENS_DIR = HIVE_HOME / "agents" / "queens"
+COLONIES_DIR = HIVE_HOME / "colonies"
+MEMORIES_DIR = HIVE_HOME / "memories"
+
+
+def queen_dir(queen_name: str = "default") -> Path:
+    """Return the storage directory for a named queen agent."""
+    return QUEENS_DIR / queen_name
+
+
+def colony_dir(colony_name: str) -> Path:
+    """Return the directory for a named colony."""
+    return COLONIES_DIR / colony_name
+
+
+def memory_dir(scope: str, name: str | None = None) -> Path:
+    """Return memory dir for a scope.
+
+    Examples::
+
+        memory_dir("global")                   -> ~/.hive/memories/global
+        memory_dir("colonies", "my_agent")     -> ~/.hive/memories/colonies/my_agent
+        memory_dir("agents/queens", "default") -> ~/.hive/memories/agents/queens/default
+        memory_dir("agents", "worker_name")    -> ~/.hive/memories/agents/worker_name
+    """
+    base = MEMORIES_DIR / scope
+    return base / name if name else base
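
A short sketch of the new helpers in use (the queen, colony, and argument names are illustrative):

```python
queen_dir("default")                    # ~/.hive/agents/queens/default
colony_dir("my_colony")                 # ~/.hive/colonies/my_colony
memory_dir("global")                    # ~/.hive/memories/global
memory_dir("agents/queens", "default")  # the per-queen scope that can feed
                                        # build_scoped_recall_blocks' queen_memory_dir
```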

 # ---------------------------------------------------------------------------
 # Low-level config file access
 # ---------------------------------------------------------------------------

-HIVE_CONFIG_FILE = Path.home() / ".hive" / "configuration.json"
+HIVE_CONFIG_FILE = HIVE_HOME / "configuration.json"

 # Hive LLM router endpoint (Anthropic-compatible).
 # litellm's Anthropic handler appends /v1/messages, so this is just the base host.
@@ -42,6 +76,48 @@ def get_hive_config() -> dict[str, Any]:
     return {}


+# ---------------------------------------------------------------------------
+# Credential store helpers (for BYOK keys)
+# ---------------------------------------------------------------------------
+
+# Provider name → credential store ID mapping
+_PROVIDER_CRED_MAP: dict[str, str] = {
+    "anthropic": "anthropic",
+    "openai": "openai",
+    "gemini": "gemini",
+    "google": "gemini",
+    "minimax": "minimax",
+    "groq": "groq",
+    "cerebras": "cerebras",
+    "openrouter": "openrouter",
+    "mistral": "mistral",
+    "together": "together",
+    "together_ai": "together",
+    "deepseek": "deepseek",
+    "kimi": "kimi",
+    "hive": "hive",
+}
+
+
+def _get_api_key_from_credential_store(provider: str) -> str | None:
+    """Look up a BYOK API key from the encrypted credential store.
+
+    Returns None if no key is found or the credential store is unavailable.
+    """
+    if not os.environ.get("HIVE_CREDENTIAL_KEY"):
+        return None
+    cred_id = _PROVIDER_CRED_MAP.get(provider.lower())
+    if not cred_id:
+        return None
+    try:
+        from framework.credentials import CredentialStore
+
+        store = CredentialStore.with_encrypted_storage()
+        return store.get(cred_id)
+    except Exception:
+        return None
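
A behavior sketch of the lookup (the store contents here are hypothetical):

```python
# With HIVE_CREDENTIAL_KEY exported and an "openai" entry in the store:
_get_api_key_from_credential_store("OpenAI")    # -> "sk-..." (provider match is case-insensitive)
_get_api_key_from_credential_store("google")    # -> the "gemini" entry (aliased in the map above)
_get_api_key_from_credential_store("acme-llm")  # -> None (no mapping for unknown providers)

# Without HIVE_CREDENTIAL_KEY in the environment, every call returns None
# before touching the store at all.
```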

 # ---------------------------------------------------------------------------
 # Derived helpers
 # ---------------------------------------------------------------------------
@@ -79,6 +155,58 @@ def get_preferred_worker_model() -> str | None:
     return None


+def get_vision_fallback_model() -> str | None:
+    """Return the configured vision-fallback model, or None if not configured.
+
+    Reads from the ``vision_fallback`` section of ~/.hive/configuration.json.
+    Used by the agent-loop hook that captions tool-result images when the
+    main agent's model cannot accept image content (text-only LLMs).
+
+    When this returns None the captioning chain's configured + retry
+    attempts both no-op (returning None), and only the final
+    ``gemini/gemini-3-flash-preview`` override has a chance to succeed
+    — and only if a ``GEMINI_API_KEY`` is set in the environment.
+    """
+    vision = get_hive_config().get("vision_fallback", {})
+    if vision.get("provider") and vision.get("model"):
+        provider = str(vision["provider"])
+        model = str(vision["model"]).strip()
+        if provider.lower() == "openrouter" and model.lower().startswith("openrouter/"):
+            model = model[len("openrouter/") :]
+        if model:
+            return f"{provider}/{model}"
+    return None
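
For reference, a hypothetical `vision_fallback` block in `~/.hive/configuration.json` (the provider and model values are illustrative):

```json
{
  "vision_fallback": {
    "provider": "openrouter",
    "model": "openrouter/qwen/qwen2.5-vl-72b-instruct",
    "api_key_env_var": "OPENROUTER_API_KEY"
  }
}
```

With this config, `get_vision_fallback_model()` strips the redundant `openrouter/` prefix from the model before prepending the provider, returning `"openrouter/qwen/qwen2.5-vl-72b-instruct"`.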

+def get_vision_fallback_api_key() -> str | None:
+    """Return the API key for the vision-fallback model.
+
+    Resolution order: ``vision_fallback.api_key_env_var`` from the env,
+    then the default ``get_api_key()``. No subscription-token branches —
+    vision fallback is intended for hosted vision models (Anthropic,
+    OpenAI, Google), not for the subscription-bearer providers.
+    """
+    vision = get_hive_config().get("vision_fallback", {})
+    if not vision:
+        return get_api_key()
+    api_key_env_var = vision.get("api_key_env_var")
+    if api_key_env_var:
+        return os.environ.get(api_key_env_var)
+    return get_api_key()
+
+
+def get_vision_fallback_api_base() -> str | None:
+    """Return the api_base for the vision-fallback model, or None."""
+    vision = get_hive_config().get("vision_fallback", {})
+    if not vision:
+        return None
+    if vision.get("api_base"):
+        return vision["api_base"]
+    if str(vision.get("provider", "")).lower() == "openrouter":
+        return OPENROUTER_API_BASE
+    return None
+
+
 def get_worker_api_key() -> str | None:
     """Return the API key for the worker LLM, falling back to the default key."""
     worker_llm = get_hive_config().get("worker_llm", {})
@@ -301,8 +429,12 @@ def get_api_key() -> str | None:
     # Standard env-var path (covers ZAI Code and all API-key providers)
     api_key_env_var = llm.get("api_key_env_var")
     if api_key_env_var:
-        return os.environ.get(api_key_env_var)
-    return None
+        key = os.environ.get(api_key_env_var)
+        if key:
+            return key
+
+    # Credential store fallback — BYOK keys stored via the UI
+    return _get_api_key_from_credential_store(llm.get("provider", ""))


 # OAuth credentials for Antigravity are fetched from the opencode-antigravity-auth project.
@@ -325,9 +457,7 @@ def _fetch_antigravity_credentials() -> tuple[str | None, str | None]:
     import urllib.request

     try:
-        req = urllib.request.Request(
-            _ANTIGRAVITY_CREDENTIALS_URL, headers={"User-Agent": "Hive/1.0"}
-        )
+        req = urllib.request.Request(_ANTIGRAVITY_CREDENTIALS_URL, headers={"User-Agent": "Hive/1.0"})
         with urllib.request.urlopen(req, timeout=10) as resp:
             content = resp.read().decode("utf-8")
         id_match = re.search(r'ANTIGRAVITY_CLIENT_ID\s*=\s*"([^"]+)"', content)

@@ -51,6 +51,7 @@ from .key_storage import (
 from .models import (
     CredentialDecryptionError,
     CredentialError,
+    CredentialExpiredError,
     CredentialKey,
     CredentialKeyNotFoundError,
     CredentialNotFoundError,
@@ -84,6 +85,7 @@ from .template import TemplateResolver
 from .validation import (
     CredentialStatus,
     CredentialValidationResult,
+    compute_unavailable_tools,
     ensure_credential_key_env,
     validate_agent_credentials,
 )
@@ -136,6 +138,7 @@ __all__ = [
     "CredentialNotFoundError",
     "CredentialKeyNotFoundError",
     "CredentialRefreshError",
+    "CredentialExpiredError",
     "CredentialValidationError",
     "CredentialDecryptionError",
     # Key storage (bootstrap credentials)
@@ -148,6 +151,7 @@ __all__ = [
     # Validation
     "ensure_credential_key_env",
     "validate_agent_credentials",
+    "compute_unavailable_tools",
     "CredentialStatus",
     "CredentialValidationResult",
     # Interactive setup

@@ -332,9 +332,7 @@ class AdenCredentialClient:
                 last_error = e
                 if attempt < self.config.retry_attempts - 1:
                     delay = self.config.retry_delay * (2**attempt)
-                    logger.warning(
-                        f"Aden request failed (attempt {attempt + 1}), retrying in {delay}s: {e}"
-                    )
+                    logger.warning(f"Aden request failed (attempt {attempt + 1}), retrying in {delay}s: {e}")
                     time.sleep(delay)
                 else:
                     raise AdenClientError(f"Failed to connect to Aden server: {e}") from e
@@ -347,9 +345,7 @@ class AdenCredentialClient:
         ):
             raise

-        raise AdenClientError(
-            f"Request failed after {self.config.retry_attempts} attempts"
-        ) from last_error
+        raise AdenClientError(f"Request failed after {self.config.retry_attempts} attempts") from last_error

     def list_integrations(self) -> list[AdenIntegrationInfo]:
         """

@@ -192,9 +192,7 @@ class AdenSyncProvider(CredentialProvider):
                     f"Visit: {e.reauthorization_url or 'your Aden dashboard'}"
                 ) from e

-            raise CredentialRefreshError(
-                f"Failed to refresh credential '{credential.id}': {e}"
-            ) from e
+            raise CredentialRefreshError(f"Failed to refresh credential '{credential.id}': {e}") from e

         except AdenClientError as e:
             logger.error(f"Aden client error for '{credential.id}': {e}")
@@ -206,9 +204,7 @@ class AdenSyncProvider(CredentialProvider):
                 logger.warning(f"Aden unavailable, using cached token for '{credential.id}'")
                 return credential

-            raise CredentialRefreshError(
-                f"Aden server unavailable and token expired for '{credential.id}'"
-            ) from e
+            raise CredentialRefreshError(f"Aden server unavailable and token expired for '{credential.id}'") from e

     def validate(self, credential: CredentialObject) -> bool:
         """

@@ -168,9 +168,7 @@ class AdenCachedStorage(CredentialStorage):
                 if rid != credential_id:
                     result = self._load_by_id(rid)
                     if result is not None:
-                        logger.info(
-                            f"Loaded credential '{credential_id}' via provider index (id='{rid}')"
-                        )
+                        logger.info(f"Loaded credential '{credential_id}' via provider index (id='{rid}')")
                         return result

         # Direct lookup (exact credential_id match)
@@ -199,6 +197,19 @@ class AdenCachedStorage(CredentialStorage):
         if local_cred is None:
             return None

+        # Skip Aden fetch for credentials not managed by Aden (BYOK credentials).
+        # Only OAuth credentials synced from Aden are in the provider index.
+        # BYOK credentials like anthropic, brave_search are local-only.
+        # Also check the _aden_managed flag on the credential itself.
+        is_aden_managed = (
+            credential_id in self._provider_index
+            or any(credential_id in ids for ids in self._provider_index.values())
+            or (local_cred is not None and local_cred.keys.get("_aden_managed") is not None)
+        )
+        if not is_aden_managed:
+            logger.debug(f"Credential '{credential_id}' is local-only, skipping Aden refresh")
+            return local_cred
+
         # Try to refresh stale local credential from Aden
         try:
             aden_cred = self._aden_provider.fetch_from_aden(credential_id)

@@ -493,9 +493,7 @@ class TestAdenCachedStorage:
         assert loaded is not None
         assert loaded.keys["access_token"].value.get_secret_value() == "cached-token"

-    def test_load_from_aden_when_stale(
-        self, cached_storage, local_storage, provider, mock_client, aden_response
-    ):
+    def test_load_from_aden_when_stale(self, cached_storage, local_storage, provider, mock_client, aden_response):
         """Test load fetches from Aden when cache is stale."""
         # Create stale cached credential
         cred = CredentialObject(
@@ -521,9 +519,7 @@ class TestAdenCachedStorage:
         assert loaded is not None
         assert loaded.keys["access_token"].value.get_secret_value() == "test-access-token"

-    def test_load_falls_back_to_stale_when_aden_fails(
-        self, cached_storage, local_storage, provider, mock_client
-    ):
+    def test_load_falls_back_to_stale_when_aden_fails(self, cached_storage, local_storage, provider, mock_client):
         """Test load falls back to stale cache when Aden fails."""
         # Create stale cached credential
         cred = CredentialObject(

@@ -16,9 +16,14 @@ import os
 import stat
 from pathlib import Path

+# Resolved once at module import. ``framework.config.HIVE_HOME`` reads
+# the desktop's ``HIVE_HOME`` env var at its own import time, so the
+# runtime always sees the per-user root before this constant is computed.
+from framework.config import HIVE_HOME as _HIVE_HOME
+
 logger = logging.getLogger(__name__)

-CREDENTIAL_KEY_PATH = Path.home() / ".hive" / "secrets" / "credential_key"
+CREDENTIAL_KEY_PATH = _HIVE_HOME / "secrets" / "credential_key"
 CREDENTIAL_KEY_ENV_VAR = "HIVE_CREDENTIAL_KEY"
 ADEN_CREDENTIAL_ID = "aden_api_key"
 ADEN_ENV_VAR = "ADEN_API_KEY"

@@ -333,6 +333,29 @@ class CredentialRefreshError(CredentialError):
     pass


+class CredentialExpiredError(CredentialError):
+    """Raised when a credential is expired and refresh has failed.
+
+    Carries the metadata an agent (or the tool runner) needs to surface a
+    reauth request to the user without having to look anything else up.
+    """
+
+    def __init__(
+        self,
+        credential_id: str,
+        message: str,
+        *,
+        provider: str | None = None,
+        alias: str | None = None,
+        help_url: str | None = None,
+    ):
+        self.credential_id = credential_id
+        self.provider = provider
+        self.alias = alias
+        self.help_url = help_url
+        super().__init__(message)
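
A minimal sketch of the raise/catch round trip this class enables (the provider, alias, and URL values are invented; the real refresh path decides when to raise):

```python
def _refresh_or_raise(cred_id: str) -> None:
    # Hypothetical refresh failure: attach everything the UI needs so the
    # caller never has to look the credential up again.
    raise CredentialExpiredError(
        cred_id,
        f"Credential '{cred_id}' is expired and refresh failed.",
        provider="google",
        alias="work-gmail",
        help_url="https://hive.adenhq.com/integrations",
    )

try:
    _refresh_or_raise("google_oauth")
except CredentialExpiredError as exc:
    # A tool runner can surface a one-line reauth prompt from the metadata.
    print(f"Reconnect {exc.alias or exc.credential_id}: {exc.help_url}")
```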


 class CredentialValidationError(CredentialError):
     """Raised when credential validation fails."""

@@ -95,9 +95,7 @@ class BaseOAuth2Provider(CredentialProvider):

                 self._client = httpx.Client(timeout=self.config.request_timeout)
             except ImportError as e:
-                raise ImportError(
-                    "OAuth2 provider requires 'httpx'. Install with: uv pip install httpx"
-                ) from e
+                raise ImportError("OAuth2 provider requires 'httpx'. Install with: uv pip install httpx") from e
         return self._client

     def _close_client(self) -> None:
@@ -311,8 +309,7 @@ class BaseOAuth2Provider(CredentialProvider):
         except OAuth2Error as e:
             if e.error == "invalid_grant":
                 raise CredentialRefreshError(
-                    f"Refresh token for '{credential.id}' is invalid or revoked. "
-                    "Re-authorization required."
+                    f"Refresh token for '{credential.id}' is invalid or revoked. Re-authorization required."
                 ) from e
             raise CredentialRefreshError(f"Failed to refresh '{credential.id}': {e}") from e

@@ -422,9 +419,7 @@ class BaseOAuth2Provider(CredentialProvider):
         if response.status_code != 200 or "error" in response_data:
             error = response_data.get("error", "unknown_error")
             description = response_data.get("error_description", response.text)
-            raise OAuth2Error(
-                error=error, description=description, status_code=response.status_code
-            )
+            raise OAuth2Error(error=error, description=description, status_code=response.status_code)

         return OAuth2Token.from_token_response(response_data)

@@ -158,9 +158,7 @@ class TokenLifecycleManager:
         """
         # Run in executor to avoid blocking
         loop = asyncio.get_event_loop()
-        token = await loop.run_in_executor(
-            None, lambda: self.provider.client_credentials_grant(scopes=scopes)
-        )
+        token = await loop.run_in_executor(None, lambda: self.provider.client_credentials_grant(scopes=scopes))

         self._save_token_to_store(token)
         self._cached_token = token

@@ -100,9 +100,7 @@ class ZohoOAuth2Provider(BaseOAuth2Provider):
         )
         super().__init__(config, provider_id="zoho_crm_oauth2")
         self._accounts_domain = base
-        self._api_domain = (
-            api_domain or os.getenv("ZOHO_API_DOMAIN", "https://www.zohoapis.com")
-        ).rstrip("/")
+        self._api_domain = (api_domain or os.getenv("ZOHO_API_DOMAIN", "https://www.zohoapis.com")).rstrip("/")

     @property
     def supported_types(self) -> list[CredentialType]:

@@ -268,9 +268,7 @@ class CredentialSetupSession:
         self._print(f"{Colors.YELLOW}Initializing credential store...{Colors.NC}")
         try:
             generate_and_save_credential_key()
-            self._print(
-                f"{Colors.GREEN}✓ Encryption key saved to ~/.hive/secrets/credential_key{Colors.NC}"
-            )
+            self._print(f"{Colors.GREEN}✓ Encryption key saved to ~/.hive/secrets/credential_key{Colors.NC}")
             return True
         except Exception as e:
             self._print(f"{Colors.RED}Failed to initialize credential store: {e}{Colors.NC}")
@@ -449,9 +447,7 @@ class CredentialSetupSession:
             logger.warning("Unexpected error exporting credential to env", exc_info=True)
             return True
         else:
-            self._print(
-                f"{Colors.YELLOW}⚠ {cred.credential_name} not found in Aden account.{Colors.NC}"
-            )
+            self._print(f"{Colors.YELLOW}⚠ {cred.credential_name} not found in Aden account.{Colors.NC}")
             self._print("Please connect this integration on https://hive.adenhq.com first.")
             return False
         except Exception as e:

@@ -128,7 +128,9 @@ class EncryptedFileStorage(CredentialStorage):
         Initialize encrypted storage.

         Args:
-            base_path: Directory for credential files. Defaults to ~/.hive/credentials.
+            base_path: Directory for credential files. Defaults to
+                ``$HIVE_HOME/credentials`` (per-user) when HIVE_HOME is set,
+                else ``~/.hive/credentials``.
             encryption_key: 32-byte Fernet key. If None, reads from env var.
             key_env_var: Environment variable containing encryption key
         """
@@ -136,11 +138,17 @@ class EncryptedFileStorage(CredentialStorage):
             from cryptography.fernet import Fernet
         except ImportError as e:
             raise ImportError(
-                "Encrypted storage requires 'cryptography'. "
-                "Install with: uv pip install cryptography"
+                "Encrypted storage requires 'cryptography'. Install with: uv pip install cryptography"
             ) from e

-        self.base_path = Path(base_path or self.DEFAULT_PATH).expanduser()
+        if base_path is None:
+            # Honor HIVE_HOME (set by the desktop shell to a per-user dir) so
+            # the encrypted store doesn't fork between ~/.hive and the desktop
+            # userData root. Falls back to ~/.hive/credentials when standalone.
+            from framework.config import HIVE_HOME
+
+            base_path = HIVE_HOME / "credentials"
+        self.base_path = Path(base_path).expanduser()
         self._ensure_dirs()
         self._key_env_var = key_env_var

@@ -161,6 +169,14 @@ class EncryptedFileStorage(CredentialStorage):

         self._fernet = Fernet(self._key)

+        # Rebuild the metadata index from disk if it's missing or older than
+        # the current index schema. The index is a developer-readable JSON
+        # snapshot of the encrypted store; the .enc files remain authoritative.
+        try:
+            self._maybe_rebuild_index()
+        except Exception:
+            logger.debug("Initial index rebuild failed (non-fatal)", exc_info=True)
+
     def _ensure_dirs(self) -> None:
         """Create directory structure."""
         (self.base_path / "credentials").mkdir(parents=True, exist_ok=True)
@@ -186,8 +202,8 @@ class EncryptedFileStorage(CredentialStorage):
         with open(cred_path, "wb") as f:
             f.write(encrypted)

-        # Update index
-        self._update_index(credential.id, "save", credential.credential_type.value)
+        # Update developer-readable index
+        self._index_upsert(credential)
         logger.debug(f"Saved encrypted credential '{credential.id}'")

     def load(self, credential_id: str) -> CredentialObject | None:
@@ -205,9 +221,7 @@ class EncryptedFileStorage(CredentialStorage):
             json_bytes = self._fernet.decrypt(encrypted)
             data = json.loads(json_bytes.decode("utf-8-sig"))
         except Exception as e:
-            raise CredentialDecryptionError(
-                f"Failed to decrypt credential '{credential_id}': {e}"
-            ) from e
+            raise CredentialDecryptionError(f"Failed to decrypt credential '{credential_id}': {e}") from e

         # Deserialize
         return self._deserialize_credential(data)
@@ -217,7 +231,7 @@ class EncryptedFileStorage(CredentialStorage):
         cred_path = self._cred_path(credential_id)
         if cred_path.exists():
             cred_path.unlink()
-            self._update_index(credential_id, "delete")
+            self._index_remove(credential_id)
             logger.debug(f"Deleted credential '{credential_id}'")
             return True
         return False
@@ -258,33 +272,151 @@ class EncryptedFileStorage(CredentialStorage):

         return CredentialObject.model_validate(data)

-    def _update_index(
-        self,
-        credential_id: str,
-        operation: str,
-        credential_type: str | None = None,
-    ) -> None:
-        """Update the metadata index."""
-        index_path = self.base_path / "metadata" / "index.json"
+    # ------------------------------------------------------------------
+    # Developer-readable metadata index
+    #
+    # The index lives at ``<base_path>/metadata/index.json`` and mirrors what
+    # is in the encrypted store at a glance: credential id, provider, alias,
+    # identity, key names, timestamps, and earliest expiry. It contains NO
+    # secret values and is safe to share when filing a bug report. The .enc
+    # files remain authoritative — the index is purely for human inspection
+    # and for cheap ``list_all()`` enumeration.
+    #
+    # Schema version is bumped whenever the entry shape changes; the store
+    # rebuilds the index from the encrypted files on load when the on-disk
+    # version is older.
+    # ------------------------------------------------------------------

-        if index_path.exists():
-            with open(index_path, encoding="utf-8-sig") as f:
-                index = json.load(f)
-        else:
-            index = {"credentials": {}, "version": "1.0"}
+    INDEX_VERSION = "2.0"
+    INDEX_INTERNAL_KEY_NAMES = ("_alias", "_integration_type")

-        if operation == "save":
-            index["credentials"][credential_id] = {
-                "updated_at": datetime.now(UTC).isoformat(),
-                "type": credential_type,
-            }
-        elif operation == "delete":
-            index["credentials"].pop(credential_id, None)
+    def _index_path(self) -> Path:
+        return self.base_path / "metadata" / "index.json"

-        index["last_modified"] = datetime.now(UTC).isoformat()
+    def _read_index(self) -> dict[str, Any]:
+        """Read the index from disk; return an empty skeleton if missing."""
+        path = self._index_path()
+        if not path.exists():
+            return {"version": self.INDEX_VERSION, "credentials": {}}
+        try:
+            with open(path, encoding="utf-8-sig") as f:
+                return json.load(f)
+        except Exception:
+            logger.debug("Failed to read credential index, starting fresh", exc_info=True)
+            return {"version": self.INDEX_VERSION, "credentials": {}}

-        with open(index_path, "w", encoding="utf-8") as f:
-            json.dump(index, f, indent=2)
+    def _write_index(self, index: dict[str, Any]) -> None:
+        """Write the index to disk with consistent envelope fields."""
+        index["version"] = self.INDEX_VERSION
+        index["store_path"] = str(self.base_path)
+        index["generated_at"] = datetime.now(UTC).isoformat()
+        path = self._index_path()
+        path.parent.mkdir(parents=True, exist_ok=True)
+        with open(path, "w", encoding="utf-8") as f:
+            json.dump(index, f, indent=2, sort_keys=False, default=str)

+    def _index_entry_for(self, credential: CredentialObject) -> dict[str, Any]:
+        """Build a single index entry from a CredentialObject (no secrets)."""
+        # Visible key names: drop internal markers like _alias / _integration_type
+        # / _identity_* so the entry shows what's actually a credential key.
+        visible_keys = [
+            name
+            for name in credential.keys.keys()
+            if name not in self.INDEX_INTERNAL_KEY_NAMES and not name.startswith("_identity_")
+        ]
+
+        # Earliest expiry across all keys (most likely the access_token).
+        earliest_expiry: datetime | None = None
+        for key in credential.keys.values():
+            if key.expires_at is None:
+                continue
+            if earliest_expiry is None or key.expires_at < earliest_expiry:
+                earliest_expiry = key.expires_at
+
+        return {
+            "credential_type": credential.credential_type.value,
+            "provider": credential.provider_type,
+            "alias": credential.alias,
+            "identity": credential.identity.to_dict(),
+            "key_names": sorted(visible_keys),
+            "created_at": credential.created_at.isoformat() if credential.created_at else None,
+            "updated_at": credential.updated_at.isoformat() if credential.updated_at else None,
+            "last_refreshed": (credential.last_refreshed.isoformat() if credential.last_refreshed else None),
+            "expires_at": earliest_expiry.isoformat() if earliest_expiry else None,
+            "auto_refresh": credential.auto_refresh,
+            "tags": list(credential.tags),
+        }
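
Putting the envelope and entry shape together, a rebuilt `index.json` would look roughly like this (every value is illustrative; the real file is whatever `_write_index` last serialized):

```json
{
  "version": "2.0",
  "store_path": "/home/user/.hive/credentials",
  "generated_at": "2025-06-01T12:00:00+00:00",
  "credentials": {
    "google_oauth": {
      "credential_type": "oauth2",
      "provider": "google",
      "alias": "work-gmail",
      "identity": {"email": "user@example.com"},
      "key_names": ["access_token", "refresh_token"],
      "created_at": "2025-05-01T09:30:00+00:00",
      "updated_at": "2025-06-01T11:59:00+00:00",
      "last_refreshed": "2025-06-01T11:59:00+00:00",
      "expires_at": "2025-06-01T12:59:00+00:00",
      "auto_refresh": true,
      "tags": []
    }
  }
}
```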
|
||||
|
||||
def _index_upsert(self, credential: CredentialObject) -> None:
|
||||
"""Insert or update one credential entry in the index."""
|
||||
try:
|
||||
index = self._read_index()
|
||||
if index.get("version") != self.INDEX_VERSION:
|
||||
# Old schema — rebuild from disk so we don't blend formats.
|
||||
self._rebuild_index()
|
||||
return
|
||||
credentials = index.setdefault("credentials", {})
|
||||
credentials[credential.id] = self._index_entry_for(credential)
|
||||
self._write_index(index)
|
||||
except Exception:
|
||||
logger.debug("Index upsert failed (non-fatal)", exc_info=True)
|
||||
|
||||
def _index_remove(self, credential_id: str) -> None:
|
||||
"""Remove one credential entry from the index."""
|
||||
try:
|
||||
index = self._read_index()
|
||||
if index.get("version") != self.INDEX_VERSION:
|
||||
self._rebuild_index()
|
||||
return
|
||||
credentials = index.setdefault("credentials", {})
|
||||
credentials.pop(credential_id, None)
|
||||
self._write_index(index)
|
||||
except Exception:
|
||||
logger.debug("Index remove failed (non-fatal)", exc_info=True)
|
||||
|
||||
def _maybe_rebuild_index(self) -> None:
|
||||
"""Rebuild the index if it's missing, malformed, or on an old schema.
|
||||
|
||||
Called once at startup. The check is cheap — read the version field
|
||||
and bail out if it matches. Encrypted files remain authoritative; this
|
||||
only refreshes the developer-facing snapshot.
|
||||
"""
|
||||
path = self._index_path()
|
||||
if path.exists():
|
||||
try:
|
||||
with open(path, encoding="utf-8-sig") as f:
|
||||
index = json.load(f)
|
||||
if index.get("version") == self.INDEX_VERSION:
|
||||
return
|
||||
except Exception:
|
||||
pass # fall through to rebuild
|
||||
self._rebuild_index()
|
||||
|
||||
def _rebuild_index(self) -> None:
|
||||
"""Walk the encrypted credentials directory and rewrite a fresh index."""
|
||||
cred_dir = self.base_path / "credentials"
|
||||
if not cred_dir.is_dir():
|
||||
return
|
||||
|
||||
entries: dict[str, Any] = {}
|
||||
for cred_file in sorted(cred_dir.glob("*.enc")):
|
||||
credential_id = cred_file.stem
|
||||
try:
|
||||
cred = self.load(credential_id)
|
||||
except Exception:
|
||||
logger.debug(
|
||||
"Failed to load %s during index rebuild — skipping",
|
||||
credential_id,
|
||||
exc_info=True,
|
||||
)
|
||||
continue
|
||||
if cred is None:
|
||||
continue
|
||||
entries[cred.id] = self._index_entry_for(cred)
|
||||
|
||||
index = {"credentials": entries}
|
||||
self._write_index(index)
|
||||
logger.info("Rebuilt credential index with %d entries", len(entries))
|
||||
|
||||
|
||||
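For orientation, a sketch of the index.json envelope `_write_index` produces, written as a Python literal (every concrete value below is illustrative; the real entry fields come from `_index_entry_for` above):

index = {
    "version": 2,  # illustrative; the real value is self.INDEX_VERSION
    "store_path": "/home/user/.hive/credentials",  # str(self.base_path)
    "generated_at": "2026-04-21T12:34:56+00:00",
    "credentials": {
        "hubspot": {  # one entry per credential id, built by _index_entry_for
            "credential_type": "oauth2",
            "provider": "aden",
            "key_names": ["access_token", "refresh_token"],
            "expires_at": "2026-04-21T13:34:56+00:00",
        },
    },
}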
class EnvVarStorage(CredentialStorage):
@@ -351,8 +483,7 @@ class EnvVarStorage(CredentialStorage):
    def save(self, credential: CredentialObject) -> None:
        """Cannot save to environment variables at runtime."""
        raise NotImplementedError(
            "EnvVarStorage is read-only. Set environment variables "
            "externally or use EncryptedFileStorage."
            "EnvVarStorage is read-only. Set environment variables externally or use EncryptedFileStorage."
        )

    def load(self, credential_id: str) -> CredentialObject | None:
@@ -372,9 +503,7 @@ class EnvVarStorage(CredentialStorage):

    def delete(self, credential_id: str) -> bool:
        """Cannot delete environment variables at runtime."""
        raise NotImplementedError(
            "EnvVarStorage is read-only. Unset environment variables externally."
        )
        raise NotImplementedError("EnvVarStorage is read-only. Unset environment variables externally.")

    def list_all(self) -> list[str]:
        """List credentials that are available in environment."""
@@ -390,7 +519,7 @@ class EnvVarStorage(CredentialStorage):
    def exists(self, credential_id: str) -> bool:
        """Check if credential is available in environment."""
        env_var = self._get_env_var_name(credential_id)
        return self._read_env_value(env_var) is not None
        return bool(self._read_env_value(env_var))

    def add_mapping(self, credential_id: str, env_var: str) -> None:
        """

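The `exists` change above is behavioral, not cosmetic: a variable that is set but empty now counts as absent, which keeps `exists()` consistent with `load()`. A standalone sketch of the distinction (plain `os.environ`, not the class's `_read_env_value` helper):

import os

os.environ["EMPTY_API_KEY"] = ""

value = os.environ.get("EMPTY_API_KEY")
print(value is not None)  # True: the old check said the credential "exists"
print(bool(value))        # False: the new check treats an empty value as missing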
@@ -19,6 +19,7 @@ from typing import Any
from pydantic import SecretStr

from .models import (
    CredentialExpiredError,
    CredentialKey,
    CredentialObject,
    CredentialRefreshError,
@@ -123,9 +124,7 @@ class CredentialStore:
        """
        return self._providers.get(provider_id)

    def get_provider_for_credential(
        self, credential: CredentialObject
    ) -> CredentialProvider | None:
    def get_provider_for_credential(self, credential: CredentialObject) -> CredentialProvider | None:
        """
        Get the appropriate provider for a credential.

@@ -177,6 +176,8 @@ class CredentialStore:
        self,
        credential_id: str,
        refresh_if_needed: bool = True,
        *,
        raise_on_refresh_failure: bool = False,
    ) -> CredentialObject | None:
        """
        Get a credential by ID.
@@ -184,6 +185,11 @@ class CredentialStore:
        Args:
            credential_id: The credential identifier
            refresh_if_needed: If True, refresh expired credentials
            raise_on_refresh_failure: If True, raise ``CredentialExpiredError``
                when refresh fails instead of silently returning the stale
                credential. Tool-execution call sites should pass True so the
                agent gets a structured "reauth needed" signal rather than a
                later 401 from the provider.

        Returns:
            CredentialObject or None if not found
@@ -193,7 +199,7 @@ class CredentialStore:
        cached = self._get_from_cache(credential_id)
        if cached is not None:
            if refresh_if_needed and self._should_refresh(cached):
                return self._refresh_credential(cached)
                return self._refresh_credential(cached, raise_on_failure=raise_on_refresh_failure)
            return cached

        # Load from storage
@@ -203,30 +209,42 @@ class CredentialStore:

        # Refresh if needed
        if refresh_if_needed and self._should_refresh(credential):
            credential = self._refresh_credential(credential)
            credential = self._refresh_credential(credential, raise_on_failure=raise_on_refresh_failure)

        # Cache
        self._add_to_cache(credential)

        return credential

    def get_key(self, credential_id: str, key_name: str) -> str | None:
    def get_key(
        self,
        credential_id: str,
        key_name: str,
        *,
        raise_on_refresh_failure: bool = False,
    ) -> str | None:
        """
        Convenience method to get a specific key value.

        Args:
            credential_id: The credential identifier
            key_name: The key within the credential
            raise_on_refresh_failure: See ``get_credential``.

        Returns:
            The key value or None if not found
        """
        credential = self.get_credential(credential_id)
        credential = self.get_credential(credential_id, raise_on_refresh_failure=raise_on_refresh_failure)
        if credential is None:
            return None
        return credential.get_key(key_name)

    def get(self, credential_id: str) -> str | None:
    def get(
        self,
        credential_id: str,
        *,
        raise_on_refresh_failure: bool = False,
    ) -> str | None:
        """
        Legacy compatibility: get the primary key value.

@@ -235,11 +253,12 @@ class CredentialStore:

        Args:
            credential_id: The credential identifier
            raise_on_refresh_failure: See ``get_credential``.

        Returns:
            The primary key value or None
        """
        credential = self.get_credential(credential_id)
        credential = self.get_credential(credential_id, raise_on_refresh_failure=raise_on_refresh_failure)
        if credential is None:
            return None
        return credential.get_default_key()
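A sketch of the call-site pattern the new keyword enables for tool execution (the surrounding handler is hypothetical; `get_key` and `CredentialExpiredError` are as defined in this diff):

try:
    token = store.get_key("hubspot", "access_token", raise_on_refresh_failure=True)
except CredentialExpiredError as exc:
    # Structured reauth signal instead of a later 401 from the provider.
    request_reauthorization(provider=exc.provider, alias=exc.alias)  # hypothetical helper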
@@ -510,8 +529,20 @@ class CredentialStore:

        return provider.should_refresh(credential)

    def _refresh_credential(self, credential: CredentialObject) -> CredentialObject:
        """Refresh a credential using its provider."""
    def _refresh_credential(
        self,
        credential: CredentialObject,
        *,
        raise_on_failure: bool = False,
    ) -> CredentialObject:
        """Refresh a credential using its provider.

        When ``raise_on_failure`` is True, a refresh failure raises
        ``CredentialExpiredError`` carrying provider/alias/help_url metadata
        for the caller (typically the tool runner) to surface a reauth
        request. Otherwise, the stale credential is returned to preserve
        legacy best-effort behavior.
        """
        provider = self.get_provider_for_credential(credential)
        if provider is None:
            logger.warning(f"No provider found for credential '{credential.id}'")
@@ -530,6 +561,16 @@ class CredentialStore:

        except CredentialRefreshError as e:
            logger.error(f"Failed to refresh credential '{credential.id}': {e}")
            if raise_on_failure:
                raise CredentialExpiredError(
                    credential_id=credential.id,
                    message=(
                        f"OAuth token for '{credential.id}' is expired and "
                        f"refresh failed: {e}. Reauthorization required."
                    ),
                    provider=credential.provider_type,
                    alias=credential.alias,
                ) from e
            return credential

    def refresh_credential(self, credential_id: str) -> CredentialObject | None:
@@ -704,13 +745,14 @@ class CredentialStore:
        token = store.get_key("hubspot", "access_token")
    """
    import os
    from pathlib import Path

    from .storage import EncryptedFileStorage

    # Determine local storage path
    if local_path is None:
        local_path = str(Path.home() / ".hive" / "credentials")
        from framework.config import HIVE_HOME

        local_path = str(HIVE_HOME / "credentials")

    local_storage = EncryptedFileStorage(base_path=local_path)

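The default-path change above swaps the hardcoded home directory for the configurable `HIVE_HOME`. Assuming `HIVE_HOME` defaults to `~/.hive` (an assumption; the real default lives in `framework.config`), the resolved location is unchanged for stock installs:

from pathlib import Path

HIVE_HOME = Path.home() / ".hive"  # assumed default; overridable via framework.config
print(str(HIVE_HOME / "credentials"))  # e.g. /home/user/.hive/credentials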
@@ -88,9 +88,7 @@ class TemplateResolver:
            if key_name:
                value = credential.get_key(key_name)
                if value is None:
                    raise CredentialKeyNotFoundError(
                        f"Key '{key_name}' not found in credential '{cred_id}'"
                    )
                    raise CredentialKeyNotFoundError(f"Key '{key_name}' not found in credential '{cred_id}'")
            else:
                # Use default key
                value = credential.get_default_key()
@@ -126,9 +124,7 @@ class TemplateResolver:
            ... })
            {"Authorization": "Bearer ghp_xxx", "X-API-Key": "BSAKxxx"}
        """
        return {
            key: self.resolve(value, fail_on_missing) for key, value in header_templates.items()
        }
        return {key: self.resolve(value, fail_on_missing) for key, value in header_templates.items()}

    def resolve_params(
        self,

@@ -130,9 +130,7 @@ class TestCredentialObject:
        # With access_token
        cred2 = CredentialObject(
            id="test",
            keys={
                "access_token": CredentialKey(name="access_token", value=SecretStr("token-value"))
            },
            keys={"access_token": CredentialKey(name="access_token", value=SecretStr("token-value"))},
        )
        assert cred2.get_default_key() == "token-value"

@@ -260,6 +258,14 @@ class TestEnvVarStorage:
        with pytest.raises(NotImplementedError):
            storage.delete("test")

    def test_exists_matches_load_for_empty_value(self):
        """Test exists() and load() stay consistent for empty values."""
        storage = EnvVarStorage(env_mapping={"empty": "EMPTY_API_KEY"})

        with patch.object(storage, "_read_env_value", return_value=""):
            assert storage.load("empty") is None
            assert not storage.exists("empty")


class TestEncryptedFileStorage:
    """Tests for EncryptedFileStorage."""
@@ -297,9 +303,7 @@ class TestEncryptedFileStorage:
        key = Fernet.generate_key().decode()
        with patch.dict(os.environ, {"HIVE_CREDENTIAL_KEY": key}):
            storage = EncryptedFileStorage(temp_dir)
            cred = CredentialObject(
                id="test", keys={"k": CredentialKey(name="k", value=SecretStr("v"))}
            )
            cred = CredentialObject(id="test", keys={"k": CredentialKey(name="k", value=SecretStr("v"))})
            storage.save(cred)

            # Create new storage instance with same key
@@ -330,18 +334,10 @@ class TestCompositeStorage:
    def test_read_from_primary(self):
        """Test reading from primary storage."""
        primary = InMemoryStorage()
        primary.save(
            CredentialObject(
                id="test", keys={"k": CredentialKey(name="k", value=SecretStr("primary"))}
            )
        )
        primary.save(CredentialObject(id="test", keys={"k": CredentialKey(name="k", value=SecretStr("primary"))}))

        fallback = InMemoryStorage()
        fallback.save(
            CredentialObject(
                id="test", keys={"k": CredentialKey(name="k", value=SecretStr("fallback"))}
            )
        )
        fallback.save(CredentialObject(id="test", keys={"k": CredentialKey(name="k", value=SecretStr("fallback"))}))

        storage = CompositeStorage(primary, [fallback])
        cred = storage.load("test")
@@ -353,11 +349,7 @@ class TestCompositeStorage:
        """Test fallback when credential not in primary."""
        primary = InMemoryStorage()
        fallback = InMemoryStorage()
        fallback.save(
            CredentialObject(
                id="test", keys={"k": CredentialKey(name="k", value=SecretStr("fallback"))}
            )
        )
        fallback.save(CredentialObject(id="test", keys={"k": CredentialKey(name="k", value=SecretStr("fallback"))}))

        storage = CompositeStorage(primary, [fallback])
        cred = storage.load("test")
@@ -393,9 +385,7 @@ class TestStaticProvider:
    def test_refresh_returns_unchanged(self):
        """Test that refresh returns credential unchanged."""
        provider = StaticProvider()
        cred = CredentialObject(
            id="test", keys={"k": CredentialKey(name="k", value=SecretStr("v"))}
        )
        cred = CredentialObject(id="test", keys={"k": CredentialKey(name="k", value=SecretStr("v"))})

        refreshed = provider.refresh(cred)
        assert refreshed.get_key("k") == "v"
@@ -403,9 +393,7 @@ class TestStaticProvider:
    def test_validate_with_keys(self):
        """Test validation with keys present."""
        provider = StaticProvider()
        cred = CredentialObject(
            id="test", keys={"k": CredentialKey(name="k", value=SecretStr("v"))}
        )
        cred = CredentialObject(id="test", keys={"k": CredentialKey(name="k", value=SecretStr("v"))})

        assert provider.validate(cred)

@@ -606,9 +594,7 @@ class TestCredentialStore:
        storage = InMemoryStorage()
        store = CredentialStore(storage=storage, cache_ttl_seconds=60)

        storage.save(
            CredentialObject(id="test", keys={"k": CredentialKey(name="k", value=SecretStr("v"))})
        )
        storage.save(CredentialObject(id="test", keys={"k": CredentialKey(name="k", value=SecretStr("v"))}))

        # First load
        store.get_credential("test")
@@ -686,9 +672,7 @@ class TestOAuth2Module:
        from core.framework.credentials.oauth2 import OAuth2Config, TokenPlacement

        # Valid config
        config = OAuth2Config(
            token_url="https://example.com/token", client_id="id", client_secret="secret"
        )
        config = OAuth2Config(token_url="https://example.com/token", client_id="id", client_secret="secret")
        assert config.token_url == "https://example.com/token"

        # Missing token_url

@@ -160,15 +160,9 @@ class CredentialValidationResult:
        if aden_nc:
            if missing or invalid:
                lines.append("")
            lines.append(
                "Aden integrations not connected "
                "(ADEN_API_KEY is set but OAuth tokens unavailable):\n"
            )
            lines.append("Aden integrations not connected (ADEN_API_KEY is set but OAuth tokens unavailable):\n")
            for c in aden_nc:
                lines.append(
                    f"  {c.env_var} for {_label(c)}"
                    f"\n    Connect this integration at hive.adenhq.com first."
                )
                lines.append(f"  {c.env_var} for {_label(c)}\n    Connect this integration at hive.adenhq.com first.")
        lines.append("\nIf you've already set up credentials, restart your terminal to load them.")
        return "\n".join(lines)

@@ -236,6 +230,45 @@ def _presync_aden_tokens(credential_specs: dict, *, force: bool = False) -> None
    )


def compute_unavailable_tools(nodes: list) -> tuple[set[str], list[str]]:
    """Return (tool_names_to_drop, human_messages).

    Runs credential validation *without* raising, collects every tool
    bound to a failed credential (missing / invalid / Aden-not-connected
    and no alternative provider available), and returns the set of tool
    names that should be silently dropped from the worker's effective
    tool list. A usage sketch follows this file's hunks below.

    Use this at every worker-spawn preflight so missing credentials
    filter tools out of the graph instead of hard-failing the whole
    spawn. Only affects non-MCP tools — the MCP admission gate
    (``_build_mcp_admission_gate``) already handles MCP tools at
    registration time.
    """
    try:
        result = validate_agent_credentials(nodes, verify=False, raise_on_error=False)
    except Exception as exc:
        logger.debug("compute_unavailable_tools: validation raised: %s", exc)
        return set(), []

    drop: set[str] = set()
    messages: list[str] = []
    for status in result.failed:
        if not status.tools:
            continue
        drop.update(status.tools)
        reason = "missing"
        if status.aden_not_connected:
            reason = "aden_not_connected"
        elif status.available and status.valid is False:
            reason = "invalid"
        messages.append(
            f"{status.env_var} ({reason}) → drops {len(status.tools)} tool(s): "
            f"{', '.join(status.tools[:6])}" + (f" +{len(status.tools) - 6} more" if len(status.tools) > 6 else "")
        )
    return drop, messages


def validate_agent_credentials(
    nodes: list,
    quiet: bool = False,
@@ -292,9 +325,7 @@ def validate_agent_credentials(
    if os.environ.get("ADEN_API_KEY"):
        _presync_aden_tokens(CREDENTIAL_SPECS, force=force_refresh)

    env_mapping = {
        (spec.credential_id or name): spec.env_var for name, spec in CREDENTIAL_SPECS.items()
    }
    env_mapping = {(spec.credential_id or name): spec.env_var for name, spec in CREDENTIAL_SPECS.items()}
    env_storage = EnvVarStorage(env_mapping=env_mapping)
    if os.environ.get("HIVE_CREDENTIAL_KEY"):
        storage = CompositeStorage(primary=env_storage, fallbacks=[EncryptedFileStorage()])
@@ -328,12 +359,7 @@ def validate_agent_credentials(
        available = store.is_available(cred_id)

        # Aden-not-connected: ADEN_API_KEY set, Aden-only cred, but integration missing
        is_aden_nc = (
            not available
            and has_aden_key
            and spec.aden_supported
            and not spec.direct_api_key_supported
        )
        is_aden_nc = not available and has_aden_key and spec.aden_supported and not spec.direct_api_key_supported

        status = CredentialStatus(
            credential_name=cred_name,
@@ -451,9 +477,7 @@ def validate_agent_credentials(
        identity_data = result.details.get("identity")
        if identity_data and isinstance(identity_data, dict):
            try:
                cred_obj = store.get_credential(
                    status.credential_id, refresh_if_needed=False
                )
                cred_obj = store.get_credential(status.credential_id, refresh_if_needed=False)
                if cred_obj:
                    cred_obj.set_identity(**identity_data)
                    store.save_credential(cred_obj)

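A usage sketch for the `compute_unavailable_tools` preflight added above (the worker-spawn surroundings are hypothetical; the helper's signature and return shape are from this diff):

drop, messages = compute_unavailable_tools(nodes)
for line in messages:
    logger.info("credential preflight: %s", line)

# Filter the worker's effective tool list instead of failing the spawn.
effective_tools = [tool for tool in candidate_tools if tool.name not in drop]  # hypothetical tool objects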
@@ -1,12 +1,15 @@
"""Host layer -- how agents are triggered and hosted."""

from framework.host.agent_host import (  # noqa: F401
    AgentHost,
    AgentRuntimeConfig,
    create_agent_runtime,
from framework.host.colony_runtime import (  # noqa: F401
    ColonyConfig,
    ColonyRuntime,
    StreamEventBus,
    TriggerSpec,
)
from framework.host.event_bus import AgentEvent, EventBus, EventType  # noqa: F401
from framework.host.execution_manager import (  # noqa: F401
    EntryPointSpec,
    ExecutionManager,
from framework.host.worker import (  # noqa: F401
    Worker,
    WorkerInfo,
    WorkerResult,
    WorkerStatus,
)

+358 -498 File diff suppressed because it is too large
@@ -0,0 +1,95 @@
"""Read/write helpers for per-colony metadata.json.

A colony's metadata.json lives at ``{COLONIES_DIR}/{colony_name}/metadata.json``
and holds immutable provenance: the queen that created it, the forked
session id, creation/update timestamps, and the list of workers.

Mutable user-editable tool configuration lives in a sibling
``tools.json`` sidecar — see :mod:`framework.host.colony_tools_config`
— so identity and tool gating evolve independently.
"""

from __future__ import annotations

import json
import logging
from pathlib import Path
from typing import Any

from framework.config import COLONIES_DIR

logger = logging.getLogger(__name__)


def colony_metadata_path(colony_name: str) -> Path:
    """Return the on-disk path to a colony's metadata.json."""
    return COLONIES_DIR / colony_name / "metadata.json"


def load_colony_metadata(colony_name: str) -> dict[str, Any]:
    """Load metadata.json for ``colony_name``.

    Returns an empty dict if the file is missing or malformed — callers
    are expected to treat missing fields as defaults.
    """
    path = colony_metadata_path(colony_name)
    if not path.exists():
        return {}
    try:
        data = json.loads(path.read_text(encoding="utf-8"))
    except (json.JSONDecodeError, OSError):
        logger.warning("Failed to read colony metadata at %s", path)
        return {}
    return data if isinstance(data, dict) else {}


def update_colony_metadata(colony_name: str, updates: dict[str, Any]) -> dict[str, Any]:
    """Shallow-merge ``updates`` into metadata.json and persist.

    Returns the full updated dict. Raises ``FileNotFoundError`` if the
    colony does not exist. Writes atomically via ``os.replace`` to
    minimize the window where a reader could see a half-written file.
    """
    import os
    import tempfile

    path = colony_metadata_path(colony_name)
    if not path.parent.exists():
        raise FileNotFoundError(f"Colony '{colony_name}' not found")

    data = load_colony_metadata(colony_name) if path.exists() else {}
    for key, value in updates.items():
        data[key] = value

    path.parent.mkdir(parents=True, exist_ok=True)
    fd, tmp_path = tempfile.mkstemp(
        prefix=".metadata.",
        suffix=".json.tmp",
        dir=str(path.parent),
    )
    try:
        with os.fdopen(fd, "w", encoding="utf-8") as fh:
            json.dump(data, fh, indent=2)
            fh.flush()
            os.fsync(fh.fileno())
        os.replace(tmp_path, path)
    except BaseException:
        try:
            os.unlink(tmp_path)
        except OSError:
            pass
        raise
    return data


def list_colony_names() -> list[str]:
    """Return the names of every colony that has a metadata.json on disk."""
    if not COLONIES_DIR.is_dir():
        return []
    names: list[str] = []
    for entry in sorted(COLONIES_DIR.iterdir()):
        if not entry.is_dir():
            continue
        if (entry / "metadata.json").exists():
            names.append(entry.name)
    return names
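A round-trip sketch of the helpers above (colony name and fields are illustrative):

meta = load_colony_metadata("research")  # {} if missing or malformed
updated = update_colony_metadata("research", {"workers": ["scout-1", "scout-2"]})
assert updated["workers"] == ["scout-1", "scout-2"]
# Shallow merge: provenance fields already in metadata.json (queen_name,
# created_at, ...) survive untouched; only the listed keys are replaced.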
File diff suppressed because it is too large
@@ -0,0 +1,162 @@
"""Per-colony tool configuration sidecar (``tools.json``).

Lives at ``~/.hive/colonies/{colony_name}/tools.json`` alongside
``metadata.json``. Kept separate so provenance (queen_name,
created_at, workers) stays in metadata while the user-editable tool
allowlist gets its own file.

Schema::

    {
        "enabled_mcp_tools": ["read_file", ...] | null,
        "updated_at": "2026-04-21T12:34:56+00:00"
    }

- ``null`` / missing file → default "allow every MCP tool".
- ``[]`` → explicitly disable every MCP tool.
- ``["foo", "bar"]`` → only those MCP tool names pass the filter.

Atomic writes via ``os.replace`` mirror
``framework.host.colony_metadata.update_colony_metadata``.
"""

from __future__ import annotations

import json
import logging
import os
import tempfile
from datetime import UTC, datetime
from pathlib import Path
from typing import Any

from framework.config import COLONIES_DIR

logger = logging.getLogger(__name__)


def tools_config_path(colony_name: str) -> Path:
    """Return the on-disk path to a colony's ``tools.json``."""
    return COLONIES_DIR / colony_name / "tools.json"


def _metadata_path(colony_name: str) -> Path:
    return COLONIES_DIR / colony_name / "metadata.json"


def _atomic_write_json(path: Path, data: dict[str, Any]) -> None:
    path.parent.mkdir(parents=True, exist_ok=True)
    fd, tmp = tempfile.mkstemp(
        prefix=".tools.",
        suffix=".json.tmp",
        dir=str(path.parent),
    )
    try:
        with os.fdopen(fd, "w", encoding="utf-8") as fh:
            json.dump(data, fh, indent=2)
            fh.flush()
            os.fsync(fh.fileno())
        os.replace(tmp, path)
    except BaseException:
        try:
            os.unlink(tmp)
        except OSError:
            pass
        raise


def _migrate_from_metadata_if_needed(colony_name: str) -> list[str] | None:
    """Hoist a legacy ``enabled_mcp_tools`` field out of ``metadata.json``.

    Returns the migrated value (or ``None`` if nothing to migrate). After
    migration the sidecar exists and ``metadata.json`` no longer contains
    ``enabled_mcp_tools``. Safe to call repeatedly.
    """
    meta_path = _metadata_path(colony_name)
    if not meta_path.exists():
        return None
    try:
        data = json.loads(meta_path.read_text(encoding="utf-8"))
    except (json.JSONDecodeError, OSError):
        logger.warning("Could not read metadata.json during tools migration: %s", colony_name)
        return None
    if not isinstance(data, dict) or "enabled_mcp_tools" not in data:
        return None

    raw = data.pop("enabled_mcp_tools")
    enabled: list[str] | None
    if raw is None:
        enabled = None
    elif isinstance(raw, list) and all(isinstance(x, str) for x in raw):
        enabled = raw
    else:
        logger.warning(
            "Legacy enabled_mcp_tools on colony %s had unexpected shape %r; dropping",
            colony_name,
            raw,
        )
        enabled = None

    # Sidecar first so a partial failure leaves the config recoverable.
    _atomic_write_json(
        tools_config_path(colony_name),
        {
            "enabled_mcp_tools": enabled,
            "updated_at": datetime.now(UTC).isoformat(),
        },
    )
    _atomic_write_json(meta_path, data)
    logger.info(
        "Migrated enabled_mcp_tools for colony %s from metadata.json to tools.json",
        colony_name,
    )
    return enabled


def load_colony_tools_config(colony_name: str) -> list[str] | None:
    """Return the colony's MCP tool allowlist, or ``None`` for default-allow.

    Order of resolution:
    1. ``tools.json`` sidecar (authoritative).
    2. Legacy ``metadata.json`` field (migrated and deleted on first read).
    3. ``None`` — default "allow every MCP tool".
    """
    path = tools_config_path(colony_name)
    if path.exists():
        try:
            data = json.loads(path.read_text(encoding="utf-8"))
        except (json.JSONDecodeError, OSError):
            logger.warning("Invalid %s; treating as default-allow", path)
            return None
        if not isinstance(data, dict):
            return None
        raw = data.get("enabled_mcp_tools")
        if raw is None:
            return None
        if isinstance(raw, list) and all(isinstance(x, str) for x in raw):
            return raw
        logger.warning("Unexpected enabled_mcp_tools shape in %s; ignoring", path)
        return None

    return _migrate_from_metadata_if_needed(colony_name)


def update_colony_tools_config(
    colony_name: str,
    enabled_mcp_tools: list[str] | None,
) -> list[str] | None:
    """Persist a colony's MCP allowlist to ``tools.json``.

    Raises ``FileNotFoundError`` if the colony's directory is missing.
    """
    colony_dir = COLONIES_DIR / colony_name
    if not colony_dir.exists():
        raise FileNotFoundError(f"Colony directory not found: {colony_name}")
    _atomic_write_json(
        tools_config_path(colony_name),
        {
            "enabled_mcp_tools": enabled_mcp_tools,
            "updated_at": datetime.now(UTC).isoformat(),
        },
    )
    return enabled_mcp_tools
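The three allowlist states, spelled out as a usage sketch (colony and tool names are illustrative):

update_colony_tools_config("research", None)           # default: every MCP tool allowed
update_colony_tools_config("research", [])             # explicitly disable all MCP tools
update_colony_tools_config("research", ["read_file"])  # only read_file passes the filter

enabled = load_colony_tools_config("research")

def tool_allowed(name: str) -> bool:
    # The predicate the schema implies: None means default-allow.
    return enabled is None or name in enabled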
@@ -42,7 +42,9 @@ def _open_event_log() -> IO[str] | None:
|
||||
return None
|
||||
raw = _DEBUG_EVENTS_RAW
|
||||
if raw.lower() in ("1", "true", "full"):
|
||||
log_dir = Path.home() / ".hive" / "event_logs"
|
||||
from framework.config import HIVE_HOME
|
||||
|
||||
log_dir = HIVE_HOME / "event_logs"
|
||||
else:
|
||||
log_dir = Path(raw)
|
||||
log_dir.mkdir(parents=True, exist_ok=True)
|
||||
@@ -108,14 +110,19 @@ class EventType(StrEnum):
|
||||
# Judge decisions (implicit judge in event loop nodes)
|
||||
JUDGE_VERDICT = "judge_verdict"
|
||||
|
||||
# Output tracking
|
||||
OUTPUT_KEY_SET = "output_key_set"
|
||||
|
||||
# Retry / edge tracking
|
||||
# Retry tracking
|
||||
NODE_RETRY = "node_retry"
|
||||
EDGE_TRAVERSED = "edge_traversed"
|
||||
|
||||
# Worker agent lifecycle (event-driven graph execution)
|
||||
# Stream-health observability. Split from NODE_RETRY so the UI can
|
||||
# distinguish "slow TTFT on a huge context" (healthy, just slow) from
|
||||
# "stream went silent mid-generation" (probable stall) from "we nudged
|
||||
# the model to continue" (recovery), which NODE_RETRY used to conflate.
|
||||
STREAM_TTFT_EXCEEDED = "stream_ttft_exceeded"
|
||||
STREAM_INACTIVE = "stream_inactive"
|
||||
STREAM_NUDGE_SENT = "stream_nudge_sent"
|
||||
TOOL_CALL_REPLAY_DETECTED = "tool_call_replay_detected"
|
||||
|
||||
# Worker agent lifecycle
|
||||
WORKER_COMPLETED = "worker_completed"
|
||||
WORKER_FAILED = "worker_failed"
|
||||
|
||||
@@ -135,17 +142,15 @@ class EventType(StrEnum):
|
||||
# Execution resurrection (auto-restart on non-fatal failure)
|
||||
EXECUTION_RESURRECTED = "execution_resurrected"
|
||||
|
||||
# Graph lifecycle (session manager → frontend)
|
||||
WORKER_GRAPH_LOADED = "worker_graph_loaded"
|
||||
# Colony lifecycle (session manager → frontend)
|
||||
WORKER_COLONY_LOADED = "worker_colony_loaded"
|
||||
# Queen create_colony tool finished forking; carries colony_name +
|
||||
# path so the frontend can render a system message linking to the
|
||||
# new colony page at /colony/{colony_name}.
|
||||
COLONY_CREATED = "colony_created"
|
||||
CREDENTIALS_REQUIRED = "credentials_required"
|
||||
|
||||
# Draft graph (planning phase — lightweight graph preview)
|
||||
DRAFT_GRAPH_UPDATED = "draft_graph_updated"
|
||||
|
||||
# Flowchart map updated (after reconciliation with runtime graph)
|
||||
FLOWCHART_MAP_UPDATED = "flowchart_map_updated"
|
||||
|
||||
# Queen phase changes (building <-> staging <-> running)
|
||||
# Queen phase changes (working <-> reviewing)
|
||||
QUEEN_PHASE_CHANGED = "queen_phase_changed"
|
||||
|
||||
# Queen identity — which queen profile was selected for this session
|
||||
@@ -162,6 +167,14 @@ class EventType(StrEnum):
|
||||
TRIGGER_REMOVED = "trigger_removed"
|
||||
TRIGGER_UPDATED = "trigger_updated"
|
||||
|
||||
# Task system lifecycle (per-list diffs streamed to the UI)
|
||||
TASK_CREATED = "task_created"
|
||||
TASK_UPDATED = "task_updated"
|
||||
TASK_DELETED = "task_deleted"
|
||||
TASK_LIST_RESET = "task_list_reset"
|
||||
TASK_LIST_REATTACH_MISMATCH = "task_list_reattach_mismatch"
|
||||
COLONY_TEMPLATE_ASSIGNMENT = "colony_template_assignment"
|
||||
|
||||
|
||||
@dataclass
|
||||
class AgentEvent:
|
||||
@@ -174,7 +187,7 @@ class AgentEvent:
|
||||
data: dict[str, Any] = field(default_factory=dict)
|
||||
timestamp: datetime = field(default_factory=datetime.now)
|
||||
correlation_id: str | None = None # For tracking related events
|
||||
graph_id: str | None = None # Which graph emitted this event (multi-graph sessions)
|
||||
colony_id: str | None = None # Which colony emitted this event
|
||||
run_id: str | None = None # Unique ID per trigger() invocation — used for run dividers
|
||||
|
||||
def to_dict(self) -> dict:
|
||||
@@ -187,7 +200,7 @@ class AgentEvent:
|
||||
"data": self.data,
|
||||
"timestamp": self.timestamp.isoformat(),
|
||||
"correlation_id": self.correlation_id,
|
||||
"graph_id": self.graph_id,
|
||||
"colony_id": self.colony_id,
|
||||
}
|
||||
if self.run_id is not None:
|
||||
d["run_id"] = self.run_id
|
||||
@@ -208,7 +221,7 @@ class Subscription:
|
||||
filter_stream: str | None = None # Only receive events from this stream
|
||||
filter_node: str | None = None # Only receive events from this node
|
||||
filter_execution: str | None = None # Only receive events from this execution
|
||||
filter_graph: str | None = None # Only receive events from this graph
|
||||
filter_colony: str | None = None # Only receive events from this colony
|
||||
|
||||
|
||||
class EventBus:
|
||||
@@ -390,7 +403,7 @@ class EventBus:
|
||||
filter_stream: str | None = None,
|
||||
filter_node: str | None = None,
|
||||
filter_execution: str | None = None,
|
||||
filter_graph: str | None = None,
|
||||
filter_colony: str | None = None,
|
||||
) -> str:
|
||||
"""
|
||||
Subscribe to events.
|
||||
@@ -401,7 +414,7 @@ class EventBus:
|
||||
filter_stream: Only receive events from this stream
|
||||
filter_node: Only receive events from this node
|
||||
filter_execution: Only receive events from this execution
|
||||
filter_graph: Only receive events from this graph
|
||||
filter_colony: Only receive events from this colony
|
||||
|
||||
Returns:
|
||||
Subscription ID (use to unsubscribe)
|
||||
@@ -416,7 +429,7 @@ class EventBus:
|
||||
filter_stream=filter_stream,
|
||||
filter_node=filter_node,
|
||||
filter_execution=filter_execution,
|
||||
filter_graph=filter_graph,
|
||||
filter_colony=filter_colony,
|
||||
)
|
||||
|
||||
self._subscriptions[sub_id] = subscription
|
||||
@@ -452,11 +465,7 @@ class EventBus:
|
||||
# iteration values. Without this, live SSE would use raw iterations
|
||||
# while events.jsonl would use offset iterations, causing ID collisions
|
||||
# on the frontend when replaying after cold resume.
|
||||
if (
|
||||
self._session_log_iteration_offset
|
||||
and isinstance(event.data, dict)
|
||||
and "iteration" in event.data
|
||||
):
|
||||
if self._session_log_iteration_offset and isinstance(event.data, dict) and "iteration" in event.data:
|
||||
offset = self._session_log_iteration_offset
|
||||
event.data = {**event.data, "iteration": event.data["iteration"] + offset}
|
||||
|
||||
@@ -518,23 +527,41 @@ class EventBus:
|
||||
if subscription.filter_execution and subscription.filter_execution != event.execution_id:
|
||||
return False
|
||||
|
||||
# Check graph filter
|
||||
if subscription.filter_graph and subscription.filter_graph != event.graph_id:
|
||||
# Check colony filter
|
||||
if subscription.filter_colony and subscription.filter_colony != event.colony_id:
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
# Per-handler wall-clock timeout. A subscriber that deadlocks or
|
||||
# blocks on slow I/O would otherwise freeze the publisher (and via
|
||||
# ``await publish(...)`` any coroutine that emits events) indefinitely.
|
||||
# 15 s is generous for legitimate handlers and cheap to tune later.
|
||||
_HANDLER_TIMEOUT_SECONDS: float = 15.0
|
||||
|
||||
async def _execute_handlers(
|
||||
self,
|
||||
event: AgentEvent,
|
||||
handlers: list[EventHandler],
|
||||
) -> None:
|
||||
"""Execute handlers concurrently with rate limiting."""
|
||||
"""Execute handlers concurrently with rate limiting + hard timeout."""
|
||||
|
||||
async def run_handler(handler: EventHandler) -> None:
|
||||
async with self._semaphore:
|
||||
try:
|
||||
await handler(event)
|
||||
await asyncio.wait_for(
|
||||
handler(event),
|
||||
timeout=self._HANDLER_TIMEOUT_SECONDS,
|
||||
)
|
||||
except TimeoutError:
|
||||
handler_name = getattr(handler, "__qualname__", repr(handler))
|
||||
logger.error(
|
||||
"EventBus handler %s exceeded %.0fs on event %s — dropping; "
|
||||
"fix the handler or the publisher will stall",
|
||||
handler_name,
|
||||
self._HANDLER_TIMEOUT_SECONDS,
|
||||
getattr(event.type, "name", event.type),
|
||||
)
|
||||
except Exception:
|
||||
logger.exception(f"Handler error for {event.type}")
|
||||
|
||||
@@ -792,16 +819,28 @@ class EventBus:
|
||||
input_tokens: int,
|
||||
output_tokens: int,
|
||||
cached_tokens: int = 0,
|
||||
cache_creation_tokens: int = 0,
|
||||
cost_usd: float = 0.0,
|
||||
execution_id: str | None = None,
|
||||
iteration: int | None = None,
|
||||
) -> None:
|
||||
"""Emit LLM turn completion with stop reason and model metadata."""
|
||||
"""Emit LLM turn completion with stop reason and model metadata.
|
||||
|
||||
``cached_tokens`` and ``cache_creation_tokens`` are subsets of
|
||||
``input_tokens`` (already inside provider ``prompt_tokens``).
|
||||
Subscribers should display them, not add them to a total.
|
||||
|
||||
``cost_usd`` is the USD cost for this turn when known (Anthropic,
|
||||
OpenAI, OpenRouter). 0.0 means unreported (not free).
|
||||
"""
|
||||
data: dict = {
|
||||
"stop_reason": stop_reason,
|
||||
"model": model,
|
||||
"input_tokens": input_tokens,
|
||||
"output_tokens": output_tokens,
|
||||
"cached_tokens": cached_tokens,
|
||||
"cache_creation_tokens": cache_creation_tokens,
|
||||
"cost_usd": cost_usd,
|
||||
}
|
||||
if iteration is not None:
|
||||
data["iteration"] = iteration
|
||||
@@ -897,24 +936,22 @@ class EventBus:
|
||||
self,
|
||||
stream_id: str,
|
||||
node_id: str,
|
||||
prompt: str = "",
|
||||
execution_id: str | None = None,
|
||||
options: list[str] | None = None,
|
||||
questions: list[dict] | None = None,
|
||||
) -> None:
|
||||
"""Emit a user-input request for interactive queen turns.
|
||||
|
||||
Args:
|
||||
options: Optional predefined choices for the user (1-3 items).
|
||||
The frontend appends an "Other" free-text option
|
||||
automatically.
|
||||
questions: Optional list of question dicts for multi-question
|
||||
batches (from ask_user_multiple). Each dict has id,
|
||||
prompt, and optional options.
|
||||
questions: Optional list of question dicts from ``ask_user``.
|
||||
Each dict has ``id``, ``prompt``, and optional ``options``
|
||||
(2-3 predefined choices). The frontend renders the
|
||||
QuestionWidget for a single-entry list and the
|
||||
MultiQuestionWidget for 2+ entries. Free-text asks (no
|
||||
options) stream the prompt separately as a chat message;
|
||||
auto-block turns have no questions at all and fall back
|
||||
to the normal text input.
|
||||
"""
|
||||
data: dict[str, Any] = {"prompt": prompt}
|
||||
if options:
|
||||
data["options"] = options
|
||||
data: dict[str, Any] = {}
|
||||
if questions:
|
||||
data["questions"] = questions
|
||||
await self.publish(
|
||||
@@ -1029,24 +1066,6 @@ class EventBus:
|
||||
)
|
||||
)
|
||||
|
||||
async def emit_output_key_set(
|
||||
self,
|
||||
stream_id: str,
|
||||
node_id: str,
|
||||
key: str,
|
||||
execution_id: str | None = None,
|
||||
) -> None:
|
||||
"""Emit output key set event."""
|
||||
await self.publish(
|
||||
AgentEvent(
|
||||
type=EventType.OUTPUT_KEY_SET,
|
||||
stream_id=stream_id,
|
||||
node_id=node_id,
|
||||
execution_id=execution_id,
|
||||
data={"key": key},
|
||||
)
|
||||
)
|
||||
|
||||
async def emit_node_retry(
|
||||
self,
|
||||
stream_id: str,
|
||||
@@ -1071,25 +1090,90 @@ class EventBus:
|
||||
)
|
||||
)
|
||||
|
||||
async def emit_edge_traversed(
|
||||
async def emit_stream_ttft_exceeded(
|
||||
self,
|
||||
stream_id: str,
|
||||
source_node: str,
|
||||
target_node: str,
|
||||
edge_condition: str = "",
|
||||
node_id: str,
|
||||
ttft_seconds: float,
|
||||
limit_seconds: float,
|
||||
execution_id: str | None = None,
|
||||
) -> None:
|
||||
"""Emit edge traversed event."""
|
||||
"""Emit when a stream stayed silent past the TTFT budget (no first event)."""
|
||||
await self.publish(
|
||||
AgentEvent(
|
||||
type=EventType.EDGE_TRAVERSED,
|
||||
type=EventType.STREAM_TTFT_EXCEEDED,
|
||||
stream_id=stream_id,
|
||||
node_id=source_node,
|
||||
node_id=node_id,
|
||||
execution_id=execution_id,
|
||||
data={
|
||||
"source_node": source_node,
|
||||
"target_node": target_node,
|
||||
"edge_condition": edge_condition,
|
||||
"ttft_seconds": ttft_seconds,
|
||||
"limit_seconds": limit_seconds,
|
||||
},
|
||||
)
|
||||
)
|
||||
|
||||
async def emit_stream_inactive(
|
||||
self,
|
||||
stream_id: str,
|
||||
node_id: str,
|
||||
idle_seconds: float,
|
||||
limit_seconds: float,
|
||||
execution_id: str | None = None,
|
||||
) -> None:
|
||||
"""Emit when a stream that had produced events went silent past budget."""
|
||||
await self.publish(
|
||||
AgentEvent(
|
||||
type=EventType.STREAM_INACTIVE,
|
||||
stream_id=stream_id,
|
||||
node_id=node_id,
|
||||
execution_id=execution_id,
|
||||
data={
|
||||
"idle_seconds": idle_seconds,
|
||||
"limit_seconds": limit_seconds,
|
||||
},
|
||||
)
|
||||
)
|
||||
|
||||
async def emit_stream_nudge_sent(
|
||||
self,
|
||||
stream_id: str,
|
||||
node_id: str,
|
||||
reason: str,
|
||||
nudge_count: int,
|
||||
execution_id: str | None = None,
|
||||
) -> None:
|
||||
"""Emit when the continue-nudge was injected (recovery, not retry)."""
|
||||
await self.publish(
|
||||
AgentEvent(
|
||||
type=EventType.STREAM_NUDGE_SENT,
|
||||
stream_id=stream_id,
|
||||
node_id=node_id,
|
||||
execution_id=execution_id,
|
||||
data={
|
||||
"reason": reason,
|
||||
"nudge_count": nudge_count,
|
||||
},
|
||||
)
|
||||
)
|
||||
|
||||
async def emit_tool_call_replay_detected(
|
||||
self,
|
||||
stream_id: str,
|
||||
node_id: str,
|
||||
tool_name: str,
|
||||
prior_seq: int,
|
||||
execution_id: str | None = None,
|
||||
) -> None:
|
||||
"""Emit when the model is about to re-execute a prior successful call."""
|
||||
await self.publish(
|
||||
AgentEvent(
|
||||
type=EventType.TOOL_CALL_REPLAY_DETECTED,
|
||||
stream_id=stream_id,
|
||||
node_id=node_id,
|
||||
execution_id=execution_id,
|
||||
data={
|
||||
"tool_name": tool_name,
|
||||
"prior_seq": prior_seq,
|
||||
},
|
||||
)
|
||||
)
|
||||
@@ -1208,15 +1292,25 @@ class EventBus:
|
||||
reason: str = "",
|
||||
context: str = "",
|
||||
execution_id: str | None = None,
|
||||
request_id: str | None = None,
|
||||
) -> None:
|
||||
"""Emit escalation requested event (agent wants queen)."""
|
||||
"""Emit escalation requested event (agent wants queen).
|
||||
|
||||
``request_id`` is a caller-supplied handle used by the queen to
|
||||
address its reply back to the specific escalation. When omitted the
|
||||
event still fires but the queen cannot route a targeted reply.
|
||||
"""
|
||||
await self.publish(
|
||||
AgentEvent(
|
||||
type=EventType.ESCALATION_REQUESTED,
|
||||
stream_id=stream_id,
|
||||
node_id=node_id,
|
||||
execution_id=execution_id,
|
||||
data={"reason": reason, "context": context},
|
||||
data={
|
||||
"request_id": request_id,
|
||||
"reason": reason,
|
||||
"context": context,
|
||||
},
|
||||
)
|
||||
)
|
||||
|
||||
@@ -1297,7 +1391,7 @@ class EventBus:
|
||||
stream_id: str | None = None,
|
||||
node_id: str | None = None,
|
||||
execution_id: str | None = None,
|
||||
graph_id: str | None = None,
|
||||
colony_id: str | None = None,
|
||||
timeout: float | None = None,
|
||||
) -> AgentEvent | None:
|
||||
"""
|
||||
@@ -1308,7 +1402,7 @@ class EventBus:
|
||||
stream_id: Filter by stream
|
||||
node_id: Filter by node
|
||||
execution_id: Filter by execution
|
||||
graph_id: Filter by graph
|
||||
colony_id: Filter by colony
|
||||
timeout: Maximum time to wait (seconds)
|
||||
|
||||
Returns:
|
||||
@@ -1329,7 +1423,7 @@ class EventBus:
|
||||
filter_stream=stream_id,
|
||||
filter_node=node_id,
|
||||
filter_execution=execution_id,
|
||||
filter_graph=graph_id,
|
||||
filter_colony=colony_id,
|
||||
)
|
||||
|
||||
try:
|
||||
|
||||
@@ -16,20 +16,20 @@ from collections import OrderedDict
|
||||
from collections.abc import Callable
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime
|
||||
from typing import TYPE_CHECKING, Any
|
||||
from typing import TYPE_CHECKING, Any, Literal
|
||||
|
||||
from framework.orchestrator.checkpoint_config import CheckpointConfig
|
||||
from framework.orchestrator.orchestrator import ExecutionResult, Orchestrator
|
||||
from framework.host.event_bus import EventBus
|
||||
from framework.host.shared_state import IsolationLevel, SharedBufferManager
|
||||
from framework.host.stream_runtime import StreamDecisionTracker, StreamRuntimeAdapter
|
||||
from framework.orchestrator.checkpoint_config import CheckpointConfig
|
||||
from framework.orchestrator.orchestrator import ExecutionResult, Orchestrator
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from framework.orchestrator.edge import GraphSpec
|
||||
from framework.orchestrator.goal import Goal
|
||||
from framework.llm.provider import LLMProvider, Tool
|
||||
from framework.host.event_bus import AgentEvent
|
||||
from framework.host.outcome_aggregator import OutcomeAggregator
|
||||
from framework.llm.provider import LLMProvider, Tool
|
||||
from framework.orchestrator.edge import GraphSpec
|
||||
from framework.orchestrator.goal import Goal
|
||||
from framework.storage.concurrent import ConcurrentStorage
|
||||
from framework.storage.session_store import SessionStore
|
||||
|
||||
@@ -48,6 +48,8 @@ class ExecutionAlreadyRunningError(RuntimeError):
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
CancelExecutionResult = Literal["cancelled", "cancelling", "not_found"]
|
||||
|
||||
|
||||
class GraphScopedEventBus(EventBus):
|
||||
"""Proxy that stamps ``graph_id`` on every published event.
|
||||
@@ -130,7 +132,7 @@ class ExecutionContext:
|
||||
run_id: str | None = None # Unique ID per trigger() invocation
|
||||
started_at: datetime = field(default_factory=datetime.now)
|
||||
completed_at: datetime | None = None
|
||||
status: str = "pending" # pending, running, completed, failed, paused
|
||||
status: str = "pending" # pending, running, cancelling, completed, failed, paused, cancelled
|
||||
|
||||
|
||||
class ExecutionManager:
|
||||
@@ -172,7 +174,7 @@ class ExecutionManager:
|
||||
goal: "Goal",
|
||||
state_manager: SharedBufferManager,
|
||||
storage: "ConcurrentStorage",
|
||||
outcome_aggregator: "OutcomeAggregator",
|
||||
outcome_aggregator: "OutcomeAggregator | None" = None,
|
||||
event_bus: "EventBus | None" = None,
|
||||
llm: "LLMProvider | None" = None,
|
||||
tools: list["Tool"] | None = None,
|
||||
@@ -192,11 +194,6 @@ class ExecutionManager:
|
||||
context_warn_ratio: float | None = None,
|
||||
batch_init_nudge: str | None = None,
|
||||
dynamic_memory_provider_factory: Callable[[str], Callable[[], str] | None] | None = None,
|
||||
colony_memory_dir: Any = None,
|
||||
colony_worker_sessions_dir: Any = None,
|
||||
colony_recall_cache: dict[str, str] | None = None,
|
||||
colony_reflect_llm: Any = None,
|
||||
execution_middleware: list | None = None,
|
||||
):
|
||||
"""
|
||||
Initialize execution stream.
|
||||
@@ -252,11 +249,6 @@ class ExecutionManager:
|
||||
self._context_warn_ratio: float | None = context_warn_ratio
|
||||
self._batch_init_nudge: str | None = batch_init_nudge
|
||||
self._dynamic_memory_provider_factory = dynamic_memory_provider_factory
|
||||
self._colony_memory_dir = colony_memory_dir
|
||||
self._colony_worker_sessions_dir = colony_worker_sessions_dir
|
||||
self._colony_recall_cache = colony_recall_cache
|
||||
self._colony_reflect_llm = colony_reflect_llm
|
||||
self._execution_middleware = execution_middleware or []
|
||||
|
||||
_es_logger = logging.getLogger(__name__)
|
||||
if protocols_prompt:
|
||||
@@ -275,7 +267,6 @@ class ExecutionManager:
|
||||
self._runtime = StreamDecisionTracker(
|
||||
stream_id=stream_id,
|
||||
storage=storage,
|
||||
outcome_aggregator=outcome_aggregator,
|
||||
)
|
||||
|
||||
# Execution tracking
|
||||
@@ -326,6 +317,22 @@ class ExecutionManager:
|
||||
"""Return IDs of all currently active executions."""
|
||||
return list(self._active_executions.keys())
|
||||
|
||||
def _get_blocking_execution_ids_locked(self) -> list[str]:
|
||||
"""Return executions that still block a replacement from starting.
|
||||
|
||||
An execution continues to block replacement until its task has
|
||||
terminated and the task's final cleanup has removed its bookkeeping.
|
||||
This is intentional: a timed-out cancellation does not mean the old
|
||||
task is harmless. If it is still alive, it can still write shared
|
||||
session state, so letting a replacement start would guarantee
|
||||
overlapping mutations on the same session.
|
||||
"""
|
||||
blocking_ids: list[str] = list(self._active_executions.keys())
|
||||
for execution_id, task in self._execution_tasks.items():
|
||||
if not task.done() and execution_id not in self._active_executions:
|
||||
blocking_ids.append(execution_id)
|
||||
return blocking_ids
|
||||
|
||||
@property
|
||||
def agent_idle_seconds(self) -> float:
|
||||
"""Seconds since the last agent activity (LLM call, tool call, node transition).
|
||||
@@ -407,15 +414,22 @@ class ExecutionManager:
|
||||
|
||||
async def stop(self) -> None:
|
||||
"""Stop the execution stream and cancel active executions."""
|
||||
if not self._running:
|
||||
return
|
||||
async with self._lock:
|
||||
if not self._running:
|
||||
return
|
||||
|
||||
self._running = False
|
||||
self._running = False
|
||||
|
||||
# Cancel all active executions
|
||||
tasks_to_wait = []
|
||||
for _, task in self._execution_tasks.items():
|
||||
if not task.done():
|
||||
# Cancel all active executions, but keep bookkeeping until each
|
||||
# task reaches its own cleanup path.
|
||||
tasks_to_wait: list[asyncio.Task] = []
|
||||
for execution_id, task in self._execution_tasks.items():
|
||||
if task.done():
|
||||
continue
|
||||
ctx = self._active_executions.get(execution_id)
|
||||
if ctx is not None:
|
||||
ctx.status = "cancelling"
|
||||
self._cancel_reasons.setdefault(execution_id, "Execution cancelled")
|
||||
task.cancel()
|
||||
tasks_to_wait.append(task)
|
||||
|
||||
@@ -429,9 +443,6 @@ class ExecutionManager:
|
||||
len(pending),
|
||||
)
|
||||
|
||||
self._execution_tasks.clear()
|
||||
self._active_executions.clear()
|
||||
|
||||
logger.info(f"ExecutionStream '{self.stream_id}' stopped")
|
||||
|
||||
# Emit stream stopped event
|
||||
@@ -463,9 +474,7 @@ class ExecutionManager:
|
||||
for executor in self._active_executors.values():
|
||||
node = executor.node_registry.get(node_id)
|
||||
if node is not None and hasattr(node, "inject_event"):
|
||||
await node.inject_event(
|
||||
content, is_client_input=is_client_input, image_content=image_content
|
||||
)
|
||||
await node.inject_event(content, is_client_input=is_client_input, image_content=image_content)
|
||||
return True
|
||||
return False
|
||||
|
||||
@@ -582,12 +591,16 @@ class ExecutionManager:
|
||||
)
|
||||
|
||||
async with self._lock:
|
||||
if not self._running:
|
||||
raise RuntimeError(f"ExecutionStream '{self.stream_id}' is not running")
|
||||
|
||||
blocking_ids = self._get_blocking_execution_ids_locked()
|
||||
if blocking_ids:
|
||||
raise ExecutionAlreadyRunningError(self.stream_id, blocking_ids)
|
||||
|
||||
self._active_executions[execution_id] = ctx
|
||||
self._completion_events[execution_id] = asyncio.Event()
|
||||
|
||||
# Start execution task
|
||||
task = asyncio.create_task(self._run_execution(ctx))
|
||||
self._execution_tasks[execution_id] = task
|
||||
self._execution_tasks[execution_id] = asyncio.create_task(self._run_execution(ctx))
|
||||
|
||||
logger.debug(f"Queued execution {execution_id} for stream {self.stream_id}")
|
||||
return execution_id
|
||||
@@ -680,9 +693,7 @@ class ExecutionManager:
|
||||
if self._runtime_log_store:
|
||||
from framework.tracker.runtime_logger import RuntimeLogger
|
||||
|
||||
runtime_logger = RuntimeLogger(
|
||||
store=self._runtime_log_store, agent_id=self.graph.id
|
||||
)
|
||||
runtime_logger = RuntimeLogger(store=self._runtime_log_store, agent_id=self.graph.id)
|
||||
|
||||
# Derive storage from session_store (graph-specific for secondary
|
||||
# graphs) so that all files — conversations, state, checkpoints,
|
||||
@@ -706,24 +717,6 @@ class ExecutionManager:
        # the executor's session_state (memory + resume_from) carries
        # forward so the next attempt resumes at the failed node.
        while True:
            # Run execution middleware (per-attempt, including resurrections)
            if self._execution_middleware:
                from framework.pipeline.execution_middleware import (
                    ExecutionContext as _ExecMwCtx,
                )

                mw_ctx = _ExecMwCtx(
                    execution_id=execution_id,
                    stream_id=self.stream_id,
                    run_id=ctx.run_id or "",
                    input_data=_current_input_data or {},
                    session_state=_current_session_state,
                    attempt=_resurrection_count + 1,
                )
                for mw in self._execution_middleware:
                    mw_ctx = await mw.on_execution_start(mw_ctx)
                _current_input_data = mw_ctx.input_data

            # Create executor for this execution.
            executor = Orchestrator(
                runtime=runtime_adapter,
@@ -750,10 +743,6 @@ class ExecutionManager:
                    if self._dynamic_memory_provider_factory is not None
                    else None
                ),
                colony_memory_dir=self._colony_memory_dir,
                colony_worker_sessions_dir=self._colony_worker_sessions_dir,
                colony_recall_cache=self._colony_recall_cache,
                colony_reflect_llm=self._colony_reflect_llm,
            )
            # Track executor so inject_input() can reach EventLoopNode instances
            self._active_executors[execution_id] = executor
@@ -920,9 +909,7 @@ class ExecutionManager:
            if has_result and result.paused_at:
                await self._write_session_state(execution_id, ctx, result=result)
            else:
                await self._write_session_state(
                    execution_id, ctx, error="Execution cancelled"
                )
                await self._write_session_state(execution_id, ctx, error="Execution cancelled")

            # Emit SSE event so the frontend knows the execution stopped.
            # The executor does NOT emit on CancelledError, so there is no
@@ -1222,7 +1209,7 @@ class ExecutionManager:
        """Get execution context."""
        return self._active_executions.get(execution_id)

    async def cancel_execution(self, execution_id: str, *, reason: str | None = None) -> bool:
    async def cancel_execution(self, execution_id: str, *, reason: str | None = None) -> CancelExecutionResult:
        """
        Cancel a running execution.

@@ -1233,33 +1220,38 @@ class ExecutionManager:
            provided, defaults to "Execution cancelled".

        Returns:
            True if cancelled, False if not found
            "cancelled" if the task fully exited within the grace period,
            "cancelling" if cancellation was requested but the task is still
            shutting down, or "not_found" if no active task exists.
        """
        task = self._execution_tasks.get(execution_id)
        if task and not task.done():
        async with self._lock:
            task = self._execution_tasks.get(execution_id)
            if task is None or task.done():
                return "not_found"

            # Store the reason so the CancelledError handler can use it
            # when emitting the pause/fail event.
            self._cancel_reasons[execution_id] = reason or "Execution cancelled"
            ctx = self._active_executions.get(execution_id)
            if ctx is not None:
                ctx.status = "cancelling"
            task.cancel()
            # Wait briefly for the task to finish. Don't block indefinitely —
            # the task may be stuck in a long LLM API call that doesn't
            # respond to cancellation quickly.
            done, _ = await asyncio.wait({task}, timeout=5.0)
            if not done:
                # Task didn't finish within timeout — clean up bookkeeping now
                # so the session doesn't think it still has running executions.
                # The task will continue winding down in the background and its
                # finally block will harmlessly pop already-removed keys.
                logger.warning(
                    "Execution %s did not finish within cancel timeout; force-cleaning bookkeeping",
                    execution_id,
                )
                async with self._lock:
                    self._active_executions.pop(execution_id, None)
                    self._execution_tasks.pop(execution_id, None)
                    self._active_executors.pop(execution_id, None)
            return True
        return False

        # Wait briefly for the task to finish. Don't block indefinitely —
        # the task may be stuck in a long LLM API call that doesn't
        # respond to cancellation quickly.
        done, _ = await asyncio.wait({task}, timeout=5.0)
        if not done:
            # Keep bookkeeping in place until the task's own finally block runs.
            # We intentionally do not add deferred cleanup keyed by execution_id
            # here because resumed executions reuse the same id; a delayed pop
            # could otherwise delete bookkeeping that belongs to the new run.
            logger.warning(
                "Execution %s did not finish within cancel timeout; leaving bookkeeping in place until task exit",
                execution_id,
            )
            return "cancelling"
        return "cancelled"

    # === STATS AND MONITORING ===

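With the tri-state `CancelExecutionResult`, callers must distinguish a clean stop from a slow wind-down instead of receiving a bare boolean. A minimal caller sketch, assuming a hypothetical HTTP-style handler; only `cancel_execution()` and its three result strings come from the diff above, everything else is illustrative:

# Hypothetical caller sketch. `manager` is assumed to be an ExecutionManager;
# the dict-shaped responses stand in for whatever web framework is in use.
async def handle_cancel(manager, execution_id: str, reason: str | None = None):
    result = await manager.cancel_execution(execution_id, reason=reason)
    if result == "not_found":
        return {"status": 404, "body": {"error": "no active execution"}}
    if result == "cancelling":
        # The task is still winding down; bookkeeping stays in place until
        # its finally block runs, so callers should poll rather than retry.
        return {"status": 202, "body": {"state": "cancelling"}}
    return {"status": 200, "body": {"state": "cancelled"}}
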
@@ -0,0 +1,9 @@
"""State isolation level enum."""

from enum import StrEnum


class IsolationLevel(StrEnum):
    ISOLATED = "isolated"
    SHARED = "shared"
    SYNCHRONIZED = "synchronized"
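Because `IsolationLevel` derives from `StrEnum`, each member compares and formats as its plain string value, which is what lets callers that still pass raw strings keep working. A quick illustration of the standard-library behaviour (not project code):

from enum import StrEnum

class IsolationLevel(StrEnum):
    ISOLATED = "isolated"
    SHARED = "shared"
    SYNCHRONIZED = "synchronized"

assert IsolationLevel.SHARED == "shared"           # compares as a str
assert f"{IsolationLevel.ISOLATED}" == "isolated"  # formats as a str
assert IsolationLevel("synchronized") is IsolationLevel.SYNCHRONIZED
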
@@ -1,459 +1,21 @@
"""
Outcome Aggregator - Aggregates outcomes across streams for goal evaluation.
"""Stub — outcome aggregator removed in colony refactor."""

The goal-driven nature of Hive means we need to track whether
concurrent executions collectively achieve the goal.
"""

import asyncio
import logging
from dataclasses import dataclass, field
from datetime import datetime
from typing import TYPE_CHECKING, Any

from framework.schemas.decision import Decision, Outcome

if TYPE_CHECKING:
    from framework.orchestrator.goal import Goal
    from framework.host.event_bus import EventBus

logger = logging.getLogger(__name__)


@dataclass
class CriterionStatus:
    """Status of a success criterion."""

    criterion_id: str
    description: str
    met: bool
    evidence: list[str] = field(default_factory=list)
    progress: float = 0.0  # 0.0 to 1.0
    last_updated: datetime = field(default_factory=datetime.now)


@dataclass
class ConstraintCheck:
    """Result of a constraint check."""

    constraint_id: str
    description: str
    violated: bool
    violation_details: str | None = None
    stream_id: str | None = None
    execution_id: str | None = None
    timestamp: datetime = field(default_factory=datetime.now)


@dataclass
class DecisionRecord:
    """Record of a decision for aggregation."""

    stream_id: str
    execution_id: str
    decision: Decision
    outcome: Outcome | None = None
    timestamp: datetime = field(default_factory=datetime.now)
from framework.schemas.goal import Goal


class OutcomeAggregator:
    """
    Aggregates outcomes across all execution streams for goal evaluation.

    Responsibilities:
    - Track all decisions across streams
    - Evaluate success criteria progress
    - Detect constraint violations
    - Provide unified goal progress metrics

    Example:
        aggregator = OutcomeAggregator(goal, event_bus)

        # Decisions are automatically recorded by StreamRuntime
        aggregator.record_decision(stream_id, execution_id, decision)
        aggregator.record_outcome(stream_id, execution_id, decision_id, outcome)

        # Evaluate goal progress
        progress = await aggregator.evaluate_goal_progress()
        print(f"Goal progress: {progress['overall_progress']:.1%}")
    """

    def __init__(
        self,
        goal: "Goal",
        event_bus: "EventBus | None" = None,
    ):
        """
        Initialize outcome aggregator.

        Args:
            goal: The goal to evaluate progress against
            event_bus: Optional event bus for publishing progress events
        """
        self.goal = goal
    def __init__(self, goal: Goal, event_bus=None):
        self._goal = goal
        self._event_bus = event_bus

        # Decision tracking
        self._decisions: list[DecisionRecord] = []
        self._decisions_by_id: dict[str, DecisionRecord] = {}
        self._lock = asyncio.Lock()
    def record_decision(self, **kwargs):
        pass

        # Criterion tracking
        self._criterion_status: dict[str, CriterionStatus] = {}
        self._initialize_criteria()
    def record_outcome(self, **kwargs):
        pass

        # Constraint tracking
        self._constraint_violations: list[ConstraintCheck] = []
    def evaluate_goal_progress(self):
        return {"progress": 0.0, "criteria_status": {}}

        # Metrics
        self._total_decisions = 0
        self._successful_outcomes = 0
        self._failed_outcomes = 0

    def _initialize_criteria(self) -> None:
        """Initialize criterion status from goal."""
        for criterion in self.goal.success_criteria:
            self._criterion_status[criterion.id] = CriterionStatus(
                criterion_id=criterion.id,
                description=criterion.description,
                met=False,
                progress=0.0,
            )

    # === DECISION RECORDING ===

    def record_decision(
        self,
        stream_id: str,
        execution_id: str,
        decision: Decision,
    ) -> None:
        """
        Record a decision from any stream.

        Args:
            stream_id: Which stream made the decision
            execution_id: Which execution
            decision: The decision made
        """
        record = DecisionRecord(
            stream_id=stream_id,
            execution_id=execution_id,
            decision=decision,
        )

        # Create unique key for lookup
        key = f"{stream_id}:{execution_id}:{decision.id}"
        self._decisions.append(record)
        self._decisions_by_id[key] = record
        self._total_decisions += 1

        logger.debug(f"Recorded decision {decision.id} from {stream_id}/{execution_id}")

    def record_outcome(
        self,
        stream_id: str,
        execution_id: str,
        decision_id: str,
        outcome: Outcome,
    ) -> None:
        """
        Record the outcome of a decision.

        Args:
            stream_id: Which stream
            execution_id: Which execution
            decision_id: Which decision
            outcome: The outcome
        """
        key = f"{stream_id}:{execution_id}:{decision_id}"
        record = self._decisions_by_id.get(key)

        if record:
            record.outcome = outcome

            if outcome.success:
                self._successful_outcomes += 1
            else:
                self._failed_outcomes += 1

        logger.debug(f"Recorded outcome for {decision_id}: success={outcome.success}")

    def record_constraint_violation(
        self,
        constraint_id: str,
        description: str,
        violation_details: str,
        stream_id: str | None = None,
        execution_id: str | None = None,
    ) -> None:
        """
        Record a constraint violation.

        Args:
            constraint_id: Which constraint was violated
            description: Constraint description
            violation_details: What happened
            stream_id: Which stream
            execution_id: Which execution
        """
        check = ConstraintCheck(
            constraint_id=constraint_id,
            description=description,
            violated=True,
            violation_details=violation_details,
            stream_id=stream_id,
            execution_id=execution_id,
        )

        self._constraint_violations.append(check)
        logger.warning(f"Constraint violation: {constraint_id} - {violation_details}")

        # Publish event if event bus available
        if self._event_bus and stream_id:
            asyncio.create_task(
                self._event_bus.emit_constraint_violation(
                    stream_id=stream_id,
                    execution_id=execution_id or "",
                    constraint_id=constraint_id,
                    description=violation_details,
                )
            )

    # === GOAL EVALUATION ===

    async def evaluate_goal_progress(self) -> dict[str, Any]:
        """
        Evaluate progress toward goal across all streams.

        Returns:
            {
                "overall_progress": 0.0-1.0,
                "criteria_status": {criterion_id: {...}},
                "constraint_violations": [...],
                "metrics": {...},
                "recommendation": "continue" | "adjust" | "complete"
            }
        """
        async with self._lock:
            result = {
                "overall_progress": 0.0,
                "criteria_status": {},
                "constraint_violations": [],
                "metrics": {},
                "recommendation": "continue",
            }

            # Evaluate each success criterion
            total_weight = 0.0
            met_weight = 0.0

            for criterion in self.goal.success_criteria:
                status = await self._evaluate_criterion(criterion)
                self._criterion_status[criterion.id] = status
                result["criteria_status"][criterion.id] = {
                    "description": status.description,
                    "met": status.met,
                    "progress": status.progress,
                    "evidence": status.evidence,
                }

                total_weight += criterion.weight
                if status.met:
                    met_weight += criterion.weight
                else:
                    # Partial credit based on progress
                    met_weight += criterion.weight * status.progress

            # Calculate overall progress
            if total_weight > 0:
                result["overall_progress"] = met_weight / total_weight

            # Include constraint violations
            result["constraint_violations"] = [
                {
                    "constraint_id": v.constraint_id,
                    "description": v.description,
                    "details": v.violation_details,
                    "stream_id": v.stream_id,
                    "timestamp": v.timestamp.isoformat(),
                }
                for v in self._constraint_violations
            ]

            # Add metrics
            result["metrics"] = {
                "total_decisions": self._total_decisions,
                "successful_outcomes": self._successful_outcomes,
                "failed_outcomes": self._failed_outcomes,
                "success_rate": (
                    self._successful_outcomes
                    / max(1, self._successful_outcomes + self._failed_outcomes)
                ),
                "streams_active": len({d.stream_id for d in self._decisions}),
                "executions_total": len({(d.stream_id, d.execution_id) for d in self._decisions}),
            }

            # Determine recommendation
            result["recommendation"] = self._get_recommendation(result)

            # Publish progress event
            if self._event_bus:
                # Get any stream ID for the event
                stream_ids = {d.stream_id for d in self._decisions}
                if stream_ids:
                    await self._event_bus.emit_goal_progress(
                        stream_id=list(stream_ids)[0],
                        progress=result["overall_progress"],
                        criteria_status=result["criteria_status"],
                    )

            return result

    async def _evaluate_criterion(self, criterion: Any) -> CriterionStatus:
        """
        Evaluate a single success criterion.
        This is a heuristic evaluation based on decision outcomes.
        More sophisticated evaluation can be added per criterion type.
        """
        status = CriterionStatus(
            criterion_id=criterion.id,
            description=criterion.description,
            met=False,
            progress=0.0,
            evidence=[],
        )

        # Guard: only apply this heuristic to success-rate criteria
        criterion_type = getattr(criterion, "type", "success_rate")
        if criterion_type != "success_rate":
            return status

        # Get relevant decisions (those mentioning this criterion or related intents)
        relevant_decisions = [
            d
            for d in self._decisions
            if criterion.id in str(d.decision.active_constraints)
            or self._is_related_to_criterion(d.decision, criterion)
        ]

        if not relevant_decisions:
            # No evidence yet
            return status

        # Calculate success rate for relevant decisions
        outcomes = [d.outcome for d in relevant_decisions if d.outcome is not None]
        if outcomes:
            success_count = sum(1 for o in outcomes if o.success)

            # Progress is computed as raw success rate of decision outcomes.
            status.progress = success_count / len(outcomes)

            # Add evidence
            for d in relevant_decisions[:5]:  # Limit evidence
                if d.outcome:
                    evidence = (
                        f"decision_id={d.decision.id}, "
                        f"intent={d.decision.intent}, "
                        f"result={'success' if d.outcome.success else 'failed'}"
                    )
                    status.evidence.append(evidence)

            # Check if criterion is met based on target
            try:
                target = criterion.target
                if isinstance(target, str) and target.endswith("%"):
                    target_value = float(target.rstrip("%")) / 100
                    status.met = status.progress >= target_value
                else:
                    # For non-percentage targets, consider met if progress > 0.8
                    status.met = status.progress >= 0.8
            except (ValueError, AttributeError):
                status.met = status.progress >= 0.8

        return status

    def _is_related_to_criterion(self, decision: Decision, criterion: Any) -> bool:
        """Check if a decision is related to a criterion."""
        # Simple keyword matching
        criterion_keywords = criterion.description.lower().split()
        decision_text = f"{decision.intent} {decision.reasoning}".lower()

        matches = sum(1 for kw in criterion_keywords if kw in decision_text)
        return matches >= 2  # At least 2 keyword matches

    def _get_recommendation(self, result: dict) -> str:
        """Get recommendation based on current progress."""
        progress = result["overall_progress"]
        violations = result["constraint_violations"]

        # Check for hard constraint violations
        hard_violations = [v for v in violations if self._is_hard_constraint(v["constraint_id"])]

        if hard_violations:
            return "adjust"  # Must address violations

        if progress >= 0.95:
            return "complete"  # Goal essentially achieved

        if progress < 0.3 and result["metrics"]["total_decisions"] > 10:
            return "adjust"  # Low progress despite many decisions

        return "continue"

    def _is_hard_constraint(self, constraint_id: str) -> bool:
        """Check if a constraint is a hard constraint."""
        for constraint in self.goal.constraints:
            if constraint.id == constraint_id:
                return constraint.constraint_type == "hard"
        return False

    # === QUERY OPERATIONS ===

    def get_decisions_by_stream(self, stream_id: str) -> list[DecisionRecord]:
        """Get all decisions from a specific stream."""
        return [d for d in self._decisions if d.stream_id == stream_id]

    def get_decisions_by_execution(
        self,
        stream_id: str,
        execution_id: str,
    ) -> list[DecisionRecord]:
        """Get all decisions from a specific execution."""
        return [
            d
            for d in self._decisions
            if d.stream_id == stream_id and d.execution_id == execution_id
        ]

    def get_recent_decisions(self, limit: int = 10) -> list[DecisionRecord]:
        """Get most recent decisions."""
        return self._decisions[-limit:]

    def get_criterion_status(self, criterion_id: str) -> CriterionStatus | None:
        """Get status of a specific criterion."""
        return self._criterion_status.get(criterion_id)

    def get_stats(self) -> dict:
        """Get aggregator statistics."""
        return {
            "total_decisions": self._total_decisions,
            "successful_outcomes": self._successful_outcomes,
            "failed_outcomes": self._failed_outcomes,
            "constraint_violations": len(self._constraint_violations),
            "criteria_tracked": len(self._criterion_status),
            "streams_seen": len({d.stream_id for d in self._decisions}),
        }

    # === RESET OPERATIONS ===

    def reset(self) -> None:
        """Reset all aggregated data."""
        self._decisions.clear()
        self._decisions_by_id.clear()
        self._constraint_violations.clear()
        self._total_decisions = 0
        self._successful_outcomes = 0
        self._failed_outcomes = 0
        self._initialize_criteria()
        logger.info("OutcomeAggregator reset")
    def get_stats(self):
        return {"total_decisions": 0, "total_outcomes": 0}

@@ -0,0 +1,489 @@
"""Per-colony SQLite task queue + progress ledger.

Every colony gets its own ``progress.db`` under ``~/.hive/colonies/{name}/data/``.
The DB holds the colony's task queue plus per-task step and SOP checklist
rows. Workers claim tasks atomically, write progress as they execute, and
verify SOP gates before marking a task done. This gives cross-run memory
that the existing per-iteration stall detectors don't have.

The DB is driven by agents via the ``sqlite3`` CLI through
``terminal_exec``. This module handles framework-side lifecycle:
creation, migration, queen-side bulk seeding, stale-claim reclamation.

Concurrency model:
- WAL mode on from day one so 100 concurrent workers don't serialize.
- Workers hold NO long-running connection — they ``sqlite3`` per call,
  which naturally releases locks between LLM turns.
- Atomic claim via ``BEGIN IMMEDIATE; UPDATE tasks SET status='claimed'
  WHERE id=(SELECT ... LIMIT 1)``. The subquery-form UPDATE runs inside
  the immediate transaction so racers either win the row or find zero
  affected rows.
- Stale-claim reclaimer runs on host startup: claims older than
  ``stale_after_minutes`` get returned to ``pending`` and the row's
  ``retry_count`` increments. When ``retry_count >= max_retries`` the
  row is moved to ``failed`` instead.

All writes go through ``BEGIN IMMEDIATE`` so racing readers see
consistent snapshots.
"""

from __future__ import annotations

import json
import logging
import sqlite3
import uuid
from datetime import UTC, datetime
from pathlib import Path
from typing import Any

logger = logging.getLogger(__name__)

SCHEMA_VERSION = 1

_SCHEMA_V1 = """
CREATE TABLE IF NOT EXISTS tasks (
    id TEXT PRIMARY KEY,
    seq INTEGER,
    priority INTEGER NOT NULL DEFAULT 0,
    goal TEXT NOT NULL,
    payload TEXT,
    status TEXT NOT NULL DEFAULT 'pending',
    worker_id TEXT,
    claim_token TEXT,
    claimed_at TEXT,
    started_at TEXT,
    completed_at TEXT,
    created_at TEXT NOT NULL,
    updated_at TEXT NOT NULL,
    retry_count INTEGER NOT NULL DEFAULT 0,
    max_retries INTEGER NOT NULL DEFAULT 3,
    last_error TEXT,
    parent_task_id TEXT REFERENCES tasks(id) ON DELETE SET NULL,
    source TEXT
);

CREATE TABLE IF NOT EXISTS steps (
    id TEXT PRIMARY KEY,
    task_id TEXT NOT NULL REFERENCES tasks(id) ON DELETE CASCADE,
    seq INTEGER NOT NULL,
    title TEXT NOT NULL,
    detail TEXT,
    status TEXT NOT NULL DEFAULT 'pending',
    evidence TEXT,
    worker_id TEXT,
    started_at TEXT,
    completed_at TEXT,
    UNIQUE (task_id, seq)
);

CREATE TABLE IF NOT EXISTS sop_checklist (
    id TEXT PRIMARY KEY,
    task_id TEXT NOT NULL REFERENCES tasks(id) ON DELETE CASCADE,
    key TEXT NOT NULL,
    description TEXT NOT NULL,
    required INTEGER NOT NULL DEFAULT 1,
    done_at TEXT,
    done_by TEXT,
    note TEXT,
    UNIQUE (task_id, key)
);

CREATE TABLE IF NOT EXISTS colony_meta (
    key TEXT PRIMARY KEY,
    value TEXT NOT NULL,
    updated_at TEXT NOT NULL
);

CREATE INDEX IF NOT EXISTS idx_tasks_claimable
    ON tasks(status, priority DESC, seq, created_at)
    WHERE status = 'pending';

CREATE INDEX IF NOT EXISTS idx_steps_task_seq
    ON steps(task_id, seq);

CREATE INDEX IF NOT EXISTS idx_sop_required_open
    ON sop_checklist(task_id, required, done_at);

CREATE INDEX IF NOT EXISTS idx_tasks_status
    ON tasks(status, updated_at);
"""

_PRAGMAS = (
    "PRAGMA journal_mode = WAL;",
    "PRAGMA synchronous = NORMAL;",
    "PRAGMA foreign_keys = ON;",
    "PRAGMA busy_timeout = 5000;",
)


def _now_iso() -> str:
    return datetime.now(UTC).isoformat(timespec="seconds")


def _new_id() -> str:
    return str(uuid.uuid4())


def _connect(db_path: Path) -> sqlite3.Connection:
    """Open a connection with the standard pragmas applied.

    WAL mode is sticky on the file once set, so re-applying on every
    open is cheap. The other pragmas are per-connection and must be
    set each time.
    """
    con = sqlite3.connect(str(db_path), isolation_level=None, timeout=5.0)
    for pragma in _PRAGMAS:
        con.execute(pragma)
    return con


def ensure_progress_db(colony_dir: Path) -> Path:
    """Create or migrate ``{colony_dir}/data/progress.db``.

    Idempotent: safe to call on an already-initialized DB. Returns the
    absolute path to the DB file.

    Steps:
    1. Ensure ``data/`` subdir exists.
    2. Open the DB (creates the file if missing).
    3. Apply WAL + pragmas.
    4. Read ``PRAGMA user_version``; if < SCHEMA_VERSION, run the
       schema block and bump user_version.
    5. Reclaim any stale claims left from previous runs.
    6. Patch every ``*.json`` worker config in the colony dir to
       inject ``input_data.db_path`` and ``input_data.colony_id`` so
       pre-existing colonies (forked before this feature landed) get
       the tracker wiring on their next spawn.
    """
    data_dir = Path(colony_dir) / "data"
    data_dir.mkdir(parents=True, exist_ok=True)
    db_path = data_dir / "progress.db"

    con = _connect(db_path)
    try:
        current_version = con.execute("PRAGMA user_version").fetchone()[0]
        if current_version < SCHEMA_VERSION:
            con.executescript(_SCHEMA_V1)
            con.execute(f"PRAGMA user_version = {SCHEMA_VERSION}")
            con.execute(
                "INSERT OR REPLACE INTO colony_meta(key, value, updated_at) VALUES (?, ?, ?)",
                ("schema_version", str(SCHEMA_VERSION), _now_iso()),
            )
            logger.info("progress_db: initialized schema v%d at %s", SCHEMA_VERSION, db_path)

        reclaimed = _reclaim_stale_inner(con, stale_after_minutes=15)
        if reclaimed:
            logger.info(
                "progress_db: reclaimed %d stale claims at startup (%s)",
                reclaimed,
                db_path,
            )
    finally:
        con.close()

    resolved_db_path = db_path.resolve()
    _patch_worker_configs(Path(colony_dir), resolved_db_path)
    return resolved_db_path


def _patch_worker_configs(colony_dir: Path, db_path: Path) -> int:
    """Inject ``input_data.db_path`` + ``input_data.colony_id`` +
    ``input_data.colony_data_dir`` into existing ``worker.json`` files
    in a colony directory.

    Runs on every ``ensure_progress_db`` call so colonies that were
    forked before this feature landed get their worker spawn messages
    patched in place. Idempotent: if ``input_data`` already contains
    all three values, the file is not rewritten.

    Returns the number of files that were actually modified (0 on
    the common case of already-patched colonies).

    Why ``colony_data_dir``? ``db_path`` alone points agents at
    ``progress.db``; for anything else (custom SQLite stores, JSON
    ledgers, scraped artefacts) they need the *directory* so they
    stop creating state under ``~/.hive/skills/`` — which holds skill
    *definitions*, not runtime data. See
    ``_default_skills/colony-storage-paths/SKILL.md``.
    """
    colony_id = colony_dir.name
    abs_db = str(db_path)
    abs_data_dir = str(db_path.parent)
    patched = 0

    for worker_cfg in colony_dir.glob("*.json"):
        # Only patch files that look like worker configs (have the
        # worker_meta shape). ``metadata.json`` and ``triggers.json``
        # are colony-level and must not be touched.
        if worker_cfg.name in ("metadata.json", "triggers.json"):
            continue
        try:
            data = json.loads(worker_cfg.read_text(encoding="utf-8"))
        except (json.JSONDecodeError, OSError):
            continue
        if not isinstance(data, dict) or "system_prompt" not in data:
            # Not a worker config (lacks the worker_meta schema).
            continue

        input_data = data.get("input_data")
        if not isinstance(input_data, dict):
            input_data = {}

        if (
            input_data.get("db_path") == abs_db
            and input_data.get("colony_id") == colony_id
            and input_data.get("colony_data_dir") == abs_data_dir
        ):
            continue  # already patched

        input_data["db_path"] = abs_db
        input_data["colony_id"] = colony_id
        input_data["colony_data_dir"] = abs_data_dir
        data["input_data"] = input_data

        try:
            worker_cfg.write_text(json.dumps(data, indent=2, ensure_ascii=False), encoding="utf-8")
            patched += 1
        except OSError as e:
            logger.warning("progress_db: failed to patch worker config %s: %s", worker_cfg, e)

    if patched:
        logger.info(
            "progress_db: patched %d worker config(s) in colony '%s' with db_path + colony_data_dir",
            patched,
            colony_id,
        )
    return patched


def ensure_all_colony_dbs(colonies_root: Path | None = None) -> list[Path]:
    """Idempotently ensure every existing colony has a progress.db.

    Called on framework host startup to backfill older colonies and
    run the stale-claim reclaimer on all of them in one pass.
    """
    if colonies_root is None:
        from framework.config import COLONIES_DIR

        colonies_root = COLONIES_DIR
    if not colonies_root.is_dir():
        return []

    initialized: list[Path] = []
    for entry in sorted(colonies_root.iterdir()):
        if not entry.is_dir():
            continue
        try:
            initialized.append(ensure_progress_db(entry))
        except Exception as e:
            logger.warning("progress_db: failed to ensure DB for colony '%s': %s", entry.name, e)
    return initialized


def seed_tasks(
    db_path: Path,
    tasks: list[dict[str, Any]],
    *,
    source: str = "queen_create",
) -> list[str]:
    """Bulk-insert tasks (with optional nested steps + sop_items).

    Each task dict accepts:
    - goal: str (required)
    - seq: int (optional ordering hint)
    - priority: int (default 0)
    - payload: dict | str | None (stored as JSON text)
    - max_retries: int (default 3)
    - parent_task_id: str | None
    - steps: list[{"title": str, "detail"?: str}] (optional)
    - sop_items: list[{"key": str, "description": str, "required"?: bool, "note"?: str}] (optional)

    All rows are inserted in a single BEGIN IMMEDIATE transaction so
    10k-row seeds finish in one disk flush. Returns the created task ids
    in the same order as input.
    """
    if not tasks:
        return []

    created_ids: list[str] = []
    now = _now_iso()
    con = _connect(Path(db_path))
    try:
        con.execute("BEGIN IMMEDIATE")
        for idx, task in enumerate(tasks):
            goal = task.get("goal")
            if not goal:
                raise ValueError(f"task[{idx}] missing required 'goal' field")

            task_id = task.get("id") or _new_id()
            payload = task.get("payload")
            if payload is not None and not isinstance(payload, str):
                payload = json.dumps(payload, ensure_ascii=False)

            con.execute(
                """
                INSERT INTO tasks (
                    id, seq, priority, goal, payload, status,
                    created_at, updated_at, max_retries, parent_task_id, source
                ) VALUES (?, ?, ?, ?, ?, 'pending', ?, ?, ?, ?, ?)
                """,
                (
                    task_id,
                    task.get("seq"),
                    int(task.get("priority", 0)),
                    goal,
                    payload,
                    now,
                    now,
                    int(task.get("max_retries", 3)),
                    task.get("parent_task_id"),
                    source,
                ),
            )

            for step_seq, step in enumerate(task.get("steps") or [], start=1):
                if not step.get("title"):
                    raise ValueError(f"task[{idx}].steps[{step_seq - 1}] missing required 'title'")
                con.execute(
                    """
                    INSERT INTO steps (id, task_id, seq, title, detail, status)
                    VALUES (?, ?, ?, ?, ?, 'pending')
                    """,
                    (
                        _new_id(),
                        task_id,
                        step.get("seq", step_seq),
                        step["title"],
                        step.get("detail"),
                    ),
                )

            for sop in task.get("sop_items") or []:
                key = sop.get("key")
                description = sop.get("description")
                if not key or not description:
                    raise ValueError(f"task[{idx}].sop_items missing 'key' or 'description'")
                con.execute(
                    """
                    INSERT INTO sop_checklist
                        (id, task_id, key, description, required, note)
                    VALUES (?, ?, ?, ?, ?, ?)
                    """,
                    (
                        _new_id(),
                        task_id,
                        key,
                        description,
                        1 if sop.get("required", True) else 0,
                        sop.get("note"),
                    ),
                )

            created_ids.append(task_id)

        con.execute("COMMIT")
    except Exception:
        con.execute("ROLLBACK")
        raise
    finally:
        con.close()

    return created_ids


def enqueue_task(
    db_path: Path,
    goal: str,
    *,
    steps: list[dict[str, Any]] | None = None,
    sop_items: list[dict[str, Any]] | None = None,
    payload: Any = None,
    priority: int = 0,
    parent_task_id: str | None = None,
    source: str = "enqueue_tool",
) -> str:
    """Append a single task to an existing queue. Thin wrapper over seed_tasks."""
    ids = seed_tasks(
        db_path,
        [
            {
                "goal": goal,
                "steps": steps,
                "sop_items": sop_items,
                "payload": payload,
                "priority": priority,
                "parent_task_id": parent_task_id,
            }
        ],
        source=source,
    )
    return ids[0]


def _reclaim_stale_inner(con: sqlite3.Connection, *, stale_after_minutes: int) -> int:
    """Reclaim stale claims. Runs inside an existing open connection.

    Two-step:
    1. Tasks past max_retries go to 'failed' with last_error populated.
    2. Remaining stale claims return to 'pending', retry_count++.
    """
    cutoff_expr = f"datetime('now', '-{int(stale_after_minutes)} minutes')"

    con.execute("BEGIN IMMEDIATE")
    try:
        con.execute(
            f"""
            UPDATE tasks
            SET status = 'failed',
                last_error = COALESCE(last_error, 'exceeded max_retries after stale claim'),
                completed_at = datetime('now'),
                updated_at = datetime('now')
            WHERE status IN ('claimed', 'in_progress')
              AND claimed_at IS NOT NULL
              AND claimed_at < {cutoff_expr}
              AND retry_count >= max_retries
            """
        )

        cur = con.execute(
            f"""
            UPDATE tasks
            SET status = 'pending',
                worker_id = NULL,
                claim_token = NULL,
                claimed_at = NULL,
                started_at = NULL,
                retry_count = retry_count + 1,
                updated_at = datetime('now')
            WHERE status IN ('claimed', 'in_progress')
              AND claimed_at IS NOT NULL
              AND claimed_at < {cutoff_expr}
              AND retry_count < max_retries
            """
        )
        reclaimed = cur.rowcount or 0
        con.execute("COMMIT")
        return reclaimed
    except Exception:
        con.execute("ROLLBACK")
        raise


def reclaim_stale(db_path: Path, stale_after_minutes: int = 15) -> int:
    """Public wrapper that opens its own connection."""
    con = _connect(Path(db_path))
    try:
        return _reclaim_stale_inner(con, stale_after_minutes=stale_after_minutes)
    finally:
        con.close()


__all__ = [
    "SCHEMA_VERSION",
    "ensure_progress_db",
    "ensure_all_colony_dbs",
    "seed_tasks",
    "enqueue_task",
    "reclaim_stale",
]
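For reference, the claim pattern the module docstring describes looks roughly like this from the worker side. This is a sketch under the module's own schema, not part of the module: workers normally issue the equivalent SQL through the `sqlite3` CLI via `terminal_exec`, and re-selecting by a fresh `claim_token` is one way for a racer to learn whether it won the row (zero affected rows means another worker got there first).

# Sketch of a worker-side atomic claim against the schema above.
import sqlite3
import uuid

def claim_next_task(db_path: str, worker_id: str) -> str | None:
    """Claim the highest-priority pending task; return its id or None."""
    token = str(uuid.uuid4())
    con = sqlite3.connect(db_path, isolation_level=None, timeout=5.0)
    try:
        con.execute("BEGIN IMMEDIATE")  # take the write lock up front
        con.execute(
            """
            UPDATE tasks
            SET status = 'claimed', worker_id = ?, claim_token = ?,
                claimed_at = datetime('now'), updated_at = datetime('now')
            WHERE id = (
                SELECT id FROM tasks WHERE status = 'pending'
                ORDER BY priority DESC, seq, created_at LIMIT 1
            )
            """,
            (worker_id, token),
        )
        row = con.execute(
            "SELECT id FROM tasks WHERE claim_token = ?", (token,)
        ).fetchone()
        con.execute("COMMIT")
        return row[0] if row else None  # None: another racer won the row
    except Exception:
        con.execute("ROLLBACK")
        raise
    finally:
        con.close()
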
@@ -1,16 +1,7 @@
"""
Shared Buffer Manager - Manages state across concurrent executions.

Provides different isolation levels:
- ISOLATED: Each execution has its own state copy
- SHARED: All executions read/write same state (eventual consistency)
- SYNCHRONIZED: Shared state with write locks (strong consistency)
"""
"""Stub — shared state removed in colony refactor."""

import asyncio
import logging
import time
from dataclasses import dataclass, field
from enum import StrEnum
from typing import Any

@@ -18,482 +9,53 @@ logger = logging.getLogger(__name__)


class IsolationLevel(StrEnum):
    """State isolation level for concurrent executions."""

    ISOLATED = "isolated"  # Private state per execution
    SHARED = "shared"  # Shared state (eventual consistency)
    SYNCHRONIZED = "synchronized"  # Shared with write locks (strong consistency)
    ISOLATED = "isolated"
    SHARED = "shared"
    SYNCHRONIZED = "synchronized"


class StateScope(StrEnum):
    """Scope for state operations."""

    EXECUTION = "execution"  # Local to a single execution
    STREAM = "stream"  # Shared within a stream
    GLOBAL = "global"  # Shared across all streams


@dataclass
class StateChange:
    """Record of a state change."""

    key: str
    old_value: Any
    new_value: Any
    scope: StateScope
    execution_id: str
    stream_id: str
    timestamp: float = field(default_factory=time.time)
    EXECUTION = "execution"
    STREAM = "stream"
    GLOBAL = "global"


class SharedBufferManager:
    """
    Manages shared state across concurrent executions.

    State hierarchy:
    - Global state: Shared across all streams and executions
    - Stream state: Shared within a stream (across executions)
    - Execution state: Private to a single execution

    Isolation levels control visibility:
    - ISOLATED: Only sees execution state
    - SHARED: Sees all levels, writes propagate up based on scope
    - SYNCHRONIZED: Like SHARED but with write locks

    Example:
        manager = SharedBufferManager()

        # Create buffer for an execution
        buf = manager.create_buffer(
            execution_id="exec_123",
            stream_id="webhook",
            isolation=IsolationLevel.SHARED,
        )

        # Read/write through the buffer
        await buf.write("customer_id", "cust_456", scope=StateScope.STREAM)
        value = await buf.read("customer_id")
    """

    def __init__(self):
        # State storage at each level
        self._global_state: dict[str, Any] = {}
        self._stream_state: dict[str, dict[str, Any]] = {}  # stream_id -> {key: value}
        self._execution_state: dict[str, dict[str, Any]] = {}  # execution_id -> {key: value}

        # Locks for synchronized access
        self._global_lock = asyncio.Lock()
        self._stream_locks: dict[str, asyncio.Lock] = {}
        self._key_locks: dict[str, asyncio.Lock] = {}

        # Change history for debugging/auditing
        self._change_history: list[StateChange] = []
        self._max_history = 1000

        # Version tracking
        self._version = 0
        self._stream_states: dict[str, dict[str, Any]] = {}
        self._execution_states: dict[str, dict[str, Any]] = {}
        self._lock = asyncio.Lock()

    def create_buffer(
        self,
        execution_id: str,
        stream_id: str,
        isolation: IsolationLevel,
    ) -> "StreamBuffer":
        """
        Create a buffer instance for an execution.

        Args:
            execution_id: Unique execution identifier
            stream_id: Stream this execution belongs to
            isolation: Isolation level for this execution

        Returns:
            StreamBuffer instance for reading/writing state
        """
        # Initialize execution state
        if execution_id not in self._execution_state:
            self._execution_state[execution_id] = {}

        # Initialize stream state
        if stream_id not in self._stream_state:
            self._stream_state[stream_id] = {}
            self._stream_locks[stream_id] = asyncio.Lock()

        return StreamBuffer(
            manager=self,
            execution_id=execution_id,
            stream_id=stream_id,
            isolation=isolation,
        )

    def cleanup_execution(self, execution_id: str) -> None:
        """
        Clean up state for a completed execution.

        Args:
            execution_id: Execution to clean up
        """
        self._execution_state.pop(execution_id, None)
        logger.debug(f"Cleaned up state for execution: {execution_id}")

    def cleanup_stream(self, stream_id: str) -> None:
        """
        Clean up state for a closed stream.

        Args:
            stream_id: Stream to clean up
        """
        self._stream_state.pop(stream_id, None)
        self._stream_locks.pop(stream_id, None)
        logger.debug(f"Cleaned up state for stream: {stream_id}")

    # === LOW-LEVEL STATE OPERATIONS ===

    async def read(
        self,
        key: str,
        execution_id: str,
        stream_id: str,
        isolation: IsolationLevel,
    ) -> Any:
        """
        Read a value respecting isolation level.

        Resolution order (stops at first match):
        1. Execution state (always checked)
        2. Stream state (if isolation != ISOLATED)
        3. Global state (if isolation != ISOLATED)
        """
        # Always check execution-local first
        if execution_id in self._execution_state:
            if key in self._execution_state[execution_id]:
                return self._execution_state[execution_id][key]

        # Check stream-level (unless isolated)
        if isolation != IsolationLevel.ISOLATED:
            if stream_id in self._stream_state:
                if key in self._stream_state[stream_id]:
                    return self._stream_state[stream_id][key]

            # Check global
            if key in self._global_state:
                return self._global_state[key]

        return None

    async def write(
        self,
        key: str,
        value: Any,
        execution_id: str,
        stream_id: str,
        isolation: IsolationLevel,
        scope: StateScope = StateScope.EXECUTION,
    ) -> None:
        """
        Write a value respecting isolation level.

        Args:
            key: State key
            value: Value to write
            execution_id: Current execution
            stream_id: Current stream
            isolation: Isolation level
            scope: Where to write (execution, stream, or global)
        """
        # Get old value for change tracking
        old_value = await self.read(key, execution_id, stream_id, isolation)

        # ISOLATED can only write to execution scope
        if isolation == IsolationLevel.ISOLATED:
            scope = StateScope.EXECUTION

        # SYNCHRONIZED requires locks for stream/global writes
        if isolation == IsolationLevel.SYNCHRONIZED and scope != StateScope.EXECUTION:
            await self._write_with_lock(key, value, execution_id, stream_id, scope)
        else:
            await self._write_direct(key, value, execution_id, stream_id, scope)

        # Record change
        self._record_change(
            StateChange(
                key=key,
                old_value=old_value,
                new_value=value,
                scope=scope,
                execution_id=execution_id,
                stream_id=stream_id,
            )
        )

    async def _write_direct(
        self,
        key: str,
        value: Any,
        execution_id: str,
        stream_id: str,
        scope: StateScope,
    ) -> None:
        """Write without locking (for ISOLATED and SHARED)."""
        if scope == StateScope.EXECUTION:
            if execution_id not in self._execution_state:
                self._execution_state[execution_id] = {}
            self._execution_state[execution_id][key] = value

        elif scope == StateScope.STREAM:
            if stream_id not in self._stream_state:
                self._stream_state[stream_id] = {}
            self._stream_state[stream_id][key] = value

        elif scope == StateScope.GLOBAL:
            self._global_state[key] = value

        self._version += 1

    async def _write_with_lock(
        self,
        key: str,
        value: Any,
        execution_id: str,
        stream_id: str,
        scope: StateScope,
    ) -> None:
        """Write with locking (for SYNCHRONIZED)."""
        lock = self._get_lock(scope, key, stream_id)
        async with lock:
            await self._write_direct(key, value, execution_id, stream_id, scope)

    def _get_lock(self, scope: StateScope, key: str, stream_id: str) -> asyncio.Lock:
        """Get appropriate lock for scope and key."""
        if scope == StateScope.GLOBAL:
            lock_key = f"global:{key}"
        elif scope == StateScope.STREAM:
            lock_key = f"stream:{stream_id}:{key}"
        else:
            lock_key = f"exec:{key}"

        if lock_key not in self._key_locks:
            self._key_locks[lock_key] = asyncio.Lock()

        return self._key_locks[lock_key]

    def _record_change(self, change: StateChange) -> None:
        """Record a state change for auditing."""
        self._change_history.append(change)

        # Trim history if too long
        if len(self._change_history) > self._max_history:
            self._change_history = self._change_history[-self._max_history :]

    # === BULK OPERATIONS ===

    async def read_all(
        self,
        execution_id: str,
        stream_id: str,
        isolation: IsolationLevel,
    ) -> dict[str, Any]:
        """
        Read all visible state for an execution.

        Returns merged state from all visible levels.
        """
        result = {}

        # Start with global (if visible)
        if isolation != IsolationLevel.ISOLATED:
            result.update(self._global_state)

            # Add stream state (overwrites global)
            if stream_id in self._stream_state:
                result.update(self._stream_state[stream_id])

        # Add execution state (overwrites all)
        if execution_id in self._execution_state:
            result.update(self._execution_state[execution_id])

        return result

    async def write_batch(
        self,
        updates: dict[str, Any],
        execution_id: str,
        stream_id: str,
        isolation: IsolationLevel,
        scope: StateScope = StateScope.EXECUTION,
    ) -> None:
        """Write multiple values atomically."""
        for key, value in updates.items():
            await self.write(key, value, execution_id, stream_id, isolation, scope)

    # === UTILITY ===

    def get_stats(self) -> dict:
        """Get state manager statistics."""
        return {
            "global_keys": len(self._global_state),
            "stream_count": len(self._stream_state),
            "execution_count": len(self._execution_state),
            "total_changes": len(self._change_history),
            "version": self._version,
        }

    def get_recent_changes(self, limit: int = 10) -> list[StateChange]:
        """Get recent state changes."""
        return self._change_history[-limit:]


class StreamBuffer:
    """
    Buffer interface for a single execution.

    Provides scoped access to shared state with proper isolation.
    Compatible with the existing DataBuffer interface where possible.
    """

    def __init__(
        self,
        manager: SharedBufferManager,
        execution_id: str,
        stream_id: str,
        isolation: IsolationLevel,
        stream_id: str = "",
        isolation: IsolationLevel = IsolationLevel.ISOLATED,
    ):
        self._manager = manager
        self._execution_id = execution_id
        self._stream_id = stream_id
        self._isolation = isolation
        execution_key = f"{stream_id}:{execution_id}"
        if execution_key not in self._execution_states:
            self._execution_states[execution_key] = {}
        return self._execution_states[execution_key]

        # Permission model (optional, for node-level scoping)
        self._allowed_read: set[str] | None = None
        self._allowed_write: set[str] | None = None
    def get_stream_state(self, stream_id: str) -> dict[str, Any]:
        return self._stream_states.setdefault(stream_id, {})

    def with_permissions(
        self,
        read_keys: list[str],
        write_keys: list[str],
    ) -> "StreamBuffer":
    def get_global_state(self) -> dict[str, Any]:
        return self._global_state

    def cleanup_execution(self, execution_id: str, stream_id: str = "") -> None:
        """Drop the per-execution state bucket.

        No-op when the key is absent. Called from
        ``ExecutionManager._run_execution``'s finally block. Before this
        stub existed, the call raised ``AttributeError`` on every
        execution teardown because the SharedBufferManager stub had no
        such method.
        """
        Create a scoped view with read/write permissions.
        execution_key = f"{stream_id}:{execution_id}"
        self._execution_states.pop(execution_key, None)

        Compatible with existing DataBuffer.with_permissions().
        """
        scoped = StreamBuffer(
            manager=self._manager,
            execution_id=self._execution_id,
            stream_id=self._stream_id,
            isolation=self._isolation,
        )
        scoped._allowed_read = set(read_keys)
        scoped._allowed_write = set(write_keys)
        return scoped

    async def read(self, key: str) -> Any:
        """Read a value from state."""
        # Check permissions
        if self._allowed_read is not None and key not in self._allowed_read:
            raise PermissionError(f"Not allowed to read key: {key}")

        return await self._manager.read(
            key=key,
            execution_id=self._execution_id,
            stream_id=self._stream_id,
            isolation=self._isolation,
        )

    async def write(
        self,
        key: str,
        value: Any,
        scope: StateScope = StateScope.EXECUTION,
    ) -> None:
        """Write a value to state."""
        # Check permissions
        if self._allowed_write is not None and key not in self._allowed_write:
            raise PermissionError(f"Not allowed to write key: {key}")

        await self._manager.write(
            key=key,
            value=value,
            execution_id=self._execution_id,
            stream_id=self._stream_id,
            isolation=self._isolation,
            scope=scope,
        )

    async def read_all(self) -> dict[str, Any]:
        """Read all visible state."""
        all_state = await self._manager.read_all(
            execution_id=self._execution_id,
            stream_id=self._stream_id,
            isolation=self._isolation,
        )

        # Filter by permissions if set
        if self._allowed_read is not None:
            return {k: v for k, v in all_state.items() if k in self._allowed_read}

        return all_state

    # === SYNC API (for backward compatibility with DataBuffer) ===

    def read_sync(self, key: str) -> Any:
        """
        Synchronous read (for compatibility with existing code).

        Note: This runs the async operation in a new event loop
        or uses direct access if no loop is running.
        """
        # Direct access for sync usage
        if self._allowed_read is not None and key not in self._allowed_read:
            raise PermissionError(f"Not allowed to read key: {key}")

        # Check execution state
        exec_state = self._manager._execution_state.get(self._execution_id, {})
        if key in exec_state:
            return exec_state[key]

        # Check stream/global if not isolated
        if self._isolation != IsolationLevel.ISOLATED:
            stream_state = self._manager._stream_state.get(self._stream_id, {})
            if key in stream_state:
                return stream_state[key]

            if key in self._manager._global_state:
                return self._manager._global_state[key]

        return None

    def write_sync(self, key: str, value: Any) -> None:
        """
        Synchronous write (for compatibility with existing code).

        Always writes to execution scope for simplicity.
        """
        if self._allowed_write is not None and key not in self._allowed_write:
            raise PermissionError(f"Not allowed to write key: {key}")

        if self._execution_id not in self._manager._execution_state:
            self._manager._execution_state[self._execution_id] = {}

        self._manager._execution_state[self._execution_id][key] = value
        self._manager._version += 1

    def read_all_sync(self) -> dict[str, Any]:
        """Synchronous read all."""
        result = {}

        # Global (if visible)
        if self._isolation != IsolationLevel.ISOLATED:
            result.update(self._manager._global_state)
            if self._stream_id in self._manager._stream_state:
                result.update(self._manager._stream_state[self._stream_id])

        # Execution
        if self._execution_id in self._manager._execution_state:
            result.update(self._manager._execution_state[self._execution_id])

        # Filter by permissions
        if self._allowed_read is not None:
            result = {k: v for k, v in result.items() if k in self._allowed_read}

        return result
    def get_recent_changes(self, limit: int = 10) -> list[dict[str, Any]]:
        """Compat stub — returns empty list. Shared buffer was removed."""
        return []

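The removed manager's read path is easiest to see as a plain dictionary-chain lookup. A minimal standalone sketch of that resolution order (illustrative only; the real class is gone after this refactor, and the function name here is hypothetical):

from typing import Any

def resolve(key: str, execution: dict, stream: dict, global_: dict,
            isolated: bool) -> Any:
    # 1. Execution-local state always wins.
    if key in execution:
        return execution[key]
    # 2./3. Stream state, then global state, but only when not ISOLATED.
    if not isolated:
        if key in stream:
            return stream[key]
        if key in global_:
            return global_[key]
    return None
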
@@ -10,16 +10,13 @@ import asyncio
import logging
import uuid
from datetime import datetime
from typing import TYPE_CHECKING, Any
from typing import Any

from framework.observability import set_trace_context
from framework.schemas.decision import Decision, DecisionType, Option, Outcome
from framework.schemas.run import Run, RunStatus
from framework.storage.concurrent import ConcurrentStorage

if TYPE_CHECKING:
    from framework.host.outcome_aggregator import OutcomeAggregator

logger = logging.getLogger(__name__)


@@ -75,7 +72,6 @@ class StreamDecisionTracker:
        self,
        stream_id: str,
        storage: ConcurrentStorage,
        outcome_aggregator: "OutcomeAggregator | None" = None,
    ):
        """
        Initialize stream runtime.
@@ -83,11 +79,9 @@ class StreamDecisionTracker:
        Args:
            stream_id: Unique identifier for this stream
            storage: Concurrent storage backend
            outcome_aggregator: Optional aggregator for cross-stream evaluation
        """
        self.stream_id = stream_id
        self._storage = storage
        self._outcome_aggregator = outcome_aggregator

        # Track runs by execution_id (thread-safe via lock)
        self._runs: dict[str, Run] = {}
@@ -142,9 +136,7 @@ class StreamDecisionTracker:
        self._run_locks[execution_id] = asyncio.Lock()
        self._current_nodes[execution_id] = "unknown"

        logger.debug(
            f"Started run {run_id} for execution {execution_id} in stream {self.stream_id}"
        )
        logger.debug(f"Started run {run_id} for execution {execution_id} in stream {self.stream_id}")
        return run_id

    def end_run(
@@ -268,14 +260,6 @@ class StreamDecisionTracker:

        run.add_decision(decision)

        # Report to outcome aggregator if available
        if self._outcome_aggregator:
            self._outcome_aggregator.record_decision(
                stream_id=self.stream_id,
                execution_id=execution_id,
                decision=decision,
            )

        return decision_id

    def record_outcome(
@@ -321,15 +305,6 @@ class StreamDecisionTracker:

        run.record_outcome(decision_id, outcome)

        # Report to outcome aggregator if available
        if self._outcome_aggregator:
            self._outcome_aggregator.record_outcome(
                stream_id=self.stream_id,
                execution_id=execution_id,
                decision_id=decision_id,
                outcome=outcome,
            )

    # === PROBLEM RECORDING ===

    def report_problem(
@@ -357,10 +332,7 @@ class StreamDecisionTracker:
        """
        run = self._runs.get(execution_id)
        if run is None:
            logger.warning(
                f"report_problem called but no run for execution {execution_id}: "
                f"[{severity}] {description}"
            )
            logger.warning(f"report_problem called but no run for execution {execution_id}: [{severity}] {description}")
            return ""

        return run.add_problem(
@@ -89,8 +89,7 @@ class WebhookServer:
        )
        await self._site.start()
        logger.info(
            f"Webhook server started on {self._config.host}:{self._config.port} "
            f"with {len(self._routes)} route(s)"
            f"Webhook server started on {self._config.host}:{self._config.port} with {len(self._routes)} route(s)"
        )

    async def stop(self) -> None:

@@ -0,0 +1,467 @@
"""Worker — a single autonomous AgentLoop clone in a colony.

Two modes:

**Ephemeral (default)**: runs a single AgentLoop execution with a task,
emits a `SUBAGENT_REPORT` event on termination (success, partial, or
failed), and terminates. Used for parallel fan-out from the overseer.

**Persistent (``persistent=True``)**: runs an initial AgentLoop execution
(usually idle, no task) and then loops forever, receiving user chat via
``inject(message)`` and pumping each message into the already-running
agent loop via ``inject_event``. Used for the colony's long-running
client-facing overseer.
"""

from __future__ import annotations

import asyncio
import logging
import time
from dataclasses import dataclass, field
from enum import StrEnum
from pathlib import Path
from typing import Any

logger = logging.getLogger(__name__)


class WorkerStatus(StrEnum):
    PENDING = "pending"
    RUNNING = "running"
    COMPLETED = "completed"
    FAILED = "failed"
    STOPPED = "stopped"


@dataclass
class WorkerResult:
    output: dict[str, Any] = field(default_factory=dict)
    error: str | None = None
    tokens_used: int = 0
    duration_seconds: float = 0.0
    # New: structured report fields. Populated by report_to_parent tool or
    # synthesised from AgentResult on termination.
    status: str = "success"  # "success" | "partial" | "failed" | "timeout" | "stopped"
    summary: str = ""
    data: dict[str, Any] = field(default_factory=dict)


@dataclass
class WorkerInfo:
    id: str
    task: str
    status: WorkerStatus
    started_at: float = 0.0
    result: WorkerResult | None = None


class Worker:
    """A single autonomous clone in a colony.

    Ephemeral mode (default):
    - PENDING → RUNNING → COMPLETED/FAILED/STOPPED, one shot, terminates.

    Persistent mode (``persistent=True``, used by the overseer):
    - PENDING → RUNNING (never transitions out by itself).
    - Receives user chat via ``inject(message)``.
    - Each injected message is pumped into the running AgentLoop via
      ``inject_event``, triggering another turn.
    """

    def __init__(
        self,
        worker_id: str,
        task: str,
        agent_loop: Any,
        context: Any,
        event_bus: Any = None,
        colony_id: str = "",
        persistent: bool = False,
        storage_path: Path | None = None,
    ):
        self.id = worker_id
        self.task = task
        self.status = WorkerStatus.PENDING
        self._agent_loop = agent_loop
        self._context = context
        self._event_bus = event_bus
        self._colony_id = colony_id
        self._persistent = persistent
        # Canonical on-disk home for this worker (conversations, events,
        # result.json, data). Required when seed_conversation() is used —
        # we deliberately do NOT fall back to CWD, which previously caused
        # conversation parts to leak into the process working directory.
        self._storage_path: Path | None = Path(storage_path) if storage_path is not None else None
        self._task_handle: asyncio.Task | None = None
        self._started_at: float = 0.0
        self._result: WorkerResult | None = None
        self._input_queue: asyncio.Queue[str | None] = asyncio.Queue()
        # Set by AgentLoop when the worker's LLM calls ``report_to_parent``.
        # Takes precedence over the synthesised report from AgentResult.
        self._explicit_report: dict[str, Any] | None = None
        # Back-reference so AgentLoop's report_to_parent handler can call
        # record_explicit_report on the owning Worker. The agent_loop's
        # _owner_worker attribute is set here during construction.
        if agent_loop is not None:
            agent_loop._owner_worker = self

    @property
    def info(self) -> WorkerInfo:
        return WorkerInfo(
            id=self.id,
            task=self.task,
            status=self.status,
            started_at=self._started_at,
            result=self._result,
        )

    @property
    def is_active(self) -> bool:
        return self.status in (WorkerStatus.PENDING, WorkerStatus.RUNNING)

    @property
    def is_persistent(self) -> bool:
        return self._persistent

    @property
    def agent_loop(self) -> Any:
        """The wrapped AgentLoop. Used by the SessionManager chat path."""
        return self._agent_loop

    # ------------------------------------------------------------------
    # Lifecycle
    # ------------------------------------------------------------------

    async def run(self) -> WorkerResult:
        """Entry point for the worker's background task.

        Ephemeral workers run ``AgentLoop.execute`` once and terminate,
        emitting a ``SUBAGENT_REPORT`` event.

        Persistent workers run the initial execute then loop forever
        processing injected user messages.
        """
        self.status = WorkerStatus.RUNNING
        self._started_at = time.monotonic()

        # Scope browser profile (and any other CONTEXT_PARAMS) to this
        # worker. asyncio.create_task() copies the parent's contextvars,
        # so without this override every spawned worker inherits the
        # queen's `profile=<queen_session_id>` and its browser_* tool
|
||||
# calls end up driving the queen's Chrome tab group. Setting
|
||||
# it here (inside the new Task's context) shadows the parent
|
||||
# value without affecting the queen's ongoing calls.
|
||||
try:
|
||||
from framework.loader.tool_registry import ToolRegistry
|
||||
from framework.tasks.scoping import session_task_list_id
|
||||
|
||||
ctx = self._context
|
||||
agent_id = getattr(ctx, "agent_id", None) or self.id
|
||||
list_id = getattr(ctx, "task_list_id", None) or session_task_list_id(agent_id, self.id)
|
||||
ToolRegistry.set_execution_context(
|
||||
profile=self.id,
|
||||
agent_id=agent_id,
|
||||
task_list_id=list_id,
|
||||
colony_id=getattr(ctx, "colony_id", None),
|
||||
picked_up_from=getattr(ctx, "picked_up_from", None),
|
||||
)
|
||||
except Exception:
|
||||
logger.debug(
|
||||
"Worker %s: failed to scope execution context",
|
||||
self.id,
|
||||
exc_info=True,
|
||||
)
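The contextvars behaviour the comment above relies on is plain stdlib and can be seen in a self-contained sketch (no framework imports; the names are illustrative):

import asyncio
import contextvars

profile: contextvars.ContextVar[str] = contextvars.ContextVar("profile", default="unset")

async def worker_body(worker_id: str) -> None:
    # Each Task starts with a *copy* of its parent's context, so this set()
    # shadows the queen's value only inside this Task.
    profile.set(worker_id)
    await asyncio.sleep(0)
    assert profile.get() == worker_id

async def main() -> None:
    profile.set("queen-session")
    await asyncio.gather(worker_body("w1"), worker_body("w2"))
    assert profile.get() == "queen-session"  # the parent's value is untouched

asyncio.run(main())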

        try:
            result = await self._agent_loop.execute(self._context)
            duration = time.monotonic() - self._started_at

            if result.success:
                self.status = WorkerStatus.COMPLETED
                self._result = self._build_result(result, duration, default_status="success")
            else:
                self.status = WorkerStatus.FAILED
                self._result = self._build_result(result, duration, default_status="failed")

            await self._emit_terminal_events(result)

            if self._persistent:
                # Persistent worker: keep the loop alive, pump injected
                # messages forever. Status stays RUNNING; info reflects
                # current progress.
                self.status = WorkerStatus.RUNNING
                await self._persistent_input_loop()

            return self._result  # type: ignore[return-value]

        except asyncio.CancelledError:
            self.status = WorkerStatus.STOPPED
            duration = time.monotonic() - self._started_at
            # Preserve any explicit report the worker's LLM already filed
            # via ``report_to_parent`` before being cancelled — the caller
            # cares about that payload even on a hard stop. Only fall back
            # to the canned "stopped" message when no explicit report exists.
            explicit = self._explicit_report
            if explicit is not None:
                self._result = WorkerResult(
                    error="Worker stopped by queen after reporting",
                    duration_seconds=duration,
                    status=explicit["status"],
                    summary=explicit["summary"],
                    data=explicit["data"],
                )
                await self._emit_terminal_events(None, force_status=explicit["status"])
            else:
                self._result = WorkerResult(
                    error="Worker stopped by queen",
                    duration_seconds=duration,
                    status="stopped",
                    summary="Worker was cancelled before completion.",
                )
                await self._emit_terminal_events(None, force_status="stopped")
            return self._result

        except Exception as exc:
            self.status = WorkerStatus.FAILED
            duration = time.monotonic() - self._started_at
            self._result = WorkerResult(
                error=str(exc),
                duration_seconds=duration,
                status="failed",
                summary=f"Worker crashed: {exc}",
            )
            logger.error("Worker %s failed: %s", self.id, exc, exc_info=True)
            await self._emit_terminal_events(None, force_status="failed")
            return self._result

    async def _persistent_input_loop(self) -> None:
        """Pump injected messages into the running AgentLoop forever.

        Each ``inject(msg)`` call puts a string on ``_input_queue``. This
        loop awaits it and calls ``agent_loop.inject_event(msg)`` which
        wakes the loop's pending user-input gate.
        """
        while True:
            msg = await self._input_queue.get()
            if msg is None:
                # Sentinel: shutdown
                return
            try:
                await self._agent_loop.inject_event(msg, is_client_input=True)
            except Exception:
                logger.exception(
                    "Overseer %s: inject_event failed for injected message",
                    self.id,
                )

    # ------------------------------------------------------------------
    # Reporting
    # ------------------------------------------------------------------

    def record_explicit_report(
        self,
        status: str,
        summary: str,
        data: dict[str, Any] | None = None,
    ) -> None:
        """Called by AgentLoop when the worker's LLM invokes ``report_to_parent``.

        Stores the report so that when ``run()`` reaches the termination
        block, the explicit report wins over a synthesised one.
        """
        self._explicit_report = {
            "status": status,
            "summary": summary,
            "data": data or {},
        }
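On the AgentLoop side, a report_to_parent handler only needs the _owner_worker back-reference set in __init__ above. A sketch, assuming a dict-args tool handler; the handler name and signature are illustrative, not taken from this diff:

def handle_report_to_parent(agent_loop: Any, args: dict[str, Any]) -> str:
    owner = getattr(agent_loop, "_owner_worker", None)  # set by Worker.__init__
    if owner is None:
        return "no owning worker; report dropped"
    owner.record_explicit_report(
        status=args.get("status", "success"),
        summary=args.get("summary", ""),
        data=args.get("data") or {},
    )
    return "report recorded"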

    def _build_result(
        self,
        agent_result: Any,
        duration: float,
        default_status: str,
    ) -> WorkerResult:
        """Construct a WorkerResult from AgentResult + optional explicit report."""
        explicit = self._explicit_report
        if explicit is not None:
            return WorkerResult(
                output=dict(agent_result.output or {}),
                error=agent_result.error,
                tokens_used=getattr(agent_result, "tokens_used", 0),
                duration_seconds=duration,
                status=explicit["status"],
                summary=explicit["summary"],
                data=explicit["data"],
            )
        # Synthesise a minimal report from AgentResult
        if agent_result.success:
            summary = f"Completed task '{self.task[:80]}' with {len(agent_result.output or {})} outputs."
            data = dict(agent_result.output or {})
        else:
            summary = f"Task '{self.task[:80]}' failed: {agent_result.error or 'unknown'}"
            data = {}
        return WorkerResult(
            output=dict(agent_result.output or {}),
            error=agent_result.error,
            tokens_used=getattr(agent_result, "tokens_used", 0),
            duration_seconds=duration,
            status=default_status,
            summary=summary,
            data=data,
        )

    async def _emit_terminal_events(
        self,
        agent_result: Any,
        force_status: str | None = None,
    ) -> None:
        """Emit EXECUTION_COMPLETED/FAILED AND SUBAGENT_REPORT on termination.

        Both events are published so that consumers that listen for
        either shape keep working. The SUBAGENT_REPORT carries the
        structured summary the overseer actually cares about.
        """
        if self._event_bus is None:
            return

        from framework.host.event_bus import AgentEvent, EventType

        # EXECUTION_COMPLETED / EXECUTION_FAILED (backwards-compat)
        if agent_result is not None:
            lifecycle_type = EventType.EXECUTION_COMPLETED if agent_result.success else EventType.EXECUTION_FAILED
            await self._event_bus.publish(
                AgentEvent(
                    type=lifecycle_type,
                    stream_id=self._context.stream_id or self.id,
                    node_id=self.id,
                    execution_id=self._context.execution_id or self.id,
                    data={
                        "worker_id": self.id,
                        "colony_id": self._colony_id,
                        "task": self.task,
                        "success": agent_result.success,
                        "error": agent_result.error,
                        "output_keys": (list(agent_result.output.keys()) if agent_result.output else []),
                    },
                )
            )

        # SUBAGENT_REPORT — the structured channel the overseer awaits
        result = self._result
        if result is None:
            return
        await self._event_bus.publish(
            AgentEvent(
                type=EventType.SUBAGENT_REPORT,
                stream_id=self._context.stream_id or self.id,
                node_id=self.id,
                execution_id=self._context.execution_id or self.id,
                data={
                    "worker_id": self.id,
                    "colony_id": self._colony_id,
                    "task": self.task,
                    "status": force_status or result.status,
                    "summary": result.summary,
                    "data": result.data,
                    "error": result.error,
                    "duration_seconds": result.duration_seconds,
                    "tokens_used": result.tokens_used,
                },
            )
        )
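A consumer of the structured channel might look like this sketch; the subscribe() call is an assumed API, since the event bus's subscription surface is not shown in this diff:

async def await_report(event_bus: Any, worker_id: str) -> dict[str, Any]:
    from framework.host.event_bus import EventType

    async for event in event_bus.subscribe(EventType.SUBAGENT_REPORT):  # assumed API
        if event.data.get("worker_id") == worker_id:
            return event.data  # status, summary, data, error, duration, tokens
    raise RuntimeError(f"event bus closed before {worker_id} reported")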

    # ------------------------------------------------------------------
    # External control
    # ------------------------------------------------------------------

    async def start_background(self) -> None:
        """Spawn the worker's run() as an asyncio background task."""
        self._task_handle = asyncio.create_task(self.run(), name=f"worker:{self.id}")
        # Surface any exception that escapes run(); without this callback
        # a crash here only becomes visible when stop() eventually awaits
        # the handle (and is silently lost if stop() is never called).
        self._task_handle.add_done_callback(self._on_task_done)

    def _on_task_done(self, task: asyncio.Task) -> None:
        if task.cancelled():
            return
        exc = task.exception()
        if exc is not None:
            logger.error(
                "Worker '%s' background task crashed: %s",
                self.id,
                exc,
                exc_info=exc,
            )

    async def stop(self) -> None:
        """Cancel the worker's background task, if any."""
        if self._persistent:
            # Signal the input loop to exit cleanly first
            await self._input_queue.put(None)
        if self._task_handle and not self._task_handle.done():
            self._task_handle.cancel()
            try:
                await self._task_handle
            except asyncio.CancelledError:
                pass

    async def inject(self, message: str) -> None:
        """Pump a user message into the worker.

        For ephemeral workers this is rarely used (they don't take
        follow-up input). For persistent overseers this is the chat
        injection path.
        """
        await self._input_queue.put(message)

    async def seed_conversation(self, messages: list[dict[str, Any]]) -> None:
        """Pre-populate the worker's ConversationStore before starting.

        Used when forking a queen DM into a colony: the DM's prior
        conversation becomes the colony overseer's starting point so the
        overseer resumes mid-thought instead of greeting the user fresh.

        ``messages`` is a list of dicts matching the ConversationStore's
        part format: ``{seq, role, content, tool_calls, tool_use_id,
        created_at, phase}``. The caller is responsible for rewriting
        ``agent_id`` to match the new worker, and for numbering ``seq``
        monotonically from 0.

        Must be called BEFORE ``start_background``.
        """
        if self.status != WorkerStatus.PENDING:
            raise RuntimeError(
                f"seed_conversation must be called before start_background (worker {self.id} is {self.status})"
            )

        # Write parts directly to the worker's on-disk conversation store
        # so that the AgentLoop's FileConversationStore picks them up when
        # NodeConversation loads from disk. We require an explicit
        # storage_path — falling back to CWD previously caused part files
        # to leak into the process working directory.
        if self._storage_path is None:
            raise RuntimeError(
                f"seed_conversation requires storage_path to be set on "
                f"Worker {self.id}; construct Worker with storage_path=..."
            )

        parts_dir = self._storage_path / "conversations" / "parts"
        parts_dir.mkdir(parents=True, exist_ok=True)

        import json

        for i, msg in enumerate(messages):
            msg = dict(msg)  # copy
            msg.setdefault("seq", i)
            msg.setdefault("agent_id", self.id)
            part_file = parts_dir / f"{msg['seq']:010d}.json"
            part_file.write_text(json.dumps(msg), encoding="utf-8")

        logger.info(
            "Worker %s: seeded %d messages into %s",
            self.id,
            len(messages),
            parts_dir,
        )
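What a caller-built seed might look like, following the part format in the docstring above; the field values, and the "chat" phase string, are illustrative assumptions:

import time

seed = [
    {"seq": 0, "role": "user", "content": "Plan the release notes.",
     "tool_calls": None, "tool_use_id": None, "created_at": time.time(), "phase": "chat"},
    {"seq": 1, "role": "assistant", "content": "Gathering the merged PRs first.",
     "tool_calls": None, "tool_use_id": None, "created_at": time.time(), "phase": "chat"},
]
# await overseer.seed_conversation(seed)  # must run before start_background()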

@@ -50,9 +50,7 @@ class AnthropicProvider(LLMProvider):
        # Delegate to LiteLLMProvider internally.
        self.api_key = api_key or _get_api_key_from_credential_store()
        if not self.api_key:
-            raise ValueError(
-                "Anthropic API key required. Set ANTHROPIC_API_KEY env var or pass api_key."
-            )
+            raise ValueError("Anthropic API key required. Set ANTHROPIC_API_KEY env var or pass api_key.")

        self.model = model


@@ -23,6 +23,7 @@ from collections.abc import AsyncIterator, Callable, Iterator
from pathlib import Path
from typing import Any

+from framework.config import HIVE_HOME as _HIVE_HOME
from framework.llm.provider import LLMProvider, LLMResponse, Tool
from framework.llm.stream_events import (
    FinishEvent,
@@ -50,20 +51,12 @@ _ENDPOINTS = [
_DEFAULT_PROJECT_ID = "rising-fact-p41fc"
_TOKEN_REFRESH_BUFFER_SECS = 60

-# Credentials file in ~/.hive/ (native implementation)
-_ACCOUNTS_FILE = Path.home() / ".hive" / "antigravity-accounts.json"
+# Credentials file in $HIVE_HOME (native implementation)
+_ACCOUNTS_FILE = _HIVE_HOME / "antigravity-accounts.json"
_IDE_STATE_DB_MAC = (
-    Path.home()
-    / "Library"
-    / "Application Support"
-    / "Antigravity"
-    / "User"
-    / "globalStorage"
-    / "state.vscdb"
-)
-_IDE_STATE_DB_LINUX = (
-    Path.home() / ".config" / "Antigravity" / "User" / "globalStorage" / "state.vscdb"
+    Path.home() / "Library" / "Application Support" / "Antigravity" / "User" / "globalStorage" / "state.vscdb"
)
+_IDE_STATE_DB_LINUX = Path.home() / ".config" / "Antigravity" / "User" / "globalStorage" / "state.vscdb"
_IDE_STATE_DB_KEY = "antigravityUnifiedStateSync.oauthToken"

_BASE_HEADERS: dict[str, str] = {
@@ -368,9 +361,7 @@ def _to_gemini_contents(


def _map_finish_reason(reason: str) -> str:
-    return {"STOP": "stop", "MAX_TOKENS": "max_tokens", "OTHER": "tool_use"}.get(
-        (reason or "").upper(), "stop"
-    )
+    return {"STOP": "stop", "MAX_TOKENS": "max_tokens", "OTHER": "tool_use"}.get((reason or "").upper(), "stop")


def _parse_complete_response(raw: dict[str, Any], model: str) -> LLMResponse:
@@ -538,8 +529,7 @@ class AntigravityProvider(LLMProvider):
            return self._access_token

        raise RuntimeError(
-            "No valid Antigravity credentials. "
-            "Run: uv run python core/antigravity_auth.py auth account add"
+            "No valid Antigravity credentials. Run: uv run python core/antigravity_auth.py auth account add"
        )

    # --- Request building -------------------------------------------------- #
@@ -593,11 +583,7 @@ class AntigravityProvider(LLMProvider):

        token = self._ensure_token()
        body_bytes = json.dumps(body).encode("utf-8")
-        path = (
-            "/v1internal:streamGenerateContent?alt=sse"
-            if streaming
-            else "/v1internal:generateContent"
-        )
+        path = "/v1internal:streamGenerateContent?alt=sse" if streaming else "/v1internal:generateContent"
        headers = {
            **_BASE_HEADERS,
            "Authorization": f"Bearer {token}",
@@ -619,9 +605,7 @@ class AntigravityProvider(LLMProvider):
            if result:
                self._access_token, self._token_expires_at = result
                headers["Authorization"] = f"Bearer {self._access_token}"
-                req2 = urllib.request.Request(
-                    url, data=body_bytes, headers=headers, method="POST"
-                )
+                req2 = urllib.request.Request(url, data=body_bytes, headers=headers, method="POST")
                try:
                    return urllib.request.urlopen(req2, timeout=120)  # noqa: S310
                except urllib.error.HTTPError as exc2:
@@ -642,9 +626,7 @@ class AntigravityProvider(LLMProvider):
                last_exc = exc
                continue

-        raise RuntimeError(
-            f"All Antigravity endpoints failed. Last error: {last_exc}"
-        ) from last_exc
+        raise RuntimeError(f"All Antigravity endpoints failed. Last error: {last_exc}") from last_exc

    # --- LLMProvider interface --------------------------------------------- #

@@ -672,10 +654,17 @@ class AntigravityProvider(LLMProvider):
        system: str = "",
        tools: list[Tool] | None = None,
        max_tokens: int = 4096,
+        system_dynamic_suffix: str | None = None,
    ) -> AsyncIterator[StreamEvent]:
        import asyncio  # noqa: PLC0415
        import concurrent.futures  # noqa: PLC0415

+        # Antigravity (Google's proprietary endpoint) doesn't expose a
+        # cache_control hook. Concatenate the dynamic suffix so its shape
+        # matches the legacy single-string call site.
+        if system_dynamic_suffix:
+            system = f"{system}\n\n{system_dynamic_suffix}" if system else system_dynamic_suffix
+
        loop = asyncio.get_running_loop()
        queue: asyncio.Queue[StreamEvent | None] = asyncio.Queue()

@@ -683,9 +672,7 @@ class AntigravityProvider(LLMProvider):
            try:
                body = self._build_body(messages, system, tools, max_tokens)
                http_resp = self._post(body, streaming=True)
-                for event in _parse_sse_stream(
-                    http_resp, self.model, self._thought_sigs.__setitem__
-                ):
+                for event in _parse_sse_stream(http_resp, self.model, self._thought_sigs.__setitem__):
                    loop.call_soon_threadsafe(queue.put_nowait, event)
            except Exception as exc:
                logger.error("Antigravity stream error: %s", exc)

@@ -1,106 +1,48 @@
"""Model capability checks for LLM providers.

-Vision support rules are derived from official vendor documentation:
-- ZAI (z.ai): docs.z.ai/guides/vlm — GLM-4.6V variants are vision; GLM-5/4.6/4.7 are text-only
-- MiniMax: platform.minimax.io/docs — minimax-vl-01 is vision; M2.x are text-only
-- DeepSeek: api-docs.deepseek.com — deepseek-vl2 is vision; chat/reasoner are text-only
-- Cerebras: inference-docs.cerebras.ai — no vision models at all
-- Groq: console.groq.com/docs/vision — vision capable; treat as supported by default
-- Ollama/LM Studio/vLLM/llama.cpp: local runners denied by default; model names
-  don't reliably indicate vision support, so users must configure explicitly
+Vision support is sourced from the curated ``model_catalog.json``. Each model
+entry carries an optional ``supports_vision`` boolean; unknown models default
+to vision-capable so hosted frontier models work out of the box. To toggle
+support for a model, edit its catalog entry rather than this file.
"""

from __future__ import annotations

+from typing import TYPE_CHECKING

-def _model_name(model: str) -> str:
-    """Return the bare model name after stripping any 'provider/' prefix."""
-    if "/" in model:
-        return model.split("/", 1)[1]
-    return model
+from framework.llm.model_catalog import model_supports_vision


-# Step 1: explicit vision allow-list — these always support images regardless
-# of what the provider-level rules say. Checked first so that e.g. glm-4.6v
-# is allowed even though glm-4.6 is denied.
-_VISION_ALLOW_BARE_PREFIXES: tuple[str, ...] = (
-    # ZAI/GLM vision models (docs.z.ai/guides/vlm)
-    "glm-4v",  # GLM-4V series (legacy)
-    "glm-4.6v",  # GLM-4.6V, GLM-4.6V-flash, GLM-4.6V-flashx
-    # DeepSeek vision models
-    "deepseek-vl",  # deepseek-vl2, deepseek-vl2-small, deepseek-vl2-tiny
-    # MiniMax vision model
-    "minimax-vl",  # minimax-vl-01
-)
-
-# Step 2: provider-level deny — every model from this provider is text-only.
-_TEXT_ONLY_PROVIDER_PREFIXES: tuple[str, ...] = (
-    # Cerebras: inference-docs.cerebras.ai lists only text models
-    "cerebras/",
-    # Local runners: model names don't reliably indicate vision support
-    "ollama/",
-    "ollama_chat/",
-    "lm_studio/",
-    "vllm/",
-    "llamacpp/",
-)
-
-# Step 3: per-model deny — text-only models within otherwise mixed providers.
-# Matched against the bare model name (provider prefix stripped, lower-cased).
-# The vision allow-list above is checked first, so vision variants of the same
-# family are already handled before these deny patterns are reached.
-_TEXT_ONLY_MODEL_BARE_PREFIXES: tuple[str, ...] = (
-    # --- ZAI / GLM family ---
-    # text-only: glm-5, glm-4.6, glm-4.7, glm-4.5, zai-glm-*
-    # vision: glm-4v, glm-4.6v (caught by allow-list above)
-    "glm-5",
-    "glm-4.6",  # bare glm-4.6 is text-only; glm-4.6v is caught by allow-list
-    "glm-4.7",
-    "glm-4.5",
-    "zai-glm",
-    # --- DeepSeek ---
-    # text-only: deepseek-chat, deepseek-coder, deepseek-reasoner
-    # vision: deepseek-vl2 (caught by allow-list above)
-    # Note: LiteLLM's deepseek handler may flatten content lists for some models;
-    # VL models are allowed through and rely on LiteLLM's native VL support.
-    "deepseek-chat",
-    "deepseek-coder",
-    "deepseek-reasoner",
-    # --- MiniMax ---
-    # text-only: minimax-m2.*, minimax-text-*, abab* (legacy)
-    # vision: minimax-vl-01 (caught by allow-list above)
-    "minimax-m2",
-    "minimax-text",
-    "abab",
-)
+if TYPE_CHECKING:
+    from framework.llm.provider import Tool


def supports_image_tool_results(model: str) -> bool:
    """Return whether *model* can receive image content in messages.

-    Used to gate both user-message images and tool-result image blocks.
-
-    Logic (checked in order):
-    1. Vision allow-list → True (known vision model, skip all denies)
-    2. Provider deny → False (entire provider is text-only)
-    3. Model deny → False (specific text-only model within a mixed provider)
-    4. Default → True (assume capable; unknown providers and models)
+    Thin wrapper over :func:`model_supports_vision` so existing call sites
+    keep working. Used to gate both user-message images and tool-result
+    image blocks. Empty model strings are treated as capable so the default
+    code path doesn't strip images before a provider is selected.
    """
-    model_lower = model.lower()
-    bare = _model_name(model_lower)
-
-    # 1. Explicit vision allow — takes priority over all denies
-    if any(bare.startswith(p) for p in _VISION_ALLOW_BARE_PREFIXES):
+    if not model:
        return True
+    return model_supports_vision(model)

-    # 2. Provider-level deny (all models from this provider are text-only)
-    if any(model_lower.startswith(p) for p in _TEXT_ONLY_PROVIDER_PREFIXES):
-        return False
-
-    # 3. Per-model deny (text-only variants within mixed-capability families)
-    if any(bare.startswith(p) for p in _TEXT_ONLY_MODEL_BARE_PREFIXES):
-        return False
+def filter_tools_for_model(tools: list[Tool], model: str) -> tuple[list[Tool], list[str]]:
+    """Drop image-producing tools for text-only models.
+
-    # 5. Default: assume vision capable
-    # Covers: OpenAI, Anthropic, Google, Mistral, Kimi, and other hosted providers
-    return True
+    Returns ``(filtered_tools, hidden_names)``. For vision-capable models
+    (or when *model* is empty) the input list is returned unchanged and
+    ``hidden_names`` is empty. For text-only models any tool with
+    ``produces_image=True`` is removed so the LLM never sees it in its
+    schema — avoids wasted calls and stale "screenshot failed" entries
+    in agent memory.
+    """
+    if not model or supports_image_tool_results(model):
+        return list(tools), []
+    hidden = [t.name for t in tools if t.produces_image]
+    if not hidden:
+        return list(tools), []
+    kept = [t for t in tools if not t.produces_image]
+    return kept, hidden
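Usage, as a sketch: only name and produces_image are referenced by the code above, so any other Tool constructor arguments are elided and the kwargs shown are assumed:

from framework.llm.provider import Tool

tools = [
    Tool(name="browser_screenshot", produces_image=True),
    Tool(name="read_file", produces_image=False),
]
kept, hidden = filter_tools_for_model(tools, "cerebras/gpt-oss-120b")
# cerebras models are text-only in the catalog, so:
# kept   -> [the read_file tool]
# hidden -> ["browser_screenshot"]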

+757 -119: File diff suppressed because it is too large
@@ -155,8 +155,11 @@ class MockLLMProvider(LLMProvider):
        response_format: dict[str, Any] | None = None,
        json_mode: bool = False,
        max_retries: int | None = None,
+        system_dynamic_suffix: str | None = None,
    ) -> LLMResponse:
        """Async mock completion (no I/O, returns immediately)."""
+        if system_dynamic_suffix:
+            system = f"{system}\n\n{system_dynamic_suffix}" if system else system_dynamic_suffix
        return self.complete(
            messages=messages,
            system=system,
@@ -173,6 +176,7 @@ class MockLLMProvider(LLMProvider):
        system: str = "",
        tools: list[Tool] | None = None,
        max_tokens: int = 4096,
+        system_dynamic_suffix: str | None = None,
    ) -> AsyncIterator[StreamEvent]:
        """Stream a mock completion as word-level TextDeltaEvents.

@@ -180,6 +184,8 @@ class MockLLMProvider(LLMProvider):
        TextDeltaEvent with an accumulating snapshot, exercising the full
        streaming pipeline without any API calls.
        """
+        if system_dynamic_suffix:
+            system = f"{system}\n\n{system_dynamic_suffix}" if system else system_dynamic_suffix
        content = self._generate_mock_response(system=system, json_mode=False)
        words = content.split(" ")
        accumulated = ""

@@ -0,0 +1,516 @@
{
  "schema_version": 1,
  "providers": {
    "anthropic": {
      "default_model": "claude-haiku-4-5-20251001",
      "models": [
        { "id": "claude-haiku-4-5-20251001", "label": "Haiku 4.5 - Fast + cheap", "recommended": false, "max_tokens": 64000, "max_context_tokens": 136000, "supports_vision": true },
        { "id": "claude-sonnet-4-5-20250929", "label": "Sonnet 4.5 - Best balance", "recommended": false, "max_tokens": 64000, "max_context_tokens": 136000, "supports_vision": true },
        { "id": "claude-opus-4-6", "label": "Opus 4.6 - Most capable", "recommended": true, "max_tokens": 128000, "max_context_tokens": 872000, "supports_vision": true }
      ]
    },
    "openai": {
      "default_model": "gpt-5.5",
      "models": [
        { "id": "gpt-5.5", "label": "GPT-5.5 - Frontier coding + reasoning", "recommended": true, "max_tokens": 128000, "max_context_tokens": 1050000, "pricing_usd_per_mtok": { "input": 5.00, "output": 30.00 }, "supports_vision": true },
        { "id": "gpt-5.4", "label": "GPT-5.4 - Previous flagship", "recommended": false, "max_tokens": 128000, "max_context_tokens": 960000, "supports_vision": true },
        { "id": "gpt-5.4-mini", "label": "GPT-5.4 Mini - Faster + cheaper", "recommended": false, "max_tokens": 128000, "max_context_tokens": 400000, "supports_vision": true },
        { "id": "gpt-5.4-nano", "label": "GPT-5.4 Nano - Cheapest high-volume", "recommended": false, "max_tokens": 128000, "max_context_tokens": 400000, "supports_vision": true }
      ]
    },
    "gemini": {
      "default_model": "gemini-3-flash-preview",
      "models": [
        { "id": "gemini-3-flash-preview", "label": "Gemini 3 Flash - Fast", "recommended": false, "max_tokens": 32768, "max_context_tokens": 240000, "supports_vision": true },
        { "id": "gemini-3.1-pro-preview-customtools", "label": "Gemini 3.1 Pro - Best quality", "recommended": true, "max_tokens": 32768, "max_context_tokens": 240000, "supports_vision": true }
      ]
    },
    "groq": {
      "default_model": "openai/gpt-oss-120b",
      "models": [
        { "id": "openai/gpt-oss-120b", "label": "GPT-OSS 120B - Best reasoning", "recommended": true, "max_tokens": 65536, "max_context_tokens": 131072, "supports_vision": false },
        { "id": "openai/gpt-oss-20b", "label": "GPT-OSS 20B - Fast + cheaper", "recommended": false, "max_tokens": 65536, "max_context_tokens": 131072, "supports_vision": false },
        { "id": "llama-3.3-70b-versatile", "label": "Llama 3.3 70B - General purpose", "recommended": false, "max_tokens": 32768, "max_context_tokens": 131072, "supports_vision": false },
        { "id": "llama-3.1-8b-instant", "label": "Llama 3.1 8B - Fastest", "recommended": false, "max_tokens": 131072, "max_context_tokens": 131072, "supports_vision": false }
      ]
    },
    "cerebras": {
      "default_model": "gpt-oss-120b",
      "models": [
        { "id": "gpt-oss-120b", "label": "GPT-OSS 120B - Best production reasoning", "recommended": true, "max_tokens": 40960, "max_context_tokens": 131072, "supports_vision": false },
        { "id": "zai-glm-4.7", "label": "Z.ai GLM 4.7 - Strong coding preview", "recommended": true, "max_tokens": 40960, "max_context_tokens": 131072, "supports_vision": false },
        { "id": "qwen-3-235b-a22b-instruct-2507", "label": "Qwen 3 235B Instruct - Frontier preview", "recommended": false, "max_tokens": 40960, "max_context_tokens": 131072, "supports_vision": false }
      ]
    },
    "minimax": {
      "default_model": "MiniMax-M2.7",
      "models": [
        { "id": "MiniMax-M2.7", "label": "MiniMax M2.7 - Best coding quality", "recommended": true, "max_tokens": 40960, "max_context_tokens": 180000, "pricing_usd_per_mtok": { "input": 0.30, "output": 1.20 }, "supports_vision": false },
        { "id": "MiniMax-M2.5", "label": "MiniMax M2.5 - Strong value", "recommended": false, "max_tokens": 40960, "max_context_tokens": 180000, "supports_vision": false }
      ]
    },
    "mistral": {
      "default_model": "mistral-large-2512",
      "models": [
        { "id": "mistral-large-2512", "label": "Mistral Large 3 - Best quality", "recommended": true, "max_tokens": 32768, "max_context_tokens": 256000, "supports_vision": true },
        { "id": "mistral-medium-2508", "label": "Mistral Medium 3.1 - Balanced", "recommended": false, "max_tokens": 32768, "max_context_tokens": 128000, "supports_vision": true },
        { "id": "mistral-small-2603", "label": "Mistral Small 4 - Fast + capable", "recommended": false, "max_tokens": 32768, "max_context_tokens": 256000, "supports_vision": true },
        { "id": "codestral-2508", "label": "Codestral - Coding specialist", "recommended": false, "max_tokens": 32768, "max_context_tokens": 128000, "supports_vision": false }
      ]
    },
    "together": {
      "default_model": "deepseek-ai/DeepSeek-V3.1",
      "models": [
        { "id": "deepseek-ai/DeepSeek-V3.1", "label": "DeepSeek V3.1 - Best general coding", "recommended": true, "max_tokens": 32768, "max_context_tokens": 128000, "supports_vision": false },
        { "id": "Qwen/Qwen3-Coder-480B-A35B-Instruct-FP8", "label": "Qwen3 Coder 480B - Advanced coding", "recommended": false, "max_tokens": 32768, "max_context_tokens": 262144, "supports_vision": false },
        { "id": "openai/gpt-oss-120b", "label": "GPT-OSS 120B - Strong reasoning", "recommended": false, "max_tokens": 32768, "max_context_tokens": 128000, "supports_vision": false },
        { "id": "meta-llama/Llama-3.3-70B-Instruct-Turbo", "label": "Llama 3.3 70B Turbo - Fast baseline", "recommended": false, "max_tokens": 32768, "max_context_tokens": 131072, "supports_vision": false }
      ]
    },
    "deepseek": {
      "default_model": "deepseek-v4-pro",
      "models": [
        { "id": "deepseek-v4-pro", "label": "DeepSeek V4 Pro - Most capable", "recommended": true, "max_tokens": 384000, "max_context_tokens": 1000000, "pricing_usd_per_mtok": { "input": 1.74, "output": 3.48, "cache_read": 0.145 }, "supports_vision": false },
        { "id": "deepseek-v4-flash", "label": "DeepSeek V4 Flash - Fast + cheap", "recommended": true, "max_tokens": 384000, "max_context_tokens": 1000000, "pricing_usd_per_mtok": { "input": 0.14, "output": 0.28, "cache_read": 0.028 }, "supports_vision": false },
        { "id": "deepseek-reasoner", "label": "DeepSeek Reasoner - Legacy (deprecating)", "recommended": false, "max_tokens": 64000, "max_context_tokens": 128000, "supports_vision": false }
      ]
    },
    "kimi": {
      "default_model": "kimi-k2.5",
      "models": [
        { "id": "kimi-k2.5", "label": "Kimi K2.5 - Best coding", "recommended": true, "max_tokens": 32768, "max_context_tokens": 200000, "pricing_usd_per_mtok": { "input": 0.60, "output": 2.50, "cache_read": 0.15 }, "supports_vision": true }
      ]
    },
    "hive": {
      "default_model": "queen",
      "models": [
        { "id": "queen", "label": "Queen - Hive native", "recommended": true, "max_tokens": 32768, "max_context_tokens": 180000, "supports_vision": false },
        { "id": "kimi-2.5", "label": "Kimi 2.5 - Via Hive", "recommended": false, "max_tokens": 32768, "max_context_tokens": 240000, "supports_vision": true },
        { "id": "glm-5.1", "label": "GLM-5.1 - Via Hive", "recommended": false, "max_tokens": 32768, "max_context_tokens": 180000, "pricing_usd_per_mtok": { "input": 1.40, "output": 4.40, "cache_read": 0.26, "cache_creation": 0.0 }, "supports_vision": false }
      ]
    },
    "openrouter": {
      "default_model": "openai/gpt-5.4",
      "models": [
        { "id": "openai/gpt-5.4", "label": "GPT-5.4 - Best overall", "recommended": true, "max_tokens": 128000, "max_context_tokens": 872000, "supports_vision": true },
        { "id": "anthropic/claude-sonnet-4.6", "label": "Claude Sonnet 4.6 - Best coding balance", "recommended": false, "max_tokens": 64000, "max_context_tokens": 872000, "supports_vision": true },
        { "id": "anthropic/claude-opus-4.6", "label": "Claude Opus 4.6 - Most capable", "recommended": false, "max_tokens": 128000, "max_context_tokens": 872000, "supports_vision": true },
        { "id": "google/gemini-3.1-pro-preview-customtools", "label": "Gemini 3.1 Pro Preview - Long-context reasoning", "recommended": false, "max_tokens": 32768, "max_context_tokens": 872000, "supports_vision": true },
        { "id": "qwen/qwen3.6-plus", "label": "Qwen 3.6 Plus - Strong reasoning", "recommended": true, "max_tokens": 32768, "max_context_tokens": 240000, "supports_vision": false },
        { "id": "z-ai/glm-5v-turbo", "label": "GLM-5V Turbo - Vision capable", "recommended": true, "max_tokens": 32768, "max_context_tokens": 192000, "supports_vision": true },
        { "id": "z-ai/glm-5.1", "label": "GLM-5.1 - Better but Slower", "recommended": true, "max_tokens": 40960, "max_context_tokens": 192000, "pricing_usd_per_mtok": { "input": 1.40, "output": 4.40, "cache_read": 0.26, "cache_creation": 0.0 }, "supports_vision": false },
        { "id": "minimax/minimax-m2.7", "label": "Minimax M2.7 - Minimax flagship", "recommended": false, "max_tokens": 40960, "max_context_tokens": 180000, "pricing_usd_per_mtok": { "input": 0.30, "output": 1.20 }, "supports_vision": false },
        { "id": "xiaomi/mimo-v2-pro", "label": "MiMo V2 Pro - Xiaomi multimodal", "recommended": true, "max_tokens": 64000, "max_context_tokens": 240000, "supports_vision": true }
      ]
    }
  },
  "presets": {
    "claude_code": { "provider": "anthropic", "model": "claude-opus-4-6", "max_tokens": 128000, "max_context_tokens": 872000 },
    "zai_code": { "provider": "openai", "api_key_env_var": "ZAI_API_KEY", "model": "glm-5.1", "max_tokens": 32768, "max_context_tokens": 180000, "api_base": "https://api.z.ai/api/coding/paas/v4" },
    "codex": { "provider": "openai", "model": "gpt-5.3-codex", "max_tokens": 16384, "max_context_tokens": 120000, "api_base": "https://chatgpt.com/backend-api/codex" },
    "minimax_code": { "provider": "minimax", "api_key_env_var": "MINIMAX_API_KEY", "model": "MiniMax-M2.7", "max_tokens": 40960, "max_context_tokens": 180800, "api_base": "https://api.minimax.io/v1" },
    "kimi_code": { "provider": "kimi", "api_key_env_var": "KIMI_API_KEY", "model": "kimi-k2.5", "max_tokens": 32768, "max_context_tokens": 240000, "api_base": "https://api.kimi.com/coding" },
    "hive_llm": {
      "provider": "hive", "api_key_env_var": "HIVE_API_KEY", "model": "queen", "max_tokens": 32768, "max_context_tokens": 180000, "api_base": "https://api.adenhq.com",
      "model_choices": [
        { "id": "queen", "label": "queen", "recommended": true },
        { "id": "kimi-2.5", "label": "kimi-2.5", "recommended": false },
        { "id": "glm-5.1", "label": "glm-5.1", "recommended": false }
      ]
    },
    "antigravity": { "provider": "openai", "model": "gemini-3-flash", "max_tokens": 32768, "max_context_tokens": 1000000 },
    "ollama_local": { "provider": "ollama", "max_tokens": 8192, "max_context_tokens": 16384, "api_base": "http://localhost:11434" }
  }
}

@@ -0,0 +1,274 @@
"""Shared curated model metadata loaded from ``model_catalog.json``."""

from __future__ import annotations

import copy
import json
from functools import lru_cache
from pathlib import Path
from typing import Any

MODEL_CATALOG_PATH = Path(__file__).with_name("model_catalog.json")


class ModelCatalogError(RuntimeError):
    """Raised when the curated model catalogue is missing or malformed."""


def _require_mapping(value: Any, path: str) -> dict[str, Any]:
    if not isinstance(value, dict):
        raise ModelCatalogError(f"{path} must be an object")
    return value


def _require_list(value: Any, path: str) -> list[Any]:
    if not isinstance(value, list):
        raise ModelCatalogError(f"{path} must be an array")
    return value


_PRICING_KEYS = ("input", "output", "cache_read", "cache_creation")


def _validate_pricing(value: Any, path: str) -> None:
    """Validate an optional ``pricing_usd_per_mtok`` block.

    Keys are USD-per-million-tokens rates. ``input``/``output`` are required;
    ``cache_read``/``cache_creation`` are optional. All values must be
    non-negative numbers. Used as a last-resort fallback when neither the
    provider nor LiteLLM's catalog reports a cost.
    """
    pricing = _require_mapping(value, path)
    for key in ("input", "output"):
        if key not in pricing:
            raise ModelCatalogError(f"{path}.{key} is required")
    for key, rate in pricing.items():
        if key not in _PRICING_KEYS:
            raise ModelCatalogError(f"{path}.{key} is not a recognized pricing field")
        if not isinstance(rate, (int, float)) or isinstance(rate, bool) or rate < 0:
            raise ModelCatalogError(f"{path}.{key} must be a non-negative number")


def _validate_model_catalog(data: dict[str, Any]) -> dict[str, Any]:
    providers = _require_mapping(data.get("providers"), "providers")

    for provider_id, provider_info in providers.items():
        provider_path = f"providers.{provider_id}"
        provider_map = _require_mapping(provider_info, provider_path)
        default_model = provider_map.get("default_model")
        if not isinstance(default_model, str) or not default_model.strip():
            raise ModelCatalogError(f"{provider_path}.default_model must be a non-empty string")

        models = _require_list(provider_map.get("models"), f"{provider_path}.models")
        if not models:
            raise ModelCatalogError(f"{provider_path}.models must not be empty")

        seen_model_ids: set[str] = set()
        default_found = False
        for idx, model in enumerate(models):
            model_path = f"{provider_path}.models[{idx}]"
            model_map = _require_mapping(model, model_path)
            model_id = model_map.get("id")
            if not isinstance(model_id, str) or not model_id.strip():
                raise ModelCatalogError(f"{model_path}.id must be a non-empty string")
            if model_id in seen_model_ids:
                raise ModelCatalogError(f"Duplicate model id {model_id!r} in {provider_path}.models")
            seen_model_ids.add(model_id)

            if model_id == default_model:
                default_found = True

            label = model_map.get("label")
            if not isinstance(label, str) or not label.strip():
                raise ModelCatalogError(f"{model_path}.label must be a non-empty string")

            recommended = model_map.get("recommended")
            if not isinstance(recommended, bool):
                raise ModelCatalogError(f"{model_path}.recommended must be a boolean")

            for key in ("max_tokens", "max_context_tokens"):
                value = model_map.get(key)
                if not isinstance(value, int) or value <= 0:
                    raise ModelCatalogError(f"{model_path}.{key} must be a positive integer")

            pricing = model_map.get("pricing_usd_per_mtok")
            if pricing is not None:
                _validate_pricing(pricing, f"{model_path}.pricing_usd_per_mtok")

            supports_vision = model_map.get("supports_vision")
            if supports_vision is not None and not isinstance(supports_vision, bool):
                raise ModelCatalogError(f"{model_path}.supports_vision must be a boolean when present")

        if not default_found:
            raise ModelCatalogError(
                f"{provider_path}.default_model={default_model!r} is not present in {provider_path}.models"
            )

    presets = _require_mapping(data.get("presets"), "presets")
    for preset_id, preset_info in presets.items():
        preset_path = f"presets.{preset_id}"
        preset_map = _require_mapping(preset_info, preset_path)

        provider = preset_map.get("provider")
        if not isinstance(provider, str) or not provider.strip():
            raise ModelCatalogError(f"{preset_path}.provider must be a non-empty string")

        model = preset_map.get("model")
        if model is not None and (not isinstance(model, str) or not model.strip()):
            raise ModelCatalogError(f"{preset_path}.model must be a non-empty string when present")

        api_base = preset_map.get("api_base")
        if api_base is not None and (not isinstance(api_base, str) or not api_base.strip()):
            raise ModelCatalogError(f"{preset_path}.api_base must be a non-empty string when present")

        api_key_env_var = preset_map.get("api_key_env_var")
        if api_key_env_var is not None and (not isinstance(api_key_env_var, str) or not api_key_env_var.strip()):
            raise ModelCatalogError(f"{preset_path}.api_key_env_var must be a non-empty string when present")

        for key in ("max_tokens", "max_context_tokens"):
            value = preset_map.get(key)
            if not isinstance(value, int) or value <= 0:
                raise ModelCatalogError(f"{preset_path}.{key} must be a positive integer")

        model_choices = preset_map.get("model_choices")
        if model_choices is not None:
            for idx, choice in enumerate(_require_list(model_choices, f"{preset_path}.model_choices")):
                choice_path = f"{preset_path}.model_choices[{idx}]"
                choice_map = _require_mapping(choice, choice_path)
                choice_id = choice_map.get("id")
                if not isinstance(choice_id, str) or not choice_id.strip():
                    raise ModelCatalogError(f"{choice_path}.id must be a non-empty string")
                label = choice_map.get("label")
                if not isinstance(label, str) or not label.strip():
                    raise ModelCatalogError(f"{choice_path}.label must be a non-empty string")
                recommended = choice_map.get("recommended")
                if not isinstance(recommended, bool):
                    raise ModelCatalogError(f"{choice_path}.recommended must be a boolean")

    return data


@lru_cache(maxsize=1)
def load_model_catalog() -> dict[str, Any]:
    """Load and validate the curated model catalogue."""
    try:
        raw = json.loads(MODEL_CATALOG_PATH.read_text(encoding="utf-8"))
    except FileNotFoundError as exc:
        raise ModelCatalogError(f"Model catalogue not found: {MODEL_CATALOG_PATH}") from exc
    except json.JSONDecodeError as exc:
        raise ModelCatalogError(f"Model catalogue JSON is invalid: {exc}") from exc

    return _validate_model_catalog(_require_mapping(raw, "root"))


def get_models_catalogue() -> dict[str, list[dict[str, Any]]]:
    """Return provider -> model list."""
    providers = load_model_catalog()["providers"]
    return {provider_id: copy.deepcopy(provider_info["models"]) for provider_id, provider_info in providers.items()}


def get_default_models() -> dict[str, str]:
    """Return provider -> default model id."""
    providers = load_model_catalog()["providers"]
    return {provider_id: str(provider_info["default_model"]) for provider_id, provider_info in providers.items()}


def get_provider_models(provider: str) -> list[dict[str, Any]]:
    """Return the curated models for one provider."""
    provider_info = load_model_catalog()["providers"].get(provider)
    if not provider_info:
        return []
    return copy.deepcopy(provider_info["models"])


def get_default_model(provider: str) -> str | None:
    """Return the curated default model id for one provider."""
    provider_info = load_model_catalog()["providers"].get(provider)
    if not provider_info:
        return None
    return str(provider_info["default_model"])


def find_model(provider: str, model_id: str) -> dict[str, Any] | None:
    """Return one model entry for a provider, if present."""
    for model in load_model_catalog()["providers"].get(provider, {}).get("models", []):
        if model["id"] == model_id:
            return copy.deepcopy(model)
    return None


def find_model_any_provider(model_id: str) -> tuple[str, dict[str, Any]] | None:
    """Return the first curated provider/model entry matching a model id."""
    for provider_id, provider_info in load_model_catalog()["providers"].items():
        for model in provider_info["models"]:
            if model["id"] == model_id:
                return provider_id, copy.deepcopy(model)
    return None


def get_model_limits(provider: str, model_id: str) -> tuple[int, int] | None:
    """Return ``(max_tokens, max_context_tokens)`` for one provider/model pair."""
    model = find_model(provider, model_id)
    if not model:
        return None
    return int(model["max_tokens"]), int(model["max_context_tokens"])


def get_model_pricing(model_id: str) -> dict[str, float] | None:
    """Return ``pricing_usd_per_mtok`` for a model id, searching all providers.

    Returns ``None`` when the model is absent from the catalog or has no
    pricing entry. Used by the cost-extraction fallback in ``litellm.py``
    when the provider response and LiteLLM's catalog both come up empty.
    """
    if not model_id:
        return None
    for provider_info in load_model_catalog()["providers"].values():
        for model in provider_info["models"]:
            if model["id"] == model_id:
                pricing = model.get("pricing_usd_per_mtok")
                if pricing is None:
                    return None
                return {key: float(rate) for key, rate in pricing.items()}
    return None
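A worked example of the fallback, using the catalog's MiniMax-M2.7 rates (input 0.30, output 1.20 USD per million tokens):

pricing = get_model_pricing("MiniMax-M2.7")
if pricing is not None:
    # A call that consumed 120k input tokens and 8k output tokens:
    cost = (120_000 / 1_000_000) * pricing["input"] + (8_000 / 1_000_000) * pricing["output"]
    # 0.12 * 0.30 + 0.008 * 1.20 = 0.036 + 0.0096 = 0.0456 USD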


def model_supports_vision(model_id: str) -> bool:
    """Return whether *model_id* supports image inputs per the curated catalog.

    Looks up the bare model id (and the provider-prefix-stripped form) in the
    catalog. Returns the model's ``supports_vision`` flag when found, defaulting
    to ``True`` for unknown models or when the flag is absent — assume vision
    capable for hosted providers, since modern frontier models support images
    by default and the captioning fallback is more expensive than just letting
    the provider handle the image.
    """
    if not model_id:
        return True

    candidates = [model_id]
    if "/" in model_id:
        candidates.append(model_id.split("/", 1)[1])

    for candidate in candidates:
        for provider_info in load_model_catalog()["providers"].values():
            for model in provider_info["models"]:
                if model["id"] == candidate:
                    flag = model.get("supports_vision")
                    if isinstance(flag, bool):
                        return flag
                    return True
    return True


def get_preset(preset_id: str) -> dict[str, Any] | None:
    """Return one preset entry."""
    preset = load_model_catalog()["presets"].get(preset_id)
    if not preset:
        return None
    return copy.deepcopy(preset)


def get_presets() -> dict[str, dict[str, Any]]:
    """Return all preset entries."""
    return copy.deepcopy(load_model_catalog()["presets"])
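A quick smoke test of the accessors, using entries from the catalog shipped in this change:

assert get_default_model("deepseek") == "deepseek-v4-pro"
assert get_model_limits("kimi", "kimi-k2.5") == (32768, 200000)
assert model_supports_vision("openrouter/z-ai/glm-5v-turbo") is True   # prefix stripped, found
assert model_supports_vision("cerebras/gpt-oss-120b") is False
preset = get_preset("kimi_code")
assert preset is not None and preset["api_base"] == "https://api.kimi.com/coding"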

Some files were not shown because too many files have changed in this diff.