Compare commits
509 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 3963855d1d | |||
| 28a71b70a8 | |||
| 33d3a13fde | |||
| 5ea278a08d | |||
| fd95f8da28 | |||
| c1d5952ad9 | |||
| 72673e12fb | |||
| 3867d3926b | |||
| 0b2b7a2622 | |||
| 3951ee1a7d | |||
| 1afde51c7b | |||
| cbeef18f0a | |||
| 1947d8c3ca | |||
| 55c63736ef | |||
| a2b68d893f | |||
| fd06e43d9c | |||
| b550f6efa0 | |||
| 47adf88773 | |||
| 8748da38cf | |||
| f697dc99fb | |||
| ecb038c955 | |||
| 77ff31cec6 | |||
| 5ea8677a5d | |||
| 97f5b3423f | |||
| 4968207eef | |||
| f859e2203a | |||
| fb3dad4354 | |||
| adc82c6a65 | |||
| 96084fea16 | |||
| 6f52026c84 | |||
| 3576218ea9 | |||
| 4c662db530 | |||
| da1ce4e5a7 | |||
| c4944c5662 | |||
| d892f87651 | |||
| 447f23d157 | |||
| aa12f0d295 | |||
| de9226aae0 | |||
| 16e1ab1a87 | |||
| 54287e06ad | |||
| b33de5f0e1 | |||
| 2d5ef20d4d | |||
| 177346b159 | |||
| 08819b1609 | |||
| 35b1332551 | |||
| 52586a024b | |||
| 05a314b121 | |||
| 8e262e2270 | |||
| 733bb4d2dd | |||
| ba31c760a6 | |||
| a388bc6837 | |||
| 3f5bbbf1e3 | |||
| 002da15375 | |||
| 005609da3a | |||
| 182d9ca6f9 | |||
| a6b43f8016 | |||
| 31700fa8da | |||
| 6b475ec1cf | |||
| 1b27844c52 | |||
| 3a0b91f7ab | |||
| 82108e32fa | |||
| 28f4fecfb3 | |||
| ff1bb08217 | |||
| 10617fee0d | |||
| 866103ddf4 | |||
| fcfaca6bd0 | |||
| 4c7d9ab0fb | |||
| 061aec4b3d | |||
| f12ab10725 | |||
| 0882fa6ce5 | |||
| 0b87e4c45d | |||
| 9c7e846828 | |||
| 30bd0e483a | |||
| 13cc93c334 | |||
| 564b1bb752 | |||
| 2f31a92d31 | |||
| fd89c7f56f | |||
| 35738c8279 | |||
| a0d14b8a25 | |||
| 9c781ed78e | |||
| 460a24e34a | |||
| 8ae030e16e | |||
| 3c6467c814 | |||
| 2f11f0c911 | |||
| c3ae67fb1d | |||
| 8c750c7edd | |||
| 571838a289 | |||
| dafaaae792 | |||
| b45e14efb4 | |||
| e70cbf26e2 | |||
| daafdc3704 | |||
| 6661934fed | |||
| f568728de1 | |||
| 263d35bbd6 | |||
| bece21d217 | |||
| d4788e147a | |||
| f4594ecf37 | |||
| 8f1462cb79 | |||
| 76d4d0de69 | |||
| 6ab4e1d641 | |||
| c5d87c99fd | |||
| f53f403022 | |||
| b887b2951e | |||
| 842b69b155 | |||
| d6c34106fc | |||
| 67cbd31280 | |||
| cf877f2b49 | |||
| 6f34cb2c8a | |||
| b88aa2b53c | |||
| 356cab19eb | |||
| 7c6d5fa446 | |||
| 2dae3e47fd | |||
| 6fce789607 | |||
| 9bbb5b38e6 | |||
| ac73aa93bf | |||
| 52a56e4a10 | |||
| a1cede510d | |||
| 682c10e873 | |||
| 5605e24a0d | |||
| f7268a44d9 | |||
| af7a4ff4e8 | |||
| 60b9c0d763 | |||
| 5c550270c6 | |||
| e03fd48e48 | |||
| 6420c74c24 | |||
| ad74351530 | |||
| 1b5f656429 | |||
| 132d84c529 | |||
| a03b378e9b | |||
| 74635e1d7d | |||
| 893053ede7 | |||
| 596ec6fec5 | |||
| 5863b83172 | |||
| 20c92b197a | |||
| ec9c6b4666 | |||
| 8a73e5c119 | |||
| 717f0eee9a | |||
| 09fb47f089 | |||
| b46d943e71 | |||
| b980d6f6ab | |||
| 61f27369ef | |||
| 204b0b4744 | |||
| 1b6ebb1e42 | |||
| 7dfc75b3e6 | |||
| 2920b5ab01 | |||
| 81ad0467b0 | |||
| 115ca55ea0 | |||
| f2814a26e6 | |||
| 4d309950b0 | |||
| 39216a4c12 | |||
| c7fa621aeb | |||
| 5914d28cbe | |||
| 8c3ad3d70a | |||
| 9eb3fc6285 | |||
| e95f7e7339 | |||
| d949551399 | |||
| a7dbd85ed4 | |||
| 1f288dab1c | |||
| 021754d941 | |||
| 7412904fbf | |||
| cd1976e2b9 | |||
| 5f3e9379a3 | |||
| 0e565d6cea | |||
| 67b249dcd5 | |||
| bbf1c8c790 | |||
| 44a8b453b5 | |||
| 26511fe962 | |||
| ce5893216a | |||
| 4e821e4dbf | |||
| d11e97de59 | |||
| 4b10d3e360 | |||
| e04479930f | |||
| 8a8c4cc3f5 | |||
| 1e06ff611e | |||
| 1edc7bb9c7 | |||
| 7b1e0af155 | |||
| 7b15616e29 | |||
| bd7d2277d8 | |||
| 99ed00fd02 | |||
| f7af5f9ee8 | |||
| e5bcc8005f | |||
| 352d285212 | |||
| 3ef60f9d14 | |||
| a103312127 | |||
| 3d0bba4167 | |||
| 3df718cc14 | |||
| c7497a180e | |||
| 3f39039a21 | |||
| 88fbd90fcc | |||
| e0bf09dd78 | |||
| 3e158b07af | |||
| 5319ed7ee1 | |||
| 978904d2a4 | |||
| 4d876ecc54 | |||
| ba327d0b9e | |||
| b69cf3523c | |||
| 4d8c8e9308 | |||
| b70885934c | |||
| 722b087fc0 | |||
| 0c7ea272db | |||
| 5e4f322fc0 | |||
| c02e45f1aa | |||
| a7217f138c | |||
| 3502f25048 | |||
| 93c026fe31 | |||
| e515977b96 | |||
| 045490a097 | |||
| b25903fb7f | |||
| acf4bd5152 | |||
| 1f5711e1a1 | |||
| ca2dd90313 | |||
| 21e07f3b65 | |||
| e8a06ddd34 | |||
| 34cc09904f | |||
| f6bba8b62f | |||
| d241ad60f8 | |||
| 5a3fcf9a8a | |||
| 1f8a47203f | |||
| 7240090274 | |||
| 2e6a47c2df | |||
| 7f5ecd7913 | |||
| 105b98b113 | |||
| 114e65ab41 | |||
| 0fc13a5cc3 | |||
| e651799e9e | |||
| fcd3e514de | |||
| 7ab41de3a2 | |||
| 58e023f277 | |||
| a98f2d5b86 | |||
| eca43231c0 | |||
| 6763077887 | |||
| f85ff8a2f8 | |||
| 1a5c3480e6 | |||
| 69a7fe7b92 | |||
| a5418d760f | |||
| 0deeb87c63 | |||
| d1d5f49c5a | |||
| 917e23ccc8 | |||
| 988922304f | |||
| ab2bd726c3 | |||
| 713fefb163 | |||
| 83140a1398 | |||
| cafa6dd930 | |||
| 82e1af1a7a | |||
| 30c3dc9205 | |||
| 9a3c6703e1 | |||
| e26468aa19 | |||
| fe14992696 | |||
| d0775b95c6 | |||
| 96121b5757 | |||
| 11c003c48d | |||
| fbe72c58ae | |||
| 816156e87f | |||
| 7bceab3cea | |||
| 83d7f56728 | |||
| 76deba2a6a | |||
| d9d048b9e3 | |||
| 930f417729 | |||
| 8e214d06c1 | |||
| 63e0348963 | |||
| b46a5f0247 | |||
| 79dfd90068 | |||
| f9d5c7c751 | |||
| 8958fb2d88 | |||
| 3c51f2ac36 | |||
| 170a0918f7 | |||
| e3da3b619c | |||
| 6e32513b79 | |||
| 520e1963ee | |||
| 843b9b55e2 | |||
| ccd305ff96 | |||
| 3bd0d1e48c | |||
| d9bfa8e675 | |||
| 27746147e2 | |||
| 3a0b642980 | |||
| 8c0241f087 | |||
| 958d016174 | |||
| 913d318ada | |||
| 8212920cb7 | |||
| 6414be7bd4 | |||
| ac62a82d08 | |||
| a670548a57 | |||
| c4a7463f9d | |||
| edf0ac5270 | |||
| 8ff6b76f37 | |||
| c9f9eb365c | |||
| 7a17c115d3 | |||
| 9a2a11055f | |||
| f21aecd91c | |||
| 4aef73c1d7 | |||
| 906480a6e8 | |||
| 9df147b450 | |||
| b71b4b0fc2 | |||
| 1bd2510c52 | |||
| 28b81092f9 | |||
| 4b9a3abba6 | |||
| 0c76b6dcb1 | |||
| 090a85b41b | |||
| 992d573573 | |||
| 9e768e660b | |||
| 26b9ed362e | |||
| 976ae75fde | |||
| 9da91b5319 | |||
| 2493beaf5a | |||
| d63dd021ab | |||
| 697ba89314 | |||
| b6c65ab5d5 | |||
| 162f9a55ad | |||
| e484fdfa51 | |||
| 77d9ccf2e4 | |||
| 94e39ee09e | |||
| 373ad77008 | |||
| 661b0c0038 | |||
| 8ed38bf0e2 | |||
| 4d675dfff7 | |||
| b42a3293f1 | |||
| 87e9bf853d | |||
| c56f78422a | |||
| ac311e10ba | |||
| 0297520263 | |||
| 4803552a7a | |||
| b8d85ff723 | |||
| 7d571dfaec | |||
| ba02e53bdd | |||
| 153e6142ff | |||
| 228449c9d8 | |||
| c65eed8802 | |||
| 40d32f2e01 | |||
| c83aac5e12 | |||
| 48b9241247 | |||
| 7779bc5336 | |||
| beec549f74 | |||
| 310698ecc0 | |||
| 4f719c4778 | |||
| 4cc00f3bdc | |||
| 1f9c47fef1 | |||
| 80a4980640 | |||
| 8dbe424f5a | |||
| ec9bf033e6 | |||
| a2d21ec7bc | |||
| 06ccc853ee | |||
| 4847332161 | |||
| 8c1ee54725 | |||
| 5e537d9d55 | |||
| d6b95067a1 | |||
| 32cae75ef5 | |||
| 21e7554cdb | |||
| 374442e900 | |||
| a1a0ec5ddb | |||
| 1fd56b079c | |||
| a12163d63f | |||
| 0cd6f21980 | |||
| a88fc1d75c | |||
| 87b0037fcd | |||
| 767d32d420 | |||
| e9bde26611 | |||
| c02f40622c | |||
| 929dc24e93 | |||
| 8cfb533fef | |||
| 3328a388b3 | |||
| 8f632eb005 | |||
| c8ee961436 | |||
| 6fd7efece6 | |||
| bc9f6b0af8 | |||
| 7d48f17867 | |||
| 776583b3ad | |||
| 9c28dae583 | |||
| 59a315b90b | |||
| 866518f188 | |||
| 736ae65a1d | |||
| 76c9f7c9a9 | |||
| 32ad225d7f | |||
| e5428bec5c | |||
| 7ae6f67470 | |||
| faf534511b | |||
| 594bceb8f5 | |||
| 9dc0f48ec9 | |||
| 9d11f834b8 | |||
| 131b72cd0c | |||
| ce5a2d4a81 | |||
| 7f489cee46 | |||
| 3c2d669a2f | |||
| ec36e96499 | |||
| 9ecd4980e4 | |||
| 64446ff9b6 | |||
| e3d2262292 | |||
| 891cfa387a | |||
| f0243fddf2 | |||
| 85ff8e364b | |||
| 75f1afe8e3 | |||
| 7b660311e5 | |||
| 98a493296d | |||
| bc2a42aed2 | |||
| 8b501d9091 | |||
| cddae0ed18 | |||
| 9dca42be27 | |||
| a1f3fe4d55 | |||
| 0304b392b2 | |||
| ae9b4e82fe | |||
| 4bac5e4c46 | |||
| c4d3400ec4 | |||
| 1da9bb0c0f | |||
| 760ed51ad3 | |||
| 6d0a3b952a | |||
| 873fcd5822 | |||
| a08f3a8925 | |||
| 2a98d3a489 | |||
| b681ba03b1 | |||
| fe775a36c0 | |||
| 2df9adcb43 | |||
| c756cbf6d5 | |||
| d0ac67c9d3 | |||
| 47cd55052f | |||
| fb203b5bdf | |||
| 6ee47e243d | |||
| c1844b7a9d | |||
| 99a29e79e5 | |||
| 589a66ef26 | |||
| 3f960763cb | |||
| 15f8f3783c | |||
| a2b045c7e3 | |||
| 055cef2fdc | |||
| 6c6c69cbc3 | |||
| 6fe0062e6e | |||
| 26b8b2f448 | |||
| 7e40d6950a | |||
| 590bfa92cb | |||
| f0e89a1720 | |||
| 575563b1e8 | |||
| 82ea0e47ce | |||
| 2f57ca10f7 | |||
| 75c2d541c4 | |||
| b666f8b50b | |||
| 09f9322676 | |||
| f9a864ef93 | |||
| 27f28afe9c | |||
| 8f85722fef | |||
| 5588445a01 | |||
| 40529b5722 | |||
| cee632f50c | |||
| 3453e3aa05 | |||
| 8de637c421 | |||
| 6c75de862c | |||
| 2971134882 | |||
| 6e79860b43 | |||
| 3f6bdda2a0 | |||
| 74d0287ec5 | |||
| 51e81d80fc | |||
| cd014e41e4 | |||
| 830f11c47d | |||
| a73239dd98 | |||
| d68783a612 | |||
| a28ea40a7d | |||
| f2492bd4d4 | |||
| b22be7a6cb | |||
| 5b00445c05 | |||
| 5179677e8f | |||
| 2c25b2eae7 | |||
| f6705fe2d3 | |||
| c2771fed20 | |||
| fc781eccd9 | |||
| d5a25ae081 | |||
| 23b6fb6391 | |||
| 433967f0cf | |||
| 2a876c2a10 | |||
| ff0adeaba7 | |||
| 846edbf256 | |||
| c68dd48f6d | |||
| 8b828dd139 | |||
| 50c0a5da9e | |||
| 2f0e5c42f1 | |||
| 903288468a | |||
| 9e3bba6f59 | |||
| bc16f0752f | |||
| 86badd70fa | |||
| ce5379516c | |||
| a50078bbf2 | |||
| 2cef168442 | |||
| 0a1a9e3545 | |||
| 3c8682d80c | |||
| ecc5a1608f | |||
| bc81b55600 | |||
| 28b628c1b4 | |||
| 148264ac73 | |||
| 4046e4e379 | |||
| 28298d9af2 | |||
| 9d156325e0 | |||
| 221712128d | |||
| e9fc36f2d3 | |||
| 305b880b1d | |||
| 34782a6b85 | |||
| d25d94e71b | |||
| 51f1b449cd | |||
| 804e47dde4 | |||
| 582c810d15 | |||
| cede629718 | |||
| 7519c73f2a | |||
| bf402aaa18 | |||
| 7fae57f311 | |||
| 1f653969a9 | |||
| 4310852ee6 | |||
| 853f1e9873 | |||
| ae5fe84fb2 | |||
| 92b538d5ae | |||
| 5351703949 | |||
| 7ba8169444 | |||
| d090c954ae | |||
| 9bee1666f1 | |||
| fb94637339 |
@@ -0,0 +1,9 @@
|
||||
{
|
||||
"mcpServers": {
|
||||
"agent-builder": {
|
||||
"command": "uv",
|
||||
"args": ["run", "--directory", "core", "-m", "framework.mcp.agent_builder_server"],
|
||||
"disabled": false
|
||||
}
|
||||
}
|
||||
}
|
||||
Symlink
+1
@@ -0,0 +1 @@
|
||||
../../.claude/skills/hive
|
||||
Symlink
+1
@@ -0,0 +1 @@
|
||||
../../.claude/skills/hive-concepts
|
||||
Symlink
+1
@@ -0,0 +1 @@
|
||||
../../.claude/skills/hive-create
|
||||
Symlink
+1
@@ -0,0 +1 @@
|
||||
../../.claude/skills/hive-credentials
|
||||
Symlink
+1
@@ -0,0 +1 @@
|
||||
../../.claude/skills/hive-patterns
|
||||
Symlink
+1
@@ -0,0 +1 @@
|
||||
../../.claude/skills/hive-test
|
||||
@@ -0,0 +1,5 @@
|
||||
---
|
||||
description: hive-concepts
|
||||
---
|
||||
|
||||
use hive-concepts skill
|
||||
@@ -0,0 +1,5 @@
|
||||
---
|
||||
description: hive-create
|
||||
---
|
||||
|
||||
use hive-create skill
|
||||
@@ -0,0 +1,5 @@
|
||||
---
|
||||
description: hive-credentials
|
||||
---
|
||||
|
||||
use hive-credentials skill
|
||||
@@ -0,0 +1,5 @@
|
||||
---
|
||||
description: hive-patterns
|
||||
---
|
||||
|
||||
use hive-patterns skill
|
||||
@@ -0,0 +1,5 @@
|
||||
---
|
||||
description: hive-test
|
||||
---
|
||||
|
||||
use hive-test skill
|
||||
@@ -0,0 +1,5 @@
|
||||
---
|
||||
description: hive
|
||||
---
|
||||
|
||||
use hive skill
|
||||
Symlink
+1
@@ -0,0 +1 @@
|
||||
../../.claude/skills/hive
|
||||
Symlink
+1
@@ -0,0 +1 @@
|
||||
../../.claude/skills/hive-concepts
|
||||
Symlink
+1
@@ -0,0 +1 @@
|
||||
../../.claude/skills/hive-create
|
||||
Symlink
+1
@@ -0,0 +1 @@
|
||||
../../.claude/skills/hive-credentials
|
||||
Symlink
+1
@@ -0,0 +1 @@
|
||||
../../.claude/skills/hive-patterns
|
||||
Symlink
+1
@@ -0,0 +1 @@
|
||||
../../.claude/skills/hive-test
|
||||
@@ -0,0 +1,34 @@
|
||||
{
|
||||
"permissions": {
|
||||
"allow": [
|
||||
"mcp__agent-builder__create_session",
|
||||
"mcp__agent-builder__set_goal",
|
||||
"mcp__agent-builder__add_node",
|
||||
"mcp__agent-builder__add_edge",
|
||||
"mcp__agent-builder__configure_loop",
|
||||
"mcp__agent-builder__add_mcp_server",
|
||||
"mcp__agent-builder__validate_graph",
|
||||
"mcp__agent-builder__export_graph",
|
||||
"mcp__agent-builder__load_session_by_id",
|
||||
"Bash(git status:*)",
|
||||
"Bash(gh run view:*)",
|
||||
"Bash(uv run:*)",
|
||||
"Bash(env:*)",
|
||||
"mcp__agent-builder__test_node",
|
||||
"mcp__agent-builder__list_mcp_tools",
|
||||
"Bash(python -m py_compile:*)",
|
||||
"Bash(python -m pytest:*)",
|
||||
"Bash(source:*)",
|
||||
"mcp__agent-builder__update_node",
|
||||
"mcp__agent-builder__check_missing_credentials",
|
||||
"mcp__agent-builder__list_stored_credentials",
|
||||
"Bash(find:*)",
|
||||
"mcp__agent-builder__run_tests",
|
||||
"Bash(PYTHONPATH=core:exports:tools/src uv run pytest:*)",
|
||||
"mcp__agent-builder__list_agent_sessions",
|
||||
"mcp__agent-builder__generate_constraint_tests",
|
||||
"mcp__agent-builder__generate_success_tests"
|
||||
]
|
||||
},
|
||||
"enabledMcpjsonServers": ["agent-builder", "tools"]
|
||||
}
|
||||
@@ -1,415 +0,0 @@
|
||||
---
|
||||
name: building-agents-construction
|
||||
description: Step-by-step guide for building goal-driven agents. Creates package structure, defines goals, adds nodes, connects edges, and finalizes agent class. Use when actively building an agent.
|
||||
license: Apache-2.0
|
||||
metadata:
|
||||
author: hive
|
||||
version: "2.0"
|
||||
type: procedural
|
||||
part_of: building-agents
|
||||
requires: building-agents-core
|
||||
---
|
||||
|
||||
# Agent Construction - EXECUTE THESE STEPS
|
||||
|
||||
**THIS IS AN EXECUTABLE WORKFLOW. DO NOT DISPLAY THIS FILE. EXECUTE THE STEPS BELOW.**
|
||||
|
||||
When this skill is loaded, IMMEDIATELY begin executing Step 1. Do not explain what you will do - just do it.
|
||||
|
||||
---
|
||||
|
||||
## STEP 1: Initialize Build Environment
|
||||
|
||||
**EXECUTE THESE TOOL CALLS NOW:**
|
||||
|
||||
1. Register the hive-tools MCP server:
|
||||
|
||||
```
|
||||
mcp__agent-builder__add_mcp_server(
|
||||
name="hive-tools",
|
||||
transport="stdio",
|
||||
command="python",
|
||||
args='["mcp_server.py", "--stdio"]',
|
||||
cwd="tools",
|
||||
description="Hive tools MCP server"
|
||||
)
|
||||
```
|
||||
|
||||
2. Create a build session (replace AGENT_NAME with the user's requested agent name in snake_case):
|
||||
|
||||
```
|
||||
mcp__agent-builder__create_session(name="AGENT_NAME")
|
||||
```
|
||||
|
||||
3. Discover available tools:
|
||||
|
||||
```
|
||||
mcp__agent-builder__list_mcp_tools()
|
||||
```
|
||||
|
||||
4. Create the package directory:
|
||||
|
||||
```
|
||||
mkdir -p exports/AGENT_NAME/nodes
|
||||
```
|
||||
|
||||
**AFTER completing these calls**, tell the user:
|
||||
|
||||
> ✅ Build environment initialized
|
||||
>
|
||||
> - Session created
|
||||
> - Available tools: [list the tools from step 3]
|
||||
>
|
||||
> Proceeding to define the agent goal...
|
||||
|
||||
**THEN immediately proceed to STEP 2.**
|
||||
|
||||
---
|
||||
|
||||
## STEP 2: Define and Approve Goal
|
||||
|
||||
**PROPOSE a goal to the user.** Based on what they asked for, propose:
|
||||
|
||||
- Goal ID (kebab-case)
|
||||
- Goal name
|
||||
- Goal description
|
||||
- 3-5 success criteria (each with: id, description, metric, target, weight)
|
||||
- 2-4 constraints (each with: id, description, constraint_type, category)
|
||||
|
||||
**FORMAT your proposal as a clear summary, then ask for approval:**
|
||||
|
||||
> **Proposed Goal: [Name]**
|
||||
>
|
||||
> [Description]
|
||||
>
|
||||
> **Success Criteria:**
|
||||
>
|
||||
> 1. [criterion 1]
|
||||
> 2. [criterion 2]
|
||||
> ...
|
||||
>
|
||||
> **Constraints:**
|
||||
>
|
||||
> 1. [constraint 1]
|
||||
> 2. [constraint 2]
|
||||
> ...
|
||||
|
||||
**THEN call AskUserQuestion:**
|
||||
|
||||
```
|
||||
AskUserQuestion(questions=[{
|
||||
"question": "Do you approve this goal definition?",
|
||||
"header": "Goal",
|
||||
"options": [
|
||||
{"label": "Approve", "description": "Goal looks good, proceed"},
|
||||
{"label": "Modify", "description": "I want to change something"}
|
||||
],
|
||||
"multiSelect": false
|
||||
}])
|
||||
```
|
||||
|
||||
**WAIT for user response.**
|
||||
|
||||
- If **Approve**: Call `mcp__agent-builder__set_goal(...)` with the goal details, then proceed to STEP 3
|
||||
- If **Modify**: Ask what they want to change, update proposal, ask again
|
||||
|
||||
---
|
||||
|
||||
## STEP 3: Design Node Workflow
|
||||
|
||||
**BEFORE designing nodes**, review the available tools from Step 1. Nodes can ONLY use tools that exist.
|
||||
|
||||
**DESIGN the workflow** as a series of nodes. For each node, determine:
|
||||
|
||||
- node_id (kebab-case)
|
||||
- name
|
||||
- description
|
||||
- node_type: `"event_loop"` (recommended for all LLM work) or `"function"` (deterministic, no LLM)
|
||||
- input_keys (what data this node receives)
|
||||
- output_keys (what data this node produces)
|
||||
- tools (ONLY tools that exist - empty list if no tools needed)
|
||||
- system_prompt (should mention `set_output` for producing structured outputs)
|
||||
- client_facing: True if this node interacts with the user
|
||||
- nullable_output_keys (for mutually exclusive outputs)
|
||||
- max_node_visits (>1 if this node is a feedback loop target)
|
||||
|
||||
**PRESENT the workflow to the user:**
|
||||
|
||||
> **Proposed Workflow: [N] nodes**
|
||||
>
|
||||
> 1. **[node-id]** - [description]
|
||||
>
|
||||
> - Type: event_loop [client-facing] / function
|
||||
> - Input: [keys]
|
||||
> - Output: [keys]
|
||||
> - Tools: [tools or "none"]
|
||||
>
|
||||
> 2. **[node-id]** - [description]
|
||||
> ...
|
||||
>
|
||||
> **Flow:** node1 → node2 → node3 → ...
|
||||
|
||||
**THEN call AskUserQuestion:**
|
||||
|
||||
```
|
||||
AskUserQuestion(questions=[{
|
||||
"question": "Do you approve this workflow design?",
|
||||
"header": "Workflow",
|
||||
"options": [
|
||||
{"label": "Approve", "description": "Workflow looks good, proceed to build nodes"},
|
||||
{"label": "Modify", "description": "I want to change the workflow"}
|
||||
],
|
||||
"multiSelect": false
|
||||
}])
|
||||
```
|
||||
|
||||
**WAIT for user response.**
|
||||
|
||||
- If **Approve**: Proceed to STEP 4
|
||||
- If **Modify**: Ask what they want to change, update design, ask again
|
||||
|
||||
---
|
||||
|
||||
## STEP 4: Build Nodes One by One
|
||||
|
||||
**FOR EACH node in the approved workflow:**
|
||||
|
||||
1. **Call** `mcp__agent-builder__add_node(...)` with the node details
|
||||
|
||||
- input_keys and output_keys must be JSON strings: `'["key1", "key2"]'`
|
||||
- tools must be a JSON string: `'["tool1"]'` or `'[]'`
|
||||
|
||||
2. **Call** `mcp__agent-builder__test_node(...)` to validate:
|
||||
|
||||
```
|
||||
mcp__agent-builder__test_node(
|
||||
node_id="the-node-id",
|
||||
test_input='{"key": "test value"}',
|
||||
mock_llm_response='{"output_key": "test output"}'
|
||||
)
|
||||
```
|
||||
|
||||
3. **Check result:**
|
||||
|
||||
- If valid: Tell user "✅ Node [id] validated" and continue to next node
|
||||
- If invalid: Show errors, fix the node, re-validate
|
||||
|
||||
4. **Show progress** after each node:
|
||||
|
||||
```
|
||||
mcp__agent-builder__get_session_status()
|
||||
```
|
||||
|
||||
> ✅ Node [X] of [Y] complete: [node-id]
|
||||
|
||||
**AFTER all nodes are added and validated**, proceed to STEP 5.
|
||||
|
||||
---
|
||||
|
||||
## STEP 5: Connect Edges
|
||||
|
||||
**DETERMINE the edges** based on the workflow flow. For each connection:
|
||||
|
||||
- edge_id (kebab-case)
|
||||
- source (node that outputs)
|
||||
- target (node that receives)
|
||||
- condition: `"on_success"`, `"always"`, `"on_failure"`, or `"conditional"`
|
||||
- condition_expr (Python expression using `output.get(...)`, only if conditional)
|
||||
- priority (positive = forward edge evaluated first, negative = feedback edge)
|
||||
|
||||
**FOR EACH edge, call:**
|
||||
|
||||
```
|
||||
mcp__agent-builder__add_edge(
|
||||
edge_id="source-to-target",
|
||||
source="source-node-id",
|
||||
target="target-node-id",
|
||||
condition="on_success",
|
||||
condition_expr="",
|
||||
priority=1
|
||||
)
|
||||
```
|
||||
|
||||
**AFTER all edges are added, validate the graph:**
|
||||
|
||||
```
|
||||
mcp__agent-builder__validate_graph()
|
||||
```
|
||||
|
||||
- If valid: Tell user "✅ Graph structure validated" and proceed to STEP 6
|
||||
- If invalid: Show errors, fix edges, re-validate
|
||||
|
||||
---
|
||||
|
||||
## STEP 6: Generate Agent Package
|
||||
|
||||
**EXPORT the graph data:**
|
||||
|
||||
```
|
||||
mcp__agent-builder__export_graph()
|
||||
```
|
||||
|
||||
This returns JSON with all the goal, nodes, edges, and MCP server configurations.
|
||||
|
||||
**THEN write the Python package files** using the exported data. Create these files in `exports/AGENT_NAME/`:
|
||||
|
||||
1. `config.py` - Runtime configuration with model settings
|
||||
2. `nodes/__init__.py` - All NodeSpec definitions
|
||||
3. `agent.py` - Goal, edges, graph config, and agent class
|
||||
4. `__init__.py` - Package exports
|
||||
5. `__main__.py` - CLI interface
|
||||
6. `mcp_servers.json` - MCP server configurations
|
||||
7. `README.md` - Usage documentation
|
||||
|
||||
**IMPORTANT entry_points format:**
|
||||
|
||||
- MUST be: `{"start": "first-node-id"}`
|
||||
- NOT: `{"first-node-id": ["input_keys"]}` (WRONG)
|
||||
- NOT: `{"first-node-id"}` (WRONG - this is a set)
|
||||
|
||||
**Use the example agent** at `.claude/skills/building-agents-construction/examples/deep_research_agent/` as a template for file structure and patterns. It demonstrates: STEP 1/STEP 2 prompts, client-facing nodes, feedback loops, nullable_output_keys, and data tools.
|
||||
|
||||
**AFTER writing all files, tell the user:**
|
||||
|
||||
> ✅ Agent package created: `exports/AGENT_NAME/`
|
||||
>
|
||||
> **Files generated:**
|
||||
>
|
||||
> - `__init__.py` - Package exports
|
||||
> - `agent.py` - Goal, nodes, edges, agent class
|
||||
> - `config.py` - Runtime configuration
|
||||
> - `__main__.py` - CLI interface
|
||||
> - `nodes/__init__.py` - Node definitions
|
||||
> - `mcp_servers.json` - MCP server config
|
||||
> - `README.md` - Usage documentation
|
||||
>
|
||||
> **Test your agent:**
|
||||
>
|
||||
> ```bash
|
||||
> cd /home/timothy/oss/hive
|
||||
> PYTHONPATH=exports uv run python -m AGENT_NAME validate
|
||||
> PYTHONPATH=exports uv run python -m AGENT_NAME info
|
||||
> ```
|
||||
|
||||
---
|
||||
|
||||
## STEP 7: Verify and Test
|
||||
|
||||
**RUN validation:**
|
||||
|
||||
```bash
|
||||
cd /home/timothy/oss/hive && PYTHONPATH=exports uv run python -m AGENT_NAME validate
|
||||
```
|
||||
|
||||
- If valid: Agent is complete!
|
||||
- If errors: Fix the issues and re-run
|
||||
|
||||
**SHOW final session summary:**
|
||||
|
||||
```
|
||||
mcp__agent-builder__get_session_status()
|
||||
```
|
||||
|
||||
**TELL the user the agent is ready** and suggest next steps:
|
||||
|
||||
- Run with mock mode to test without API calls
|
||||
- Use `/testing-agent` skill for comprehensive testing
|
||||
- Use `/setup-credentials` if the agent needs API keys
|
||||
|
||||
---
|
||||
|
||||
## REFERENCE: Node Types
|
||||
|
||||
| Type | tools param | Use when |
|
||||
|------|-------------|----------|
|
||||
| `event_loop` | `'["tool1"]'` or `'[]'` | LLM-powered work with or without tools |
|
||||
| `function` | N/A | Deterministic Python operations, no LLM |
|
||||
|
||||
---
|
||||
|
||||
## REFERENCE: NodeSpec New Fields
|
||||
|
||||
| Field | Default | Description |
|
||||
|-------|---------|-------------|
|
||||
| `client_facing` | `False` | Streams output to user, blocks for input between turns |
|
||||
| `nullable_output_keys` | `[]` | Output keys that may remain unset (mutually exclusive outputs) |
|
||||
| `max_node_visits` | `1` | Max executions per run. Set >1 for feedback loop targets. 0=unlimited |
|
||||
|
||||
---
|
||||
|
||||
## REFERENCE: Edge Conditions & Priority
|
||||
|
||||
| Condition | When edge is followed |
|
||||
|-----------|--------------------------------------|
|
||||
| `on_success` | Source node completed successfully |
|
||||
| `on_failure` | Source node failed |
|
||||
| `always` | Always, regardless of success/failure |
|
||||
| `conditional` | When condition_expr evaluates to True |
|
||||
|
||||
**Priority:** Positive = forward edge (evaluated first). Negative = feedback edge (loops back to earlier node). Multiple ON_SUCCESS edges from same source = parallel execution (fan-out).
|
||||
|
||||
---
|
||||
|
||||
## REFERENCE: System Prompt Best Practice
|
||||
|
||||
For **internal** event_loop nodes (not client-facing), instruct the LLM to use `set_output`:
|
||||
|
||||
```
|
||||
Use set_output(key, value) to store your results. For example:
|
||||
- set_output("search_results", <your results as a JSON string>)
|
||||
|
||||
Do NOT return raw JSON. Use the set_output tool to produce outputs.
|
||||
```
|
||||
|
||||
For **client-facing** event_loop nodes, use the STEP 1/STEP 2 pattern:
|
||||
|
||||
```
|
||||
**STEP 1 — Respond to the user (text only, NO tool calls):**
|
||||
[Present information, ask questions, etc.]
|
||||
|
||||
**STEP 2 — After the user responds, call set_output:**
|
||||
- set_output("key", "value based on user's response")
|
||||
```
|
||||
|
||||
This prevents the LLM from calling `set_output` before the user has had a chance to respond. The "NO tool calls" instruction in STEP 1 ensures the node blocks for user input before proceeding.
|
||||
|
||||
---
|
||||
|
||||
## EventLoopNode Runtime
|
||||
|
||||
EventLoopNodes are **auto-created** by `GraphExecutor` at runtime. Both direct `GraphExecutor` and `AgentRuntime` / `create_agent_runtime()` handle event_loop nodes automatically. No manual `node_registry` setup is needed.
|
||||
|
||||
```python
|
||||
# Direct execution
|
||||
from framework.graph.executor import GraphExecutor
|
||||
from framework.runtime.core import Runtime
|
||||
|
||||
storage_path = Path.home() / ".hive" / "my_agent"
|
||||
storage_path.mkdir(parents=True, exist_ok=True)
|
||||
runtime = Runtime(storage_path)
|
||||
|
||||
executor = GraphExecutor(
|
||||
runtime=runtime,
|
||||
llm=llm,
|
||||
tools=tools,
|
||||
tool_executor=tool_executor,
|
||||
storage_path=storage_path,
|
||||
)
|
||||
result = await executor.execute(graph=graph, goal=goal, input_data=input_data)
|
||||
```
|
||||
|
||||
**DO NOT pass `runtime=None` to `GraphExecutor`** — it will crash with `'NoneType' object has no attribute 'start_run'`.
|
||||
|
||||
---
|
||||
|
||||
## COMMON MISTAKES TO AVOID
|
||||
|
||||
1. **Using tools that don't exist** - Always check `mcp__agent-builder__list_mcp_tools()` first
|
||||
2. **Wrong entry_points format** - Must be `{"start": "node-id"}`, NOT a set or list
|
||||
3. **Skipping validation** - Always validate nodes and graph before proceeding
|
||||
4. **Not waiting for approval** - Always ask user before major steps
|
||||
5. **Displaying this file** - Execute the steps, don't show documentation
|
||||
6. **Too many thin nodes** - Prefer fewer, richer nodes (4 nodes > 8 nodes)
|
||||
7. **Missing STEP 1/STEP 2 in client-facing prompts** - Client-facing nodes need explicit phases to prevent premature set_output
|
||||
8. **Forgetting nullable_output_keys** - Mark input_keys that only arrive on certain edges (e.g., feedback) as nullable on the receiving node
|
||||
9. **Adding framework gating for LLM behavior** - Fix prompts or use judges, not ad-hoc code
|
||||
+16
-12
@@ -1,12 +1,12 @@
|
||||
---
|
||||
name: building-agents-core
|
||||
name: hive-concepts
|
||||
description: Core concepts for goal-driven agents - architecture, node types (event_loop, function), tool discovery, and workflow overview. Use when starting agent development or need to understand agent fundamentals.
|
||||
license: Apache-2.0
|
||||
metadata:
|
||||
author: hive
|
||||
version: "2.0"
|
||||
type: foundational
|
||||
part_of: building-agents
|
||||
part_of: hive
|
||||
---
|
||||
|
||||
# Building Agents - Core Concepts
|
||||
@@ -251,6 +251,7 @@ The judge controls when a node's loop exits:
|
||||
Controls loop behavior:
|
||||
- `max_iterations` (default 50) — prevents infinite loops
|
||||
- `max_tool_calls_per_turn` (default 10) — limits tool calls per LLM response
|
||||
- `tool_call_overflow_margin` (default 0.5) — wiggle room before discarding extra tool calls (50% means hard cutoff at 150% of limit)
|
||||
- `stall_detection_threshold` (default 3) — detects repeated identical responses
|
||||
- `max_history_tokens` (default 32000) — triggers conversation compaction
|
||||
|
||||
@@ -258,9 +259,12 @@ Controls loop behavior:
|
||||
|
||||
When tool results exceed the context window, the framework automatically saves them to a spillover directory and truncates with a hint. Nodes that produce or consume large data should include the data tools:
|
||||
|
||||
- `save_data(filename, data, data_dir)` — Write data to a file in the data directory
|
||||
- `load_data(filename, data_dir, offset=0, limit=50)` — Read data with line-based pagination
|
||||
- `list_data_files(data_dir)` — List available data files
|
||||
- `save_data(filename, data)` — Write data to a file in the data directory
|
||||
- `load_data(filename, offset=0, limit=50)` — Read data with line-based pagination
|
||||
- `list_data_files()` — List available data files
|
||||
- `serve_file_to_user(filename, label="")` — Get a clickable file:// URI for the user
|
||||
|
||||
Note: `data_dir` is a framework-injected context parameter — the LLM never sees or passes it. `GraphExecutor.execute()` sets it per-execution via `contextvars`, so data tools and spillover always share the same session-scoped directory.
|
||||
|
||||
These are real MCP tools (not synthetic). Add them to nodes that handle large tool results:
|
||||
|
||||
@@ -346,15 +350,15 @@ Before writing a node with `tools=[...]`:
|
||||
|
||||
## When to Use This Skill
|
||||
|
||||
Use building-agents-core when:
|
||||
Use hive-concepts when:
|
||||
- Starting a new agent project and need to understand fundamentals
|
||||
- Need to understand agent architecture before building
|
||||
- Want to validate tool availability before proceeding
|
||||
- Learning about node types, edges, and graph execution
|
||||
|
||||
**Next Steps:**
|
||||
- Ready to build? → Use `building-agents-construction` skill
|
||||
- Need patterns and examples? → Use `building-agents-patterns` skill
|
||||
- Ready to build? → Use `hive-create` skill
|
||||
- Need patterns and examples? → Use `hive-patterns` skill
|
||||
|
||||
## MCP Tools for Validation
|
||||
|
||||
@@ -389,7 +393,7 @@ mcp__agent-builder__configure_loop(
|
||||
|
||||
## Related Skills
|
||||
|
||||
- **building-agents-construction** - Step-by-step building process
|
||||
- **building-agents-patterns** - Best practices: judges, feedback edges, fan-out, context management
|
||||
- **agent-workflow** - Complete workflow orchestrator
|
||||
- **testing-agent** - Test and validate completed agents
|
||||
- **hive-create** - Step-by-step building process
|
||||
- **hive-patterns** - Best practices: judges, feedback edges, fan-out, context management
|
||||
- **hive** - Complete workflow orchestrator
|
||||
- **hive-test** - Test and validate completed agents
|
||||
File diff suppressed because it is too large
Load Diff
+10
-5
@@ -70,7 +70,9 @@ def tui(mock, verbose, debug):
|
||||
try:
|
||||
from framework.tui.app import AdenTUI
|
||||
except ImportError:
|
||||
click.echo("TUI requires the 'textual' package. Install with: pip install textual")
|
||||
click.echo(
|
||||
"TUI requires the 'textual' package. Install with: pip install textual"
|
||||
)
|
||||
sys.exit(1)
|
||||
|
||||
from pathlib import Path
|
||||
@@ -88,6 +90,9 @@ def tui(mock, verbose, debug):
|
||||
agent._event_bus = EventBus()
|
||||
agent._tool_registry = ToolRegistry()
|
||||
|
||||
storage_path = Path.home() / ".hive" / "agents" / "deep_research_agent"
|
||||
storage_path.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
mcp_config_path = Path(__file__).parent / "mcp_servers.json"
|
||||
if mcp_config_path.exists():
|
||||
agent._tool_registry.load_mcp_config(mcp_config_path)
|
||||
@@ -104,9 +109,6 @@ def tui(mock, verbose, debug):
|
||||
tool_executor = agent._tool_registry.get_executor()
|
||||
graph = agent._build_graph()
|
||||
|
||||
storage_path = Path.home() / ".hive" / "deep_research_agent"
|
||||
storage_path.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
runtime = create_agent_runtime(
|
||||
graph=graph,
|
||||
goal=agent.goal,
|
||||
@@ -216,7 +218,9 @@ async def _interactive_shell(verbose=False):
|
||||
if "references" in output:
|
||||
click.echo("--- References ---\n")
|
||||
for ref in output.get("references", []):
|
||||
click.echo(f" [{ref.get('number', '?')}] {ref.get('title', '')} - {ref.get('url', '')}")
|
||||
click.echo(
|
||||
f" [{ref.get('number', '?')}] {ref.get('title', '')} - {ref.get('url', '')}"
|
||||
)
|
||||
click.echo("\n")
|
||||
else:
|
||||
click.echo(f"\nResearch failed: {result.error}\n")
|
||||
@@ -227,6 +231,7 @@ async def _interactive_shell(verbose=False):
|
||||
except Exception as e:
|
||||
click.echo(f"Error: {e}", err=True)
|
||||
import traceback
|
||||
|
||||
traceback.print_exc()
|
||||
finally:
|
||||
await agent.stop()
|
||||
@@ -0,0 +1,358 @@
|
||||
"""Agent graph construction for Deep Research Agent."""
|
||||
|
||||
from pathlib import Path
|
||||
|
||||
from framework.graph import EdgeSpec, EdgeCondition, Goal, SuccessCriterion, Constraint
|
||||
from framework.graph.edge import GraphSpec
|
||||
from framework.graph.executor import ExecutionResult
|
||||
from framework.graph.checkpoint_config import CheckpointConfig
|
||||
from framework.llm import LiteLLMProvider
|
||||
from framework.runner.tool_registry import ToolRegistry
|
||||
from framework.runtime.agent_runtime import AgentRuntime, create_agent_runtime
|
||||
from framework.runtime.execution_stream import EntryPointSpec
|
||||
|
||||
from .config import default_config, metadata
|
||||
from .nodes import (
|
||||
intake_node,
|
||||
research_node,
|
||||
review_node,
|
||||
report_node,
|
||||
)
|
||||
|
||||
# Goal definition
|
||||
goal = Goal(
|
||||
id="rigorous-interactive-research",
|
||||
name="Rigorous Interactive Research",
|
||||
description=(
|
||||
"Research any topic by searching diverse sources, analyzing findings, "
|
||||
"and producing a cited report — with user checkpoints to guide direction."
|
||||
),
|
||||
success_criteria=[
|
||||
SuccessCriterion(
|
||||
id="source-diversity",
|
||||
description="Use multiple diverse, authoritative sources",
|
||||
metric="source_count",
|
||||
target=">=5",
|
||||
weight=0.25,
|
||||
),
|
||||
SuccessCriterion(
|
||||
id="citation-coverage",
|
||||
description="Every factual claim in the report cites its source",
|
||||
metric="citation_coverage",
|
||||
target="100%",
|
||||
weight=0.25,
|
||||
),
|
||||
SuccessCriterion(
|
||||
id="user-satisfaction",
|
||||
description="User reviews findings before report generation",
|
||||
metric="user_approval",
|
||||
target="true",
|
||||
weight=0.25,
|
||||
),
|
||||
SuccessCriterion(
|
||||
id="report-completeness",
|
||||
description="Final report answers the original research questions",
|
||||
metric="question_coverage",
|
||||
target="90%",
|
||||
weight=0.25,
|
||||
),
|
||||
],
|
||||
constraints=[
|
||||
Constraint(
|
||||
id="no-hallucination",
|
||||
description="Only include information found in fetched sources",
|
||||
constraint_type="quality",
|
||||
category="accuracy",
|
||||
),
|
||||
Constraint(
|
||||
id="source-attribution",
|
||||
description="Every claim must cite its source with a numbered reference",
|
||||
constraint_type="quality",
|
||||
category="accuracy",
|
||||
),
|
||||
Constraint(
|
||||
id="user-checkpoint",
|
||||
description="Present findings to the user before writing the final report",
|
||||
constraint_type="functional",
|
||||
category="interaction",
|
||||
),
|
||||
],
|
||||
)
|
||||
|
||||
# Node list
|
||||
nodes = [
|
||||
intake_node,
|
||||
research_node,
|
||||
review_node,
|
||||
report_node,
|
||||
]
|
||||
|
||||
# Edge definitions
|
||||
edges = [
|
||||
# intake -> research
|
||||
EdgeSpec(
|
||||
id="intake-to-research",
|
||||
source="intake",
|
||||
target="research",
|
||||
condition=EdgeCondition.ON_SUCCESS,
|
||||
priority=1,
|
||||
),
|
||||
# research -> review
|
||||
EdgeSpec(
|
||||
id="research-to-review",
|
||||
source="research",
|
||||
target="review",
|
||||
condition=EdgeCondition.ON_SUCCESS,
|
||||
priority=1,
|
||||
),
|
||||
# review -> research (feedback loop)
|
||||
EdgeSpec(
|
||||
id="review-to-research-feedback",
|
||||
source="review",
|
||||
target="research",
|
||||
condition=EdgeCondition.CONDITIONAL,
|
||||
condition_expr="needs_more_research == True",
|
||||
priority=1,
|
||||
),
|
||||
# review -> report (user satisfied)
|
||||
EdgeSpec(
|
||||
id="review-to-report",
|
||||
source="review",
|
||||
target="report",
|
||||
condition=EdgeCondition.CONDITIONAL,
|
||||
condition_expr="needs_more_research == False",
|
||||
priority=2,
|
||||
),
|
||||
# report -> research (user wants deeper research on current topic)
|
||||
EdgeSpec(
|
||||
id="report-to-research",
|
||||
source="report",
|
||||
target="research",
|
||||
condition=EdgeCondition.CONDITIONAL,
|
||||
condition_expr="str(next_action).lower() == 'more_research'",
|
||||
priority=2,
|
||||
),
|
||||
# report -> intake (user wants a new topic — default when not more_research)
|
||||
EdgeSpec(
|
||||
id="report-to-intake",
|
||||
source="report",
|
||||
target="intake",
|
||||
condition=EdgeCondition.CONDITIONAL,
|
||||
condition_expr="str(next_action).lower() != 'more_research'",
|
||||
priority=1,
|
||||
),
|
||||
]
|
||||
|
||||
# Graph configuration
|
||||
entry_node = "intake"
|
||||
entry_points = {"start": "intake"}
|
||||
pause_nodes = []
|
||||
terminal_nodes = []
|
||||
|
||||
|
||||
class DeepResearchAgent:
|
||||
"""
|
||||
Deep Research Agent — 4-node pipeline with user checkpoints.
|
||||
|
||||
Flow: intake -> research -> review -> report
|
||||
^ |
|
||||
+-- feedback loop (if user wants more)
|
||||
|
||||
Uses AgentRuntime for proper session management:
|
||||
- Session-scoped storage (sessions/{session_id}/)
|
||||
- Checkpointing for resume capability
|
||||
- Runtime logging
|
||||
- Data folder for save_data/load_data
|
||||
"""
|
||||
|
||||
def __init__(self, config=None):
|
||||
self.config = config or default_config
|
||||
self.goal = goal
|
||||
self.nodes = nodes
|
||||
self.edges = edges
|
||||
self.entry_node = entry_node
|
||||
self.entry_points = entry_points
|
||||
self.pause_nodes = pause_nodes
|
||||
self.terminal_nodes = terminal_nodes
|
||||
self._graph: GraphSpec | None = None
|
||||
self._agent_runtime: AgentRuntime | None = None
|
||||
self._tool_registry: ToolRegistry | None = None
|
||||
self._storage_path: Path | None = None
|
||||
|
||||
def _build_graph(self) -> GraphSpec:
|
||||
"""Build the GraphSpec."""
|
||||
return GraphSpec(
|
||||
id="deep-research-agent-graph",
|
||||
goal_id=self.goal.id,
|
||||
version="1.0.0",
|
||||
entry_node=self.entry_node,
|
||||
entry_points=self.entry_points,
|
||||
terminal_nodes=self.terminal_nodes,
|
||||
pause_nodes=self.pause_nodes,
|
||||
nodes=self.nodes,
|
||||
edges=self.edges,
|
||||
default_model=self.config.model,
|
||||
max_tokens=self.config.max_tokens,
|
||||
loop_config={
|
||||
"max_iterations": 100,
|
||||
"max_tool_calls_per_turn": 20,
|
||||
"max_history_tokens": 32000,
|
||||
},
|
||||
conversation_mode="continuous",
|
||||
identity_prompt=(
|
||||
"You are a rigorous research agent. You search for information "
|
||||
"from diverse, authoritative sources, analyze findings critically, "
|
||||
"and produce well-cited reports. You never fabricate information — "
|
||||
"every claim must trace back to a source you actually retrieved."
|
||||
),
|
||||
)
|
||||
|
||||
def _setup(self, mock_mode=False) -> None:
|
||||
"""Set up the agent runtime with sessions, checkpoints, and logging."""
|
||||
self._storage_path = Path.home() / ".hive" / "agents" / "deep_research_agent"
|
||||
self._storage_path.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
self._tool_registry = ToolRegistry()
|
||||
|
||||
mcp_config_path = Path(__file__).parent / "mcp_servers.json"
|
||||
if mcp_config_path.exists():
|
||||
self._tool_registry.load_mcp_config(mcp_config_path)
|
||||
|
||||
llm = None
|
||||
if not mock_mode:
|
||||
llm = LiteLLMProvider(
|
||||
model=self.config.model,
|
||||
api_key=self.config.api_key,
|
||||
api_base=self.config.api_base,
|
||||
)
|
||||
|
||||
tool_executor = self._tool_registry.get_executor()
|
||||
tools = list(self._tool_registry.get_tools().values())
|
||||
|
||||
self._graph = self._build_graph()
|
||||
|
||||
checkpoint_config = CheckpointConfig(
|
||||
enabled=True,
|
||||
checkpoint_on_node_start=False,
|
||||
checkpoint_on_node_complete=True,
|
||||
checkpoint_max_age_days=7,
|
||||
async_checkpoint=True,
|
||||
)
|
||||
|
||||
entry_point_specs = [
|
||||
EntryPointSpec(
|
||||
id="default",
|
||||
name="Default",
|
||||
entry_node=self.entry_node,
|
||||
trigger_type="manual",
|
||||
isolation_level="shared",
|
||||
)
|
||||
]
|
||||
|
||||
self._agent_runtime = create_agent_runtime(
|
||||
graph=self._graph,
|
||||
goal=self.goal,
|
||||
storage_path=self._storage_path,
|
||||
entry_points=entry_point_specs,
|
||||
llm=llm,
|
||||
tools=tools,
|
||||
tool_executor=tool_executor,
|
||||
checkpoint_config=checkpoint_config,
|
||||
)
|
||||
|
||||
async def start(self, mock_mode=False) -> None:
|
||||
"""Set up and start the agent runtime."""
|
||||
if self._agent_runtime is None:
|
||||
self._setup(mock_mode=mock_mode)
|
||||
if not self._agent_runtime.is_running:
|
||||
await self._agent_runtime.start()
|
||||
|
||||
async def stop(self) -> None:
|
||||
"""Stop the agent runtime and clean up."""
|
||||
if self._agent_runtime and self._agent_runtime.is_running:
|
||||
await self._agent_runtime.stop()
|
||||
self._agent_runtime = None
|
||||
|
||||
async def trigger_and_wait(
|
||||
self,
|
||||
entry_point: str = "default",
|
||||
input_data: dict | None = None,
|
||||
timeout: float | None = None,
|
||||
session_state: dict | None = None,
|
||||
) -> ExecutionResult | None:
|
||||
"""Execute the graph and wait for completion."""
|
||||
if self._agent_runtime is None:
|
||||
raise RuntimeError("Agent not started. Call start() first.")
|
||||
|
||||
return await self._agent_runtime.trigger_and_wait(
|
||||
entry_point_id=entry_point,
|
||||
input_data=input_data or {},
|
||||
session_state=session_state,
|
||||
)
|
||||
|
||||
async def run(
|
||||
self, context: dict, mock_mode=False, session_state=None
|
||||
) -> ExecutionResult:
|
||||
"""Run the agent (convenience method for single execution)."""
|
||||
await self.start(mock_mode=mock_mode)
|
||||
try:
|
||||
result = await self.trigger_and_wait(
|
||||
"default", context, session_state=session_state
|
||||
)
|
||||
return result or ExecutionResult(success=False, error="Execution timeout")
|
||||
finally:
|
||||
await self.stop()
|
||||
|
||||
def info(self):
|
||||
"""Get agent information."""
|
||||
return {
|
||||
"name": metadata.name,
|
||||
"version": metadata.version,
|
||||
"description": metadata.description,
|
||||
"goal": {
|
||||
"name": self.goal.name,
|
||||
"description": self.goal.description,
|
||||
},
|
||||
"nodes": [n.id for n in self.nodes],
|
||||
"edges": [e.id for e in self.edges],
|
||||
"entry_node": self.entry_node,
|
||||
"entry_points": self.entry_points,
|
||||
"pause_nodes": self.pause_nodes,
|
||||
"terminal_nodes": self.terminal_nodes,
|
||||
"client_facing_nodes": [n.id for n in self.nodes if n.client_facing],
|
||||
}
|
||||
|
||||
def validate(self):
|
||||
"""Validate agent structure."""
|
||||
errors = []
|
||||
warnings = []
|
||||
|
||||
node_ids = {node.id for node in self.nodes}
|
||||
for edge in self.edges:
|
||||
if edge.source not in node_ids:
|
||||
errors.append(f"Edge {edge.id}: source '{edge.source}' not found")
|
||||
if edge.target not in node_ids:
|
||||
errors.append(f"Edge {edge.id}: target '{edge.target}' not found")
|
||||
|
||||
if self.entry_node not in node_ids:
|
||||
errors.append(f"Entry node '{self.entry_node}' not found")
|
||||
|
||||
for terminal in self.terminal_nodes:
|
||||
if terminal not in node_ids:
|
||||
errors.append(f"Terminal node '{terminal}' not found")
|
||||
|
||||
for ep_id, node_id in self.entry_points.items():
|
||||
if node_id not in node_ids:
|
||||
errors.append(
|
||||
f"Entry point '{ep_id}' references unknown node '{node_id}'"
|
||||
)
|
||||
|
||||
return {
|
||||
"valid": len(errors) == 0,
|
||||
"errors": errors,
|
||||
"warnings": warnings,
|
||||
}
|
||||
|
||||
|
||||
# Create default instance
|
||||
default_agent = DeepResearchAgent()
|
||||
@@ -0,0 +1,26 @@
|
||||
"""Runtime configuration."""
|
||||
|
||||
from dataclasses import dataclass
|
||||
|
||||
from framework.config import RuntimeConfig
|
||||
|
||||
default_config = RuntimeConfig()
|
||||
|
||||
|
||||
@dataclass
|
||||
class AgentMetadata:
|
||||
name: str = "Deep Research Agent"
|
||||
version: str = "1.0.0"
|
||||
description: str = (
|
||||
"Interactive research agent that rigorously investigates topics through "
|
||||
"multi-source search, quality evaluation, and synthesis - with TUI conversation "
|
||||
"at key checkpoints for user guidance and feedback."
|
||||
)
|
||||
intro_message: str = (
|
||||
"Hi! I'm your deep research assistant. Tell me a topic and I'll investigate it "
|
||||
"thoroughly — searching multiple sources, evaluating quality, and synthesizing "
|
||||
"a comprehensive report. What would you like me to research?"
|
||||
)
|
||||
|
||||
|
||||
metadata = AgentMetadata()
|
||||
+2
-2
@@ -1,8 +1,8 @@
|
||||
{
|
||||
"hive-tools": {
|
||||
"transport": "stdio",
|
||||
"command": "python",
|
||||
"args": ["mcp_server.py", "--stdio"],
|
||||
"command": "uv",
|
||||
"args": ["run", "python", "mcp_server.py", "--stdio"],
|
||||
"cwd": "../../tools",
|
||||
"description": "Hive tools MCP server providing web_search, web_scrape, and write_to_file"
|
||||
}
|
||||
+79
-22
@@ -10,8 +10,13 @@ intake_node = NodeSpec(
|
||||
description="Discuss the research topic with the user, clarify scope, and confirm direction",
|
||||
node_type="event_loop",
|
||||
client_facing=True,
|
||||
max_node_visits=0,
|
||||
input_keys=["topic"],
|
||||
output_keys=["research_brief"],
|
||||
success_criteria=(
|
||||
"The research brief is specific and actionable: it states the topic, "
|
||||
"the key questions to answer, the desired scope, and depth."
|
||||
),
|
||||
system_prompt="""\
|
||||
You are a research intake specialist. The user wants to research a topic.
|
||||
Have a brief conversation to clarify what they need.
|
||||
@@ -38,10 +43,14 @@ research_node = NodeSpec(
|
||||
name="Research",
|
||||
description="Search the web, fetch source content, and compile findings",
|
||||
node_type="event_loop",
|
||||
max_node_visits=3,
|
||||
max_node_visits=0,
|
||||
input_keys=["research_brief", "feedback"],
|
||||
output_keys=["findings", "sources", "gaps"],
|
||||
nullable_output_keys=["feedback"],
|
||||
success_criteria=(
|
||||
"Findings reference at least 3 distinct sources with URLs. "
|
||||
"Key claims are substantiated by fetched content, not generated."
|
||||
),
|
||||
system_prompt="""\
|
||||
You are a research agent. Given a research brief, find and analyze sources.
|
||||
|
||||
@@ -56,18 +65,26 @@ Work in phases:
|
||||
and any contradictions between sources.
|
||||
|
||||
Important:
|
||||
- Work in batches of 3-4 tool calls at a time to manage context
|
||||
- Work in batches of 3-4 tool calls at a time — never more than 10 per turn
|
||||
- After each batch, assess whether you have enough material
|
||||
- Prefer quality over quantity — 5 good sources beat 15 thin ones
|
||||
- Track which URL each finding comes from (you'll need citations later)
|
||||
- Call set_output for each key in a SEPARATE turn (not in the same turn as other tool calls)
|
||||
|
||||
When done, use set_output:
|
||||
When done, use set_output (one key at a time, separate turns):
|
||||
- set_output("findings", "Structured summary: key findings with source URLs for each claim. \
|
||||
Include themes, contradictions, and confidence levels.")
|
||||
- set_output("sources", [{"url": "...", "title": "...", "summary": "..."}])
|
||||
- set_output("gaps", "What aspects of the research brief are NOT well-covered yet, if any.")
|
||||
""",
|
||||
tools=["web_search", "web_scrape", "load_data", "save_data", "list_data_files"],
|
||||
tools=[
|
||||
"web_search",
|
||||
"web_scrape",
|
||||
"load_data",
|
||||
"save_data",
|
||||
"append_data",
|
||||
"list_data_files",
|
||||
],
|
||||
)
|
||||
|
||||
# Node 3: Review (client-facing)
|
||||
@@ -78,9 +95,13 @@ review_node = NodeSpec(
|
||||
description="Present findings to user and decide whether to research more or write the report",
|
||||
node_type="event_loop",
|
||||
client_facing=True,
|
||||
max_node_visits=3,
|
||||
max_node_visits=0,
|
||||
input_keys=["findings", "sources", "gaps", "research_brief"],
|
||||
output_keys=["needs_more_research", "feedback"],
|
||||
success_criteria=(
|
||||
"The user has been presented with findings and has explicitly indicated "
|
||||
"whether they want more research or are ready for the report."
|
||||
),
|
||||
system_prompt="""\
|
||||
Present the research findings to the user clearly and concisely.
|
||||
|
||||
@@ -102,41 +123,77 @@ Should we proceed to writing the final report?
|
||||
)
|
||||
|
||||
# Node 4: Report (client-facing)
|
||||
# Writes the final report and presents it to the user.
|
||||
# Writes an HTML report, serves the link to the user, and answers follow-ups.
|
||||
report_node = NodeSpec(
|
||||
id="report",
|
||||
name="Write & Deliver Report",
|
||||
description="Write a cited report from the findings and present it to the user",
|
||||
description="Write a cited HTML report from the findings and present it to the user",
|
||||
node_type="event_loop",
|
||||
client_facing=True,
|
||||
max_node_visits=0,
|
||||
input_keys=["findings", "sources", "research_brief"],
|
||||
output_keys=["delivery_status"],
|
||||
output_keys=["delivery_status", "next_action"],
|
||||
success_criteria=(
|
||||
"An HTML report has been saved, the file link has been presented to the user, "
|
||||
"and the user has indicated what they want to do next."
|
||||
),
|
||||
system_prompt="""\
|
||||
Write a comprehensive research report and present it to the user.
|
||||
Write a research report as an HTML file and present it to the user.
|
||||
|
||||
**STEP 1 — Write and present the report (text only, NO tool calls):**
|
||||
IMPORTANT: save_data requires TWO separate arguments: filename and data.
|
||||
Call it like: save_data(filename="report.html", data="<html>...</html>")
|
||||
Do NOT use _raw, do NOT nest arguments inside a JSON string.
|
||||
|
||||
**STEP 1 — Write and save the HTML report (tool calls, NO text to user yet):**
|
||||
|
||||
Build a clean HTML document. Keep the HTML concise — aim for clarity over length.
|
||||
Use minimal embedded CSS (a few lines of style, not a full framework).
|
||||
|
||||
Report structure:
|
||||
1. **Executive Summary** (2-3 paragraphs)
|
||||
2. **Findings** (organized by theme, with [n] citations)
|
||||
3. **Analysis** (synthesis, implications, areas of debate)
|
||||
4. **Conclusion** (key takeaways, confidence assessment)
|
||||
5. **References** (numbered list of sources cited)
|
||||
- Title & date
|
||||
- Executive Summary (2-3 paragraphs)
|
||||
- Key Findings (organized by theme, with [n] citation links)
|
||||
- Analysis (synthesis, implications)
|
||||
- Conclusion (key takeaways)
|
||||
- References (numbered list with clickable URLs)
|
||||
|
||||
Requirements:
|
||||
- Every factual claim must cite its source with [n] notation
|
||||
- Be objective — present multiple viewpoints where sources disagree
|
||||
- Distinguish well-supported conclusions from speculation
|
||||
- Answer the original research questions from the brief
|
||||
|
||||
End by asking the user if they have questions or want to save the report.
|
||||
Save the HTML:
|
||||
save_data(filename="report.html", data="<html>...</html>")
|
||||
|
||||
**STEP 2 — After the user responds:**
|
||||
- Answer follow-up questions from the research material
|
||||
- If they want to save, use write_to_file tool
|
||||
- When the user is satisfied: set_output("delivery_status", "completed")
|
||||
Then get the clickable link:
|
||||
serve_file_to_user(filename="report.html", label="Research Report")
|
||||
|
||||
If save_data fails, simplify and shorten the HTML, then retry.
|
||||
|
||||
**STEP 2 — Present the link to the user (text only, NO tool calls):**
|
||||
|
||||
Tell the user the report is ready and include the file:// URI from
|
||||
serve_file_to_user so they can click it to open. Give a brief summary
|
||||
of what the report covers. Ask if they have questions or want to continue.
|
||||
|
||||
**STEP 3 — After the user responds:**
|
||||
- Answer any follow-up questions from the research material
|
||||
- When the user is ready to move on, ask what they'd like to do next:
|
||||
- Research a new topic?
|
||||
- Dig deeper into the current topic?
|
||||
- Then call set_output:
|
||||
- set_output("delivery_status", "completed")
|
||||
- set_output("next_action", "new_topic") — if they want a new topic
|
||||
- set_output("next_action", "more_research") — if they want deeper research
|
||||
""",
|
||||
tools=["write_to_file"],
|
||||
tools=[
|
||||
"save_data",
|
||||
"append_data",
|
||||
"edit_data",
|
||||
"serve_file_to_user",
|
||||
"load_data",
|
||||
"list_data_files",
|
||||
],
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
+134
-143
@@ -1,10 +1,10 @@
|
||||
---
|
||||
name: setup-credentials
|
||||
name: hive-credentials
|
||||
description: Set up and install credentials for an agent. Detects missing credentials from agent config, collects them from the user, and stores them securely in the local encrypted store at ~/.hive/credentials.
|
||||
license: Apache-2.0
|
||||
metadata:
|
||||
author: hive
|
||||
version: "2.2"
|
||||
version: "2.3"
|
||||
type: utility
|
||||
---
|
||||
|
||||
@@ -31,95 +31,50 @@ Determine which agent needs credentials. The user will either:
|
||||
|
||||
Locate the agent's directory under `exports/{agent_name}/`.
|
||||
|
||||
### Step 2: Detect Required Credentials (Bash-First)
|
||||
### Step 2: Detect Missing Credentials
|
||||
|
||||
Use bash commands to determine what the agent needs and what's already configured. This avoids Python import issues and works even when `HIVE_CREDENTIAL_KEY` is not set.
|
||||
Use the `check_missing_credentials` MCP tool to detect what the agent needs and what's already configured. This tool loads the agent, inspects its required tools and node types, maps them to credentials via `CREDENTIAL_SPECS`, and checks both the encrypted store and environment variables.
|
||||
|
||||
#### Step 2a: Read Agent Requirements
|
||||
|
||||
Extract `required_tools` and node types from the agent config:
|
||||
|
||||
```bash
|
||||
# Get required tools
|
||||
jq -r '.required_tools[]?' exports/{agent_name}/agent.json 2>/dev/null
|
||||
|
||||
# Get node types from graph nodes
|
||||
jq -r '.graph.nodes[]?.node_type' exports/{agent_name}/agent.json 2>/dev/null | sort -u
|
||||
```
|
||||
check_missing_credentials(agent_path="exports/{agent_name}")
|
||||
```
|
||||
|
||||
Map the extracted tools and node types to credentials by reading the spec files directly:
|
||||
The tool returns a JSON response:
|
||||
|
||||
```bash
|
||||
# Read all credential specs — each file defines tools, node_types, env_var, and credential_id
|
||||
cat tools/src/aden_tools/credentials/llm.py tools/src/aden_tools/credentials/search.py tools/src/aden_tools/credentials/email.py tools/src/aden_tools/credentials/integrations.py
|
||||
```json
|
||||
{
|
||||
"agent": "exports/{agent_name}",
|
||||
"missing": [
|
||||
{
|
||||
"credential_name": "brave_search",
|
||||
"env_var": "BRAVE_SEARCH_API_KEY",
|
||||
"description": "Brave Search API key for web search",
|
||||
"help_url": "https://brave.com/search/api/",
|
||||
"tools": ["web_search"]
|
||||
}
|
||||
],
|
||||
"available": [
|
||||
{
|
||||
"credential_name": "anthropic",
|
||||
"env_var": "ANTHROPIC_API_KEY",
|
||||
"source": "encrypted_store"
|
||||
}
|
||||
],
|
||||
"total_missing": 1,
|
||||
"ready": false
|
||||
}
|
||||
```
|
||||
|
||||
For each `CredentialSpec`, match its `tools` and `node_types` lists against the agent's required tools and node types. Extract the `env_var`, `credential_id`, and `credential_group` for every match. This is the list of needed credentials.
|
||||
|
||||
#### Step 2b: Check Existing Credential Sources
|
||||
|
||||
For each needed credential, check three sources. A credential is "found" if it exists in ANY of them:
|
||||
|
||||
**1. Encrypted store metadata index** (unencrypted JSON — no decryption key needed):
|
||||
|
||||
```bash
|
||||
cat ~/.hive/credentials/metadata/index.json 2>/dev/null | jq -r '.credentials | keys[]'
|
||||
```
|
||||
|
||||
If a credential ID appears in this list, it is stored in the encrypted store.
|
||||
|
||||
**2. Environment variables:**
|
||||
|
||||
```bash
|
||||
# Check each needed env var, e.g.:
|
||||
printenv ANTHROPIC_API_KEY > /dev/null 2>&1 && echo "ANTHROPIC_API_KEY: set" || echo "ANTHROPIC_API_KEY: not set"
|
||||
printenv BRAVE_SEARCH_API_KEY > /dev/null 2>&1 && echo "BRAVE_SEARCH_API_KEY: set" || echo "BRAVE_SEARCH_API_KEY: not set"
|
||||
```
|
||||
|
||||
**3. Project `.env` file:**
|
||||
|
||||
```bash
|
||||
# Check each needed env var, e.g.:
|
||||
grep -q '^ANTHROPIC_API_KEY=' .env 2>/dev/null && echo "ANTHROPIC_API_KEY: in .env" || echo "ANTHROPIC_API_KEY: not in .env"
|
||||
grep -q '^BRAVE_SEARCH_API_KEY=' .env 2>/dev/null && echo "BRAVE_SEARCH_API_KEY: in .env" || echo "BRAVE_SEARCH_API_KEY: not in .env"
|
||||
```
|
||||
|
||||
#### Step 2c: HIVE_CREDENTIAL_KEY Check
|
||||
|
||||
If any credentials were found in the encrypted store metadata index, verify the encryption key is available. The key is typically persisted to shell config by a previous setup-credentials run.
|
||||
|
||||
Check both the current session AND shell config files:
|
||||
|
||||
```bash
|
||||
# Check 1: Current session
|
||||
printenv HIVE_CREDENTIAL_KEY > /dev/null 2>&1 && echo "session: set" || echo "session: not set"
|
||||
|
||||
# Check 2: Shell config files (where setup-credentials persists it)
|
||||
# Note: check each file individually to avoid non-zero exit when one doesn't exist
|
||||
for f in ~/.zshrc ~/.bashrc ~/.profile; do [ -f "$f" ] && grep -q 'HIVE_CREDENTIAL_KEY' "$f" && echo "$f"; done
|
||||
```
|
||||
|
||||
Decision logic:
|
||||
- **In current session** — no action needed, credentials in the store are usable
|
||||
- **In shell config but NOT in current session** — the key is persisted but this shell hasn't sourced it. Run `source ~/.zshrc` (or `~/.bashrc`), then re-check. Credentials in the store are usable after sourcing.
|
||||
- **Not in session AND not in shell config** — the key was never persisted. Warn the user that credentials in the store cannot be decrypted. Help fix the key situation (recover/re-persist), do NOT re-collect credential values that are already stored.
|
||||
|
||||
#### Step 2d: Compute Missing & Group
|
||||
|
||||
Diff the "needed" credentials against the "found" credentials to get the truly missing list.
|
||||
|
||||
Group related credentials by their `credential_group` field from the spec files. Credentials that share the same non-empty `credential_group` value should be presented as a single setup step rather than asking for each one individually.
|
||||
|
||||
**If nothing is missing and there's no HIVE_CREDENTIAL_KEY issue:** Report all credentials as configured and skip Steps 3-5. Example:
|
||||
**If `ready` is true (nothing missing):** Report all credentials as configured and skip Steps 3-5. Example:
|
||||
|
||||
```
|
||||
All required credentials are already configured:
|
||||
✓ anthropic (ANTHROPIC_API_KEY) — found in encrypted store
|
||||
✓ brave_search (BRAVE_SEARCH_API_KEY) — found in environment
|
||||
✓ anthropic (ANTHROPIC_API_KEY)
|
||||
✓ brave_search (BRAVE_SEARCH_API_KEY)
|
||||
Your agent is ready to run!
|
||||
```
|
||||
|
||||
**If credentials are missing:** Continue to Step 3 with only the missing ones.
|
||||
**If credentials are missing:** Continue to Step 3 with the `missing` list.
|
||||
|
||||
### Step 3: Present Auth Options for Each Missing Credential
|
||||
|
||||
@@ -153,7 +108,7 @@ Present the available options using AskUserQuestion:
|
||||
Choose how to configure HUBSPOT_ACCESS_TOKEN:
|
||||
|
||||
1) Aden Platform (OAuth) (Recommended)
|
||||
Secure OAuth2 flow via integration.adenhq.com
|
||||
Secure OAuth2 flow via hive.adenhq.com
|
||||
- Quick setup with automatic token refresh
|
||||
- No need to manage API keys manually
|
||||
|
||||
@@ -170,6 +125,28 @@ Choose how to configure HUBSPOT_ACCESS_TOKEN:
|
||||
|
||||
### Step 4: Execute Auth Flow Based on User Choice
|
||||
|
||||
#### Prerequisite: Ensure HIVE_CREDENTIAL_KEY Is Available
|
||||
|
||||
Before storing any credentials, verify `HIVE_CREDENTIAL_KEY` is set (needed to encrypt/decrypt the local store). Check both the current session and shell config:
|
||||
|
||||
```bash
|
||||
# Check current session
|
||||
printenv HIVE_CREDENTIAL_KEY > /dev/null 2>&1 && echo "session: set" || echo "session: not set"
|
||||
|
||||
# Check shell config files
|
||||
for f in ~/.zshrc ~/.bashrc ~/.profile; do [ -f "$f" ] && grep -q 'HIVE_CREDENTIAL_KEY' "$f" && echo "$f"; done
|
||||
```
|
||||
|
||||
- **In current session** — proceed to store credentials
|
||||
- **In shell config but NOT in current session** — run `source ~/.zshrc` (or `~/.bashrc`) first, then proceed
|
||||
- **Not set anywhere** — `EncryptedFileStorage` will auto-generate one. After storing, tell the user to persist it: `export HIVE_CREDENTIAL_KEY="{generated_key}"` in their shell profile
|
||||
|
||||
> **⚠️ IMPORTANT: After adding `HIVE_CREDENTIAL_KEY` to the user's shell config, always display:**
|
||||
> ```
|
||||
> ⚠️ Environment variables were added to your shell config.
|
||||
> Open a NEW TERMINAL for them to take effect outside this session.
|
||||
> ```
|
||||
|
||||
#### Option 1: Aden Platform (OAuth)
|
||||
|
||||
This is the recommended flow for supported integrations (HubSpot, etc.).
|
||||
@@ -195,7 +172,7 @@ If not set, guide user to get one from Aden (this is where they do OAuth):
|
||||
from aden_tools.credentials import open_browser, get_aden_setup_url
|
||||
|
||||
# Open browser to Aden - user will sign up and connect integrations there
|
||||
url = get_aden_setup_url() # https://integration.adenhq.com/setup
|
||||
url = get_aden_setup_url() # https://hive.adenhq.com
|
||||
success, msg = open_browser(url)
|
||||
|
||||
print("Please sign in to Aden and connect your integrations (HubSpot, etc.).")
|
||||
@@ -231,6 +208,12 @@ if success:
|
||||
print(f"Run: {source_cmd}")
|
||||
```
|
||||
|
||||
> **⚠️ IMPORTANT: After adding `ADEN_API_KEY` to the user's shell config, always display:**
|
||||
> ```
|
||||
> ⚠️ Environment variables were added to your shell config.
|
||||
> Open a NEW TERMINAL for them to take effect outside this session.
|
||||
> ```
|
||||
|
||||
Also save to `~/.hive/configuration.json` for the framework:
|
||||
|
||||
```python
|
||||
@@ -272,7 +255,7 @@ print(f"Synced credentials: {synced}")
|
||||
# If the required credential wasn't synced, the user hasn't authorized it on Aden yet
|
||||
if "hubspot" not in synced:
|
||||
print("HubSpot not found in your Aden account.")
|
||||
print("Please visit https://integration.adenhq.com to connect HubSpot, then try again.")
|
||||
print("Please visit https://hive.adenhq.com to connect HubSpot, then try again.")
|
||||
```
|
||||
|
||||
For more control over the sync process:
|
||||
@@ -442,28 +425,38 @@ config_path.write_text(json.dumps(config, indent=2))
|
||||
|
||||
### Step 6: Verify All Credentials
|
||||
|
||||
Run validation again to confirm everything is set:
|
||||
Use the `verify_credentials` MCP tool to confirm everything is properly configured:
|
||||
|
||||
```python
|
||||
runner = AgentRunner.load("exports/{agent_name}")
|
||||
validation = runner.validate()
|
||||
assert not validation.missing_credentials, "Still missing credentials!"
|
||||
```
|
||||
verify_credentials(agent_path="exports/{agent_name}")
|
||||
```
|
||||
|
||||
Report the result to the user.
|
||||
The tool returns:
|
||||
|
||||
```json
|
||||
{
|
||||
"agent": "exports/{agent_name}",
|
||||
"ready": true,
|
||||
"missing_credentials": [],
|
||||
"warnings": [],
|
||||
"errors": []
|
||||
}
|
||||
```
|
||||
|
||||
If `ready` is true, report success. If `missing_credentials` is non-empty, identify what failed and loop back to Step 3 for the remaining credentials.
|
||||
|
||||
## Health Check Reference
|
||||
|
||||
Health checks validate credentials by making lightweight API calls:
|
||||
|
||||
| Credential | Endpoint | What It Checks |
|
||||
| --------------- | --------------------------------------- | ---------------------------------- |
|
||||
| `anthropic` | `POST /v1/messages` | API key validity |
|
||||
| `brave_search` | `GET /res/v1/web/search?q=test&count=1` | API key validity |
|
||||
| `google_search` | `GET /customsearch/v1?q=test&num=1` | API key + CSE ID validity |
|
||||
| `github` | `GET /user` | Token validity, user identity |
|
||||
| `hubspot` | `GET /crm/v3/objects/contacts?limit=1` | Bearer token validity, CRM scopes |
|
||||
| `resend` | `GET /domains` | API key validity |
|
||||
| Credential | Endpoint | What It Checks |
|
||||
| --------------- | --------------------------------------- | --------------------------------- |
|
||||
| `anthropic` | `POST /v1/messages` | API key validity |
|
||||
| `brave_search` | `GET /res/v1/web/search?q=test&count=1` | API key validity |
|
||||
| `google_search` | `GET /customsearch/v1?q=test&num=1` | API key + CSE ID validity |
|
||||
| `github` | `GET /user` | Token validity, user identity |
|
||||
| `hubspot` | `GET /crm/v3/objects/contacts?limit=1` | Bearer token validity, CRM scopes |
|
||||
| `resend` | `GET /domains` | API key validity |
|
||||
|
||||
```python
|
||||
from aden_tools.credentials import check_credential_health, HealthCheckResult
|
||||
@@ -479,9 +472,14 @@ result: HealthCheckResult = check_credential_health("hubspot", token_value)
|
||||
The local encrypted store requires `HIVE_CREDENTIAL_KEY` to encrypt/decrypt credentials.
|
||||
|
||||
- If the user doesn't have one, `EncryptedFileStorage` will auto-generate one and log it
|
||||
- The user MUST persist this key (e.g., in `~/.bashrc` or a secrets manager)
|
||||
- The user MUST persist this key (e.g., in `~/.bashrc`/`~/.zshrc` or a secrets manager)
|
||||
- Without this key, stored credentials cannot be decrypted
|
||||
- This and `ADEN_API_KEY` are the ONLY secrets that should live in `~/.bashrc` or environment config
|
||||
|
||||
**Shell config rule:** Only TWO keys belong in shell config (`~/.zshrc`/`~/.bashrc`):
|
||||
- `HIVE_CREDENTIAL_KEY` — encryption key for the credential store
|
||||
- `ADEN_API_KEY` — Aden platform auth key (needed before the store can sync)
|
||||
|
||||
All other API keys (Brave, Google, HubSpot, etc.) must go in the encrypted store only. **Never offer to add them to shell config.**
|
||||
|
||||
If `HIVE_CREDENTIAL_KEY` is not set:
|
||||
|
||||
@@ -494,6 +492,7 @@ If `HIVE_CREDENTIAL_KEY` is not set:
|
||||
- **NEVER** log, print, or echo credential values in tool output
|
||||
- **NEVER** store credentials in plaintext files, git-tracked files, or agent configs
|
||||
- **NEVER** hardcode credentials in source code
|
||||
- **NEVER** offer to save API keys to shell config (`~/.zshrc`/`~/.bashrc`) — the **only** keys that belong in shell config are `HIVE_CREDENTIAL_KEY` and `ADEN_API_KEY`. All other credentials (Brave, Google, HubSpot, GitHub, Resend, etc.) go in the encrypted store only.
|
||||
- **ALWAYS** use `SecretStr` from Pydantic when handling credential values in Python
|
||||
- **ALWAYS** use the local encrypted store (`~/.hive/credentials`) for persistence
|
||||
- **ALWAYS** run health checks before storing credentials (when possible)
|
||||
@@ -509,7 +508,7 @@ All credential specs are defined in `tools/src/aden_tools/credentials/`:
|
||||
| `llm.py` | LLM Providers | `anthropic` | No |
|
||||
| `search.py` | Search Tools | `brave_search`, `google_search`, `google_cse` | No |
|
||||
| `email.py` | Email | `resend` | No |
|
||||
| `integrations.py` | Integrations | `github`, `hubspot` | No / Yes |
|
||||
| `integrations.py` | Integrations | `github`, `hubspot`, `google_calendar_oauth` | No / Yes |
|
||||
|
||||
**Note:** Additional LLM providers (Cerebras, Groq, OpenAI) are handled by LiteLLM via environment
|
||||
variables (`CEREBRAS_API_KEY`, `GROQ_API_KEY`, `OPENAI_API_KEY`) but are not yet in CREDENTIAL_SPECS.
|
||||
@@ -560,60 +559,27 @@ token = store.get_key("hubspot", "access_token")
|
||||
## Example Session
|
||||
|
||||
```
|
||||
User: /setup-credentials for my research-agent
|
||||
User: /hive-credentials for my research-agent
|
||||
|
||||
Agent: Let me check what credentials your research-agent needs.
|
||||
|
||||
[Reads agent config]
|
||||
$ jq -r '.required_tools[]?' exports/research-agent/agent.json
|
||||
web_search
|
||||
google_search
|
||||
|
||||
$ jq -r '.graph.nodes[]?.node_type' exports/research-agent/agent.json | sort -u
|
||||
event_loop
|
||||
|
||||
[Maps tools/nodes to credentials using lookup table]
|
||||
Needed: anthropic, brave_search, google_search, google_cse
|
||||
|
||||
[Checks encrypted store metadata index]
|
||||
$ cat ~/.hive/credentials/metadata/index.json | jq -r '.credentials | keys[]'
|
||||
anthropic
|
||||
brave_search
|
||||
|
||||
[Checks environment variables]
|
||||
$ printenv ANTHROPIC_API_KEY > /dev/null 2>&1 && echo "set" || echo "not set"
|
||||
not set
|
||||
$ printenv BRAVE_SEARCH_API_KEY > /dev/null 2>&1 && echo "set" || echo "not set"
|
||||
not set
|
||||
$ printenv GOOGLE_API_KEY > /dev/null 2>&1 && echo "set" || echo "not set"
|
||||
not set
|
||||
$ printenv GOOGLE_CSE_ID > /dev/null 2>&1 && echo "set" || echo "not set"
|
||||
not set
|
||||
|
||||
[Checks HIVE_CREDENTIAL_KEY since credentials found in store]
|
||||
$ printenv HIVE_CREDENTIAL_KEY > /dev/null 2>&1 && echo "session: set" || echo "session: not set"
|
||||
session: not set
|
||||
$ for f in ~/.zshrc ~/.bashrc ~/.profile; do [ -f "$f" ] && grep -q 'HIVE_CREDENTIAL_KEY' "$f" && echo "$f"; done
|
||||
/Users/user/.zshrc
|
||||
|
||||
[Key is in shell config but not current session — sourcing it]
|
||||
$ source ~/.zshrc
|
||||
|
||||
[Computes missing credentials]
|
||||
Found:
|
||||
✓ anthropic (ANTHROPIC_API_KEY) — in encrypted store
|
||||
✓ brave_search (BRAVE_SEARCH_API_KEY) — in encrypted store
|
||||
Missing:
|
||||
✗ google_search (GOOGLE_API_KEY)
|
||||
✗ google_cse (GOOGLE_CSE_ID)
|
||||
[Calls check_missing_credentials(agent_path="exports/research-agent")]
|
||||
→ Returns:
|
||||
available: anthropic (encrypted_store), brave_search (encrypted_store)
|
||||
missing: google_search (GOOGLE_API_KEY), google_cse (GOOGLE_CSE_ID)
|
||||
ready: false
|
||||
|
||||
Agent: 2 of 4 required credentials are already configured. Only Google Custom
|
||||
Search needs setup (2 values as a single group).
|
||||
Search needs setup (2 values).
|
||||
|
||||
--- Setting up Google Custom Search (google_search + google_cse) ---
|
||||
|
||||
This requires two values that work together.
|
||||
|
||||
[Checks HIVE_CREDENTIAL_KEY before storing]
|
||||
$ printenv HIVE_CREDENTIAL_KEY > /dev/null 2>&1 && echo "set" || echo "not set"
|
||||
set
|
||||
|
||||
First, the Google API Key:
|
||||
1. Go to https://console.cloud.google.com/apis/credentials
|
||||
2. Create a new project (or select an existing one)
|
||||
@@ -640,10 +606,35 @@ Now, the Custom Search Engine ID:
|
||||
|
||||
✓ Google Custom Search credentials valid
|
||||
|
||||
[Calls verify_credentials(agent_path="exports/research-agent")]
|
||||
→ Returns: ready: true, missing_credentials: []
|
||||
|
||||
All credentials are now configured:
|
||||
✓ anthropic (ANTHROPIC_API_KEY) — already in encrypted store
|
||||
✓ brave_search (BRAVE_SEARCH_API_KEY) — already in encrypted store
|
||||
✓ google_search (GOOGLE_API_KEY) — stored in encrypted store
|
||||
✓ google_cse (GOOGLE_CSE_ID) — stored in encrypted store
|
||||
Your agent is ready to run!
|
||||
|
||||
┌─────────────────────────────────────────────────────────────────────────────┐
|
||||
│ ✅ CREDENTIALS CONFIGURED │
|
||||
├─────────────────────────────────────────────────────────────────────────────┤
|
||||
│ │
|
||||
│ OPEN A NEW TERMINAL before running commands below. │
|
||||
│ Environment variables were saved to your shell config but │
|
||||
│ only take effect in new terminal sessions. │
|
||||
│ │
|
||||
│ NEXT STEPS: │
|
||||
│ │
|
||||
│ 1. RUN YOUR AGENT: │
|
||||
│ │
|
||||
│ hive tui │
|
||||
│ │
|
||||
│ 2. IF YOU ENCOUNTER ISSUES, USE THE DEBUGGER: │
|
||||
│ │
|
||||
│ /hive-debugger │
|
||||
│ │
|
||||
│ The debugger analyzes runtime logs, identifies retry loops, tool │
|
||||
│ failures, stalled execution, and provides actionable fix suggestions. │
|
||||
│ │
|
||||
└─────────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
File diff suppressed because it is too large
Load Diff
+52
-41
@@ -1,19 +1,19 @@
|
||||
---
|
||||
name: building-agents-patterns
|
||||
name: hive-patterns
|
||||
description: Best practices, patterns, and examples for building goal-driven agents. Includes client-facing interaction, feedback edges, judge patterns, fan-out/fan-in, context management, and anti-patterns.
|
||||
license: Apache-2.0
|
||||
metadata:
|
||||
author: hive
|
||||
version: "2.0"
|
||||
type: reference
|
||||
part_of: building-agents
|
||||
part_of: hive
|
||||
---
|
||||
|
||||
# Building Agents - Patterns & Best Practices
|
||||
|
||||
Design patterns, examples, and best practices for building robust goal-driven agents.
|
||||
|
||||
**Prerequisites:** Complete agent structure using `building-agents-construction`.
|
||||
**Prerequisites:** Complete agent structure using `hive-create`.
|
||||
|
||||
## Practical Example: Hybrid Workflow
|
||||
|
||||
@@ -97,6 +97,7 @@ research_node = NodeSpec(
|
||||
```
|
||||
|
||||
**How it works:**
|
||||
|
||||
- Client-facing nodes stream LLM text to the user and block for input after each response
|
||||
- User input is injected via `node.inject_event(text)`
|
||||
- When the LLM calls `set_output` to produce structured outputs, the judge evaluates and ACCEPTs
|
||||
@@ -107,13 +108,13 @@ research_node = NodeSpec(
|
||||
|
||||
### When to Use client_facing
|
||||
|
||||
| Scenario | client_facing | Why |
|
||||
|----------|:---:|-----|
|
||||
| Gathering user requirements | Yes | Need user input |
|
||||
| Human review/approval checkpoint | Yes | Need human decision |
|
||||
| Data processing (scanning, scoring) | No | Runs autonomously |
|
||||
| Report generation | No | No user input needed |
|
||||
| Final confirmation before action | Yes | Need explicit approval |
|
||||
| Scenario | client_facing | Why |
|
||||
| ----------------------------------- | :-----------: | ---------------------- |
|
||||
| Gathering user requirements | Yes | Need user input |
|
||||
| Human review/approval checkpoint | Yes | Need human decision |
|
||||
| Data processing (scanning, scoring) | No | Runs autonomously |
|
||||
| Report generation | No | No user input needed |
|
||||
| Final confirmation before action | Yes | Need explicit approval |
|
||||
|
||||
> **Legacy Note:** The `pause_nodes` / `entry_points` pattern still works for backward compatibility but `client_facing=True` is preferred for new agents.
|
||||
|
||||
@@ -158,22 +159,24 @@ EdgeSpec(
|
||||
```
|
||||
|
||||
**Key concepts:**
|
||||
|
||||
- `nullable_output_keys`: Lists output keys that may remain unset. The node sets exactly one of the mutually exclusive keys per execution.
|
||||
- `max_node_visits`: Must be >1 on the feedback target (extractor) so it can re-execute. Default is 1.
|
||||
- `priority`: Positive = forward edge (evaluated first). Negative = feedback edge. The executor tries forward edges first; if none match, falls back to feedback edges.
|
||||
|
||||
### Routing Decision Table
|
||||
|
||||
| Pattern | Old Approach | New Approach |
|
||||
|---------|-------------|--------------|
|
||||
| Conditional branching | `router` node | Conditional edges with `condition_expr` |
|
||||
| Binary approve/reject | `pause_nodes` + resume | `client_facing=True` + `nullable_output_keys` |
|
||||
| Loop-back on rejection | Manual entry_points | Feedback edge with `priority=-1` |
|
||||
| Multi-way routing | Router with routes dict | Multiple conditional edges with priorities |
|
||||
| Pattern | Old Approach | New Approach |
|
||||
| ---------------------- | ----------------------- | --------------------------------------------- |
|
||||
| Conditional branching | `router` node | Conditional edges with `condition_expr` |
|
||||
| Binary approve/reject | `pause_nodes` + resume | `client_facing=True` + `nullable_output_keys` |
|
||||
| Loop-back on rejection | Manual entry_points | Feedback edge with `priority=-1` |
|
||||
| Multi-way routing | Router with routes dict | Multiple conditional edges with priorities |
|
||||
|
||||
## Judge Patterns
|
||||
|
||||
**Core Principle: The judge is the SOLE mechanism for acceptance decisions.** Never add ad-hoc framework gating to compensate for LLM behavior. If the LLM calls `set_output` prematurely, fix the system prompt or use a custom judge. Anti-patterns to avoid:
|
||||
|
||||
- Output rollback logic
|
||||
- `_user_has_responded` flags
|
||||
- Premature set_output rejection
|
||||
@@ -184,6 +187,7 @@ Judges control when an event_loop node's loop exits. Choose based on validation
|
||||
### Implicit Judge (Default)
|
||||
|
||||
When no judge is configured, the implicit judge ACCEPTs when:
|
||||
|
||||
- The LLM finishes its response with no tool calls
|
||||
- All required output keys have been set via `set_output`
|
||||
|
||||
@@ -219,11 +223,11 @@ class SchemaJudge:
|
||||
|
||||
### When to Use Which Judge
|
||||
|
||||
| Judge | Use When | Example |
|
||||
|-------|----------|---------|
|
||||
| Judge | Use When | Example |
|
||||
| --------------- | ------------------------------------- | ---------------------- |
|
||||
| Implicit (None) | Output keys are sufficient validation | Simple data extraction |
|
||||
| SchemaJudge | Need structural validation of outputs | API response parsing |
|
||||
| Custom | Domain-specific validation logic | Score must be 0.0-1.0 |
|
||||
| SchemaJudge | Need structural validation of outputs | API response parsing |
|
||||
| Custom | Domain-specific validation logic | Score must be 0.0-1.0 |
|
||||
|
||||
## Fan-Out / Fan-In (Parallel Execution)
|
||||
|
||||
@@ -244,6 +248,7 @@ EdgeSpec(id="scorer-to-extractor", source="scorer", target="extractor",
|
||||
```
|
||||
|
||||
**Requirements:**
|
||||
|
||||
- Parallel event_loop nodes must have **disjoint output_keys** (no key written by both)
|
||||
- Only one parallel branch may contain a `client_facing` node
|
||||
- Fan-in node receives outputs from all completed branches in shared memory
|
||||
@@ -253,6 +258,7 @@ EdgeSpec(id="scorer-to-extractor", source="scorer", target="extractor",
|
||||
### Tiered Compaction
|
||||
|
||||
EventLoopNode automatically manages context window usage with tiered compaction:
|
||||
|
||||
1. **Pruning** — Old tool results replaced with compact placeholders (zero-cost, no LLM call)
|
||||
2. **Normal compaction** — LLM summarizes older messages
|
||||
3. **Aggressive compaction** — Keeps only recent messages + summary
|
||||
@@ -265,17 +271,20 @@ The framework automatically truncates large tool results and saves full content
|
||||
For explicit data management, use the data tools (real MCP tools, not synthetic):
|
||||
|
||||
```python
|
||||
# save_data, load_data, list_data_files are real MCP tools
|
||||
# Each takes a data_dir parameter since the MCP server is shared
|
||||
# save_data, load_data, list_data_files, serve_file_to_user are real MCP tools
|
||||
# data_dir is auto-injected by the framework — the LLM never sees it
|
||||
|
||||
# Saving large results
|
||||
save_data(filename="sources.json", data=large_json_string, data_dir="/path/to/spillover")
|
||||
save_data(filename="sources.json", data=large_json_string)
|
||||
|
||||
# Reading with pagination (line-based offset/limit)
|
||||
load_data(filename="sources.json", data_dir="/path/to/spillover", offset=0, limit=50)
|
||||
load_data(filename="sources.json", offset=0, limit=50)
|
||||
|
||||
# Listing available files
|
||||
list_data_files(data_dir="/path/to/spillover")
|
||||
list_data_files()
|
||||
|
||||
# Serving a file to the user as a clickable link
|
||||
serve_file_to_user(filename="report.html", label="Research Report")
|
||||
```
|
||||
|
||||
Add data tools to nodes that handle large tool results:
|
||||
@@ -287,7 +296,7 @@ research_node = NodeSpec(
|
||||
)
|
||||
```
|
||||
|
||||
The `data_dir` is passed by the framework (from the node's spillover directory). The LLM sees `data_dir` in truncation messages and uses it when calling `load_data`.
|
||||
`data_dir` is a framework context parameter — auto-injected at call time. `GraphExecutor.execute()` sets it per-execution via `ToolRegistry.set_execution_context(data_dir=...)` (using `contextvars` for concurrency safety), ensuring it matches the session-scoped spillover directory.
|
||||
|
||||
## Anti-Patterns
|
||||
|
||||
@@ -304,18 +313,19 @@ The `data_dir` is passed by the framework (from the node's spillover directory).
|
||||
|
||||
A common mistake is splitting work into too many small single-purpose nodes. Each node boundary requires serializing outputs, losing in-context information, and adding edge complexity.
|
||||
|
||||
| Bad (8 thin nodes) | Good (4 rich nodes) |
|
||||
|---------------------|---------------------|
|
||||
| parse-query | intake (client-facing) |
|
||||
| search-sources | research (search + fetch + analyze) |
|
||||
| fetch-content | review (client-facing) |
|
||||
| evaluate-sources | report (write + deliver) |
|
||||
| synthesize-findings | |
|
||||
| write-report | |
|
||||
| quality-check | |
|
||||
| save-report | |
|
||||
| Bad (8 thin nodes) | Good (4 rich nodes) |
|
||||
| ------------------- | ----------------------------------- |
|
||||
| parse-query | intake (client-facing) |
|
||||
| search-sources | research (search + fetch + analyze) |
|
||||
| fetch-content | review (client-facing) |
|
||||
| evaluate-sources | report (write + deliver) |
|
||||
| synthesize-findings | |
|
||||
| write-report | |
|
||||
| quality-check | |
|
||||
| save-report | |
|
||||
|
||||
**Why fewer nodes are better:**
|
||||
|
||||
- The LLM retains full context of its work within a single node
|
||||
- A research node that searches, fetches, and analyzes keeps all source material in its conversation history
|
||||
- Fewer edges means simpler graph and fewer failure points
|
||||
@@ -324,6 +334,7 @@ A common mistake is splitting work into too many small single-purpose nodes. Eac
|
||||
### MCP Tools - Correct Usage
|
||||
|
||||
**MCP tools OK for:**
|
||||
|
||||
- `test_node` — Validate node configuration with mock inputs
|
||||
- `validate_graph` — Check graph structure
|
||||
- `configure_loop` — Set event loop parameters
|
||||
@@ -356,7 +367,7 @@ When agent is complete, transition to testing phase:
|
||||
### Pre-Testing Checklist
|
||||
|
||||
- [ ] Agent structure validates: `uv run python -m agent_name validate`
|
||||
- [ ] All nodes defined in nodes/__init__.py
|
||||
- [ ] All nodes defined in `nodes/__init__.py`
|
||||
- [ ] All edges connect valid nodes with correct priorities
|
||||
- [ ] Feedback edge targets have `max_node_visits > 1`
|
||||
- [ ] Client-facing nodes have meaningful system prompts
|
||||
@@ -364,10 +375,10 @@ When agent is complete, transition to testing phase:
|
||||
|
||||
## Related Skills
|
||||
|
||||
- **building-agents-core** — Fundamental concepts (node types, edges, event loop architecture)
|
||||
- **building-agents-construction** — Step-by-step building process
|
||||
- **testing-agent** — Test and validate agents
|
||||
- **agent-workflow** — Complete workflow orchestrator
|
||||
- **hive-concepts** — Fundamental concepts (node types, edges, event loop architecture)
|
||||
- **hive-create** — Step-by-step building process
|
||||
- **hive-test** — Test and validate agents
|
||||
- **hive** — Complete workflow orchestrator
|
||||
|
||||
---
|
||||
|
||||
@@ -0,0 +1,940 @@
|
||||
---
|
||||
name: hive-test
|
||||
description: Iterative agent testing with session recovery. Execute, analyze, fix, resume from checkpoints. Use when testing an agent, debugging test failures, or verifying fixes without re-running from scratch.
|
||||
---
|
||||
|
||||
# Agent Testing
|
||||
|
||||
Test agents iteratively: execute, analyze failures, fix, resume from checkpoint, repeat.
|
||||
|
||||
## When to Use
|
||||
|
||||
- Testing a newly built agent against its goal
|
||||
- Debugging a failing agent iteratively
|
||||
- Verifying fixes without re-running expensive early nodes
|
||||
- Running final regression tests before deployment
|
||||
|
||||
## Prerequisites
|
||||
|
||||
1. Agent package at `exports/{agent_name}/` (built with `/hive-create`)
|
||||
2. Credentials configured (`/hive-credentials`)
|
||||
3. `ANTHROPIC_API_KEY` set (or appropriate LLM provider key)
|
||||
|
||||
**Path distinction** (critical — don't confuse these):
|
||||
- `exports/{agent_name}/` — agent source code (edit here)
|
||||
- `~/.hive/agents/{agent_name}/` — runtime data: sessions, checkpoints, logs (read here)
|
||||
|
||||
---
|
||||
|
||||
## The Iterative Test Loop
|
||||
|
||||
This is the core workflow. Don't re-run the entire agent when a late node fails — analyze, fix, and resume from the last clean checkpoint.
|
||||
|
||||
```
|
||||
┌──────────────────────────────────────┐
|
||||
│ PHASE 1: Generate Test Scenarios │
|
||||
│ Goal → synthetic test inputs + tests │
|
||||
└──────────────┬───────────────────────┘
|
||||
↓
|
||||
┌──────────────────────────────────────┐
|
||||
│ PHASE 2: Execute │◄────────────────┐
|
||||
│ Run agent (CLI or pytest) │ │
|
||||
└──────────────┬───────────────────────┘ │
|
||||
↓ │
|
||||
Pass? ──yes──► PHASE 6: Final Verification │
|
||||
│ │
|
||||
no │
|
||||
↓ │
|
||||
┌──────────────────────────────────────┐ │
|
||||
│ PHASE 3: Analyze │ │
|
||||
│ Session + runtime logs + checkpoints │ │
|
||||
└──────────────┬───────────────────────┘ │
|
||||
↓ │
|
||||
┌──────────────────────────────────────┐ │
|
||||
│ PHASE 4: Fix │ │
|
||||
│ Prompt / code / graph / goal │ │
|
||||
└──────────────┬───────────────────────┘ │
|
||||
↓ │
|
||||
┌──────────────────────────────────────┐ │
|
||||
│ PHASE 5: Recover & Resume │─────────────────┘
|
||||
│ Checkpoint resume OR fresh re-run │
|
||||
└──────────────────────────────────────┘
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Phase 1: Generate Test Scenarios
|
||||
|
||||
Create synthetic tests from the agent's goal, constraints, and success criteria.
|
||||
|
||||
#### Step 1a: Read the goal
|
||||
|
||||
```python
|
||||
# Read goal from agent.py
|
||||
Read(file_path="exports/{agent_name}/agent.py")
|
||||
# Extract the Goal definition and convert to JSON string
|
||||
```
|
||||
|
||||
#### Step 1b: Get test guidelines
|
||||
|
||||
```python
|
||||
# Get constraint test guidelines
|
||||
generate_constraint_tests(
|
||||
goal_id="your-goal-id",
|
||||
goal_json='{"id": "...", "constraints": [...]}',
|
||||
agent_path="exports/{agent_name}"
|
||||
)
|
||||
|
||||
# Get success criteria test guidelines
|
||||
generate_success_tests(
|
||||
goal_id="your-goal-id",
|
||||
goal_json='{"id": "...", "success_criteria": [...]}',
|
||||
node_names="intake,research,review,report",
|
||||
tool_names="web_search,web_scrape",
|
||||
agent_path="exports/{agent_name}"
|
||||
)
|
||||
```
|
||||
|
||||
These return `file_header`, `test_template`, `constraints_formatted`/`success_criteria_formatted`, and `test_guidelines`. They do NOT generate test code — you write the tests.
|
||||
|
||||
#### Step 1c: Write tests
|
||||
|
||||
```python
|
||||
Write(
|
||||
file_path=result["output_file"],
|
||||
content=result["file_header"] + "\n\n" + your_test_code
|
||||
)
|
||||
```
|
||||
|
||||
#### Test writing rules
|
||||
|
||||
- Every test MUST be `async` with `@pytest.mark.asyncio`
|
||||
- Every test MUST accept `runner, auto_responder, mock_mode` fixtures
|
||||
- Use `await auto_responder.start()` before running, `await auto_responder.stop()` in `finally`
|
||||
- Use `await runner.run(input_dict)` — this goes through AgentRunner → AgentRuntime → ExecutionStream
|
||||
- Access output via `result.output.get("key")` — NEVER `result.output["key"]`
|
||||
- `result.success=True` means no exception, NOT goal achieved — always check output
|
||||
- Write 8-15 tests total, not 30+
|
||||
- Each real test costs ~3 seconds + LLM tokens
|
||||
- NEVER use `default_agent.run()` — it bypasses the runtime (no sessions, no logs, client-facing nodes hang)
|
||||
|
||||
#### Step 1d: Check existing tests
|
||||
|
||||
Before generating, check if tests already exist:
|
||||
|
||||
```python
|
||||
list_tests(
|
||||
goal_id="your-goal-id",
|
||||
agent_path="exports/{agent_name}"
|
||||
)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Phase 2: Execute
|
||||
|
||||
Two execution paths, use the right one for your situation.
|
||||
|
||||
#### Iterative debugging (for complex agents)
|
||||
|
||||
Run the agent via CLI. This creates sessions with checkpoints at `~/.hive/agents/{agent_name}/sessions/`:
|
||||
|
||||
```bash
|
||||
uv run hive run exports/{agent_name} --input '{"query": "test topic"}'
|
||||
```
|
||||
|
||||
Sessions and checkpoints are saved automatically.
|
||||
|
||||
**Client-facing nodes**: Agents with `client_facing=True` nodes (interactive conversation) work in headless mode when run from a real terminal — the agent streams output to stdout and reads user input from stdin via a `>>> ` prompt. In non-interactive shells (like Claude Code's Bash tool), client-facing nodes will hang because there is no stdin. For testing interactive agents from Claude Code, use `run_tests` with mock mode or have the user run the agent manually in their terminal.
|
||||
|
||||
#### Automated regression (for CI or final verification)
|
||||
|
||||
Use the `run_tests` MCP tool to run all pytest tests:
|
||||
|
||||
```python
|
||||
run_tests(
|
||||
goal_id="your-goal-id",
|
||||
agent_path="exports/{agent_name}"
|
||||
)
|
||||
```
|
||||
|
||||
Returns structured results:
|
||||
```json
|
||||
{
|
||||
"overall_passed": false,
|
||||
"summary": {"total": 12, "passed": 10, "failed": 2, "pass_rate": "83.3%"},
|
||||
"test_results": [{"test_name": "test_success_source_diversity", "status": "failed"}],
|
||||
"failures": [{"test_name": "test_success_source_diversity", "details": "..."}]
|
||||
}
|
||||
```
|
||||
|
||||
**Options:**
|
||||
```python
|
||||
# Run only constraint tests
|
||||
run_tests(goal_id, agent_path, test_types='["constraint"]')
|
||||
|
||||
# Stop on first failure
|
||||
run_tests(goal_id, agent_path, fail_fast=True)
|
||||
|
||||
# Parallel execution
|
||||
run_tests(goal_id, agent_path, parallel=4)
|
||||
```
|
||||
|
||||
**Note:** `run_tests` uses `AgentRunner` with `tmp_path` storage, so sessions are isolated per test run. For checkpoint-based recovery with persistent sessions, use CLI execution. Use `run_tests` for quick regression checks and final verification.
|
||||
|
||||
---
|
||||
|
||||
### Phase 3: Analyze Failures
|
||||
|
||||
When a test fails, drill down systematically. Don't guess — use the tools.
|
||||
|
||||
#### Step 3a: Get error category
|
||||
|
||||
```python
|
||||
debug_test(
|
||||
goal_id="your-goal-id",
|
||||
test_name="test_success_source_diversity",
|
||||
agent_path="exports/{agent_name}"
|
||||
)
|
||||
```
|
||||
|
||||
Returns error category (`IMPLEMENTATION_ERROR`, `ASSERTION_FAILURE`, `TIMEOUT`, `IMPORT_ERROR`, `API_ERROR`) plus full traceback and suggestions.
|
||||
|
||||
#### Step 3b: Find the failed session
|
||||
|
||||
```python
|
||||
list_agent_sessions(
|
||||
agent_work_dir="~/.hive/agents/{agent_name}",
|
||||
status="failed",
|
||||
limit=5
|
||||
)
|
||||
```
|
||||
|
||||
Returns session list with IDs, timestamps, current_node (where it failed), execution_quality.
|
||||
|
||||
#### Step 3c: Inspect session state
|
||||
|
||||
```python
|
||||
get_agent_session_state(
|
||||
agent_work_dir="~/.hive/agents/{agent_name}",
|
||||
session_id="session_20260209_143022_abc12345"
|
||||
)
|
||||
```
|
||||
|
||||
Returns execution path, which node was current, step count, timestamps — but excludes memory values (to avoid context bloat). Shows `memory_keys` and `memory_size` instead.
|
||||
|
||||
#### Step 3d: Examine runtime logs (L2/L3)
|
||||
|
||||
```python
|
||||
# L2: Per-node success/failure, retry counts
|
||||
query_runtime_log_details(
|
||||
agent_work_dir="~/.hive/agents/{agent_name}",
|
||||
run_id="session_20260209_143022_abc12345",
|
||||
needs_attention_only=True
|
||||
)
|
||||
|
||||
# L3: Exact LLM responses, tool call inputs/outputs
|
||||
query_runtime_log_raw(
|
||||
agent_work_dir="~/.hive/agents/{agent_name}",
|
||||
run_id="session_20260209_143022_abc12345",
|
||||
node_id="research"
|
||||
)
|
||||
```
|
||||
|
||||
#### Step 3e: Inspect memory data
|
||||
|
||||
```python
|
||||
# See what data a node actually produced
|
||||
get_agent_session_memory(
|
||||
agent_work_dir="~/.hive/agents/{agent_name}",
|
||||
session_id="session_20260209_143022_abc12345",
|
||||
key="research_results"
|
||||
)
|
||||
```
|
||||
|
||||
#### Step 3f: Find recovery points
|
||||
|
||||
```python
|
||||
list_agent_checkpoints(
|
||||
agent_work_dir="~/.hive/agents/{agent_name}",
|
||||
session_id="session_20260209_143022_abc12345",
|
||||
is_clean="true"
|
||||
)
|
||||
```
|
||||
|
||||
Returns checkpoint summaries with IDs, types (`node_start`, `node_complete`), which node, and `is_clean` flag. Clean checkpoints are safe resume points.
|
||||
|
||||
#### Step 3g: Compare checkpoints (optional)
|
||||
|
||||
To understand what changed between two points in execution:
|
||||
|
||||
```python
|
||||
compare_agent_checkpoints(
|
||||
agent_work_dir="~/.hive/agents/{agent_name}",
|
||||
session_id="session_20260209_143022_abc12345",
|
||||
checkpoint_id_before="cp_node_complete_research_143030",
|
||||
checkpoint_id_after="cp_node_complete_review_143115"
|
||||
)
|
||||
```
|
||||
|
||||
Returns memory diff (added/removed/changed keys) and execution path diff.
|
||||
|
||||
---
|
||||
|
||||
### Phase 4: Fix Based on Root Cause
|
||||
|
||||
Use the analysis from Phase 3 to determine what to fix and where.
|
||||
|
||||
| Root Cause | What to Fix | Where to Edit |
|
||||
|------------|------------|---------------|
|
||||
| **Prompt issue** — LLM produces wrong output format, misses instructions | Node `system_prompt` | `exports/{agent}/nodes/__init__.py` |
|
||||
| **Code bug** — TypeError, KeyError, logic error in Python | Agent code | `exports/{agent}/agent.py`, `nodes/__init__.py` |
|
||||
| **Graph issue** — wrong routing, missing edge, bad condition_expr | Edges, node config | `exports/{agent}/agent.py` |
|
||||
| **Tool issue** — MCP tool fails, wrong config, missing credential | Tool config | `exports/{agent}/mcp_servers.json`, `/hive-credentials` |
|
||||
| **Goal issue** — success criteria too strict/vague, wrong constraints | Goal definition | `exports/{agent}/agent.py` (goal section) |
|
||||
| **Test issue** — test expectations don't match actual agent behavior | Test code | `exports/{agent}/tests/test_*.py` |
|
||||
|
||||
#### Fix strategies by error category
|
||||
|
||||
**IMPLEMENTATION_ERROR** (TypeError, AttributeError, KeyError):
|
||||
```python
|
||||
# Read the failing code
|
||||
Read(file_path="exports/{agent_name}/nodes/__init__.py")
|
||||
|
||||
# Fix the bug
|
||||
Edit(
|
||||
file_path="exports/{agent_name}/nodes/__init__.py",
|
||||
old_string="results.get('videos')",
|
||||
new_string="(results or {}).get('videos', [])"
|
||||
)
|
||||
```
|
||||
|
||||
**ASSERTION_FAILURE** (test assertions fail but agent ran successfully):
|
||||
- Check if the agent's output is actually wrong → fix the prompt
|
||||
- Check if the test's expectations are unrealistic → fix the test
|
||||
- Use `get_agent_session_memory` to see what the agent actually produced
|
||||
|
||||
**TIMEOUT / STALL** (agent runs too long):
|
||||
- Check `node_visit_counts` for feedback loops hitting max_node_visits
|
||||
- Check L3 logs for tool calls that hang
|
||||
- Reduce `max_iterations` in loop_config or fix the prompt to converge faster
|
||||
|
||||
**API_ERROR** (connection, rate limit, auth):
|
||||
- Verify credentials with `/hive-credentials`
|
||||
- Check MCP server configuration
|
||||
|
||||
---
|
||||
|
||||
### Phase 5: Recover & Resume
|
||||
|
||||
After fixing the agent, decide whether to resume or re-run.
|
||||
|
||||
#### When to resume from checkpoint
|
||||
|
||||
Resume when ALL of these are true:
|
||||
- The fix is to a node that comes AFTER existing clean checkpoints
|
||||
- Clean checkpoints exist (from a CLI execution with checkpointing)
|
||||
- The early nodes are expensive (web scraping, API calls, long LLM chains)
|
||||
|
||||
```bash
|
||||
# Resume from the last clean checkpoint before the failing node
|
||||
uv run hive run exports/{agent_name} \
|
||||
--resume-session session_20260209_143022_abc12345 \
|
||||
--checkpoint cp_node_complete_research_143030
|
||||
```
|
||||
|
||||
This skips all nodes before the checkpoint and only re-runs the fixed node onward.
|
||||
|
||||
#### When to re-run from scratch
|
||||
|
||||
Re-run when ANY of these are true:
|
||||
- The fix is to the entry node or an early node
|
||||
- No checkpoints exist (e.g., agent was run via `run_tests`)
|
||||
- The agent is fast (2-3 nodes, completes in seconds)
|
||||
- You changed the graph structure (added/removed nodes/edges)
|
||||
|
||||
```bash
|
||||
uv run hive run exports/{agent_name} --input '{"query": "test topic"}'
|
||||
```
|
||||
|
||||
#### Inspecting a checkpoint before resuming
|
||||
|
||||
```python
|
||||
get_agent_checkpoint(
|
||||
agent_work_dir="~/.hive/agents/{agent_name}",
|
||||
session_id="session_20260209_143022_abc12345",
|
||||
checkpoint_id="cp_node_complete_research_143030"
|
||||
)
|
||||
```
|
||||
|
||||
Returns the full checkpoint: shared_memory snapshot, execution_path, current_node, next_node, is_clean.
|
||||
|
||||
#### Loop back to Phase 2
|
||||
|
||||
After resuming or re-running, check if the fix worked. If not, go back to Phase 3.
|
||||
|
||||
---
|
||||
|
||||
### Phase 6: Final Verification
|
||||
|
||||
Once the iterative fix loop converges (the agent produces correct output), run the full automated test suite:
|
||||
|
||||
```python
|
||||
run_tests(
|
||||
goal_id="your-goal-id",
|
||||
agent_path="exports/{agent_name}"
|
||||
)
|
||||
```
|
||||
|
||||
All tests should pass. If not, repeat the loop for remaining failures.
|
||||
|
||||
---
|
||||
|
||||
## Credential Requirements
|
||||
|
||||
**CRITICAL: Testing requires ALL credentials the agent depends on.** This includes both the LLM API key AND any tool-specific credentials (HubSpot, Brave Search, etc.).
|
||||
|
||||
### Prerequisites
|
||||
|
||||
Before running agent tests, you MUST collect ALL required credentials from the user.
|
||||
|
||||
**Step 1: LLM API Key (always required)**
|
||||
```bash
|
||||
export ANTHROPIC_API_KEY="your-key-here"
|
||||
```
|
||||
|
||||
**Step 2: Tool-specific credentials (depends on agent's tools)**
|
||||
|
||||
Inspect the agent's `mcp_servers.json` and tool configuration to determine which tools the agent uses, then check for all required credentials:
|
||||
|
||||
```python
|
||||
from aden_tools.credentials import CredentialManager, CREDENTIAL_SPECS
|
||||
|
||||
creds = CredentialManager()
|
||||
|
||||
# Determine which tools the agent uses (from agent.json or mcp_servers.json)
|
||||
agent_tools = [...] # e.g., ["hubspot_search_contacts", "web_search", ...]
|
||||
|
||||
# Find all missing credentials for those tools
|
||||
missing = creds.get_missing_for_tools(agent_tools)
|
||||
```
|
||||
|
||||
Common tool credentials:
|
||||
| Tool | Env Var | Help URL |
|
||||
|------|---------|----------|
|
||||
| HubSpot CRM | `HUBSPOT_ACCESS_TOKEN` | https://developers.hubspot.com/docs/api/private-apps |
|
||||
| Brave Search | `BRAVE_SEARCH_API_KEY` | https://brave.com/search/api/ |
|
||||
| Google Search | `GOOGLE_SEARCH_API_KEY` + `GOOGLE_SEARCH_CX` | https://developers.google.com/custom-search |
|
||||
|
||||
**Why ALL credentials are required:**
|
||||
- Tests need to execute the agent's LLM nodes to validate behavior
|
||||
- Tools with missing credentials will return error dicts instead of real data
|
||||
- Mock mode bypasses everything, providing no confidence in real-world performance
|
||||
|
||||
### Mock Mode Limitations
|
||||
|
||||
Mock mode (`--mock` flag or `MOCK_MODE=1`) is **ONLY for structure validation**:
|
||||
|
||||
- Validates graph structure (nodes, edges, connections)
|
||||
- Validates that `AgentRunner.load()` succeeds and the agent is importable
|
||||
- Does NOT execute event_loop agents — MockLLMProvider never calls `set_output`, so event_loop nodes loop forever
|
||||
- Does NOT test LLM reasoning, content quality, or constraint validation
|
||||
- Does NOT test real API integrations or tool use
|
||||
|
||||
**Bottom line:** If you're testing whether an agent achieves its goal, you MUST use real credentials.
|
||||
|
||||
### Enforcing Credentials in Tests
|
||||
|
||||
When writing tests, **ALWAYS include credential checks**:
|
||||
|
||||
```python
|
||||
import os
|
||||
import pytest
|
||||
from aden_tools.credentials import CredentialManager
|
||||
|
||||
pytestmark = pytest.mark.skipif(
|
||||
not CredentialManager().is_available("anthropic") and not os.environ.get("MOCK_MODE"),
|
||||
reason="API key required for real testing. Set ANTHROPIC_API_KEY or use MOCK_MODE=1."
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture(scope="session", autouse=True)
|
||||
def check_credentials():
|
||||
"""Ensure ALL required credentials are set for real testing."""
|
||||
creds = CredentialManager()
|
||||
mock_mode = os.environ.get("MOCK_MODE")
|
||||
|
||||
if not creds.is_available("anthropic"):
|
||||
if mock_mode:
|
||||
print("\nRunning in MOCK MODE - structure validation only")
|
||||
else:
|
||||
pytest.fail(
|
||||
"\nANTHROPIC_API_KEY not set!\n"
|
||||
"Set API key: export ANTHROPIC_API_KEY='your-key-here'\n"
|
||||
"Or run structure validation: MOCK_MODE=1 pytest exports/{agent}/tests/"
|
||||
)
|
||||
|
||||
if not mock_mode:
|
||||
agent_tools = [] # Update per agent
|
||||
missing = creds.get_missing_for_tools(agent_tools)
|
||||
if missing:
|
||||
lines = ["\nMissing tool credentials!"]
|
||||
for name in missing:
|
||||
spec = creds.specs.get(name)
|
||||
if spec:
|
||||
lines.append(f" {spec.env_var} - {spec.description}")
|
||||
pytest.fail("\n".join(lines))
|
||||
```
|
||||
|
||||
### User Communication
|
||||
|
||||
When the user asks to test an agent, **ALWAYS check for ALL credentials first**:
|
||||
|
||||
1. **Identify the agent's tools** from `mcp_servers.json`
|
||||
2. **Check ALL required credentials** using `CredentialManager`
|
||||
3. **Ask the user to provide any missing credentials** before proceeding
|
||||
4. Collect ALL missing credentials in a single prompt — not one at a time
|
||||
|
||||
---
|
||||
|
||||
## Safe Test Patterns
|
||||
|
||||
### OutputCleaner
|
||||
|
||||
The framework automatically validates and cleans node outputs using a fast LLM at edge traversal time. Tests should still use safe patterns because OutputCleaner may not catch all issues.
|
||||
|
||||
### Safe Access (REQUIRED)
|
||||
|
||||
```python
|
||||
# UNSAFE - will crash on missing keys
|
||||
approval = result.output["approval_decision"]
|
||||
category = result.output["analysis"]["category"]
|
||||
|
||||
# SAFE - use .get() with defaults
|
||||
output = result.output or {}
|
||||
approval = output.get("approval_decision", "UNKNOWN")
|
||||
|
||||
# SAFE - type check before operations
|
||||
analysis = output.get("analysis", {})
|
||||
if isinstance(analysis, dict):
|
||||
category = analysis.get("category", "unknown")
|
||||
|
||||
# SAFE - handle JSON parsing trap (LLM response as string)
|
||||
import json
|
||||
recommendation = output.get("recommendation", "{}")
|
||||
if isinstance(recommendation, str):
|
||||
try:
|
||||
parsed = json.loads(recommendation)
|
||||
if isinstance(parsed, dict):
|
||||
approval = parsed.get("approval_decision", "UNKNOWN")
|
||||
except json.JSONDecodeError:
|
||||
approval = "UNKNOWN"
|
||||
elif isinstance(recommendation, dict):
|
||||
approval = recommendation.get("approval_decision", "UNKNOWN")
|
||||
|
||||
# SAFE - type check before iteration
|
||||
items = output.get("items", [])
|
||||
if isinstance(items, list):
|
||||
for item in items:
|
||||
...
|
||||
```
|
||||
|
||||
### Helper Functions for conftest.py
|
||||
|
||||
```python
|
||||
import json
|
||||
import re
|
||||
|
||||
def _parse_json_from_output(result, key):
|
||||
"""Parse JSON from agent output (framework may store full LLM response as string)."""
|
||||
response_text = (result.output or {}).get(key, "")
|
||||
json_text = re.sub(r'```json\s*|\s*```', '', response_text).strip()
|
||||
try:
|
||||
return json.loads(json_text)
|
||||
except (json.JSONDecodeError, AttributeError, TypeError):
|
||||
        return (result.output or {}).get(key)
|
||||
|
||||
def safe_get_nested(result, key_path, default=None):
|
||||
"""Safely get nested value from result.output."""
|
||||
output = result.output or {}
|
||||
current = output
|
||||
for key in key_path:
|
||||
if isinstance(current, dict):
|
||||
current = current.get(key)
|
||||
elif isinstance(current, str):
|
||||
try:
|
||||
json_text = re.sub(r'```json\s*|\s*```', '', current).strip()
|
||||
parsed = json.loads(json_text)
|
||||
if isinstance(parsed, dict):
|
||||
current = parsed.get(key)
|
||||
else:
|
||||
return default
|
||||
except json.JSONDecodeError:
|
||||
return default
|
||||
else:
|
||||
return default
|
||||
return current if current is not None else default
|
||||
|
||||
# Make available in tests
|
||||
pytest.parse_json_from_output = _parse_json_from_output
|
||||
pytest.safe_get_nested = safe_get_nested
|
||||
```
|
||||
|
||||
### ExecutionResult Fields
|
||||
|
||||
**`result.success=True` means NO exception, NOT goal achieved**
|
||||
|
||||
```python
|
||||
# WRONG
|
||||
assert result.success
|
||||
|
||||
# RIGHT
|
||||
assert result.success, f"Agent failed: {result.error}"
|
||||
output = result.output or {}
|
||||
approval = output.get("approval_decision")
|
||||
assert approval == "APPROVED", f"Expected APPROVED, got {approval}"
|
||||
```
|
||||
|
||||
All fields:
|
||||
- `success: bool` — Completed without exception (NOT goal achieved!)
|
||||
- `output: dict` — Complete memory snapshot (may contain raw strings)
|
||||
- `error: str | None` — Error message if failed
|
||||
- `steps_executed: int` — Number of nodes executed
|
||||
- `total_tokens: int` — Cumulative token usage
|
||||
- `total_latency_ms: int` — Total execution time
|
||||
- `path: list[str]` — Node IDs traversed (may repeat in feedback loops)
|
||||
- `paused_at: str | None` — Node ID if paused
|
||||
- `session_state: dict` — State for resuming
|
||||
- `node_visit_counts: dict[str, int]` — Visit counts per node (feedback loop testing)
|
||||
- `execution_quality: str` — "clean", "degraded", or "failed"
|
||||
|
||||
### Test Count Guidance
|
||||
|
||||
**Write 8-15 tests, not 30+**
|
||||
|
||||
- 2-3 tests per success criterion
|
||||
- 1 happy path test
|
||||
- 1 boundary/edge case test
|
||||
- 1 error handling test (optional)
|
||||
|
||||
Each real test costs ~3 seconds + LLM tokens. 12 tests = ~36 seconds, $0.12.
|
||||
|
||||
---
|
||||
|
||||
## Test Patterns
|
||||
|
||||
### Happy Path
|
||||
```python
|
||||
@pytest.mark.asyncio
|
||||
async def test_happy_path(runner, auto_responder, mock_mode):
|
||||
"""Test normal successful execution."""
|
||||
await auto_responder.start()
|
||||
try:
|
||||
result = await runner.run({"query": "python tutorials"})
|
||||
finally:
|
||||
await auto_responder.stop()
|
||||
assert result.success, f"Agent failed: {result.error}"
|
||||
output = result.output or {}
|
||||
assert output.get("report"), "No report produced"
|
||||
```
|
||||
|
||||
### Boundary Condition
|
||||
```python
|
||||
@pytest.mark.asyncio
|
||||
async def test_minimum_sources(runner, auto_responder, mock_mode):
|
||||
"""Test at minimum source threshold."""
|
||||
await auto_responder.start()
|
||||
try:
|
||||
result = await runner.run({"query": "niche topic"})
|
||||
finally:
|
||||
await auto_responder.stop()
|
||||
assert result.success, f"Agent failed: {result.error}"
|
||||
output = result.output or {}
|
||||
sources = output.get("sources", [])
|
||||
if isinstance(sources, list):
|
||||
assert len(sources) >= 3, f"Expected >= 3 sources, got {len(sources)}"
|
||||
```
|
||||
|
||||
### Error Handling
|
||||
```python
|
||||
@pytest.mark.asyncio
|
||||
async def test_empty_input(runner, auto_responder, mock_mode):
|
||||
"""Test graceful handling of empty input."""
|
||||
await auto_responder.start()
|
||||
try:
|
||||
result = await runner.run({"query": ""})
|
||||
finally:
|
||||
await auto_responder.stop()
|
||||
# Agent should either fail gracefully or produce an error message
|
||||
output = result.output or {}
|
||||
assert not result.success or output.get("error"), "Should handle empty input"
|
||||
```
|
||||
|
||||
### Feedback Loop
|
||||
```python
|
||||
@pytest.mark.asyncio
|
||||
async def test_feedback_loop_terminates(runner, auto_responder, mock_mode):
|
||||
"""Test that feedback loops don't run forever."""
|
||||
await auto_responder.start()
|
||||
try:
|
||||
result = await runner.run({"query": "test"})
|
||||
finally:
|
||||
await auto_responder.stop()
|
||||
visits = result.node_visit_counts or {}
|
||||
for node_id, count in visits.items():
|
||||
assert count <= 5, f"Node {node_id} visited {count} times — possible infinite loop"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## MCP Tool Reference
|
||||
|
||||
### Phase 1: Test Generation
|
||||
|
||||
```python
|
||||
# Check existing tests
|
||||
list_tests(goal_id, agent_path)
|
||||
|
||||
# Get constraint test guidelines (returns templates, NOT generated tests)
|
||||
generate_constraint_tests(goal_id, goal_json, agent_path)
|
||||
# Returns: output_file, file_header, test_template, constraints_formatted, test_guidelines
|
||||
|
||||
# Get success criteria test guidelines
|
||||
generate_success_tests(goal_id, goal_json, node_names, tool_names, agent_path)
|
||||
# Returns: output_file, file_header, test_template, success_criteria_formatted, test_guidelines
|
||||
```
|
||||
|
||||
### Phase 2: Execution
|
||||
|
||||
```python
|
||||
# Automated regression (no checkpoints, fresh runs)
|
||||
run_tests(goal_id, agent_path, test_types='["all"]', parallel=-1, fail_fast=False)
|
||||
|
||||
# Run only specific test types
|
||||
run_tests(goal_id, agent_path, test_types='["constraint"]')
|
||||
run_tests(goal_id, agent_path, test_types='["success"]')
|
||||
```
|
||||
|
||||
```bash
|
||||
# Iterative debugging with checkpoints (via CLI)
|
||||
uv run hive run exports/{agent_name} --input '{"query": "test"}'
|
||||
```
|
||||
|
||||
### Phase 3: Analysis
|
||||
|
||||
```python
|
||||
# Debug a specific failed test
|
||||
debug_test(goal_id, test_name, agent_path)
|
||||
|
||||
# Find failed sessions
|
||||
list_agent_sessions(agent_work_dir, status="failed", limit=5)
|
||||
|
||||
# Inspect session state (excludes memory values)
|
||||
get_agent_session_state(agent_work_dir, session_id)
|
||||
|
||||
# Inspect memory data
|
||||
get_agent_session_memory(agent_work_dir, session_id, key="research_results")
|
||||
|
||||
# Runtime logs: L1 summaries
|
||||
query_runtime_logs(agent_work_dir, status="needs_attention")
|
||||
|
||||
# Runtime logs: L2 per-node details
|
||||
query_runtime_log_details(agent_work_dir, run_id, needs_attention_only=True)
|
||||
|
||||
# Runtime logs: L3 tool/LLM raw data
|
||||
query_runtime_log_raw(agent_work_dir, run_id, node_id="research")
|
||||
|
||||
# Find clean checkpoints
|
||||
list_agent_checkpoints(agent_work_dir, session_id, is_clean="true")
|
||||
|
||||
# Compare checkpoints (memory diff)
|
||||
compare_agent_checkpoints(agent_work_dir, session_id, checkpoint_id_before, checkpoint_id_after)
|
||||
```
|
||||
|
||||
### Phase 5: Recovery
|
||||
|
||||
```python
|
||||
# Inspect checkpoint before resuming
|
||||
get_agent_checkpoint(agent_work_dir, session_id, checkpoint_id)
|
||||
# Empty checkpoint_id = latest checkpoint
|
||||
```
|
||||
|
||||
```bash
|
||||
# Resume from checkpoint via CLI (headless)
|
||||
uv run hive run exports/{agent_name} \
|
||||
--resume-session {session_id} --checkpoint {checkpoint_id}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Anti-Patterns
|
||||
|
||||
| Don't | Do Instead |
|
||||
|-------|-----------|
|
||||
| Use `default_agent.run()` in tests | Use `runner.run()` with `auto_responder` fixtures (goes through AgentRuntime) |
|
||||
| Re-run entire agent when a late node fails | Resume from last clean checkpoint |
|
||||
| Treat `result.success` as goal achieved | Check `result.output` for actual criteria |
|
||||
| Access `result.output["key"]` directly | Use `result.output.get("key")` |
|
||||
| Fix random things hoping tests pass | Analyze L2/L3 logs to find root cause first |
|
||||
| Write 30+ tests | Write 8-15 focused tests |
|
||||
| Skip credential check | Use `/hive-credentials` before testing |
|
||||
| Confuse `exports/` with `~/.hive/agents/` | Code in `exports/`, runtime data in `~/.hive/` |
|
||||
| Use `run_tests` for iterative debugging | Use headless CLI with checkpoints for iterative debugging |
|
||||
| Use headless CLI for final regression | Use `run_tests` for automated regression |
|
||||
| Use `--tui` from Claude Code | Use headless `run` command — TUI hangs in non-interactive shells |
|
||||
| Test client-facing nodes from Claude Code | Use mock mode, or have the user run the agent in their terminal |
|
||||
| Run tests without reading goal first | Always understand the goal before writing tests |
|
||||
| Skip Phase 3 analysis and guess | Use session + log tools to identify root cause |
|
||||
|
||||
---
|
||||
|
||||
## Example Walkthrough: Deep Research Agent
|
||||
|
||||
A complete iteration showing the test loop for an agent with nodes: `intake → research → review → report`.
|
||||
|
||||
### Phase 1: Generate tests
|
||||
|
||||
```python
|
||||
# Read the goal
|
||||
Read(file_path="exports/deep_research_agent/agent.py")
|
||||
|
||||
# Get success criteria test guidelines
|
||||
result = generate_success_tests(
|
||||
goal_id="rigorous-interactive-research",
|
||||
goal_json='{"id": "rigorous-interactive-research", "success_criteria": [{"id": "source-diversity", "target": ">=5"}, {"id": "citation-coverage", "target": "100%"}, {"id": "report-completeness", "target": "90%"}]}',
|
||||
node_names="intake,research,review,report",
|
||||
tool_names="web_search,web_scrape",
|
||||
agent_path="exports/deep_research_agent"
|
||||
)
|
||||
|
||||
# Write tests
|
||||
Write(
|
||||
file_path=result["output_file"],
|
||||
content=result["file_header"] + "\n\n" + test_code
|
||||
)
|
||||
```
|
||||
|
||||
### Phase 2: First execution
|
||||
|
||||
```python
|
||||
run_tests(
|
||||
goal_id="rigorous-interactive-research",
|
||||
agent_path="exports/deep_research_agent",
|
||||
fail_fast=True
|
||||
)
|
||||
```
|
||||
|
||||
Result: `test_success_source_diversity` fails — agent only found 2 sources instead of 5.
|
||||
|
||||
### Phase 3: Analyze
|
||||
|
||||
```python
|
||||
# Debug the failing test
|
||||
debug_test(
|
||||
goal_id="rigorous-interactive-research",
|
||||
test_name="test_success_source_diversity",
|
||||
agent_path="exports/deep_research_agent"
|
||||
)
|
||||
# → ASSERTION_FAILURE: Expected >= 5 sources, got 2
|
||||
|
||||
# Find the session
|
||||
list_agent_sessions(
|
||||
agent_work_dir="~/.hive/agents/deep_research_agent",
|
||||
status="completed",
|
||||
limit=1
|
||||
)
|
||||
# → session_20260209_150000_abc12345
|
||||
|
||||
# See what the research node produced
|
||||
get_agent_session_memory(
|
||||
agent_work_dir="~/.hive/agents/deep_research_agent",
|
||||
session_id="session_20260209_150000_abc12345",
|
||||
key="research_results"
|
||||
)
|
||||
# → Only 2 web_search calls made, each returned 1 source
|
||||
|
||||
# Check the LLM's behavior in the research node
|
||||
query_runtime_log_raw(
|
||||
agent_work_dir="~/.hive/agents/deep_research_agent",
|
||||
run_id="session_20260209_150000_abc12345",
|
||||
node_id="research"
|
||||
)
|
||||
# → LLM called web_search only twice, then called set_output
|
||||
```
|
||||
|
||||
Root cause: The research node's prompt doesn't tell the LLM to search for at least 5 diverse sources. It stops after the first couple of searches.
|
||||
|
||||
### Phase 4: Fix the prompt
|
||||
|
||||
```python
|
||||
Read(file_path="exports/deep_research_agent/nodes/__init__.py")
|
||||
|
||||
Edit(
|
||||
file_path="exports/deep_research_agent/nodes/__init__.py",
|
||||
old_string='system_prompt="Search for information on the user\'s topic."',
|
||||
new_string='system_prompt="Search for information on the user\'s topic. You MUST find at least 5 diverse, authoritative sources. Use multiple different search queries to ensure source diversity. Do not stop searching until you have at least 5 distinct sources."'
|
||||
)
|
||||
```
|
||||
|
||||
### Phase 5: Resume from checkpoint
|
||||
|
||||
For this example, the fix is to the `research` node. If we had run via CLI with checkpointing, we could resume from the checkpoint after `intake` to skip re-running intake:
|
||||
|
||||
```python
|
||||
# Check if clean checkpoint exists after intake
|
||||
list_agent_checkpoints(
|
||||
agent_work_dir="~/.hive/agents/deep_research_agent",
|
||||
session_id="session_20260209_150000_abc12345",
|
||||
is_clean="true"
|
||||
)
|
||||
# → cp_node_complete_intake_150005
|
||||
```
|
||||
|
||||
```bash
|
||||
# Resume from after intake, re-run research with fixed prompt
|
||||
uv run hive run exports/deep_research_agent \
|
||||
--resume-session session_20260209_150000_abc12345 \
|
||||
--checkpoint cp_node_complete_intake_150005
|
||||
```
|
||||
|
||||
Or for this simple case (intake is fast), just re-run:
|
||||
|
||||
```bash
|
||||
uv run hive run exports/deep_research_agent --input '{"topic": "test"}'
|
||||
```
|
||||
|
||||
### Phase 6: Final verification
|
||||
|
||||
```python
|
||||
run_tests(
|
||||
goal_id="rigorous-interactive-research",
|
||||
agent_path="exports/deep_research_agent"
|
||||
)
|
||||
# → All 12 tests pass
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Test File Structure
|
||||
|
||||
```
|
||||
exports/{agent_name}/
|
||||
├── agent.py ← Agent to test (goal, nodes, edges)
|
||||
├── nodes/__init__.py ← Node implementations (prompts, config)
|
||||
├── config.py ← Agent configuration
|
||||
├── mcp_servers.json ← Tool server config
|
||||
└── tests/
|
||||
├── conftest.py ← Shared fixtures + safe access helpers
|
||||
├── test_constraints.py ← Constraint tests
|
||||
├── test_success_criteria.py ← Success criteria tests
|
||||
└── test_edge_cases.py ← Edge case tests
|
||||
```
|
||||
|
||||
## Integration with Other Skills
|
||||
|
||||
| Scenario | From | To | Action |
|
||||
|----------|------|----|--------|
|
||||
| Agent built, ready to test | `/hive-create` | `/hive-test` | Generate tests, start loop |
|
||||
| Prompt fix needed | `/hive-test` Phase 4 | Direct edit | Edit `nodes/__init__.py`, resume |
|
||||
| Goal definition wrong | `/hive-test` Phase 4 | `/hive-create` | Update goal, may need rebuild |
|
||||
| Missing credentials | `/hive-test` Phase 3 | `/hive-credentials` | Set up credentials |
|
||||
| Complex runtime failure | `/hive-test` Phase 3 | `/hive-debugger` | Deep L1/L2/L3 analysis |
|
||||
| All tests pass | `/hive-test` Phase 6 | Done | Agent validated |
|
||||
@@ -0,0 +1,333 @@
|
||||
# Example: Iterative Testing of a Research Agent
|
||||
|
||||
This example walks through the full iterative test loop for a research agent that searches the web, reviews findings, and produces a cited report.
|
||||
|
||||
## Agent Structure
|
||||
|
||||
```
|
||||
exports/deep_research_agent/
|
||||
├── agent.py # Goal + graph: intake → research → review → report
|
||||
├── nodes/__init__.py # Node definitions (system_prompt, input/output keys)
|
||||
├── config.py # Model config
|
||||
├── mcp_servers.json # Tools: web_search, web_scrape
|
||||
└── tests/ # Test files (we'll create these)
|
||||
```
|
||||
|
||||
**Goal:** "Rigorous Interactive Research" — find 5+ diverse sources, cite every claim, produce a complete report.
|
||||
|
||||
---
|
||||
|
||||
## Phase 1: Generate Tests
|
||||
|
||||
### Read the goal
|
||||
|
||||
```python
|
||||
Read(file_path="exports/deep_research_agent/agent.py")
|
||||
# Extract: goal_id="rigorous-interactive-research"
|
||||
# success_criteria: source-diversity (>=5), citation-coverage (100%), report-completeness (90%)
|
||||
# constraints: no-hallucination, source-attribution
|
||||
```
|
||||
|
||||
### Get test guidelines
|
||||
|
||||
```python
|
||||
result = generate_success_tests(
|
||||
goal_id="rigorous-interactive-research",
|
||||
goal_json='{"id": "rigorous-interactive-research", "success_criteria": [{"id": "source-diversity", "description": "Use multiple diverse sources", "target": ">=5"}, {"id": "citation-coverage", "description": "Every claim cites its source", "target": "100%"}, {"id": "report-completeness", "description": "Report answers the research questions", "target": "90%"}]}',
|
||||
node_names="intake,research,review,report",
|
||||
tool_names="web_search,web_scrape",
|
||||
agent_path="exports/deep_research_agent"
|
||||
)
|
||||
```
|
||||
|
||||
### Write tests
|
||||
|
||||
```python
|
||||
Write(
|
||||
file_path="exports/deep_research_agent/tests/test_success_criteria.py",
|
||||
content=result["file_header"] + '''
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_success_source_diversity(runner, auto_responder, mock_mode):
|
||||
"""At least 5 diverse sources are found."""
|
||||
await auto_responder.start()
|
||||
try:
|
||||
result = await runner.run({"query": "impact of remote work on productivity"})
|
||||
finally:
|
||||
await auto_responder.stop()
|
||||
assert result.success, f"Agent failed: {result.error}"
|
||||
output = result.output or {}
|
||||
sources = output.get("sources", [])
|
||||
if isinstance(sources, list):
|
||||
assert len(sources) >= 5, f"Expected >= 5 sources, got {len(sources)}"
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_success_citation_coverage(runner, auto_responder, mock_mode):
|
||||
"""Every factual claim in the report cites its source."""
|
||||
await auto_responder.start()
|
||||
try:
|
||||
result = await runner.run({"query": "climate change effects on agriculture"})
|
||||
finally:
|
||||
await auto_responder.stop()
|
||||
assert result.success, f"Agent failed: {result.error}"
|
||||
output = result.output or {}
|
||||
report = output.get("report", "")
|
||||
# Check that report contains numbered references
|
||||
assert "[1]" in str(report) or "[source" in str(report).lower(), "Report lacks citations"
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_success_report_completeness(runner, auto_responder, mock_mode):
|
||||
"""Report addresses the original research question."""
|
||||
query = "pros and cons of nuclear energy"
|
||||
await auto_responder.start()
|
||||
try:
|
||||
result = await runner.run({"query": query})
|
||||
finally:
|
||||
await auto_responder.stop()
|
||||
assert result.success, f"Agent failed: {result.error}"
|
||||
output = result.output or {}
|
||||
report = output.get("report", "")
|
||||
assert len(str(report)) > 200, f"Report too short: {len(str(report))} chars"
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_empty_query_handling(runner, auto_responder, mock_mode):
|
||||
"""Agent handles empty input gracefully."""
|
||||
await auto_responder.start()
|
||||
try:
|
||||
result = await runner.run({"query": ""})
|
||||
finally:
|
||||
await auto_responder.stop()
|
||||
output = result.output or {}
|
||||
assert not result.success or output.get("error"), "Should handle empty query"
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_feedback_loop_terminates(runner, auto_responder, mock_mode):
|
||||
"""Feedback loop between review and research terminates."""
|
||||
await auto_responder.start()
|
||||
try:
|
||||
result = await runner.run({"query": "quantum computing basics"})
|
||||
finally:
|
||||
await auto_responder.stop()
|
||||
visits = result.node_visit_counts or {}
|
||||
for node_id, count in visits.items():
|
||||
assert count <= 5, f"Node {node_id} visited {count} times"
|
||||
'''
|
||||
)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Phase 2: First Execution
|
||||
|
||||
```python
|
||||
run_tests(
|
||||
goal_id="rigorous-interactive-research",
|
||||
agent_path="exports/deep_research_agent",
|
||||
fail_fast=True
|
||||
)
|
||||
```
|
||||
|
||||
**Result:**
|
||||
```json
|
||||
{
|
||||
"overall_passed": false,
|
||||
"summary": {"total": 5, "passed": 3, "failed": 2, "pass_rate": "60.0%"},
|
||||
"failures": [
|
||||
{"test_name": "test_success_source_diversity", "details": "AssertionError: Expected >= 5 sources, got 2"},
|
||||
{"test_name": "test_success_citation_coverage", "details": "AssertionError: Report lacks citations"}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Phase 3: Analyze (Iteration 1)
|
||||
|
||||
### Debug the first failure
|
||||
|
||||
```python
|
||||
debug_test(
|
||||
goal_id="rigorous-interactive-research",
|
||||
test_name="test_success_source_diversity",
|
||||
agent_path="exports/deep_research_agent"
|
||||
)
|
||||
# Category: ASSERTION_FAILURE — Expected >= 5 sources, got 2
|
||||
```
|
||||
|
||||
### Find the session and inspect memory
|
||||
|
||||
```python
|
||||
list_agent_sessions(
|
||||
agent_work_dir="~/.hive/agents/deep_research_agent",
|
||||
status="completed",
|
||||
limit=1
|
||||
)
|
||||
# → session_20260209_150000_abc12345
|
||||
|
||||
get_agent_session_memory(
|
||||
agent_work_dir="~/.hive/agents/deep_research_agent",
|
||||
session_id="session_20260209_150000_abc12345",
|
||||
key="research_results"
|
||||
)
|
||||
# → Only 2 sources found. LLM stopped searching after 2 queries.
|
||||
```
|
||||
|
||||
### Check LLM behavior in the research node
|
||||
|
||||
```python
|
||||
query_runtime_log_raw(
|
||||
agent_work_dir="~/.hive/agents/deep_research_agent",
|
||||
run_id="session_20260209_150000_abc12345",
|
||||
node_id="research"
|
||||
)
|
||||
# → LLM called web_search twice, got results, immediately called set_output.
|
||||
# → Prompt doesn't instruct it to find at least 5 sources.
|
||||
```
|
||||
|
||||
**Root cause:** The research node's system_prompt doesn't specify minimum source requirements.
|
||||
|
||||
---
|
||||
|
||||
## Phase 4: Fix (Iteration 1)
|
||||
|
||||
```python
|
||||
Read(file_path="exports/deep_research_agent/nodes/__init__.py")
|
||||
|
||||
# Fix the research node prompt
|
||||
Edit(
|
||||
file_path="exports/deep_research_agent/nodes/__init__.py",
|
||||
old_string='system_prompt="Search for information on the user\'s topic using web search."',
|
||||
new_string='system_prompt="Search for information on the user\'s topic using web search. You MUST find at least 5 diverse, authoritative sources. Use multiple different search queries with varied keywords. Do NOT call set_output until you have gathered at least 5 distinct sources from different domains."'
|
||||
)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Phase 5: Recover & Resume (Iteration 1)
|
||||
|
||||
The fix is to the `research` node. Since this was a `run_tests` execution (no checkpoints), we re-run from scratch:
|
||||
|
||||
```python
|
||||
run_tests(
|
||||
goal_id="rigorous-interactive-research",
|
||||
agent_path="exports/deep_research_agent",
|
||||
fail_fast=True
|
||||
)
|
||||
```
|
||||
|
||||
**Result:**
|
||||
```json
|
||||
{
|
||||
"overall_passed": false,
|
||||
"summary": {"total": 5, "passed": 4, "failed": 1, "pass_rate": "80.0%"},
|
||||
"failures": [
|
||||
{"test_name": "test_success_citation_coverage", "details": "AssertionError: Report lacks citations"}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
Source diversity now passes. Citation coverage still fails.
|
||||
|
||||
---
|
||||
|
||||
## Phase 3: Analyze (Iteration 2)
|
||||
|
||||
```python
|
||||
debug_test(
|
||||
goal_id="rigorous-interactive-research",
|
||||
test_name="test_success_citation_coverage",
|
||||
agent_path="exports/deep_research_agent"
|
||||
)
|
||||
# Category: ASSERTION_FAILURE — Report lacks citations
|
||||
|
||||
# Check what the report node produced
|
||||
list_agent_sessions(
|
||||
agent_work_dir="~/.hive/agents/deep_research_agent",
|
||||
status="completed",
|
||||
limit=1
|
||||
)
|
||||
# → session_20260209_151500_def67890
|
||||
|
||||
get_agent_session_memory(
|
||||
agent_work_dir="~/.hive/agents/deep_research_agent",
|
||||
session_id="session_20260209_151500_def67890",
|
||||
key="report"
|
||||
)
|
||||
# → Report text exists but uses no numbered references.
|
||||
# → Sources are in memory but report node doesn't cite them.
|
||||
```
|
||||
|
||||
**Root cause:** The report node's prompt doesn't instruct the LLM to include numbered citations.
|
||||
|
||||
---
|
||||
|
||||
## Phase 4: Fix (Iteration 2)
|
||||
|
||||
```python
|
||||
Edit(
|
||||
file_path="exports/deep_research_agent/nodes/__init__.py",
|
||||
old_string='system_prompt="Write a comprehensive report based on the research findings."',
|
||||
new_string='system_prompt="Write a comprehensive report based on the research findings. You MUST include numbered citations [1], [2], etc. for every factual claim. At the end, include a References section listing all sources with their URLs. Every claim must be traceable to a specific source."'
|
||||
)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Phase 5: Resume (Iteration 2)
|
||||
|
||||
The fix is to the `report` node (the last node). To demonstrate checkpoint recovery, run via CLI:
|
||||
|
||||
```bash
|
||||
# Run via CLI to get checkpoints
|
||||
uv run hive run exports/deep_research_agent --input '{"topic": "climate change effects"}'
|
||||
|
||||
# After it runs, find the clean checkpoint before report
|
||||
list_agent_checkpoints(
|
||||
agent_work_dir="~/.hive/agents/deep_research_agent",
|
||||
session_id="session_20260209_152000_ghi34567",
|
||||
is_clean="true"
|
||||
)
|
||||
# → cp_node_complete_review_152100 (after review, before report)
|
||||
|
||||
# Resume — skips intake, research, review entirely
|
||||
uv run hive run exports/deep_research_agent \
|
||||
--resume-session session_20260209_152000_ghi34567 \
|
||||
--checkpoint cp_node_complete_review_152100
|
||||
```
|
||||
|
||||
Only the `report` node re-runs with the fixed prompt, using research data from the checkpoint.
|
||||
|
||||
---
|
||||
|
||||
## Phase 6: Final Verification
|
||||
|
||||
```python
|
||||
run_tests(
|
||||
goal_id="rigorous-interactive-research",
|
||||
agent_path="exports/deep_research_agent"
|
||||
)
|
||||
```
|
||||
|
||||
**Result:**
|
||||
```json
|
||||
{
|
||||
"overall_passed": true,
|
||||
"summary": {"total": 5, "passed": 5, "failed": 0, "pass_rate": "100.0%"}
|
||||
}
|
||||
```
|
||||
|
||||
All tests pass.
|
||||
|
||||
---
|
||||
|
||||
## Summary
|
||||
|
||||
| Iteration | Failure | Root Cause | Fix | Recovery |
|
||||
|-----------|---------|------------|-----|----------|
|
||||
| 1 | Source diversity (2 < 5) | Research prompt too vague | Added "at least 5 sources" to prompt | Re-run (no checkpoints) |
|
||||
| 2 | No citations in report | Report prompt lacks citation instructions | Added citation requirements | Checkpoint resume (skipped 3 nodes) |
|
||||
|
||||
**Key takeaways:**
|
||||
- Phase 3 analysis (session memory + L3 logs) identified root causes without guessing
|
||||
- Checkpoint recovery in iteration 2 saved time by skipping 3 expensive nodes
|
||||
- Final `run_tests` confirms all scenarios pass end-to-end
|
||||
@@ -1,32 +1,53 @@
|
||||
---
|
||||
name: agent-workflow
|
||||
description: Complete workflow for building, implementing, and testing goal-driven agents. Orchestrates building-agents-* and testing-agent skills. Use when starting a new agent project, unsure which skill to use, or need end-to-end guidance.
|
||||
name: hive
|
||||
description: Complete workflow for building, implementing, and testing goal-driven agents. Orchestrates hive-* skills. Use when starting a new agent project, unsure which skill to use, or need end-to-end guidance.
|
||||
license: Apache-2.0
|
||||
metadata:
|
||||
author: hive
|
||||
version: "2.0"
|
||||
type: workflow-orchestrator
|
||||
orchestrates:
|
||||
- building-agents-core
|
||||
- building-agents-construction
|
||||
- building-agents-patterns
|
||||
- testing-agent
|
||||
- setup-credentials
|
||||
- hive-concepts
|
||||
- hive-create
|
||||
- hive-patterns
|
||||
- hive-test
|
||||
- hive-credentials
|
||||
- hive-debugger
|
||||
---
|
||||
|
||||
# Agent Development Workflow
|
||||
|
||||
**THIS IS AN EXECUTABLE WORKFLOW. DO NOT explore the codebase or read source files. ROUTE to the correct skill IMMEDIATELY.**
|
||||
|
||||
When this skill is loaded, **ALWAYS use the AskUserQuestion tool** to present options:
|
||||
|
||||
```
|
||||
Use AskUserQuestion with these options:
|
||||
- "Build a new agent" → Then invoke /hive-create
|
||||
- "Test an existing agent" → Then invoke /hive-test
|
||||
- "Learn agent concepts" → Then invoke /hive-concepts
|
||||
- "Optimize agent design" → Then invoke /hive-patterns
|
||||
- "Set up credentials" → Then invoke /hive-credentials
|
||||
- "Debug a failing agent" → Then invoke /hive-debugger
|
||||
- "Other" (please describe what you want to achieve)
|
||||
```
|
||||
|
||||
**DO NOT:** Read source files, explore the codebase, search for code, or do any investigation before routing. The sub-skills handle all of that.
|
||||
|
||||
---
|
||||
|
||||
Complete Standard Operating Procedure (SOP) for building production-ready goal-driven agents.
|
||||
|
||||
## Overview
|
||||
|
||||
This workflow orchestrates specialized skills to take you from initial concept to production-ready agent:
|
||||
|
||||
1. **Understand Concepts** → `/building-agents-core` (optional)
|
||||
2. **Build Structure** → `/building-agents-construction`
|
||||
3. **Optimize Design** → `/building-agents-patterns` (optional)
|
||||
4. **Setup Credentials** → `/setup-credentials` (if agent uses tools requiring API keys)
|
||||
5. **Test & Validate** → `/testing-agent`
|
||||
1. **Understand Concepts** → `/hive-concepts` (optional)
|
||||
2. **Build Structure** → `/hive-create`
|
||||
3. **Optimize Design** → `/hive-patterns` (optional)
|
||||
4. **Setup Credentials** → `/hive-credentials` (if agent uses tools requiring API keys)
|
||||
5. **Test & Validate** → `/hive-test`
|
||||
6. **Debug Issues** → `/hive-debugger` (if agent fails at runtime)
|
||||
|
||||
## When to Use This Workflow
|
||||
|
||||
@@ -37,26 +58,26 @@ Use this meta-skill when:
|
||||
- Want consistent, repeatable agent builds
|
||||
|
||||
**Skip this workflow** if:
|
||||
- You only need to test an existing agent → use `/testing-agent` directly
|
||||
- You only need to test an existing agent → use `/hive-test` directly
|
||||
- You know exactly which phase you're in → use specific skill directly
|
||||
|
||||
## Quick Decision Tree
|
||||
|
||||
```
|
||||
"Need to understand agent concepts" → building-agents-core
|
||||
"Build a new agent" → building-agents-construction
|
||||
"Optimize my agent design" → building-agents-patterns
|
||||
"Need client-facing nodes or feedback loops" → building-agents-patterns
|
||||
"Set up API keys for my agent" → setup-credentials
|
||||
"Test my agent" → testing-agent
|
||||
"Need to understand agent concepts" → hive-concepts
|
||||
"Build a new agent" → hive-create
|
||||
"Optimize my agent design" → hive-patterns
|
||||
"Need client-facing nodes or feedback loops" → hive-patterns
|
||||
"Set up API keys for my agent" → hive-credentials
|
||||
"Test my agent" → hive-test
|
||||
"My agent is failing/stuck/has errors" → hive-debugger
|
||||
"Not sure what I need" → Read phases below, then decide
|
||||
"Agent has structure but needs implementation" → See agent directory STATUS.md
|
||||
```
|
||||
|
||||
## Phase 0: Understand Concepts (Optional)
|
||||
|
||||
**Duration**: 5-10 minutes
|
||||
**Skill**: `/building-agents-core`
|
||||
**Skill**: `/hive-concepts`
|
||||
**Input**: Questions about agent architecture
|
||||
|
||||
### When to Use
|
||||
@@ -77,9 +98,8 @@ Use this meta-skill when:
|
||||
|
||||
## Phase 1: Build Agent Structure
|
||||
|
||||
**Duration**: 15-30 minutes
|
||||
**Skill**: `/building-agents-construction`
|
||||
**Input**: User requirements ("Build an agent that...")
|
||||
**Skill**: `/hive-create`
|
||||
**Input**: User requirements ("Build an agent that...") or a template to start from
|
||||
|
||||
### What This Phase Does
|
||||
|
||||
@@ -121,7 +141,7 @@ You're ready for Phase 2 when:
|
||||
|
||||
### Common Outputs
|
||||
|
||||
The building-agents-construction skill produces:
|
||||
The hive-create skill produces:
|
||||
```
|
||||
exports/agent_name/
|
||||
├── __init__.py (package exports)
|
||||
@@ -141,15 +161,14 @@ exports/agent_name/
|
||||
→ You may need to add Python functions or MCP tools (not covered by current skills)
|
||||
|
||||
**If want to optimize design:**
|
||||
→ Proceed to Phase 1.5 (building-agents-patterns)
|
||||
→ Proceed to Phase 1.5 (hive-patterns)
|
||||
|
||||
**If ready to test:**
|
||||
→ Proceed to Phase 2
|
||||
|
||||
## Phase 1.5: Optimize Design (Optional)
|
||||
|
||||
**Duration**: 10-15 minutes
|
||||
**Skill**: `/building-agents-patterns`
|
||||
**Skill**: `/hive-patterns`
|
||||
**Input**: Completed agent structure
|
||||
|
||||
### When to Use
|
||||
@@ -173,22 +192,21 @@ exports/agent_name/
|
||||
|
||||
## Phase 2: Test & Validate
|
||||
|
||||
**Duration**: 20-40 minutes
|
||||
**Skill**: `/testing-agent`
|
||||
**Skill**: `/hive-test`
|
||||
**Input**: Working agent from Phase 1
|
||||
|
||||
### What This Phase Does
|
||||
|
||||
Creates comprehensive test suite:
|
||||
- Constraint tests (verify hard requirements)
|
||||
- Success criteria tests (measure goal achievement)
|
||||
- Edge case tests (handle failures gracefully)
|
||||
- Integration tests (end-to-end workflows)
|
||||
Guides the creation and execution of a comprehensive test suite:
|
||||
- Constraint tests
|
||||
- Success criteria tests
|
||||
- Edge case tests
|
||||
- Integration tests
|
||||
|
||||
### Process
|
||||
|
||||
1. **Analyze agent** - Read goal, constraints, success criteria
|
||||
2. **Generate tests** - Create pytest files in `exports/agent_name/tests/`
|
||||
2. **Generate tests** - The calling agent writes pytest files in `exports/agent_name/tests/` using hive-test guidelines and templates
|
||||
3. **User approval** - Review and approve each test
|
||||
4. **Run evaluation** - Execute tests and collect results
|
||||
5. **Debug failures** - Identify and fix issues
|
||||
@@ -251,9 +269,9 @@ You're done when:
|
||||
|
||||
```
|
||||
User: "Build an agent that monitors files"
|
||||
→ Use /building-agents-construction
|
||||
→ Use /hive-create
|
||||
→ Agent structure created
|
||||
→ Use /testing-agent
|
||||
→ Use /hive-test
|
||||
→ Tests created and passing
|
||||
→ Done: Production-ready agent
|
||||
```
|
||||
@@ -262,19 +280,32 @@ User: "Build an agent that monitors files"
|
||||
|
||||
```
|
||||
User: "Build an agent (first time)"
|
||||
→ Use /building-agents-core (understand concepts)
|
||||
→ Use /building-agents-construction (build structure)
|
||||
→ Use /building-agents-patterns (optimize design)
|
||||
→ Use /testing-agent (validate)
|
||||
→ Use /hive-concepts (understand concepts)
|
||||
→ Use /hive-create (build structure)
|
||||
→ Use /hive-patterns (optimize design)
|
||||
→ Use /hive-test (validate)
|
||||
→ Done: Production-ready agent
|
||||
```
|
||||
|
||||
### Pattern 1c: Build from Template
|
||||
|
||||
```
|
||||
User: "Build an agent based on the deep research template"
|
||||
→ Use /hive-create
|
||||
→ Select "From a template" path
|
||||
→ Pick template, name new agent
|
||||
→ Review/modify goal, nodes, graph
|
||||
→ Agent exported with customizations
|
||||
→ Use /hive-test
|
||||
→ Done: Customized agent
|
||||
```
|
||||
|
||||
### Pattern 2: Test Existing Agent
|
||||
|
||||
```
|
||||
User: "Test my agent at exports/my_agent"
|
||||
→ Skip Phase 1
|
||||
→ Use /testing-agent directly
|
||||
→ Use /hive-test directly
|
||||
→ Tests created
|
||||
→ Done: Validated agent
|
||||
```
|
||||
@@ -283,10 +314,10 @@ User: "Test my agent at exports/my_agent"
|
||||
|
||||
```
|
||||
User: "Build an agent"
|
||||
→ Use /building-agents-construction (Phase 1)
|
||||
→ Use /hive-create (Phase 1)
|
||||
→ Implementation needed (see STATUS.md)
|
||||
→ [User implements functions]
|
||||
→ Use /testing-agent (Phase 2)
|
||||
→ Use /hive-test (Phase 2)
|
||||
→ Tests reveal bugs
|
||||
→ [Fix bugs manually]
|
||||
→ Re-run tests
|
||||
@@ -297,45 +328,57 @@ User: "Build an agent"
|
||||
|
||||
```
|
||||
User: "Build an agent with human review and feedback loops"
|
||||
→ Use /building-agents-core (learn event loop, client-facing nodes)
|
||||
→ Use /building-agents-construction (build structure with feedback edges)
|
||||
→ Use /building-agents-patterns (implement client-facing + feedback patterns)
|
||||
→ Use /testing-agent (validate review flows and edge routing)
|
||||
→ Use /hive-concepts (learn event loop, client-facing nodes)
|
||||
→ Use /hive-create (build structure with feedback edges)
|
||||
→ Use /hive-patterns (implement client-facing + feedback patterns)
|
||||
→ Use /hive-test (validate review flows and edge routing)
|
||||
→ Done: Agent with HITL checkpoints and review loops
|
||||
```
|
||||
|
||||
## Skill Dependencies
|
||||
|
||||
```
|
||||
agent-workflow (meta-skill)
|
||||
hive (meta-skill)
|
||||
│
|
||||
├── building-agents-core (foundational)
|
||||
├── hive-concepts (foundational)
|
||||
│ ├── Architecture concepts (event loop, judges)
|
||||
│ ├── Node types (event_loop, function)
|
||||
│ ├── Edge routing and priority
|
||||
│ ├── Tool discovery procedures
|
||||
│ └── Workflow overview
|
||||
│
|
||||
├── building-agents-construction (procedural)
|
||||
├── hive-create (procedural)
|
||||
│ ├── Creates package structure
|
||||
│ ├── Defines goal
|
||||
│ ├── Adds nodes (event_loop, function)
|
||||
│ ├── Connects edges with priority routing
|
||||
│ ├── Finalizes agent class
|
||||
│ └── Requires: building-agents-core
|
||||
│ └── Requires: hive-concepts
|
||||
│
|
||||
├── building-agents-patterns (reference)
|
||||
├── hive-patterns (reference)
|
||||
│ ├── Client-facing interaction patterns
|
||||
│ ├── Feedback edges and review loops
|
||||
│ ├── Judge patterns (implicit, SchemaJudge)
|
||||
│ ├── Fan-out/fan-in parallel execution
|
||||
│ └── Context management and anti-patterns
|
||||
│
|
||||
└── testing-agent
|
||||
├── Reads agent goal
|
||||
├── Generates tests
|
||||
├── Runs evaluation
|
||||
└── Reports results
|
||||
├── hive-credentials (utility)
|
||||
│ ├── Detects missing credentials
|
||||
│ ├── Offers auth method choices (Aden OAuth, direct API key)
|
||||
│ ├── Stores securely in ~/.hive/credentials
|
||||
│ └── Validates with health checks
|
||||
│
|
||||
├── hive-test (validation)
|
||||
│ ├── Reads agent goal
|
||||
│ ├── Generates tests
|
||||
│ ├── Runs evaluation
|
||||
│ └── Reports results
|
||||
│
|
||||
└── hive-debugger (troubleshooting)
|
||||
├── Monitors runtime logs (L1/L2/L3)
|
||||
├── Identifies retry loops, tool failures
|
||||
├── Categorizes issues (10 categories)
|
||||
└── Provides fix recommendations
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
@@ -351,7 +394,7 @@ agent-workflow (meta-skill)
|
||||
|
||||
- Check for STATUS.md or IMPLEMENTATION_GUIDE.md in agent directory
|
||||
- Implementation may be needed (Python functions or MCP tools)
|
||||
- This is expected - building-agents-construction creates structure, not implementation
|
||||
- This is expected - hive-create creates structure, not implementation
|
||||
- See implementation guide for completion options
|
||||
|
||||
### "Tests are failing"
|
||||
@@ -359,9 +402,16 @@ agent-workflow (meta-skill)
|
||||
- Review test output for specific failures
|
||||
- Check agent goal and success criteria
|
||||
- Verify constraints are met
|
||||
- Use `/testing-agent` to debug and iterate
|
||||
- Use `/hive-test` to debug and iterate
|
||||
- Fix agent code and re-run tests
|
||||
|
||||
### "Agent is failing at runtime"
|
||||
|
||||
- Use `/hive-debugger` to analyze runtime logs
|
||||
- The debugger identifies retry loops, tool failures, and stalled execution
|
||||
- Get actionable fix recommendations with code changes
|
||||
- Monitor the agent in real-time during TUI sessions
|
||||
|
||||
### "Not sure which phase I'm in"
|
||||
|
||||
Run these checks:
|
||||
@@ -420,10 +470,10 @@ You're done with the workflow when:
|
||||
|
||||
## Additional Resources
|
||||
|
||||
- **building-agents-core**: See `.claude/skills/building-agents-core/SKILL.md`
|
||||
- **building-agents-construction**: See `.claude/skills/building-agents-construction/SKILL.md`
|
||||
- **building-agents-patterns**: See `.claude/skills/building-agents-patterns/SKILL.md`
|
||||
- **testing-agent**: See `.claude/skills/testing-agent/SKILL.md`
|
||||
- **hive-concepts**: See `.claude/skills/hive-concepts/SKILL.md`
|
||||
- **hive-create**: See `.claude/skills/hive-create/SKILL.md`
|
||||
- **hive-patterns**: See `.claude/skills/hive-patterns/SKILL.md`
|
||||
- **hive-test**: See `.claude/skills/hive-test/SKILL.md`
|
||||
- **Agent framework docs**: See `core/README.md`
|
||||
- **Example agents**: See `exports/` directory
|
||||
|
||||
@@ -431,36 +481,46 @@ You're done with the workflow when:
|
||||
|
||||
This workflow provides a proven path from concept to production-ready agent:
|
||||
|
||||
1. **Learn** with `/building-agents-core` → Understand fundamentals (optional)
|
||||
2. **Build** with `/building-agents-construction` → Get validated structure
|
||||
3. **Optimize** with `/building-agents-patterns` → Apply best practices (optional)
|
||||
4. **Test** with `/testing-agent` → Get verified functionality
|
||||
1. **Learn** with `/hive-concepts` → Understand fundamentals (optional)
|
||||
2. **Build** with `/hive-create` → Get validated structure
|
||||
3. **Optimize** with `/hive-patterns` → Apply best practices (optional)
|
||||
4. **Configure** with `/hive-credentials` → Set up API keys (if needed)
|
||||
5. **Test** with `/hive-test` → Get verified functionality
|
||||
6. **Debug** with `/hive-debugger` → Fix runtime issues (if needed)
|
||||
|
||||
The workflow is **flexible** - skip phases as needed, iterate freely, and adapt to your specific requirements. The goal is **production-ready agents** built with **consistent, repeatable processes**.
|
||||
|
||||
## Skill Selection Guide
|
||||
|
||||
**Choose building-agents-core when:**
|
||||
**Choose hive-concepts when:**
|
||||
- First time building agents
|
||||
- Need to understand event loop architecture
|
||||
- Validating tool availability
|
||||
- Learning about node types, edges, and judges
|
||||
|
||||
**Choose building-agents-construction when:**
|
||||
**Choose hive-create when:**
|
||||
- Actually building an agent
|
||||
- Have clear requirements
|
||||
- Ready to write code
|
||||
- Want step-by-step guidance
|
||||
- Want to start from an existing template and customize it
|
||||
|
||||
**Choose building-agents-patterns when:**
|
||||
**Choose hive-patterns when:**
|
||||
- Agent structure complete
|
||||
- Need client-facing nodes or feedback edges
|
||||
- Implementing review loops or fan-out/fan-in
|
||||
- Want judge patterns or context management
|
||||
- Want best practices
|
||||
|
||||
**Choose testing-agent when:**
|
||||
**Choose hive-test when:**
|
||||
- Agent structure complete
|
||||
- Ready to validate functionality
|
||||
- Need comprehensive test coverage
|
||||
- Testing feedback loops, output keys, or fan-out
|
||||
|
||||
**Choose hive-debugger when:**
|
||||
- Agent is failing or stuck at runtime
|
||||
- Seeing retry loops or escalations
|
||||
- Tool calls are failing
|
||||
- Need to understand why a node isn't completing
|
||||
- Want real-time monitoring of agent execution
|
||||
+3
-3
@@ -1,6 +1,6 @@
|
||||
# Example: File Monitor Agent
|
||||
|
||||
This example shows the complete agent-workflow in action for building a file monitoring agent.
|
||||
This example shows the complete /hive workflow in action for building a file monitoring agent.
|
||||
|
||||
## Initial Request
|
||||
|
||||
@@ -12,7 +12,7 @@ User: "Build an agent that monitors ~/Downloads and copies new files to ~/Docume
|
||||
|
||||
### Step 1: Create Structure
|
||||
|
||||
Agent invokes `/building-agents` skill and:
|
||||
Agent invokes `/hive-create` skill and:
|
||||
|
||||
1. Creates `exports/file_monitor_agent/` package
|
||||
2. Writes skeleton files (__init__.py, __main__.py, agent.py, etc.)
|
||||
@@ -107,7 +107,7 @@ exports/file_monitor_agent/
|
||||
|
||||
### Step 1: Analyze Agent
|
||||
|
||||
Agent invokes `/testing-agent` skill and:
|
||||
Agent invokes `/hive-test` skill and:
|
||||
|
||||
1. Reads goal from `exports/file_monitor_agent/agent.py`
|
||||
2. Identifies 4 success criteria to test
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,351 +0,0 @@
|
||||
# Example: Testing a YouTube Research Agent
|
||||
|
||||
This example walks through testing a YouTube research agent that finds relevant videos based on a topic.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- Agent built with building-agents skill at `exports/youtube-research/`
|
||||
- Goal defined with success criteria and constraints
|
||||
|
||||
## Step 1: Load the Goal
|
||||
|
||||
First, load the goal that was defined during the Goal stage:
|
||||
|
||||
```json
|
||||
{
|
||||
"id": "youtube-research",
|
||||
"name": "YouTube Research Agent",
|
||||
"description": "Find relevant YouTube videos on a given topic",
|
||||
"success_criteria": [
|
||||
{
|
||||
"id": "find_videos",
|
||||
"description": "Find 3-5 relevant videos",
|
||||
"metric": "video_count",
|
||||
"target": "3-5",
|
||||
"weight": 1.0
|
||||
},
|
||||
{
|
||||
"id": "relevance",
|
||||
"description": "Videos must be relevant to the topic",
|
||||
"metric": "relevance_score",
|
||||
"target": ">0.8",
|
||||
"weight": 0.8
|
||||
}
|
||||
],
|
||||
"constraints": [
|
||||
{
|
||||
"id": "api_limits",
|
||||
"description": "Must not exceed YouTube API rate limits",
|
||||
"constraint_type": "hard",
|
||||
"category": "technical"
|
||||
},
|
||||
{
|
||||
"id": "content_safety",
|
||||
"description": "Must filter out inappropriate content",
|
||||
"constraint_type": "hard",
|
||||
"category": "safety"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
## Step 2: Get Constraint Test Guidelines
|
||||
|
||||
During the Goal stage (or early Eval), get test guidelines for constraints:
|
||||
|
||||
```python
|
||||
result = generate_constraint_tests(
|
||||
goal_id="youtube-research",
|
||||
goal_json='<goal JSON above>',
|
||||
agent_path="exports/youtube-research"
|
||||
)
|
||||
```
|
||||
|
||||
**The result contains guidelines (not generated tests):**
|
||||
- `output_file`: Where to write tests
|
||||
- `file_header`: Imports and fixtures to use
|
||||
- `test_template`: Format for test functions
|
||||
- `constraints_formatted`: The constraints to test
|
||||
- `test_guidelines`: Rules for writing tests
|
||||
|
||||
## Step 3: Write Constraint Tests
|
||||
|
||||
Using the guidelines, write tests directly with the Write tool:
|
||||
|
||||
```python
|
||||
# Write constraint tests using the provided file_header and guidelines
|
||||
Write(
|
||||
file_path="exports/youtube-research/tests/test_constraints.py",
|
||||
content='''
|
||||
"""Constraint tests for youtube-research agent."""
|
||||
|
||||
import os
|
||||
import pytest
|
||||
from exports.youtube_research import default_agent
|
||||
|
||||
|
||||
pytestmark = pytest.mark.skipif(
|
||||
not os.environ.get("ANTHROPIC_API_KEY") and not os.environ.get("MOCK_MODE"),
|
||||
reason="API key required for real testing."
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_constraint_api_limits_respected():
|
||||
"""Verify API rate limits are not exceeded."""
|
||||
import time
|
||||
mock_mode = bool(os.environ.get("MOCK_MODE"))
|
||||
|
||||
for i in range(10):
|
||||
result = await default_agent.run({"topic": f"test_{i}"}, mock_mode=mock_mode)
|
||||
time.sleep(0.1)
|
||||
|
||||
# Should complete without rate limit errors
|
||||
assert "rate limit" not in str(result).lower()
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_constraint_content_safety_filter():
|
||||
"""Verify inappropriate content is filtered."""
|
||||
mock_mode = bool(os.environ.get("MOCK_MODE"))
|
||||
result = await default_agent.run({"topic": "general topic"}, mock_mode=mock_mode)
|
||||
|
||||
for video in result.videos:
|
||||
assert video.safe_for_work is True
|
||||
assert video.age_restricted is False
|
||||
'''
|
||||
)
|
||||
```
|
||||
|
||||
## Step 4: Get Success Criteria Test Guidelines
|
||||
|
||||
After the agent is built, get success criteria test guidelines:
|
||||
|
||||
```python
|
||||
result = generate_success_tests(
|
||||
goal_id="youtube-research",
|
||||
goal_json='<goal JSON>',
|
||||
node_names="search_node,filter_node,rank_node,format_node",
|
||||
tool_names="youtube_search,video_details,channel_info",
|
||||
agent_path="exports/youtube-research"
|
||||
)
|
||||
```
|
||||
|
||||
## Step 5: Write Success Criteria Tests
|
||||
|
||||
Using the guidelines, write success criteria tests:
|
||||
|
||||
```python
|
||||
Write(
|
||||
file_path="exports/youtube-research/tests/test_success_criteria.py",
|
||||
content='''
|
||||
"""Success criteria tests for youtube-research agent."""
|
||||
|
||||
import os
|
||||
import pytest
|
||||
from exports.youtube_research import default_agent
|
||||
|
||||
|
||||
pytestmark = pytest.mark.skipif(
|
||||
not os.environ.get("ANTHROPIC_API_KEY") and not os.environ.get("MOCK_MODE"),
|
||||
reason="API key required for real testing."
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_find_videos_happy_path():
|
||||
"""Test finding videos for a common topic."""
|
||||
mock_mode = bool(os.environ.get("MOCK_MODE"))
|
||||
result = await default_agent.run({"topic": "machine learning"}, mock_mode=mock_mode)
|
||||
|
||||
assert result.success
|
||||
assert 3 <= len(result.videos) <= 5
|
||||
assert all(v.title for v in result.videos)
|
||||
assert all(v.video_id for v in result.videos)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_find_videos_minimum_boundary():
|
||||
"""Test at minimum threshold (3 videos)."""
|
||||
mock_mode = bool(os.environ.get("MOCK_MODE"))
|
||||
result = await default_agent.run({"topic": "niche topic xyz"}, mock_mode=mock_mode)
|
||||
|
||||
assert len(result.videos) >= 3
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_relevance_score_threshold():
|
||||
"""Test relevance scoring meets threshold."""
|
||||
mock_mode = bool(os.environ.get("MOCK_MODE"))
|
||||
result = await default_agent.run({"topic": "python programming"}, mock_mode=mock_mode)
|
||||
|
||||
for video in result.videos:
|
||||
assert video.relevance_score > 0.8
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_find_videos_no_results_graceful():
|
||||
"""Test graceful handling of no results."""
|
||||
mock_mode = bool(os.environ.get("MOCK_MODE"))
|
||||
result = await default_agent.run({"topic": "xyznonexistent123"}, mock_mode=mock_mode)
|
||||
|
||||
# Should not crash, return empty or message
|
||||
assert result.videos == [] or result.message
|
||||
'''
|
||||
)
|
||||
```
|
||||
|
||||
## Step 6: Run All Tests
|
||||
|
||||
Execute all tests:
|
||||
|
||||
```python
|
||||
result = run_tests(
|
||||
goal_id="youtube-research",
|
||||
agent_path="exports/youtube-research",
|
||||
test_types='["all"]',
|
||||
parallel=4
|
||||
)
|
||||
```
|
||||
|
||||
**Results:**
|
||||
|
||||
```json
|
||||
{
|
||||
"goal_id": "youtube-research",
|
||||
"overall_passed": false,
|
||||
"summary": {
|
||||
"total": 6,
|
||||
"passed": 5,
|
||||
"failed": 1,
|
||||
"pass_rate": "83.3%"
|
||||
},
|
||||
"duration_ms": 4521,
|
||||
"results": [
|
||||
{"test_id": "test_constraint_api_001", "passed": true, "duration_ms": 1234},
|
||||
{"test_id": "test_constraint_content_001", "passed": true, "duration_ms": 456},
|
||||
{"test_id": "test_success_001", "passed": true, "duration_ms": 789},
|
||||
{"test_id": "test_success_002", "passed": true, "duration_ms": 654},
|
||||
{"test_id": "test_success_003", "passed": true, "duration_ms": 543},
|
||||
{"test_id": "test_success_004", "passed": false, "duration_ms": 845,
|
||||
"error_category": "IMPLEMENTATION_ERROR",
|
||||
"error_message": "TypeError: 'NoneType' object has no attribute 'videos'"}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
## Step 7: Debug the Failed Test
|
||||
|
||||
```python
|
||||
result = debug_test(
|
||||
goal_id="youtube-research",
|
||||
test_name="test_find_videos_no_results_graceful",
|
||||
agent_path="exports/youtube-research"
|
||||
)
|
||||
```
|
||||
|
||||
**Debug Output:**
|
||||
|
||||
```json
|
||||
{
|
||||
"test_id": "test_success_004",
|
||||
"test_name": "test_find_videos_no_results_graceful",
|
||||
"input": {"topic": "xyznonexistent123"},
|
||||
"expected": "Empty list or message",
|
||||
"actual": {"error": "TypeError: 'NoneType' object has no attribute 'videos'"},
|
||||
"passed": false,
|
||||
"error_message": "TypeError: 'NoneType' object has no attribute 'videos'",
|
||||
"error_category": "IMPLEMENTATION_ERROR",
|
||||
"stack_trace": "Traceback (most recent call last):\n File \"filter_node.py\", line 42\n for video in result.videos:\nTypeError: 'NoneType' object has no attribute 'videos'",
|
||||
"logs": [
|
||||
{"timestamp": "2026-01-20T10:00:01", "node": "search_node", "level": "INFO", "msg": "Searching for: xyznonexistent123"},
|
||||
{"timestamp": "2026-01-20T10:00:02", "node": "search_node", "level": "WARNING", "msg": "No results found"},
|
||||
{"timestamp": "2026-01-20T10:00:02", "node": "filter_node", "level": "ERROR", "msg": "NoneType error"}
|
||||
],
|
||||
"runtime_data": {
|
||||
"execution_path": ["start", "search_node", "filter_node"],
|
||||
"node_outputs": {
|
||||
"search_node": null
|
||||
}
|
||||
},
|
||||
"suggested_fix": "Add null check in filter_node before accessing .videos attribute",
|
||||
"iteration_guidance": {
|
||||
"stage": "Agent",
|
||||
"action": "Fix the code in nodes/edges",
|
||||
"restart_required": false,
|
||||
"description": "The goal is correct, but filter_node doesn't handle null results from search_node."
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Step 8: Iterate Based on Category
|
||||
|
||||
Since this is an **IMPLEMENTATION_ERROR**, we:
|
||||
|
||||
1. **Don't restart** the Goal → Agent → Eval flow
|
||||
2. **Fix the agent** using building-agents skill:
|
||||
- Modify `filter_node` to handle null results
|
||||
3. **Re-run Eval** (tests only)
|
||||
|
||||
### Fix in building-agents:
|
||||
|
||||
```python
|
||||
# Update the filter_node to handle null
|
||||
add_node(
|
||||
node_id="filter_node",
|
||||
name="Filter Node",
|
||||
description="Filter and rank videos",
|
||||
node_type="function",
|
||||
input_keys=["search_results"],
|
||||
output_keys=["filtered_videos"],
|
||||
system_prompt="""
|
||||
Filter videos by relevance.
|
||||
IMPORTANT: Handle case where search_results is None or empty.
|
||||
Return empty list if no results.
|
||||
"""
|
||||
)
|
||||
```
|
||||
|
||||
### Re-export and re-test:
|
||||
|
||||
```python
|
||||
# Re-export the fixed agent
|
||||
export_graph(path="exports/youtube-research")
|
||||
|
||||
# Re-run tests
|
||||
result = run_tests(
|
||||
goal_id="youtube-research",
|
||||
agent_path="exports/youtube-research",
|
||||
test_types='["all"]'
|
||||
)
|
||||
```
|
||||
|
||||
**Updated Results:**
|
||||
|
||||
```json
|
||||
{
|
||||
"goal_id": "youtube-research",
|
||||
"overall_passed": true,
|
||||
"summary": {
|
||||
"total": 6,
|
||||
"passed": 6,
|
||||
"failed": 0,
|
||||
"pass_rate": "100.0%"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Summary
|
||||
|
||||
1. **Got guidelines** for constraint tests during Goal stage
|
||||
2. **Wrote** constraint tests using Write tool
|
||||
3. **Got guidelines** for success criteria tests during Eval stage
|
||||
4. **Wrote** success criteria tests using Write tool
|
||||
5. **Ran** tests in parallel
|
||||
6. **Debugged** the one failure
|
||||
7. **Categorized** as IMPLEMENTATION_ERROR
|
||||
8. **Fixed** the agent (not the goal)
|
||||
9. **Re-ran** Eval only (didn't restart full flow)
|
||||
10. **Passed** all tests
|
||||
|
||||
The agent is now validated and ready for production use.
|
||||
@@ -0,0 +1,7 @@
|
||||
# Project-level Codex config for Hive.
|
||||
# Keep this file minimal: MCP connectivity + skill discovery.
|
||||
|
||||
[mcp_servers.agent-builder]
|
||||
command = "uv"
|
||||
args = ["run", "--directory", "core", "-m", "framework.mcp.agent_builder_server"]
|
||||
cwd = "."
|
||||
@@ -1 +0,0 @@
|
||||
../../.claude/skills/agent-workflow
|
||||
@@ -1 +0,0 @@
|
||||
../../.claude/skills/building-agents-construction
|
||||
@@ -1 +0,0 @@
|
||||
../../.claude/skills/building-agents-core
|
||||
@@ -1 +0,0 @@
|
||||
../../.claude/skills/building-agents-patterns
|
||||
Symlink
+1
@@ -0,0 +1 @@
|
||||
../../.claude/skills/hive
|
||||
Symlink
+1
@@ -0,0 +1 @@
|
||||
../../.claude/skills/hive-concepts
|
||||
Symlink
+1
@@ -0,0 +1 @@
|
||||
../../.claude/skills/hive-create
|
||||
Symlink
+1
@@ -0,0 +1 @@
|
||||
../../.claude/skills/hive-credentials
|
||||
Symlink
+1
@@ -0,0 +1 @@
|
||||
../../.claude/skills/hive-patterns
|
||||
Symlink
+1
@@ -0,0 +1 @@
|
||||
../../.claude/skills/hive-test
|
||||
@@ -1 +0,0 @@
|
||||
../../.claude/skills/testing-agent
|
||||
@@ -46,6 +46,7 @@ coverage/
|
||||
|
||||
# TypeScript
|
||||
*.tsbuildinfo
|
||||
vite.config.d.ts
|
||||
|
||||
# Python
|
||||
__pycache__/
|
||||
@@ -74,3 +75,6 @@ exports/*
|
||||
|
||||
docs/github-issues/*
|
||||
core/tests/*dumps/*
|
||||
|
||||
screenshots/*
|
||||
|
||||
|
||||
@@ -4,11 +4,6 @@
|
||||
"command": "uv",
|
||||
"args": ["run", "-m", "framework.mcp.agent_builder_server"],
|
||||
"cwd": "core"
|
||||
},
|
||||
"tools": {
|
||||
"command": "uv",
|
||||
"args": ["run", "mcp_server.py", "--stdio"],
|
||||
"cwd": "tools"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,30 @@
|
||||
{
|
||||
"mcpServers": {
|
||||
"agent-builder": {
|
||||
"command": "uv",
|
||||
"args": [
|
||||
"run",
|
||||
"python",
|
||||
"-m",
|
||||
"framework.mcp.agent_builder_server"
|
||||
],
|
||||
"cwd": "core",
|
||||
"env": {
|
||||
"PYTHONPATH": "../tools/src"
|
||||
}
|
||||
},
|
||||
"tools": {
|
||||
"command": "uv",
|
||||
"args": [
|
||||
"run",
|
||||
"python",
|
||||
"mcp_server.py",
|
||||
"--stdio"
|
||||
],
|
||||
"cwd": "tools",
|
||||
"env": {
|
||||
"PYTHONPATH": "src"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Symlink
+1
@@ -0,0 +1 @@
|
||||
../../.claude/skills/hive
|
||||
Symlink
+1
@@ -0,0 +1 @@
|
||||
../../.claude/skills/hive-concepts
|
||||
Symlink
+1
@@ -0,0 +1 @@
|
||||
../../.claude/skills/hive-create
|
||||
+1
@@ -0,0 +1 @@
|
||||
../../.claude/skills/hive-credentials
|
||||
Symlink
+1
@@ -0,0 +1 @@
|
||||
../../.claude/skills/hive-debugger
|
||||
Symlink
+1
@@ -0,0 +1 @@
|
||||
../../.claude/skills/hive-patterns
|
||||
Symlink
+1
@@ -0,0 +1 @@
|
||||
../../.claude/skills/hive-test
|
||||
Symlink
+1
@@ -0,0 +1 @@
|
||||
../../.claude/skills/triage-issue
|
||||
+194
-28
@@ -1,41 +1,207 @@
|
||||
# Changelog
|
||||
# Release Notes
|
||||
|
||||
All notable changes to this project will be documented in this file.
|
||||
**Release Date:** February 18, 2026
|
||||
**Tag:** v0.5.1
|
||||
|
||||
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
|
||||
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
|
||||
## The Hive Gets a Brain
|
||||
|
||||
## [Unreleased]
|
||||
v0.5.1 is our most ambitious release yet. Hive agents can now **build other agents** -- the new Hive Coder meta-agent writes, tests, and fixes agent packages from natural language. The runtime grows multi-graph support so one session can orchestrate multiple agents simultaneously. The TUI gets a complete overhaul with an in-app agent picker, live streaming, and seamless escalation to the Coder. And we're now provider-agnostic: Claude Code subscriptions, OpenAI-compatible endpoints, and any LiteLLM-supported model work out of the box.
|
||||
|
||||
### Added
|
||||
- Initial project structure
|
||||
- React frontend (honeycomb) with Vite and TypeScript
|
||||
- Node.js backend (hive) with Express and TypeScript
|
||||
- Docker Compose configuration for local development
|
||||
- Configuration system via `config.yaml`
|
||||
- GitHub Actions CI/CD workflows
|
||||
- Comprehensive documentation
|
||||
---
|
||||
|
||||
### Changed
|
||||
- N/A
|
||||
## Highlights
|
||||
|
||||
### Deprecated
|
||||
- N/A
|
||||
### Hive Coder -- The Agent That Builds Agents
|
||||
|
||||
### Removed
|
||||
- N/A
|
||||
A native meta-agent that lives inside the framework at `core/framework/agents/hive_coder/`. Give it a natural-language specification and it produces a complete agent package -- goal definition, node prompts, edge routing, MCP tool wiring, tests, and all boilerplate files.
|
||||
|
||||
```bash
|
||||
# Launch the Coder directly
|
||||
hive code
|
||||
|
||||
### Fixed
|
||||
- tools: Fixed web_scrape tool attempting to parse non-HTML content (PDF, JSON) as HTML (#487)
|
||||
# Or escalate from any running agent (TUI)
|
||||
Ctrl+E # or /coder in chat
|
||||
```
|
||||
|
||||
### Security
|
||||
- N/A
|
||||
The Coder ships with:
|
||||
|
||||
## [0.1.0] - 2025-01-13
|
||||
- **Reference documentation** -- anti-patterns, construction guide, and design patterns baked into its system prompt
|
||||
- **Guardian watchdog** -- an event-driven monitor that catches agent failures and triggers automatic remediation
|
||||
- **Coder Tools MCP server** -- file I/O, fuzzy-match editing, git snapshots, and sandboxed shell execution (`tools/coder_tools_server.py`)
|
||||
- **Test generation** -- structural tests for forever-alive agents that don't hang on `runner.run()`
|
||||
|
||||
### Added
|
||||
- Initial release
|
||||
### Multi-Graph Agent Runtime
|
||||
|
||||
[Unreleased]: https://github.com/adenhq/hive/compare/v0.1.0...HEAD
|
||||
[0.1.0]: https://github.com/adenhq/hive/releases/tag/v0.1.0
|
||||
`AgentRuntime` now supports loading, managing, and switching between multiple agent graphs within a single session. Six new lifecycle tools give agents (and the TUI) full control:
|
||||
|
||||
```python
|
||||
# Load a second agent into the runtime
|
||||
await runtime.add_graph("exports/deep_research_agent")
|
||||
|
||||
# Tools available to agents:
|
||||
# load_agent, unload_agent, start_agent, restart_agent, list_agents, get_user_presence
|
||||
```
|
||||
|
||||
The Hive Coder uses multi-graph internally -- when you escalate from a worker agent, the Coder loads as a separate graph while the worker stays alive in the background.
|
||||
|
||||
### TUI Revamp
|
||||
|
||||
The Terminal UI gets a ground-up rebuild with five major additions:
|
||||
|
||||
- **Agent Picker** (Ctrl+A) -- tabbed modal screen for browsing Your Agents, Framework agents, and Examples with metadata badges (node count, tool count, session count, tags)
|
||||
- **Runtime-optional startup** -- TUI launches without a pre-loaded agent, showing the picker on first open
|
||||
- **Live streaming pane** -- dedicated RichLog widget shows LLM tokens as they arrive, replacing the old one-token-per-line display
|
||||
- **PDF attachments** -- `/attach` and `/detach` commands with native OS file dialog (macOS, Linux, Windows)
|
||||
- **Multi-graph commands** -- `/graphs`, `/graph <id>`, `/load <path>`, `/unload <id>` for managing agent graphs in-session
|
||||
|
||||
### Provider-Agnostic LLM Support
|
||||
|
||||
Hive is no longer Anthropic-only. v0.5.1 adds first-class support for:
|
||||
|
||||
- **Claude Code subscriptions** -- `use_claude_code_subscription: true` in `~/.hive/configuration.json` reads OAuth tokens from `~/.claude/.credentials.json` with automatic refresh
|
||||
- **OpenAI-compatible endpoints** -- `api_base` config routes traffic through any compatible API (Azure OpenAI, vLLM, Ollama, etc.)
|
||||
- **Any LiteLLM model** -- `RuntimeConfig` now passes `api_key`, `api_base`, and `extra_kwargs` through to LiteLLM
|
||||
|
||||
The quickstart script auto-detects Claude Code subscriptions and ZAI Code installations.
|
||||
|
||||
---
|
||||
|
||||
## What's New
|
||||
|
||||
### Architecture & Runtime
|
||||
|
||||
- **Hive Coder meta-agent** -- Natural-language agent builder with reference docs, guardian watchdog, and `hive code` CLI command. (@TimothyZhang7)
|
||||
- **Multi-graph agent sessions** -- `add_graph`/`remove_graph` on AgentRuntime with 6 lifecycle tools (`load_agent`, `unload_agent`, `start_agent`, `restart_agent`, `list_agents`, `get_user_presence`). (@TimothyZhang7)
|
||||
- **Claude Code subscription support** -- OAuth token refresh via `use_claude_code_subscription` config, auto-detection in quickstart, LiteLLM header patching. (@TimothyZhang7)
|
||||
- **OpenAI-compatible endpoint support** -- `api_base` and `extra_kwargs` in `RuntimeConfig` for any OpenAI-compatible API. (@TimothyZhang7)
|
||||
- **Remove deprecated node types** -- Delete `FlexibleGraphExecutor`, `WorkerNode`, `HybridJudge`, `CodeSandbox`, `Plan`, `FunctionNode`, `LLMNode`, `RouterNode`. Deprecated types (`llm_tool_use`, `llm_generate`, `function`, `router`, `human_input`) now raise `RuntimeError` with migration guidance. (@TimothyZhang7)
|
||||
- **Interactive credential setup** -- Guided `CredentialSetupSession` with health checks and encrypted storage, accessible via `hive setup-credentials` or automatic prompting on credential errors. (@RichardTang-Aden)
|
||||
- **Pre-start confirmation prompt** -- Interactive prompt before agent execution allowing credential updates or abort. (@RichardTang-Aden)
|
||||
- **Event bus multi-graph support** -- `graph_id` on events, `filter_graph` on subscriptions, `ESCALATION_REQUESTED` event type, `exclude_own_graph` filter. (@TimothyZhang7)
|
||||
|
||||
### TUI Improvements
|
||||
|
||||
- **In-app agent picker** (Ctrl+A) -- Tabbed modal for browsing agents with metadata badges (nodes, tools, sessions, tags). (@TimothyZhang7)
|
||||
- **Runtime-optional TUI startup** -- Launches without a pre-loaded agent, shows agent picker on startup. (@TimothyZhang7)
|
||||
- **Hive Coder escalation** (Ctrl+E) -- Escalate to Hive Coder and return; also available via `/coder` and `/back` chat commands. (@TimothyZhang7)
|
||||
- **PDF attachment support** -- `/attach` and `/detach` commands with native OS file dialog. (@TimothyZhang7)
|
||||
- **Streaming output pane** -- Dedicated RichLog widget for live LLM token streaming. (@TimothyZhang7)
|
||||
- **Multi-graph TUI commands** -- `/graphs`, `/graph <id>`, `/load <path>`, `/unload <id>`. (@TimothyZhang7)
|
||||
- **Agent Guardian watchdog** -- Event-driven monitor that catches secondary agent failures and triggers automatic remediation, with `--no-guardian` CLI flag. (@TimothyZhang7)
|
||||
|
||||
### New Tool Integrations
|
||||
|
||||
| Tool | Description | Contributor |
|
||||
| ---------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------ |
|
||||
| **Discord** | 4 MCP tools (`discord_list_guilds`, `discord_list_channels`, `discord_send_message`, `discord_get_messages`) with rate-limit retry and channel filtering | @mishrapravin114 |
|
||||
| **Exa Search API** | 4 AI-powered search tools (`exa_search`, `exa_find_similar`, `exa_get_contents`, `exa_answer`) with neural/keyword search, domain filters, and citation-backed answers | @JeetKaria06 |
|
||||
| **Razorpay** | 6 payment processing tools for payments, invoices, payment links, and refunds with HTTP Basic Auth | @shivamshahi07 |
|
||||
| **Google Docs** | Document creation, reading, and editing with OAuth credential support | @haliaeetusvocifer |
|
||||
| **Gmail enhancements** | Expanded mail operations for inbox management | @bryanadenhq |
|
||||
|
||||
### Infrastructure
|
||||
|
||||
- **Default node type → `event_loop`** -- `NodeSpec.node_type` defaults to `"event_loop"` instead of `"llm_tool_use"`. (@TimothyZhang7)
|
||||
- **Default `max_node_visits` → 0 (unlimited)** -- Nodes default to unlimited visits, reducing friction for feedback loops and forever-alive agents. (@TimothyZhang7)
|
||||
- **Remove `function` field from NodeSpec** -- Follows deprecation of `FunctionNode`. (@TimothyZhang7)
|
||||
- **LiteLLM OAuth patch** -- Correct header construction for OAuth tokens (remove `x-api-key` when Bearer token is present). (@TimothyZhang7)
|
||||
- **Orchestrator config centralization** -- Reads `api_key`, `api_base`, `extra_kwargs` from centralized `~/.hive/configuration.json`. (@TimothyZhang7)
|
||||
- **System prompt datetime injection** -- All system prompts now include current date/time for time-aware agent behavior. (@TimothyZhang7)
|
||||
- **Utils module exports** -- Proper `__init__.py` exports for the utils module. (@Siddharth2624)
|
||||
- **Increased default max_tokens** -- Opus 4.6 defaults to 32768, Sonnet 4.5 to 16384 (up from 8192). (@TimothyZhang7)
|
||||
|
||||
---
|
||||
|
||||
## Bug Fixes
|
||||
|
||||
- Flush WIP accumulator outputs on cancel/failure so edge conditions see correct values on resume
|
||||
- Stall detection state preserved across resume (no more resets on checkpoint restore)
|
||||
- Skip client-facing blocking for event-triggered executions (timer/webhook)
|
||||
- Executor retry override scoped to actual EventLoopNode instances only
|
||||
- Add `_awaiting_input` flag to EventLoopNode to prevent input injection race conditions
|
||||
- Fix TUI streaming display (tokens no longer appear one-per-line)
|
||||
- Fix `_return_from_escalation` crash when ChatRepl widgets not yet mounted
|
||||
- Fix tools registration problems for Google Docs credentials (@RichardTang-Aden)
|
||||
- Fix email agent version conflicts (@RichardTang-Aden)
|
||||
- Fix coder tool timeouts (120s for tests, 300s cap for commands)
|
||||
|
||||
## Documentation
|
||||
|
||||
- Clarify installation and prevent root pip install misuse (@paarths-collab)
|
||||
|
||||
---
|
||||
|
||||
## Agent Updates
|
||||
|
||||
- **Email Inbox Management** -- Consolidate `gmail_inbox_guardian` and `inbox_management` into a single unified agent with updated prompts and config. (@RichardTang-Aden, @bryanadenhq)
|
||||
- **Job Hunter** -- Updated node prompts, config, and agent metadata; added PDF resume selection. (@bryanadenhq)
|
||||
- **Deep Research Agent** -- Revised node implementations with updated prompts and output handling.
|
||||
- **Tech News Reporter** -- Revised node prompts for improved output quality.
|
||||
- **Vulnerability Assessment** -- Expanded prompts with more detailed assessment instructions. (@bryanadenhq)
|
||||
|
||||
---
|
||||
|
||||
## Breaking Changes
|
||||
|
||||
- **Deprecated node types raise `RuntimeError`** -- `llm_tool_use`, `llm_generate`, `function`, `router`, `human_input` now fail instead of warning. Migrate to `event_loop`.
|
||||
- **`NodeSpec.node_type` defaults to `"event_loop"`** (was `"llm_tool_use"`)
|
||||
- **`NodeSpec.max_node_visits` defaults to `0` / unlimited** (was `1`)
|
||||
- **`NodeSpec.function` field removed** -- `FunctionNode` is deleted; use event_loop nodes with tools instead.
|
||||
|
||||
---
|
||||
|
||||
## Community Contributors
|
||||
|
||||
A huge thank you to everyone who contributed to this release:
|
||||
|
||||
- **Richard Tang** (@RichardTang-Aden) -- Interactive credential setup, pre-start confirmation, email agent consolidation, tool registration fixes, lint and formatting
|
||||
- **Pravin Mishra** (@mishrapravin114) -- Discord integration with 4 MCP tools
|
||||
- **Jeet Karia** (@JeetKaria06) -- Exa Search API integration with 4 AI-powered search tools
|
||||
- **Shivam Shahi** (@shivamshahi07) -- Razorpay payment processing integration
|
||||
- **Siddharth Varshney** (@Siddharth2624) -- Utils module exports
|
||||
- **@haliaeetusvocifer** -- Google Docs integration with OAuth support
|
||||
- **Bryan** (@bryanadenhq) -- PDF selection, inbox agent fixes, Job Hunter and Vulnerability Assessment updates
|
||||
- **@paarths-collab** -- Documentation improvements
|
||||
|
||||
---
|
||||
|
||||
## Upgrading
|
||||
|
||||
```bash
|
||||
git pull origin main
|
||||
uv sync
|
||||
```
|
||||
|
||||
### Migration Guide
|
||||
|
||||
If your agents use deprecated node types, update them:
|
||||
|
||||
```python
|
||||
# Before (v0.5.0) -- these now raise RuntimeError
|
||||
NodeSpec(node_type="llm_tool_use", ...)
|
||||
NodeSpec(node_type="function", function=my_func, ...)
|
||||
|
||||
# After (v0.5.1) -- use event_loop for everything
|
||||
NodeSpec(node_type="event_loop", ...) # or just omit node_type (it's the default now)
|
||||
```
|
||||
|
||||
If your agents set `max_node_visits=1` explicitly, they'll still work. The only change is the _default_ -- new agents without an explicit value now get unlimited visits.
|
||||
|
||||
To try the new Hive Coder:
|
||||
|
||||
```bash
|
||||
# Launch Coder directly
|
||||
hive code
|
||||
|
||||
# Or from TUI -- press Ctrl+E to escalate
|
||||
hive tui
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## What's Next
|
||||
|
||||
- **Agent-to-agent communication** -- one agent's output triggers another agent's entry point
|
||||
- **Cost visibility** -- detailed runtime log of LLM costs per node and per session
|
||||
- **Persistent webhook subscriptions** -- survive agent restarts without re-registering
|
||||
- **Remote agent deployment** -- run agents as long-lived services with HTTP APIs
|
||||
|
||||
+21
-10
@@ -1,10 +1,10 @@
|
||||
# Contributing to Aden Agent Framework
|
||||
|
||||
Thank you for your interest in contributing to the Aden Agent Framework! This document provides guidelines and information for contributors. We’re especially looking for help building tools, integrations([check #2805](https://github.com/adenhq/hive/issues/2805)), and example agents for the framework. If you’re interested in extending its functionality, this is the perfect place to start.
|
||||
Thank you for your interest in contributing to the Aden Agent Framework! This document provides guidelines and information for contributors. We’re especially looking for help building tools, integrations ([check #2805](https://github.com/adenhq/hive/issues/2805)), and example agents for the framework. If you’re interested in extending its functionality, this is the perfect place to start.
|
||||
|
||||
## Code of Conduct
|
||||
|
||||
By participating in this project, you agree to abide by our [Code of Conduct](CODE_OF_CONDUCT.md).
|
||||
By participating in this project, you agree to abide by our [Code of Conduct](docs/CODE_OF_CONDUCT.md).
|
||||
|
||||
## Issue Assignment Policy
|
||||
|
||||
@@ -35,15 +35,22 @@ You may submit PRs without prior assignment for:
|
||||
|
||||
1. Fork the repository
|
||||
2. Clone your fork: `git clone https://github.com/YOUR_USERNAME/hive.git`
|
||||
3. Create a feature branch: `git checkout -b feature/your-feature-name`
|
||||
4. Make your changes
|
||||
5. Run checks and tests:
|
||||
3. Add the upstream repository: `git remote add upstream https://github.com/adenhq/hive.git`
|
||||
4. Sync with upstream to ensure you're starting from the latest code:
|
||||
```bash
|
||||
git fetch upstream
|
||||
git checkout main
|
||||
git merge upstream/main
|
||||
```
|
||||
5. Create a feature branch: `git checkout -b feature/your-feature-name`
|
||||
6. Make your changes
|
||||
7. Run checks and tests:
|
||||
```bash
|
||||
make check # Lint and format checks (ruff check + ruff format --check on core/ and tools/)
|
||||
make test # Core tests (cd core && pytest tests/ -v)
|
||||
```
|
||||
6. Commit your changes following our commit conventions
|
||||
7. Push to your fork and submit a Pull Request
|
||||
8. Commit your changes following our commit conventions
|
||||
9. Push to your fork and submit a Pull Request
|
||||
|
||||
## Development Setup
|
||||
|
||||
@@ -92,8 +99,7 @@ docs(readme): update installation instructions
|
||||
2. Update documentation if needed
|
||||
3. Add tests for new functionality
|
||||
4. Ensure `make check` and `make test` pass
|
||||
5. Update the CHANGELOG.md if applicable
|
||||
6. Request review from maintainers
|
||||
5. Request review from maintainers
|
||||
|
||||
### PR Title Format
|
||||
|
||||
@@ -120,6 +126,8 @@ feat(component): add new feature description
|
||||
- Use meaningful variable and function names
|
||||
- Keep functions focused and small
|
||||
|
||||
For linting and formatting (Ruff, pre-commit hooks), see [Linting & Formatting Setup](docs/contributing-lint-setup.md).
|
||||
|
||||
## Testing
|
||||
|
||||
> **Note:** When testing agents in `exports/`, always set PYTHONPATH:
|
||||
@@ -138,6 +146,9 @@ make test
|
||||
# Or run tests directly
|
||||
cd core && pytest tests/ -v
|
||||
|
||||
# Run tools package tests (when contributing to tools/)
|
||||
cd tools && uv run pytest tests/ -v
|
||||
|
||||
# Run tests for a specific agent
|
||||
PYTHONPATH=exports uv run python -m agent_name test
|
||||
```
|
||||
@@ -152,4 +163,4 @@ By submitting a Pull Request, you agree that your contributions will be licensed
|
||||
|
||||
Feel free to open an issue for questions or join our [Discord community](https://discord.com/invite/MXE49hrKDk).
|
||||
|
||||
Thank you for contributing!
|
||||
Thank you for contributing!
|
||||
|
||||
@@ -1,12 +1,14 @@
|
||||
.PHONY: lint format check test install-hooks help
|
||||
.PHONY: lint format check test install-hooks help frontend-dev frontend-build
|
||||
|
||||
help: ## Show this help
|
||||
@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | \
|
||||
awk 'BEGIN {FS = ":.*?## "}; {printf " \033[36m%-15s\033[0m %s\n", $$1, $$2}'
|
||||
|
||||
lint: ## Run ruff linter (with auto-fix)
|
||||
lint: ## Run ruff linter and formatter (with auto-fix)
|
||||
cd core && ruff check --fix .
|
||||
cd tools && ruff check --fix .
|
||||
cd core && ruff format .
|
||||
cd tools && ruff format .
|
||||
|
||||
format: ## Run ruff formatter
|
||||
cd core && ruff format .
|
||||
@@ -24,3 +26,9 @@ test: ## Run all tests
|
||||
install-hooks: ## Install pre-commit hooks
|
||||
uv pip install pre-commit
|
||||
pre-commit install
|
||||
|
||||
frontend-dev: ## Start frontend dev server
|
||||
cd core/frontend && npm run dev
|
||||
|
||||
frontend-build: ## Build frontend for production
|
||||
cd core/frontend && npm run build
|
||||
|
||||
@@ -1,47 +0,0 @@
|
||||
## Summary
|
||||
- **Added HubSpot integration** — new HubSpot MCP tool with search, get, create, and update operations for contacts, companies, and deals. Includes OAuth2 provider for HubSpot credentials and credential store adapter for the tools layer.
|
||||
- **Replaced web_scrape tool with Playwright + stealth** — swapped httpx/BeautifulSoup for a headless Chromium browser using `playwright` (async API) and `playwright-stealth`, enabling JS-rendered page scraping and bot detection evasion
|
||||
- **Added empty response retry logic** — LLM provider now detects empty responses (e.g. Gemini returning 200 with no content on rate limit) and retries with exponential backoff, preventing hallucinated output from the cleanup LLM
|
||||
- **Added context-aware input compaction** — LLM nodes now estimate input token count before calling the model and progressively truncate the largest values if they exceed the context window budget
|
||||
- **Increased rate limit retries to 10** with verbose `[retry]` and `[compaction]` logging that includes model name, finish reason, and attempt count
|
||||
- **Interactive quickstart onboarding** — `quickstart.sh` rewritten as bee-themed interactive wizard that detects existing API keys (including Claude Code subscription), lets user pick ONE default LLM provider, and saves configuration to `~/.hive/configuration.json`
|
||||
- **Fixed lint errors** across `hubspot_tool.py` (line length) and `agent_builder_server.py` (unused variable)
|
||||
|
||||
## Changed files
|
||||
|
||||
### HubSpot Integration
|
||||
- `tools/src/aden_tools/tools/hubspot_tool/` — New MCP tool: contacts, companies, and deals CRUD
|
||||
- `tools/src/aden_tools/tools/__init__.py` — Registered HubSpot tools
|
||||
- `tools/src/aden_tools/credentials/integrations.py` — HubSpot credential integration
|
||||
- `tools/src/aden_tools/credentials/__init__.py` — Updated credential exports
|
||||
- `core/framework/credentials/oauth2/hubspot_provider.py` — HubSpot OAuth2 provider
|
||||
- `core/framework/credentials/oauth2/__init__.py` — Registered HubSpot OAuth2 provider
|
||||
- `core/framework/runner/runner.py` — Updated runner for credential support
|
||||
|
||||
### Web Scrape Rewrite
|
||||
- `tools/src/aden_tools/tools/web_scrape_tool/web_scrape_tool.py` — Playwright async rewrite
|
||||
- `tools/src/aden_tools/tools/web_scrape_tool/README.md` — Updated docs
|
||||
- `tools/pyproject.toml` — Added `playwright`, `playwright-stealth` deps
|
||||
- `tools/Dockerfile` — Added `playwright install chromium --with-deps`
|
||||
### LLM Reliability
|
||||
- `core/framework/llm/litellm.py` — Empty response retry + max retries 10 + verbose logging
|
||||
- `core/framework/graph/node.py` — Input compaction via `_compact_inputs()`, `_estimate_tokens()`, `_get_context_limit()`
|
||||
|
||||
### Quickstart & Setup
|
||||
- `quickstart.sh` — Interactive bee-themed onboarding wizard with single provider selection
|
||||
- `~/.hive/configuration.json` — New user config file for default LLM provider/model
|
||||
|
||||
### Fixes
|
||||
- `core/framework/mcp/agent_builder_server.py` — Removed unused variable
|
||||
- `tools/src/aden_tools/tools/hubspot_tool/hubspot_tool.py` — Fixed E501 line length violations
|
||||
|
||||
## Test plan
|
||||
- [ ] Run `make lint` — passes clean
|
||||
- [ ] Run `./quickstart.sh` and verify interactive flow works, config saved to `~/.hive/configuration.json`
|
||||
- [ ] Run `pytest tests/tools/test_web_scrape_tool.py -v`
|
||||
- [ ] Run agent against a JS-heavy site and verify `web_scrape` returns rendered content
|
||||
- [ ] Set `HUBSPOT_ACCESS_TOKEN` and verify HubSpot tool CRUD operations work
|
||||
- [ ] Trigger rate limit and verify `[retry]` logs appear with correct attempt counts
|
||||
- [ ] Run agent with large inputs and verify `[compaction]` logs show truncation
|
||||
|
||||
🤖 Generated with [Claude Code](https://claude.com/claude-code)
|
||||
@@ -1,5 +1,5 @@
|
||||
<p align="center">
|
||||
<img width="100%" alt="Hive Banner" src="https://storage.googleapis.com/aden-prod-assets/website/aden-title-card.png" />
|
||||
<img width="100%" alt="Hive Banner" src="https://github.com/user-attachments/assets/a027429b-5d3c-4d34-88e4-0feaeaabbab3" />
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
@@ -13,16 +13,19 @@
|
||||
<a href="docs/i18n/ko.md">한국어</a>
|
||||
</p>
|
||||
|
||||
[](https://github.com/adenhq/hive/blob/main/LICENSE)
|
||||
[](https://www.ycombinator.com/companies/aden)
|
||||
[](https://discord.com/invite/MXE49hrKDk)
|
||||
[](https://x.com/aden_hq)
|
||||
[](https://www.linkedin.com/company/teamaden/)
|
||||
<p align="center">
|
||||
<a href="https://github.com/adenhq/hive/blob/main/LICENSE"><img src="https://img.shields.io/badge/License-Apache%202.0-blue.svg" alt="Apache 2.0 License" /></a>
|
||||
<a href="https://www.ycombinator.com/companies/aden"><img src="https://img.shields.io/badge/Y%20Combinator-Aden-orange" alt="Y Combinator" /></a>
|
||||
<a href="https://discord.com/invite/MXE49hrKDk"><img src="https://img.shields.io/discord/1172610340073242735?logo=discord&labelColor=%235462eb&logoColor=%23f5f5f5&color=%235462eb" alt="Discord" /></a>
|
||||
<a href="https://x.com/aden_hq"><img src="https://img.shields.io/twitter/follow/teamaden?logo=X&color=%23f5f5f5" alt="Twitter Follow" /></a>
|
||||
<a href="https://www.linkedin.com/company/teamaden/"><img src="https://custom-icon-badges.demolab.com/badge/LinkedIn-0A66C2?logo=linkedin-white&logoColor=fff" alt="LinkedIn" /></a>
|
||||
<img src="https://img.shields.io/badge/MCP-102_Tools-00ADD8?style=flat-square" alt="MCP" />
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<img src="https://img.shields.io/badge/AI_Agents-Self--Improving-brightgreen?style=flat-square" alt="AI Agents" />
|
||||
<img src="https://img.shields.io/badge/Multi--Agent-Systems-blue?style=flat-square" alt="Multi-Agent" />
|
||||
<img src="https://img.shields.io/badge/Goal--Driven-Development-purple?style=flat-square" alt="Goal-Driven" />
|
||||
<img src="https://img.shields.io/badge/Headless-Development-purple?style=flat-square" alt="Headless" />
|
||||
<img src="https://img.shields.io/badge/Human--in--the--Loop-orange?style=flat-square" alt="HITL" />
|
||||
<img src="https://img.shields.io/badge/Production--Ready-red?style=flat-square" alt="Production" />
|
||||
</p>
|
||||
@@ -30,15 +33,16 @@
|
||||
<img src="https://img.shields.io/badge/OpenAI-supported-412991?style=flat-square&logo=openai" alt="OpenAI" />
|
||||
<img src="https://img.shields.io/badge/Anthropic-supported-d4a574?style=flat-square" alt="Anthropic" />
|
||||
<img src="https://img.shields.io/badge/Google_Gemini-supported-4285F4?style=flat-square&logo=google" alt="Gemini" />
|
||||
<img src="https://img.shields.io/badge/MCP-19_Tools-00ADD8?style=flat-square" alt="MCP" />
|
||||
</p>
|
||||
|
||||
## Overview
|
||||
|
||||
Build reliable, self-improving AI agents without hardcoding workflows. Define your goal through conversation with a coding agent, and the framework generates a node graph with dynamically created connection code. When things break, the framework captures failure data, evolves the agent through the coding agent, and redeploys. Built-in human-in-the-loop nodes, credential management, and real-time monitoring give you control without sacrificing adaptability.
|
||||
Build autonomous, reliable, self-improving AI agents without hardcoding workflows. Define your goal through conversation with a coding agent, and the framework generates a node graph with dynamically created connection code. When things break, the framework captures failure data, evolves the agent through the coding agent, and redeploys. Built-in human-in-the-loop nodes, credential management, and real-time monitoring give you control without sacrificing adaptability.
|
||||
|
||||
Visit [adenhq.com](https://adenhq.com) for complete documentation, examples, and guides.
|
||||
|
||||
https://github.com/user-attachments/assets/846c0cc7-ffd6-47fa-b4b7-495494857a55
|
||||
|
||||
## Who Is Hive For?
|
||||
|
||||
Hive is designed for developers and teams who want to build **production-grade AI agents** without manually wiring complex workflows.
|
||||
@@ -58,45 +62,36 @@ Hive may not be the best fit if you’re only experimenting with simple agent ch
|
||||
Use Hive when you need:
|
||||
|
||||
- Long-running, autonomous agents
|
||||
- Multi-agent coordination
|
||||
- Strong guardrails, process, and controls
|
||||
- Continuous improvement based on failures
|
||||
- Strong monitoring, safety, and budget controls
|
||||
- Multi-agent coordination
|
||||
- A framework that evolves with your goals
|
||||
|
||||
|
||||
## What is Aden
|
||||
|
||||
<p align="center">
|
||||
<img width="100%" alt="Aden Architecture" src="docs/assets/aden-architecture-diagram.jpg" />
|
||||
</p>
|
||||
|
||||
Aden is a platform for building, deploying, operating, and adapting AI agents:
|
||||
|
||||
- **Build** - A Coding Agent generates specialized Worker Agents (Sales, Marketing, Ops) from natural language goals
|
||||
- **Deploy** - Headless deployment with CI/CD integration and full API lifecycle management
|
||||
- **Operate** - Real-time monitoring, observability, and runtime guardrails keep agents reliable
|
||||
- **Adapt** - Continuous evaluation, supervision, and adaptation ensure agents improve over time
|
||||
- **Infra** - Shared memory, LLM integrations, tools, and skills power every agent
|
||||
|
||||
## Quick Links
|
||||
|
||||
- **[Documentation](https://docs.adenhq.com/)** - Complete guides and API reference
|
||||
- **[Self-Hosting Guide](https://docs.adenhq.com/getting-started/quickstart)** - Deploy Hive on your infrastructure
|
||||
- **[Changelog](https://github.com/adenhq/hive/releases)** - Latest updates and releases
|
||||
<!-- - **[Roadmap](https://adenhq.com/roadmap)** - Upcoming features and plans -->
|
||||
- **[Roadmap](docs/roadmap.md)** - Upcoming features and plans
|
||||
- **[Report Issues](https://github.com/adenhq/hive/issues)** - Bug reports and feature requests
|
||||
- **[Contributing](CONTRIBUTING.md)** - How to contribute and submit PRs
|
||||
|
||||
## Quick Start
|
||||
|
||||
## Prerequisites
|
||||
### Prerequisites
|
||||
|
||||
- Python 3.11+ for agent development
|
||||
- Claude Code or Cursor for utilizing agent skills
|
||||
- Claude Code, Codex CLI, or Cursor for utilizing agent skills
|
||||
|
||||
> **Note for Windows Users:** It is strongly recommended to use **WSL (Windows Subsystem for Linux)** or **Git Bash** to run this framework. Some core automation scripts may not execute correctly in standard Command Prompt or PowerShell.
|
||||
|
||||
### Installation
|
||||
|
||||
> **Note**
|
||||
> Hive uses a `uv` workspace layout and is not installed with `pip install`.
|
||||
> Running `pip install -e .` from the repository root will create a placeholder package and Hive will not function correctly.
|
||||
> Please use the quickstart script below to set up the environment.
|
||||
|
||||
```bash
|
||||
# Clone the repository
|
||||
git clone https://github.com/adenhq/hive.git
|
||||
@@ -107,45 +102,89 @@ cd hive
|
||||
```
|
||||
|
||||
This sets up:
|
||||
|
||||
- **framework** - Core agent runtime and graph executor (in `core/.venv`)
|
||||
- **aden_tools** - MCP tools for agent capabilities (in `tools/.venv`)
|
||||
- All required Python dependencies
|
||||
- **credential store** - Encrypted API key storage (`~/.hive/credentials`)
|
||||
- **LLM provider** - Interactive default model configuration
|
||||
- All required Python dependencies with `uv`
|
||||
|
||||
### Build Your First Agent
|
||||
|
||||
```bash
|
||||
# Build an agent using Claude Code
|
||||
claude> /building-agents-construction
|
||||
claude> /hive
|
||||
|
||||
# Test your agent
|
||||
claude> /testing-agent
|
||||
claude> /hive-debugger
|
||||
|
||||
# Run your agent
|
||||
PYTHONPATH=exports uv run python -m your_agent_name run --input '{...}'
|
||||
# (at separate terminal) Launch the interactive dashboard
|
||||
hive tui
|
||||
|
||||
# Or run directly
|
||||
hive run exports/your_agent_name --input '{"key": "value"}'
|
||||
```
|
||||
|
||||
**[📖 Complete Setup Guide](ENVIRONMENT_SETUP.md)** - Detailed instructions for agent development
|
||||
## Coding Agent Support
|
||||
|
||||
### Cursor IDE Support
|
||||
### Codex CLI
|
||||
|
||||
Skills are also available in Cursor. To enable:
|
||||
Hive includes native support for [OpenAI Codex CLI](https://github.com/openai/codex) (v0.101.0+).
|
||||
|
||||
1. Open Command Palette (`Cmd+Shift+P` / `Ctrl+Shift+P`)
|
||||
2. Run `MCP: Enable` to enable MCP servers
|
||||
3. Restart Cursor to load the MCP servers from `.cursor/mcp.json`
|
||||
4. Type `/` in Agent chat and search for skills (e.g., `/building-agents-construction`)
|
||||
1. **Config:** `.codex/config.toml` with `agent-builder` MCP server (tracked in git)
|
||||
2. **Skills:** `.agents/skills/` symlinks to Hive skills (tracked in git)
|
||||
3. **Launch:** Run `codex` in the repo root, then type `use hive`
|
||||
|
||||
Example:
|
||||
|
||||
```
|
||||
codex> use hive
|
||||
```
|
||||
|
||||
### Opencode
|
||||
|
||||
Hive includes native support for [Opencode](https://github.com/opencode-ai/opencode).
|
||||
|
||||
1. **Setup:** Run the quickstart script
|
||||
2. **Launch:** Open Opencode in the project root.
|
||||
3. **Activate:** Type `/hive` in the chat to switch to the Hive Agent.
|
||||
4. **Verify:** Ask the agent _"List your tools"_ to confirm the connection.
|
||||
|
||||
The agent has access to all Hive skills and can scaffold agents, add tools, and debug workflows directly from the chat.
|
||||
|
||||
**[📖 Complete Setup Guide](docs/environment-setup.md)** - Detailed instructions for agent development
|
||||
|
||||
### Antigravity IDE Support
|
||||
|
||||
Skills and MCP servers are also available in [Antigravity IDE](https://antigravity.google/) (Google's AI-powered IDE). **Easiest:** open a terminal in the hive repo folder and run (use `./` — the script is inside the repo):
|
||||
|
||||
```bash
|
||||
./scripts/setup-antigravity-mcp.sh
|
||||
```
|
||||
|
||||
**Important:** Always restart/refresh Antigravity IDE after running the setup script—MCP servers only load on startup. After restart, **agent-builder** and **tools** MCP servers should connect. Skills are under `.agent/skills/` (symlinks to `.claude/skills/`). See [docs/antigravity-setup.md](docs/antigravity-setup.md) for manual setup and troubleshooting.
|
||||
|
||||
## Features
|
||||
|
||||
- **Goal-Driven Development** - Define objectives in natural language; the coding agent generates the agent graph and connection code to achieve them
|
||||
- **Adaptiveness** - Framework captures failures, calibrates according to the objectives, and evolves the agent graph
|
||||
- **Dynamic Node Connections** - No predefined edges; connection code is generated by any capable LLM based on your goals
|
||||
- **[Goal-Driven Development](docs/key_concepts/goals_outcome.md)** - Define objectives in natural language; the coding agent generates the agent graph and connection code to achieve them
|
||||
- **[Adaptiveness](docs/key_concepts/evolution.md)** - Framework captures failures, calibrates according to the objectives, and evolves the agent graph
|
||||
- **[Dynamic Node Connections](docs/key_concepts/graph.md)** - No predefined edges; connection code is generated by any capable LLM based on your goals
|
||||
- **SDK-Wrapped Nodes** - Every node gets shared memory, local RLM memory, monitoring, tools, and LLM access out of the box
|
||||
- **Human-in-the-Loop** - Intervention nodes that pause execution for human input with configurable timeouts and escalation
|
||||
- **[Human-in-the-Loop](docs/key_concepts/graph.md#human-in-the-loop)** - Intervention nodes that pause execution for human input with configurable timeouts and escalation
|
||||
- **Real-time Observability** - WebSocket streaming for live monitoring of agent execution, decisions, and node-to-node communication
|
||||
- **Interactive TUI Dashboard** - Terminal-based dashboard with live graph view, event log, and chat interface for agent interaction
|
||||
- **Cost & Budget Control** - Set spending limits, throttles, and automatic model degradation policies
|
||||
- **Production-Ready** - Self-hostable, built for scale and reliability
|
||||
|
||||
## Integration
|
||||
|
||||
<a href="https://github.com/adenhq/hive/tree/main/tools/src/aden_tools/tools"><img width="100%" alt="Integration" src="https://github.com/user-attachments/assets/a1573f93-cf02-4bb8-b3d5-b305b05b1e51" /></a>
|
||||
|
||||
Hive is built to be model-agnostic and system-agnostic.
|
||||
|
||||
- **LLM flexibility** - Hive Framework is designed to support various types of LLMs, including hosted and local models through LiteLLM-compatible providers.
|
||||
- **Business system connectivity** - Hive Framework is designed to connect to all kinds of business systems as tools, such as CRM, support, messaging, data, file, and internal APIs via MCP.
|
||||
|
||||
## Why Aden
|
||||
|
||||
Hive focuses on generating agents that run real business processes rather than generic agents. Instead of requiring you to manually design workflows, define agent interactions, and handle failures reactively, Hive flips the paradigm: **you describe outcomes, and the system builds itself**—delivering an outcome-driven, adaptive experience with an easy-to-use set of tools and integrations.
|
||||
@@ -182,161 +221,187 @@ flowchart LR
|
||||
style V6 fill:#fff,stroke:#ed8c00,stroke-width:1px,color:#cc5d00
|
||||
```
|
||||
|
||||
### The Aden Advantage
|
||||
### The Hive Advantage
|
||||
|
||||
| Traditional Frameworks | Aden |
|
||||
| Traditional Frameworks | Hive |
|
||||
| -------------------------- | -------------------------------------- |
|
||||
| Hardcode agent workflows | Describe goals in natural language |
|
||||
| Manual graph definition | Auto-generated agent graphs |
|
||||
| Reactive error handling | Outcome-evaluation and adaptiveness |
|
||||
| Reactive error handling | Outcome-evaluation and adaptiveness |
|
||||
| Static tool configurations | Dynamic SDK-wrapped nodes |
|
||||
| Separate monitoring setup | Built-in real-time observability |
|
||||
| DIY budget management | Integrated cost controls & degradation |
|
||||
|
||||
### How It Works
|
||||
|
||||
1. **Define Your Goal** → Describe what you want to achieve in plain English
|
||||
2. **Coding Agent Generates** → Creates the agent graph, connection code, and test cases
|
||||
3. **Workers Execute** → SDK-wrapped nodes run with full observability and tool access
|
||||
1. **[Define Your Goal](docs/key_concepts/goals_outcome.md)** → Describe what you want to achieve in plain English
|
||||
2. **Coding Agent Generates** → Creates the [agent graph](docs/key_concepts/graph.md), connection code, and test cases
|
||||
3. **[Workers Execute](docs/key_concepts/worker_agent.md)** → SDK-wrapped nodes run with full observability and tool access
|
||||
4. **Control Plane Monitors** → Real-time metrics, budget enforcement, policy management
|
||||
5. **Adaptiveness** → On failure, the system evolves the graph and redeploys automatically
|
||||
5. **[Adaptiveness](docs/key_concepts/evolution.md)** → On failure, the system evolves the graph and redeploys automatically
|
||||
|
||||
## Run pre-built Agents (Coming Soon)
|
||||
## Run Agents
|
||||
|
||||
### Run a sample agent
|
||||
Aden Hive provides a list of featured agents that you can use and build on top of.
|
||||
|
||||
### Run an agent shared by others
|
||||
Put the agent in `exports/` and run `PYTHONPATH=exports uv run python -m your_agent_name run --input '{...}'`
|
||||
|
||||
|
||||
For building and running goal-driven agents with the framework:
|
||||
The `hive` CLI is the primary interface for running agents.
|
||||
|
||||
```bash
|
||||
# One-time setup
|
||||
./quickstart.sh
|
||||
# Browse and run agents interactively (Recommended)
|
||||
hive tui
|
||||
|
||||
# This sets up:
|
||||
# - framework package (core runtime)
|
||||
# - aden_tools package (MCP tools)
|
||||
# - All Python dependencies
|
||||
# Run a specific agent directly
|
||||
hive run exports/my_agent --input '{"task": "Your input here"}'
|
||||
|
||||
# Build new agents using Claude Code skills
|
||||
claude> /building-agents-construction
|
||||
# Run a specific agent with the TUI dashboard
|
||||
hive run exports/my_agent --tui
|
||||
|
||||
# Test agents
|
||||
claude> /testing-agent
|
||||
|
||||
# Run agents
|
||||
PYTHONPATH=exports uv run python -m agent_name run --input '{...}'
|
||||
# Interactive REPL
|
||||
hive shell
|
||||
```
|
||||
|
||||
See [ENVIRONMENT_SETUP.md](ENVIRONMENT_SETUP.md) for complete setup instructions.
|
||||
The TUI scans both `exports/` and `examples/templates/` for available agents.
|
||||
|
||||
> **Using Python directly (alternative):** You can also run agents with `PYTHONPATH=exports uv run python -m agent_name run --input '{...}'`
|
||||
|
||||
See [environment-setup.md](docs/environment-setup.md) for complete setup instructions.
|
||||
|
||||
## Documentation
|
||||
|
||||
- **[Developer Guide](DEVELOPER.md)** - Comprehensive guide for developers
|
||||
- **[Developer Guide](docs/developer-guide.md)** - Comprehensive guide for developers
|
||||
- [Getting Started](docs/getting-started.md) - Quick setup instructions
|
||||
- [TUI Guide](docs/tui-selection-guide.md) - Interactive dashboard usage
|
||||
- [Configuration Guide](docs/configuration.md) - All configuration options
|
||||
- [Architecture Overview](docs/architecture/README.md) - System design and structure
|
||||
|
||||
## Roadmap
|
||||
|
||||
Aden Hive Agent Framework aims to help developers build outcome-oriented, self-adaptive agents. See [ROADMAP.md](ROADMAP.md) for details.
|
||||
Aden Hive Agent Framework aims to help developers build outcome-oriented, self-adaptive agents. See [roadmap.md](docs/roadmap.md) for details.
|
||||
|
||||
```mermaid
|
||||
flowchart TD
|
||||
subgraph Foundation
|
||||
direction LR
|
||||
subgraph arch["Architecture"]
|
||||
a1["Node-Based Architecture"]:::done
|
||||
a2["Python SDK"]:::done
|
||||
a3["LLM Integration"]:::done
|
||||
a4["Communication Protocol"]:::done
|
||||
end
|
||||
subgraph ca["Coding Agent"]
|
||||
b1["Goal Creation Session"]:::done
|
||||
b2["Worker Agent Creation"]
|
||||
b3["MCP Tools"]:::done
|
||||
end
|
||||
subgraph wa["Worker Agent"]
|
||||
c1["Human-in-the-Loop"]:::done
|
||||
c2["Callback Handlers"]:::done
|
||||
c3["Intervention Points"]:::done
|
||||
c4["Streaming Interface"]
|
||||
end
|
||||
subgraph cred["Credentials"]
|
||||
d1["Setup Process"]:::done
|
||||
d2["Pluggable Sources"]:::done
|
||||
d3["Enterprise Secrets"]
|
||||
d4["Integration Tools"]:::done
|
||||
end
|
||||
subgraph tools["Tools"]
|
||||
e1["File Use"]:::done
|
||||
e2["Memory STM/LTM"]:::done
|
||||
e3["Web Search/Scraper"]:::done
|
||||
e4["CSV/PDF"]:::done
|
||||
e5["Excel/Email"]
|
||||
end
|
||||
subgraph core["Core"]
|
||||
f1["Eval System"]
|
||||
f2["Pydantic Validation"]:::done
|
||||
f3["Documentation"]:::done
|
||||
f4["Adaptiveness"]
|
||||
f5["Sample Agents"]
|
||||
end
|
||||
end
|
||||
flowchart TB
|
||||
%% Main Entity
|
||||
User([User])
|
||||
|
||||
subgraph Expansion
|
||||
direction LR
|
||||
subgraph intel["Intelligence"]
|
||||
g1["Guardrails"]
|
||||
g2["Streaming Mode"]
|
||||
g3["Image Generation"]
|
||||
g4["Semantic Search"]
|
||||
%% =========================================
|
||||
%% EXTERNAL EVENT SOURCES
|
||||
%% =========================================
|
||||
subgraph ExtEventSource [External Event Source]
|
||||
E_Sch["Schedulers"]
|
||||
E_WH["Webhook"]
|
||||
E_SSE["SSE"]
|
||||
end
|
||||
subgraph mem["Memory Iteration"]
|
||||
h1["Message Model & Sessions"]
|
||||
h2["Storage Migration"]
|
||||
h3["Context Building"]
|
||||
h4["Proactive Compaction"]
|
||||
h5["Token Tracking"]
|
||||
end
|
||||
subgraph evt["Event System"]
|
||||
i1["Event Bus for Nodes"]
|
||||
end
|
||||
subgraph cas["Coding Agent Support"]
|
||||
j1["Claude Code"]
|
||||
j2["Cursor"]
|
||||
j3["Opencode"]
|
||||
j4["Antigravity"]
|
||||
end
|
||||
subgraph plat["Platform"]
|
||||
k1["JavaScript/TypeScript SDK"]
|
||||
k2["Custom Tool Integrator"]
|
||||
k3["Windows Support"]
|
||||
end
|
||||
subgraph dep["Deployment"]
|
||||
l1["Self-Hosted"]
|
||||
l2["Cloud Services"]
|
||||
l3["CI/CD Pipeline"]
|
||||
end
|
||||
subgraph tmpl["Templates"]
|
||||
m1["Sales Agent"]
|
||||
m2["Marketing Agent"]
|
||||
m3["Analytics Agent"]
|
||||
m4["Training Agent"]
|
||||
m5["Smart Form Agent"]
|
||||
end
|
||||
end
|
||||
|
||||
classDef done fill:#9e9e9e,color:#fff,stroke:#757575
|
||||
%% =========================================
|
||||
%% SYSTEM NODES
|
||||
%% =========================================
|
||||
subgraph WorkerBees [Worker Bees]
|
||||
WB_C["Conversation"]
|
||||
WB_SP["System prompt"]
|
||||
|
||||
subgraph Graph [Graph]
|
||||
direction TB
|
||||
N1["Node"] --> N2["Node"] --> N3["Node"]
|
||||
N1 -.-> AN["Active Node"]
|
||||
N2 -.-> AN
|
||||
N3 -.-> AN
|
||||
|
||||
%% Nested Event Loop Node
|
||||
subgraph EventLoopNode [Event Loop Node]
|
||||
ELN_L["listener"]
|
||||
ELN_SP["System Prompt<br/>(Task)"]
|
||||
ELN_EL["Event loop"]
|
||||
ELN_C["Conversation"]
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
subgraph JudgeNode [Judge]
|
||||
J_C["Criteria"]
|
||||
J_P["Principles"]
|
||||
J_EL["Event loop"] <--> J_S["Scheduler"]
|
||||
end
|
||||
|
||||
subgraph QueenBee [Queen Bee]
|
||||
QB_SP["System prompt"]
|
||||
QB_EL["Event loop"]
|
||||
QB_C["Conversation"]
|
||||
end
|
||||
|
||||
subgraph Infra [Infra]
|
||||
SA["Sub Agent"]
|
||||
TR["Tool Registry"]
|
||||
WTM["Write through Conversation Memory<br/>(Logs/RAM/Harddrive)"]
|
||||
SM["Shared Memory<br/>(State/Harddrive)"]
|
||||
EB["Event Bus<br/>(RAM)"]
|
||||
CS["Credential Store<br/>(Harddrive/Cloud)"]
|
||||
end
|
||||
|
||||
subgraph PC [PC]
|
||||
B["Browser"]
|
||||
CB["Codebase<br/>v 0.0.x ... v n.n.n"]
|
||||
end
|
||||
|
||||
%% =========================================
|
||||
%% CONNECTIONS & DATA FLOW
|
||||
%% =========================================
|
||||
|
||||
%% External Event Routing
|
||||
E_Sch --> ELN_L
|
||||
E_WH --> ELN_L
|
||||
E_SSE --> ELN_L
|
||||
ELN_L -->|"triggers"| ELN_EL
|
||||
|
||||
%% User Interactions
|
||||
User -->|"Talk"| WB_C
|
||||
User -->|"Talk"| QB_C
|
||||
User -->|"Read/Write Access"| CS
|
||||
|
||||
%% Inter-System Logic
|
||||
ELN_C <-->|"Mirror"| WB_C
|
||||
WB_C -->|"Focus"| AN
|
||||
|
||||
WorkerBees -->|"Inquire"| JudgeNode
|
||||
JudgeNode -->|"Approve"| WorkerBees
|
||||
|
||||
%% Judge Alignments
|
||||
J_C <-.->|"aligns"| WB_SP
|
||||
J_P <-.->|"aligns"| QB_SP
|
||||
|
||||
%% Escalate path
|
||||
J_EL -->|"Report (Escalate)"| QB_EL
|
||||
|
||||
%% Pub/Sub Logic
|
||||
AN -->|"publish"| EB
|
||||
EB -->|"subscribe"| QB_C
|
||||
|
||||
%% Infra and Process Spawning
|
||||
ELN_EL -->|"Spawn"| SA
|
||||
SA -->|"Inform"| ELN_EL
|
||||
SA -->|"Starts"| B
|
||||
B -->|"Report"| ELN_EL
|
||||
TR -->|"Assigned"| ELN_EL
|
||||
CB -->|"Modify Worker Bee"| WB_C
|
||||
|
||||
%% =========================================
|
||||
%% SHARED MEMORY & LOGS ACCESS
|
||||
%% =========================================
|
||||
|
||||
%% Worker Bees Access (link to node inside Graph subgraph)
|
||||
AN <-->|"Read/Write"| WTM
|
||||
AN <-->|"Read/Write"| SM
|
||||
|
||||
%% Queen Bee Access
|
||||
QB_C <-->|"Read/Write"| WTM
|
||||
QB_EL <-->|"Read/Write"| SM
|
||||
|
||||
%% Credentials Access
|
||||
CS -->|"Read Access"| QB_C
|
||||
```
|
||||
|
||||
## Contributing
|
||||
|
||||
We welcome contributions from the community! We’re especially looking for help building tools, integrations, and example agents for the framework ([check #2805](https://github.com/adenhq/hive/issues/2805)). If you’re interested in extending its functionality, this is the perfect place to start. Please see [CONTRIBUTING.md](CONTRIBUTING.md) for guidelines.
|
||||
|
||||
**Important:** Please get assigned to an issue before submitting a PR. Comment on an issue to claim it, and a maintainer will assign you. Issues with reproducible steps and proposals are prioritized. This helps prevent duplicate work.
|
||||
**Important:** Please get assigned to an issue before submitting a PR. Comment on an issue to claim it, and a maintainer will assign you. Issues with reproducible steps and proposals are prioritized. This helps prevent duplicate work.
|
||||
|
||||
1. Find or create an issue and get assigned
|
||||
2. Fork the repository
|
||||
@@ -369,10 +434,6 @@ This project is licensed under the Apache License 2.0 - see the [LICENSE](LICENS
|
||||
|
||||
## Frequently Asked Questions (FAQ)
|
||||
|
||||
**Q: Does Hive depend on LangChain or other agent frameworks?**
|
||||
|
||||
No. Hive is built from the ground up with no dependencies on LangChain, CrewAI, or other agent frameworks. The framework is designed to be lean and flexible, generating agent graphs dynamically rather than relying on predefined components.
|
||||
|
||||
**Q: What LLM providers does Hive support?**
|
||||
|
||||
Hive supports 100+ LLM providers through LiteLLM integration, including OpenAI (GPT-4, GPT-4o), Anthropic (Claude models), Google Gemini, DeepSeek, Mistral, Groq, and many more. Simply set the appropriate API key environment variable and specify the model name.
|
||||
@@ -383,37 +444,25 @@ Yes! Hive supports local models through LiteLLM. Simply use the model name forma
|
||||
|
||||
**Q: What makes Hive different from other agent frameworks?**
|
||||
|
||||
Hive generates your entire agent system from natural language goals using a coding agent—you don't hardcode workflows or manually define graphs. When agents fail, the framework automatically captures failure data, evolves the agent graph, and redeploys. This self-improving loop is unique to Aden.
|
||||
Hive generates your entire agent system from natural language goals using a coding agent—you don't hardcode workflows or manually define graphs. When agents fail, the framework automatically captures failure data, [evolves the agent graph](docs/key_concepts/evolution.md), and redeploys. This self-improving loop is unique to Aden.
|
||||
|
||||
**Q: Is Hive open-source?**
|
||||
|
||||
Yes, Hive is fully open-source under the Apache License 2.0. We actively encourage community contributions and collaboration.
|
||||
|
||||
**Q: Does Hive collect data from users?**
|
||||
|
||||
Hive collects telemetry data for monitoring and observability purposes, including token usage, latency metrics, and cost tracking. Content capture (prompts and responses) is configurable and stored with team-scoped data isolation. All data stays within your infrastructure when self-hosted.
|
||||
|
||||
**Q: What deployment options does Hive support?**
|
||||
|
||||
Hive supports self-hosted deployments via Python packages. See the [Environment Setup Guide](ENVIRONMENT_SETUP.md) for installation instructions. Cloud deployment options and Kubernetes-ready configurations are on the roadmap.
|
||||
|
||||
**Q: Can Hive handle complex, production-scale use cases?**
|
||||
|
||||
Yes. Hive is explicitly designed for production environments with features like automatic failure recovery, real-time observability, cost controls, and horizontal scaling support. The framework handles both simple automations and complex multi-agent workflows.
|
||||
|
||||
**Q: Does Hive support human-in-the-loop workflows?**
|
||||
|
||||
Yes, Hive fully supports human-in-the-loop workflows through intervention nodes that pause execution for human input. These include configurable timeouts and escalation policies, allowing seamless collaboration between human experts and AI agents.
|
||||
|
||||
**Q: What monitoring and debugging tools does Hive provide?**
|
||||
|
||||
Hive includes comprehensive observability features: real-time WebSocket streaming for live agent execution monitoring, TimescaleDB-powered analytics for cost and performance metrics, health check endpoints for Kubernetes integration, and MCP tools for agent execution, including file operations, web search, data processing, and more.
|
||||
Yes, Hive fully supports [human-in-the-loop](docs/key_concepts/graph.md#human-in-the-loop) workflows through intervention nodes that pause execution for human input. These include configurable timeouts and escalation policies, allowing seamless collaboration between human experts and AI agents.
|
||||
|
||||
**Q: What programming languages does Hive support?**
|
||||
|
||||
The Hive framework is built in Python. A JavaScript/TypeScript SDK is on the roadmap.
|
||||
|
||||
**Q: Can Aden agents interact with external tools and APIs?**
|
||||
**Q: Can Hive agents interact with external tools and APIs?**
|
||||
|
||||
Yes. Hive's SDK-wrapped nodes provide built-in tool access, and the framework supports flexible tool ecosystems. Agents can integrate with external APIs, databases, and services through the node architecture.
|
||||
|
||||
@@ -423,7 +472,7 @@ Hive provides granular budget controls including spending limits, throttles, and
|
||||
|
||||
**Q: Where can I find examples and documentation?**
|
||||
|
||||
Visit [docs.adenhq.com](https://docs.adenhq.com/) for complete guides, API reference, and getting started tutorials. The repository also includes documentation in the `docs/` folder and a comprehensive [DEVELOPER.md](DEVELOPER.md) guide.
|
||||
Visit [docs.adenhq.com](https://docs.adenhq.com/) for complete guides, API reference, and getting started tutorials. The repository also includes documentation in the `docs/` folder and a comprehensive [developer guide](docs/developer-guide.md).
|
||||
|
||||
**Q: How can I contribute to Aden?**
|
||||
|
||||
@@ -437,10 +486,6 @@ Aden's adaptation loop begins working from the first execution. When an agent fa
|
||||
|
||||
Hive focuses on generating agents that run real business processes, rather than generic agents. This vision emphasizes outcome-driven design, adaptability, and an easy-to-use set of tools and integrations.
|
||||
|
||||
**Q: Does Aden offer enterprise support?**
|
||||
|
||||
For enterprise inquiries, contact the Aden team through [adenhq.com](https://adenhq.com) or join our [Discord community](https://discord.com/invite/MXE49hrKDk) for support and discussions.
|
||||
|
||||
---
|
||||
|
||||
<p align="center">
|
||||
|
||||
-299
@@ -1,299 +0,0 @@
|
||||
# Product Roadmap
|
||||
|
||||
Aden Agent Framework aims to help developers build outcome-oriented, self-adaptive agents. Please find our roadmap below.
|
||||
|
||||
```mermaid
|
||||
flowchart TD
|
||||
subgraph Foundation
|
||||
direction LR
|
||||
subgraph arch["Architecture"]
|
||||
a1["Node-Based Architecture"]:::done
|
||||
a2["Python SDK"]:::done
|
||||
a3["LLM Integration"]:::done
|
||||
a4["Communication Protocol"]:::done
|
||||
end
|
||||
subgraph ca["Coding Agent"]
|
||||
b1["Goal Creation Session"]:::done
|
||||
b2["Worker Agent Creation"]
|
||||
b3["MCP Tools"]:::done
|
||||
end
|
||||
subgraph wa["Worker Agent"]
|
||||
c1["Human-in-the-Loop"]:::done
|
||||
c2["Callback Handlers"]:::done
|
||||
c3["Intervention Points"]:::done
|
||||
c4["Streaming Interface"]
|
||||
end
|
||||
subgraph cred["Credentials"]
|
||||
d1["Setup Process"]:::done
|
||||
d2["Pluggable Sources"]:::done
|
||||
d3["Enterprise Secrets"]
|
||||
d4["Integration Tools"]:::done
|
||||
end
|
||||
subgraph tools["Tools"]
|
||||
e1["File Use"]:::done
|
||||
e2["Memory STM/LTM"]:::done
|
||||
e3["Web Search/Scraper"]:::done
|
||||
e4["CSV/PDF"]:::done
|
||||
e5["Excel/Email"]
|
||||
end
|
||||
subgraph core["Core"]
|
||||
f1["Eval System"]
|
||||
f2["Pydantic Validation"]:::done
|
||||
f3["Documentation"]:::done
|
||||
f4["Adaptiveness"]
|
||||
f5["Sample Agents"]
|
||||
end
|
||||
end
|
||||
|
||||
subgraph Expansion
|
||||
direction LR
|
||||
subgraph intel["Intelligence"]
|
||||
g1["Guardrails"]
|
||||
g2["Streaming Mode"]
|
||||
g3["Image Generation"]
|
||||
g4["Semantic Search"]
|
||||
end
|
||||
subgraph mem["Memory Iteration"]
|
||||
h1["Message Model & Sessions"]
|
||||
h2["Storage Migration"]
|
||||
h3["Context Building"]
|
||||
h4["Proactive Compaction"]
|
||||
h5["Token Tracking"]
|
||||
end
|
||||
subgraph evt["Event System"]
|
||||
i1["Event Bus for Nodes"]
|
||||
end
|
||||
subgraph cas["Coding Agent Support"]
|
||||
j1["Claude Code"]
|
||||
j2["Cursor"]
|
||||
j3["Opencode"]
|
||||
j4["Antigravity"]
|
||||
end
|
||||
subgraph plat["Platform"]
|
||||
k1["JavaScript/TypeScript SDK"]
|
||||
k2["Custom Tool Integrator"]
|
||||
k3["Windows Support"]
|
||||
end
|
||||
subgraph dep["Deployment"]
|
||||
l1["Self-Hosted"]
|
||||
l2["Cloud Services"]
|
||||
l3["CI/CD Pipeline"]
|
||||
end
|
||||
subgraph tmpl["Templates"]
|
||||
m1["Sales Agent"]
|
||||
m2["Marketing Agent"]
|
||||
m3["Analytics Agent"]
|
||||
m4["Training Agent"]
|
||||
m5["Smart Form Agent"]
|
||||
end
|
||||
end
|
||||
|
||||
classDef done fill:#9e9e9e,color:#fff,stroke:#757575
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Phase 1: Foundation
|
||||
|
||||
### Backbone Architecture
|
||||
- [ ] **Node-Based Architecture (Agent as a node)**
|
||||
- [x] Object schema definition
|
||||
- [x] Node wrapper SDK
|
||||
- [x] Shared memory access
|
||||
- [ ] Default monitoring hooks
|
||||
- [x] Tool access layer
|
||||
- [x] LLM integration layer (Natively supports all mainstream LLMs through LiteLLM)
|
||||
- [x] Anthropic
|
||||
- [x] OpenAI
|
||||
- [x] Google
|
||||
- [x] **Communication protocol between nodes**
|
||||
- [x] **[Coding Agent] Goal Creation Session** (separate from coding session)
|
||||
- [x] Instruction back and forth
|
||||
- [x] Goal Object schema definition
|
||||
- [x] Being able to generate the test cases
|
||||
- [x] Test case validation for worker agent (Outcome driven)
|
||||
- [ ] **[Coding Agent] Worker Agent Creation**
|
||||
- [x] Coding Agent tools
|
||||
- [ ] Use Template Agent as a start
|
||||
- [x] Use our MCP tools
|
||||
- [ ] **[Worker Agent] Human-in-the-Loop**
|
||||
- [x] Worker Agents request with questions and options
|
||||
- [x] Callback Handler System to receive events throughout execution
|
||||
- [x] Tool-Based Intervention Points (tool to pause execution and request human input)
|
||||
- [x] Multiple entrypoint for different event source (e.g. Human input, webhook)
|
||||
- [ ] Streaming Interface for Real-time Monitoring
|
||||
- [x] Request State Management
|
||||
|
||||
### Credential Management
|
||||
- [x] **Credentials Setup Process**
|
||||
- [x] Install Credential MCP
|
||||
- [x] **Pluggable Credential Sources**
|
||||
- [x] **Abstraction & Local Sources**
|
||||
- [x] Introduce `CredentialSource` base class
|
||||
- [x] Refactor existing logic into `EnvVarSource`
|
||||
- [x] Implementation of Source Priority Chain mechanism
|
||||
- [ ] Foundation unit tests
|
||||
- [ ] **Enterprise Secret Managers**
|
||||
- [x] `VaultSource` (HashiCorp Vault)
|
||||
- [ ] `AWSSecretsSource` (AWS Secrets Manager)
|
||||
- [ ] `AzureKeyVaultSource` (Azure Key Vault)
|
||||
- [ ] Management of optional provider dependencies
|
||||
- [ ] **Advanced Features**
|
||||
- [x] Credential expiration and auto-refresh
|
||||
- [ ] Audit logging for compliance/tracking
|
||||
- [ ] Per-environment configuration support
|
||||
- [ ] **Documentation & DX**
|
||||
- [ ] Comprehensive source documentation
|
||||
- [ ] Example configurations for all providers
|
||||
- [x] **Integration as tools coverage**
|
||||
- [x] Gsuite Tools
|
||||
- [x] Social Media
|
||||
- [ ] Twitter(X)
|
||||
- [x] Github
|
||||
- [ ] Instagram
|
||||
- [ ] SAAS
|
||||
- [ ] Hubspot
|
||||
- [ ] Slack
|
||||
- [ ] Teams
|
||||
- [ ] Zoom
|
||||
- [ ] Stripe
|
||||
- [ ] Salesforce
|
||||
|
||||
> [!IMPORTANT]
|
||||
> **Community Contribution Wanted**: We appreciate help from the community to expand the "Integration as tools" capability. Open an issue for the integration you would like Hive to support!
|
||||
|
||||
### Essential Tools
|
||||
- [x] **File Use Tool Kit**
|
||||
- [x] **Memory Tools**
|
||||
- [x] STM Layer Tool (state-based short-term memory)
|
||||
- [x] LTM Layer Tool (RLM - long-term memory)
|
||||
- [ ] **Infrastructure Tools**
|
||||
- [x] Runtime Log Tool (logs for coding agent)
|
||||
- [x] Web Search
|
||||
- [x] Web Scraper
|
||||
- [x] CSV tools
|
||||
- [x] PDF tools
|
||||
- [ ] Excel tools
|
||||
- [ ] Email Tools
|
||||
- [ ] Recipe for "Add your own tools"
|
||||
|
||||
### Memory & File System
|
||||
- [x] DB for long-term persistent memory (Filesystem as durable scratchpad pattern)
|
||||
- [x] Session Local memory isolation
|
||||
|
||||
### Eval System (Basic)
|
||||
- [x] Test Driven - Run test case for all agent iteration
|
||||
- [ ] Failure recording mechanism
|
||||
- [ ] SDK for defining failure conditions
|
||||
- [ ] Basic observability hooks
|
||||
- [ ] User-driven log analysis (OSS approach)
|
||||
|
||||
### Data Validation
|
||||
- [x] Natively Support data validation of LLMs output with Pydantic
|
||||
|
||||
### Developer Experience
|
||||
- [ ] **MVP Features**
|
||||
- [ ] Debugging mode
|
||||
- [ ] CLI tools for memory management
|
||||
- [ ] CLI tools for credential management
|
||||
- [ ] **MVP Resources & Documentation**
|
||||
- [x] Quick start guide
|
||||
- [x] Goal creation guide
|
||||
- [x] Agent creation guide
|
||||
- [x] GitHub Page setup
|
||||
- [x] README with examples
|
||||
- [x] Contributing guidelines
|
||||
- [ ] Introduction Video
|
||||
|
||||
### Adaptiveness
|
||||
- [ ] Runtime data feedback loop
|
||||
- [ ] Instant Developer Feedback for improvement
|
||||
|
||||
### Sample Agents
|
||||
- [ ] Knowledge Agent
|
||||
- [ ] Blog Writer Agent
|
||||
- [ ] SDR Agent
|
||||
|
||||
---
|
||||
|
||||
## Phase 2: Expansion
|
||||
|
||||
### Basic Guardrails
|
||||
- [ ] Support Basic Monitoring from Agent node SDK
|
||||
- [ ] SDK guardrail implementation (in node)
|
||||
- [ ] Guardrail type support (Determined Condition as Guardrails)
|
||||
|
||||
### Agent Capability
|
||||
- [ ] Streaming mode support
|
||||
- [ ] Image Generation support
|
||||
- [ ] Support understanding of end-user-provided images and flat files
|
||||
|
||||
### Event-loop For Nodes (Opencode-style)
|
||||
- [ ] **Event bus**
|
||||
|
||||
### Memory System Iteration
|
||||
- [ ] **Message Model & Session Management**
|
||||
- [ ] Introduce `Message` class with structured content types
|
||||
- [ ] Implement `Session` classes for conversation state
|
||||
- [ ] **Storage Migration**
|
||||
- [ ] Implement granular per-message file persistence (`/message/[agentID]/...`)
|
||||
- [ ] Migrate from monolithic run storage
|
||||
- [ ] **Context Building & Conversation Loop**
|
||||
- [ ] Implement `Message.stream(sessionID)`
|
||||
- [ ] Update `LLMNode.execute()` for full context building
|
||||
- [ ] Implement `Message.toModelMessages()` conversion
|
||||
- [ ] **Proactive Compaction**
|
||||
- [ ] Implement proactive overflow detection
|
||||
- [ ] Develop backward-scanning pruning strategy (e.g., clearing old tool outputs)
|
||||
- [ ] **Enhanced Token Tracking**
|
||||
- [ ] Extend `LLMResponse` to track reasoning and cache tokens
|
||||
- [ ] Integrate granular token metrics into compaction logic
|
||||
|
||||
### Coding Agent Support
|
||||
- [ ] Claude Code
|
||||
- [ ] Cursor
|
||||
- [ ] Opencode
|
||||
- [ ] Antigravity
|
||||
|
||||
### File System Enhancement
|
||||
- [ ] Semantic Search integration
|
||||
- [ ] Interactive File System in product (frontend integration)
|
||||
|
||||
### More Worker Tools
|
||||
- [ ] Custom Tool Integrator
|
||||
- [ ] Integration as a tool (Credential Store & Support)
|
||||
- [ ] **Core Agent Tools**
|
||||
- [ ] Node Discovery Tool (find other agents in the graph)
|
||||
- [ ] HITL Tool (pause execution for human approval)
|
||||
- [ ] Wake-up Tool (resume agent tasks)
|
||||
|
||||
### Deployment (Self-Hosted)
|
||||
- [ ] Worker agent docker container standardization
|
||||
- [ ] Headless backend execution
|
||||
- [ ] Exposed API for frontend attachment
|
||||
- [ ] Local monitoring & observability
|
||||
- [ ] Basic lifecycle APIs (Start, Stop, Pause, Resume)
|
||||
|
||||
### Deployment (Cloud)
|
||||
- [ ] Cloud Service Options
|
||||
- [ ] Support deployment to 3rd-party platforms
|
||||
- [ ] Self-deploy + orchestrator connection
|
||||
- [ ] **CI/CD Pipeline**
|
||||
- [ ] Automated test execution
|
||||
- [ ] Agent version control
|
||||
- [ ] All tests must pass for deployment
|
||||
|
||||
### Developer Experience Enhancement
|
||||
- [ ] Tool usage documentation
|
||||
- [ ] Discord Support Channel
|
||||
|
||||
### More Agent Templates
|
||||
- [ ] GTM Sales Agent (workflow)
|
||||
- [ ] GTM Marketing Agent (workflow)
|
||||
- [ ] Analytics Agent
|
||||
- [ ] Training Agent
|
||||
- [ ] Smart Entry / Form Agent (self-evolution emphasis)
|
||||
|
||||
### Cross-Platform
|
||||
- [ ] JavaScript / TypeScript Version SDK
|
||||
- [ ] Better Windows support
|
||||
@@ -1,4 +1,5 @@
|
||||
exports/
|
||||
docs/
|
||||
.agent-builder-sessions/
|
||||
.pytest_cache/
|
||||
**/__pycache__/
|
||||
@@ -82,7 +82,7 @@ Register an MCP server as a tool source for your agent.
|
||||
"example_tool"
|
||||
],
|
||||
"total_mcp_servers": 1,
|
||||
"note": "MCP server 'tools' registered with 6 tools. These tools can now be used in llm_tool_use nodes."
|
||||
"note": "MCP server 'tools' registered with 6 tools. These tools can now be used in event_loop nodes."
|
||||
}
|
||||
```
|
||||
|
||||
@@ -149,7 +149,7 @@ List tools available from registered MCP servers.
|
||||
]
|
||||
},
|
||||
"total_tools": 6,
|
||||
"note": "Use these tool names in the 'tools' parameter when adding llm_tool_use nodes"
|
||||
"note": "Use these tool names in the 'tools' parameter when adding event_loop nodes"
|
||||
}
|
||||
```
|
||||
|
||||
@@ -246,7 +246,7 @@ Here's a complete workflow for building an agent with MCP tools:
|
||||
"node_id": "web-searcher",
|
||||
"name": "Web Search",
|
||||
"description": "Search the web for information",
|
||||
"node_type": "llm_tool_use",
|
||||
"node_type": "event_loop",
|
||||
"input_keys": "[\"query\"]",
|
||||
"output_keys": "[\"search_results\"]",
|
||||
"system_prompt": "Search for {query} using the web_search tool",
|
||||
|
||||
@@ -119,7 +119,7 @@ builder = WorkflowBuilder()
|
||||
builder.add_node(
|
||||
node_id="researcher",
|
||||
name="Web Researcher",
|
||||
node_type="llm_tool_use",
|
||||
node_type="event_loop",
|
||||
system_prompt="Research the topic using web_search",
|
||||
tools=["web_search"], # Tool from tools MCP server
|
||||
input_keys=["topic"],
|
||||
@@ -137,7 +137,7 @@ Tools from MCP servers can be referenced in your agent.json just like built-in t
|
||||
{
|
||||
"id": "searcher",
|
||||
"name": "Web Searcher",
|
||||
"node_type": "llm_tool_use",
|
||||
"node_type": "event_loop",
|
||||
"system_prompt": "Search for information about {topic}",
|
||||
"tools": ["web_search", "web_scrape"],
|
||||
"input_keys": ["topic"],
|
||||
|
||||
+17
-70
@@ -103,31 +103,20 @@ Add a processing node to the agent graph.
|
||||
- `node_id` (string, required): Unique node identifier
|
||||
- `name` (string, required): Human-readable name
|
||||
- `description` (string, required): What this node does
|
||||
- `node_type` (string, required): One of: `llm_generate`, `llm_tool_use`, `router`, `function`
|
||||
- `node_type` (string, required): Must be `event_loop` (the only valid type)
|
||||
- `input_keys` (string, required): JSON array of input variable names
|
||||
- `output_keys` (string, required): JSON array of output variable names
|
||||
- `system_prompt` (string, optional): System prompt for LLM nodes
|
||||
- `tools` (string, optional): JSON array of tool names for tool_use nodes
|
||||
- `routes` (string, optional): JSON object of route mappings for router nodes
|
||||
- `system_prompt` (string, optional): System prompt for the LLM
|
||||
- `tools` (string, optional): JSON array of tool names
|
||||
- `client_facing` (boolean, optional): Set to true for human-in-the-loop interaction
|
||||
|
||||
**Node Types:**
|
||||
**Node Type:**
|
||||
|
||||
1. **llm_generate**: Uses LLM to generate output from inputs
|
||||
- Requires: `system_prompt`
|
||||
- Tools: Not used
|
||||
|
||||
2. **llm_tool_use**: Uses LLM with tools to accomplish tasks
|
||||
- Requires: `system_prompt`, `tools`
|
||||
- Tools: Array of tool names (e.g., `["web_search", "web_fetch"]`)
|
||||
|
||||
3. **router**: LLM-powered routing to different paths
|
||||
- Requires: `system_prompt`, `routes`
|
||||
- Routes: Object mapping route names to target node IDs
|
||||
- Example: `{"pass": "success_node", "fail": "retry_node"}`
|
||||
|
||||
4. **function**: Executes a pre-defined function
|
||||
- System prompt describes the function behavior
|
||||
- No LLM calls, pure computation
|
||||
**event_loop**: LLM-powered node with self-correction loop
|
||||
- Requires: `system_prompt`
|
||||
- Optional: `tools` (array of tool names, e.g., `["web_search", "web_fetch"]`)
|
||||
- Optional: `client_facing` (set to true for HITL / user interaction)
|
||||
- Supports: iterative refinement, judge-based evaluation, tool use, streaming
|
||||
|
||||
**Example:**
|
||||
```json
|
||||
@@ -135,7 +124,7 @@ Add a processing node to the agent graph.
|
||||
"node_id": "search_sources",
|
||||
"name": "Search Sources",
|
||||
"description": "Searches for relevant sources on the topic",
|
||||
"node_type": "llm_tool_use",
|
||||
"node_type": "event_loop",
|
||||
"input_keys": "[\"topic\", \"search_queries\"]",
|
||||
"output_keys": "[\"sources\", \"source_count\"]",
|
||||
"system_prompt": "Search for sources using the provided queries...",
|
||||
@@ -198,7 +187,7 @@ Export the validated graph as an agent specification.
|
||||
|
||||
**What it does:**
|
||||
1. Validates the graph
|
||||
2. Auto-generates missing edges from router routes
|
||||
2. Validates edge connectivity
|
||||
3. Writes files to disk:
|
||||
- `exports/{agent-name}/agent.json` - Full agent specification
|
||||
- `exports/{agent-name}/README.md` - Auto-generated documentation
|
||||
@@ -252,47 +241,6 @@ Test the complete agent graph with sample inputs.
|
||||
|
||||
---
|
||||
|
||||
### Evaluation Rules
|
||||
|
||||
#### `add_evaluation_rule`
|
||||
Add a rule for the HybridJudge to evaluate node outputs.
|
||||
|
||||
**Parameters:**
|
||||
- `rule_id` (string, required): Unique rule identifier
|
||||
- `description` (string, required): What this rule checks
|
||||
- `condition` (string, required): Python expression to evaluate
|
||||
- `action` (string, required): Action to take: `accept`, `retry`, `escalate`
|
||||
- `priority` (integer, optional): Rule priority (default: 0)
|
||||
- `feedback_template` (string, optional): Feedback message template
|
||||
|
||||
**Condition Examples:**
|
||||
- `'result.get("success") == True'` - Check for success flag
|
||||
- `'result.get("error_type") == "timeout"'` - Check error type
|
||||
- `'len(result.get("data", [])) > 0'` - Check for non-empty data
|
||||
|
||||
**Example:**
|
||||
```json
|
||||
{
|
||||
"rule_id": "timeout_retry",
|
||||
"description": "Retry on timeout errors",
|
||||
"condition": "result.get('error_type') == 'timeout'",
|
||||
"action": "retry",
|
||||
"priority": 10,
|
||||
"feedback_template": "Timeout occurred, retrying..."
|
||||
}
|
||||
```
|
||||
|
||||
#### `list_evaluation_rules`
|
||||
List all configured evaluation rules.
|
||||
|
||||
#### `remove_evaluation_rule`
|
||||
Remove an evaluation rule.
|
||||
|
||||
**Parameters:**
|
||||
- `rule_id` (string, required): Rule to remove
|
||||
|
||||
---
|
||||
|
||||
## Example Workflow
|
||||
|
||||
Here's a complete workflow for building a research agent:
|
||||
@@ -320,7 +268,7 @@ add_node(
|
||||
node_id="planner",
|
||||
name="Research Planner",
|
||||
description="Creates research strategy",
|
||||
node_type="llm_generate",
|
||||
node_type="event_loop",
|
||||
input_keys='["topic"]',
|
||||
output_keys='["strategy", "queries"]',
|
||||
system_prompt="Analyze topic and create research plan..."
|
||||
@@ -330,7 +278,7 @@ add_node(
|
||||
node_id="searcher",
|
||||
name="Search Sources",
|
||||
description="Find relevant sources",
|
||||
node_type="llm_tool_use",
|
||||
node_type="event_loop",
|
||||
input_keys='["queries"]',
|
||||
output_keys='["sources"]',
|
||||
system_prompt="Search for sources...",
|
||||
@@ -359,10 +307,9 @@ The exported agent will be saved to `exports/research-agent/`.
|
||||
|
||||
1. **Start with the goal**: Define clear success criteria before building nodes
|
||||
2. **Test nodes individually**: Use `test_node` to verify each node works
|
||||
3. **Use router nodes for branching**: Don't create edges manually for routers - define routes and they'll be auto-generated
|
||||
4. **Add evaluation rules**: Help the judge evaluate outputs deterministically
|
||||
5. **Validate early, validate often**: Run `validate_graph` after adding nodes/edges
|
||||
6. **Check exports**: Review the generated README.md to verify your agent structure
|
||||
3. **Use conditional edges for branching**: Define condition_expr on edges for decision points
|
||||
4. **Validate early, validate often**: Run `validate_graph` after adding nodes/edges
|
||||
5. **Check exports**: Review the generated README.md to verify your agent structure
|
||||
|
||||
---
|
||||
|
||||
|
||||
+2
-2
@@ -73,7 +73,7 @@ To use the agent builder with Claude Desktop or other MCP clients, add this to y
|
||||
The MCP server provides tools for:
|
||||
- Creating agent building sessions
|
||||
- Defining goals with success criteria
|
||||
- Adding nodes (llm_generate, llm_tool_use, router, function)
|
||||
- Adding nodes (event_loop only)
|
||||
- Connecting nodes with edges
|
||||
- Validating and exporting agent graphs
|
||||
- Testing nodes and full agent graphs
|
||||
@@ -145,7 +145,7 @@ uv run python -m framework test-debug <agent_path> <test_name>
|
||||
uv run python -m framework test-list <goal_id>
|
||||
```
|
||||
|
||||
For detailed testing workflows, see the [testing-agent skill](../.claude/skills/testing-agent/SKILL.md).
|
||||
For detailed testing workflows, see the [hive-test skill](../.claude/skills/hive-test/SKILL.md).
|
||||
|
||||
### Analyzing Agent Behavior with Builder
|
||||
|
||||
|
||||
@@ -68,7 +68,7 @@ from framework.graph.event_loop_node import ( # noqa: E402
|
||||
)
|
||||
from framework.graph.executor import GraphExecutor # noqa: E402
|
||||
from framework.graph.goal import Goal # noqa: E402
|
||||
from framework.graph.node import NodeSpec # noqa: E402
|
||||
from framework.graph.node import NodeContext, NodeProtocol, NodeResult, NodeSpec # noqa: E402
|
||||
from framework.llm.litellm import LiteLLMProvider # noqa: E402
|
||||
from framework.runner.tool_registry import ToolRegistry # noqa: E402
|
||||
from framework.runtime.core import Runtime # noqa: E402
|
||||
@@ -654,7 +654,7 @@ NODE_SPECS = {
|
||||
id="sender",
|
||||
name="Sender",
|
||||
description="Send approved campaign emails",
|
||||
node_type="function",
|
||||
node_type="event_loop",
|
||||
input_keys=["approved_emails"],
|
||||
output_keys=["send_results"],
|
||||
),
|
||||
@@ -823,11 +823,20 @@ def _send_email_via_resend(
|
||||
return {"error": f"Network error: {e}"}
|
||||
|
||||
|
||||
class SenderNode(NodeProtocol):
|
||||
"""Node wrapper for send_emails function."""
|
||||
|
||||
async def execute(self, ctx: NodeContext) -> NodeResult:
|
||||
approved = ctx.input_data.get("approved_emails", "")
|
||||
result_str = send_emails(approved_emails=approved)
|
||||
ctx.memory.write("send_results", result_str)
|
||||
return NodeResult(success=True, output={"send_results": result_str})
|
||||
|
||||
|
||||
def send_emails(approved_emails: str = "") -> str:
|
||||
"""Send approved campaign emails via Resend, or log if unconfigured.
|
||||
|
||||
Called by FunctionNode which unpacks input_keys as kwargs.
|
||||
Returns a JSON string (FunctionNode wraps it in NodeResult).
|
||||
Returns a JSON string.
|
||||
"""
|
||||
approved = approved_emails
|
||||
if not approved:
|
||||
@@ -1780,7 +1789,7 @@ async def _run_pipeline(websocket, initial_message: str):
|
||||
)
|
||||
for nid, impl in nodes.items():
|
||||
executor.register_node(nid, impl)
|
||||
executor.register_function("sender", send_emails)
|
||||
executor.register_node("sender", SenderNode())
|
||||
|
||||
# --- Event forwarding: bus → WebSocket ---
|
||||
|
||||
|
||||
@@ -4,8 +4,8 @@ Minimal Manual Agent Example
|
||||
This example demonstrates how to build and run an agent programmatically
|
||||
without using the Claude Code CLI or external LLM APIs.
|
||||
|
||||
It uses 'function' nodes to define logic in pure Python, making it perfect
|
||||
for understanding the core runtime loop:
|
||||
It uses custom NodeProtocol implementations to define logic in pure Python,
|
||||
making it perfect for understanding the core runtime loop:
|
||||
Setup -> Graph definition -> Execution -> Result
|
||||
|
||||
Run with:
|
||||
@@ -16,22 +16,33 @@ import asyncio
|
||||
|
||||
from framework.graph import EdgeCondition, EdgeSpec, Goal, GraphSpec, NodeSpec
|
||||
from framework.graph.executor import GraphExecutor
|
||||
from framework.graph.node import NodeContext, NodeProtocol, NodeResult
|
||||
from framework.runtime.core import Runtime
|
||||
|
||||
|
||||
# 1. Define Node Logic (Pure Python Functions)
|
||||
def greet(name: str) -> str:
|
||||
# 1. Define Node Logic (Custom NodeProtocol implementations)
|
||||
class GreeterNode(NodeProtocol):
|
||||
"""Generate a simple greeting."""
|
||||
return f"Hello, {name}!"
|
||||
|
||||
async def execute(self, ctx: NodeContext) -> NodeResult:
|
||||
name = ctx.input_data.get("name", "World")
|
||||
greeting = f"Hello, {name}!"
|
||||
ctx.memory.write("greeting", greeting)
|
||||
return NodeResult(success=True, output={"greeting": greeting})
|
||||
|
||||
|
||||
def uppercase(greeting: str) -> str:
|
||||
class UppercaserNode(NodeProtocol):
|
||||
"""Convert text to uppercase."""
|
||||
return greeting.upper()
|
||||
|
||||
async def execute(self, ctx: NodeContext) -> NodeResult:
|
||||
greeting = ctx.input_data.get("greeting") or ctx.memory.read("greeting") or ""
|
||||
result = greeting.upper()
|
||||
ctx.memory.write("final_greeting", result)
|
||||
return NodeResult(success=True, output={"final_greeting": result})
|
||||
|
||||
|
||||
async def main():
|
||||
print("🚀 Setting up Manual Agent...")
|
||||
print("Setting up Manual Agent...")
|
||||
|
||||
# 2. Define the Goal
|
||||
# Every agent needs a goal with success criteria
|
||||
@@ -55,8 +66,7 @@ async def main():
|
||||
id="greeter",
|
||||
name="Greeter",
|
||||
description="Generates a simple greeting",
|
||||
node_type="function",
|
||||
function="greet", # Matches the registered function name
|
||||
node_type="event_loop",
|
||||
input_keys=["name"],
|
||||
output_keys=["greeting"],
|
||||
)
|
||||
@@ -65,8 +75,7 @@ async def main():
|
||||
id="uppercaser",
|
||||
name="Uppercaser",
|
||||
description="Converts greeting to uppercase",
|
||||
node_type="function",
|
||||
function="uppercase",
|
||||
node_type="event_loop",
|
||||
input_keys=["greeting"],
|
||||
output_keys=["final_greeting"],
|
||||
)
|
||||
@@ -98,23 +107,23 @@ async def main():
|
||||
runtime = Runtime(storage_path=Path("./agent_logs"))
|
||||
executor = GraphExecutor(runtime=runtime)
|
||||
|
||||
# 7. Register Function Implementations
|
||||
# Connect string names in NodeSpecs to actual Python functions
|
||||
executor.register_function("greeter", greet)
|
||||
executor.register_function("uppercaser", uppercase)
|
||||
# 7. Register Node Implementations
|
||||
# Connect node IDs in the graph to actual Python implementations
|
||||
executor.register_node("greeter", GreeterNode())
|
||||
executor.register_node("uppercaser", UppercaserNode())
|
||||
|
||||
# 8. Execute Agent
|
||||
print("▶ Executing agent with input: name='Alice'...")
|
||||
print("Executing agent with input: name='Alice'...")
|
||||
|
||||
result = await executor.execute(graph=graph, goal=goal, input_data={"name": "Alice"})
|
||||
|
||||
# 9. Verify Results
|
||||
if result.success:
|
||||
print("\n✅ Success!")
|
||||
print("\nSuccess!")
|
||||
print(f"Path taken: {' -> '.join(result.path)}")
|
||||
print(f"Final output: {result.output.get('final_greeting')}")
|
||||
else:
|
||||
print(f"\n❌ Failed: {result.error}")
|
||||
print(f"\nFailed: {result.error}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
@@ -122,7 +122,7 @@ async def example_4_custom_agent_with_mcp_tools():
|
||||
node_id="web-searcher",
|
||||
name="Web Search",
|
||||
description="Search the web for information",
|
||||
node_type="llm_tool_use",
|
||||
node_type="event_loop",
|
||||
system_prompt="Search for {query} and return the top results. Use the web_search tool.",
|
||||
tools=["web_search"], # This tool comes from tools MCP server
|
||||
input_keys=["query"],
|
||||
@@ -133,7 +133,7 @@ async def example_4_custom_agent_with_mcp_tools():
|
||||
node_id="summarizer",
|
||||
name="Summarize Results",
|
||||
description="Summarize the search results",
|
||||
node_type="llm_generate",
|
||||
node_type="event_loop",
|
||||
system_prompt="Summarize the following search results in 2-3 sentences: {search_results}",
|
||||
input_keys=["search_results"],
|
||||
output_keys=["summary"],
|
||||
|
||||
@@ -4,8 +4,8 @@
|
||||
"name": "tools",
|
||||
"description": "Aden tools including web search, file operations, and PDF reading",
|
||||
"transport": "stdio",
|
||||
"command": "python",
|
||||
"args": ["mcp_server.py", "--stdio"],
|
||||
"command": "uv",
|
||||
"args": ["run", "python", "mcp_server.py", "--stdio"],
|
||||
"cwd": "../tools",
|
||||
"env": {
|
||||
"BRAVE_SEARCH_API_KEY": "${BRAVE_SEARCH_API_KEY}"
|
||||
|
||||
@@ -0,0 +1,13 @@
|
||||
"""Framework-provided agents."""
|
||||
|
||||
from pathlib import Path
|
||||
|
||||
FRAMEWORK_AGENTS_DIR = Path(__file__).parent
|
||||
|
||||
|
||||
def list_framework_agents() -> list[Path]:
|
||||
"""List all framework agent directories."""
|
||||
return sorted(
|
||||
[p for p in FRAMEWORK_AGENTS_DIR.iterdir() if p.is_dir() and (p / "agent.py").exists()],
|
||||
key=lambda p: p.name,
|
||||
)
|
||||
@@ -0,0 +1,55 @@
|
||||
"""
|
||||
Credential Tester — verify credentials (Aden OAuth + local API keys) via live API calls.
|
||||
|
||||
Interactive agent that lists all testable accounts, lets the user pick one,
|
||||
loads the provider's tools, and runs a chat session to test the credential.
|
||||
"""
|
||||
|
||||
from .agent import (
|
||||
CredentialTesterAgent,
|
||||
_list_aden_accounts,
|
||||
_list_env_fallback_accounts,
|
||||
_list_local_accounts,
|
||||
configure_for_account,
|
||||
conversation_mode,
|
||||
edges,
|
||||
entry_node,
|
||||
entry_points,
|
||||
get_tools_for_provider,
|
||||
goal,
|
||||
identity_prompt,
|
||||
list_connected_accounts,
|
||||
loop_config,
|
||||
nodes,
|
||||
pause_nodes,
|
||||
requires_account_selection,
|
||||
skip_credential_validation,
|
||||
terminal_nodes,
|
||||
)
|
||||
from .config import default_config
|
||||
|
||||
__version__ = "1.0.0"
|
||||
|
||||
__all__ = [
|
||||
"CredentialTesterAgent",
|
||||
"configure_for_account",
|
||||
"conversation_mode",
|
||||
"default_config",
|
||||
"edges",
|
||||
"entry_node",
|
||||
"entry_points",
|
||||
"get_tools_for_provider",
|
||||
"goal",
|
||||
"identity_prompt",
|
||||
"list_connected_accounts",
|
||||
"loop_config",
|
||||
"nodes",
|
||||
"pause_nodes",
|
||||
"requires_account_selection",
|
||||
"skip_credential_validation",
|
||||
"terminal_nodes",
|
||||
# Internal list helpers (exposed for testing)
|
||||
"_list_aden_accounts",
|
||||
"_list_local_accounts",
|
||||
"_list_env_fallback_accounts",
|
||||
]
|
||||
@@ -0,0 +1,148 @@
|
||||
"""CLI entry point for Credential Tester agent."""
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
import sys
|
||||
|
||||
import click
|
||||
|
||||
from .agent import CredentialTesterAgent
|
||||
|
||||
|
||||
def setup_logging(verbose=False, debug=False):
|
||||
if debug:
|
||||
level, fmt = logging.DEBUG, "%(asctime)s %(name)s: %(message)s"
|
||||
elif verbose:
|
||||
level, fmt = logging.INFO, "%(message)s"
|
||||
else:
|
||||
level, fmt = logging.WARNING, "%(levelname)s: %(message)s"
|
||||
logging.basicConfig(level=level, format=fmt, stream=sys.stderr)
|
||||
|
||||
|
||||
def pick_account(agent: CredentialTesterAgent) -> dict | None:
|
||||
"""Interactive account picker. Returns selected account dict or None."""
|
||||
accounts = agent.list_accounts()
|
||||
if not accounts:
|
||||
click.echo("No connected accounts found.")
|
||||
click.echo("Set ADEN_API_KEY and connect accounts at https://app.adenhq.com")
|
||||
return None
|
||||
|
||||
click.echo("\nConnected accounts:\n")
|
||||
for i, acct in enumerate(accounts, 1):
|
||||
provider = acct.get("provider", "?")
|
||||
alias = acct.get("alias", "?")
|
||||
identity = acct.get("identity", {})
|
||||
detail_parts = [f"{k}: {v}" for k, v in identity.items() if v]
|
||||
detail = f" ({', '.join(detail_parts)})" if detail_parts else ""
|
||||
click.echo(f" {i}. {provider}/{alias}{detail}")
|
||||
|
||||
click.echo()
|
||||
while True:
|
||||
choice = click.prompt("Pick an account to test", type=int, default=1)
|
||||
if 1 <= choice <= len(accounts):
|
||||
return accounts[choice - 1]
|
||||
click.echo(f"Invalid choice. Enter 1-{len(accounts)}.")
|
||||
|
||||
|
||||
@click.group()
|
||||
@click.version_option(version="1.0.0")
|
||||
def cli():
|
||||
"""Credential Tester — verify synced credentials via live API calls."""
|
||||
pass
|
||||
|
||||
|
||||
@cli.command()
@click.option("--verbose", "-v", is_flag=True)
@click.option("--debug", is_flag=True)
def tui(verbose, debug):
    """Launch TUI to test a credential interactively."""
    setup_logging(verbose=verbose, debug=debug)

    # textual is an optional dependency — fail with a friendly hint, not a traceback.
    try:
        from framework.tui.app import AdenTUI
    except ImportError:
        click.echo("TUI requires 'textual'. Install with: pip install textual")
        sys.exit(1)

    # Pick the account synchronously before any async runtime is started.
    agent = CredentialTesterAgent()
    account = pick_account(agent)
    if account is None:
        sys.exit(1)

    agent.select_account(account)
    provider = account.get("provider", "?")
    alias = account.get("alias", "?")
    click.echo(f"\nTesting {provider}/{alias}...\n")

    async def run_tui():
        # _setup() builds the graph/runtime for the selected account;
        # the runtime must be started before the TUI attaches to it.
        agent._setup()
        runtime = agent._agent_runtime
        await runtime.start()
        try:
            app = AdenTUI(runtime)
            await app.run_async()
        finally:
            # Always stop the runtime, even if the TUI crashes or is quit.
            await runtime.stop()

    asyncio.run(run_tui())
|
||||
|
||||
|
||||
@cli.command()
@click.option("--verbose", "-v", is_flag=True)
@click.option("--debug", is_flag=True)
def shell(verbose, debug):
    """Interactive CLI session to test a credential."""
    setup_logging(verbose=verbose, debug=debug)
    # All interaction happens inside the async helper; this is just the bridge.
    asyncio.run(_interactive_shell(verbose))
|
||||
|
||||
|
||||
async def _interactive_shell(verbose=False):
    """Drive one interactive test session: pick an account, run the agent loop.

    Args:
        verbose: Accepted for interface symmetry with the ``shell`` command;
            NOTE(review): currently unused inside this function — confirm
            whether it should influence output.
    """
    agent = CredentialTesterAgent()
    account = pick_account(agent)
    if account is None:
        # Picker already printed guidance; nothing to do.
        return

    agent.select_account(account)
    provider = account.get("provider", "?")
    alias = account.get("alias", "?")

    click.echo(f"\nTesting {provider}/{alias}")
    click.echo("Type your requests or 'quit' to exit.\n")

    await agent.start()

    try:
        # Blocks until the interactive session's entry point completes.
        result = await agent._agent_runtime.trigger_and_wait(
            entry_point_id="start",
            input_data={},
        )
        if result:
            click.echo(f"\nSession ended: {'success' if result.success else result.error}")
    except KeyboardInterrupt:
        click.echo("\nGoodbye!")
    finally:
        # Runtime shutdown must happen on every exit path.
        await agent.stop()
|
||||
|
||||
|
||||
@cli.command(name="list")
def list_accounts():
    """List all connected accounts."""
    tester = CredentialTesterAgent()
    found = tester.list_accounts()

    if not found:
        click.echo("No connected accounts found.")
        return

    # One line per account: "provider/alias (identity details)".
    click.echo("\nConnected accounts:\n")
    for entry in found:
        provider = entry.get("provider", "?")
        alias = entry.get("alias", "?")
        shown = [f"{k}: {v}" for k, v in entry.get("identity", {}).items() if v]
        detail = f" ({', '.join(shown)})" if shown else ""
        click.echo(f" {provider}/{alias}{detail}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Allow running this module directly as a script.
    cli()
|
||||
@@ -0,0 +1,621 @@
|
||||
"""Credential Tester agent — verify credentials via live API calls.
|
||||
|
||||
Supports both Aden OAuth2-synced accounts AND locally-stored API key accounts.
|
||||
Aden accounts use account="alias" routing; local accounts inject the key into
|
||||
the session environment so tools read it without an account= parameter.
|
||||
|
||||
When loaded via AgentRunner.load() (TUI picker, ``hive run``), the module-level
|
||||
``nodes`` / ``edges`` variables provide a static graph. The TUI detects
|
||||
``requires_account_selection`` and shows an account picker *before* starting
|
||||
the agent. ``configure_for_account()`` then scopes the node's tools to the
|
||||
selected provider.
|
||||
|
||||
When used directly (``CredentialTesterAgent``), the graph is built dynamically
|
||||
after the user picks an account programmatically.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from pathlib import Path
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from framework.graph import Goal, NodeSpec, SuccessCriterion
|
||||
from framework.graph.checkpoint_config import CheckpointConfig
|
||||
from framework.graph.edge import GraphSpec
|
||||
from framework.graph.executor import ExecutionResult
|
||||
from framework.llm import LiteLLMProvider
|
||||
from framework.runner.tool_registry import ToolRegistry
|
||||
from framework.runtime.agent_runtime import AgentRuntime, create_agent_runtime
|
||||
from framework.runtime.execution_stream import EntryPointSpec
|
||||
|
||||
from .config import default_config
|
||||
from .nodes import build_tester_node
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from framework.runner import AgentRunner
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Goal
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
# Single-criterion goal: the agent succeeds when one live API call works.
goal = Goal(
    id="credential-tester",
    name="Credential Tester",
    description="Verify that a credential can make real API calls.",
    success_criteria=[
        SuccessCriterion(
            id="api-call-success",
            description="At least one API call succeeds using the credential",
            metric="api_call_success",
            target="true",
            weight=1.0,
        ),
    ],
    constraints=[],
)
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def get_tools_for_provider(provider_name: str) -> list[str]:
    """Collect tool names for a credential by credential_id OR credential_group.

    Matches on both ``credential_id`` (e.g. "google" → Gmail tools) and
    ``credential_group`` (e.g. "google_custom_search" → all google search tools).
    """
    from aden_tools.credentials import CREDENTIAL_SPECS

    # Accumulate into a set so overlapping specs don't produce duplicates.
    collected: set[str] = set()
    for spec in CREDENTIAL_SPECS.values():
        if provider_name in (spec.credential_id, spec.credential_group):
            collected.update(spec.tools)
    return sorted(collected)
|
||||
|
||||
|
||||
def _list_aden_accounts() -> list[dict]:
|
||||
"""List active accounts from the Aden platform (requires ADEN_API_KEY)."""
|
||||
import os
|
||||
|
||||
api_key = os.environ.get("ADEN_API_KEY")
|
||||
if not api_key:
|
||||
return []
|
||||
|
||||
try:
|
||||
from framework.credentials.aden.client import AdenClientConfig, AdenCredentialClient
|
||||
|
||||
client = AdenCredentialClient(
|
||||
AdenClientConfig(
|
||||
base_url=os.environ.get("ADEN_API_URL", "https://api.adenhq.com"),
|
||||
)
|
||||
)
|
||||
try:
|
||||
integrations = client.list_integrations()
|
||||
finally:
|
||||
client.close()
|
||||
|
||||
return [
|
||||
{
|
||||
"provider": c.provider,
|
||||
"alias": c.alias,
|
||||
"identity": {"email": c.email} if c.email else {},
|
||||
"integration_id": c.integration_id,
|
||||
"source": "aden",
|
||||
}
|
||||
for c in integrations
|
||||
if c.status == "active"
|
||||
]
|
||||
except Exception:
|
||||
return []
|
||||
|
||||
|
||||
def _list_local_accounts() -> list[dict]:
|
||||
"""List named local API key accounts from LocalCredentialRegistry."""
|
||||
try:
|
||||
from framework.credentials.local.registry import LocalCredentialRegistry
|
||||
|
||||
return [
|
||||
info.to_account_dict() for info in LocalCredentialRegistry.default().list_accounts()
|
||||
]
|
||||
except Exception:
|
||||
return []
|
||||
|
||||
|
||||
def _list_env_fallback_accounts() -> list[dict]:
    """Surface configured-but-unregistered credentials as testable entries.

    Detects credentials available via env vars OR stored in the encrypted
    store in the old flat format (e.g. ``brave_search`` with no alias).
    These are users who haven't yet run ``save_account()`` but have a working key.
    Shows with alias="default" and status="unknown".

    Returns:
        list[dict]: Account dicts with provider/alias/identity/integration_id/
        source/status keys, matching the shape produced by the other listers.
    """
    import os

    from aden_tools.credentials import CREDENTIAL_SPECS

    # Collect IDs in encrypted store (includes old flat entries like "brave_search")
    try:
        from framework.credentials.storage import EncryptedFileStorage

        encrypted_ids: set[str] = set(EncryptedFileStorage().list_all())
    except Exception:
        # Storage unavailable — fall back to env-var detection only.
        encrypted_ids = set()

    def _is_configured(cred_name: str, spec) -> bool:
        # 1. Env var present
        if os.environ.get(spec.env_var):
            return True
        # 2. Old flat encrypted entry (no slash — new entries have {x}/{y})
        if cred_name in encrypted_ids:
            return True
        return False

    seen_groups: set[str] = set()
    accounts: list[dict] = []

    for cred_name, spec in CREDENTIAL_SPECS.items():
        # Only credentials that accept a direct API key and expose tools are testable.
        if not spec.direct_api_key_supported or not spec.tools:
            continue

        if spec.credential_group:
            # Grouped credentials (e.g. api_key + cse_id) are reported once per
            # group, and only when EVERY member of the group is configured.
            if spec.credential_group in seen_groups:
                continue
            group_available = all(
                _is_configured(n, s)
                for n, s in CREDENTIAL_SPECS.items()
                if s.credential_group == spec.credential_group
            )
            if not group_available:
                continue
            seen_groups.add(spec.credential_group)
            provider = spec.credential_group
        else:
            if not _is_configured(cred_name, spec):
                continue
            provider = cred_name

        accounts.append(
            {
                "provider": provider,
                "alias": "default",
                "identity": {},
                "integration_id": None,
                "source": "local",
                "status": "unknown",
            }
        )

    return accounts
|
||||
|
||||
|
||||
def list_connected_accounts() -> list[dict]:
    """List all testable accounts: Aden-synced + named local + env-var fallbacks."""
    aden_accounts = _list_aden_accounts()
    local_accounts = _list_local_accounts()

    # Show env-var fallbacks only for credentials not already in the named registry
    named_providers = {account["provider"] for account in local_accounts}
    fallbacks = [
        account
        for account in _list_env_fallback_accounts()
        if account["provider"] not in named_providers
    ]

    return aden_accounts + local_accounts + fallbacks
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Module-level hooks (read by AgentRunner.load / TUI)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
skip_credential_validation = True
"""Don't validate credentials at load time — we don't know which provider yet."""

requires_account_selection = True
"""Signal TUI to show account picker before starting the agent."""
|
||||
|
||||
|
||||
def configure_for_account(runner: AgentRunner, account: dict) -> None:
    """Scope the tester node's tools to the selected provider.

    Handles both Aden accounts (account= routing) and local accounts
    (session-level env var injection, no account= parameter in prompt).
    """
    provider = account["provider"]
    account_source = account.get("source", "aden")
    account_alias = account.get("alias", "unknown")
    account_identity = account.get("identity", {})
    provider_tools = get_tools_for_provider(provider)

    if account_source != "aden":
        # Local API key: inject the key into the session env, then build a
        # prompt that tells the model NOT to pass an account parameter.
        _activate_local_account(provider, account_alias)
        _configure_local_node(
            runner,
            provider,
            account_alias,
            account_identity,
            provider_tools,
            account.get("status", "unknown"),
        )
        return

    # Aden-synced account: add the introspection tool and route via alias.
    provider_tools.append("get_account_info")
    email = account_identity.get("email", "")
    detail = f" (email: {email})" if email else ""
    _configure_aden_node(runner, provider, account_alias, detail, provider_tools)
|
||||
|
||||
|
||||
def _activate_local_account(credential_id: str, alias: str) -> None:
    """Inject a named local account's key into the session environment.

    Handles three cases:
    1. Named account in LocalCredentialRegistry (new format: {credential_id}/{alias})
    2. Old flat credential in EncryptedFileStorage (id == credential_id, no alias)
    3. Env var already set — skip injection (nothing to do)
    """
    import os

    from aden_tools.credentials import CREDENTIAL_SPECS

    # Collect specs for this credential (handles grouped credentials too)
    group_specs = [
        (cred_name, spec)
        for cred_name, spec in CREDENTIAL_SPECS.items()
        if spec.credential_group == credential_id
        or spec.credential_id == credential_id
        or cred_name == credential_id
    ]
    # Deduplicate — credential_id and credential_group may both match the same spec
    seen_env_vars: set[str] = set()

    # NOTE(review): failures anywhere below are deliberately silent — injection
    # is best-effort and a missing key simply surfaces later as a failed call.
    try:
        from framework.credentials.local.registry import LocalCredentialRegistry
        from framework.credentials.storage import EncryptedFileStorage

        registry = LocalCredentialRegistry.default()
        flat_storage = EncryptedFileStorage()

        for _cred_name, spec in group_specs:
            if spec.env_var in seen_env_vars:
                continue
            # If env var is already set, nothing to do for this one
            if os.environ.get(spec.env_var):
                seen_env_vars.add(spec.env_var)
                continue

            seen_env_vars.add(spec.env_var)

            # Determine key name based on spec
            key_name = "api_key"
            if spec.credential_group and "cse" in spec.env_var.lower():
                key_name = "cse_id"

            key: str | None = None

            # 1. Try named account in registry (new format)
            if alias != "default":
                key = registry.get_key(credential_id, alias, key_name)
            else:
                # For "default" alias, check registry first, then fall back to flat store
                key = registry.get_key(credential_id, "default", key_name)

                # 2. Fall back to old flat encrypted entry (id == credential_id, no alias)
                if key is None:
                    flat_cred = flat_storage.load(credential_id)
                    if flat_cred is not None:
                        key = flat_cred.get_key(key_name) or flat_cred.get_default_key()

            if key:
                os.environ[spec.env_var] = key
    except Exception:
        pass
|
||||
|
||||
|
||||
def _configure_aden_node(
    runner: AgentRunner,
    provider: str,
    alias: str,
    detail: str,
    tools: list[str],
) -> None:
    """Rewrite the "tester" node's tools and prompt for an Aden-synced account.

    The prompt instructs the model to route every tool call via
    ``account="{alias}"`` — the alias is the routing key on the platform side.
    """
    for node in runner.graph.nodes:
        if node.id == "tester":
            # Deduplicate and pin a stable order for the tool list.
            node.tools = sorted(set(tools))
            node.system_prompt = f"""\
You are a credential tester for the account: {provider}/{alias}{detail}

# Instructions

1. Suggest a simple read-only API call to verify the credential works \
(e.g. list messages, list channels, list contacts).
2. Execute the call when the user agrees.
3. Report the result: success (with sample data) or failure (with error).
4. Let the user request additional API calls to further test the credential.

# Account routing

IMPORTANT: Always pass `account="{alias}"` when calling any tool. \
This routes the API call to the correct credential. Never use the email \
or any other identifier — always use the alias exactly as shown.

# Rules

- Start with read-only operations (list, get) before write operations.
- Always confirm with the user before performing write operations.
- If a call fails, report the exact error — this helps diagnose credential issues.
- Be concise. No emojis.
"""
            # Only one tester node exists; stop after configuring it.
            break

    runner.intro_message = (
        f"Testing {provider}/{alias}{detail} — "
        f"{len(tools)} tools loaded. "
        "I'll suggest a read-only API call to verify the credential works."
    )
|
||||
|
||||
|
||||
def _configure_local_node(
    runner: AgentRunner,
    provider: str,
    alias: str,
    identity: dict,
    tools: list[str],
    status: str,
) -> None:
    """Rewrite the "tester" node's tools and prompt for a local API key account.

    Unlike Aden accounts, local keys are injected into the session environment,
    so the prompt explicitly forbids passing an ``account`` parameter.
    """
    identity_parts = [f"{k}: {v}" for k, v in identity.items() if v]
    detail = f" ({', '.join(identity_parts)})" if identity_parts else ""
    # "unknown" means the key was detected but never verified by a real call.
    status_note = " [key not yet validated]" if status == "unknown" else ""

    for node in runner.graph.nodes:
        if node.id == "tester":
            # Deduplicate and pin a stable order for the tool list.
            node.tools = sorted(set(tools))
            node.system_prompt = f"""\
You are a credential tester for the local API key: {provider}/{alias}{detail}{status_note}

# Instructions

1. Suggest a simple test call to verify the credential works \
(e.g. search for "test", list items, get profile info).
2. Execute the call when the user agrees.
3. Report the result: success (with sample data) or failure (with error).
4. Let the user request additional API calls to further test the credential.

# Rules

- Do NOT pass an `account` parameter — this credential is injected \
directly into the session environment and tools read it automatically.
- Start with read-only operations before write operations.
- Always confirm with the user before performing write operations.
- If a call fails, report the exact error — this helps diagnose credential issues.
- Be concise. No emojis.
"""
            # Only one tester node exists; stop after configuring it.
            break

    runner.intro_message = (
        f"Testing {provider}/{alias}{detail} — "
        f"{len(tools)} tools loaded. "
        "I'll suggest a test API call to verify the credential works."
    )
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Module-level graph variables (read by AgentRunner.load)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
# Static graph read by AgentRunner.load(): a single forever-alive event-loop
# node. configure_for_account() later narrows its tools/prompt to one provider.
nodes = [
    NodeSpec(
        id="tester",
        name="Credential Tester",
        description=(
            "Interactive credential testing — lets the user pick an account "
            "and verify it via API calls."
        ),
        node_type="event_loop",
        client_facing=True,
        max_node_visits=0,
        input_keys=[],
        output_keys=[],
        tools=["get_account_info"],
        system_prompt="""\
You are a credential tester. Your job is to help the user verify that their \
connected accounts and API keys can make real API calls.

# Startup

1. Call ``get_account_info`` to list the user's connected accounts.
2. Present the list and ask the user which account to test.
3. Once they pick one, note the account's **alias** (e.g. "Timothy", "work-slack").
4. Suggest a simple read-only API call to verify the credential works \
(e.g. list messages, list channels, list contacts).
5. Execute the call when the user agrees.
6. Report the result: success (with sample data) or failure (with error).
7. Let the user request additional API calls to further test the credential.

# Account routing (Aden accounts only)

IMPORTANT: For Aden-synced accounts, always pass the account's **alias** as the \
``account`` parameter when calling any tool. For local API key accounts, do NOT \
pass an account parameter — they are pre-injected into the session.

# Rules

- Start with read-only operations (list, get) before write operations.
- Always confirm with the user before performing write operations.
- If a call fails, report the exact error — this helps diagnose credential issues.
- Be concise. No emojis.
""",
    ),
]

# Single-node graph: no transitions.
edges = []

entry_node = "tester"
entry_points = {"start": "tester"}
pause_nodes = []
terminal_nodes = []  # Forever-alive: loops until user exits

conversation_mode = "continuous"
identity_prompt = (
    "You are a credential tester that verifies connected accounts and API keys "
    "can make real API calls."
)
# Loop safety limits for the event-loop node.
loop_config = {
    "max_iterations": 50,
    "max_tool_calls_per_turn": 10,
    "max_history_tokens": 32000,
}
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Programmatic agent class (used by __main__.py CLI)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class CredentialTesterAgent:
    """Interactive agent that tests a specific credential via API calls.

    Usage:
        agent = CredentialTesterAgent()
        accounts = agent.list_accounts()
        agent.select_account(accounts[0])
        await agent.start()
        await agent.stop()
    """

    def __init__(self, config=None):
        # config: runtime config (model, keys, limits); falls back to default_config.
        self.config = config or default_config
        self._selected_account: dict | None = None
        self._agent_runtime: AgentRuntime | None = None
        self._tool_registry: ToolRegistry | None = None
        self._storage_path: Path | None = None

    def list_accounts(self) -> list[dict]:
        """List all testable accounts (Aden + local named + env-var fallbacks)."""
        return list_connected_accounts()

    def select_account(self, account: dict) -> None:
        """Select an account to test.

        Args:
            account: Account dict from list_accounts() with
                provider, alias, identity, source keys.
        """
        self._selected_account = account

    @property
    def selected_provider(self) -> str:
        """Provider of the selected account; raises if none selected."""
        if self._selected_account is None:
            raise RuntimeError("No account selected. Call select_account() first.")
        return self._selected_account["provider"]

    @property
    def selected_alias(self) -> str:
        """Alias of the selected account; raises if none selected."""
        if self._selected_account is None:
            raise RuntimeError("No account selected. Call select_account() first.")
        return self._selected_account.get("alias", "unknown")

    def _build_graph(self) -> GraphSpec:
        """Build a single-node graph scoped to the selected account."""
        provider = self.selected_provider
        alias = self.selected_alias
        source = self._selected_account.get("source", "aden")
        identity = self._selected_account.get("identity", {})
        tools = get_tools_for_provider(provider)

        if source == "local":
            # Local key: inject it into the session env before building the node.
            _activate_local_account(provider, alias)
        elif source == "aden":
            # Aden account: expose the account-introspection tool too.
            tools.append("get_account_info")

        tester_node = build_tester_node(
            provider=provider,
            alias=alias,
            tools=tools,
            identity=identity,
            source=source,
        )

        return GraphSpec(
            id="credential-tester-graph",
            goal_id=goal.id,
            version="1.0.0",
            entry_node="tester",
            entry_points={"start": "tester"},
            terminal_nodes=[],
            pause_nodes=[],
            nodes=[tester_node],
            edges=[],
            default_model=self.config.model,
            max_tokens=self.config.max_tokens,
            loop_config={
                "max_iterations": 50,
                "max_tool_calls_per_turn": 10,
                "max_history_tokens": 32000,
            },
            conversation_mode="continuous",
            identity_prompt=(
                f"You are testing the {provider}/{alias} credential. "
                "Help the user verify it works by making real API calls."
            ),
        )

    def _setup(self) -> None:
        """Create storage, tool registry, LLM provider, and the agent runtime.

        Raises:
            RuntimeError: If no account has been selected yet.
        """
        if self._selected_account is None:
            raise RuntimeError("No account selected. Call select_account() first.")

        self._storage_path = Path.home() / ".hive" / "agents" / "credential_tester"
        self._storage_path.mkdir(parents=True, exist_ok=True)

        self._tool_registry = ToolRegistry()

        # Optional MCP config shipped next to this module.
        mcp_config_path = Path(__file__).parent / "mcp_servers.json"
        if mcp_config_path.exists():
            self._tool_registry.load_mcp_config(mcp_config_path)

        extra_kwargs = getattr(self.config, "extra_kwargs", {}) or {}
        llm = LiteLLMProvider(
            model=self.config.model,
            api_key=self.config.api_key,
            api_base=self.config.api_base,
            **extra_kwargs,
        )

        tool_executor = self._tool_registry.get_executor()
        tools = list(self._tool_registry.get_tools().values())

        graph = self._build_graph()

        self._agent_runtime = create_agent_runtime(
            graph=graph,
            goal=goal,
            storage_path=self._storage_path,
            entry_points=[
                EntryPointSpec(
                    id="start",
                    name="Test Credential",
                    entry_node="tester",
                    trigger_type="manual",
                    isolation_level="isolated",
                ),
            ],
            llm=llm,
            tools=tools,
            tool_executor=tool_executor,
            # Ephemeral testing session — no checkpoints needed.
            checkpoint_config=CheckpointConfig(enabled=False),
            graph_id="credential_tester",
        )

    async def start(self) -> None:
        """Set up and start the agent runtime."""
        if self._agent_runtime is None:
            self._setup()
        if not self._agent_runtime.is_running:
            await self._agent_runtime.start()

    async def stop(self) -> None:
        """Stop the agent runtime."""
        if self._agent_runtime and self._agent_runtime.is_running:
            await self._agent_runtime.stop()
        # Drop the runtime so a later start() rebuilds from scratch.
        self._agent_runtime = None

    async def run(self) -> ExecutionResult:
        """Run the agent (convenience for single execution)."""
        await self.start()
        try:
            result = await self._agent_runtime.trigger_and_wait(
                entry_point_id="start",
                input_data={},
            )
            return result or ExecutionResult(success=False, error="Execution timeout")
        finally:
            await self.stop()
|
||||
@@ -0,0 +1,19 @@
|
||||
"""Runtime configuration for Credential Tester agent."""
|
||||
|
||||
from dataclasses import dataclass
|
||||
|
||||
from framework.config import RuntimeConfig
|
||||
|
||||
|
||||
@dataclass
class AgentMetadata:
    """Descriptive metadata for the Credential Tester agent package."""

    name: str = "Credential Tester"
    version: str = "1.0.0"
    description: str = (
        "Test connected accounts by making real API calls. "
        "Pick an account, verify credentials work, and explore available tools."
    )
|
||||
|
||||
|
||||
# Module-level singletons consumed by the agent loader.
metadata = AgentMetadata()
default_config = RuntimeConfig(temperature=0.3)
|
||||
@@ -0,0 +1,9 @@
|
||||
{
|
||||
"hive-tools": {
|
||||
"transport": "stdio",
|
||||
"command": "uv",
|
||||
"args": ["run", "python", "mcp_server.py", "--stdio"],
|
||||
"cwd": "../../../../tools",
|
||||
"description": "Hive tools MCP server with provider-specific tools"
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,84 @@
|
||||
"""Node definitions for Credential Tester agent."""
|
||||
|
||||
from framework.graph import NodeSpec
|
||||
|
||||
|
||||
def build_tester_node(
    provider: str,
    alias: str,
    tools: list[str],
    identity: dict[str, str],
    source: str = "aden",
) -> NodeSpec:
    """Build the tester node dynamically for the selected account.

    Args:
        provider: Provider / credential name (e.g. "google", "brave_search").
        alias: User-set alias (e.g. "Timothy", "work").
        tools: Tool names available for this provider.
        identity: Identity dict (email, workspace, etc.) for context.
        source: "aden" or "local" — controls routing instructions in the prompt.

    Returns:
        NodeSpec: A client-facing event-loop node with a provider-scoped prompt.
    """
    # Human-readable identity suffix, e.g. " (email: a@b.com)".
    detail_parts = [f"{k}: {v}" for k, v in identity.items() if v]
    detail = f" ({', '.join(detail_parts)})" if detail_parts else ""

    # Aden accounts route by alias; local keys are pre-injected into the env.
    if source == "aden":
        routing_section = f"""\
# Account routing

IMPORTANT: Always pass `account="{alias}"` when calling any tool. \
This routes the API call to the correct credential. Never use the email \
or any other identifier — always use the alias exactly as shown.
"""
    else:
        routing_section = """\
# Credential routing

This is a local API key credential — do NOT pass an `account` parameter. \
The key is pre-injected into the session environment and tools read it automatically.
"""

    account_label = "account" if source == "aden" else "local API key"

    return NodeSpec(
        id="tester",
        name="Credential Tester",
        description=(
            f"Interactive testing node for {provider}/{alias}. "
            f"Has access to all {provider} tools to verify the credential works."
        ),
        node_type="event_loop",
        client_facing=True,
        max_node_visits=0,
        input_keys=[],
        output_keys=[],
        tools=tools,
        system_prompt=f"""\
You are a credential tester for the {account_label}: {provider}/{alias}{detail}

Your job is to help the user verify that this credential works by making \
real API calls using the available tools.

{routing_section}
# Instructions

1. Start by greeting the user and confirming which account you're testing.
2. Suggest a simple, safe, read-only API call to verify the credential works \
(e.g. list messages, list channels, list contacts, search for "test").
3. Execute the call when the user agrees.
4. Report the result clearly: success (with sample data) or failure (with error).
5. Let the user request additional API calls to further test the credential.

# Available tools

You have access to {len(tools)} tools for {provider}:
{chr(10).join(f"- {t}" for t in tools)}

# Rules

- Start with read-only operations (list, get) before write operations (create, update, delete).
- Always confirm with the user before performing write operations.
- If a call fails, report the exact error — this helps diagnose credential issues.
- Be concise. No emojis.
""",
    )
|
||||
@@ -0,0 +1,44 @@
|
||||
"""
|
||||
Hive Coder — Native coding agent that builds Hive agent packages.
|
||||
|
||||
Deeply understands the agent framework and produces complete Python packages
|
||||
with goals, nodes, edges, system prompts, MCP configuration, and tests
|
||||
from natural language specifications.
|
||||
"""
|
||||
|
||||
from .agent import (
|
||||
HiveCoderAgent,
|
||||
conversation_mode,
|
||||
default_agent,
|
||||
edges,
|
||||
entry_node,
|
||||
entry_points,
|
||||
goal,
|
||||
identity_prompt,
|
||||
loop_config,
|
||||
nodes,
|
||||
pause_nodes,
|
||||
terminal_nodes,
|
||||
)
|
||||
from .config import AgentMetadata, RuntimeConfig, default_config, metadata
|
||||
|
||||
__version__ = "1.0.0"

# Public API re-exported by this package — keep in sync with the imports above.
__all__ = [
    "HiveCoderAgent",
    "default_agent",
    "goal",
    "nodes",
    "edges",
    "entry_node",
    "entry_points",
    "pause_nodes",
    "terminal_nodes",
    "conversation_mode",
    "identity_prompt",
    "loop_config",
    "RuntimeConfig",
    "AgentMetadata",
    "default_config",
    "metadata",
]
|
||||
@@ -0,0 +1,223 @@
|
||||
"""CLI entry point for Hive Coder agent."""
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
import logging
|
||||
import sys
|
||||
|
||||
import click
|
||||
|
||||
from .agent import HiveCoderAgent, default_agent
|
||||
|
||||
|
||||
def setup_logging(verbose=False, debug=False):
    """Configure logging for execution visibility."""
    # debug wins over verbose; default is warnings only.
    if debug:
        level = logging.DEBUG
        fmt = "%(asctime)s %(name)s: %(message)s"
    elif verbose:
        level = logging.INFO
        fmt = "%(message)s"
    else:
        level = logging.WARNING
        fmt = "%(levelname)s: %(message)s"
    logging.basicConfig(level=level, format=fmt, stream=sys.stderr)
    # The framework logger may have been configured elsewhere — pin its level too.
    logging.getLogger("framework").setLevel(level)
|
||||
|
||||
|
||||
@click.group()
@click.version_option(version="1.0.0")
def cli():
    """Hive Coder — Build Hive agent packages from natural language."""
    # Group entry point only — subcommands do the work.
    pass
|
||||
|
||||
|
||||
@cli.command()
@click.option("--request", "-r", type=str, required=True, help="What agent to build")
@click.option("--mock", is_flag=True, help="Run in mock mode")
@click.option("--quiet", "-q", is_flag=True, help="Only output result JSON")
@click.option("--verbose", "-v", is_flag=True, help="Show execution details")
@click.option("--debug", is_flag=True, help="Show debug logging")
def run(request, mock, quiet, verbose, debug):
    """Execute agent building from a request."""
    # Quiet mode suppresses all logging so stdout carries only the result JSON.
    if not quiet:
        setup_logging(verbose=verbose, debug=debug)

    execution = asyncio.run(
        default_agent.run({"user_request": request}, mock_mode=mock)
    )

    payload = {
        "success": execution.success,
        "steps_executed": execution.steps_executed,
        "output": execution.output,
    }
    if execution.error:
        payload["error"] = execution.error

    click.echo(json.dumps(payload, indent=2, default=str))
    # Exit code mirrors the execution outcome for shell pipelines.
    sys.exit(0 if execution.success else 1)
|
||||
|
||||
|
||||
@cli.command()
@click.option("--mock", is_flag=True, help="Run in mock mode")
@click.option("--verbose", "-v", is_flag=True, help="Show execution details")
@click.option("--debug", is_flag=True, help="Show debug logging")
def tui(mock, verbose, debug):
    """Launch the TUI dashboard for interactive agent building."""
    setup_logging(verbose=verbose, debug=debug)

    # textual is an optional dependency; fail with install guidance
    # instead of a raw traceback.
    try:
        from framework.tui.app import AdenTUI
    except ImportError:
        click.echo("TUI requires the 'textual' package. Install with: pip install textual")
        sys.exit(1)

    from pathlib import Path

    from framework.llm import LiteLLMProvider
    from framework.runner.tool_registry import ToolRegistry
    from framework.runtime.agent_runtime import create_agent_runtime
    from framework.runtime.execution_stream import EntryPointSpec

    async def run_with_tui():
        # Build the runtime by hand (rather than via agent.start()) so the
        # TUI owns the AgentRuntime instance directly.
        agent = HiveCoderAgent()

        agent._tool_registry = ToolRegistry()

        storage_path = Path.home() / ".hive" / "agents" / "hive_coder"
        storage_path.mkdir(parents=True, exist_ok=True)

        # MCP tool servers are optional; load config only if present.
        mcp_config_path = Path(__file__).parent / "mcp_servers.json"
        if mcp_config_path.exists():
            agent._tool_registry.load_mcp_config(mcp_config_path)

        # In mock mode no LLM provider is constructed (llm stays None).
        llm = None
        if not mock:
            llm = LiteLLMProvider(
                model=agent.config.model,
                api_key=agent.config.api_key,
                api_base=agent.config.api_base,
            )

        tools = list(agent._tool_registry.get_tools().values())
        tool_executor = agent._tool_registry.get_executor()
        graph = agent._build_graph()

        runtime = create_agent_runtime(
            graph=graph,
            goal=agent.goal,
            storage_path=storage_path,
            entry_points=[
                EntryPointSpec(
                    id="start",
                    name="Build Agent",
                    entry_node="coder",
                    trigger_type="manual",
                    isolation_level="isolated",
                ),
            ],
            llm=llm,
            tools=tools,
            tool_executor=tool_executor,
        )

        await runtime.start()

        # Always stop the runtime, even if the TUI raises or is closed.
        try:
            app = AdenTUI(runtime)
            await app.run_async()
        finally:
            await runtime.stop()

    asyncio.run(run_with_tui())
|
||||
|
||||
|
||||
@cli.command()
@click.option("--json", "output_json", is_flag=True)
def info(output_json):
    """Show agent information."""
    data = default_agent.info()
    if output_json:
        click.echo(json.dumps(data, indent=2))
        return
    # Human-readable summary, one field per line.
    for line in (
        f"Agent: {data['name']}",
        f"Version: {data['version']}",
        f"Description: {data['description']}",
        f"\nNodes: {', '.join(data['nodes'])}",
        f"Client-facing: {', '.join(data['client_facing_nodes'])}",
        f"Entry: {data['entry_node']}",
        f"Terminal: {', '.join(data['terminal_nodes']) or '(forever-alive)'}",
    ):
        click.echo(line)
|
||||
|
||||
|
||||
@cli.command()
def validate():
    """Validate agent structure."""
    report = default_agent.validate()
    ok = report["valid"]
    if ok:
        click.echo("Agent is valid")
        # Warnings are informational; they do not affect the exit code.
        for warning in report["warnings"]:
            click.echo(f"  WARNING: {warning}")
    else:
        click.echo("Agent has errors:")
        for error in report["errors"]:
            click.echo(f"  ERROR: {error}")
    sys.exit(0 if ok else 1)
|
||||
|
||||
|
||||
@cli.command()
@click.option("--verbose", "-v", is_flag=True)
def shell(verbose):
    """Interactive agent building session (CLI, no TUI)."""
    # Thin sync wrapper around the async REPL loop.
    asyncio.run(_interactive_shell(verbose=verbose))
|
||||
|
||||
|
||||
async def _interactive_shell(verbose=False):
    """Async interactive shell.

    Reads build requests from stdin in a loop, runs each through the
    agent, and prints a short result summary. Exits on quit/exit/q or
    Ctrl-C; the agent runtime is always stopped on the way out.
    """
    setup_logging(verbose=verbose)

    click.echo("=== Hive Coder ===")
    click.echo("Describe the agent you want to build (or 'quit' to exit):\n")

    agent = HiveCoderAgent()
    await agent.start()

    try:
        while True:
            try:
                # input() blocks, so run it in a thread executor to keep
                # the event loop responsive.
                request = await asyncio.get_event_loop().run_in_executor(None, input, "Build> ")
                if request.lower() in ["quit", "exit", "q"]:
                    click.echo("Goodbye!")
                    break

                # Ignore empty / whitespace-only input.
                if not request.strip():
                    continue

                click.echo("\nBuilding agent...\n")

                result = await agent.trigger_and_wait("default", {"user_request": request})

                # trigger_and_wait returns None on timeout.
                if result is None:
                    click.echo("\n[Execution timed out]\n")
                    continue

                if result.success:
                    output = result.output or {}
                    agent_name = output.get("agent_name", "unknown")
                    validation = output.get("validation_result", "unknown")
                    click.echo(f"\nAgent '{agent_name}' built. Validation: {validation}\n")
                else:
                    click.echo(f"\nBuild failed: {result.error}\n")

            except KeyboardInterrupt:
                click.echo("\nGoodbye!")
                break
            except Exception as e:
                # Keep the shell alive on unexpected errors; surface the
                # full traceback for debugging.
                click.echo(f"Error: {e}", err=True)
                import traceback

                traceback.print_exc()
    finally:
        await agent.stop()
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Direct-execution entry point: dispatch to the click CLI group.
    cli()
|
||||
@@ -0,0 +1,357 @@
|
||||
"""Agent graph construction for Hive Coder."""
|
||||
|
||||
from pathlib import Path
|
||||
|
||||
from framework.graph import Constraint, Goal, SuccessCriterion
|
||||
from framework.graph.checkpoint_config import CheckpointConfig
|
||||
from framework.graph.edge import GraphSpec
|
||||
from framework.graph.executor import ExecutionResult
|
||||
from framework.llm import LiteLLMProvider
|
||||
from framework.runner.tool_registry import ToolRegistry
|
||||
from framework.runtime.agent_runtime import AgentRuntime, create_agent_runtime
|
||||
from framework.runtime.execution_stream import EntryPointSpec
|
||||
|
||||
from .config import default_config, metadata
|
||||
from .nodes import coder_node, queen_node
|
||||
|
||||
# ticket_receiver is no longer needed — the queen runs as an independent
|
||||
# GraphExecutor and receives escalation tickets via inject_event().
|
||||
# Keeping the import commented for reference:
|
||||
# from .ticket_receiver import TICKET_RECEIVER_ENTRY_POINT
|
||||
|
||||
# Goal definition
|
||||
# Goal definition for the builder graph. Criterion weights sum to 1.0.
goal = Goal(
    id="agent-builder",
    name="Hive Agent Builder",
    description=(
        "Build complete, validated Hive agent packages from natural language "
        "specifications. Produces production-ready Python packages with goals, "
        "nodes, edges, system prompts, MCP configuration, and tests."
    ),
    success_criteria=[
        SuccessCriterion(
            id="valid-package",
            description="Generated agent package passes structural validation",
            metric="validation_pass",
            target="true",
            weight=0.30,
        ),
        SuccessCriterion(
            id="complete-files",
            description=(
                "All required files generated: agent.py, config.py, "
                "nodes/__init__.py, __init__.py, __main__.py, mcp_servers.json"
            ),
            metric="file_count",
            target=">=6",
            weight=0.25,
        ),
        SuccessCriterion(
            id="user-satisfaction",
            description="User reviews and approves the generated agent",
            metric="user_approval",
            target="true",
            weight=0.25,
        ),
        SuccessCriterion(
            id="framework-compliance",
            description=(
                "Generated code follows framework patterns: STEP 1/STEP 2 "
                "for client-facing, correct imports, entry_points format"
            ),
            metric="pattern_compliance",
            target="100%",
            weight=0.20,
        ),
    ],
    # Hard constraints the coder must never violate while building agents.
    constraints=[
        Constraint(
            id="dynamic-tool-discovery",
            description=(
                "Always discover available tools dynamically via "
                "discover_mcp_tools before referencing tools in agent designs"
            ),
            constraint_type="hard",
            category="correctness",
        ),
        Constraint(
            id="no-fabricated-tools",
            description="Only reference tools that exist in hive-tools MCP",
            constraint_type="hard",
            category="correctness",
        ),
        Constraint(
            id="valid-python",
            description="All generated Python files must be syntactically correct",
            constraint_type="hard",
            category="correctness",
        ),
        Constraint(
            id="self-verification",
            description="Run validation after writing code; fix errors before presenting",
            constraint_type="hard",
            category="quality",
        ),
    ],
)
|
||||
|
||||
# Nodes: primary coder node only. The queen runs as an independent
# GraphExecutor with queen_node — not as part of this graph.
nodes = [coder_node]

# No edges needed — single forever-alive event_loop node
edges = []

# Graph configuration
entry_node = "coder"
entry_points = {"start": "coder"}
pause_nodes = []
terminal_nodes = []  # Forever-alive: loops until user exits

# No async entry points needed — the queen is now an independent executor,
# not a secondary graph receiving events via add_graph().
async_entry_points = []

# Module-level variables read by AgentRunner.load()
# "continuous" carries conversation context across node transitions.
conversation_mode = "continuous"
identity_prompt = (
    "You are Hive Coder, the best agent-building coding agent on the planet. "
    "You deeply understand the Hive agent framework at the source code level "
    "and produce production-ready agent packages from natural language. "
    "You can dynamically discover available framework tools, inspect runtime "
    "sessions and checkpoints from agents you build, and run their test suites. "
    "You follow coding agent discipline: read before writing, verify "
    "assumptions by reading actual code, adhere to project conventions, "
    "self-verify with validation, and fix your own errors. You are concise, "
    "direct, and technically rigorous. No emojis. No fluff."
)
# Loop limits for the coder's event loop (iterations, tool calls, history).
loop_config = {
    "max_iterations": 100,
    "max_tool_calls_per_turn": 20,
    "max_history_tokens": 32000,
}
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Queen graph — runs as an independent persistent conversation in the TUI.
|
||||
# Loaded by _load_judge_and_queen() in app.py, NOT by AgentRunner.
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
# Goal for the queen's independent graph. No scored criteria/constraints —
# the queen is a management interface, not a measured workflow.
queen_goal = Goal(
    id="queen-manager",
    name="Queen Manager",
    description=(
        "Manage the worker agent lifecycle and serve as the user's primary "
        "interactive interface. Triage health escalations from the judge."
    ),
    success_criteria=[],
    constraints=[],
)
|
||||
|
||||
# Single-node, forever-alive graph for the queen (terminal_nodes=[]).
# Loaded by the TUI's _load_judge_and_queen(), NOT by AgentRunner.
queen_graph = GraphSpec(
    id="queen-graph",
    goal_id=queen_goal.id,
    version="1.0.0",
    entry_node="queen",
    entry_points={"start": "queen"},
    terminal_nodes=[],
    pause_nodes=[],
    nodes=[queen_node],
    edges=[],
    conversation_mode="continuous",
    loop_config={
        "max_iterations": 200,
        "max_tool_calls_per_turn": 10,
        "max_history_tokens": 32000,
    },
)
|
||||
|
||||
|
||||
class HiveCoderAgent:
    """
    Hive Coder — builds Hive agent packages from natural language.

    Single-node architecture: the coder runs in a continuous while(true) loop.
    The queen runs as an independent GraphExecutor (loaded by the TUI via
    _load_judge_and_queen), not as part of this graph.

    Lifecycle: start() builds the runtime lazily via _setup(), stop() tears
    it down, run() wraps a single start → trigger → stop round trip.
    """

    def __init__(self, config=None):
        """Initialize from the module-level graph definition.

        Args:
            config: RuntimeConfig override; falls back to default_config.
        """
        self.config = config or default_config
        self.goal = goal
        self.nodes = nodes
        self.edges = edges
        self.entry_node = entry_node
        self.entry_points = entry_points
        self.pause_nodes = pause_nodes
        self.terminal_nodes = terminal_nodes
        self.async_entry_points = async_entry_points
        # Lazily-created runtime state; populated by _setup().
        self._graph: GraphSpec | None = None
        self._agent_runtime: AgentRuntime | None = None
        self._tool_registry: ToolRegistry | None = None
        self._storage_path: Path | None = None

    def _build_graph(self) -> GraphSpec:
        """Build the GraphSpec from the module-level graph configuration."""
        return GraphSpec(
            id="hive-coder-graph",
            goal_id=self.goal.id,
            version="1.0.0",
            entry_node=self.entry_node,
            entry_points=self.entry_points,
            terminal_nodes=self.terminal_nodes,
            pause_nodes=self.pause_nodes,
            nodes=self.nodes,
            edges=self.edges,
            default_model=self.config.model,
            max_tokens=self.config.max_tokens,
            loop_config=loop_config,
            conversation_mode=conversation_mode,
            identity_prompt=identity_prompt,
            async_entry_points=self.async_entry_points,
        )

    def _setup(self, mock_mode=False) -> None:
        """Set up the agent runtime.

        Creates storage, loads MCP tool config (if present), builds the
        LLM provider (skipped in mock mode), the graph, checkpointing, and
        finally the AgentRuntime.
        """
        self._storage_path = Path.home() / ".hive" / "agents" / "hive_coder"
        self._storage_path.mkdir(parents=True, exist_ok=True)

        self._tool_registry = ToolRegistry()

        # MCP config is optional; only load when the file exists.
        mcp_config_path = Path(__file__).parent / "mcp_servers.json"
        if mcp_config_path.exists():
            self._tool_registry.load_mcp_config(mcp_config_path)

        # Mock mode runs without a real LLM provider (llm stays None).
        llm = None
        if not mock_mode:
            llm = LiteLLMProvider(
                model=self.config.model,
                api_key=self.config.api_key,
                api_base=self.config.api_base,
            )

        tool_executor = self._tool_registry.get_executor()
        tools = list(self._tool_registry.get_tools().values())

        self._graph = self._build_graph()

        # Checkpoint after each node completes, asynchronously; prune
        # checkpoints older than 7 days.
        checkpoint_config = CheckpointConfig(
            enabled=True,
            checkpoint_on_node_start=False,
            checkpoint_on_node_complete=True,
            checkpoint_max_age_days=7,
            async_checkpoint=True,
        )

        entry_point_specs = [
            EntryPointSpec(
                id="default",
                name="Default",
                entry_node=self.entry_node,
                trigger_type="manual",
                isolation_level="shared",
            ),
        ]

        self._agent_runtime = create_agent_runtime(
            graph=self._graph,
            goal=self.goal,
            storage_path=self._storage_path,
            entry_points=entry_point_specs,
            llm=llm,
            tools=tools,
            tool_executor=tool_executor,
            checkpoint_config=checkpoint_config,
            graph_id="hive_coder",
        )

    async def start(self, mock_mode=False) -> None:
        """Set up and start the agent runtime. Idempotent if already running."""
        if self._agent_runtime is None:
            self._setup(mock_mode=mock_mode)
        if not self._agent_runtime.is_running:
            await self._agent_runtime.start()

    async def stop(self) -> None:
        """Stop the agent runtime and clean up."""
        if self._agent_runtime and self._agent_runtime.is_running:
            await self._agent_runtime.stop()
        self._agent_runtime = None

    async def trigger_and_wait(
        self,
        entry_point: str = "default",
        input_data: dict | None = None,
        timeout: float | None = None,
        session_state: dict | None = None,
    ) -> ExecutionResult | None:
        """Execute the graph and wait for completion.

        Returns the ExecutionResult, or None on timeout. Raises
        RuntimeError if start() has not been called.
        """
        if self._agent_runtime is None:
            raise RuntimeError("Agent not started. Call start() first.")

        return await self._agent_runtime.trigger_and_wait(
            entry_point_id=entry_point,
            input_data=input_data or {},
            session_state=session_state,
        )

    async def run(self, context: dict, mock_mode=False, session_state=None) -> ExecutionResult:
        """Run the agent (convenience method for single execution).

        Starts the runtime, triggers one execution, and always stops the
        runtime afterward. A None result maps to a timeout failure.
        """
        await self.start(mock_mode=mock_mode)
        try:
            result = await self.trigger_and_wait("default", context, session_state=session_state)
            return result or ExecutionResult(success=False, error="Execution timeout")
        finally:
            await self.stop()

    def info(self):
        """Get agent information as a JSON-serializable dict."""
        return {
            "name": metadata.name,
            "version": metadata.version,
            "description": metadata.description,
            "goal": {
                "name": self.goal.name,
                "description": self.goal.description,
            },
            "nodes": [n.id for n in self.nodes],
            "edges": [e.id for e in self.edges],
            "entry_node": self.entry_node,
            "entry_points": self.entry_points,
            "pause_nodes": self.pause_nodes,
            "terminal_nodes": self.terminal_nodes,
            "client_facing_nodes": [n.id for n in self.nodes if n.client_facing],
        }

    def validate(self):
        """Validate agent structure.

        Checks that every edge endpoint, the entry node, all terminal
        nodes, and all entry points reference known node ids. Returns a
        dict with keys "valid", "errors", "warnings".
        """
        errors = []
        warnings = []

        node_ids = {node.id for node in self.nodes}
        for edge in self.edges:
            if edge.source not in node_ids:
                errors.append(f"Edge {edge.id}: source '{edge.source}' not found")
            if edge.target not in node_ids:
                errors.append(f"Edge {edge.id}: target '{edge.target}' not found")

        if self.entry_node not in node_ids:
            errors.append(f"Entry node '{self.entry_node}' not found")

        for terminal in self.terminal_nodes:
            if terminal not in node_ids:
                errors.append(f"Terminal node '{terminal}' not found")

        for ep_id, node_id in self.entry_points.items():
            if node_id not in node_ids:
                errors.append(f"Entry point '{ep_id}' references unknown node '{node_id}'")

        return {
            "valid": len(errors) == 0,
            "errors": errors,
            "warnings": warnings,
        }
|
||||
|
||||
|
||||
# Create default instance — module-level singleton consumed by the CLI
# commands (run/info/validate) via `from .agent import default_agent`.
default_agent = HiveCoderAgent()
|
||||
+11
-6
@@ -1,4 +1,4 @@
|
||||
"""Runtime configuration."""
|
||||
"""Runtime configuration for Hive Coder agent."""
|
||||
|
||||
import json
|
||||
from dataclasses import dataclass, field
|
||||
@@ -24,7 +24,7 @@ def _load_preferred_model() -> str:
|
||||
class RuntimeConfig:
|
||||
model: str = field(default_factory=_load_preferred_model)
|
||||
temperature: float = 0.7
|
||||
max_tokens: int = 8192
|
||||
max_tokens: int = 40000
|
||||
api_key: str | None = None
|
||||
api_base: str | None = None
|
||||
|
||||
@@ -34,12 +34,17 @@ default_config = RuntimeConfig()
|
||||
|
||||
@dataclass
|
||||
class AgentMetadata:
|
||||
name: str = "Deep Research Agent"
|
||||
name: str = "Hive Coder"
|
||||
version: str = "1.0.0"
|
||||
description: str = (
|
||||
"Interactive research agent that rigorously investigates topics through "
|
||||
"multi-source search, quality evaluation, and synthesis - with TUI conversation "
|
||||
"at key checkpoints for user guidance and feedback."
|
||||
"Native coding agent that builds production-ready Hive agent packages "
|
||||
"from natural language specifications. Deeply understands the agent framework "
|
||||
"and produces complete Python packages with goals, nodes, edges, system prompts, "
|
||||
"MCP configuration, and tests."
|
||||
)
|
||||
intro_message: str = (
|
||||
"I'm Hive Coder — I build Hive agents. Describe what kind of agent "
|
||||
"you want to create and I'll design, implement, and validate it for you."
|
||||
)
|
||||
|
||||
|
||||
@@ -0,0 +1,9 @@
|
||||
{
|
||||
"coder-tools": {
|
||||
"transport": "stdio",
|
||||
"command": "uv",
|
||||
"args": ["run", "python", "coder_tools_server.py", "--stdio"],
|
||||
"cwd": "../../../../tools",
|
||||
"description": "Unsandboxed file system tools for code generation and validation"
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,679 @@
|
||||
"""Node definitions for Hive Coder agent."""
|
||||
|
||||
from framework.graph import NodeSpec
|
||||
|
||||
# Single node — like opencode's while(true) loop.
|
||||
# One continuous context handles the entire workflow:
|
||||
# discover → design → implement → verify → present → iterate.
|
||||
coder_node = NodeSpec(
|
||||
id="coder",
|
||||
name="Hive Coder",
|
||||
description=(
|
||||
"Autonomous coding agent that builds Hive agent packages. "
|
||||
"Handles the full lifecycle: understanding user intent, "
|
||||
"designing architecture, writing code, validating, and "
|
||||
"iterating on feedback — all in one continuous conversation."
|
||||
),
|
||||
node_type="event_loop",
|
||||
client_facing=True,
|
||||
max_node_visits=0,
|
||||
input_keys=["user_request"],
|
||||
output_keys=["agent_name", "validation_result"],
|
||||
success_criteria=(
|
||||
"A complete, validated Hive agent package exists at "
|
||||
"exports/{agent_name}/ and passes structural validation."
|
||||
),
|
||||
system_prompt="""\
|
||||
You are Hive Coder, the best agent-building coding agent. You build \
|
||||
production-ready Hive agent packages from natural language.
|
||||
|
||||
# Core Mandates
|
||||
|
||||
- **Read before writing.** NEVER write code from assumptions. Read \
|
||||
reference agents and templates first. Read every file before editing.
|
||||
- **Conventions first.** Follow existing project patterns exactly. \
|
||||
Analyze imports, structure, and style in reference agents.
|
||||
- **Verify assumptions.** Never assume a class, import, or pattern \
|
||||
exists. Read actual source to confirm. Search if unsure.
|
||||
- **Discover tools dynamically.** NEVER reference tools from static \
|
||||
docs. Always run discover_mcp_tools() to see what actually exists.
|
||||
- **Professional objectivity.** If a use case is a poor fit for the \
|
||||
framework, say so. Technical accuracy over validation.
|
||||
- **Concise.** No emojis. No preambles. No postambles. Substance only.
|
||||
- **Self-verify.** After writing code, run validation and tests. Fix \
|
||||
errors yourself. Don't declare success until validation passes.
|
||||
|
||||
# Tools
|
||||
|
||||
## File I/O
|
||||
- read_file(path, offset?, limit?) — read with line numbers
|
||||
- write_file(path, content) — create/overwrite, auto-mkdir
|
||||
- edit_file(path, old_text, new_text, replace_all?) — fuzzy-match edit
|
||||
- list_directory(path, recursive?) — list contents
|
||||
- search_files(pattern, path?, include?) — regex search
|
||||
- run_command(command, cwd?, timeout?) — shell execution
|
||||
- undo_changes(path?) — restore from git snapshot
|
||||
|
||||
## Meta-Agent
|
||||
- discover_mcp_tools(server_config_path?) — connect to MCP servers \
|
||||
and list all available tools with full schemas. Default: hive-tools.
|
||||
- list_agents() — list all agent packages in exports/ with session counts
|
||||
- list_agent_sessions(agent_name, status?, limit?) — list sessions
|
||||
- get_agent_session_state(agent_name, session_id) — full session state
|
||||
- get_agent_session_memory(agent_name, session_id, key?) — memory data
|
||||
- list_agent_checkpoints(agent_name, session_id) — list checkpoints
|
||||
- get_agent_checkpoint(agent_name, session_id, checkpoint_id?) — load checkpoint
|
||||
- run_agent_tests(agent_name, test_types?, fail_fast?) — run pytest with parsing
|
||||
|
||||
# Meta-Agent Capabilities
|
||||
|
||||
You are not just a file writer. You have deep integration with the \
|
||||
Hive framework:
|
||||
|
||||
## Tool Discovery (MANDATORY before designing)
|
||||
Before designing any agent, run discover_mcp_tools() to see what \
|
||||
tools are actually available from the hive-tools MCP server. This \
|
||||
returns full schemas with parameter names, types, and descriptions. \
|
||||
NEVER guess tool names or parameters from memory. The tool catalog \
|
||||
is the ground truth.
|
||||
|
||||
To check a specific agent's tools:
|
||||
discover_mcp_tools("exports/{agent_name}/mcp_servers.json")
|
||||
|
||||
## Agent Awareness
|
||||
Run list_agents() to see what agents already exist. Read their code \
|
||||
for patterns:
|
||||
read_file("exports/{name}/agent.py")
|
||||
read_file("exports/{name}/nodes/__init__.py")
|
||||
|
||||
## Post-Build Testing
|
||||
After writing agent code, validate structurally AND run tests:
|
||||
run_command("python -c 'from {name} import default_agent; \\
|
||||
print(default_agent.validate())'")
|
||||
run_agent_tests("{name}")
|
||||
|
||||
## Debugging Built Agents
|
||||
When a user says "my agent is failing" or "debug this agent":
|
||||
1. list_agent_sessions("{agent_name}") — find the session
|
||||
2. get_agent_session_state("{agent_name}", "{session_id}") — see status
|
||||
3. get_agent_session_memory("{agent_name}", "{session_id}") — inspect data
|
||||
4. list_agent_checkpoints / get_agent_checkpoint — trace execution
|
||||
|
||||
# Workflow
|
||||
|
||||
You operate in a continuous loop. The user describes what they want, \
|
||||
you build it. No rigid phases — use judgment. But the general flow is:
|
||||
|
||||
## 1. Understand
|
||||
|
||||
When the user describes what they want to build, hear the structure:
|
||||
- The actors, the trigger, the core loop, the output, the pain.
|
||||
|
||||
Play back a model: "Here's what I'm picturing: [concrete picture]. \
|
||||
Before I start — [1-2 questions you can't infer]."
|
||||
|
||||
Ask only what you CANNOT infer. Fill blanks with domain knowledge.
|
||||
|
||||
## 2. Qualify
|
||||
|
||||
Assess framework fit honestly. Run discover_mcp_tools() to check \
|
||||
what tools exist. Read the framework guide:
|
||||
read_file("core/framework/agents/hive_coder/reference/framework_guide.md")
|
||||
|
||||
Consider:
|
||||
- What works well (multi-turn, HITL, tool orchestration)
|
||||
- Limitations (LLM latency, context limits, cost)
|
||||
- Deal-breakers (missing tools, wrong paradigm)
|
||||
|
||||
Give a clear recommendation: proceed, adjust scope, or reconsider.
|
||||
|
||||
## 3. Design
|
||||
|
||||
Design the agent architecture:
|
||||
- Goal: id, name, description, 3-5 success criteria, 2-4 constraints
|
||||
- Nodes: **2-4 nodes MAXIMUM** (see rules below)
|
||||
- Edges: on_success for linear, conditional for routing
|
||||
- Lifecycle: ALWAYS forever-alive (`terminal_nodes=[]`) unless the user \
|
||||
explicitly requests a one-shot/batch agent. Forever-alive agents loop \
|
||||
continuously — the user exits by closing the TUI. This is the standard \
|
||||
pattern for all interactive agents.
|
||||
|
||||
### Node Count Rules (HARD LIMITS)
|
||||
|
||||
**2-4 nodes** for all agents. Never exceed 4 unless the user explicitly \
|
||||
requests more. Each node boundary serializes outputs to shared memory \
|
||||
and DESTROYS all in-context information (tool results, reasoning, history).
|
||||
|
||||
**MERGE nodes when:**
|
||||
- Node has NO tools (pure LLM reasoning) → merge into predecessor/successor
|
||||
- Node sets only 1 trivial output → collapse into predecessor
|
||||
- Multiple consecutive autonomous nodes → combine into one rich node
|
||||
- A "report" or "summary" node → merge into the client-facing node
|
||||
- A "confirm" or "schedule" node that calls no external service → remove
|
||||
|
||||
**SEPARATE nodes only when:**
|
||||
- Client-facing vs autonomous (different interaction models)
|
||||
- Fundamentally different tool sets
|
||||
- Fan-out parallelism (parallel branches MUST be separate)
|
||||
|
||||
**Typical patterns:**
|
||||
- 2 nodes: `interact (client-facing) → process (autonomous) → interact`
|
||||
- 3 nodes: `intake (CF) → process (auto) → review (CF) → intake`
|
||||
- WRONG: 7 nodes where half have no tools and just do LLM reasoning
|
||||
|
||||
Read reference agents before designing:
|
||||
list_agents()
|
||||
read_file("exports/deep_research_agent/agent.py")
|
||||
read_file("exports/deep_research_agent/nodes/__init__.py")
|
||||
|
||||
Present the design with ASCII art graph. Get user approval.
|
||||
|
||||
## 4. Implement
|
||||
|
||||
Read templates before writing code:
|
||||
read_file("core/framework/agents/hive_coder/reference/file_templates.md")
|
||||
read_file("core/framework/agents/hive_coder/reference/anti_patterns.md")
|
||||
|
||||
Write files in order:
|
||||
1. mkdir -p exports/{name}/nodes exports/{name}/tests
|
||||
2. config.py — RuntimeConfig + AgentMetadata
|
||||
3. nodes/__init__.py — NodeSpec definitions with system prompts
|
||||
4. agent.py — Goal, edges, graph, agent class
|
||||
5. __init__.py — package exports
|
||||
6. __main__.py — CLI with click
|
||||
7. mcp_servers.json — tool server config
|
||||
8. tests/ — fixtures
|
||||
|
||||
### Critical Rules
|
||||
|
||||
**Imports** (must match exactly — only import what you use):
|
||||
```python
|
||||
from framework.graph import (
|
||||
NodeSpec, EdgeSpec, EdgeCondition,
|
||||
Goal, SuccessCriterion, Constraint,
|
||||
)
|
||||
from framework.graph.edge import GraphSpec
|
||||
from framework.graph.executor import ExecutionResult
|
||||
from framework.graph.checkpoint_config import CheckpointConfig
|
||||
from framework.llm import LiteLLMProvider
|
||||
from framework.runner.tool_registry import ToolRegistry
|
||||
from framework.runtime.agent_runtime import (
|
||||
AgentRuntime, create_agent_runtime,
|
||||
)
|
||||
from framework.runtime.execution_stream import EntryPointSpec
|
||||
```
|
||||
For agents with async entry points (timers, webhooks, events), also add:
|
||||
```python
|
||||
from framework.graph.edge import GraphSpec, AsyncEntryPointSpec
|
||||
from framework.runtime.agent_runtime import (
|
||||
AgentRuntime, AgentRuntimeConfig, create_agent_runtime,
|
||||
)
|
||||
```
|
||||
NEVER `from core.framework...` — PYTHONPATH includes core/.
|
||||
|
||||
**__init__.py MUST re-export ALL module-level variables** \
|
||||
(THIS IS THE #1 SOURCE OF AGENT LOAD FAILURES):
|
||||
The runner imports the package (__init__.py), NOT agent.py. It reads \
|
||||
goal, nodes, edges, entry_node, entry_points, pause_nodes, \
|
||||
terminal_nodes, conversation_mode, identity_prompt, loop_config via \
|
||||
getattr(). If ANY are missing from __init__.py, they silently default \
|
||||
to None or {} — causing "must define goal, nodes, edges" or "node X \
|
||||
is unreachable" errors. The __init__.py MUST import and re-export \
|
||||
ALL of these from .agent:
|
||||
```python
|
||||
from .agent import (
|
||||
MyAgent, default_agent, goal, nodes, edges,
|
||||
entry_node, entry_points, pause_nodes, terminal_nodes,
|
||||
conversation_mode, identity_prompt, loop_config,
|
||||
)
|
||||
```
|
||||
|
||||
**entry_points**: `{"start": "first-node-id"}`
|
||||
For agents with multiple entry points (e.g. a reminder trigger), \
|
||||
add them: `{"start": "intake", "reminder": "reminder"}`
|
||||
|
||||
**conversation_mode** — ONLY two valid values:
|
||||
- `"continuous"` — recommended for interactive agents (context carries \
|
||||
across node transitions)
|
||||
- Omit entirely — for isolated per-node conversations
|
||||
NEVER use: "client_facing", "interactive", "adaptive", or any other \
|
||||
value. These DO NOT EXIST.
|
||||
|
||||
**loop_config** — ONLY three valid keys:
|
||||
```python
|
||||
loop_config = {
|
||||
"max_iterations": 100,
|
||||
"max_tool_calls_per_turn": 20,
|
||||
"max_history_tokens": 32000,
|
||||
}
|
||||
```
|
||||
NEVER add: "strategy", "mode", "timeout", or other keys.
|
||||
|
||||
**mcp_servers.json**:
|
||||
```json
|
||||
{
|
||||
"hive-tools": {
|
||||
"transport": "stdio",
|
||||
"command": "uv",
|
||||
"args": ["run", "python", "mcp_server.py", "--stdio"],
|
||||
"cwd": "../../tools"
|
||||
}
|
||||
}
|
||||
```
|
||||
NO "mcpServers" wrapper. cwd "../../tools". command "uv".
|
||||
|
||||
**Storage**: `Path.home() / ".hive" / "agents" / "{name}"`
|
||||
|
||||
**Client-facing system prompts** — STEP 1/STEP 2 pattern:
|
||||
```
|
||||
STEP 1 — Present to user (text only, NO tool calls):
|
||||
[instructions]
|
||||
|
||||
STEP 2 — After user responds, call set_output:
|
||||
[set_output calls]
|
||||
```
|
||||
|
||||
**Autonomous system prompts** — set_output in SEPARATE turn.
|
||||
|
||||
**Tools** — NEVER fabricate tool names. Common hallucinations: \
|
||||
csv_read, csv_write, csv_append, file_upload, database_query. \
|
||||
If discover_mcp_tools() shows these don't exist, use alternatives \
|
||||
(e.g. save_data/load_data for data persistence).
|
||||
|
||||
**Node rules**:
|
||||
- **2-4 nodes MAX.** Never exceed 4. Merge thin nodes aggressively.
|
||||
- A node with 0 tools is NOT a real node — merge it.
|
||||
- node_type always "event_loop"
|
||||
- max_node_visits default is 0 (unbounded) — correct for forever-alive. \
|
||||
Only set >0 in one-shot agents with bounded feedback loops.
|
||||
- Feedback inputs: nullable_output_keys
|
||||
- terminal_nodes=[] for forever-alive (the default)
|
||||
- Every node MUST have at least one outgoing edge (no dead ends)
|
||||
- Agents are forever-alive unless user explicitly asks for one-shot
|
||||
|
||||
**Agent class**: CamelCase name, default_agent at module level. \
|
||||
Constructor takes `config=None`. Follow the exact pattern in \
|
||||
file_templates.md — do NOT invent constructor params like \
|
||||
`llm_provider` or `tool_registry`.
|
||||
|
||||
**Module-level variables** (read by AgentRunner.load()):
|
||||
goal, nodes, edges, entry_node, entry_points, pause_nodes,
|
||||
terminal_nodes, conversation_mode, identity_prompt, loop_config
|
||||
|
||||
For agents with async triggers, also export:
|
||||
async_entry_points, runtime_config
|
||||
|
||||
**Async entry points** (timers, webhooks, events):
|
||||
When an agent needs scheduled tasks, webhook reactions, or event-driven \
|
||||
triggers, use `AsyncEntryPointSpec` (from framework.graph.edge) and \
|
||||
`AgentRuntimeConfig` (from framework.runtime.agent_runtime):
|
||||
- Timer (cron): `trigger_type="timer"`, \
|
||||
`trigger_config={"cron": "0 9 * * *"}` — standard 5-field cron expression \
|
||||
(e.g. `"0 9 * * MON-FRI"` weekdays 9am, `"*/30 * * * *"` every 30 min)
|
||||
- Timer (interval): `trigger_type="timer"`, \
|
||||
`trigger_config={"interval_minutes": 20, "run_immediately": False}`
|
||||
- Event (for webhooks): `trigger_type="event"`, \
|
||||
`trigger_config={"event_types": ["webhook_received"]}`
|
||||
- `isolation_level="shared"` so async runs can read primary session memory
|
||||
- `runtime_config = AgentRuntimeConfig(webhook_routes=[...])` for HTTP webhooks
|
||||
- Reference: `exports/gmail_inbox_guardian/agent.py`
|
||||
- Full docs: `core/framework/agents/hive_coder/reference/framework_guide.md` \
|
||||
(Async Entry Points section)
|
||||
|
||||
## 5. Verify
|
||||
|
||||
Run THREE validation steps after writing. All must pass:
|
||||
|
||||
**Step A — Class validation** (checks graph structure):
|
||||
```
|
||||
run_command("python -c 'from {name} import default_agent; \\
|
||||
print(default_agent.validate())'")
|
||||
```
|
||||
|
||||
**Step B — Runner load test** (checks package export contract — \
|
||||
THIS IS THE SAME PATH THE TUI USES):
|
||||
```
|
||||
run_command("python -c 'from framework.runner.runner import \\
|
||||
AgentRunner; r = AgentRunner.load(\"exports/{name}\"); \\
|
||||
print(\"AgentRunner.load: OK\")'")
|
||||
```
|
||||
This catches missing __init__.py exports, bad conversation_mode, \
|
||||
invalid loop_config, and unreachable nodes. If Step A passes but \
|
||||
Step B fails, the problem is in __init__.py exports.
|
||||
|
||||
**Step C — Run tests:**
|
||||
```
|
||||
run_agent_tests("{name}")
|
||||
```
|
||||
|
||||
If anything fails: read error, fix with edit_file, re-validate. Up to 3x.
|
||||
|
||||
**CRITICAL: Testing forever-alive agents**
|
||||
Most agents use `terminal_nodes=[]` (forever-alive). This means \
|
||||
`runner.run()` NEVER returns — it hangs forever waiting for a \
|
||||
terminal node that doesn't exist. Agent tests MUST be structural:
|
||||
- Validate graph, node specs, edges, tools, prompts
|
||||
- Check goal/constraints/success criteria definitions
|
||||
- Test `AgentRunner.load()` + `_setup()` (skip if no API key)
|
||||
- NEVER call `runner.run()` or `trigger_and_wait()` in tests for \
|
||||
forever-alive agents — they will hang and time out.
|
||||
When you restructure an agent (change nodes/edges), always update \
|
||||
the tests to match. Stale tests referencing old node names will fail.
|
||||
|
||||
## 6. Present
|
||||
|
||||
Show the user what you built: agent name, goal summary, graph ASCII \
|
||||
art, files created, validation status. Offer to revise or build another.
|
||||
|
||||
After user confirms satisfaction:
|
||||
set_output("agent_name", "the_agent_name")
|
||||
set_output("validation_result", "valid")
|
||||
|
||||
If building another agent, just start the loop again — no need to \
|
||||
set_output until the user is done.
|
||||
|
||||
## 7. Live Test (optional)
|
||||
|
||||
After the user approves, offer to load and run the agent in-session. \
|
||||
This runs it alongside you.
|
||||
|
||||
```
|
||||
load_agent("exports/{name}") # registers as secondary graph
|
||||
start_agent("{name}") # triggers default entry point
|
||||
```
|
||||
|
||||
You can also:
|
||||
- `list_agents()` — see all loaded graphs and status
|
||||
- `restart_agent("{name}")` then `load_agent` — pick up code changes
|
||||
- `unload_agent("{name}")` — remove it from the session
|
||||
- `get_user_presence()` — check if user is around
|
||||
|
||||
The agent runs in a shared session: it can read memory you've set and \
|
||||
its outputs are visible to you.
|
||||
""",
|
||||
tools=[
|
||||
"read_file",
|
||||
"write_file",
|
||||
"edit_file",
|
||||
"list_directory",
|
||||
"search_files",
|
||||
"run_command",
|
||||
"undo_changes",
|
||||
# Meta-agent tools
|
||||
"discover_mcp_tools",
|
||||
"list_agents",
|
||||
"list_agent_sessions",
|
||||
"get_agent_session_state",
|
||||
"get_agent_session_memory",
|
||||
"list_agent_checkpoints",
|
||||
"get_agent_checkpoint",
|
||||
"run_agent_tests",
|
||||
# Graph lifecycle tools (multi-graph sessions)
|
||||
"load_agent",
|
||||
"unload_agent",
|
||||
"start_agent",
|
||||
"restart_agent",
|
||||
"get_user_presence",
|
||||
],
|
||||
)
|
||||
|
||||
|
||||
# Queen's event-driven triage node. It wakes when the Worker Health Judge
# deposits an EscalationTicket under memory key "ticket" and decides whether
# to dismiss the ticket or alert the human operator via notify_operator.
ticket_triage_node = NodeSpec(
    id="ticket_triage",
    name="Ticket Triage",
    description=(
        "Queen's triage node. Receives an EscalationTicket from the Health Judge "
        "via event-driven entry point and decides: dismiss or notify the operator."
    ),
    node_type="event_loop",
    client_facing=True,  # Operator can chat with queen once connected (Ctrl+Q)
    max_node_visits=0,  # 0 = unbounded visits (forever-alive pattern)
    input_keys=["ticket"],  # EscalationTicket payload written by the judge
    output_keys=["intervention_decision"],
    # NOTE(review): the decision key is marked nullable — presumably a triage
    # pass may end without setting it; confirm against executor semantics.
    nullable_output_keys=["intervention_decision"],
    success_criteria=(
        "A clear intervention decision: either dismissed with documented reasoning, "
        "or operator notified via notify_operator with specific analysis."
    ),
    tools=["notify_operator"],
    system_prompt="""\
You are the Queen (Hive Coder). The Worker Health Judge has escalated a worker \
issue to you. The ticket is in your memory under key "ticket". Read it carefully.

## Dismiss criteria — do NOT call notify_operator:
- severity is "low" AND steps_since_last_accept < 8
- Cause is clearly a transient issue (single API timeout, brief stall that \
self-resolved based on the evidence)
- Evidence shows the agent is making real progress despite bad verdicts

## Intervene criteria — call notify_operator:
- severity is "high" or "critical"
- steps_since_last_accept >= 10 with no sign of recovery
- stall_minutes > 4 (worker definitively stuck)
- Evidence shows a doom loop (same error, same tool, no progress)
- Cause suggests a logic bug, missing configuration, or unrecoverable state

## When intervening:
Call notify_operator with:
ticket_id: <ticket["ticket_id"]>
analysis: "<2-3 sentences: what is wrong, why it matters, suggested action>"
urgency: "<low|medium|high|critical>"

## After deciding:
set_output("intervention_decision", "dismissed: <reason>" or "escalated: <summary>")

Be conservative but not passive. You are the last quality gate before the human \
is disturbed. One unnecessary alert is less costly than alert fatigue — but \
genuine stuck agents must be caught.
""",
)
|
||||
|
||||
# Flat tool roster for the triage node — kept in sync with ticket_triage_node.tools.
ALL_QUEEN_TRIAGE_TOOLS = ["notify_operator"]
|
||||
|
||||
|
||||
# The Queen: the user's primary client-facing node. Combines the full coder
# tool set with worker-lifecycle management and escalation monitoring.
queen_node = NodeSpec(
    id="queen",
    name="Queen",
    description=(
        "User's primary interactive interface with full coding capability. "
        "Can build agents directly or delegate to the worker. Manages the "
        "worker agent lifecycle and triages health escalations from the judge."
    ),
    node_type="event_loop",
    client_facing=True,
    max_node_visits=0,  # 0 = unbounded visits (forever-alive pattern)
    input_keys=["greeting"],
    output_keys=[],  # purely interactive node — produces no memory outputs
    nullable_output_keys=[],
    success_criteria=(
        "User's intent is understood, coding tasks are completed correctly, "
        "and the worker is managed effectively when delegated to."
    ),
    tools=[
        # File I/O (from coder-tools MCP)
        "read_file",
        "write_file",
        "edit_file",
        "list_directory",
        "search_files",
        "run_command",
        "undo_changes",
        # Meta-agent (from coder-tools MCP)
        "discover_mcp_tools",
        "list_agents",
        "list_agent_sessions",
        "get_agent_session_state",
        "get_agent_session_memory",
        "list_agent_checkpoints",
        "get_agent_checkpoint",
        "run_agent_tests",
        # Worker lifecycle
        "start_worker",
        "stop_worker",
        "get_worker_status",
        "inject_worker_message",
        # Monitoring
        "get_worker_health_summary",
        "notify_operator",
    ],
    system_prompt="""\
You are the Queen — the user's primary interface. You are a coding agent \
with the same capabilities as the Hive Coder worker, PLUS the ability to \
manage the worker's lifecycle.

# Core Mandates

- **Read before writing.** NEVER write code from assumptions. Read \
reference agents and templates first. Read every file before editing.
- **Conventions first.** Follow existing project patterns exactly. \
Analyze imports, structure, and style in reference agents.
- **Verify assumptions.** Never assume a class, import, or pattern \
exists. Read actual source to confirm. Search if unsure.
- **Discover tools dynamically.** NEVER reference tools from static \
docs. Always run discover_mcp_tools() to see what actually exists.
- **Self-verify.** After writing code, run validation and tests. Fix \
errors yourself. Don't declare success until validation passes.
- **Concise.** No emojis. No preambles. No postambles. Substance only.

# Tools

## File I/O
- read_file(path, offset?, limit?) — read with line numbers
- write_file(path, content) — create/overwrite, auto-mkdir
- edit_file(path, old_text, new_text, replace_all?) — fuzzy-match edit
- list_directory(path, recursive?) — list contents
- search_files(pattern, path?, include?) — regex search
- run_command(command, cwd?, timeout?) — shell execution
- undo_changes(path?) — restore from git snapshot

## Meta-Agent
- discover_mcp_tools(server_config_path?) — connect to MCP servers \
and list all available tools with full schemas. Default: hive-tools.
- list_agents() — list all agent packages in exports/ with session counts
- list_agent_sessions(agent_name, status?, limit?) — list sessions
- get_agent_session_state(agent_name, session_id) — full session state
- get_agent_session_memory(agent_name, session_id, key?) — memory data
- list_agent_checkpoints(agent_name, session_id) — list checkpoints
- get_agent_checkpoint(agent_name, session_id, checkpoint_id?) — checkpoint
- run_agent_tests(agent_name, test_types?, fail_fast?) — run pytest

## Worker Lifecycle
- start_worker(task) — Start the worker with a task description. The \
worker runs autonomously until it finishes or asks the user a question.
- stop_worker() — Cancel the worker's current execution.
- get_worker_status() — Check if the worker is idle, running, or waiting \
for user input. Returns execution details.
- inject_worker_message(content) — Send a message to the running worker. \
Use this to relay user instructions or concerns.

## Monitoring
- get_worker_health_summary() — Read the latest health data from the judge.
- notify_operator(ticket_id, analysis, urgency) — Alert the user about a \
critical issue. Use sparingly.

# Behavior

## Direct coding
You can do any coding task directly — reading files, writing code, running \
commands, building agents, debugging. You have the same tools as the worker. \
For quick tasks (reading code, small edits, debugging), do them yourself.

## Worker delegation
For large, autonomous tasks (building a full agent, running a long pipeline), \
delegate to the worker via start_worker(task). The worker runs in the \
background while you remain available to the user.

## When idle (worker not running):
- Greet the user. Ask what they want to build or do.
- For quick tasks, do them directly.
- For large tasks, call start_worker(task) with a clear task description. \
Summarize what you told the worker.

## When worker is running:
- If the user asks about progress, call get_worker_status().
- If the user has a concern or instruction for the worker, call \
inject_worker_message(content) to relay it.
- You can still do coding tasks directly while the worker runs.
- If an escalation ticket arrives from the judge, assess severity:
  - Low/transient: acknowledge silently, do not disturb the user.
  - High/critical: notify the user with a brief analysis and suggested action.

## When worker asks user a question:
- The system will route the user's response directly to the worker. \
You do not need to relay it. The user will come back to you after responding.

# Agent Building Workflow

When building Hive agent packages, follow this workflow:

## 1. Understand & Qualify
Hear what the user wants. Run discover_mcp_tools() to check tool availability. \
Read the framework guide:
read_file("core/framework/agents/hive_coder/reference/framework_guide.md")

## 2. Design
Design the agent: Goal, 2-4 nodes MAX, edges. Read reference agents:
list_agents()
read_file("exports/deep_research_agent/nodes/__init__.py")

Present design with ASCII art. Get user approval.

## 3. Implement
Read templates before writing:
read_file("core/framework/agents/hive_coder/reference/file_templates.md")

Write files: config.py, nodes/__init__.py, agent.py, __init__.py, \
__main__.py, mcp_servers.json, tests/.

## 4. Verify
Run THREE validation steps:
run_command("python -c 'from {name} import default_agent; print(default_agent.validate())'")
run_command("python -c 'from framework.runner.runner import AgentRunner; \\
r = AgentRunner.load(\"exports/{name}\"); print(\"OK\")'")
run_agent_tests("{name}")

# Style

- Concise. No fluff. Direct.
- No emojis.
- When starting the worker, describe what you told it in one sentence.
- When relaying status, be specific.
- When an escalation arrives, lead with severity and recommended action.
""",
)
|
||||
|
||||
# Flat roster of every tool available to the Queen node.
# Built from named category groups; the concatenation preserves the
# original flat ordering exactly.

# File I/O (from coder-tools MCP)
_QUEEN_FILE_IO_TOOLS = [
    "read_file",
    "write_file",
    "edit_file",
    "list_directory",
    "search_files",
    "run_command",
    "undo_changes",
]

# Meta-agent (from coder-tools MCP)
_QUEEN_META_AGENT_TOOLS = [
    "discover_mcp_tools",
    "list_agents",
    "list_agent_sessions",
    "get_agent_session_state",
    "get_agent_session_memory",
    "list_agent_checkpoints",
    "get_agent_checkpoint",
    "run_agent_tests",
]

# Worker lifecycle
_QUEEN_WORKER_LIFECYCLE_TOOLS = [
    "start_worker",
    "stop_worker",
    "get_worker_status",
    "inject_worker_message",
]

# Monitoring
_QUEEN_MONITORING_TOOLS = [
    "get_worker_health_summary",
    "notify_operator",
]

ALL_QUEEN_TOOLS = (
    _QUEEN_FILE_IO_TOOLS
    + _QUEEN_META_AGENT_TOOLS
    + _QUEEN_WORKER_LIFECYCLE_TOOLS
    + _QUEEN_MONITORING_TOOLS
)
|
||||
|
||||
# Public API of this nodes module: the NodeSpec definitions plus the
# flat tool rosters consumed by the agent wiring.
__all__ = [
    "coder_node",
    "ticket_triage_node",
    "queen_node",
    "ALL_QUEEN_TRIAGE_TOOLS",
    "ALL_QUEEN_TOOLS",
]
|
||||
@@ -0,0 +1,107 @@
|
||||
# Common Mistakes When Building Hive Agents
|
||||
|
||||
## Critical Errors
|
||||
|
||||
1. **Using tools that don't exist** — Always verify tools are available in the hive-tools MCP server before assigning them to nodes. Never guess tool names.
|
||||
|
||||
2. **Wrong entry_points format** — MUST be `{"start": "first-node-id"}`. NOT a set, NOT `{node_id: [keys]}`.
|
||||
|
||||
3. **Wrong mcp_servers.json format** — Flat dict (no `"mcpServers"` wrapper). `cwd` must be `"../../tools"`. `command` must be `"uv"` with args `["run", "python", ...]`.
|
||||
|
||||
4. **Missing STEP 1/STEP 2 in client-facing prompts** — Without explicit phases, the LLM calls set_output before the user responds. Always use the pattern.
|
||||
|
||||
5. **Forgetting nullable_output_keys** — When a node receives inputs from multiple edges and some inputs only arrive on certain edges (e.g., feedback), mark those as nullable. Without this, the executor blocks waiting for a value that will never arrive.
|
||||
|
||||
6. **Creating dead-end nodes in forever-alive graphs** — Every node must have at least one outgoing edge. A node with no outgoing edges ends the execution, breaking the loop.
|
||||
|
||||
7. **Setting max_node_visits to a non-zero value in forever-alive agents** — The framework default is `max_node_visits=0` (unbounded). Setting it to any positive value (e.g., 1) means the node stops executing after that many visits, silently breaking the forever-alive loop. Only set `max_node_visits > 0` in one-shot agents with feedback loops that need bounded retries.
|
||||
|
||||
7. **Missing module-level exports in `__init__.py`** — The runner loads agents via `importlib.import_module(package_name)`, which imports `__init__.py`. It then reads `goal`, `nodes`, `edges`, `entry_node`, `entry_points`, `pause_nodes`, `terminal_nodes`, `conversation_mode`, `identity_prompt`, `loop_config` via `getattr()`. If ANY of these are missing from `__init__.py`, they default to `None` or `{}` — causing "must define goal, nodes, edges" errors or "node X is unreachable" validation failures. **ALL module-level variables from agent.py must be re-exported in `__init__.py`.**
|
||||
|
||||
## Value Errors
|
||||
|
||||
8. **Invalid `conversation_mode` value** — Only two valid values: `"continuous"` (recommended for interactive agents) or omit entirely (for isolated per-node conversations). Values like `"client_facing"`, `"interactive"`, `"adaptive"` do NOT exist and will cause runtime errors.
|
||||
|
||||
9. **Invalid `loop_config` keys** — Only three valid keys: `max_iterations` (int), `max_tool_calls_per_turn` (int), `max_history_tokens` (int). Keys like `"strategy"`, `"mode"`, `"timeout"` are NOT valid and are silently ignored or cause errors.
|
||||
|
||||
10. **Fabricating tools that don't exist** — Never guess tool names. Always verify via `discover_mcp_tools()`. Common hallucinations: `csv_read`, `csv_write`, `csv_append`, `file_upload`, `database_query`. If a required tool doesn't exist, redesign the agent to use tools that DO exist (e.g., `save_data`/`load_data` for data persistence).
|
||||
|
||||
## Design Errors
|
||||
|
||||
11. **Too many thin nodes** — Hard limit: **2-4 nodes**. Never exceed 4. Each node boundary serializes outputs to shared memory and loses all in-context information (tool results, intermediate reasoning, conversation history). A node with 0 tools that just does LLM reasoning is NOT a real node — merge it into its predecessor or successor.
|
||||
|
||||
**Merge when:**
|
||||
- Node has NO tools — pure LLM reasoning belongs in the node that produces or consumes its data
|
||||
- Node sets only 1 trivial output (e.g., `set_output("done", "true")`) — collapse into predecessor
|
||||
- Multiple consecutive autonomous nodes with same/similar tools — combine into one
|
||||
- A "report" or "summary" node that just presents analysis — merge into the client-facing node
|
||||
- A "schedule" or "confirm" node that doesn't actually schedule anything — remove entirely
|
||||
|
||||
**Keep separate when:**
|
||||
- Client-facing vs autonomous — different interaction models require separate nodes
|
||||
- Fundamentally different tool sets (e.g., web search vs file I/O)
|
||||
- Fan-out parallelism — parallel branches MUST be separate nodes
|
||||
|
||||
**Bad example** (7 nodes — WAY too many):
|
||||
```
|
||||
profile_setup → daily_intake → update_tracker → analyze_progress → generate_plan → schedule_reminders → report
|
||||
```
|
||||
`analyze_progress` has no tools. `schedule_reminders` just sets one boolean. `report` just presents analysis. `update_tracker` and `generate_plan` are sequential autonomous work.
|
||||
|
||||
**Good example** (3 nodes):
|
||||
```
|
||||
intake (client-facing) → process (autonomous: track + analyze + plan) → intake (loop back)
|
||||
```
|
||||
One client-facing node handles ALL user interaction (setup, logging, reports). One autonomous node handles ALL backend work (CSV update, analysis, plan generation) with tools and context preserved.
|
||||
|
||||
12. **Adding framework gating for LLM behavior** — Don't add output rollback, premature rejection, or interaction protocol injection. Fix with better prompts or custom judges.
|
||||
|
||||
13. **Not using continuous conversation mode** — Interactive agents should use `conversation_mode="continuous"`. Without it, each node starts with blank context.
|
||||
|
||||
14. **Adding terminal nodes by default** — ALL agents should use `terminal_nodes=[]` (forever-alive) unless the user explicitly requests a one-shot/batch agent. Forever-alive is the standard pattern. Every node must have at least one outgoing edge. Dead-end nodes break the loop.
|
||||
|
||||
15. **Calling set_output in same turn as tool calls** — Instruct the LLM to call set_output in a SEPARATE turn from real tool calls.
|
||||
|
||||
## File Template Errors
|
||||
|
||||
16. **Wrong import paths** — Use `from framework.graph import ...`, NOT `from core.framework.graph import ...`. The PYTHONPATH includes `core/`.
|
||||
|
||||
17. **Missing storage path** — Agent class must set `self._storage_path = Path.home() / ".hive" / "agents" / "agent_name"`.
|
||||
|
||||
18. **Missing mcp_servers.json** — Without this, the agent has no tools at runtime.
|
||||
|
||||
19. **Bare `python` command in mcp_servers.json** — Use `"command": "uv"` with args `["run", "python", ...]`.
|
||||
|
||||
## Testing Errors
|
||||
|
||||
20. **Using `runner.run()` on forever-alive agents** — `runner.run()` calls `trigger_and_wait()` which blocks until the graph reaches a terminal node. Forever-alive agents have `terminal_nodes=[]`, so **`runner.run()` hangs forever**. This is the #1 cause of stuck test suites.
|
||||
|
||||
**For forever-alive agents, write structural tests instead:**
|
||||
- Validate graph structure (nodes, edges, entry points)
|
||||
- Verify node specs (tools, prompts, client-facing flag)
|
||||
- Check goal/constraints/success criteria definitions
|
||||
- Test that `AgentRunner.load()` + `_setup()` succeeds (skip if no API key)
|
||||
|
||||
**What NOT to do:**
|
||||
```python
|
||||
# WRONG — hangs forever on forever-alive agents
|
||||
result = await runner.run({"topic": "quantum computing"})
|
||||
```
|
||||
|
||||
**Correct pattern for structure tests:**
|
||||
```python
|
||||
def test_research_has_web_tools(self):
|
||||
assert "web_search" in research_node.tools
|
||||
|
||||
def test_research_routes_back_to_interact(self):
|
||||
edges_to_interact = [e for e in edges if e.source == "research" and e.target == "interact"]
|
||||
assert edges_to_interact
|
||||
```
|
||||
|
||||
21. **Stale tests after agent restructuring** — When you change an agent's node count or names (e.g., 4 nodes → 2 nodes), the tests MUST be updated too. Tests referencing old node names (e.g., `"review"`, `"report"`) will fail or hang. Always check that test assertions match the current `nodes/__init__.py`.
|
||||
|
||||
22. **Running full integration tests without API keys** — Structural tests (validate, import) work without keys. Full integration tests need `ANTHROPIC_API_KEY`. Use `pytest.skip()` in the runner fixture when `_setup()` fails due to missing credentials.
|
||||
|
||||
23. **Forgetting sys.path setup in conftest.py** — Tests need `exports/` and `core/` on sys.path.
|
||||
|
||||
24. **Not using auto_responder for client-facing nodes** — Tests with client-facing nodes hang without an auto-responder that injects input. But note: even WITH auto_responder, forever-alive agents still hang because the graph never terminates. Auto-responder only helps for agents with terminal nodes.
|
||||
@@ -0,0 +1,597 @@
|
||||
# Agent File Templates
|
||||
|
||||
Complete code templates for each file in a Hive agent package.
|
||||
|
||||
## config.py
|
||||
|
||||
```python
|
||||
"""Runtime configuration."""
|
||||
|
||||
import json
|
||||
from dataclasses import dataclass, field
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
def _load_preferred_model() -> str:
|
||||
"""Load preferred model from ~/.hive/configuration.json."""
|
||||
config_path = Path.home() / ".hive" / "configuration.json"
|
||||
if config_path.exists():
|
||||
try:
|
||||
with open(config_path) as f:
|
||||
config = json.load(f)
|
||||
llm = config.get("llm", {})
|
||||
if llm.get("provider") and llm.get("model"):
|
||||
return f"{llm['provider']}/{llm['model']}"
|
||||
except Exception:
|
||||
pass
|
||||
return "anthropic/claude-sonnet-4-20250514"
|
||||
|
||||
|
||||
@dataclass
|
||||
class RuntimeConfig:
|
||||
model: str = field(default_factory=_load_preferred_model)
|
||||
temperature: float = 0.7
|
||||
max_tokens: int = 40000
|
||||
api_key: str | None = None
|
||||
api_base: str | None = None
|
||||
|
||||
|
||||
default_config = RuntimeConfig()
|
||||
|
||||
|
||||
@dataclass
|
||||
class AgentMetadata:
|
||||
name: str = "My Agent Name"
|
||||
version: str = "1.0.0"
|
||||
description: str = "What this agent does."
|
||||
intro_message: str = "Welcome! What would you like me to do?"
|
||||
|
||||
|
||||
metadata = AgentMetadata()
|
||||
```
|
||||
|
||||
## nodes/__init__.py
|
||||
|
||||
```python
|
||||
"""Node definitions for My Agent."""
|
||||
|
||||
from framework.graph import NodeSpec
|
||||
|
||||
# Node 1: Intake (client-facing)
|
||||
intake_node = NodeSpec(
|
||||
id="intake",
|
||||
name="Intake",
|
||||
description="Gather requirements from the user",
|
||||
node_type="event_loop",
|
||||
client_facing=True,
|
||||
max_node_visits=0, # Unlimited for forever-alive
|
||||
input_keys=["topic"],
|
||||
output_keys=["brief"],
|
||||
success_criteria="The brief is specific and actionable.",
|
||||
system_prompt="""\
|
||||
You are an intake specialist.
|
||||
|
||||
**STEP 1 — Read and respond (text only, NO tool calls):**
|
||||
1. Read the topic provided
|
||||
2. If vague, ask 1-2 clarifying questions
|
||||
3. If clear, confirm your understanding
|
||||
|
||||
**STEP 2 — After the user confirms, call set_output:**
|
||||
- set_output("brief", "Clear description of what to do")
|
||||
""",
|
||||
tools=[],
|
||||
)
|
||||
|
||||
# Node 2: Worker (autonomous)
|
||||
worker_node = NodeSpec(
|
||||
id="worker",
|
||||
name="Worker",
|
||||
description="Do the main work",
|
||||
node_type="event_loop",
|
||||
max_node_visits=0,
|
||||
input_keys=["brief", "feedback"],
|
||||
output_keys=["results"],
|
||||
nullable_output_keys=["feedback"], # Only on feedback edge
|
||||
success_criteria="Results are complete and accurate.",
|
||||
system_prompt="""\
|
||||
You are a worker agent. Given a brief, do the work.
|
||||
|
||||
If feedback is provided, this is a follow-up — address the feedback.
|
||||
|
||||
Work in phases:
|
||||
1. Use tools to gather/process data
|
||||
2. Analyze results
|
||||
3. Call set_output for each key in a SEPARATE turn:
|
||||
- set_output("results", "structured results")
|
||||
""",
|
||||
tools=["web_search", "web_scrape", "save_data", "load_data", "list_data_files"],
|
||||
)
|
||||
|
||||
# Node 3: Review (client-facing)
|
||||
review_node = NodeSpec(
|
||||
id="review",
|
||||
name="Review",
|
||||
description="Present results for user approval",
|
||||
node_type="event_loop",
|
||||
client_facing=True,
|
||||
max_node_visits=0,
|
||||
input_keys=["results", "brief"],
|
||||
output_keys=["next_action", "feedback"],
|
||||
nullable_output_keys=["feedback"],
|
||||
success_criteria="User has reviewed and decided next steps.",
|
||||
system_prompt="""\
|
||||
Present the results to the user.
|
||||
|
||||
**STEP 1 — Present (text only, NO tool calls):**
|
||||
1. Summary of work done
|
||||
2. Key results
|
||||
3. Ask: satisfied, or want changes?
|
||||
|
||||
**STEP 2 — After user responds, call set_output:**
|
||||
- set_output("next_action", "new_topic") — if starting fresh
|
||||
- set_output("next_action", "revise") — if changes needed
|
||||
- set_output("feedback", "what to change") — only if revising
|
||||
""",
|
||||
tools=[],
|
||||
)
|
||||
|
||||
__all__ = ["intake_node", "worker_node", "review_node"]
|
||||
```
|
||||
|
||||
## agent.py
|
||||
|
||||
```python
|
||||
"""Agent graph construction for My Agent."""
|
||||
|
||||
from pathlib import Path
|
||||
|
||||
from framework.graph import EdgeSpec, EdgeCondition, Goal, SuccessCriterion, Constraint
|
||||
from framework.graph.edge import GraphSpec
|
||||
from framework.graph.executor import ExecutionResult
|
||||
from framework.graph.checkpoint_config import CheckpointConfig
|
||||
from framework.llm import LiteLLMProvider
|
||||
from framework.runner.tool_registry import ToolRegistry
|
||||
from framework.runtime.agent_runtime import AgentRuntime, create_agent_runtime
|
||||
from framework.runtime.execution_stream import EntryPointSpec
|
||||
|
||||
from .config import default_config, metadata
|
||||
from .nodes import intake_node, worker_node, review_node
|
||||
|
||||
# Goal definition
|
||||
goal = Goal(
|
||||
id="my-agent-goal",
|
||||
name="My Agent Goal",
|
||||
description="What this agent achieves.",
|
||||
success_criteria=[
|
||||
SuccessCriterion(id="sc-1", description="...", metric="...", target="...", weight=0.5),
|
||||
SuccessCriterion(id="sc-2", description="...", metric="...", target="...", weight=0.5),
|
||||
],
|
||||
constraints=[
|
||||
Constraint(id="c-1", description="...", constraint_type="hard", category="quality"),
|
||||
],
|
||||
)
|
||||
|
||||
# Node list
|
||||
nodes = [intake_node, worker_node, review_node]
|
||||
|
||||
# Edge definitions
|
||||
edges = [
|
||||
EdgeSpec(id="intake-to-worker", source="intake", target="worker",
|
||||
condition=EdgeCondition.ON_SUCCESS, priority=1),
|
||||
EdgeSpec(id="worker-to-review", source="worker", target="review",
|
||||
condition=EdgeCondition.ON_SUCCESS, priority=1),
|
||||
# Feedback loop
|
||||
EdgeSpec(id="review-to-worker", source="review", target="worker",
|
||||
condition=EdgeCondition.CONDITIONAL,
|
||||
condition_expr="str(next_action).lower() == 'revise'", priority=2),
|
||||
# Loop back for new topic
|
||||
EdgeSpec(id="review-to-intake", source="review", target="intake",
|
||||
condition=EdgeCondition.CONDITIONAL,
|
||||
condition_expr="str(next_action).lower() == 'new_topic'", priority=1),
|
||||
]
|
||||
|
||||
# Graph configuration
|
||||
entry_node = "intake"
|
||||
entry_points = {"start": "intake"}
|
||||
pause_nodes = []
|
||||
terminal_nodes = [] # Forever-alive
|
||||
|
||||
# Module-level vars read by AgentRunner.load()
|
||||
conversation_mode = "continuous"
|
||||
identity_prompt = "You are a helpful agent."
|
||||
loop_config = {"max_iterations": 100, "max_tool_calls_per_turn": 20, "max_history_tokens": 32000}
|
||||
|
||||
|
||||
class MyAgent:
|
||||
def __init__(self, config=None):
|
||||
self.config = config or default_config
|
||||
self.goal = goal
|
||||
self.nodes = nodes
|
||||
self.edges = edges
|
||||
self.entry_node = entry_node
|
||||
self.entry_points = entry_points
|
||||
self.pause_nodes = pause_nodes
|
||||
self.terminal_nodes = terminal_nodes
|
||||
self._graph = None
|
||||
self._agent_runtime = None
|
||||
self._tool_registry = None
|
||||
self._storage_path = None
|
||||
|
||||
def _build_graph(self):
|
||||
return GraphSpec(
|
||||
id="my-agent-graph",
|
||||
goal_id=self.goal.id,
|
||||
version="1.0.0",
|
||||
entry_node=self.entry_node,
|
||||
entry_points=self.entry_points,
|
||||
terminal_nodes=self.terminal_nodes,
|
||||
pause_nodes=self.pause_nodes,
|
||||
nodes=self.nodes,
|
||||
edges=self.edges,
|
||||
default_model=self.config.model,
|
||||
max_tokens=self.config.max_tokens,
|
||||
loop_config=loop_config,
|
||||
conversation_mode=conversation_mode,
|
||||
identity_prompt=identity_prompt,
|
||||
)
|
||||
|
||||
def _setup(self, mock_mode=False):
|
||||
self._storage_path = Path.home() / ".hive" / "agents" / "my_agent"
|
||||
self._storage_path.mkdir(parents=True, exist_ok=True)
|
||||
self._tool_registry = ToolRegistry()
|
||||
mcp_config = Path(__file__).parent / "mcp_servers.json"
|
||||
if mcp_config.exists():
|
||||
self._tool_registry.load_mcp_config(mcp_config)
|
||||
llm = None
|
||||
if not mock_mode:
|
||||
llm = LiteLLMProvider(model=self.config.model, api_key=self.config.api_key, api_base=self.config.api_base)
|
||||
tools = list(self._tool_registry.get_tools().values())
|
||||
tool_executor = self._tool_registry.get_executor()
|
||||
self._graph = self._build_graph()
|
||||
self._agent_runtime = create_agent_runtime(
|
||||
graph=self._graph, goal=self.goal, storage_path=self._storage_path,
|
||||
entry_points=[EntryPointSpec(id="default", name="Default", entry_node=self.entry_node,
|
||||
trigger_type="manual", isolation_level="shared")],
|
||||
llm=llm, tools=tools, tool_executor=tool_executor,
|
||||
checkpoint_config=CheckpointConfig(enabled=True, checkpoint_on_node_complete=True,
|
||||
checkpoint_max_age_days=7, async_checkpoint=True),
|
||||
)
|
||||
|
||||
async def start(self, mock_mode=False):
|
||||
if self._agent_runtime is None:
|
||||
self._setup(mock_mode=mock_mode)
|
||||
if not self._agent_runtime.is_running:
|
||||
await self._agent_runtime.start()
|
||||
|
||||
async def stop(self):
|
||||
if self._agent_runtime and self._agent_runtime.is_running:
|
||||
await self._agent_runtime.stop()
|
||||
self._agent_runtime = None
|
||||
|
||||
async def trigger_and_wait(self, entry_point="default", input_data=None, timeout=None, session_state=None):
|
||||
if self._agent_runtime is None:
|
||||
raise RuntimeError("Agent not started. Call start() first.")
|
||||
return await self._agent_runtime.trigger_and_wait(
|
||||
entry_point_id=entry_point, input_data=input_data or {}, session_state=session_state)
|
||||
|
||||
async def run(self, context, mock_mode=False, session_state=None):
|
||||
await self.start(mock_mode=mock_mode)
|
||||
try:
|
||||
result = await self.trigger_and_wait("default", context, session_state=session_state)
|
||||
return result or ExecutionResult(success=False, error="Execution timeout")
|
||||
finally:
|
||||
await self.stop()
|
||||
|
||||
def info(self):
|
||||
return {
|
||||
"name": metadata.name, "version": metadata.version, "description": metadata.description,
|
||||
"goal": {"name": self.goal.name, "description": self.goal.description},
|
||||
"nodes": [n.id for n in self.nodes], "edges": [e.id for e in self.edges],
|
||||
"entry_node": self.entry_node, "entry_points": self.entry_points,
|
||||
"terminal_nodes": self.terminal_nodes,
|
||||
"client_facing_nodes": [n.id for n in self.nodes if n.client_facing],
|
||||
}
|
||||
|
||||
def validate(self):
|
||||
errors, warnings = [], []
|
||||
node_ids = {n.id for n in self.nodes}
|
||||
for e in self.edges:
|
||||
if e.source not in node_ids: errors.append(f"Edge {e.id}: source '{e.source}' not found")
|
||||
if e.target not in node_ids: errors.append(f"Edge {e.id}: target '{e.target}' not found")
|
||||
if self.entry_node not in node_ids: errors.append(f"Entry node '{self.entry_node}' not found")
|
||||
for t in self.terminal_nodes:
|
||||
if t not in node_ids: errors.append(f"Terminal node '{t}' not found")
|
||||
for ep_id, nid in self.entry_points.items():
|
||||
if nid not in node_ids: errors.append(f"Entry point '{ep_id}' references unknown node '{nid}'")
|
||||
return {"valid": len(errors) == 0, "errors": errors, "warnings": warnings}
|
||||
|
||||
|
||||
default_agent = MyAgent()
|
||||
```
|
||||
|
||||
## agent.py — Async Entry Points Variant
|
||||
|
||||
When an agent needs timers, webhooks, or event-driven triggers, add
|
||||
`async_entry_points` and optionally `runtime_config` as module-level variables.
|
||||
These are IN ADDITION to the standard variables above.
|
||||
|
||||
```python
|
||||
# Additional imports for async entry points
|
||||
from framework.graph.edge import GraphSpec, AsyncEntryPointSpec
|
||||
from framework.runtime.agent_runtime import (
|
||||
AgentRuntime, AgentRuntimeConfig, create_agent_runtime,
|
||||
)
|
||||
|
||||
# ... (goal, nodes, edges, entry_node, entry_points, etc. as above) ...
|
||||
|
||||
# Async entry points — event-driven triggers
|
||||
async_entry_points = [
|
||||
# Timer with cron: daily at 9am
|
||||
AsyncEntryPointSpec(
|
||||
id="daily-check",
|
||||
name="Daily Check",
|
||||
entry_node="process-node",
|
||||
trigger_type="timer",
|
||||
trigger_config={"cron": "0 9 * * *"},
|
||||
isolation_level="shared",
|
||||
max_concurrent=1,
|
||||
),
|
||||
# Timer with fixed interval: every 20 minutes
|
||||
AsyncEntryPointSpec(
|
||||
id="scheduled-check",
|
||||
name="Scheduled Check",
|
||||
entry_node="process-node",
|
||||
trigger_type="timer",
|
||||
trigger_config={"interval_minutes": 20, "run_immediately": False},
|
||||
isolation_level="shared",
|
||||
max_concurrent=1,
|
||||
),
|
||||
# Event: reacts to webhook events
|
||||
AsyncEntryPointSpec(
|
||||
id="webhook-event",
|
||||
name="Webhook Event Handler",
|
||||
entry_node="process-node",
|
||||
trigger_type="event",
|
||||
trigger_config={"event_types": ["webhook_received"]},
|
||||
isolation_level="shared",
|
||||
max_concurrent=10,
|
||||
),
|
||||
]
|
||||
|
||||
# Webhook server config (only needed if using webhooks)
|
||||
runtime_config = AgentRuntimeConfig(
|
||||
webhook_host="127.0.0.1",
|
||||
webhook_port=8080,
|
||||
webhook_routes=[
|
||||
{
|
||||
"source_id": "my-source",
|
||||
"path": "/webhooks/my-source",
|
||||
"methods": ["POST"],
|
||||
},
|
||||
],
|
||||
)
|
||||
```
|
||||
|
||||
**Key rules for async entry points:**
|
||||
- `async_entry_points` is a list of `AsyncEntryPointSpec` (NOT `EntryPointSpec`)
|
||||
- `runtime_config` is `AgentRuntimeConfig` (NOT `RuntimeConfig` from config.py)
|
||||
- Valid trigger_types: `timer`, `event`, `webhook`, `manual`, `api`
|
||||
- Valid isolation_levels: `isolated`, `shared`, `synchronized`
|
||||
- Timer trigger_config (cron): `{"cron": "0 9 * * *"}` — standard 5-field cron expression
|
||||
- Timer trigger_config (interval): `{"interval_minutes": float, "run_immediately": bool}`
|
||||
- Event trigger_config: `{"event_types": ["webhook_received"], "filter_stream": "...", "filter_node": "..."}`
|
||||
- Use `isolation_level="shared"` for async entry points that need to read
|
||||
the primary session's memory (e.g., user-configured rules)
|
||||
- The `_build_graph()` method passes `async_entry_points` to GraphSpec
|
||||
- Reference: `exports/gmail_inbox_guardian/agent.py`
|
||||
|
||||
## __init__.py
|
||||
|
||||
**CRITICAL:** The runner imports the package (`__init__.py`) and reads ALL module-level
|
||||
variables via `getattr()`. Every variable defined in `agent.py` that the runner needs
|
||||
MUST be re-exported here. Missing exports cause silent failures (variables default to
|
||||
`None` or `{}`), leading to "must define goal, nodes, edges" errors or graph validation
|
||||
failures like "node X is unreachable".
|
||||
|
||||
```python
|
||||
"""My Agent — description."""
|
||||
|
||||
from .agent import (
|
||||
MyAgent,
|
||||
default_agent,
|
||||
goal,
|
||||
nodes,
|
||||
edges,
|
||||
entry_node,
|
||||
entry_points,
|
||||
pause_nodes,
|
||||
terminal_nodes,
|
||||
conversation_mode,
|
||||
identity_prompt,
|
||||
loop_config,
|
||||
)
|
||||
from .config import default_config, metadata
|
||||
|
||||
__all__ = [
|
||||
"MyAgent",
|
||||
"default_agent",
|
||||
"goal",
|
||||
"nodes",
|
||||
"edges",
|
||||
"entry_node",
|
||||
"entry_points",
|
||||
"pause_nodes",
|
||||
"terminal_nodes",
|
||||
"conversation_mode",
|
||||
"identity_prompt",
|
||||
"loop_config",
|
||||
"default_config",
|
||||
"metadata",
|
||||
]
|
||||
```
|
||||
|
||||
**If the agent uses async entry points**, also import and export:
|
||||
```python
|
||||
from .agent import (
|
||||
...,
|
||||
async_entry_points,
|
||||
runtime_config, # Only if using webhooks
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
...,
|
||||
"async_entry_points",
|
||||
"runtime_config",
|
||||
]
|
||||
```
|
||||
|
||||
## __main__.py
|
||||
|
||||
```python
|
||||
"""CLI entry point for My Agent."""
|
||||
|
||||
import asyncio, json, logging, sys
|
||||
import click
|
||||
from .agent import default_agent, MyAgent
|
||||
|
||||
|
||||
def setup_logging(verbose=False, debug=False):
|
||||
if debug: level, fmt = logging.DEBUG, "%(asctime)s %(name)s: %(message)s"
|
||||
elif verbose: level, fmt = logging.INFO, "%(message)s"
|
||||
else: level, fmt = logging.WARNING, "%(levelname)s: %(message)s"
|
||||
logging.basicConfig(level=level, format=fmt, stream=sys.stderr)
|
||||
|
||||
|
||||
@click.group()
|
||||
@click.version_option(version="1.0.0")
|
||||
def cli():
|
||||
"""My Agent — description."""
|
||||
pass
|
||||
|
||||
|
||||
@cli.command()
|
||||
@click.option("--topic", "-t", required=True)
|
||||
@click.option("--mock", is_flag=True)
|
||||
@click.option("--verbose", "-v", is_flag=True)
|
||||
def run(topic, mock, verbose):
|
||||
"""Execute the agent."""
|
||||
setup_logging(verbose=verbose)
|
||||
result = asyncio.run(default_agent.run({"topic": topic}, mock_mode=mock))
|
||||
click.echo(json.dumps({"success": result.success, "output": result.output}, indent=2, default=str))
|
||||
sys.exit(0 if result.success else 1)
|
||||
|
||||
|
||||
@cli.command()
|
||||
@click.option("--mock", is_flag=True)
|
||||
def tui(mock):
|
||||
"""Launch TUI dashboard."""
|
||||
from pathlib import Path
|
||||
from framework.tui.app import AdenTUI
|
||||
from framework.llm import LiteLLMProvider
|
||||
from framework.runner.tool_registry import ToolRegistry
|
||||
from framework.runtime.agent_runtime import create_agent_runtime
|
||||
from framework.runtime.execution_stream import EntryPointSpec
|
||||
|
||||
async def run_tui():
|
||||
agent = MyAgent()
|
||||
agent._tool_registry = ToolRegistry()
|
||||
storage = Path.home() / ".hive" / "agents" / "my_agent"
|
||||
storage.mkdir(parents=True, exist_ok=True)
|
||||
mcp_cfg = Path(__file__).parent / "mcp_servers.json"
|
||||
if mcp_cfg.exists(): agent._tool_registry.load_mcp_config(mcp_cfg)
|
||||
llm = None if mock else LiteLLMProvider(model=agent.config.model, api_key=agent.config.api_key, api_base=agent.config.api_base)
|
||||
runtime = create_agent_runtime(
|
||||
graph=agent._build_graph(), goal=agent.goal, storage_path=storage,
|
||||
entry_points=[EntryPointSpec(id="start", name="Start", entry_node="intake", trigger_type="manual", isolation_level="isolated")],
|
||||
llm=llm, tools=list(agent._tool_registry.get_tools().values()), tool_executor=agent._tool_registry.get_executor())
|
||||
await runtime.start()
|
||||
try:
|
||||
app = AdenTUI(runtime)
|
||||
await app.run_async()
|
||||
finally:
|
||||
await runtime.stop()
|
||||
asyncio.run(run_tui())
|
||||
|
||||
|
||||
@cli.command()
|
||||
def info():
|
||||
"""Show agent info."""
|
||||
data = default_agent.info()
|
||||
click.echo(f"Agent: {data['name']}\nVersion: {data['version']}\nDescription: {data['description']}")
|
||||
click.echo(f"Nodes: {', '.join(data['nodes'])}\nClient-facing: {', '.join(data['client_facing_nodes'])}")
|
||||
|
||||
|
||||
@cli.command()
|
||||
def validate():
|
||||
"""Validate agent structure."""
|
||||
v = default_agent.validate()
|
||||
if v["valid"]: click.echo("Agent is valid")
|
||||
else:
|
||||
click.echo("Errors:")
|
||||
for e in v["errors"]: click.echo(f" {e}")
|
||||
sys.exit(0 if v["valid"] else 1)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
cli()
|
||||
```
|
||||
|
||||
## mcp_servers.json
|
||||
|
||||
```json
|
||||
{
|
||||
"hive-tools": {
|
||||
"transport": "stdio",
|
||||
"command": "uv",
|
||||
"args": ["run", "python", "mcp_server.py", "--stdio"],
|
||||
"cwd": "../../tools",
|
||||
"description": "Hive tools MCP server"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**CRITICAL FORMAT RULES:**
|
||||
- NO `"mcpServers"` wrapper (flat dict, not nested)
|
||||
- `cwd` MUST be `"../../tools"` (relative from `exports/AGENT_NAME/` to `tools/`)
|
||||
- `command` MUST be `"uv"` with `"args": ["run", "python", ...]` (NOT bare `"python"`)
|
||||
|
||||
## tests/conftest.py
|
||||
|
||||
```python
|
||||
"""Test fixtures."""
|
||||
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
import pytest_asyncio
|
||||
|
||||
_repo_root = Path(__file__).resolve().parents[3]
|
||||
for _p in ["exports", "core"]:
|
||||
_path = str(_repo_root / _p)
|
||||
if _path not in sys.path:
|
||||
sys.path.insert(0, _path)
|
||||
|
||||
AGENT_PATH = str(Path(__file__).resolve().parents[1])
|
||||
|
||||
|
||||
@pytest.fixture(scope="session")
|
||||
def mock_mode():
|
||||
return True
|
||||
|
||||
|
||||
@pytest_asyncio.fixture(scope="session")
|
||||
async def runner(tmp_path_factory, mock_mode):
|
||||
from framework.runner.runner import AgentRunner
|
||||
storage = tmp_path_factory.mktemp("agent_storage")
|
||||
r = AgentRunner.load(AGENT_PATH, mock_mode=mock_mode, storage_path=storage)
|
||||
r._setup()
|
||||
yield r
|
||||
await r.cleanup_async()
|
||||
```
|
||||
|
||||
## entry_points Format
|
||||
|
||||
MUST be: `{"start": "first-node-id"}`
|
||||
NOT: `{"first-node-id": ["input_keys"]}` (WRONG)
|
||||
NOT: `{"first-node-id"}` (WRONG — this is a set)
|
||||
---
|
||||
# Hive Agent Framework — Condensed Reference
|
||||
|
||||
## Architecture
|
||||
|
||||
Agents are Python packages in `exports/`:
|
||||
```
|
||||
exports/my_agent/
|
||||
├── __init__.py # MUST re-export ALL module-level vars from agent.py
|
||||
├── __main__.py # CLI (run, tui, info, validate, shell)
|
||||
├── agent.py # Graph construction (goal, edges, agent class)
|
||||
├── config.py # Runtime config
|
||||
├── nodes/__init__.py # Node definitions (NodeSpec)
|
||||
├── mcp_servers.json # MCP tool server config
|
||||
└── tests/ # pytest tests
|
||||
```
|
||||
|
||||
## Agent Loading Contract
|
||||
|
||||
`AgentRunner.load()` imports the package (`__init__.py`) and reads these
|
||||
module-level variables via `getattr()`:
|
||||
|
||||
| Variable | Required | Default if missing | Consequence |
|
||||
|----------|----------|--------------------|-------------|
|
||||
| `goal` | YES | `None` | **FATAL** — "must define goal, nodes, edges" |
|
||||
| `nodes` | YES | `None` | **FATAL** — same error |
|
||||
| `edges` | YES | `None` | **FATAL** — same error |
|
||||
| `entry_node` | no | `nodes[0].id` | Probably wrong node |
|
||||
| `entry_points` | no | `{}` | **Nodes unreachable** — validation fails |
|
||||
| `terminal_nodes` | no | `[]` | OK for forever-alive |
|
||||
| `pause_nodes` | no | `[]` | OK |
|
||||
| `conversation_mode` | no | not passed | Isolated mode (no context carryover) |
|
||||
| `identity_prompt` | no | not passed | No agent-level identity |
|
||||
| `loop_config` | no | `{}` | No iteration limits |
|
||||
| `async_entry_points` | no | `[]` | No async triggers (timers, webhooks, events) |
|
||||
| `runtime_config` | no | `None` | No webhook server |
|
||||
|
||||
**CRITICAL:** `__init__.py` MUST import and re-export ALL of these from
|
||||
`agent.py`. Missing exports silently fall back to defaults, causing
|
||||
hard-to-debug failures.
|
||||
|
||||
**Why `default_agent.validate()` is NOT sufficient:**
|
||||
`validate()` checks the agent CLASS's internal graph (self.nodes, self.edges).
|
||||
These are always correct because the constructor references agent.py's module
|
||||
vars directly. But `AgentRunner.load()` reads from the PACKAGE (`__init__.py`),
|
||||
not the class. So `validate()` passes while `AgentRunner.load()` fails.
|
||||
Always test with `AgentRunner.load("exports/{name}")` — this is the same
|
||||
code path the TUI and `hive run` use.
|
||||
|
||||
## Goal
|
||||
|
||||
Defines success criteria and constraints:
|
||||
```python
|
||||
goal = Goal(
|
||||
id="kebab-case-id",
|
||||
name="Display Name",
|
||||
description="What the agent does",
|
||||
success_criteria=[
|
||||
SuccessCriterion(id="sc-id", description="...", metric="...", target="...", weight=0.25),
|
||||
],
|
||||
constraints=[
|
||||
Constraint(id="c-id", description="...", constraint_type="hard", category="quality"),
|
||||
],
|
||||
)
|
||||
```
|
||||
- 3-5 success criteria, weights sum to 1.0
|
||||
- 1-5 constraints (hard/soft, categories: quality, accuracy, interaction, functional)
|
||||
|
||||
## NodeSpec Fields
|
||||
|
||||
| Field | Type | Default | Description |
|
||||
|-------|------|---------|-------------|
|
||||
| id | str | required | kebab-case identifier |
|
||||
| name | str | required | Display name |
|
||||
| description | str | required | What the node does |
|
||||
| node_type | str | required | Always `"event_loop"` |
|
||||
| input_keys | list[str] | required | Memory keys this node reads |
|
||||
| output_keys | list[str] | required | Memory keys this node writes via set_output |
|
||||
| system_prompt | str | "" | LLM instructions |
|
||||
| tools | list[str] | [] | Tool names from MCP servers |
|
||||
| client_facing | bool | False | If True, streams to user and blocks for input |
|
||||
| nullable_output_keys | list[str] | [] | Keys that may remain unset |
|
||||
| max_node_visits | int | 0 | 0=unlimited (default); set >0 (e.g. 3) to bound one-shot feedback loops |
|
||||
| max_retries | int | 3 | Retries on failure |
|
||||
| success_criteria | str | "" | Natural language for judge evaluation |
|
||||
|
||||
## EdgeSpec Fields
|
||||
|
||||
| Field | Type | Description |
|
||||
|-------|------|-------------|
|
||||
| id | str | kebab-case identifier |
|
||||
| source | str | Source node ID |
|
||||
| target | str | Target node ID |
|
||||
| condition | EdgeCondition | ON_SUCCESS, ON_FAILURE, ALWAYS, CONDITIONAL |
|
||||
| condition_expr | str | Python expression evaluated against memory (for CONDITIONAL) |
|
||||
| priority | int | Positive=forward (evaluated first), negative=feedback (loop-back) |
|
||||
|
||||
## Key Patterns
|
||||
|
||||
### STEP 1/STEP 2 (Client-Facing Nodes)
|
||||
```
|
||||
**STEP 1 — Respond to the user (text only, NO tool calls):**
|
||||
[Present information, ask questions]
|
||||
|
||||
**STEP 2 — After the user responds, call set_output:**
|
||||
- set_output("key", "value based on user response")
|
||||
```
|
||||
This prevents premature set_output before user interaction.
|
||||
|
||||
### Fewer, Richer Nodes (CRITICAL)
|
||||
|
||||
**Hard limit: 2-4 nodes for most agents.** Never exceed 5 unless the user
|
||||
explicitly requests a complex multi-phase pipeline.
|
||||
|
||||
Each node boundary serializes outputs to shared memory and **destroys** all
|
||||
in-context information: tool call results, intermediate reasoning, conversation
|
||||
history. A research node that searches, fetches, and analyzes in ONE node keeps
|
||||
all source material in its conversation context. Split across 3 nodes, each
|
||||
downstream node only sees the serialized summary string.
|
||||
|
||||
**Decision framework — merge unless ANY of these apply:**
|
||||
1. **Client-facing boundary** — Autonomous and client-facing work MUST be
|
||||
separate nodes (different interaction models)
|
||||
2. **Disjoint tool sets** — If tools are fundamentally different (e.g., web
|
||||
search vs database), separate nodes make sense
|
||||
3. **Parallel execution** — Fan-out branches must be separate nodes
|
||||
|
||||
**Red flags that you have too many nodes:**
|
||||
- A node with 0 tools (pure LLM reasoning) → merge into predecessor/successor
|
||||
- A node that sets only 1 trivial output → collapse into predecessor
|
||||
- Multiple consecutive autonomous nodes → combine into one rich node
|
||||
- A "report" node that presents analysis → merge into the client-facing node
|
||||
- A "confirm" or "schedule" node that doesn't call any external service → remove
|
||||
|
||||
**Typical agent structure (3 nodes):**
|
||||
```
|
||||
intake (client-facing) ←→ process (autonomous) ←→ review (client-facing)
|
||||
```
|
||||
Or for simpler agents, just 2 nodes:
|
||||
```
|
||||
interact (client-facing) → process (autonomous) → interact (loop)
|
||||
```
|
||||
|
||||
### nullable_output_keys
|
||||
For inputs that only arrive on certain edges:
|
||||
```python
|
||||
research_node = NodeSpec(
|
||||
input_keys=["brief", "feedback"],
|
||||
nullable_output_keys=["feedback"], # Only present on feedback edge
|
||||
max_node_visits=3,
|
||||
)
|
||||
```
|
||||
|
||||
### Mutually Exclusive Outputs
|
||||
For routing decisions:
|
||||
```python
|
||||
review_node = NodeSpec(
|
||||
output_keys=["approved", "feedback"],
|
||||
nullable_output_keys=["approved", "feedback"], # Node sets one or the other
|
||||
)
|
||||
```
|
||||
|
||||
### Forever-Alive Pattern
|
||||
`terminal_nodes=[]` — every node has outgoing edges, graph loops until user exits.
|
||||
Use `conversation_mode="continuous"` to preserve context across transitions.
|
||||
|
||||
### set_output
|
||||
- Synthetic tool injected by framework
|
||||
- Call separately from real tool calls (separate turn)
|
||||
- `set_output("key", "value")` stores to shared memory
|
||||
|
||||
## Edge Conditions
|
||||
|
||||
| Condition | When |
|
||||
|-----------|------|
|
||||
| ON_SUCCESS | Node completed successfully |
|
||||
| ON_FAILURE | Node failed |
|
||||
| ALWAYS | Unconditional |
|
||||
| CONDITIONAL | condition_expr evaluates to True against memory |
|
||||
|
||||
condition_expr examples:
|
||||
- `"needs_more_research == True"`
|
||||
- `"str(next_action).lower() == 'new_agent'"`
|
||||
- `"feedback is not None"`
|
||||
|
||||
## Graph Lifecycle
|
||||
|
||||
| Pattern | terminal_nodes | When |
|
||||
|---------|---------------|------|
|
||||
| **Forever-alive** | `[]` | **DEFAULT for all agents** |
|
||||
| Linear | `["last-node"]` | Only if user explicitly requests one-shot/batch |
|
||||
|
||||
**Forever-alive is the default.** Always use `terminal_nodes=[]`.
|
||||
The framework default for `max_node_visits` is 0 (unbounded), so
|
||||
nodes work correctly in forever-alive loops without explicit override.
|
||||
Only set `max_node_visits > 0` in one-shot agents with feedback loops.
|
||||
Every node must have at least one outgoing edge — no dead ends. The
|
||||
user exits by closing the TUI. Only use terminal nodes if the user
|
||||
explicitly asks for a batch/one-shot agent that runs once and exits.
|
||||
|
||||
## Continuous Conversation Mode
|
||||
|
||||
`conversation_mode` has ONLY two valid states:
|
||||
- `"continuous"` — recommended for interactive agents
|
||||
- Omit entirely — isolated per-node conversations (each node starts fresh)
|
||||
|
||||
**INVALID values** (do NOT use): `"client_facing"`, `"interactive"`,
|
||||
`"adaptive"`, `"shared"`. These do not exist in the framework.
|
||||
|
||||
When `conversation_mode="continuous"`:
|
||||
- Same conversation thread carries across node transitions
|
||||
- Layered system prompts: identity (agent-level) + narrative + focus (per-node)
|
||||
- Transition markers inserted at boundaries
|
||||
- Compaction happens opportunistically at phase transitions
|
||||
|
||||
## loop_config
|
||||
|
||||
Only three valid keys:
|
||||
```python
|
||||
loop_config = {
|
||||
"max_iterations": 100, # Max LLM turns per node visit
|
||||
"max_tool_calls_per_turn": 20, # Max tool calls per LLM response
|
||||
"max_history_tokens": 32000, # Triggers conversation compaction
|
||||
}
|
||||
```
|
||||
**INVALID keys** (do NOT use): `"strategy"`, `"mode"`, `"timeout"`,
|
||||
`"temperature"`. These are silently ignored or cause errors.
|
||||
|
||||
## Data Tools (Spillover)
|
||||
|
||||
For large data that exceeds context:
|
||||
- `save_data(filename, data)` — Write to session data dir
|
||||
- `load_data(filename, offset, limit)` — Read with pagination
|
||||
- `list_data_files()` — List files
|
||||
- `serve_file_to_user(filename, label)` — Clickable file:// URI
|
||||
|
||||
`data_dir` is auto-injected by framework — LLM never sees it.
|
||||
|
||||
## Fan-Out / Fan-In
|
||||
|
||||
Multiple ON_SUCCESS edges from same source → parallel execution via asyncio.gather().
|
||||
- Parallel nodes must have disjoint output_keys
|
||||
- Only one branch may have client_facing nodes
|
||||
- Fan-in node gets all outputs in shared memory
|
||||
|
||||
## Judge System
|
||||
|
||||
- **Implicit** (default): ACCEPTs when LLM finishes with no tool calls and all required outputs set
|
||||
- **SchemaJudge**: Validates against Pydantic model
|
||||
- **Custom**: Implement `evaluate(context) -> JudgeVerdict`
|
||||
|
||||
Judge is the SOLE acceptance mechanism — no ad-hoc framework gating.
|
||||
|
||||
## Async Entry Points (Webhooks, Timers, Events)
|
||||
|
||||
For agents that need to react to external events (incoming emails, scheduled
|
||||
tasks, API calls), use `AsyncEntryPointSpec` and optionally `AgentRuntimeConfig`.
|
||||
|
||||
### Imports
|
||||
```python
|
||||
from framework.graph.edge import GraphSpec, AsyncEntryPointSpec
|
||||
from framework.runtime.agent_runtime import AgentRuntime, AgentRuntimeConfig, create_agent_runtime
|
||||
```
|
||||
Note: `AsyncEntryPointSpec` is in `framework.graph.edge` (the graph/declarative layer).
|
||||
`AgentRuntimeConfig` is in `framework.runtime.agent_runtime` (the runtime layer).
|
||||
|
||||
### AsyncEntryPointSpec Fields
|
||||
|
||||
| Field | Type | Default | Description |
|
||||
|-------|------|---------|-------------|
|
||||
| id | str | required | Unique identifier |
|
||||
| name | str | required | Human-readable name |
|
||||
| entry_node | str | required | Node ID to start execution from |
|
||||
| trigger_type | str | `"manual"` | `webhook`, `api`, `timer`, `event`, `manual` |
|
||||
| trigger_config | dict | `{}` | Trigger-specific config (see below) |
|
||||
| isolation_level | str | `"shared"` | `isolated`, `shared`, `synchronized` |
|
||||
| priority | int | `0` | Execution priority (higher values are scheduled first) |
|
||||
| max_concurrent | int | `10` | Max concurrent executions |
|
||||
|
||||
### Trigger Types
|
||||
|
||||
**timer** — Fires on a schedule. Two modes: cron expressions or fixed interval.
|
||||
|
||||
Cron (preferred for precise scheduling):
|
||||
```python
|
||||
AsyncEntryPointSpec(
|
||||
id="daily-digest",
|
||||
name="Daily Digest",
|
||||
entry_node="check-node",
|
||||
trigger_type="timer",
|
||||
trigger_config={"cron": "0 9 * * *"}, # daily at 9am
|
||||
isolation_level="shared",
|
||||
max_concurrent=1,
|
||||
)
|
||||
```
|
||||
- `cron` (str) — standard cron expression (5 fields: min hour dom month dow)
|
||||
- Examples: `"0 9 * * *"` (daily 9am), `"0 9 * * MON-FRI"` (weekdays 9am), `"*/30 * * * *"` (every 30 min)
|
||||
|
||||
Fixed interval (simpler, for polling-style tasks):
|
||||
```python
|
||||
AsyncEntryPointSpec(
|
||||
id="scheduled-check",
|
||||
name="Scheduled Check",
|
||||
entry_node="check-node",
|
||||
trigger_type="timer",
|
||||
trigger_config={"interval_minutes": 20, "run_immediately": False},
|
||||
isolation_level="shared",
|
||||
max_concurrent=1,
|
||||
)
|
||||
```
|
||||
- `interval_minutes` (float) — how often to fire
|
||||
- `run_immediately` (bool, default False) — fire once on startup
|
||||
|
||||
**event** — Subscribes to EventBus (e.g., webhook events):
|
||||
```python
|
||||
AsyncEntryPointSpec(
|
||||
id="email-event",
|
||||
name="Email Event Handler",
|
||||
entry_node="process-emails",
|
||||
trigger_type="event",
|
||||
trigger_config={"event_types": ["webhook_received"]},
|
||||
isolation_level="shared",
|
||||
max_concurrent=10,
|
||||
)
|
||||
```
|
||||
- `event_types` (list[str]) — EventType values to subscribe to
|
||||
- `filter_stream` (str, optional) — only receive from this stream
|
||||
- `filter_node` (str, optional) — only receive from this node
|
||||
|
||||
**webhook** — HTTP endpoint (requires AgentRuntimeConfig):
|
||||
The webhook server publishes `WEBHOOK_RECEIVED` events on the EventBus.
|
||||
An `event` trigger type with `event_types: ["webhook_received"]` subscribes
|
||||
to those events. The flow is:
|
||||
```
|
||||
HTTP POST /webhooks/gmail → WebhookServer → EventBus (WEBHOOK_RECEIVED)
|
||||
→ event entry point → triggers graph execution from entry_node
|
||||
```
|
||||
|
||||
**manual** — Triggered programmatically via `runtime.trigger()`.
|
||||
|
||||
### Isolation Levels
|
||||
|
||||
| Level | Meaning |
|
||||
|-------|---------|
|
||||
| `isolated` | Private state per execution |
|
||||
| `shared` | Eventual consistency — async executions can read primary session memory |
|
||||
| `synchronized` | Shared with write locks (use when ordering matters) |
|
||||
|
||||
For most async patterns, use `shared` — the async execution reads the primary
|
||||
session's memory (e.g., user-configured rules) and runs its own workflow.
|
||||
|
||||
### AgentRuntimeConfig (for webhook servers)
|
||||
|
||||
```python
|
||||
from framework.runtime.agent_runtime import AgentRuntimeConfig
|
||||
|
||||
runtime_config = AgentRuntimeConfig(
|
||||
webhook_host="127.0.0.1",
|
||||
webhook_port=8080,
|
||||
webhook_routes=[
|
||||
{
|
||||
"source_id": "gmail",
|
||||
"path": "/webhooks/gmail",
|
||||
"methods": ["POST"],
|
||||
"secret": None, # Optional HMAC-SHA256 secret
|
||||
},
|
||||
],
|
||||
)
|
||||
```
|
||||
`runtime_config` is a module-level variable read by `AgentRunner.load()`.
|
||||
The runner passes it to `create_agent_runtime()`. On `runtime.start()`,
|
||||
if webhook_routes is non-empty, an embedded HTTP server starts.
|
||||
|
||||
### Session Sharing
|
||||
|
||||
Timer and event triggers automatically call `_get_primary_session_state()`
|
||||
before execution. This finds the active user-facing session and provides
|
||||
its memory to the async execution, filtered to only the async entry node's
|
||||
`input_keys`. This means the async flow can read user-configured values
|
||||
(like rules, preferences) without needing separate configuration.
|
||||
|
||||
### Module-Level Variables
|
||||
|
||||
Agents with async entry points must export two additional variables:
|
||||
```python
|
||||
# In agent.py:
|
||||
async_entry_points = [AsyncEntryPointSpec(...), ...]
|
||||
runtime_config = AgentRuntimeConfig(...) # Only if using webhooks
|
||||
```
|
||||
|
||||
Both must be re-exported from `__init__.py`:
|
||||
```python
|
||||
from .agent import (
|
||||
..., async_entry_points, runtime_config,
|
||||
)
|
||||
```
|
||||
|
||||
### Reference Agent
|
||||
|
||||
See `exports/gmail_inbox_guardian/agent.py` for a complete example with:
|
||||
- Primary client-facing intake node (user configures rules)
|
||||
- Timer-based scheduled inbox checks (every 20 min)
|
||||
- Webhook-triggered email event handling
|
||||
- Shared isolation for memory access across streams
|
||||
|
||||
## Framework Capabilities
|
||||
|
||||
**Works well:** Multi-turn conversations, HITL review, tool orchestration, structured outputs, parallel execution, context management, error recovery, session persistence.
|
||||
|
||||
**Limitations:** LLM latency (2-10s/turn), context window limits (~128K), cost per run, rate limits, node boundaries lose context.
|
||||
|
||||
**Not designed for:** Sub-second responses, millions of items, real-time streaming, guaranteed determinism, offline/air-gapped.
|
||||
|
||||
## Tool Discovery
|
||||
|
||||
Do NOT rely on a static tool list — it will be outdated. Always use
|
||||
`discover_mcp_tools()` to get the current tool catalog from the
|
||||
hive-tools MCP server. This returns full schemas including parameter
|
||||
names, types, and descriptions.
|
||||
|
||||
```
|
||||
discover_mcp_tools() # default: hive-tools
|
||||
discover_mcp_tools("exports/my_agent/mcp_servers.json") # specific agent
|
||||
```
|
||||
|
||||
Common tool categories (verify via discover_mcp_tools):
|
||||
- **Web**: search, scrape, PDF
|
||||
- **Data**: save/load/append/list data files, serve to user
|
||||
- **File**: view, write, replace, diff, list, grep
|
||||
- **Communication**: email, gmail, slack, telegram
|
||||
- **CRM**: hubspot, apollo, calcom
|
||||
- **GitHub**: stargazers, user profiles, repos
|
||||
- **Vision**: image analysis
|
||||
- **Time**: current time
|
||||
@@ -0,0 +1,31 @@
|
||||
"""Test fixtures for Hive Coder agent."""
|
||||
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
import pytest_asyncio
|
||||
|
||||
_repo_root = Path(__file__).resolve().parents[3]
|
||||
for _p in ["exports", "core"]:
|
||||
_path = str(_repo_root / _p)
|
||||
if _path not in sys.path:
|
||||
sys.path.insert(0, _path)
|
||||
|
||||
AGENT_PATH = str(Path(__file__).resolve().parents[1])
|
||||
|
||||
|
||||
@pytest.fixture(scope="session")
|
||||
def mock_mode():
|
||||
return True
|
||||
|
||||
|
||||
@pytest_asyncio.fixture(scope="session")
|
||||
async def runner(tmp_path_factory, mock_mode):
|
||||
from framework.runner.runner import AgentRunner
|
||||
|
||||
storage = tmp_path_factory.mktemp("agent_storage")
|
||||
r = AgentRunner.load(AGENT_PATH, mock_mode=mock_mode, storage_path=storage)
|
||||
r._setup()
|
||||
yield r
|
||||
await r.cleanup_async()
|
||||
@@ -0,0 +1,27 @@
|
||||
"""Queen's ticket receiver entry point.
|
||||
|
||||
When the Worker Health Judge emits a WORKER_ESCALATION_TICKET event on the
|
||||
shared EventBus, this entry point fires and routes to the ``ticket_triage``
|
||||
node, where the Queen deliberates and decides whether to notify the operator.
|
||||
|
||||
Isolation level is ``isolated`` — the queen's triage memory is kept separate
|
||||
from the worker's shared memory. Each ticket triage runs in its own context.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from framework.graph.edge import AsyncEntryPointSpec
|
||||
|
||||
TICKET_RECEIVER_ENTRY_POINT = AsyncEntryPointSpec(
|
||||
id="ticket_receiver",
|
||||
name="Worker Escalation Ticket Receiver",
|
||||
entry_node="ticket_triage",
|
||||
trigger_type="event",
|
||||
trigger_config={
|
||||
"event_types": ["worker_escalation_ticket"],
|
||||
# Do not fire on our own graph's events (prevents loops if queen
|
||||
# somehow emits a worker_escalation_ticket for herself)
|
||||
"exclude_own_graph": True,
|
||||
},
|
||||
isolation_level="isolated",
|
||||
)
|
||||
@@ -245,20 +245,14 @@ class GraphBuilder:
|
||||
warnings.append(f"Node '{node.id}' should have a description")
|
||||
|
||||
# Type-specific validation
|
||||
if node.node_type == "llm_tool_use":
|
||||
if not node.tools:
|
||||
errors.append(f"LLM tool node '{node.id}' must specify tools")
|
||||
if not node.system_prompt:
|
||||
warnings.append(f"LLM node '{node.id}' should have a system_prompt")
|
||||
if node.node_type == "event_loop":
|
||||
if node.tools and not node.system_prompt:
|
||||
warnings.append(f"Event loop node '{node.id}' should have a system_prompt")
|
||||
|
||||
if node.node_type == "router":
|
||||
if not node.routes:
|
||||
errors.append(f"Router node '{node.id}' must specify routes")
|
||||
|
||||
if node.node_type == "function":
|
||||
if not node.function:
|
||||
errors.append(f"Function node '{node.id}' must specify function name")
|
||||
|
||||
# Check input/output keys
|
||||
if not node.input_keys:
|
||||
suggestions.append(f"Consider specifying input_keys for '{node.id}'")
|
||||
@@ -400,9 +394,13 @@ class GraphBuilder:
|
||||
if not terminal_candidates and self.session.nodes:
|
||||
warnings.append("No terminal nodes found (all nodes have outgoing edges)")
|
||||
|
||||
# Check reachability
|
||||
# Check reachability from ALL entry candidates (not just the first one).
|
||||
# Agents with async entry points have multiple nodes with no incoming
|
||||
# edges (e.g., a primary entry node and an event-driven entry node).
|
||||
if entry_candidates and self.session.nodes:
|
||||
reachable = self._compute_reachable(entry_candidates[0])
|
||||
reachable = set()
|
||||
for candidate in entry_candidates:
|
||||
reachable |= self._compute_reachable(candidate)
|
||||
unreachable = [n.id for n in self.session.nodes if n.id not in reachable]
|
||||
if unreachable:
|
||||
errors.append(f"Unreachable nodes: {unreachable}")
|
||||
@@ -443,14 +441,15 @@ class GraphBuilder:
|
||||
self.session.test_cases.append(test)
|
||||
self._save_session()
|
||||
|
||||
def run_test(
|
||||
async def run_test_async(
|
||||
self,
|
||||
test: TestCase,
|
||||
executor_factory: Callable,
|
||||
) -> TestResult:
|
||||
"""
|
||||
Run a single test case.
|
||||
Run a single test case asynchronously.
|
||||
|
||||
This method is safe to call from async contexts (Jupyter, FastAPI, etc.).
|
||||
executor_factory should return a configured GraphExecutor.
|
||||
"""
|
||||
self._require_phase([BuildPhase.ADDING_NODES, BuildPhase.ADDING_EDGES, BuildPhase.TESTING])
|
||||
@@ -462,14 +461,10 @@ class GraphBuilder:
|
||||
executor = executor_factory()
|
||||
|
||||
# Run the test
|
||||
import asyncio
|
||||
|
||||
result = asyncio.run(
|
||||
executor.execute(
|
||||
graph=graph,
|
||||
goal=self.session.goal,
|
||||
input_data=test.input,
|
||||
)
|
||||
result = await executor.execute(
|
||||
graph=graph,
|
||||
goal=self.session.goal,
|
||||
input_data=test.input,
|
||||
)
|
||||
|
||||
# Check result
|
||||
@@ -499,6 +494,36 @@ class GraphBuilder:
|
||||
|
||||
return test_result
|
||||
|
||||
def run_test(
|
||||
self,
|
||||
test: TestCase,
|
||||
executor_factory: Callable,
|
||||
) -> TestResult:
|
||||
"""
|
||||
Run a single test case.
|
||||
|
||||
This is a synchronous wrapper around run_test_async().
|
||||
If called from an async context (Jupyter, FastAPI, etc.), use run_test_async() instead.
|
||||
|
||||
executor_factory should return a configured GraphExecutor.
|
||||
"""
|
||||
import asyncio
|
||||
|
||||
# Check if an event loop is already running
|
||||
# get_running_loop() returns a loop if one exists, or raises RuntimeError if none exists
|
||||
try:
|
||||
asyncio.get_running_loop()
|
||||
except RuntimeError:
|
||||
# No event loop running - safe to use asyncio.run()
|
||||
return asyncio.run(self.run_test_async(test, executor_factory))
|
||||
|
||||
# Event loop is running - cannot use asyncio.run()
|
||||
raise RuntimeError(
|
||||
"Cannot call run_test() from an async context. "
|
||||
"An event loop is already running. "
|
||||
"Please use 'await builder.run_test_async(test, executor_factory)' instead."
|
||||
)
|
||||
|
||||
def run_all_tests(self, executor_factory: Callable) -> list[TestResult]:
|
||||
"""Run all test cases."""
|
||||
results = []
|
||||
|
||||
+17
-3
@@ -11,9 +11,9 @@ Usage:
|
||||
|
||||
Testing commands:
|
||||
hive test-run <agent_path> --goal <goal_id>
|
||||
hive test-debug <goal_id> <test_id>
|
||||
hive test-list <goal_id>
|
||||
hive test-stats <goal_id>
|
||||
hive test-debug <agent_path> <test_name>
|
||||
hive test-list <agent_path>
|
||||
hive test-stats <agent_path>
|
||||
"""
|
||||
|
||||
import argparse
|
||||
@@ -44,11 +44,25 @@ def _configure_paths():
|
||||
if exports_str not in sys.path:
|
||||
sys.path.insert(0, exports_str)
|
||||
|
||||
# Add examples/templates/ to sys.path so template agents are importable
|
||||
templates_dir = project_root / "examples" / "templates"
|
||||
if templates_dir.is_dir():
|
||||
templates_str = str(templates_dir)
|
||||
if templates_str not in sys.path:
|
||||
sys.path.insert(0, templates_str)
|
||||
|
||||
# Ensure core/ is also in sys.path (for non-editable-install scenarios)
|
||||
core_str = str(project_root / "core")
|
||||
if (project_root / "core").is_dir() and core_str not in sys.path:
|
||||
sys.path.insert(0, core_str)
|
||||
|
||||
# Add core/framework/agents/ so framework agents are importable as top-level packages
|
||||
framework_agents_dir = project_root / "core" / "framework" / "agents"
|
||||
if framework_agents_dir.is_dir():
|
||||
fa_str = str(framework_agents_dir)
|
||||
if fa_str not in sys.path:
|
||||
sys.path.insert(0, fa_str)
|
||||
|
||||
|
||||
def main():
|
||||
_configure_paths()
|
||||
|
||||
@@ -0,0 +1,116 @@
|
||||
"""Shared Hive configuration utilities.
|
||||
|
||||
Centralises reading of ~/.hive/configuration.json so that the runner
|
||||
and every agent template share one implementation instead of copy-pasting
|
||||
helper functions.
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
from dataclasses import dataclass, field
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
from framework.graph.edge import DEFAULT_MAX_TOKENS
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Low-level config file access
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
HIVE_CONFIG_FILE = Path.home() / ".hive" / "configuration.json"
|
||||
|
||||
|
||||
def get_hive_config() -> dict[str, Any]:
|
||||
"""Load hive configuration from ~/.hive/configuration.json."""
|
||||
if not HIVE_CONFIG_FILE.exists():
|
||||
return {}
|
||||
try:
|
||||
with open(HIVE_CONFIG_FILE, encoding="utf-8-sig") as f:
|
||||
return json.load(f)
|
||||
except (json.JSONDecodeError, OSError):
|
||||
return {}
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Derived helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def get_preferred_model() -> str:
|
||||
"""Return the user's preferred LLM model string (e.g. 'anthropic/claude-sonnet-4-20250514')."""
|
||||
llm = get_hive_config().get("llm", {})
|
||||
if llm.get("provider") and llm.get("model"):
|
||||
return f"{llm['provider']}/{llm['model']}"
|
||||
return "anthropic/claude-sonnet-4-20250514"
|
||||
|
||||
|
||||
def get_max_tokens() -> int:
|
||||
"""Return the configured max_tokens, falling back to DEFAULT_MAX_TOKENS."""
|
||||
return get_hive_config().get("llm", {}).get("max_tokens", DEFAULT_MAX_TOKENS)
|
||||
|
||||
|
||||
def get_api_key() -> str | None:
|
||||
"""Return the API key, supporting env var, Claude Code subscription, and ZAI Code.
|
||||
|
||||
Priority:
|
||||
1. Claude Code subscription (``use_claude_code_subscription: true``)
|
||||
reads the OAuth token from ``~/.claude/.credentials.json``.
|
||||
2. Environment variable named in ``api_key_env_var``.
|
||||
"""
|
||||
llm = get_hive_config().get("llm", {})
|
||||
|
||||
# Claude Code subscription: read OAuth token directly
|
||||
if llm.get("use_claude_code_subscription"):
|
||||
try:
|
||||
from framework.runner.runner import get_claude_code_token
|
||||
|
||||
token = get_claude_code_token()
|
||||
if token:
|
||||
return token
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
# Standard env-var path (covers ZAI Code and all API-key providers)
|
||||
api_key_env_var = llm.get("api_key_env_var")
|
||||
if api_key_env_var:
|
||||
return os.environ.get(api_key_env_var)
|
||||
return None
|
||||
|
||||
|
||||
def get_api_base() -> str | None:
|
||||
"""Return the api_base URL for OpenAI-compatible endpoints, if configured."""
|
||||
return get_hive_config().get("llm", {}).get("api_base")
|
||||
|
||||
|
||||
def get_llm_extra_kwargs() -> dict[str, Any]:
|
||||
"""Return extra kwargs for LiteLLMProvider (e.g. OAuth headers).
|
||||
|
||||
When ``use_claude_code_subscription`` is enabled, returns
|
||||
``extra_headers`` with the OAuth Bearer token so that litellm's
|
||||
built-in Anthropic OAuth handler adds the required beta headers.
|
||||
"""
|
||||
llm = get_hive_config().get("llm", {})
|
||||
if llm.get("use_claude_code_subscription"):
|
||||
api_key = get_api_key()
|
||||
if api_key:
|
||||
return {
|
||||
"extra_headers": {"authorization": f"Bearer {api_key}"},
|
||||
}
|
||||
return {}
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# RuntimeConfig – shared across agent templates
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@dataclass
|
||||
class RuntimeConfig:
|
||||
"""Agent runtime configuration loaded from ~/.hive/configuration.json."""
|
||||
|
||||
model: str = field(default_factory=get_preferred_model)
|
||||
temperature: float = 0.7
|
||||
max_tokens: int = field(default_factory=get_max_tokens)
|
||||
api_key: str | None = field(default_factory=get_api_key)
|
||||
api_base: str | None = field(default_factory=get_api_base)
|
||||
extra_kwargs: dict[str, Any] = field(default_factory=get_llm_extra_kwargs)
|
||||
@@ -59,6 +59,13 @@ from .provider import (
|
||||
CredentialProvider,
|
||||
StaticProvider,
|
||||
)
|
||||
from .setup import (
|
||||
CredentialSetupSession,
|
||||
MissingCredential,
|
||||
SetupResult,
|
||||
detect_missing_credentials_from_nodes,
|
||||
run_credential_setup_cli,
|
||||
)
|
||||
from .storage import (
|
||||
CompositeStorage,
|
||||
CredentialStorage,
|
||||
@@ -68,6 +75,7 @@ from .storage import (
|
||||
)
|
||||
from .store import CredentialStore
|
||||
from .template import TemplateResolver
|
||||
from .validation import ensure_credential_key_env, validate_agent_credentials
|
||||
|
||||
# Aden sync components (lazy import to avoid httpx dependency when not needed)
|
||||
# Usage: from core.framework.credentials.aden import AdenSyncProvider
|
||||
@@ -84,6 +92,14 @@ try:
|
||||
except ImportError:
|
||||
_ADEN_AVAILABLE = False
|
||||
|
||||
# Local credential registry (named API key accounts with identity metadata)
|
||||
try:
|
||||
from .local import LocalAccountInfo, LocalCredentialRegistry
|
||||
|
||||
_LOCAL_AVAILABLE = True
|
||||
except ImportError:
|
||||
_LOCAL_AVAILABLE = False
|
||||
|
||||
__all__ = [
|
||||
# Main store
|
||||
"CredentialStore",
|
||||
@@ -111,12 +127,25 @@ __all__ = [
|
||||
"CredentialRefreshError",
|
||||
"CredentialValidationError",
|
||||
"CredentialDecryptionError",
|
||||
# Validation
|
||||
"ensure_credential_key_env",
|
||||
"validate_agent_credentials",
|
||||
# Interactive setup
|
||||
"CredentialSetupSession",
|
||||
"MissingCredential",
|
||||
"SetupResult",
|
||||
"detect_missing_credentials_from_nodes",
|
||||
"run_credential_setup_cli",
|
||||
# Aden sync (optional - requires httpx)
|
||||
"AdenSyncProvider",
|
||||
"AdenCredentialClient",
|
||||
"AdenClientConfig",
|
||||
"AdenCachedStorage",
|
||||
# Local credential registry (optional - requires cryptography)
|
||||
"LocalCredentialRegistry",
|
||||
"LocalAccountInfo",
|
||||
]
|
||||
|
||||
# Track Aden availability for runtime checks
|
||||
ADEN_AVAILABLE = _ADEN_AVAILABLE
|
||||
LOCAL_AVAILABLE = _LOCAL_AVAILABLE
|
||||
|
||||
@@ -1,29 +1,31 @@
|
||||
"""
|
||||
Aden Credential Client.
|
||||
|
||||
HTTP client for communicating with the Aden authentication server.
|
||||
The Aden server handles OAuth2 authorization flows and token management.
|
||||
This client fetches tokens and delegates refresh operations to Aden.
|
||||
HTTP client for the Aden authentication server.
|
||||
Aden holds all OAuth secrets; agents receive only short-lived access tokens.
|
||||
|
||||
API (all endpoints authenticated with Bearer {api_key}):
|
||||
|
||||
GET /v1/credentials — list integrations
|
||||
GET /v1/credentials/{integration_id} — get access token (auto-refreshes)
|
||||
POST /v1/credentials/{integration_id}/refresh — force refresh
|
||||
GET /v1/credentials/{integration_id}/validate — check validity
|
||||
|
||||
Integration IDs are base64-encoded hashes assigned by the Aden platform
|
||||
(e.g. "Z29vZ2xlOlRpbW90aHk6MTYwNjc6MTM2ODQ"), NOT provider names.
|
||||
|
||||
Usage:
|
||||
# API key loaded from ADEN_API_KEY environment variable by default
|
||||
client = AdenCredentialClient(AdenClientConfig(
|
||||
base_url="https://api.adenhq.com",
|
||||
))
|
||||
|
||||
# Or explicitly provide the API key
|
||||
client = AdenCredentialClient(AdenClientConfig(
|
||||
base_url="https://api.adenhq.com",
|
||||
api_key="your-api-key",
|
||||
))
|
||||
# List what's connected
|
||||
for info in client.list_integrations():
|
||||
print(f"{info.provider}/{info.alias}: {info.status}")
|
||||
|
||||
# Fetch a credential
|
||||
response = client.get_credential("hubspot")
|
||||
if response:
|
||||
print(f"Token expires at: {response.expires_at}")
|
||||
|
||||
# Request a refresh
|
||||
refreshed = client.request_refresh("hubspot")
|
||||
# Get an access token
|
||||
cred = client.get_credential(info.integration_id)
|
||||
print(cred.access_token)
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
@@ -88,8 +90,7 @@ class AdenClientConfig:
|
||||
"""Base URL of the Aden server (e.g., 'https://api.adenhq.com')."""
|
||||
|
||||
api_key: str | None = None
|
||||
"""Agent's API key for authenticating with Aden.
|
||||
If not provided, loaded from ADEN_API_KEY environment variable."""
|
||||
"""Agent API key. Loaded from ADEN_API_KEY env var if not provided."""
|
||||
|
||||
tenant_id: str | None = None
|
||||
"""Optional tenant ID for multi-tenant deployments."""
|
||||
@@ -104,7 +105,6 @@ class AdenClientConfig:
|
||||
"""Base delay between retries in seconds (exponential backoff)."""
|
||||
|
||||
def __post_init__(self) -> None:
|
||||
"""Load API key from environment if not provided."""
|
||||
if self.api_key is None:
|
||||
self.api_key = os.environ.get("ADEN_API_KEY")
|
||||
if not self.api_key:
|
||||
@@ -115,71 +115,124 @@ class AdenClientConfig:
|
||||
|
||||
|
||||
@dataclass
|
||||
class AdenCredentialResponse:
|
||||
"""Response from Aden server containing credential data."""
|
||||
class AdenIntegrationInfo:
|
||||
"""An integration from GET /v1/credentials.
|
||||
|
||||
Example response item::
|
||||
|
||||
{
|
||||
"integration_id": "Z29vZ2xlOlRpbW90aHk6MTYwNjc6MTM2ODQ",
|
||||
"provider": "google",
|
||||
"alias": "Timothy",
|
||||
"status": "active",
|
||||
"email": "timothy@acho.io",
|
||||
"expires_at": "2026-02-20T21:46:04.863Z"
|
||||
}
|
||||
"""
|
||||
|
||||
integration_id: str
|
||||
"""Unique identifier for the integration (e.g., 'hubspot')."""
|
||||
"""Base64-encoded hash ID assigned by Aden."""
|
||||
|
||||
integration_type: str
|
||||
"""Type of integration (e.g., 'hubspot', 'github', 'slack')."""
|
||||
provider: str
|
||||
"""Provider type (e.g. "google", "slack", "hubspot")."""
|
||||
|
||||
access_token: str
|
||||
"""The access token for API calls."""
|
||||
alias: str
|
||||
"""User-set alias on the Aden platform."""
|
||||
|
||||
token_type: str = "Bearer"
|
||||
"""Token type (usually 'Bearer')."""
|
||||
status: str
|
||||
"""Status: "active", "expired", "requires_reauth"."""
|
||||
|
||||
email: str = ""
|
||||
"""Email associated with this connection."""
|
||||
|
||||
expires_at: datetime | None = None
|
||||
"""When the access token expires (UTC)."""
|
||||
"""When the current access token expires."""
|
||||
|
||||
scopes: list[str] = field(default_factory=list)
|
||||
"""OAuth2 scopes granted to this token."""
|
||||
|
||||
metadata: dict[str, Any] = field(default_factory=dict)
|
||||
"""Additional integration-specific metadata."""
|
||||
# Backward compat — old code reads integration_type
|
||||
@property
|
||||
def integration_type(self) -> str:
|
||||
return self.provider
|
||||
|
||||
@classmethod
|
||||
def from_dict(
|
||||
cls, data: dict[str, Any], integration_id: str | None = None
|
||||
) -> AdenCredentialResponse:
|
||||
"""Create from API response dictionary."""
|
||||
def from_dict(cls, data: dict[str, Any]) -> AdenIntegrationInfo:
|
||||
expires_at = None
|
||||
if data.get("expires_at"):
|
||||
expires_at = datetime.fromisoformat(data["expires_at"].replace("Z", "+00:00"))
|
||||
|
||||
return cls(
|
||||
integration_id=integration_id or data.get("alias", data.get("provider", "")),
|
||||
integration_type=data.get("provider", ""),
|
||||
access_token=data["access_token"],
|
||||
token_type=data.get("token_type", "Bearer"),
|
||||
integration_id=data.get("integration_id", ""),
|
||||
provider=data.get("provider", ""),
|
||||
alias=data.get("alias", ""),
|
||||
status=data.get("status", "unknown"),
|
||||
email=data.get("email", ""),
|
||||
expires_at=expires_at,
|
||||
scopes=data.get("scopes", []),
|
||||
metadata={"email": data.get("email")} if data.get("email") else {},
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class AdenIntegrationInfo:
|
||||
"""Information about an available integration."""
|
||||
class AdenCredentialResponse:
|
||||
"""Response from GET /v1/credentials/{integration_id}.
|
||||
|
||||
Example::
|
||||
|
||||
{
|
||||
"access_token": "ya29.a0AfH6SM...",
|
||||
"token_type": "Bearer",
|
||||
"expires_at": "2026-02-20T12:00:00.000Z",
|
||||
"provider": "google",
|
||||
"alias": "Timothy",
|
||||
"email": "timothy@acho.io"
|
||||
}
|
||||
"""
|
||||
|
||||
integration_id: str
|
||||
integration_type: str
|
||||
status: str # "active", "requires_reauth", "expired"
|
||||
"""The integration_id used in the request."""
|
||||
|
||||
access_token: str
|
||||
"""Short-lived access token for API calls."""
|
||||
|
||||
token_type: str = "Bearer"
|
||||
|
||||
expires_at: datetime | None = None
|
||||
|
||||
provider: str = ""
|
||||
"""Provider type (e.g. "google")."""
|
||||
|
||||
alias: str = ""
|
||||
"""User-set alias."""
|
||||
|
||||
email: str = ""
|
||||
"""Email associated with this connection."""
|
||||
|
||||
scopes: list[str] = field(default_factory=list)
|
||||
metadata: dict[str, Any] = field(default_factory=dict)
|
||||
|
||||
# Backward compat
|
||||
@property
|
||||
def integration_type(self) -> str:
|
||||
return self.provider
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls, data: dict[str, Any]) -> AdenIntegrationInfo:
|
||||
"""Create from API response dictionary."""
|
||||
def from_dict(cls, data: dict[str, Any], integration_id: str = "") -> AdenCredentialResponse:
|
||||
expires_at = None
|
||||
if data.get("expires_at"):
|
||||
expires_at = datetime.fromisoformat(data["expires_at"].replace("Z", "+00:00"))
|
||||
|
||||
# Build metadata from email if present
|
||||
metadata = data.get("metadata") or {}
|
||||
if not metadata and data.get("email"):
|
||||
metadata = {"email": data["email"]}
|
||||
|
||||
return cls(
|
||||
integration_id=data["integration_id"],
|
||||
integration_type=data.get("provider", data["integration_id"]),
|
||||
status=data.get("status", "unknown"),
|
||||
integration_id=integration_id or data.get("integration_id", ""),
|
||||
access_token=data["access_token"],
|
||||
token_type=data.get("token_type", "Bearer"),
|
||||
expires_at=expires_at,
|
||||
provider=data.get("provider", ""),
|
||||
alias=data.get("alias", ""),
|
||||
email=data.get("email", ""),
|
||||
scopes=data.get("scopes", []),
|
||||
metadata=metadata,
|
||||
)
|
||||
|
||||
|
||||
@@ -187,56 +240,33 @@ class AdenCredentialClient:
|
||||
"""
|
||||
HTTP client for Aden credential server.
|
||||
|
||||
Handles communication with the Aden authentication server,
|
||||
including fetching credentials, requesting refreshes, and
|
||||
reporting usage statistics.
|
||||
|
||||
The client automatically handles:
|
||||
- Retries with exponential backoff for transient failures
|
||||
- Proper error classification (auth, not found, rate limit, etc.)
|
||||
- Request headers for authentication and tenant isolation
|
||||
|
||||
Usage:
|
||||
# API key loaded from ADEN_API_KEY environment variable
|
||||
config = AdenClientConfig(
|
||||
client = AdenCredentialClient(AdenClientConfig(
|
||||
base_url="https://api.adenhq.com",
|
||||
)
|
||||
))
|
||||
|
||||
client = AdenCredentialClient(config)
|
||||
# List integrations
|
||||
for info in client.list_integrations():
|
||||
print(f"{info.provider}/{info.alias}: {info.status}")
|
||||
|
||||
# Fetch a credential
|
||||
cred = client.get_credential("hubspot")
|
||||
if cred:
|
||||
headers = {"Authorization": f"Bearer {cred.access_token}"}
|
||||
# Get access token (uses base64 integration_id, NOT provider name)
|
||||
cred = client.get_credential(info.integration_id)
|
||||
headers = {"Authorization": f"Bearer {cred.access_token}"}
|
||||
|
||||
# List all integrations
|
||||
integrations = client.list_integrations()
|
||||
for info in integrations:
|
||||
print(f"{info.integration_id}: {info.status}")
|
||||
|
||||
# Clean up
|
||||
client.close()
|
||||
"""
|
||||
|
||||
def __init__(self, config: AdenClientConfig):
|
||||
"""
|
||||
Initialize the Aden client.
|
||||
|
||||
Args:
|
||||
config: Client configuration including base URL and API key.
|
||||
"""
|
||||
self.config = config
|
||||
self._client: httpx.Client | None = None
|
||||
|
||||
def _get_client(self) -> httpx.Client:
|
||||
"""Get or create the HTTP client."""
|
||||
if self._client is None:
|
||||
headers = {
|
||||
"Authorization": f"Bearer {self.config.api_key}",
|
||||
"Content-Type": "application/json",
|
||||
"User-Agent": "hive-credential-store/1.0",
|
||||
}
|
||||
|
||||
if self.config.tenant_id:
|
||||
headers["X-Tenant-ID"] = self.config.tenant_id
|
||||
|
||||
@@ -245,7 +275,6 @@ class AdenCredentialClient:
|
||||
timeout=self.config.timeout,
|
||||
headers=headers,
|
||||
)
|
||||
|
||||
return self._client
|
||||
|
||||
def _request_with_retry(
|
||||
@@ -262,10 +291,13 @@ class AdenCredentialClient:
|
||||
try:
|
||||
response = client.request(method, path, **kwargs)
|
||||
|
||||
# Handle specific error codes
|
||||
if response.status_code == 401:
|
||||
raise AdenAuthenticationError("Agent API key is invalid or revoked")
|
||||
|
||||
if response.status_code == 403:
|
||||
data = response.json()
|
||||
raise AdenClientError(data.get("message", "Forbidden"))
|
||||
|
||||
if response.status_code == 404:
|
||||
raise AdenNotFoundError(f"Integration not found: {path}")
|
||||
|
||||
@@ -278,14 +310,15 @@ class AdenCredentialClient:
|
||||
|
||||
if response.status_code == 400:
|
||||
data = response.json()
|
||||
if data.get("error") == "refresh_failed":
|
||||
msg = data.get("message", "Bad request")
|
||||
if data.get("error") == "refresh_failed" or "refresh" in msg.lower():
|
||||
raise AdenRefreshError(
|
||||
data.get("message", "Token refresh failed"),
|
||||
msg,
|
||||
requires_reauthorization=data.get("requires_reauthorization", False),
|
||||
reauthorization_url=data.get("reauthorization_url"),
|
||||
)
|
||||
raise AdenClientError(f"Bad request: {msg}")
|
||||
|
||||
# Success or other error
|
||||
response.raise_for_status()
|
||||
return response
|
||||
|
||||
@@ -306,30 +339,40 @@ class AdenCredentialClient:
|
||||
AdenRefreshError,
|
||||
AdenRateLimitError,
|
||||
):
|
||||
# Don't retry these errors
|
||||
raise
|
||||
|
||||
# Should not reach here, but just in case
|
||||
raise AdenClientError(
|
||||
f"Request failed after {self.config.retry_attempts} attempts"
|
||||
) from last_error
|
||||
|
||||
def get_credential(self, integration_id: str) -> AdenCredentialResponse | None:
|
||||
def list_integrations(self) -> list[AdenIntegrationInfo]:
|
||||
"""
|
||||
Fetch the current credential for an integration.
|
||||
List all integrations for this agent's team.
|
||||
|
||||
The Aden server may refresh the token internally if it's expired
|
||||
before returning it.
|
||||
|
||||
Args:
|
||||
integration_id: The integration identifier (e.g., 'hubspot').
|
||||
GET /v1/credentials → {"integrations": [...]}
|
||||
|
||||
Returns:
|
||||
Credential response with access token, or None if not found.
|
||||
List of AdenIntegrationInfo with integration_id, provider,
|
||||
alias, status, email, expires_at.
|
||||
"""
|
||||
response = self._request_with_retry("GET", "/v1/credentials")
|
||||
data = response.json()
|
||||
return [AdenIntegrationInfo.from_dict(item) for item in data.get("integrations", [])]
|
||||
|
||||
Raises:
|
||||
AdenAuthenticationError: If API key is invalid.
|
||||
AdenClientError: For connection failures.
|
||||
# Alias
|
||||
list_connections = list_integrations
|
||||
|
||||
def get_credential(self, integration_id: str) -> AdenCredentialResponse | None:
|
||||
"""
|
||||
Get access token for an integration. Auto-refreshes if near expiry.
|
||||
|
||||
GET /v1/credentials/{integration_id}
|
||||
|
||||
Args:
|
||||
integration_id: Base64 hash ID from list_integrations().
|
||||
|
||||
Returns:
|
||||
AdenCredentialResponse with access_token, or None if not found.
|
||||
"""
|
||||
try:
|
||||
response = self._request_with_retry("GET", f"/v1/credentials/{integration_id}")
|
||||
@@ -340,100 +383,34 @@ class AdenCredentialClient:
|
||||
|
||||
def request_refresh(self, integration_id: str) -> AdenCredentialResponse:
|
||||
"""
|
||||
Request the Aden server to refresh the token.
|
||||
Force refresh the access token.
|
||||
|
||||
Use this when the local store detects an expired or near-expiry token.
|
||||
The Aden server handles the actual OAuth2 refresh token flow.
|
||||
POST /v1/credentials/{integration_id}/refresh
|
||||
|
||||
Args:
|
||||
integration_id: The integration identifier.
|
||||
integration_id: Base64 hash ID.
|
||||
|
||||
Returns:
|
||||
Credential response with new access token.
|
||||
|
||||
Raises:
|
||||
AdenRefreshError: If refresh fails (may require re-authorization).
|
||||
AdenNotFoundError: If integration not found.
|
||||
AdenAuthenticationError: If API key is invalid.
|
||||
AdenRateLimitError: If rate limited.
|
||||
AdenCredentialResponse with new access_token.
|
||||
"""
|
||||
response = self._request_with_retry("POST", f"/v1/credentials/{integration_id}/refresh")
|
||||
data = response.json()
|
||||
return AdenCredentialResponse.from_dict(data, integration_id=integration_id)
|
||||
|
||||
def list_integrations(self) -> list[AdenIntegrationInfo]:
|
||||
"""
|
||||
List all integrations available for this agent/tenant.
|
||||
|
||||
Returns:
|
||||
List of integration info objects.
|
||||
|
||||
Raises:
|
||||
AdenAuthenticationError: If API key is invalid.
|
||||
AdenClientError: For connection failures.
|
||||
"""
|
||||
response = self._request_with_retry("GET", "/v1/credentials")
|
||||
data = response.json()
|
||||
return [AdenIntegrationInfo.from_dict(item) for item in data.get("integrations", [])]
|
||||
|
||||
def validate_token(self, integration_id: str) -> dict[str, Any]:
|
||||
"""
|
||||
Check if a token is still valid without fetching it.
|
||||
Check if an integration's OAuth connection is valid.
|
||||
|
||||
Args:
|
||||
integration_id: The integration identifier.
|
||||
GET /v1/credentials/{integration_id}/validate
|
||||
|
||||
Returns:
|
||||
Dict with 'valid' bool and optional 'expires_at', 'reason',
|
||||
'requires_reauthorization', 'reauthorization_url'.
|
||||
|
||||
Raises:
|
||||
AdenNotFoundError: If integration not found.
|
||||
AdenAuthenticationError: If API key is invalid.
|
||||
{"valid": bool, "status": str, "expires_at": str, "error": str|null}
|
||||
"""
|
||||
response = self._request_with_retry("GET", f"/v1/credentials/{integration_id}/validate")
|
||||
return response.json()
|
||||
|
||||
def report_usage(
|
||||
self,
|
||||
integration_id: str,
|
||||
operation: str,
|
||||
status: str = "success",
|
||||
metadata: dict[str, Any] | None = None,
|
||||
) -> None:
|
||||
"""
|
||||
Report credential usage statistics to Aden.
|
||||
|
||||
This is optional and used for analytics/billing.
|
||||
|
||||
Args:
|
||||
integration_id: The integration identifier.
|
||||
operation: Operation name (e.g., 'api_call').
|
||||
status: Operation status ('success', 'error').
|
||||
metadata: Additional operation metadata.
|
||||
"""
|
||||
try:
|
||||
self._request_with_retry(
|
||||
"POST",
|
||||
f"/v1/credentials/{integration_id}/usage",
|
||||
json={
|
||||
"operation": operation,
|
||||
"status": status,
|
||||
"timestamp": datetime.utcnow().isoformat() + "Z",
|
||||
"metadata": metadata or {},
|
||||
},
|
||||
)
|
||||
except Exception as e:
|
||||
# Usage reporting is best-effort, don't fail on errors
|
||||
logger.warning(f"Failed to report usage for '{integration_id}': {e}")
|
||||
|
||||
def health_check(self) -> dict[str, Any]:
|
||||
"""
|
||||
Check Aden server health and connectivity.
|
||||
|
||||
Returns:
|
||||
Dict with 'status', 'version', 'timestamp', and optionally 'error'.
|
||||
"""
|
||||
"""Check Aden server health."""
|
||||
try:
|
||||
client = self._get_client()
|
||||
response = client.get("/health")
|
||||
@@ -441,26 +418,17 @@ class AdenCredentialClient:
|
||||
data = response.json()
|
||||
data["latency_ms"] = response.elapsed.total_seconds() * 1000
|
||||
return data
|
||||
return {
|
||||
"status": "degraded",
|
||||
"error": f"Unexpected status code: {response.status_code}",
|
||||
}
|
||||
return {"status": "degraded", "error": f"HTTP {response.status_code}"}
|
||||
except Exception as e:
|
||||
return {
|
||||
"status": "unhealthy",
|
||||
"error": str(e),
|
||||
}
|
||||
return {"status": "unhealthy", "error": str(e)}
|
||||
|
||||
def close(self) -> None:
|
||||
"""Close the HTTP client and release resources."""
|
||||
if self._client:
|
||||
self._client.close()
|
||||
self._client = None
|
||||
|
||||
def __enter__(self) -> AdenCredentialClient:
|
||||
"""Context manager entry."""
|
||||
return self
|
||||
|
||||
def __exit__(self, *args: Any) -> None:
|
||||
"""Context manager exit."""
|
||||
self.close()
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user