Compare commits

1 Commit

| Author | SHA1 | Date |
|---|---|---|
| | 3579fd422c | |
@@ -1,58 +1,4 @@
{
  "permissions": {
    "allow": [
      "Bash(grep -n \"_is_context_too_large_error\" core/framework/agent_loop/agent_loop.py core/framework/agent_loop/internals/*.py)",
      "Read(//^class/ {cls=$3} /def test_/**)",
      "Read(//^ @pytest.mark.asyncio/{getline n; print NR\": \"n} /^ def test_/**)",
      "Bash(python3)",
      "Bash(grep -nE 'Tool\\\\\\(\\\\s*$|name=\"[a-z_]+\",' core/framework/tools/queen_lifecycle_tools.py)",
      "Bash(awk -F'\"' '{print $2}')",
      "Bash(grep -n \"create_colony\\\\|colony-spawn\\\\|colony_spawn\" /home/timothy/aden/hive/core/framework/agents/queen/nodes/__init__.py /home/timothy/aden/hive/core/framework/tools/*.py)",
      "Bash(git stash:*)",
      "Bash(python3 -c \"import sys,json; d=json.loads\\(sys.stdin.read\\(\\)\\); print\\('keys:', list\\(d.keys\\(\\)\\)[:10]\\)\")",
      "Bash(python3 -c ':*)",
      "Bash(uv run:*)",
      "Read(//tmp/**)",
      "Bash(grep -n \"useColony\\\\|const { queens, queenProfiles\" /home/timothy/aden/hive/core/frontend/src/pages/queen-dm.tsx)",
      "Bash(awk 'NR==385,/\\\\}, \\\\[/' /home/timothy/aden/hive/core/frontend/src/pages/queen-dm.tsx)",
      "Bash(xargs -I{} sh -c 'if ! grep -q \"^import base64\\\\|^from base64\" \"{}\"; then echo \"MISSING: {}\"; fi')",
      "Bash(find /home/timothy/aden/hive/core/framework -name \"*.py\" -type f -exec grep -l \"FileConversationStore\\\\|class.*ConversationStore\" {} \\\\;)",
      "Bash(find /home/timothy/aden/hive/core/framework -name \"*.py\" -exec grep -l \"run_parallel_workers\\\\|create_colony\" {} \\\\;)",
      "Bash(awk '/^ async def execute\\\\\\(self, ctx: AgentContext\\\\\\)/,/^ async def [a-z_]+/ {print NR\": \"$0}' /home/timothy/aden/hive/core/framework/agent_loop/agent_loop.py)",
      "Bash(grep -r \"max_concurrent_workers\\\\|max_depth\\\\|recursion\\\\|spawn.*bomb\" /home/timothy/aden/hive/core/framework/host/*.py)",
      "Bash(wc -l /home/timothy/aden/hive/tools/src/gcu/browser/*.py /home/timothy/aden/hive/tools/src/gcu/browser/tools/*.py)",
      "Bash(file /tmp/gcu_verify/*.png)",
      "Bash(ps -eo pid,cmd)",
      "Bash(ps -o pid,lstart,cmd -p 746640)",
      "Bash(kill 746636)",
      "Bash(ps -eo pid,lstart,cmd)",
      "Bash(grep -E \"^d|\\\\.py$\")",
      "Bash(grep -E \"\\\\.\\(ts|tsx\\)$\")",
      "Bash(xargs cat:*)",
      "Bash(find /home/timothy/aden/hive -path \"*/.venv\" -prune -o -name \"*.py\" -type f -exec grep -l \"frontend\\\\|UI\\\\|terminal\\\\|interactive\\\\|TUI\" {} \\\\;)",
      "Bash(wc -l /home/timothy/.hive/backup/*/SKILL.md)",
      "Bash(awk -F'::' '{print $1}')",
      "Bash(wait)",
      "Bash(pkill -f \"pytest.*test_event_loop_node\")",
      "Bash(pkill -f \"pytest.*TestToolConcurrency\")",
      "Bash(grep -n \"def.*discover\\\\|/api/agents\\\\|agents_discover\" /home/timothy/aden/hive/core/framework/server/*.py)",
      "Bash(bun run:*)",
      "Bash(npx eslint:*)",
      "Bash(npm run:*)",
      "Bash(npm test:*)",
      "Bash(grep -E \"\\\\.tsx$|^d\")",
      "Bash(grep -E \"test_.*\\\\.py$\")",
      "Bash(grep \"\\\\.py$\")",
      "Bash(grep -l \"save_agent_draft\\\\|confirm_and_build\\\\|replan_agent\\\\|load_built_agent\\\\|planning\\\\|building\\\\|staging\" /home/timothy/aden/hive/core/framework/agents/queen/reference/*.md)",
      "Bash(grep -E \"\\\\.tsx$|\\\\.ts$\")",
      "Bash(find /home/timothy/aden/hive/core/framework/tools -name \"*.py\" -exec grep -l \"switch_to_\" {} \\\\;)"
    ],
    "additionalDirectories": [
      "/home/timothy/.hive/skills/writing-hive-skills",
      "/tmp",
      "/home/timothy/.hive/skills"
    ]
  },
  "hooks": {
    "PostToolUse": [
      {
@@ -0,0 +1,40 @@
{
  "permissions": {
    "allow": [
      "Bash(npm install:*)",
      "Bash(npm test:*)",
      "Skill(building-agents-construction)",
      "Skill(building-agents-construction:*)",
      "Bash(PYTHONPATH=core:exports pytest:*)",
      "mcp__agent-builder__create_session",
      "mcp__agent-builder__get_session_status",
      "mcp__agent-builder__set_goal",
      "mcp__agent-builder__list_mcp_servers",
      "mcp__agent-builder__test_node",
      "mcp__agent-builder__add_node",
      "mcp__agent-builder__add_edge",
      "mcp__agent-builder__validate_graph",
      "Bash(ruff check:*)",
      "Bash(PYTHONPATH=core:exports python:*)",
      "mcp__agent-builder__list_tests",
      "mcp__agent-builder__generate_constraint_tests",
      "Bash(python -m agent:*)",
      "Bash(python agent.py:*)",
      "Bash(python -c:*)",
      "Bash(done)",
      "Bash(xargs cat:*)",
      "mcp__agent-builder__list_mcp_tools",
      "mcp__agent-builder__add_mcp_server",
      "mcp__agent-builder__check_missing_credentials",
      "mcp__agent-builder__store_credential",
      "mcp__agent-builder__list_stored_credentials",
      "mcp__agent-builder__delete_stored_credential",
      "mcp__agent-builder__verify_credentials",
      "Bash(PYTHONPATH=/home/timothy/oss/hive/core:/home/timothy/oss/hive/exports python:*)",
      "Bash(PYTHONPATH=core:exports:tools/src python -m hubspot_input:*)",
      "mcp__agent-builder__export_graph"
    ]
  },
  "enabledMcpjsonServers": ["agent-builder", "tools"],
  "enableAllProjectMcpServers": true
}
@@ -1,16 +0,0 @@
{
  "permissions": {
    "allow": [
      "Bash(git status:*)",
      "Bash(gh run view:*)",
      "Bash(uv run:*)",
      "Bash(env:*)",
      "Bash(python -m py_compile:*)",
      "Bash(python -m pytest:*)",
      "Bash(source:*)",
      "Bash(find:*)",
      "Bash(PYTHONPATH=core:exports:tools/src uv run pytest:*)"
    ]
  },
  "enabledMcpjsonServers": ["tools"]
}
@@ -0,0 +1,463 @@
---
name: agent-workflow
description: Complete workflow for building, implementing, and testing goal-driven agents. Orchestrates the building-agents-* and testing-agent skills. Use when starting a new agent project, when unsure which skill to use, or when you need end-to-end guidance.
license: Apache-2.0
metadata:
  author: hive
  version: "2.0"
  type: workflow-orchestrator
  orchestrates:
    - building-agents-core
    - building-agents-construction
    - building-agents-patterns
    - testing-agent
    - setup-credentials
---

# Agent Development Workflow

Complete Standard Operating Procedure (SOP) for building production-ready goal-driven agents.

## Overview

This workflow orchestrates specialized skills to take you from initial concept to production-ready agent:

1. **Understand Concepts** → `/building-agents-core` (optional)
2. **Build Structure** → `/building-agents-construction`
3. **Optimize Design** → `/building-agents-patterns` (optional)
4. **Set Up Credentials** → `/setup-credentials` (if the agent uses tools requiring API keys)
5. **Test & Validate** → `/testing-agent`

## When to Use This Workflow

Use this meta-skill when you:
- Are starting a new agent from scratch
- Are unclear which skill to use first
- Need end-to-end guidance for agent development
- Want consistent, repeatable agent builds

**Skip this workflow** if:
- You only need to test an existing agent → use `/testing-agent` directly
- You know exactly which phase you're in → use the specific skill directly

## Quick Decision Tree

```
"Need to understand agent concepts"            → building-agents-core
"Build a new agent"                            → building-agents-construction
"Optimize my agent design"                     → building-agents-patterns
"Set up API keys for my agent"                 → setup-credentials
"Test my agent"                                → testing-agent
"Not sure what I need"                         → Read the phases below, then decide
"Agent has structure but needs implementation" → See the agent directory's STATUS.md
```

## Phase 0: Understand Concepts (Optional)

**Duration**: 5-10 minutes
**Skill**: `/building-agents-core`
**Input**: Questions about agent architecture

### When to Use

- First time building an agent
- Need to understand node types, edges, and goals
- Want to validate tool availability
- Learning about the pause/resume architecture

### What This Phase Provides

- Architecture overview (Python packages, not JSON)
- Core concepts (Goal, Node, Edge, Pause/Resume)
- Tool discovery and validation procedures
- Workflow overview

**Skip this phase** if you already understand agent fundamentals.

## Phase 1: Build Agent Structure

**Duration**: 15-30 minutes
**Skill**: `/building-agents-construction`
**Input**: User requirements ("Build an agent that...")

### What This Phase Does

Creates the complete agent architecture:
- Package structure (`exports/agent_name/`)
- Goal with success criteria and constraints
- Workflow graph (nodes and edges)
- Node specifications
- CLI interface
- Documentation

### Process

1. **Create package** - Directory structure with skeleton files
2. **Define goal** - Success criteria and constraints written to agent.py (see the sketch after this list)
3. **Design nodes** - Each node approved and written incrementally
4. **Connect edges** - Workflow graph with conditional routing
5. **Finalize** - Agent class, exports, and documentation
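A minimal sketch of what step 2 produces, following the `Goal` fields shown in the file-monitor example later in this diff. The import path is an assumption; use whatever module the framework actually exposes.

```python
# Hypothetical import path -- adjust to the real framework module.
from framework import Goal

goal = Goal(
    id="my-agent-goal",
    name="My Agent",
    success_criteria=[
        # measurable outcomes, e.g. "100% of inputs processed"
    ],
    constraints=[
        # hard requirements, e.g. "never delete source files"
    ],
)
```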
### Outputs

- ✅ `exports/agent_name/` package created
- ✅ Goal defined in agent.py
- ✅ 3-5 success criteria defined
- ✅ 1-5 constraints defined
- ✅ 5-10 nodes specified in nodes/__init__.py
- ✅ 8-15 edges connecting the workflow
- ✅ Validated structure (passes `python -m agent_name validate`)
- ✅ README.md with usage instructions
- ✅ CLI commands (info, validate, run, shell)

### Success Criteria

You're ready for Phase 2 when:
- The agent structure validates without errors
- All nodes and edges are defined
- The CLI commands work (info, validate)
- You see: "Agent complete: exports/agent_name/"

### Common Outputs

The building-agents-construction skill produces:

```
exports/agent_name/
├── __init__.py          (package exports)
├── __main__.py          (CLI interface)
├── agent.py             (goal, graph, agent class)
├── nodes/__init__.py    (node specifications)
├── config.py            (configuration)
├── implementations.py   (may be created for Python functions)
└── README.md            (documentation)
```

### Next Steps

**If the structure is complete and validated:**
→ Check `exports/agent_name/STATUS.md` or `IMPLEMENTATION_GUIDE.md`
→ These files explain the implementation options
→ You may need to add Python functions or MCP tools (not covered by the current skills)

**If you want to optimize the design:**
→ Proceed to Phase 1.5 (building-agents-patterns)

**If you're ready to test:**
→ Proceed to Phase 2

## Phase 1.5: Optimize Design (Optional)

**Duration**: 10-15 minutes
**Skill**: `/building-agents-patterns`
**Input**: Completed agent structure

### When to Use

- Want to add pause/resume functionality
- Need error-handling patterns
- Want to optimize performance
- Need examples of complex routing
- Want best-practices guidance

### What This Phase Provides

- Practical examples and patterns
- Pause/resume architecture
- Error-handling strategies
- Anti-patterns to avoid
- Performance-optimization techniques

**Skip this phase** if your agent design is straightforward.

## Phase 2: Test & Validate

**Duration**: 20-40 minutes
**Skill**: `/testing-agent`
**Input**: Working agent from Phase 1

### What This Phase Does

Creates a comprehensive test suite:
- Constraint tests (verify hard requirements)
- Success-criteria tests (measure goal achievement)
- Edge-case tests (handle failures gracefully)
- Integration tests (end-to-end workflows)

### Process

1. **Analyze agent** - Read the goal, constraints, and success criteria
2. **Generate tests** - Create pytest files in `exports/agent_name/tests/` (see the sketch after this list)
3. **User approval** - Review and approve each test
4. **Run evaluation** - Execute tests and collect results
5. **Debug failures** - Identify and fix issues
6. **Iterate** - Repeat until all tests pass
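A hedged sketch of what a generated constraint test can look like; the test name mirrors the file-monitor example later in this diff, and the agent-invocation line is elided because the runner API isn't shown here.

```python
import pytest


@pytest.mark.asyncio
async def test_preserves_originals(tmp_path):
    # Arrange: a source file the agent is expected to copy, not move.
    src = tmp_path / "downloads" / "file.txt"
    src.parent.mkdir()
    src.write_text("data")

    # ... run one agent cycle against tmp_path here ...

    # Constraint: originals are never deleted.
    assert src.exists()
```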
### Outputs

- ✅ Test files in `exports/agent_name/tests/`
- ✅ Test report with pass/fail metrics
- ✅ Coverage of all success criteria
- ✅ Coverage of all constraints
- ✅ Edge-case handling verified

### Success Criteria

You're done when:
- All tests pass
- All success criteria are validated
- All constraints are verified
- The agent handles edge cases
- Test coverage is comprehensive

### Next Steps

**The agent is ready for:**
- Production deployment
- Integration into larger systems
- Documentation and handoff
- Continuous monitoring

## Phase Transitions

### From Phase 1 to Phase 2

**Trigger signals:**
- "Agent complete: exports/..."
- Structure validation passes
- The README indicates implementation is complete

**Before proceeding:**
- Verify the agent can be imported: `from exports.agent_name import default_agent` (see the sketch below)
- Check whether implementation is needed (see STATUS.md or IMPLEMENTATION_GUIDE.md)
- Confirm the agent executes without import errors
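A minimal pre-flight check, with `my_agent` standing in for your package name:

```python
# If this import fails, the agent is not ready for Phase 2.
from exports.my_agent import default_agent

print(default_agent)  # import succeeded, no import errors
```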
### Skipping Phases

**When to skip Phase 1:**
- The agent structure already exists
- You only need to add tests
- You're modifying an existing agent

**When to skip Phase 2:**
- Prototyping or exploring
- The agent is not production-bound
- Manual testing is sufficient

## Common Patterns

### Pattern 1: Complete New Build (Simple)

```
User: "Build an agent that monitors files"
→ Use /building-agents-construction
→ Agent structure created
→ Use /testing-agent
→ Tests created and passing
→ Done: Production-ready agent
```

### Pattern 1b: Complete New Build (With Learning)

```
User: "Build an agent (first time)"
→ Use /building-agents-core (understand concepts)
→ Use /building-agents-construction (build structure)
→ Use /building-agents-patterns (optimize design)
→ Use /testing-agent (validate)
→ Done: Production-ready agent
```

### Pattern 2: Test Existing Agent

```
User: "Test my agent at exports/my_agent"
→ Skip Phase 1
→ Use /testing-agent directly
→ Tests created
→ Done: Validated agent
```

### Pattern 3: Iterative Development

```
User: "Build an agent"
→ Use /building-agents-construction (Phase 1)
→ Implementation needed (see STATUS.md)
→ [User implements functions]
→ Use /testing-agent (Phase 2)
→ Tests reveal bugs
→ [Fix bugs manually]
→ Re-run tests
→ Done: Working agent
```

### Pattern 4: Complex Agent with Patterns

```
User: "Build an agent with multi-turn conversations"
→ Use /building-agents-core (learn pause/resume)
→ Use /building-agents-construction (build structure)
→ Use /building-agents-patterns (implement the pause/resume pattern)
→ Use /testing-agent (validate conversation flows)
→ Done: Complex conversational agent
```

## Skill Dependencies

```
agent-workflow (meta-skill)
│
├── building-agents-core (foundational)
│   ├── Architecture concepts
│   ├── Node/Edge/Goal definitions
│   ├── Tool discovery procedures
│   └── Workflow overview
│
├── building-agents-construction (procedural)
│   ├── Creates package structure
│   ├── Defines goal
│   ├── Adds nodes incrementally
│   ├── Connects edges
│   ├── Finalizes agent class
│   └── Requires: building-agents-core
│
├── building-agents-patterns (reference)
│   ├── Best practices
│   ├── Pause/resume patterns
│   ├── Error handling
│   ├── Anti-patterns
│   └── Performance optimization
│
└── testing-agent
    ├── Reads agent goal
    ├── Generates tests
    ├── Runs evaluation
    └── Reports results
```

## Troubleshooting

### "Agent structure won't validate"

- Check that node IDs match between nodes/__init__.py and agent.py
- Verify that all edges reference valid node IDs
- Ensure the entry_node exists in the nodes list
- Run: `PYTHONPATH=core:exports python -m agent_name validate`

### "Agent has structure but won't run"

- Check for STATUS.md or IMPLEMENTATION_GUIDE.md in the agent directory
- Implementation may be needed (Python functions or MCP tools)
- This is expected - building-agents-construction creates structure, not implementation
- See the implementation guide for completion options

### "Tests are failing"

- Review the test output for specific failures
- Check the agent goal and success criteria
- Verify the constraints are met
- Use `/testing-agent` to debug and iterate
- Fix the agent code and re-run the tests

### "Not sure which phase I'm in"

Run these checks:

```bash
# Check if the agent structure exists
ls exports/my_agent/agent.py

# Check if it validates
PYTHONPATH=core:exports python -m my_agent validate

# Check if tests exist
ls exports/my_agent/tests/

# If structure exists and validates → Phase 2 (testing)
# If structure doesn't exist        → Phase 1 (building)
# If tests exist but are failing    → Debug phase
```

## Best Practices

### For Phase 1 (Building)

1. **Start with clear requirements** - Know what the agent should do
2. **Define success criteria early** - Measurable goals drive design
3. **Keep nodes focused** - One responsibility per node
4. **Use descriptive names** - Node IDs should explain their purpose
5. **Validate incrementally** - Check the structure after each major addition

### For Phase 2 (Testing)

1. **Test constraints first** - Hard requirements must pass
2. **Mock external dependencies** - Use mock mode for LLMs/APIs
3. **Cover edge cases** - Test failures, not just success paths
4. **Iterate quickly** - Fix one test at a time
5. **Document test patterns** - Future tests follow the same structure

### General Workflow

1. **Use version control** - Git commit after each phase
2. **Document decisions** - Update the README with changes
3. **Keep iterations small** - Build → Test → Fix → Repeat
4. **Preserve working states** - Tag successful iterations
5. **Learn from failures** - Failed tests reveal design issues

## Exit Criteria

You're done with the workflow when:

✅ Agent structure validates
✅ All tests pass
✅ Success criteria met
✅ Constraints verified
✅ Documentation complete
✅ Agent ready for deployment

## Additional Resources

- **building-agents-core**: See `.claude/skills/building-agents-core/SKILL.md`
- **building-agents-construction**: See `.claude/skills/building-agents-construction/SKILL.md`
- **building-agents-patterns**: See `.claude/skills/building-agents-patterns/SKILL.md`
- **testing-agent**: See `.claude/skills/testing-agent/SKILL.md`
- **Agent framework docs**: See `core/README.md`
- **Example agents**: See the `exports/` directory

## Summary

This workflow provides a proven path from concept to production-ready agent:

1. **Learn** with `/building-agents-core` → Understand the fundamentals (optional)
2. **Build** with `/building-agents-construction` → Get a validated structure
3. **Optimize** with `/building-agents-patterns` → Apply best practices (optional)
4. **Test** with `/testing-agent` → Get verified functionality

The workflow is **flexible** - skip phases as needed, iterate freely, and adapt it to your specific requirements. The goal is **production-ready agents** built with **consistent, repeatable processes**.

## Skill Selection Guide

**Choose building-agents-core when:**
- Building agents for the first time
- You need to understand the architecture
- Validating tool availability
- Learning about node types and edges

**Choose building-agents-construction when:**
- Actually building an agent
- You have clear requirements
- You're ready to write code
- You want step-by-step guidance

**Choose building-agents-patterns when:**
- The agent structure is complete
- You need advanced patterns
- Implementing pause/resume
- Optimizing performance
- You want best practices

**Choose testing-agent when:**
- The agent structure is complete
- You're ready to validate functionality
- You need comprehensive test coverage
- Debugging agent behavior
@@ -0,0 +1,199 @@
# Example: File Monitor Agent

This example shows the complete agent-workflow in action for building a file-monitoring agent.

## Initial Request

```
User: "Build an agent that monitors ~/Downloads and copies new files to ~/Documents"
```

## Phase 1: Building (20 minutes)

### Step 1: Create Structure

The agent invokes the `/building-agents-construction` skill and:

1. Creates the `exports/file_monitor_agent/` package
2. Writes skeleton files (__init__.py, __main__.py, agent.py, etc.)

**Output**: Package structure visible immediately

### Step 2: Define Goal

```python
goal = Goal(
    id="file-monitor-copy",
    name="Automated File Monitor & Copy",
    success_criteria=[
        # 100% detection rate
        # 100% copy success
        # 100% conflict resolution
        # >99% uptime
    ],
    constraints=[
        # Preserve originals
        # Handle errors gracefully
        # Track state
        # Respect permissions
    ]
)
```

**Output**: Goal written to agent.py

### Step 3: Design Nodes

7 nodes approved and written incrementally:

1. `initialize-state` - Set up tracking
2. `list-downloads` - Scan the directory
3. `identify-new-files` - Find new files
4. `check-for-new-files` - Router
5. `copy-files` - Copy with conflict resolution
6. `update-state` - Mark files as processed
7. `wait-interval` - Sleep between cycles

**Output**: All nodes in nodes/__init__.py

### Step 4: Connect Edges

8 edges connecting the workflow loop:

```
initialize → list → identify → check
                      ↓            ↓
                    copy         wait
                      ↓            ↑
                   update          ↓
                      ↓            ↓
                    wait   →   list (loop)
```

**Output**: Edges written to agent.py

### Step 5: Finalize

```bash
$ PYTHONPATH=core:exports python -m file_monitor_agent validate
✓ Agent is valid

$ PYTHONPATH=core:exports python -m file_monitor_agent info
Agent: File Monitor & Copy Agent
Nodes: 7
Edges: 8
```

**Phase 1 Complete**: Structure validated ✅

### Status After Phase 1

```
exports/file_monitor_agent/
├── __init__.py              ✅ (exports)
├── __main__.py              ✅ (CLI)
├── agent.py                 ✅ (goal, graph, agent class)
├── nodes/__init__.py        ✅ (7 nodes)
├── config.py                ✅ (configuration)
├── implementations.py       ✅ (Python functions)
├── README.md                ✅ (documentation)
├── IMPLEMENTATION_GUIDE.md  ✅ (next steps)
└── STATUS.md                ✅ (current state)
```

**Note**: An implementation gap remains - the data flow still needs to be connected (covered in STATUS.md)

## Phase 2: Testing (25 minutes)

### Step 1: Analyze Agent

The agent invokes the `/testing-agent` skill and:

1. Reads the goal from `exports/file_monitor_agent/agent.py`
2. Identifies 4 success criteria to test
3. Identifies 4 constraints to verify
4. Plans the test coverage

### Step 2: Generate Tests

Creates test files:

```
exports/file_monitor_agent/tests/
├── conftest.py               (fixtures)
├── test_constraints.py       (4 constraint tests)
├── test_success_criteria.py  (4 success tests)
└── test_edge_cases.py        (error handling)
```

Tests are approved incrementally by the user.

### Step 3: Run Tests

```bash
$ PYTHONPATH=core:exports pytest exports/file_monitor_agent/tests/

test_constraints.py::test_preserves_originals PASSED
test_constraints.py::test_handles_errors PASSED
test_constraints.py::test_tracks_state PASSED
test_constraints.py::test_respects_permissions PASSED

test_success_criteria.py::test_detects_all_files PASSED
test_success_criteria.py::test_copies_all_files PASSED
test_success_criteria.py::test_resolves_conflicts PASSED
test_success_criteria.py::test_continuous_run PASSED

test_edge_cases.py::test_empty_directory PASSED
test_edge_cases.py::test_permission_denied PASSED
test_edge_cases.py::test_disk_full PASSED
test_edge_cases.py::test_large_files PASSED

========================== 12 passed in 3.42s ==========================
```

**Phase 2 Complete**: All tests pass ✅

## Final Output

**Production-Ready Agent:**

```bash
# Run the agent
./RUN_AGENT.sh

# Or manually
PYTHONPATH=core:exports:tools/src python -m file_monitor_agent run
```

**Capabilities:**
- Monitors ~/Downloads continuously
- Copies new files to ~/Documents
- Resolves conflicts with timestamps
- Handles errors gracefully
- Tracks processed files
- Runs as a background service

**Total Time**: ~45 minutes from concept to production

## Key Learnings

1. **Incremental building** - Files written immediately, visible throughout
2. **Early validation** - Structure validated before moving to implementation
3. **Test-driven** - Tests reveal real behavior
4. **Documentation included** - README, STATUS, and guides auto-generated
5. **Repeatable process** - The same workflow works for any agent type

## Variations

**For simpler agents:**
- Fewer nodes (3-5 instead of 7)
- Simpler workflow (linear instead of looping)
- Faster build time (10-15 minutes)

**For complex agents:**
- More nodes (10-15+)
- Multiple subgraphs
- Pause/resume points for human-in-the-loop
- Longer build time (45-60 minutes)

The workflow scales to your needs!
@@ -1,241 +0,0 @@
---
name: browser-edge-cases
description: SOP for debugging browser automation failures on complex websites. Use when browser tools fail on specific sites like LinkedIn, Twitter/X, SPAs, or sites with Shadow DOM.
license: MIT
---

# Browser Tool Edge Cases

Standard Operating Procedure for debugging and fixing browser automation failures on complex websites.

## When to Use This Skill

- `browser_scroll` succeeds but the page doesn't move
- `browser_click` succeeds but no action is triggered
- `browser_type` text disappears or doesn't work
- `browser_snapshot` hangs or returns stale content
- `browser_navigate` loads the wrong content

## SOP: Debugging Browser Tool Failures

### Phase 1: Reproduce & Isolate

```
1. Create a minimal test case demonstrating the failure
2. Test against a simple site (example.com) to verify the tool works
3. Test against the problematic site to confirm the issue
```

**Quick isolation test:**
```python
# Test 1: Does the tool work at all?
await browser_navigate(tab_id, "https://example.com")
result = await browser_scroll(tab_id, "down", 100)
# Should work on simple sites

# Test 2: Does it fail on the problematic site?
await browser_navigate(tab_id, "https://linkedin.com/feed")
result = await browser_scroll(tab_id, "down", 100)
# If this fails but example.com works → site-specific edge case
```

### Phase 2: Analyze Root Cause

**Step 2a: Check the console for errors**
```python
console = await browser_console(tab_id)
# Look for: CSP violations, React errors, JavaScript exceptions
```

**Step 2b: Inspect the DOM structure**
```python
html = await browser_html(tab_id)
snapshot = await browser_snapshot(tab_id)
# Look for:
# - Nested scrollable divs (overflow: scroll/auto)
# - Shadow DOM roots
# - iframes
# - Custom widgets
```

**Step 2c: Identify the pattern**

| Symptom | Likely Cause | Check |
|---------|--------------|-------|
| Scroll doesn't move | Nested scroll container | Look for `overflow: scroll` divs |
| Click has no effect | Element covered | Check `getBoundingClientRect` vs viewport |
| Typing clears | Autocomplete/React | Check for event listeners on the input |
| Snapshot hangs | Huge DOM | Check the node count in the snapshot |
| Snapshot stale | SPA hydration | Wait after navigation |

### Phase 3: Implement a Multi-Layer Fix

**Pattern: Always have fallbacks**

```python
async def robust_operation(tab_id):
    # Method 1: Primary approach
    try:
        result = await primary_method(tab_id)
        if verify_success(result):
            return result
    except Exception:
        pass

    # Method 2: CDP fallback
    try:
        result = await cdp_fallback(tab_id)
        if verify_success(result):
            return result
    except Exception:
        pass

    # Method 3: JavaScript fallback
    return await javascript_fallback(tab_id)
```

**Pattern: Always add timeouts**

```python
import asyncio

# Bad - can hang forever
result = await browser_snapshot(tab_id)

# Good - fails fast with a useful error
try:
    result = await browser_snapshot(tab_id, timeout_s=10.0)
except asyncio.TimeoutError:
    # Handle the timeout gracefully
    result = await fallback_snapshot(tab_id)
```

### Phase 4: Verify the Fix

```
1. Run against the problematic site → should work
2. Run against a simple site → should still work (regression check)
3. Document in registry.md
```

## Pattern Library

### P1: Nested Scrollable Containers

**Sites:** LinkedIn, Twitter/X, any SPA with scrollable feeds

**Detection:**
```javascript
// Find the largest scrollable container
const candidates = [];
document.querySelectorAll('*').forEach(el => {
  const style = getComputedStyle(el);
  if (style.overflow.includes('scroll') || style.overflow.includes('auto')) {
    const rect = el.getBoundingClientRect();
    if (rect.width > 100 && rect.height > 100) {
      candidates.push({el, area: rect.width * rect.height});
    }
  }
});
candidates.sort((a, b) => b.area - a.area);
return candidates[0]?.el;
```

**Fix:** Dispatch scroll events at the container's center, not the viewport center.
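A hedged sketch of the fix, reusing the detection logic above; it assumes a connected `BeelineBridge` with the `evaluate` method used by the test scripts later in this diff.

```python
# Scroll the largest scrollable container (falling back to the window)
# by evaluating JS in the page, instead of scrolling the viewport.
js = """
(function() {
  let best = null, bestArea = 0;
  document.querySelectorAll('*').forEach(el => {
    const s = getComputedStyle(el);
    if (s.overflow.includes('scroll') || s.overflow.includes('auto')) {
      const r = el.getBoundingClientRect();
      if (r.width > 100 && r.height > 100 && r.width * r.height > bestArea) {
        best = el;
        bestArea = r.width * r.height;
      }
    }
  });
  (best || window).scrollBy(0, 500);
  return best ? best.tagName : 'window';
})()
"""
result = await bridge.evaluate(tab_id, js)
```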
### P2: Element Covered by Overlay

**Sites:** Modals, tooltips, SPAs with loading overlays

**Detection:**
```javascript
const rect = element.getBoundingClientRect();
const centerX = rect.left + rect.width / 2;
const centerY = rect.top + rect.height / 2;
const topElement = document.elementFromPoint(centerX, centerY);
return topElement === element || element.contains(topElement);
```

**Fix:** Wait for the overlay to disappear, or use a JavaScript click.

### P3: React Synthetic Events

**Sites:** React SPAs, modern web apps

**Detection:** A CDP click doesn't trigger the handler but a manual click works.

**Fix:** Use a JavaScript click as the primary method:
```javascript
element.click();
```

### P4: Huge DOM / Accessibility Tree

**Sites:** LinkedIn, Facebook, Twitter (feeds with thousands of nodes)

**Detection:**
```javascript
document.querySelectorAll('*').length > 5000
```

**Fix:**
1. Add a timeout to the snapshot operation
2. Truncate the tree at 2000 nodes
3. Fall back to a DOM-based snapshot if the accessibility tree is too large

### P5: SPA Hydration Delay

**Sites:** React, Vue, Angular SPAs after navigation

**Detection:**
```javascript
// Check if the React app has hydrated
document.querySelector('[data-reactroot]') ||
document.querySelector('[data-reactid]')
```

**Fix:** Wait for a specific selector after navigation:
```python
await browser_navigate(tab_id, url, wait_until="load")
await browser_wait(tab_id, selector='[data-testid="content"]', timeout_ms=5000)
```

### P6: Shadow DOM

**Sites:** Components using Shadow DOM, Lit elements

**Detection:**
```javascript
// NodeList has no .some(), so spread it into an array first
[...document.querySelectorAll('*')].some(el => el.shadowRoot)
```

**Fix:** Pierce the shadow root:
```javascript
function queryShadow(selector) {
  const parts = selector.split('>>>');
  let node = document;
  for (const part of parts) {
    if (node.shadowRoot) {
      node = node.shadowRoot.querySelector(part.trim());
    } else {
      node = node.querySelector(part.trim());
    }
  }
  return node;
}
```

## Quick Reference

| Issue | Primary Fix | Fallback |
|-------|-------------|----------|
| Scroll not working | Find the scrollable container | Mouse wheel at container center |
| Click has no effect | JavaScript click() | CDP mouse events |
| Typing clears | Add delay_ms | Use execCommand |
| Snapshot hangs | Add timeout_s | DOM snapshot fallback |
| Stale content | Wait for a selector | Increase the wait_until timeout |
| Shadow DOM | Piercing selector | JavaScript traversal |

## References

- [registry.md](registry.md) - Full list of known edge cases
- [scripts/test_case.py](scripts/test_case.py) - Template for testing new cases
- [BROWSER_USE_PATTERNS.md](../../tools/BROWSER_USE_PATTERNS.md) - Implementation patterns from browser-use
@@ -1,261 +0,0 @@
# Browser Edge Case Registry

Curated list of known browser automation edge cases with symptoms, causes, and fixes.

---

## Scroll Issues

### #1: LinkedIn Nested Scroll Container

| Attribute | Value |
|-----------|-------|
| **Site** | LinkedIn (linkedin.com/feed) |
| **Symptom** | `browser_scroll()` returns `{ok: true}` but the page doesn't move |
| **Root Cause** | Content is in a nested scrollable div (`overflow: scroll`), not the main window |
| **Detection** | `document.querySelectorAll('*')` with `overflow: scroll/auto` has large candidates |
| **Fix** | JavaScript finds the largest scrollable container and uses `container.scrollBy()` |
| **Code** | `bridge.py:808-891` - smart scroll with container detection |
| **Verified** | 2026-04-03 ✓ |

### #2: Twitter/X Lazy Loading

| Attribute | Value |
|-----------|-------|
| **Site** | Twitter/X (x.com) |
| **Symptom** | Infinite scroll doesn't load new content |
| **Root Cause** | Lazy loading requires content to be visible before loading more |
| **Detection** | Scroll position at the bottom but no new `[data-testid="tweet"]` elements |
| **Fix** | Add `wait_for_selector` between scroll calls with a 1s delay |
| **Code** | Test file: `tests/test_x_page_load_repro.py` |
| **Verified** | - |

### #3: Modal/Dialog Scroll Container

| Attribute | Value |
|-----------|-------|
| **Site** | Any site with modal dialogs |
| **Symptom** | Scroll moves the background page, not the modal content |
| **Root Cause** | The modal has its own scroll container with `overflow: scroll` |
| **Detection** | Visible element with `position: fixed` and scrollable content |
| **Fix** | Find the visible modal container (highest z-index scrollable) and scroll that |
| **Code** | - |
| **Verified** | - |

---

## Click Issues

### #4: Element Covered by Overlay

| Attribute | Value |
|-----------|-------|
| **Site** | SPAs, sites with loading overlays |
| **Symptom** | Click succeeds but no action is triggered |
| **Root Cause** | The element is covered by a transparent overlay, tooltip, or iframe |
| **Detection** | `document.elementFromPoint(x, y) !== target` |
| **Fix** | Wait for the overlay to disappear, or use JavaScript `element.click()` |
| **Code** | `bridge.py:394-591` - JavaScript click as primary |
| **Verified** | - |

### #5: React Synthetic Events

| Attribute | Value |
|-----------|-------|
| **Site** | React applications |
| **Symptom** | A CDP click doesn't trigger the React handler |
| **Root Cause** | React uses synthetic events that don't respond to CDP events |
| **Detection** | The site uses React (check for `__reactFiber$` or `data-reactroot`) |
| **Fix** | Use JavaScript `element.click()` as the primary method |
| **Code** | `bridge.py:394-591` - JavaScript-first click |
| **Verified** | - |

### #6: Shadow DOM Elements

| Attribute | Value |
|-----------|-------|
| **Site** | Components using Shadow DOM, Lit elements |
| **Symptom** | `querySelector` can't find the element |
| **Root Cause** | The element is inside a shadow root, not the main DOM tree |
| **Detection** | `element.shadowRoot !== null` on parent elements |
| **Fix** | Use a piercing selector (`host >>> target`) or traverse the shadow roots |
| **Code** | See SKILL.md, pattern P6 |
| **Verified** | 2026-04-03 ✓ |

---

## Input Issues

### #7: ContentEditable / Rich Text Editors

| Attribute | Value |
|-----------|-------|
| **Site** | Rich text editors (Notion, Slack web, etc.) |
| **Symptom** | `browser_type()` doesn't insert text |
| **Root Cause** | The element is `contenteditable`, not an `<input>` or `<textarea>` |
| **Detection** | `element.contentEditable === 'true'` |
| **Fix** | Focus via JavaScript, use `execCommand('insertText')` or `Input.dispatchKeyEvent` |
| **Code** | `bridge.py:616-694` - contentEditable handling |
| **Verified** | 2026-04-03 ✓ |

### #8: Autocomplete Field Clearing

| Attribute | Value |
|-----------|-------|
| **Site** | Search fields with autocomplete, address forms |
| **Symptom** | Typed text gets cleared immediately |
| **Root Cause** | The field expects realistic keystroke timing for autocomplete |
| **Detection** | The field has autocomplete listeners or a dropdown appears |
| **Fix** | Add `delay_ms=50` between keystrokes |
| **Code** | `bridge.py:type()` - delay_ms parameter |
| **Verified** | 2026-04-03 ✓ |

### #9: Custom Date Pickers

| Attribute | Value |
|-----------|-------|
| **Site** | Forms with custom date widgets |
| **Symptom** | Can't type a date into the date field |
| **Root Cause** | The custom widget intercepts and blocks keyboard input |
| **Detection** | Typing doesn't change the field value |
| **Fix** | Click the calendar widget icon and select the date from the dropdown |
| **Code** | - |
| **Verified** | - |

---

## Snapshot Issues

### #10: LinkedIn Huge DOM Tree

| Attribute | Value |
|-----------|-------|
| **Site** | LinkedIn, Facebook, Twitter feeds |
| **Symptom** | `browser_snapshot()` hangs forever |
| **Root Cause** | 10k+ DOM nodes; the accessibility tree has 50k+ nodes |
| **Detection** | `document.querySelectorAll('*').length > 5000` |
| **Fix** | Add a `timeout_s` param with `asyncio.timeout()` and proper error handling |
| **Code** | `bridge.py:1041-1028` - snapshot with timeout protection |
| **Verified** | 2026-04-03 ✓ (0.08s on LinkedIn) |

### #11: SPA Hydration Delay

| Attribute | Value |
|-----------|-------|
| **Site** | React/Vue/Angular SPAs |
| **Symptom** | Snapshot shows old content after navigation |
| **Root Cause** | Client-side hydration hasn't completed when the snapshot runs |
| **Detection** | `document.readyState === 'complete'` but content is missing |
| **Fix** | Wait for a specific selector after navigation |
| **Code** | Test file: `tests/test_x_page_load_repro.py` |
| **Verified** | - |

### #12: iframe Content Missing

| Attribute | Value |
|-----------|-------|
| **Site** | Sites with embedded content |
| **Symptom** | Snapshot is missing iframe content |
| **Root Cause** | The accessibility tree doesn't include iframe content |
| **Detection** | `document.querySelectorAll('iframe')` has results |
| **Fix** | Use `DOM.getFrameOwner` + a separate snapshot for each iframe |
| **Code** | - |
| **Verified** | - |

---

## Navigation Issues

### #13: SPA Navigation Events

| Attribute | Value |
|-----------|-------|
| **Site** | React Router, Vue Router SPAs |
| **Symptom** | `wait_until="load"` fires before the content is ready |
| **Root Cause** | The SPA uses client-side routing, so there is no full page load |
| **Detection** | The URL changes but the `load` event has already fired |
| **Fix** | Use `wait_until="networkidle"` or `wait_for_selector` |
| **Code** | `bridge.py:navigate()` - wait_until options |
| **Verified** | - |

### #14: Cross-Origin Redirects

| Attribute | Value |
|-----------|-------|
| **Site** | OAuth flows, SSO logins |
| **Symptom** | Navigation fails during the redirect |
| **Root Cause** | Cross-origin security prevents CDP tracking |
| **Detection** | The URL changes to a different domain |
| **Fix** | Use `wait_for_url` with pattern matching instead of an exact URL |
| **Code** | - |
| **Verified** | - |

---

## Screenshot Issues

### #15: Selector Screenshot Not Implemented

| Attribute | Value |
|-----------|-------|
| **Site** | Any site |
| **Symptom** | `browser_screenshot(selector="h1")` captures the full viewport instead of the element |
| **Root Cause** | The `selector` param existed in the signature but was silently ignored in both `bridge.py` and `inspection.py` |
| **Detection** | A screenshot with a selector is the same byte size as one without |
| **Fix** | Use CDP `Runtime.evaluate` to call `getBoundingClientRect()` on the element, and pass the result as `clip` to `Page.captureScreenshot` |
| **Code** | `bridge.py:1315-1344` - selector clip logic; `inspection.py:94-96` - pass selector to bridge |
| **Verified** | 2026-04-03 ✓ (JS rect query returns correct viewport coords; requires server restart) |
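A hedged sketch of the #15 fix: measure the element via JS, then clip the CDP screenshot to its rect. `bridge.evaluate` matches the test scripts in this diff; `send_cdp` is a hypothetical helper for issuing raw CDP commands (`Page.captureScreenshot` itself is standard CDP).

```python
rect = await bridge.evaluate(
    tab_id,
    "(function() { const r = document.querySelector('h1').getBoundingClientRect();"
    " return {x: r.x, y: r.y, width: r.width, height: r.height}; })()",
)
# CDP expects the clip rect plus a scale factor.
clip = {**rect.get("result", {}), "scale": 1}
# png = await bridge.send_cdp(tab_id, "Page.captureScreenshot", {"clip": clip})
```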
### #16: Stale Browser Context (Group ID Mismatch)

| Attribute | Value |
|-----------|-------|
| **Site** | Any |
| **Symptom** | `browser_open()` returns `"No group with id: XXXXXXX"` even though `browser_status` shows `running: true` |
| **Root Cause** | The in-memory `_contexts` dict has a stale `groupId` from a Chrome tab group that was closed outside the tool (e.g. the user closed the tab group) |
| **Detection** | `browser_status` returns `running: true` but `browser_open` fails with "No group with id" |
| **Fix** | Call `browser_stop()` to clear the stale context from `_contexts`, then `browser_start()` again |
| **Code** | `tools/lifecycle.py:144-160` - the `already_running` check uses the cached dict without validating against Chrome |
| **Verified** | 2026-04-03 ✓ |

---

## How to Add New Edge Cases

1. **Reproduce** the issue with a minimal test case
2. **Document** it using the template below
3. **Implement** the fix with a multi-layer fallback
4. **Verify** against both problematic and simple sites
5. **Submit** by appending to this file

### Template

```markdown
### #N: [Short Title]

| Attribute | Value |
|-----------|-------|
| **Site** | [URL or site type] |
| **Symptom** | [What the user observes] |
| **Root Cause** | [Technical explanation] |
| **Detection** | [JavaScript to detect this case] |
| **Fix** | [Solution approach] |
| **Code** | [File:line reference if implemented] |
| **Verified** | [Date or "pending"] |
```

---

## Statistics

| Category | Count |
|----------|-------|
| Scroll Issues | 3 |
| Click Issues | 3 |
| Input Issues | 3 |
| Snapshot Issues | 3 |
| Navigation Issues | 2 |
| Screenshot Issues | 2 |
| **Total** | **16** |

Last updated: 2026-04-03
@@ -1,113 +0,0 @@
#!/usr/bin/env python
"""
Test #2: Twitter/X Lazy Loading Scroll

Symptom: Infinite scroll doesn't load new content
Root Cause: Lazy loading requires content to be visible before loading more
Fix: Add wait_for_selector between scroll calls
"""

import asyncio
import sys
from pathlib import Path

sys.path.insert(0, str(Path(__file__).parent.parent.parent.parent / "tools" / "src"))

from gcu.browser.bridge import BeelineBridge

BRIDGE_PORT = 9229
CONTEXT_NAME = "twitter-scroll-test"


async def test_twitter_lazy_scroll():
    """Test that repeated scrolls with waits load new content."""
    print("=" * 70)
    print("TEST #2: Twitter/X Lazy Loading Scroll")
    print("=" * 70)

    bridge = BeelineBridge()

    try:
        await bridge.start()

        for i in range(10):
            await asyncio.sleep(1)
            if bridge.is_connected:
                print("✓ Extension connected!")
                break
            print(f"Waiting for extension... ({i + 1}/10)")
        else:
            print("✗ Extension not connected")
            return

        context = await bridge.create_context(CONTEXT_NAME)
        tab_id = context.get("tabId")
        group_id = context.get("groupId")
        print(f"✓ Created tab: {tab_id}")

        # Navigate to Twitter/X
        print("\n--- Navigating to X.com ---")
        await bridge.navigate(tab_id, "https://x.com", wait_until="networkidle", timeout_ms=30000)
        print("✓ Page loaded")

        # Wait for tweets to appear
        print("\n--- Waiting for tweets ---")
        await bridge.wait_for_selector(tab_id, '[data-testid="tweet"]', timeout_ms=10000)

        # Count initial tweets
        initial_count = await bridge.evaluate(
            tab_id,
            "(function() { return document.querySelectorAll("
            "'[data-testid=\"tweet\"]').length; })()",
        )
        print(f"Initial tweet count: {initial_count.get('result', 0)}")

        # Take a screenshot of the initial state
        screenshot = await bridge.screenshot(tab_id)
        print(f"Screenshot: {len(screenshot.get('data', ''))} bytes")

        # Scroll multiple times with waits
        print("\n--- Scrolling with waits ---")
        for i in range(3):
            result = await bridge.scroll(tab_id, "down", 500)
            print(f"  Scroll {i + 1}: {result.get('method', 'unknown')} method")

            # Wait for new content to load
            await asyncio.sleep(2)

            # Count tweets after the scroll
            count_result = await bridge.evaluate(
                tab_id,
                "(function() { return document.querySelectorAll("
                "'[data-testid=\"tweet\"]').length; })()",
            )
            count = count_result.get("result", 0)
            print(f"  Tweet count after scroll: {count}")

        # Final count
        final_count = await bridge.evaluate(
            tab_id,
            "(function() { return document.querySelectorAll("
            "'[data-testid=\"tweet\"]').length; })()",
        )
        final = final_count.get("result", 0)
        initial = initial_count.get("result", 0)

        print("\n--- Results ---")
        print(f"Initial tweets: {initial}")
        print(f"Final tweets: {final}")

        if final > initial:
            print(f"✓ PASS: Loaded {final - initial} new tweets")
        else:
            print("✗ FAIL: No new tweets loaded (may need login)")

        await bridge.destroy_context(group_id)
        print("\n✓ Context destroyed")

    finally:
        await bridge.stop()


if __name__ == "__main__":
    asyncio.run(test_twitter_lazy_scroll())
@@ -1,96 +0,0 @@
#!/usr/bin/env python
"""
Test #3: Modal/Dialog Scroll Container

Symptom: Scroll moves the background page, not the modal content
Root Cause: The modal has its own scroll container with overflow: scroll
Fix: Find the visible modal container (highest z-index scrollable) and scroll that
"""

import asyncio
import sys
from pathlib import Path

sys.path.insert(0, str(Path(__file__).parent.parent.parent.parent / "tools" / "src"))

from gcu.browser.bridge import BeelineBridge

BRIDGE_PORT = 9229
CONTEXT_NAME = "modal-scroll-test"

# Test site with a modal - using a demo site
MODAL_DEMO_URL = "https://www.w3schools.com/howto/howto_css_modals.asp"


async def test_modal_scroll():
    """Test that scroll targets the modal content, not the background."""
    print("=" * 70)
    print("TEST #3: Modal/Dialog Scroll Container")
    print("=" * 70)

    bridge = BeelineBridge()

    try:
        await bridge.start()

        for i in range(10):
            await asyncio.sleep(1)
            if bridge.is_connected:
                print("✓ Extension connected!")
                break
        else:
            print("✗ Extension not connected")
            return

        context = await bridge.create_context(CONTEXT_NAME)
        tab_id = context.get("tabId")
        group_id = context.get("groupId")
        print(f"✓ Created tab: {tab_id}")

        # Navigate to the modal demo
        print("\n--- Navigating to modal demo ---")
        await bridge.navigate(tab_id, MODAL_DEMO_URL, wait_until="load")
        print("✓ Page loaded")

        # Take a screenshot before
        screenshot_before = await bridge.screenshot(tab_id)
        print(f"Screenshot before: {len(screenshot_before.get('data', ''))} bytes")

        # Click the button to open the modal
        print("\n--- Opening modal ---")
        # Find and click the "Open Modal" button
        result = await bridge.click(tab_id, ".ws-btn", timeout_ms=5000)
        print(f"Click result: {result}")

        await asyncio.sleep(1)

        # Take a screenshot with the modal open
        screenshot_modal = await bridge.screenshot(tab_id)
        print(f"Screenshot modal open: {len(screenshot_modal.get('data', ''))} bytes")

        # Try to scroll within the modal
        print("\n--- Scrolling modal content ---")
        result = await bridge.scroll(tab_id, "down", 100)
        print(f"Scroll result: {result}")

        await asyncio.sleep(0.5)

        # Take a screenshot after the scroll
        screenshot_after = await bridge.screenshot(tab_id)
        print(f"Screenshot after scroll: {len(screenshot_after.get('data', ''))} bytes")

        # Check whether the modal content scrolled (not the background)
        # This is a visual check - verify by comparing the screenshots
        print("\n--- Results ---")
        print(f"Modal scroll test completed. Method used: {result.get('method', 'unknown')}")
        print("Visual verification needed: check if modal content scrolled vs background")

        await bridge.destroy_context(group_id)
        print("\n✓ Context destroyed")

    finally:
        await bridge.stop()


if __name__ == "__main__":
    asyncio.run(test_modal_scroll())
@@ -1,123 +0,0 @@
#!/usr/bin/env python
"""
Test #4: Element Covered by Overlay

Symptom: Click succeeds but no action is triggered
Root Cause: The element is covered by a transparent overlay, tooltip, or iframe
Detection: document.elementFromPoint(x, y) !== target
Fix: Wait for the overlay to disappear, or use JavaScript element.click()
"""

import asyncio
import base64
import sys
from pathlib import Path

sys.path.insert(0, str(Path(__file__).parent.parent.parent.parent / "tools" / "src"))

from gcu.browser.bridge import BeelineBridge

CONTEXT_NAME = "overlay-click-test"


async def test_overlay_click():
    """Test clicking elements that are covered by overlays."""
    print("=" * 70)
    print("TEST #4: Element Covered by Overlay")
    print("=" * 70)

    bridge = BeelineBridge()

    try:
        await bridge.start()

        for i in range(10):
            await asyncio.sleep(1)
            if bridge.is_connected:
                print("✓ Extension connected!")
                break
        else:
            print("✗ Extension not connected")
            return

        context = await bridge.create_context(CONTEXT_NAME)
        tab_id = context.get("tabId")
        group_id = context.get("groupId")
        print(f"✓ Created tab: {tab_id}")

        # Create a test page with an overlay
        print("\n--- Creating test page with overlay ---")
        test_html = """
        <!DOCTYPE html>
        <html>
        <head><title>Overlay Test</title></head>
        <body>
          <button id="target-btn" onclick="alert('Clicked!')">Click Me</button>
          <div id="overlay" style="position:fixed;top:0;left:0;
               width:100%;height:100%;
               background:rgba(0,0,0,0.3);z-index:1000;"></div>
          <script>
            window.clickCount = 0;
            document.getElementById('target-btn').addEventListener('click', () => {
              window.clickCount++;
            });
          </script>
        </body>
        </html>
        """

        # Navigate to a data URL
        data_url = f"data:text/html;base64,{base64.b64encode(test_html.encode()).decode()}"
        await bridge.navigate(tab_id, data_url, wait_until="load")

        # Screenshot before
        screenshot = await bridge.screenshot(tab_id)
        print(f"Screenshot: {len(screenshot.get('data', ''))} bytes")

        # Try to click the covered button
        print("\n--- Attempting to click covered button ---")

        # First, check whether the element is covered
        coverage_check = await bridge.evaluate(
            tab_id,
            """
            (function() {
              const btn = document.getElementById('target-btn');
              const rect = btn.getBoundingClientRect();
              const centerX = rect.left + rect.width / 2;
              const centerY = rect.top + rect.height / 2;
              const topElement = document.elementFromPoint(centerX, centerY);
              return {
                isCovered: topElement !== btn && !btn.contains(topElement),
                topElement: topElement?.tagName,
                targetElement: btn.tagName
              };
            })();
            """,
        )
        print(f"Coverage check: {coverage_check.get('result', {})}")

        # Try a CDP click (may fail due to the overlay)
        click_result = await bridge.click(tab_id, "#target-btn", timeout_ms=5000)
        print(f"Click result: {click_result}")

        # Check whether the click registered
        count_result = await bridge.evaluate(tab_id, "(function() { return window.clickCount; })()")
        count = count_result.get("result", 0)
        print(f"Click count after CDP click: {count}")

        if count > 0:
            print("✓ PASS: JavaScript click penetrated overlay")
        else:
            print("✗ FAIL: Click did not reach button (overlay blocked it)")

        await bridge.destroy_context(group_id)
        print("\n✓ Context destroyed")

    finally:
        await bridge.stop()


if __name__ == "__main__":
    asyncio.run(test_overlay_click())
@@ -1,152 +0,0 @@
#!/usr/bin/env python
"""
Test #6: Shadow DOM Elements

Symptom: querySelector can't find element
Root Cause: Element is inside a shadow root, not main DOM tree
Detection: element.shadowRoot !== null on parent elements
Fix: Use piercing selector (host >>> target) or traverse shadow roots
"""

import asyncio
import sys
from pathlib import Path

sys.path.insert(0, str(Path(__file__).parent.parent.parent.parent / "tools" / "src"))

from gcu.browser.bridge import BeelineBridge

CONTEXT_NAME = "shadow-dom-test"


async def test_shadow_dom():
    """Test clicking elements inside Shadow DOM."""
    print("=" * 70)
    print("TEST #6: Shadow DOM Elements")
    print("=" * 70)

    bridge = BeelineBridge()

    try:
        await bridge.start()

        for i in range(10):
            await asyncio.sleep(1)
            if bridge.is_connected:
                print("✓ Extension connected!")
                break
        else:
            print("✗ Extension not connected")
            return

        context = await bridge.create_context(CONTEXT_NAME)
        tab_id = context.get("tabId")
        group_id = context.get("groupId")
        print(f"✓ Created tab: {tab_id}")

        # Create test page with Shadow DOM
        print("\n--- Creating test page with Shadow DOM ---")
        test_html = """
        <!DOCTYPE html>
        <html>
        <head><title>Shadow DOM Test</title></head>
        <body>
          <div id="shadow-host"></div>
          <script>
            const host = document.getElementById('shadow-host');
            const shadow = host.attachShadow({ mode: 'open' });
            shadow.innerHTML = `
              <style>
                button { padding: 10px 20px; font-size: 16px; }
              </style>
              <button id="shadow-btn">Shadow Button</button>
            `;
            shadow.getElementById('shadow-btn').addEventListener('click', () => {
              window.shadowClickCount = (window.shadowClickCount || 0) + 1;
              console.log('Shadow button clicked:', window.shadowClickCount);
            });
          </script>
        </body>
        </html>
        """

        # Write to file and use file:// URL (data: URLs don't work well with extension)
        test_file = Path("/tmp/shadow_dom_test.html")
        test_file.write_text(test_html.strip())
        file_url = f"file://{test_file}"
        await bridge.navigate(tab_id, file_url, wait_until="load")
        print("✓ Page loaded")

        # Screenshot
        screenshot = await bridge.screenshot(tab_id)
        print(f"Screenshot: {len(screenshot.get('data', ''))} bytes")

        # Detect Shadow DOM
        print("\n--- Detecting Shadow DOM ---")
        detection = await bridge.evaluate(
            tab_id,
            """
            (function() {
                const hosts = [];
                document.querySelectorAll('*').forEach(el => {
                    if (el.shadowRoot) {
                        hosts.push({
                            tag: el.tagName,
                            id: el.id,
                            hasButton: el.shadowRoot.querySelector('button') !== null
                        });
                    }
                });
                return { count: hosts.length, hosts };
            })();
            """,
        )
        print(f"Shadow DOM detection: {detection.get('result', {})}")

        # Try to click shadow button using regular selector (should fail)
        print("\n--- Attempting click with regular selector ---")
        try:
            result = await bridge.click(tab_id, "#shadow-btn", timeout_ms=3000)
            print(f"Result: {result}")
        except Exception as e:
            print(f"Expected failure: {e}")

        # Try to click using JavaScript that pierces shadow DOM
        print("\n--- Clicking via JavaScript shadow piercing ---")
        click_result = await bridge.evaluate(
            tab_id,
            """
            (function() {
                const host = document.getElementById('shadow-host');
                const btn = host.shadowRoot.getElementById('shadow-btn');
                if (btn) {
                    btn.click();
                    return { success: true, clicked: 'shadow-btn' };
                }
                return { success: false, error: 'Button not found' };
            })();
            """,
        )
        print(f"JS click result: {click_result.get('result', {})}")

        # Verify click was registered
        count_result = await bridge.evaluate(
            tab_id, "(function() { return window.shadowClickCount || 0; })()"
        )
        count = count_result.get("result") or 0
        print(f"Shadow click count: {count}")

        if count and count > 0:
            print("✓ PASS: Shadow DOM element clicked successfully")
        else:
            print("✗ FAIL: Could not click Shadow DOM element")

        await bridge.destroy_context(group_id)
        print("\n✓ Context destroyed")

    finally:
        await bridge.stop()


if __name__ == "__main__":
    asyncio.run(test_shadow_dom())
@@ -1,180 +0,0 @@
#!/usr/bin/env python
"""
Test #7: ContentEditable / Rich Text Editors

Symptom: browser_type() doesn't insert text
Root Cause: Element is contenteditable, not an <input> or <textarea>
Detection: element.contentEditable === 'true'
Fix: Focus via JavaScript, use execCommand('insertText') or Input.dispatchKeyEvent
"""

import asyncio
import sys
from pathlib import Path

sys.path.insert(0, str(Path(__file__).parent.parent.parent.parent / "tools" / "src"))

from gcu.browser.bridge import BeelineBridge

CONTEXT_NAME = "contenteditable-test"


async def test_contenteditable():
    """Test typing into contenteditable elements."""
    print("=" * 70)
    print("TEST #7: ContentEditable / Rich Text Editors")
    print("=" * 70)

    bridge = BeelineBridge()

    try:
        await bridge.start()

        for i in range(10):
            await asyncio.sleep(1)
            if bridge.is_connected:
                print("✓ Extension connected!")
                break
        else:
            print("✗ Extension not connected")
            return

        context = await bridge.create_context(CONTEXT_NAME)
        tab_id = context.get("tabId")
        group_id = context.get("groupId")
        print(f"✓ Created tab: {tab_id}")

        # Create test page with contenteditable
        test_html = """
        <!DOCTYPE html>
        <html>
        <head><title>ContentEditable Test</title></head>
        <body>
        <h2>ContentEditable Test</h2>

        <h3>1. Simple contenteditable div</h3>
        <div id="editor1" contenteditable="true"
             style="border:1px solid #ccc;padding:10px;
                    min-height:50px;">Start text</div>

        <h3>2. Rich text editor (like Notion)</h3>
        <div id="editor2" contenteditable="true"
             style="border:1px solid #ccc;padding:10px;
                    min-height:50px;">
          <p>Type here...</p>
        </div>

        <h3>3. Regular input (for comparison)</h3>
        <input id="input1" type="text" placeholder="Regular input" />

        <script>
          // Track content changes
          window.editor1Content = '';
          window.editor2Content = '';

          document.getElementById('editor1').addEventListener('input', (e) => {
            window.editor1Content = e.target.innerText;
          });
          document.getElementById('editor2').addEventListener('input', (e) => {
            window.editor2Content = e.target.innerText;
          });
        </script>
        </body>
        </html>
        """

        # Write to file and use file:// URL (data: URLs don't work well with extension)
        test_file = Path("/tmp/contenteditable_test.html")
        test_file.write_text(test_html.strip())
        file_url = f"file://{test_file}"
        await bridge.navigate(tab_id, file_url, wait_until="load")
        print("✓ Page loaded")

        # Screenshot with timeout protection
        try:
            screenshot = await asyncio.wait_for(bridge.screenshot(tab_id), timeout=10.0)
            print(f"Screenshot: {len(screenshot.get('data', ''))} bytes")
        except asyncio.TimeoutError:
            print("Screenshot timed out (skipping)")

        # Detect contenteditable
        print("\n--- Detecting contenteditable elements ---")
        detection = await bridge.evaluate(
            tab_id,
            """
            (function() {
                const editables = document.querySelectorAll('[contenteditable="true"]');
                return {
                    count: editables.length,
                    ids: Array.from(editables).map(el => el.id)
                };
            })();
            """,
        )
        print(f"Contenteditable detection: {detection.get('result', {})}")

        # Test 1: Type into regular input (baseline)
        print("\n--- Test 1: Regular input ---")
        await bridge.click(tab_id, "#input1")
        await bridge.type_text(tab_id, "#input1", "Hello input")
        input_result = await bridge.evaluate(
            tab_id, "(function() { return document.getElementById('input1').value; })()"
        )
        print(f"Input value: {input_result.get('result', '')}")

        # Test 2: Type into contenteditable div
        print("\n--- Test 2: Contenteditable div ---")
        await bridge.click(tab_id, "#editor1")
        await bridge.type_text(tab_id, "#editor1", "Hello contenteditable", clear_first=True)
        editor_result = await bridge.evaluate(
            tab_id,
            "(function() { return document.getElementById('editor1').innerText; })()",
        )
        print(f"Editor1 innerText: {editor_result.get('result', '')}")

        # Test 3: Use JavaScript insertText for rich editor
        print("\n--- Test 3: JavaScript insertText for rich editor ---")
        insert_result = await bridge.evaluate(
            tab_id,
            """
            (function() {
                const editor = document.getElementById('editor2');
                editor.focus();
                document.execCommand('selectAll', false, null);
                document.execCommand('insertText', false, 'Hello from execCommand');
                return editor.innerText;
            })();
            """,
        )
        print(f"Editor2 after execCommand: {insert_result.get('result', '')}")

        # Screenshot after with timeout protection
        try:
            screenshot_after = await asyncio.wait_for(bridge.screenshot(tab_id), timeout=10.0)
            print(f"Screenshot after: {len(screenshot_after.get('data', ''))} bytes")
        except asyncio.TimeoutError:
            print("Screenshot after timed out (skipping)")

        # Results
        print("\n--- Results ---")
        input_val = input_result.get("result", "")
        editor1_val = editor_result.get("result", "")
        editor2_val = insert_result.get("result", "")

        input_pass = "Hello input" in input_val
        editor1_pass = "Hello contenteditable" in editor1_val
        editor2_pass = "execCommand" in editor2_val

        print(f"Input: {'✓ PASS' if input_pass else '✗ FAIL'} - {input_val}")
        print(f"Editor1: {'✓ PASS' if editor1_pass else '✗ FAIL'} - {editor1_val}")
        print(f"Editor2: {'✓ PASS' if editor2_pass else '✗ FAIL'} - {editor2_val}")

        await bridge.destroy_context(group_id)
        print("\n✓ Context destroyed")

    finally:
        await bridge.stop()


if __name__ == "__main__":
    asyncio.run(test_contenteditable())
@@ -1,253 +0,0 @@
#!/usr/bin/env python
"""
Test #8: Autocomplete Field Clearing

Symptom: Typed text gets cleared immediately
Root Cause: Field expects realistic keystroke timing for autocomplete
Detection: Field has autocomplete listeners or dropdown appears
Fix: Add delay_ms between keystrokes
"""

import asyncio
import sys
from pathlib import Path

sys.path.insert(0, str(Path(__file__).parent.parent.parent.parent / "tools" / "src"))

from gcu.browser.bridge import BeelineBridge

CONTEXT_NAME = "autocomplete-test"


async def test_autocomplete():
    """Test typing into fields with autocomplete behavior."""
    print("=" * 70)
    print("TEST #8: Autocomplete Field Clearing")
    print("=" * 70)

    bridge = BeelineBridge()

    try:
        await bridge.start()

        for i in range(10):
            await asyncio.sleep(1)
            if bridge.is_connected:
                print("✓ Extension connected!")
                break
        else:
            print("✗ Extension not connected")
            return

        context = await bridge.create_context(CONTEXT_NAME)
        tab_id = context.get("tabId")
        group_id = context.get("groupId")
        print(f"✓ Created tab: {tab_id}")

        # Create test page with autocomplete behavior
        test_html = """
        <!DOCTYPE html>
        <html>
        <head><title>Autocomplete Test</title>
        <style>
          .autocomplete-items {
            position: absolute;
            border: 1px solid #d4d4d4;
            border-top: none;
            z-index: 99;
            top: 100%;
            left: 0;
            right: 0;
            max-height: 200px;
            overflow-y: auto;
            background: white;
          }
          .autocomplete-items div {
            padding: 10px;
            cursor: pointer;
          }
          .autocomplete-items div:hover {
            background-color: #e9e9e9;
          }
          .autocomplete-active {
            background-color: DodgerBlue !important;
            color: white;
          }
          .autocomplete { position: relative; display: inline-block; }
          input { width: 300px; padding: 10px; font-size: 16px; }
        </style></head>
        <body>
        <h2>Autocomplete Test</h2>

        <div class="autocomplete">
          <input id="search" type="text" placeholder="Search countries..." autocomplete="off">
        </div>

        <div id="log" style="margin-top:20px;font-family:monospace;"></div>

        <script>
          const countries = [
            "Afghanistan","Albania","Algeria",
            "Andorra","Angola","Argentina",
            "Armenia","Australia","Austria",
            "Azerbaijan","Bahamas","Bahrain",
            "Bangladesh","Belarus","Belgium",
            "Belize","Benin","Bhutan",
            "Bolivia","Brazil","Canada",
            "China","Colombia","Denmark",
            "Egypt","France","Germany",
            "India","Indonesia","Italy",
            "Japan","Mexico","Netherlands",
            "Nigeria","Norway","Pakistan",
            "Peru","Philippines","Poland",
            "Portugal","Russia","Spain",
            "Sweden","Switzerland","Thailand",
            "Turkey","Ukraine",
            "United Kingdom","United States",
            "Vietnam"
          ];

          const input = document.getElementById('search');
          const log = document.getElementById('log');
          let currentFocus = -1;
          let typingTimeout = null;

          // Track events for testing
          window.inputEvents = [];
          window.inputValue = '';

          function logEvent(type, value) {
            window.inputEvents.push({ type, value, time: Date.now() });
            const entry = document.createElement('div');
            entry.textContent = type + ': ' + value;
            log.insertBefore(entry, log.firstChild);
          }

          // Simulate autocomplete that clears fast typing
          input.addEventListener('input', function(e) {
            const val = this.value;

            // Clear previous dropdown
            closeAllLists();

            if (!val) return;

            // If typing too fast (autocomplete-style), clear and restart
            clearTimeout(typingTimeout);
            typingTimeout = setTimeout(() => {
              logEvent('input', val);
              window.inputValue = val;

              // Create dropdown
              const div = document.createElement('div');
              div.setAttribute('id', this.id + 'autocomplete-list');
              div.setAttribute('class', 'autocomplete-items');
              this.parentNode.appendChild(div);

              countries.filter(
                c => c.substr(0, val.length).toUpperCase()
                  === val.toUpperCase()
              ).slice(0, 5).forEach(country => {
                const item = document.createElement('div');
                item.innerHTML = '<strong>'
                  + country.substr(0, val.length)
                  + '</strong>'
                  + country.substr(val.length);
                item.addEventListener('click', function() {
                  input.value = country;
                  closeAllLists();
                  logEvent('select', country);
                  window.inputValue = country;
                });
                div.appendChild(item);
              });
            }, 100); // 100ms debounce
          });

          function closeAllLists() {
            document.querySelectorAll('.autocomplete-items').forEach(el => el.remove());
          }

          document.addEventListener('click', function() {
            closeAllLists();
          });
        </script>
        </body>
        </html>
        """

        # Write to file and use file:// URL (data: URLs don't work well with extension)
        test_file = Path("/tmp/autocomplete_test.html")
        test_file.write_text(test_html.strip())
        file_url = f"file://{test_file}"
        await bridge.navigate(tab_id, file_url, wait_until="load")
        print("✓ Page loaded")

        # Screenshot
        screenshot = await bridge.screenshot(tab_id)
        print(f"Screenshot: {len(screenshot.get('data', ''))} bytes")

        # Test 1: Fast typing (no delay) - may fail
        print("\n--- Test 1: Fast typing (delay_ms=0) ---")
        await bridge.click(tab_id, "#search")
        await bridge.type_text(tab_id, "#search", "Ger", clear_first=True, delay_ms=0)
        await asyncio.sleep(0.5)

        fast_result = await bridge.evaluate(
            tab_id, "(function() { return document.getElementById('search').value; })()"
        )
        fast_value = fast_result.get("result", "")
        print(f"Value after fast typing: '{fast_value}'")

        # Check events
        events_result = await bridge.evaluate(
            tab_id, "(function() { return window.inputEvents; })()"
        )
        print(f"Events logged: {events_result.get('result', [])}")

        # Test 2: Slow typing (with delay) - should work
        print("\n--- Test 2: Slow typing (delay_ms=100) ---")
        await bridge.click(tab_id, "#search")
        await bridge.type_text(tab_id, "#search", "United", clear_first=True, delay_ms=100)
        await asyncio.sleep(0.5)

        slow_result = await bridge.evaluate(
            tab_id, "(function() { return document.getElementById('search').value; })()"
        )
        slow_value = slow_result.get("result", "")
        print(f"Value after slow typing: '{slow_value}'")

        # Check if dropdown appeared
        dropdown_result = await bridge.evaluate(
            tab_id,
            "(function() { return document.querySelectorAll("
            "'.autocomplete-items div').length; })()",
        )
        dropdown_count = dropdown_result.get("result", 0)
        print(f"Dropdown items: {dropdown_count}")

        # Screenshot with dropdown
        screenshot_dropdown = await bridge.screenshot(tab_id)
        print(f"Screenshot with dropdown: {len(screenshot_dropdown.get('data', ''))} bytes")

        # Results
        print("\n--- Results ---")
        if "United" in slow_value:
            print("✓ PASS: Slow typing with delay_ms worked")
        else:
            print("✗ FAIL: Slow typing still didn't work")

        if dropdown_count > 0:
            print("✓ PASS: Autocomplete dropdown appeared")
        else:
            print("⚠ WARNING: No autocomplete dropdown")

        await bridge.destroy_context(group_id)
        print("\n✓ Context destroyed")

    finally:
        await bridge.stop()


if __name__ == "__main__":
    asyncio.run(test_autocomplete())
@@ -1,162 +0,0 @@
#!/usr/bin/env python
"""
Test #10: LinkedIn Huge DOM Tree

Symptom: browser_snapshot() hangs forever
Root Cause: 10k+ DOM nodes, accessibility tree has 50k+ nodes
Detection: document.querySelectorAll('*').length > 5000
Fix: Add timeout (10s default), truncate tree at 2000 nodes
"""

import asyncio
import base64
import sys
import time
from pathlib import Path

sys.path.insert(0, str(Path(__file__).parent.parent.parent.parent / "tools" / "src"))

from gcu.browser.bridge import BeelineBridge

CONTEXT_NAME = "huge-dom-test"


async def test_huge_dom():
    """Test snapshot performance on huge DOM trees."""
    print("=" * 70)
    print("TEST #10: Huge DOM Tree (LinkedIn-style)")
    print("=" * 70)

    bridge = BeelineBridge()

    try:
        await bridge.start()

        for i in range(10):
            await asyncio.sleep(1)
            if bridge.is_connected:
                print("✓ Extension connected!")
                break
        else:
            print("✗ Extension not connected")
            return

        context = await bridge.create_context(CONTEXT_NAME)
        tab_id = context.get("tabId")
        group_id = context.get("groupId")
        print(f"✓ Created tab: {tab_id}")

        # Test 1: Small DOM (baseline)
        print("\n--- Test 1: Small DOM (baseline) ---")
        small_html = """
        <!DOCTYPE html>
        <html><body>
        <h1>Small Page</h1>
        <p>A few elements</p>
        <button>Click me</button>
        </body></html>
        """
        data_url = f"data:text/html;base64,{base64.b64encode(small_html.encode()).decode()}"
        await bridge.navigate(tab_id, data_url, wait_until="load")

        start = time.perf_counter()
        snapshot = await bridge.snapshot(tab_id, timeout_s=5.0)
        elapsed = time.perf_counter() - start
        tree_len = len(snapshot.get("tree", ""))
        print(f"Small DOM snapshot: {elapsed:.3f}s, {tree_len} chars")

        # Test 2: Generate huge DOM
        print("\n--- Test 2: Huge DOM (5000+ elements) ---")
        huge_html = """
        <!DOCTYPE html>
        <html><body>
        <h1>Huge DOM Test</h1>
        <div id="container"></div>
        <script>
          const container = document.getElementById('container');
          for (let i = 0; i < 5000; i++) {
            const div = document.createElement('div');
            div.className = 'item-' + i;
            div.innerHTML = '<span>Item ' + i + '</span><button>Action</button>';
            container.appendChild(div);
          }
        </script>
        </body></html>
        """
        data_url = f"data:text/html;base64,{base64.b64encode(huge_html.encode()).decode()}"
        await bridge.navigate(tab_id, data_url, wait_until="load")

        # Count elements
        count_result = await bridge.evaluate(
            tab_id, "(function() { return document.querySelectorAll('*').length; })()"
        )
        elem_count = count_result.get("result", 0)
        print(f"DOM elements: {elem_count}")

        # Skip screenshot on huge DOM - it can timeout
        # Instead verify page loaded by checking DOM
        print("✓ Page verified (skipping screenshot on huge DOM)")

        # Test snapshot with timeout
        print("\n--- Testing snapshot with 10s timeout ---")
        start = time.perf_counter()
        try:
            snapshot = await bridge.snapshot(tab_id, timeout_s=10.0)
            elapsed = time.perf_counter() - start
            tree_len = len(snapshot.get("tree", ""))
            truncated = "(truncated)" in snapshot.get("tree", "")
            print(f"✓ Huge DOM snapshot: {elapsed:.3f}s, {tree_len} chars, truncated={truncated}")

            if elapsed < 5.0:
                print("✓ PASS: Snapshot completed quickly")
            else:
                print(f"⚠ WARNING: Snapshot took {elapsed:.1f}s")

            if truncated:
                print("✓ PASS: Tree was truncated to prevent hang")
            else:
                print("⚠ WARNING: Tree not truncated (may need adjustment)")

        except asyncio.TimeoutError:
            print("✗ FAIL: Snapshot timed out (this shouldn't happen)")

        # Test 3: Real LinkedIn
        print("\n--- Test 3: Real LinkedIn Feed ---")
        await bridge.navigate(
            tab_id, "https://www.linkedin.com/feed", wait_until="load", timeout_ms=30000
        )
        await asyncio.sleep(2)

        count_result = await bridge.evaluate(
            tab_id, "(function() { return document.querySelectorAll('*').length; })()"
        )
        elem_count = count_result.get("result", 0)
        print(f"LinkedIn DOM elements: {elem_count}")

        start = time.perf_counter()
        try:
            snapshot = await bridge.snapshot(tab_id, timeout_s=15.0)
            elapsed = time.perf_counter() - start
            tree_len = len(snapshot.get("tree", ""))
            truncated = "(truncated)" in snapshot.get("tree", "")
            print(f"LinkedIn snapshot: {elapsed:.3f}s, {tree_len} chars, truncated={truncated}")

            if elapsed < 5.0:
                print("✓ PASS: LinkedIn snapshot fast enough")
            elif elapsed < 15.0:
                print("⚠ WARNING: LinkedIn snapshot slow but within timeout")
            else:
                print("✗ FAIL: LinkedIn snapshot too slow")

        except asyncio.TimeoutError:
            print("✗ FAIL: LinkedIn snapshot timed out")

        await bridge.destroy_context(group_id)
        print("\n✓ Context destroyed")

    finally:
        await bridge.stop()


if __name__ == "__main__":
    asyncio.run(test_huge_dom())
@@ -1,190 +0,0 @@
#!/usr/bin/env python
"""
Test #13: SPA Navigation Events

Symptom: wait_until="load" fires before content ready
Root Cause: SPA uses client-side routing, no full page load
Detection: URL changes but load event already fired
Fix: Use wait_until="networkidle" or wait_for_selector
"""

import asyncio
import sys
import time
from pathlib import Path

sys.path.insert(0, str(Path(__file__).parent.parent.parent.parent / "tools" / "src"))

from gcu.browser.bridge import BeelineBridge

CONTEXT_NAME = "spa-nav-test"


async def test_spa_navigation():
    """Test navigation timing on SPA pages."""
    print("=" * 70)
    print("TEST #13: SPA Navigation Events")
    print("=" * 70)

    bridge = BeelineBridge()

    try:
        await bridge.start()

        for i in range(10):
            await asyncio.sleep(1)
            if bridge.is_connected:
                print("✓ Extension connected!")
                break
        else:
            print("✗ Extension not connected")
            return

        context = await bridge.create_context(CONTEXT_NAME)
        tab_id = context.get("tabId")
        group_id = context.get("groupId")
        print(f"✓ Created tab: {tab_id}")

        # Create a test SPA
        spa_html = """
        <!DOCTYPE html>
        <html>
        <head>
          <title>SPA Test</title>
          <style>
            nav a { margin-right: 10px; }
            .page { padding: 20px; border: 1px solid #ccc; margin-top: 10px; }
          </style>
        </head>
        <body>
          <nav>
            <a href="#home" onclick="navigate('home')">Home</a>
            <a href="#about" onclick="navigate('about')">About</a>
            <a href="#contact" onclick="navigate('contact')">Contact</a>
          </nav>
          <div id="app" class="page">
            <h1>Loading...</h1>
          </div>
          <script>
            // Simulate SPA routing
            let currentPage = '';

            async function navigate(page) {
              // Guard: navigate() is also called from setTimeout below, where no event exists
              if (window.event) window.event.preventDefault();
              currentPage = page;

              // Show loading state
              document.getElementById('app').innerHTML = '<h1>Loading...</h1>';

              // Simulate async content loading (like real SPAs)
              await new Promise(r => setTimeout(r, 500));

              // Render content
              const content = {
                home: '<h1>Home Page</h1><p>Welcome!</p>'
                  + '<button id="home-btn">Home Action</button>',
                about: '<h1>About Page</h1><p>Simulated SPA.</p>'
                  + '<button id="about-btn">About Action</button>',
                contact: '<h1>Contact Page</h1>'
                  + '<p>Contact us at test@example.com</p>'
                  + '<button id="contact-btn">Contact Action</button>'
              };

              document.getElementById('app').innerHTML = content[page] || '<h1>404</h1>';
              window.location.hash = page;
            }

            // Initial load with delay (simulates SPA hydration)
            setTimeout(() => {
              navigate('home');
            }, 1000);

            // Track for testing
            window.pageLoads = [];
            window.addEventListener('hashchange', () => {
              window.pageLoads.push(window.location.hash);
            });
          </script>
        </body>
        </html>
        """

        # Write to file and use file:// URL (data: URLs don't work well with extension)
        test_file = Path("/tmp/spa_test.html")
        test_file.write_text(spa_html.strip())
        file_url = f"file://{test_file}"

        # Test 1: wait_until="load" - may fire before content ready
        print("\n--- Test 1: wait_until='load' ---")
        start = time.perf_counter()
        await bridge.navigate(tab_id, file_url, wait_until="load")
        elapsed = time.perf_counter() - start
        print(f"Navigation completed in {elapsed:.3f}s")

        # Check content immediately
        content = await bridge.evaluate(
            tab_id,
            "(function() { return document.getElementById('app').innerText; })()",
        )
        print(f"Content immediately after load: '{content.get('result', '')}'")

        # Screenshot
        screenshot = await bridge.screenshot(tab_id)
        print(f"Screenshot: {len(screenshot.get('data', ''))} bytes")

        # Wait for content
        print("\n--- Waiting for content to hydrate ---")
        await bridge.wait_for_selector(tab_id, "#home-btn", timeout_ms=5000)
        print("✓ Content loaded")

        # Check content after wait
        content_after = await bridge.evaluate(
            tab_id,
            "(function() { return document.getElementById('app').innerText; })()",
        )
        print(f"Content after wait: '{content_after.get('result', '')}'")

        # Test 2: SPA navigation (no full page load)
        print("\n--- Test 2: SPA client-side navigation ---")

        # Click "About" link
        await bridge.click(tab_id, 'a[href="#about"]')
        await asyncio.sleep(1)

        # Check if content changed
        about_content = await bridge.evaluate(
            tab_id,
            "(function() { return document.getElementById('app').innerText; })()",
        )
        print(f"Content after SPA nav: '{about_content.get('result', '')}'")

        if "About Page" in about_content.get("result", ""):
            print("✓ PASS: SPA navigation worked")
        else:
            print("✗ FAIL: SPA navigation didn't update content")

        # Test 3: wait_until="networkidle"
        print("\n--- Test 3: wait_until='networkidle' ---")
        await bridge.navigate(tab_id, file_url, wait_until="networkidle", timeout_ms=10000)

        # Check content immediately
        content_networkidle = await bridge.evaluate(
            tab_id,
            "(function() { return document.getElementById('app').innerText; })()",
        )
        print(f"Content after networkidle: '{content_networkidle.get('result', '')}'")

        if "Home Page" in content_networkidle.get("result", ""):
            print("✓ PASS: networkidle waited for content")
        else:
            print("⚠ WARNING: networkidle didn't wait long enough")

        await bridge.destroy_context(group_id)
        print("\n✓ Context destroyed")

    finally:
        await bridge.stop()


if __name__ == "__main__":
    asyncio.run(test_spa_navigation())
@@ -1,267 +0,0 @@
#!/usr/bin/env python
"""
Test #15: Screenshot Functionality

Tests browser_screenshot across multiple scenarios:
- Basic viewport screenshot
- Full-page screenshot
- Selector-based screenshot
- Screenshot on complex DOM
- Timeout handling

Category: screenshot
"""

import asyncio
import base64
import sys
import time
from pathlib import Path

sys.path.insert(0, str(Path(__file__).parent.parent.parent.parent / "tools" / "src"))

from gcu.browser.bridge import BeelineBridge

CONTEXT_NAME = "screenshot-test"

SIMPLE_HTML = """<!DOCTYPE html>
<html>
<head><style>
body { margin: 0; background: #fff; font-family: sans-serif; }
h1 { color: #333; padding: 20px; }
.box { width: 200px; height: 100px; background: #4a90e2; margin: 20px; }
.long-content { height: 2000px; background: linear-gradient(blue, red); }
</style></head>
<body>
<h1 id="title">Screenshot Test Page</h1>
<div class="box" id="target-box">Target Box</div>
<div class="long-content"></div>
</body>
</html>"""


def check_png(data: str) -> bool:
    """Verify that base64 data decodes to a valid PNG."""
    try:
        raw = base64.b64decode(data)
        return raw[:8] == b"\x89PNG\r\n\x1a\n"
    except Exception:
        return False


async def test_basic_screenshot(bridge: BeelineBridge, tab_id: int, data_url: str):
    print("\n--- Test 1: Basic Viewport Screenshot ---")
    await bridge.navigate(tab_id, data_url, wait_until="load")
    await asyncio.sleep(0.5)

    start = time.perf_counter()
    result = await bridge.screenshot(tab_id)
    elapsed = time.perf_counter() - start

    ok = result.get("ok")
    data = result.get("data", "")
    mime = result.get("mimeType", "")

    print(f"  ok={ok}, mimeType={mime}, elapsed={elapsed:.3f}s")
    print(f"  data length: {len(data)} chars")

    if ok and data:
        valid_png = check_png(data)
        print(f"  valid PNG: {valid_png}")
        if valid_png:
            raw = base64.b64decode(data)
            print(f"  PNG size: {len(raw)} bytes")
            print("  ✓ PASS: Basic screenshot works")
            return True
        else:
            print("  ✗ FAIL: Data is not a valid PNG")
    else:
        print(f"  ✗ FAIL: {result.get('error', 'no data')}")
    return False


async def test_full_page_screenshot(bridge: BeelineBridge, tab_id: int, data_url: str):
    print("\n--- Test 2: Full Page Screenshot ---")
    await bridge.navigate(tab_id, data_url, wait_until="load")
    await asyncio.sleep(0.5)

    viewport_result = await bridge.screenshot(tab_id, full_page=False)
    full_result = await bridge.screenshot(tab_id, full_page=True)

    v_data = viewport_result.get("data", "")
    f_data = full_result.get("data", "")

    if not v_data or not f_data:
        print(f"  ✗ FAIL: viewport ok={viewport_result.get('ok')}, full ok={full_result.get('ok')}")
        return False

    v_size = len(base64.b64decode(v_data))
    f_size = len(base64.b64decode(f_data))
    print(f"  Viewport PNG: {v_size} bytes")
    print(f"  Full page PNG: {f_size} bytes")

    if f_size > v_size:
        print("  ✓ PASS: Full page larger than viewport")
        return True
    else:
        print("  ✗ FAIL: Full page not larger than viewport (may not capture long pages)")
        return False


async def test_selector_screenshot(bridge: BeelineBridge, tab_id: int, data_url: str):
    print("\n--- Test 3: Selector Screenshot ---")
    await bridge.navigate(tab_id, data_url, wait_until="load")
    await asyncio.sleep(0.5)

    # selector param exists in signature but may not be implemented
    result = await bridge.screenshot(tab_id, selector="#target-box")

    ok = result.get("ok")
    data = result.get("data", "")

    if ok and data:
        # If implemented, the box screenshot should be smaller than a full viewport screenshot
        full_result = await bridge.screenshot(tab_id)
        full_data = full_result.get("data", "")

        if full_data:
            sel_size = len(base64.b64decode(data))
            full_size = len(base64.b64decode(full_data))
            print(f"  Selector PNG: {sel_size} bytes")
            print(f"  Full page PNG: {full_size} bytes")
            if sel_size < full_size:
                print("  ✓ PASS: Selector screenshot smaller than full page")
                return True
            else:
                print("  ⚠ WARNING: Selector screenshot not smaller (may be full page)")
                return False
    else:
        print(
            "  ⚠ NOT IMPLEMENTED: selector param ignored"
            f" (returns full page) - error={result.get('error')}"
        )
        print("  NOTE: selector parameter exists in signature but is not used in implementation")
    return False


async def test_screenshot_url_metadata(bridge: BeelineBridge, tab_id: int):
    print("\n--- Test 4: Screenshot URL Metadata ---")
    await bridge.navigate(tab_id, "https://example.com", wait_until="load")
    await asyncio.sleep(1)

    result = await bridge.screenshot(tab_id)
    url = result.get("url", "")
    tab = result.get("tabId")

    print(f"  url={url!r}, tabId={tab}")

    if "example.com" in url:
        print("  ✓ PASS: URL metadata captured correctly")
        return True
    else:
        print(f"  ✗ FAIL: Expected example.com in URL, got {url!r}")
        return False


async def test_screenshot_timeout(bridge: BeelineBridge, tab_id: int, data_url: str):
    print("\n--- Test 5: Timeout Handling ---")
    await bridge.navigate(tab_id, data_url, wait_until="load")

    # Very short timeout - likely still completes since simple page
    start = time.perf_counter()
    result = await bridge.screenshot(tab_id, timeout_s=0.001)
    elapsed = time.perf_counter() - start

    if not result.get("ok"):
        err = result.get("error", "")
        if "timed out" in err or "cancelled" in err:
            print(f"  ✓ PASS: Timeout handled gracefully: {err!r}")
            return True
        else:
            print(f"  ⚠ Fast enough to beat timeout: {err!r} in {elapsed:.3f}s")
            return True  # Not a failure, just fast
    else:
        print(
            f"  ⚠ Screenshot completed before timeout ({elapsed:.3f}s) - too fast to test timeout"
        )
        return True  # Still ok, just very fast


async def test_screenshot_complex_site(bridge: BeelineBridge, tab_id: int):
    print("\n--- Test 6: Complex Site (example.com) ---")
    await bridge.navigate(tab_id, "https://example.com", wait_until="load")
    await asyncio.sleep(1)

    start = time.perf_counter()
    result = await bridge.screenshot(tab_id)
    elapsed = time.perf_counter() - start

    ok = result.get("ok")
    data = result.get("data", "")

    print(f"  ok={ok}, elapsed={elapsed:.3f}s, data_len={len(data)}")
    if ok and check_png(data):
        print("  ✓ PASS: Screenshot on real site works")
        return True
    else:
        print(f"  ✗ FAIL: {result.get('error', 'bad data')}")
        return False


async def main():
    print("=" * 70)
    print("TEST #15: Screenshot Functionality")
    print("=" * 70)

    bridge = BeelineBridge()

    try:
        await bridge.start()

        for i in range(10):
            await asyncio.sleep(1)
            if bridge.is_connected:
                print("✓ Extension connected!")
                break
            print(f"Waiting for extension... ({i + 1}/10)")
        else:
            print("✗ Extension not connected. Ensure Chrome with Beeline extension is running.")
            return

        context = await bridge.create_context(CONTEXT_NAME)
        tab_id = context.get("tabId")
        group_id = context.get("groupId")
        print(f"✓ Created tab: {tab_id}")

        data_url = f"data:text/html;base64,{base64.b64encode(SIMPLE_HTML.encode()).decode()}"

        results = {
            "basic": await test_basic_screenshot(bridge, tab_id, data_url),
            "full_page": await test_full_page_screenshot(bridge, tab_id, data_url),
            "selector": await test_selector_screenshot(bridge, tab_id, data_url),
            "metadata": await test_screenshot_url_metadata(bridge, tab_id),
            "timeout": await test_screenshot_timeout(bridge, tab_id, data_url),
            "complex_site": await test_screenshot_complex_site(bridge, tab_id),
        }

        print("\n" + "=" * 70)
        print("SUMMARY")
        print("=" * 70)
        for name, passed in results.items():
            status = "✓ PASS" if passed else "✗ FAIL"
            print(f"  {status}: {name}")

        passed_count = sum(1 for v in results.values() if v)
        total = len(results)
        print(f"\n  {passed_count}/{total} tests passed")

        await bridge.destroy_context(group_id)
        print("\n✓ Context destroyed")

    finally:
        await bridge.stop()
        print("✓ Bridge stopped")


if __name__ == "__main__":
    asyncio.run(main())
@@ -1,333 +0,0 @@
#!/usr/bin/env python
"""
Browser Edge Case Test Template

This script provides a template for testing and debugging browser tool failures
on specific websites. Use this to reproduce, isolate, and verify fixes.

Usage:
    1. Copy this file: cp test_case.py test_#[number]_[site].py
    2. Fill in the CONFIG section with your test details
    3. Run: uv run python test_#[number]_[site].py

Example:
    uv run python test_01_linkedin_scroll.py
"""

import asyncio
import sys
import time
from pathlib import Path

# Add tools to path
sys.path.insert(0, str(Path(__file__).parent.parent.parent.parent / "tools" / "src"))

from gcu.browser.bridge import BeelineBridge

# ═══════════════════════════════════════════════════════════════════════════════
# CONFIG: Fill in these values for your test case
# ═══════════════════════════════════════════════════════════════════════════════

TEST_CASE = {
    "number": 1,
    "name": "LinkedIn Nested Scroll Container",
    "site": "https://www.linkedin.com/feed",
    "simple_site": "https://example.com",
    "category": "scroll",  # scroll, click, input, snapshot, navigation
    "symptom": "scroll() returns success but page doesn't move",
}

BRIDGE_PORT = 9229
CONTEXT_NAME = "edge-case-test"


# ═══════════════════════════════════════════════════════════════════════════════
# TEST FUNCTIONS
# ═══════════════════════════════════════════════════════════════════════════════


async def test_simple_site(bridge: BeelineBridge, tab_id: int) -> dict:
    """Test that the tool works on a simple site (baseline)."""
    print("\n--- Baseline Test (Simple Site) ---")

    await bridge.navigate(tab_id, TEST_CASE["simple_site"], wait_until="load")
    await asyncio.sleep(1)

    # Adjust this based on category
    if TEST_CASE["category"] == "scroll":
        result = await bridge.scroll(tab_id, "down", 100)
        print(f"  Scroll result: {result}")
        return result
    elif TEST_CASE["category"] == "click":
        # Add click test
        pass
    elif TEST_CASE["category"] == "snapshot":
        result = await bridge.snapshot(tab_id, timeout_s=5.0)
        print(f"  Snapshot length: {len(result.get('tree', ''))}")
        return result

    return {"ok": True}


async def test_problematic_site(bridge: BeelineBridge, tab_id: int) -> dict:
    """Test the tool on the problematic site."""
    print("\n--- Problem Site Test ---")

    await bridge.navigate(tab_id, TEST_CASE["site"], wait_until="load", timeout_ms=30000)
    await asyncio.sleep(2)

    # Adjust this based on category
    if TEST_CASE["category"] == "scroll":
        # Get scroll positions before
        before = await bridge.evaluate(
            tab_id,
            """
            (function() {
                const results = { window: { y: window.scrollY } };
                document.querySelectorAll('*').forEach((el, i) => {
                    const style = getComputedStyle(el);
                    if ((style.overflowY === 'scroll' || style.overflowY === 'auto') &&
                        el.scrollHeight > el.clientHeight) {
                        results['el_' + i] = {
                            tag: el.tagName,
                            scrollTop: el.scrollTop,
                            class: el.className.substring(0, 30)
                        };
                    }
                });
                return results;
            })();
            """,
        )
        print(f"  Before scroll: {before.get('result', {})}")

        # Try to scroll
        result = await bridge.scroll(tab_id, "down", 500)
        print(f"  Scroll result: {result}")

        await asyncio.sleep(1)

        # Get scroll positions after
        after = await bridge.evaluate(
            tab_id,
            """
            (function() {
                const results = { window: { y: window.scrollY } };
                document.querySelectorAll('*').forEach((el, i) => {
                    const style = getComputedStyle(el);
                    if ((style.overflowY === 'scroll' || style.overflowY === 'auto') &&
                        el.scrollHeight > el.clientHeight) {
                        results['el_' + i] = {
                            tag: el.tagName,
                            scrollTop: el.scrollTop,
                            class: el.className.substring(0, 30)
                        };
                    }
                });
                return results;
            })();
            """,
        )
        print(f"  After scroll: {after.get('result', {})}")

        # Check if anything changed
        before_data = before.get("result", {}) or {}
        after_data = after.get("result", {}) or {}

        changed = False
        for key in after_data:
            if key in before_data:
                b_val = (
                    before_data[key].get("scrollTop", 0)
                    if isinstance(before_data[key], dict)
                    else 0
                )
                a_val = (
                    after_data[key].get("scrollTop", 0) if isinstance(after_data[key], dict) else 0
                )
                if a_val != b_val:
                    print(f"  ✓ CHANGE DETECTED: {key} scrolled from {b_val} to {a_val}")
                    changed = True

        if not changed:
            print("  ✗ NO CHANGE: Scroll did not affect any container")

        return {"ok": changed, "scroll_result": result}

    elif TEST_CASE["category"] == "snapshot":
        start = time.perf_counter()
        try:
            result = await bridge.snapshot(tab_id, timeout_s=15.0)
            elapsed = time.perf_counter() - start
            tree_len = len(result.get("tree", ""))
            print(f"  Snapshot completed in {elapsed:.2f}s, {tree_len} chars")
            return {"ok": True, "elapsed": elapsed, "tree_length": tree_len}
        except asyncio.TimeoutError:
            print("  ✗ SNAPSHOT TIMED OUT")
            return {"ok": False, "error": "timeout"}

    return {"ok": True}


async def detect_root_cause(bridge: BeelineBridge, tab_id: int) -> dict:
    """Run detection scripts to identify the root cause."""
    print("\n--- Root Cause Detection ---")

    detections = {}

    # Detection 1: Nested scrollable containers
    scroll_check = await bridge.evaluate(
        tab_id,
        """
        (function() {
            const candidates = [];
            document.querySelectorAll('*').forEach(el => {
                const style = getComputedStyle(el);
                if (style.overflow.includes('scroll') || style.overflow.includes('auto')) {
                    const rect = el.getBoundingClientRect();
                    if (rect.width > 100 && rect.height > 100) {
                        candidates.push({
                            tag: el.tagName,
                            area: rect.width * rect.height,
                            class: el.className.substring(0, 30)
                        });
                    }
                }
            });
            candidates.sort((a, b) => b.area - a.area);
            return {
                count: candidates.length,
                largest: candidates[0]
            };
        })();
        """,
    )
    detections["nested_scroll"] = scroll_check.get("result", {})
    print(f"  Nested scroll containers: {detections['nested_scroll']}")

    # Detection 2: Shadow DOM
    shadow_check = await bridge.evaluate(
        tab_id,
        """
        (function() {
            const withShadow = [];
            document.querySelectorAll('*').forEach(el => {
                if (el.shadowRoot) {
                    withShadow.push(el.tagName);
                }
            });
            return { count: withShadow.length, elements: withShadow.slice(0, 5) };
        })();
        """,
    )
    detections["shadow_dom"] = shadow_check.get("result", {})
    print(f"  Shadow DOM: {detections['shadow_dom']}")

    # Detection 3: iframes
    iframe_check = await bridge.evaluate(
        tab_id,
        """
        (function() {
            const iframes = document.querySelectorAll('iframe');
            return { count: iframes.length };
        })();
        """,
    )
    detections["iframes"] = iframe_check.get("result", {})
    print(f"  iframes: {detections['iframes']}")

    # Detection 4: DOM size
    dom_check = await bridge.evaluate(
        tab_id,
        """
        (function() {
            return {
                elements: document.querySelectorAll('*').length,
                body_children: document.body.children.length
            };
        })();
        """,
    )
    detections["dom_size"] = dom_check.get("result", {})
    print(f"  DOM size: {detections['dom_size']}")

    # Detection 5: Framework detection
    framework_check = await bridge.evaluate(
        tab_id,
        """
        (function() {
            return {
                react: !!document.querySelector('[data-reactroot], [data-reactid]'),
                vue: !!document.querySelector('[data-v-]'),
                angular: !!document.querySelector('[ng-app], [ng-version]')
            };
        })();
        """,
    )
    detections["frameworks"] = framework_check.get("result", {})
    print(f"  Frameworks: {detections['frameworks']}")

    return detections


# ═══════════════════════════════════════════════════════════════════════════════
# MAIN
# ═══════════════════════════════════════════════════════════════════════════════


async def main():
    print("=" * 70)
    print(f"EDGE CASE TEST #{TEST_CASE['number']}: {TEST_CASE['name']}")
    print("=" * 70)
    print(f"Site: {TEST_CASE['site']}")
    print(f"Category: {TEST_CASE['category']}")
    print(f"Symptom: {TEST_CASE['symptom']}")

    bridge = BeelineBridge()

    try:
        print("\n--- Starting Bridge ---")
        await bridge.start()

        # Wait for extension connection
        for i in range(10):
            await asyncio.sleep(1)
            if bridge.is_connected:
                print("✓ Extension connected!")
                break
            print(f"Waiting for extension... ({i + 1}/10)")
        else:
            print("✗ Extension not connected. Ensure Chrome with Beeline extension is running.")
            return

        # Create browser context
        context = await bridge.create_context(CONTEXT_NAME)
        tab_id = context.get("tabId")
        group_id = context.get("groupId")
        print(f"✓ Created tab: {tab_id}")

        # Run tests
        baseline_result = await test_simple_site(bridge, tab_id)
        problem_result = await test_problematic_site(bridge, tab_id)
        detections = await detect_root_cause(bridge, tab_id)

        # Summary
        print("\n" + "=" * 70)
        print("SUMMARY")
        print("=" * 70)
        print(f"Baseline test: {'✓ PASS' if baseline_result.get('ok') else '✗ FAIL'}")
        print(f"Problem test: {'✓ PASS' if problem_result.get('ok') else '✗ FAIL'}")
        print(f"Root cause indicators: {list(k for k, v in detections.items() if v)}")

        # Cleanup
        print("\n--- Cleanup ---")
        await bridge.destroy_context(group_id)
        print("✓ Context destroyed")

    finally:
        await bridge.stop()
        print("✓ Bridge stopped")


if __name__ == "__main__":
    asyncio.run(main())
@@ -0,0 +1,361 @@
|
||||
---
name: building-agents-construction
description: Step-by-step guide for building goal-driven agents. Creates package structure, defines goals, adds nodes, connects edges, and finalizes agent class. Use when actively building an agent.
license: Apache-2.0
metadata:
  author: hive
  version: "2.0"
  type: procedural
  part_of: building-agents
  requires: building-agents-core
---

# Agent Construction - EXECUTE THESE STEPS

**THIS IS AN EXECUTABLE WORKFLOW. DO NOT DISPLAY THIS FILE. EXECUTE THE STEPS BELOW.**

When this skill is loaded, IMMEDIATELY begin executing Step 1. Do not explain what you will do - just do it.

---

## STEP 1: Initialize Build Environment

**EXECUTE THESE TOOL CALLS NOW:**

1. Register the hive-tools MCP server:

   ```
   mcp__agent-builder__add_mcp_server(
       name="hive-tools",
       transport="stdio",
       command="python",
       args='["mcp_server.py", "--stdio"]',
       cwd="tools",
       description="Hive tools MCP server"
   )
   ```

2. Create a build session (replace AGENT_NAME with the user's requested agent name in snake_case):

   ```
   mcp__agent-builder__create_session(name="AGENT_NAME")
   ```

3. Discover available tools:

   ```
   mcp__agent-builder__list_mcp_tools()
   ```

4. Create the package directory:

   ```
   mkdir -p exports/AGENT_NAME/nodes
   ```

**AFTER completing these calls**, tell the user:

> ✅ Build environment initialized
>
> - Session created
> - Available tools: [list the tools from step 3]
>
> Proceeding to define the agent goal...

**THEN immediately proceed to STEP 2.**

---

## STEP 2: Define and Approve Goal

**PROPOSE a goal to the user.** Based on what they asked for, propose:

- Goal ID (kebab-case)
- Goal name
- Goal description
- 3-5 success criteria (each with: id, description, metric, target, weight)
- 2-4 constraints (each with: id, description, constraint_type, category)

**FORMAT your proposal as a clear summary, then ask for approval:**

> **Proposed Goal: [Name]**
>
> [Description]
>
> **Success Criteria:**
>
> 1. [criterion 1]
> 2. [criterion 2]
> ...
>
> **Constraints:**
>
> 1. [constraint 1]
> 2. [constraint 2]
> ...

**THEN call AskUserQuestion:**

```
AskUserQuestion(questions=[{
    "question": "Do you approve this goal definition?",
    "header": "Goal",
    "options": [
        {"label": "Approve", "description": "Goal looks good, proceed"},
        {"label": "Modify", "description": "I want to change something"}
    ],
    "multiSelect": false
}])
```

**WAIT for user response.**

- If **Approve**: Call `mcp__agent-builder__set_goal(...)` with the goal details (see the sketch below), then proceed to STEP 3
- If **Modify**: Ask what they want to change, update proposal, ask again
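A minimal sketch of that call, assuming `set_goal` follows the same convention as `add_node` and takes list-valued fields as JSON strings (the exact parameter names are an assumption, not a confirmed signature):

```
mcp__agent-builder__set_goal(
    goal_id="research-topics",
    name="Research Topics",
    description="Research any topic the user requests",
    success_criteria='[{"id": "coverage", "description": "Cover all aspects", "metric": "coverage_score", "target": ">=0.9", "weight": 0.4}]',
    constraints='[{"id": "accuracy", "description": "Verify all claims", "constraint_type": "hard", "category": "quality"}]'
)
```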

---

## STEP 3: Design Node Workflow

**BEFORE designing nodes**, review the available tools from Step 1. Nodes can ONLY use tools that exist.

**DESIGN the workflow** as a series of nodes. For each node, determine:

- node_id (kebab-case)
- name
- description
- node_type: `"llm_generate"` (no tools) or `"llm_tool_use"` (uses tools)
- input_keys (what data this node receives)
- output_keys (what data this node produces)
- tools (ONLY tools that exist - empty list for llm_generate)
- system_prompt

**PRESENT the workflow to the user:**

> **Proposed Workflow: [N] nodes**
>
> 1. **[node-id]** - [description]
>
>    - Type: [llm_generate/llm_tool_use]
>    - Input: [keys]
>    - Output: [keys]
>    - Tools: [tools or "none"]
>
> 2. **[node-id]** - [description]
> ...
>
> **Flow:** node1 → node2 → node3 → ...

**THEN call AskUserQuestion:**

```
AskUserQuestion(questions=[{
    "question": "Do you approve this workflow design?",
    "header": "Workflow",
    "options": [
        {"label": "Approve", "description": "Workflow looks good, proceed to build nodes"},
        {"label": "Modify", "description": "I want to change the workflow"}
    ],
    "multiSelect": false
}])
```

**WAIT for user response.**

- If **Approve**: Proceed to STEP 4
- If **Modify**: Ask what they want to change, update design, ask again

---

## STEP 4: Build Nodes One by One

**FOR EACH node in the approved workflow:**

1. **Call** `mcp__agent-builder__add_node(...)` with the node details

   - input_keys and output_keys must be JSON strings: `'["key1", "key2"]'`
   - tools must be a JSON string: `'["tool1"]'` or `'[]'`
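
   A minimal sketch of the call, assuming `add_node` takes the fields from Step 3 as flat parameters (the parameter names are an assumption based on that field list, not a confirmed signature):

   ```
   mcp__agent-builder__add_node(
       node_id="search-web",
       name="Search Web",
       description="Search the web for the user's query",
       node_type="llm_tool_use",
       input_keys='["query"]',
       output_keys='["search_results"]',
       tools='["web_search"]',
       system_prompt="Search the web for: {query}"
   )
   ```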

2. **Call** `mcp__agent-builder__test_node(...)` to validate:

   ```
   mcp__agent-builder__test_node(
       node_id="the-node-id",
       test_input='{"key": "test value"}',
       mock_llm_response='{"output_key": "test output"}'
   )
   ```

3. **Check result:**

   - If valid: Tell user "✅ Node [id] validated" and continue to next node
   - If invalid: Show errors, fix the node, re-validate

4. **Show progress** after each node:

   ```
   mcp__agent-builder__get_session_status()
   ```

   > ✅ Node [X] of [Y] complete: [node-id]

**AFTER all nodes are added and validated**, proceed to STEP 5.

---

## STEP 5: Connect Edges

**DETERMINE the edges** based on the workflow flow. For each connection:

- edge_id (kebab-case)
- source (node that outputs)
- target (node that receives)
- condition: `"on_success"`, `"always"`, `"on_failure"`, or `"conditional"`
- condition_expr (Python expression, only if conditional)
- priority (integer, lower = higher priority)

**FOR EACH edge, call:**

```
mcp__agent-builder__add_edge(
    edge_id="source-to-target",
    source="source-node-id",
    target="target-node-id",
    condition="on_success",
    condition_expr="",
    priority=1
)
```

**AFTER all edges are added, validate the graph:**

```
mcp__agent-builder__validate_graph()
```

- If valid: Tell user "✅ Graph structure validated" and proceed to STEP 6
- If invalid: Show errors, fix edges, re-validate

---

## STEP 6: Generate Agent Package

**EXPORT the graph data:**

```
mcp__agent-builder__export_graph()
```

This returns JSON with the goal, nodes, edges, and MCP server configurations.

**THEN write the Python package files** using the exported data. Create these files in `exports/AGENT_NAME/`:

1. `config.py` - Runtime configuration with model settings
2. `nodes/__init__.py` - All NodeSpec definitions
3. `agent.py` - Goal, edges, graph config, and agent class
4. `__init__.py` - Package exports
5. `__main__.py` - CLI interface
6. `mcp_servers.json` - MCP server configurations
7. `README.md` - Usage documentation

**IMPORTANT entry_points format:**

- MUST be: `{"start": "first-node-id"}`
- NOT: `{"first-node-id": ["input_keys"]}` (WRONG)
- NOT: `{"first-node-id"}` (WRONG - this is a set)

**Use the example agent** at `.claude/skills/building-agents-construction/examples/online_research_agent/` as a template for file structure and patterns.

**AFTER writing all files, tell the user:**

> ✅ Agent package created: `exports/AGENT_NAME/`
>
> **Files generated:**
>
> - `__init__.py` - Package exports
> - `agent.py` - Goal, nodes, edges, agent class
> - `config.py` - Runtime configuration
> - `__main__.py` - CLI interface
> - `nodes/__init__.py` - Node definitions
> - `mcp_servers.json` - MCP server config
> - `README.md` - Usage documentation
>
> **Test your agent:**
>
> ```bash
> cd /home/timothy/oss/hive
> PYTHONPATH=core:exports python -m AGENT_NAME validate
> PYTHONPATH=core:exports python -m AGENT_NAME info
> ```

---

## STEP 7: Verify and Test

**RUN validation:**

```bash
cd /home/timothy/oss/hive && PYTHONPATH=core:exports python -m AGENT_NAME validate
```

- If valid: Agent is complete!
- If errors: Fix the issues and re-run

**SHOW final session summary:**

```
mcp__agent-builder__get_session_status()
```

**TELL the user the agent is ready** and suggest next steps:

- Run with mock mode to test without API calls (see the sketch below)
- Use `/testing-agent` skill for comprehensive testing
- Use `/setup-credentials` if the agent needs API keys
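
A hedged example of a mock-mode run, assuming the generated CLI exposes a `--mock` flag on `run` as the bundled online_research_agent example does (the exact run options depend on the agent's input keys):

```bash
cd /home/timothy/oss/hive
PYTHONPATH=core:exports python -m AGENT_NAME run --topic "test topic" --mock --quiet
```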

---

## REFERENCE: Node Types

| Type           | tools param            | Use when                                       |
| -------------- | ---------------------- | ---------------------------------------------- |
| `llm_generate` | `'[]'`                 | Pure reasoning, JSON output, no external calls |
| `llm_tool_use` | `'["tool1", "tool2"]'` | Needs to call MCP tools                        |

---

## REFERENCE: Edge Conditions

| Condition     | When edge is followed                 |
| ------------- | ------------------------------------- |
| `on_success`  | Source node completed successfully    |
| `on_failure`  | Source node failed                    |
| `always`      | Always, regardless of success/failure |
| `conditional` | When condition_expr evaluates to True |

---

## REFERENCE: System Prompt Best Practice

For nodes with JSON output, include this in the system_prompt:

```
CRITICAL: Return ONLY raw JSON. NO markdown, NO code blocks.
Just the JSON object starting with { and ending with }.

Return this exact structure:
{
  "key1": "...",
  "key2": "..."
}
```

---

## COMMON MISTAKES TO AVOID

1. **Using tools that don't exist** - Always check `mcp__agent-builder__list_mcp_tools()` first
2. **Wrong entry_points format** - Must be `{"start": "node-id"}`, NOT a set or list
3. **Skipping validation** - Always validate nodes and graph before proceeding
4. **Not waiting for approval** - Always ask user before major steps
5. **Displaying this file** - Execute the steps, don't show documentation

@@ -0,0 +1,80 @@
# Online Research Agent

Deep-dive research agent that searches 10+ sources and produces comprehensive narrative reports with citations.

## Features

- Generates multiple search queries from a topic
- Searches and fetches 15+ web sources
- Evaluates and ranks sources by relevance
- Synthesizes findings into themes
- Writes narrative report with numbered citations
- Quality checks for uncited claims
- Saves report to local markdown file

## Usage

### CLI

```bash
# Show agent info
python -m online_research_agent info

# Validate structure
python -m online_research_agent validate

# Run research on a topic
python -m online_research_agent run --topic "impact of AI on healthcare"

# Interactive shell
python -m online_research_agent shell
```

### Python API

```python
from online_research_agent import default_agent

# Simple usage
result = await default_agent.run({"topic": "climate change solutions"})

# Check output
if result.success:
    print(f"Report saved to: {result.output['file_path']}")
    print(result.output['final_report'])
```
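
Since `run` is a coroutine, the snippet above only works inside an async function; a minimal script-level sketch drives it with `asyncio.run`:

```python
import asyncio

from online_research_agent import default_agent


async def main():
    # Run the full research workflow once and print where the report landed
    result = await default_agent.run({"topic": "climate change solutions"})
    if result.success:
        print(f"Report saved to: {result.output['file_path']}")


asyncio.run(main())
```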

## Workflow

```
parse-query → search-sources → fetch-content → evaluate-sources
                                                      ↓
               write-report ← synthesize-findings
                     ↓
               quality-check → save-report
```

## Output

Reports are saved to `./research_reports/` as markdown files with:

1. Executive Summary
2. Introduction
3. Key Findings (by theme)
4. Analysis
5. Conclusion
6. References

## Requirements

- Python 3.11+
- LLM provider API key (Groq, Cerebras, etc.)
- Internet access for web search/fetch

## Configuration

Edit `config.py` to change:

- `model`: LLM model (default: groq/moonshotai/kimi-k2-instruct-0905)
- `temperature`: Generation temperature (default: 0.7)
- `max_tokens`: Max tokens per response (default: 8192)
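
The config can also be overridden programmatically instead of editing the file; a short sketch, assuming `RuntimeConfig` (the dataclass in `config.py`) accepts these fields as keyword arguments:

```python
from online_research_agent import OnlineResearchAgent, RuntimeConfig

# Lower temperature for more deterministic reports
config = RuntimeConfig(temperature=0.3, max_tokens=8192)
agent = OnlineResearchAgent(config=config)
```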

+23
@@ -0,0 +1,23 @@
"""
Online Research Agent - Deep-dive research with narrative reports.

Research any topic by searching multiple sources, synthesizing information,
and producing a well-structured narrative report with citations.
"""

from .agent import OnlineResearchAgent, default_agent, goal, nodes, edges
from .config import RuntimeConfig, AgentMetadata, default_config, metadata

__version__ = "1.0.0"

__all__ = [
    "OnlineResearchAgent",
    "default_agent",
    "goal",
    "nodes",
    "edges",
    "RuntimeConfig",
    "AgentMetadata",
    "default_config",
    "metadata",
]

+43
-29
@@ -1,5 +1,7 @@
"""
CLI entry point for Local Business Extractor.
CLI entry point for Online Research Agent.

Uses AgentRuntime for multi-entrypoint support with HITL pause/resume.
"""

import asyncio
@@ -8,7 +10,7 @@ import logging
import sys
import click

from .agent import default_agent, LocalBusinessExtractor
from .agent import default_agent, OnlineResearchAgent


def setup_logging(verbose=False, debug=False):
@@ -26,29 +28,24 @@ def setup_logging(verbose=False, debug=False):
@click.group()
@click.version_option(version="1.0.0")
def cli():
    """Local Business Extractor - Find businesses, extract contacts, sync to Sheets."""
    """Online Research Agent - Deep-dive research with narrative reports."""
    pass


@cli.command()
@click.option(
    "--query",
    "-q",
    type=str,
    required=True,
    help="Search query (e.g. 'bakeries in San Francisco')",
)
@click.option("--quiet", is_flag=True, help="Only output result JSON")
@click.option("--topic", "-t", type=str, required=True, help="Research topic")
@click.option("--mock", is_flag=True, help="Run in mock mode")
@click.option("--quiet", "-q", is_flag=True, help="Only output result JSON")
@click.option("--verbose", "-v", is_flag=True, help="Show execution details")
@click.option("--debug", is_flag=True, help="Show debug logging")
def run(query, quiet, verbose, debug):
    """Extract businesses matching a search query."""
def run(topic, mock, quiet, verbose, debug):
    """Execute research on a topic."""
    if not quiet:
        setup_logging(verbose=verbose, debug=debug)

    context = {"user_request": query}
    context = {"topic": topic}

    result = asyncio.run(default_agent.run(context))
    result = asyncio.run(default_agent.run(context, mock_mode=mock))

    output_data = {
        "success": result.success,
@@ -84,9 +81,6 @@ def validate():
    validation = default_agent.validate()
    if validation["valid"]:
        click.echo("Agent is valid")
        if validation["warnings"]:
            for warning in validation["warnings"]:
                click.echo(f" WARNING: {warning}")
    else:
        click.echo("Agent has errors:")
        for error in validation["errors"]:
@@ -97,7 +91,7 @@ def validate():
@cli.command()
@click.option("--verbose", "-v", is_flag=True)
def shell(verbose):
    """Interactive session (CLI)."""
    """Interactive research session."""
    asyncio.run(_interactive_shell(verbose))


@@ -105,37 +99,57 @@ async def _interactive_shell(verbose=False):
    """Async interactive shell."""
    setup_logging(verbose=verbose)

    click.echo("=== Local Business Extractor ===")
    click.echo("Enter a search query (or 'quit' to exit):\n")
    click.echo("=== Online Research Agent ===")
    click.echo("Enter a topic to research (or 'quit' to exit):\n")

    agent = LocalBusinessExtractor()
    agent = OnlineResearchAgent()
    await agent.start()

    try:
        while True:
            try:
                query = await asyncio.get_event_loop().run_in_executor(None, input, "Query> ")
                if query.lower() in ["quit", "exit", "q"]:
                topic = await asyncio.get_event_loop().run_in_executor(
                    None, input, "Topic> "
                )
                if topic.lower() in ["quit", "exit", "q"]:
                    click.echo("Goodbye!")
                    break

                if not query.strip():
                if not topic.strip():
                    continue

                click.echo("\nExtracting...\n")
                click.echo("\nResearching... (this may take a few minutes)\n")

                result = await agent.run({"user_request": query})
                result = await agent.trigger_and_wait("start", {"topic": topic})

                if result is None:
                    click.echo("\n[Execution timed out]\n")
                    continue

                if result.success:
                    click.echo("\nExtraction complete\n")
                    output = result.output
                    if "file_path" in output:
                        click.echo(f"\nReport saved to: {output['file_path']}\n")
                    if "final_report" in output:
                        click.echo("\n--- Report Preview ---\n")
                        preview = (
                            output["final_report"][:500] + "..."
                            if len(output.get("final_report", "")) > 500
                            else output.get("final_report", "")
                        )
                        click.echo(preview)
                        click.echo("\n")
                else:
                    click.echo(f"\nExtraction failed: {result.error}\n")
                    click.echo(f"\nResearch failed: {result.error}\n")

            except KeyboardInterrupt:
                click.echo("\nGoodbye!")
                break
            except Exception as e:
                click.echo(f"Error: {e}", err=True)
                import traceback

                traceback.print_exc()
    finally:
        await agent.stop()

@@ -0,0 +1,429 @@
"""Agent graph construction for Online Research Agent."""

from framework.graph import EdgeSpec, EdgeCondition, Goal, SuccessCriterion, Constraint
from framework.graph.edge import GraphSpec
from framework.graph.executor import ExecutionResult
from framework.runtime.agent_runtime import AgentRuntime, create_agent_runtime
from framework.runtime.execution_stream import EntryPointSpec
from framework.llm import LiteLLMProvider
from framework.runner.tool_registry import ToolRegistry

from .config import default_config, metadata
from .nodes import (
    parse_query_node,
    search_sources_node,
    fetch_content_node,
    evaluate_sources_node,
    synthesize_findings_node,
    write_report_node,
    quality_check_node,
    save_report_node,
)

# Goal definition
goal = Goal(
    id="comprehensive-online-research",
    name="Comprehensive Online Research",
    description="Research any topic by searching multiple sources, synthesizing information, and producing a well-structured narrative report with citations.",
    success_criteria=[
        SuccessCriterion(
            id="source-coverage",
            description="Query 10+ diverse sources",
            metric="source_count",
            target=">=10",
            weight=0.20,
        ),
        SuccessCriterion(
            id="relevance",
            description="All sources directly address the query",
            metric="relevance_score",
            target="90%",
            weight=0.25,
        ),
        SuccessCriterion(
            id="synthesis",
            description="Synthesize findings into coherent narrative",
            metric="coherence_score",
            target="85%",
            weight=0.25,
        ),
        SuccessCriterion(
            id="citations",
            description="Include citations for all claims",
            metric="citation_coverage",
            target="100%",
            weight=0.15,
        ),
        SuccessCriterion(
            id="actionable",
            description="Report answers the user's question",
            metric="answer_completeness",
            target="90%",
            weight=0.15,
        ),
    ],
    constraints=[
        Constraint(
            id="no-hallucination",
            description="Only include information found in sources",
            constraint_type="quality",
            category="accuracy",
        ),
        Constraint(
            id="source-attribution",
            description="Every factual claim must cite its source",
            constraint_type="quality",
            category="accuracy",
        ),
        Constraint(
            id="recency-preference",
            description="Prefer recent sources when relevant",
            constraint_type="quality",
            category="relevance",
        ),
        Constraint(
            id="no-paywalled",
            description="Avoid sources that require payment to access",
            constraint_type="functional",
            category="accessibility",
        ),
    ],
)

# Node list
nodes = [
    parse_query_node,
    search_sources_node,
    fetch_content_node,
    evaluate_sources_node,
    synthesize_findings_node,
    write_report_node,
    quality_check_node,
    save_report_node,
]

# Edge definitions
edges = [
    EdgeSpec(
        id="parse-to-search",
        source="parse-query",
        target="search-sources",
        condition=EdgeCondition.ON_SUCCESS,
        priority=1,
    ),
    EdgeSpec(
        id="search-to-fetch",
        source="search-sources",
        target="fetch-content",
        condition=EdgeCondition.ON_SUCCESS,
        priority=1,
    ),
    EdgeSpec(
        id="fetch-to-evaluate",
        source="fetch-content",
        target="evaluate-sources",
        condition=EdgeCondition.ON_SUCCESS,
        priority=1,
    ),
    EdgeSpec(
        id="evaluate-to-synthesize",
        source="evaluate-sources",
        target="synthesize-findings",
        condition=EdgeCondition.ON_SUCCESS,
        priority=1,
    ),
    EdgeSpec(
        id="synthesize-to-write",
        source="synthesize-findings",
        target="write-report",
        condition=EdgeCondition.ON_SUCCESS,
        priority=1,
    ),
    EdgeSpec(
        id="write-to-quality",
        source="write-report",
        target="quality-check",
        condition=EdgeCondition.ON_SUCCESS,
        priority=1,
    ),
    EdgeSpec(
        id="quality-to-save",
        source="quality-check",
        target="save-report",
        condition=EdgeCondition.ON_SUCCESS,
        priority=1,
    ),
]

# Graph configuration
entry_node = "parse-query"
entry_points = {"start": "parse-query"}
pause_nodes = []
terminal_nodes = ["save-report"]


class OnlineResearchAgent:
    """
    Online Research Agent - Deep-dive research with narrative reports.

    Uses AgentRuntime for multi-entrypoint support with HITL pause/resume.
    """

    def __init__(self, config=None):
        self.config = config or default_config
        self.goal = goal
        self.nodes = nodes
        self.edges = edges
        self.entry_node = entry_node
        self.entry_points = entry_points
        self.pause_nodes = pause_nodes
        self.terminal_nodes = terminal_nodes
        self._runtime: AgentRuntime | None = None
        self._graph: GraphSpec | None = None

    def _build_entry_point_specs(self) -> list[EntryPointSpec]:
        """Convert entry_points dict to EntryPointSpec list."""
        specs = []
        for ep_id, node_id in self.entry_points.items():
            if ep_id == "start":
                trigger_type = "manual"
                name = "Start"
            elif "_resume" in ep_id:
                trigger_type = "resume"
                name = f"Resume from {ep_id.replace('_resume', '')}"
            else:
                trigger_type = "manual"
                name = ep_id.replace("-", " ").title()

            specs.append(
                EntryPointSpec(
                    id=ep_id,
                    name=name,
                    entry_node=node_id,
                    trigger_type=trigger_type,
                    isolation_level="shared",
                )
            )
        return specs

    def _create_runtime(self, mock_mode=False) -> AgentRuntime:
        """Create AgentRuntime instance."""
        import json
        from pathlib import Path

        # Persistent storage in ~/.hive for telemetry and run history
        storage_path = Path.home() / ".hive" / "online_research_agent"
        storage_path.mkdir(parents=True, exist_ok=True)

        tool_registry = ToolRegistry()

        # Load MCP servers (always load, needed for tool validation)
        agent_dir = Path(__file__).parent
        mcp_config_path = agent_dir / "mcp_servers.json"

        if mcp_config_path.exists():
            with open(mcp_config_path) as f:
                mcp_servers = json.load(f)

            for server_config in mcp_servers.get("servers", []):
                # Resolve relative cwd paths
                cwd = server_config.get("cwd")
                if cwd and not Path(cwd).is_absolute():
                    server_config["cwd"] = str(agent_dir / cwd)
                tool_registry.register_mcp_server(server_config)

        llm = None
        if not mock_mode:
            # LiteLLMProvider uses environment variables for API keys
            llm = LiteLLMProvider(
                model=self.config.model,
                api_key=self.config.api_key,
                api_base=self.config.api_base,
            )

        self._graph = GraphSpec(
            id="online-research-agent-graph",
            goal_id=self.goal.id,
            version="1.0.0",
            entry_node=self.entry_node,
            entry_points=self.entry_points,
            terminal_nodes=self.terminal_nodes,
            pause_nodes=self.pause_nodes,
            nodes=self.nodes,
            edges=self.edges,
            default_model=self.config.model,
            max_tokens=self.config.max_tokens,
        )

        # Create AgentRuntime with all entry points
        self._runtime = create_agent_runtime(
            graph=self._graph,
            goal=self.goal,
            storage_path=storage_path,
            entry_points=self._build_entry_point_specs(),
            llm=llm,
            tools=list(tool_registry.get_tools().values()),
            tool_executor=tool_registry.get_executor(),
        )

        return self._runtime

    async def start(self, mock_mode=False) -> None:
        """Start the agent runtime."""
        if self._runtime is None:
            self._create_runtime(mock_mode=mock_mode)
        await self._runtime.start()

    async def stop(self) -> None:
        """Stop the agent runtime."""
        if self._runtime is not None:
            await self._runtime.stop()

    async def trigger(
        self,
        entry_point: str,
        input_data: dict,
        correlation_id: str | None = None,
        session_state: dict | None = None,
    ) -> str:
        """
        Trigger execution at a specific entry point (non-blocking).

        Args:
            entry_point: Entry point ID (e.g., "start", "pause-node_resume")
            input_data: Input data for the execution
            correlation_id: Optional ID to correlate related executions
            session_state: Optional session state to resume from (with paused_at, memory)

        Returns:
            Execution ID for tracking
        """
        if self._runtime is None or not self._runtime.is_running:
            raise RuntimeError("Agent runtime not started. Call start() first.")
        return await self._runtime.trigger(
            entry_point, input_data, correlation_id, session_state=session_state
        )

    async def trigger_and_wait(
        self,
        entry_point: str,
        input_data: dict,
        timeout: float | None = None,
        session_state: dict | None = None,
    ) -> ExecutionResult | None:
        """
        Trigger execution and wait for completion.

        Args:
            entry_point: Entry point ID
            input_data: Input data for the execution
            timeout: Maximum time to wait (seconds)
            session_state: Optional session state to resume from (with paused_at, memory)

        Returns:
            ExecutionResult or None if timeout
        """
        if self._runtime is None or not self._runtime.is_running:
            raise RuntimeError("Agent runtime not started. Call start() first.")
        return await self._runtime.trigger_and_wait(
            entry_point, input_data, timeout, session_state=session_state
        )

    async def run(
        self, context: dict, mock_mode=False, session_state=None
    ) -> ExecutionResult:
        """
        Run the agent (convenience method for simple single execution).

        For more control, use start() + trigger_and_wait() + stop().
        """
        await self.start(mock_mode=mock_mode)
        try:
            # Determine entry point based on session_state
            if session_state and "paused_at" in session_state:
                paused_node = session_state["paused_at"]
                resume_key = f"{paused_node}_resume"
                if resume_key in self.entry_points:
                    entry_point = resume_key
                else:
                    entry_point = "start"
            else:
                entry_point = "start"

            result = await self.trigger_and_wait(
                entry_point, context, session_state=session_state
            )
            return result or ExecutionResult(success=False, error="Execution timeout")
        finally:
            await self.stop()

    async def get_goal_progress(self) -> dict:
        """Get goal progress across all executions."""
        if self._runtime is None:
            raise RuntimeError("Agent runtime not started")
        return await self._runtime.get_goal_progress()

    def get_stats(self) -> dict:
        """Get runtime statistics."""
        if self._runtime is None:
            return {"running": False}
        return self._runtime.get_stats()

    def info(self):
        """Get agent information."""
        return {
            "name": metadata.name,
            "version": metadata.version,
            "description": metadata.description,
            "goal": {
                "name": self.goal.name,
                "description": self.goal.description,
            },
            "nodes": [n.id for n in self.nodes],
            "edges": [e.id for e in self.edges],
            "entry_node": self.entry_node,
            "entry_points": self.entry_points,
            "pause_nodes": self.pause_nodes,
            "terminal_nodes": self.terminal_nodes,
            "multi_entrypoint": True,
        }

    def validate(self):
        """Validate agent structure."""
        errors = []
        warnings = []

        node_ids = {node.id for node in self.nodes}
        for edge in self.edges:
            if edge.source not in node_ids:
                errors.append(f"Edge {edge.id}: source '{edge.source}' not found")
            if edge.target not in node_ids:
                errors.append(f"Edge {edge.id}: target '{edge.target}' not found")

        if self.entry_node not in node_ids:
            errors.append(f"Entry node '{self.entry_node}' not found")

        for terminal in self.terminal_nodes:
            if terminal not in node_ids:
                errors.append(f"Terminal node '{terminal}' not found")

        for pause in self.pause_nodes:
            if pause not in node_ids:
                errors.append(f"Pause node '{pause}' not found")

        # Validate entry points
        for ep_id, node_id in self.entry_points.items():
            if node_id not in node_ids:
                errors.append(
                    f"Entry point '{ep_id}' references unknown node '{node_id}'"
                )

        return {
            "valid": len(errors) == 0,
            "errors": errors,
            "warnings": warnings,
        }


# Create default instance
default_agent = OnlineResearchAgent()

+4
-4
@@ -24,7 +24,7 @@ def _load_preferred_model() -> str:
class RuntimeConfig:
    model: str = field(default_factory=_load_preferred_model)
    temperature: float = 0.7
    max_tokens: int = 40000
    max_tokens: int = 8192
    api_key: str | None = None
    api_base: str | None = None

@@ -32,12 +32,12 @@ class RuntimeConfig:
default_config = RuntimeConfig()


# Agent metadata
@dataclass
class AgentMetadata:
    name: str = "Email Reply Agent"
    name: str = "Online Research Agent"
    version: str = "1.0.0"
    description: str = "Filter unreplied emails, confirm recipients, send personalized replies."
    intro_message: str = "Tell me which emails you want to reply to (e.g., 'emails from @company.com in the last week')."
    description: str = "Research any topic by searching multiple sources, synthesizing information, and producing a well-structured narrative report with citations."


metadata = AgentMetadata()

+9
@@ -0,0 +1,9 @@
{
  "hive-tools": {
    "transport": "stdio",
    "command": "python",
    "args": ["mcp_server.py", "--stdio"],
    "cwd": "../../tools",
    "description": "Hive tools MCP server providing web_search, web_scrape, and write_to_file"
  }
}

+396
@@ -0,0 +1,396 @@
"""Node definitions for Online Research Agent."""

from framework.graph import NodeSpec

# Node 1: Parse Query
parse_query_node = NodeSpec(
    id="parse-query",
    name="Parse Query",
    description="Analyze the research topic and generate 3-5 diverse search queries to cover different aspects",
    node_type="llm_generate",
    input_keys=["topic"],
    output_keys=["search_queries", "research_focus", "key_aspects"],
    output_schema={
        "research_focus": {
            "type": "string",
            "required": True,
            "description": "Brief statement of what we're researching",
        },
        "key_aspects": {
            "type": "array",
            "required": True,
            "description": "List of 3-5 key aspects to investigate",
        },
        "search_queries": {
            "type": "array",
            "required": True,
            "description": "List of 3-5 search queries",
        },
    },
    system_prompt="""\
You are a research query strategist. Given a research topic, analyze it and generate search queries.

Your task:
1. Understand the core research question
2. Identify 3-5 key aspects to investigate
3. Generate 3-5 diverse search queries that will find comprehensive information

CRITICAL: Return ONLY raw JSON. NO markdown, NO code blocks.

Return this JSON structure:
{
  "research_focus": "Brief statement of what we're researching",
  "key_aspects": ["aspect1", "aspect2", "aspect3"],
  "search_queries": [
    "query 1 - broad overview",
    "query 2 - specific angle",
    "query 3 - recent developments",
    "query 4 - expert opinions",
    "query 5 - data/statistics"
  ]
}
""",
    tools=[],
    max_retries=3,
)

# Node 2: Search Sources
search_sources_node = NodeSpec(
    id="search-sources",
    name="Search Sources",
    description="Execute web searches using the generated queries to find 15+ source URLs",
    node_type="llm_tool_use",
    input_keys=["search_queries", "research_focus"],
    output_keys=["source_urls", "search_results_summary"],
    output_schema={
        "source_urls": {
            "type": "array",
            "required": True,
            "description": "List of source URLs found",
        },
        "search_results_summary": {
            "type": "string",
            "required": True,
            "description": "Brief summary of what was found",
        },
    },
    system_prompt="""\
You are a research assistant executing web searches. Use the web_search tool to find sources.

Your task:
1. Execute each search query using web_search tool
2. Collect URLs from search results
3. Aim for 15+ diverse sources

After searching, return JSON with found sources:
{
  "source_urls": ["url1", "url2", ...],
  "search_results_summary": "Brief summary of what was found"
}
""",
    tools=["web_search"],
    max_retries=3,
)

# Node 3: Fetch Content
fetch_content_node = NodeSpec(
    id="fetch-content",
    name="Fetch Content",
    description="Fetch and extract content from the discovered source URLs",
    node_type="llm_tool_use",
    input_keys=["source_urls", "research_focus"],
    output_keys=["fetched_sources", "fetch_errors"],
    output_schema={
        "fetched_sources": {
            "type": "array",
            "required": True,
            "description": "List of fetched source objects with url, title, content",
        },
        "fetch_errors": {
            "type": "array",
            "required": True,
            "description": "List of URLs that failed to fetch",
        },
    },
    system_prompt="""\
You are a content fetcher. Use web_scrape tool to retrieve content from URLs.

Your task:
1. Fetch content from each source URL using web_scrape tool
2. Extract the main content relevant to the research focus
3. Track any URLs that failed to fetch

After fetching, return JSON:
{
  "fetched_sources": [
    {"url": "...", "title": "...", "content": "extracted text..."},
    ...
  ],
  "fetch_errors": ["url that failed", ...]
}
""",
    tools=["web_scrape"],
    max_retries=3,
)

# Node 4: Evaluate Sources
evaluate_sources_node = NodeSpec(
    id="evaluate-sources",
    name="Evaluate Sources",
    description="Score sources for relevance and quality, filter to top 10",
    node_type="llm_generate",
    input_keys=["fetched_sources", "research_focus", "key_aspects"],
    output_keys=["ranked_sources", "source_analysis"],
    output_schema={
        "ranked_sources": {
            "type": "array",
            "required": True,
            "description": "List of ranked sources with scores",
        },
        "source_analysis": {
            "type": "string",
            "required": True,
            "description": "Overview of source quality and coverage",
        },
    },
    system_prompt="""\
You are a source evaluator. Assess each source for quality and relevance.

Scoring criteria:
- Relevance to research focus (1-10)
- Source credibility (1-10)
- Information depth (1-10)
- Recency if relevant (1-10)

Your task:
1. Score each source
2. Rank by combined score
3. Select top 10 sources
4. Note what each source uniquely contributes

Return JSON:
{
  "ranked_sources": [
    {"url": "...", "title": "...", "content": "...", "score": 8.5, "unique_value": "..."},
    ...
  ],
  "source_analysis": "Overview of source quality and coverage"
}
""",
    tools=[],
    max_retries=3,
)

# Node 5: Synthesize Findings
synthesize_findings_node = NodeSpec(
    id="synthesize-findings",
    name="Synthesize Findings",
    description="Extract key facts from sources and identify common themes",
    node_type="llm_generate",
    input_keys=["ranked_sources", "research_focus", "key_aspects"],
    output_keys=["key_findings", "themes", "source_citations"],
    output_schema={
        "key_findings": {
            "type": "array",
            "required": True,
            "description": "List of key findings with sources and confidence",
        },
        "themes": {
            "type": "array",
            "required": True,
            "description": "List of themes with descriptions and supporting sources",
        },
        "source_citations": {
            "type": "object",
            "required": True,
            "description": "Map of facts to supporting URLs",
        },
    },
    system_prompt="""\
You are a research synthesizer. Analyze multiple sources to extract insights.

Your task:
1. Identify key facts from each source
2. Find common themes across sources
3. Note contradictions or debates
4. Build a citation map (fact -> source URL)

Return JSON:
{
  "key_findings": [
    {"finding": "...", "sources": ["url1", "url2"], "confidence": "high/medium/low"},
    ...
  ],
  "themes": [
    {"theme": "...", "description": "...", "supporting_sources": ["url1", ...]},
    ...
  ],
  "source_citations": {
    "fact or claim": ["supporting url1", "url2"],
    ...
  }
}
""",
    tools=[],
    max_retries=3,
)

# Node 6: Write Report
write_report_node = NodeSpec(
    id="write-report",
    name="Write Report",
    description="Generate a narrative report with proper citations",
    node_type="llm_generate",
    input_keys=[
        "key_findings",
        "themes",
        "source_citations",
        "research_focus",
        "ranked_sources",
    ],
    output_keys=["report_content", "references"],
    output_schema={
        "report_content": {
            "type": "string",
            "required": True,
            "description": "Full markdown report text with citations",
        },
        "references": {
            "type": "array",
            "required": True,
            "description": "List of reference objects with number, url, title",
        },
    },
    system_prompt="""\
You are a research report writer. Create a well-structured narrative report.

Report structure:
1. Executive Summary (2-3 paragraphs)
2. Introduction (context and scope)
3. Key Findings (organized by theme)
4. Analysis (synthesis and implications)
5. Conclusion
6. References (numbered list of all sources)

Citation format: Use numbered citations like [1], [2] that correspond to the References section.

IMPORTANT:
- Every factual claim MUST have a citation
- Write in clear, professional prose
- Be objective and balanced
- Highlight areas of consensus and debate

Return JSON:
{
  "report_content": "Full markdown report text with citations...",
  "references": [
    {"number": 1, "url": "...", "title": "..."},
    ...
  ]
}
""",
    tools=[],
    max_retries=3,
)

# Node 7: Quality Check
quality_check_node = NodeSpec(
    id="quality-check",
    name="Quality Check",
    description="Verify all claims have citations and report is coherent",
    node_type="llm_generate",
    input_keys=["report_content", "references", "source_citations"],
    output_keys=["quality_score", "issues", "final_report"],
    output_schema={
        "quality_score": {
            "type": "number",
            "required": True,
            "description": "Quality score 0-1",
        },
        "issues": {
            "type": "array",
            "required": True,
            "description": "List of issues found and fixed",
        },
        "final_report": {
            "type": "string",
            "required": True,
            "description": "Corrected full report",
        },
    },
    system_prompt="""\
You are a quality assurance reviewer. Check the research report for issues.

Check for:
1. Uncited claims (factual statements without [n] citation)
2. Broken citations (references to non-existent numbers)
3. Coherence (logical flow between sections)
4. Completeness (all key aspects covered)
5. Accuracy (claims match source content)

If issues found, fix them in the final report.

Return JSON:
{
  "quality_score": 0.95,
  "issues": [
    {"type": "uncited_claim", "location": "paragraph 3", "fixed": true},
    ...
  ],
  "final_report": "Corrected full report with all issues fixed..."
}
""",
    tools=[],
    max_retries=3,
)

# Node 8: Save Report
save_report_node = NodeSpec(
    id="save-report",
    name="Save Report",
    description="Write the final report to a local markdown file",
    node_type="llm_tool_use",
    input_keys=["final_report", "references", "research_focus"],
    output_keys=["file_path", "save_status"],
    output_schema={
        "file_path": {
            "type": "string",
            "required": True,
            "description": "Path where report was saved",
        },
        "save_status": {
            "type": "string",
            "required": True,
            "description": "Status of save operation",
        },
    },
    system_prompt="""\
You are a file manager. Save the research report to disk.

Your task:
1. Generate a filename from the research focus (slugified, with date)
2. Use the write_to_file tool to save the report as markdown
3. Save to the ./research_reports/ directory

Filename format: research_YYYY-MM-DD_topic-slug.md

Return JSON:
{
  "file_path": "research_reports/research_2026-01-23_topic-name.md",
  "save_status": "success"
}
""",
    tools=["write_to_file"],
    max_retries=3,
)

__all__ = [
    "parse_query_node",
    "search_sources_node",
    "fetch_content_node",
    "evaluate_sources_node",
    "synthesize_findings_node",
    "write_report_node",
    "quality_check_node",
    "save_report_node",
]

@@ -0,0 +1,303 @@
---
name: building-agents-core
description: Core concepts for goal-driven agents - architecture, node types, tool discovery, and workflow overview. Use when starting agent development or need to understand agent fundamentals.
license: Apache-2.0
metadata:
  author: hive
  version: "1.0"
  type: foundational
  part_of: building-agents
---

# Building Agents - Core Concepts

Foundational knowledge for building goal-driven agents as Python packages.

## Architecture: Python Services (Not JSON Configs)

Agents are built as Python packages:

```
exports/my_agent/
├── __init__.py        # Package exports
├── __main__.py        # CLI (run, info, validate, shell)
├── agent.py           # Graph construction (goal, edges, agent class)
├── nodes/__init__.py  # Node definitions (NodeSpec)
├── config.py          # Runtime config
└── README.md          # Documentation
```

**Key Principle: Agent is visible and editable during build**

- ✅ Files created immediately as components are approved
- ✅ User can watch files grow in their editor
- ✅ No session state - just direct file writes
- ✅ No "export" step - agent is ready when build completes

## Core Concepts

### Goal

Success criteria and constraints (written to agent.py)

```python
goal = Goal(
    id="research-goal",
    name="Technical Research Agent",
    description="Research technical topics thoroughly",
    success_criteria=[
        SuccessCriterion(
            id="completeness",
            description="Cover all aspects of topic",
            metric="coverage_score",
            target=">=0.9",
            weight=0.4,
        ),
        # 3-5 success criteria total
    ],
    constraints=[
        Constraint(
            id="accuracy",
            description="All information must be verified",
            constraint_type="hard",
            category="quality",
        ),
        # 1-5 constraints total
    ],
)
```

### Node

Unit of work (written to nodes/__init__.py)

**Node Types:**

- `llm_generate` - Text generation, parsing
- `llm_tool_use` - Actions requiring tools
- `router` - Conditional branching
- `function` - Deterministic operations

```python
search_node = NodeSpec(
    id="search-web",
    name="Search Web",
    description="Search for information online",
    node_type="llm_tool_use",
    input_keys=["query"],
    output_keys=["search_results"],
    system_prompt="Search the web for: {query}",
    tools=["web_search"],
    max_retries=3,
)
```

### Edge

Connection between nodes (written to agent.py)

**Edge Conditions:**

- `on_success` - Proceed if node succeeds
- `on_failure` - Handle errors
- `always` - Always proceed
- `conditional` - Based on expression

```python
EdgeSpec(
    id="search-to-analyze",
    source="search-web",
    target="analyze-results",
    condition=EdgeCondition.ON_SUCCESS,
    priority=1,
)
```

### Pause/Resume

Multi-turn conversations

- **Pause nodes** - Stop execution, wait for user input
- **Resume entry points** - Continue from pause with user's response

```python
# Example pause/resume configuration
pause_nodes = ["request-clarification"]
entry_points = {
    "start": "analyze-request",
    "request-clarification_resume": "process-clarification"
}
```

## Tool Discovery & Validation

**CRITICAL:** Before adding a node with tools, you MUST verify the tools exist.

Tools are provided by MCP servers. Never assume a tool exists - always discover dynamically.

### Step 1: Register MCP Server (if not already done)

```python
mcp__agent-builder__add_mcp_server(
    name="tools",
    transport="stdio",
    command="python",
    args='["mcp_server.py", "--stdio"]',
    cwd="../tools"
)
```

### Step 2: Discover Available Tools

```python
# List all tools from all registered servers
mcp__agent-builder__list_mcp_tools()

# Or list tools from a specific server
mcp__agent-builder__list_mcp_tools(server_name="tools")
```

This returns available tools with their descriptions and parameters:

```json
{
  "success": true,
  "tools_by_server": {
    "tools": [
      {
        "name": "web_search",
        "description": "Search the web...",
        "parameters": ["query"]
      },
      {
        "name": "web_scrape",
        "description": "Scrape a URL...",
        "parameters": ["url"]
      }
    ]
  },
  "total_tools": 14
}
```

### Step 3: Validate Before Adding Nodes

Before writing a node with `tools=[...]`:

1. Call `list_mcp_tools()` to get available tools
2. Check each tool in your node exists in the response
3. If a tool doesn't exist:
   - **DO NOT proceed** with the node
   - Inform the user: "The tool 'X' is not available. Available tools are: ..."
   - Ask if they want to use an alternative or proceed without the tool

### Tool Validation Anti-Patterns

❌ **Never assume a tool exists** - always call `list_mcp_tools()` first
❌ **Never write a node with unverified tools** - validate before writing
❌ **Never silently drop tools** - if a tool doesn't exist, inform the user
❌ **Never guess tool names** - use exact names from discovery response

### Example Validation Flow

```python
# 1. User requests: "Add a node that searches the web"
# 2. Discover available tools
tools_response = mcp__agent-builder__list_mcp_tools()

# 3. Check if web_search exists
available = [t["name"] for tools in tools_response["tools_by_server"].values() for t in tools]
if "web_search" not in available:
    # Inform user and ask how to proceed
    print("❌ 'web_search' not available. Available tools:", available)
else:
    # Proceed with node creation
    # ...
```

## Workflow Overview: Incremental File Construction

```
1. CREATE PACKAGE → mkdir + write skeletons
2. DEFINE GOAL    → Write to agent.py + config.py
3. FOR EACH NODE:
   - Propose design
   - User approves
   - Write to nodes/__init__.py IMMEDIATELY  ← FILE WRITTEN
   - (Optional) Validate with test_node      ← MCP VALIDATION
   - User can open file and see it
4. CONNECT EDGES  → Update agent.py          ← FILE WRITTEN
   - (Optional) Validate with validate_graph ← MCP VALIDATION
5. FINALIZE       → Write agent class to agent.py ← FILE WRITTEN
6. DONE - Agent ready at exports/my_agent/
```

**Files written immediately. MCP tools optional for validation/testing bookkeeping.**

### The Key Difference

**OLD (Bad):**

```
MCP add_node → Session State → MCP add_node → Session State → ...
                                     ↓
                             MCP export_graph
                                     ↓
                              Files appear
```

**NEW (Good):**

```
Write node to file → (Optional: MCP test_node) → Write node to file → ...
        ↓                                                ↓
  File visible                                     File visible
  immediately                                      immediately
```

**Bottom line:** Use Write/Edit for construction, MCP for validation if needed.

## When to Use This Skill

Use building-agents-core when:
- Starting a new agent project and need to understand fundamentals
- Need to understand agent architecture before building
- Want to validate tool availability before proceeding
- Learning about node types, edges, and graph execution

**Next Steps:**
- Ready to build? → Use `building-agents-construction` skill
- Need patterns and examples? → Use `building-agents-patterns` skill

## MCP Tools for Validation

After writing files, optionally use MCP tools for validation:

**test_node** - Validate node configuration with mock inputs
```python
mcp__agent-builder__test_node(
    node_id="search-web",
    test_input='{"query": "test query"}',
    mock_llm_response='{"results": "mock output"}'
)
```

**validate_graph** - Check graph structure
```python
mcp__agent-builder__validate_graph()
# Returns: unreachable nodes, missing connections, etc.
```

**create_session** - Track session state for bookkeeping
```python
mcp__agent-builder__create_session(session_name="my-build")
```

**Key Point:** Files are written FIRST. MCP tools are for validation only.

## Related Skills

- **building-agents-construction** - Step-by-step building process
- **building-agents-patterns** - Best practices and examples
- **agent-workflow** - Complete workflow orchestrator
- **testing-agent** - Test and validate completed agents
@@ -0,0 +1,497 @@
|
||||
---
|
||||
name: building-agents-patterns
|
||||
description: Best practices, patterns, and examples for building goal-driven agents. Includes pause/resume architecture, hybrid workflows, anti-patterns, and handoff to testing. Use when optimizing agent design.
|
||||
license: Apache-2.0
|
||||
metadata:
|
||||
author: hive
|
||||
version: "1.0"
|
||||
type: reference
|
||||
part_of: building-agents
|
||||
---
|
||||
|
||||
# Building Agents - Patterns & Best Practices
|
||||
|
||||
Design patterns, examples, and best practices for building robust goal-driven agents.
|
||||
|
||||
**Prerequisites:** Complete agent structure using `building-agents-construction`.
|
||||
|
||||
## Practical Example: Hybrid Workflow
|
||||
|
||||
How to build a node using both direct file writes and optional MCP validation:
|
||||
|
||||
```python
|
||||
# 1. WRITE TO FILE FIRST (Primary - makes it visible)
|
||||
node_code = '''
|
||||
search_node = NodeSpec(
|
||||
id="search-web",
|
||||
node_type="llm_tool_use",
|
||||
input_keys=["query"],
|
||||
output_keys=["search_results"],
|
||||
system_prompt="Search the web for: {query}",
|
||||
tools=["web_search"],
|
||||
)
|
||||
'''
|
||||
|
||||
Edit(
|
||||
file_path="exports/research_agent/nodes/__init__.py",
|
||||
old_string="# Nodes will be added here",
|
||||
new_string=node_code
|
||||
)
|
||||
|
||||
print("✅ Added search_node to nodes/__init__.py")
|
||||
print("📁 Open exports/research_agent/nodes/__init__.py to see it!")
|
||||
|
||||
# 2. OPTIONALLY VALIDATE WITH MCP (Secondary - bookkeeping)
|
||||
validation = mcp__agent-builder__test_node(
|
||||
node_id="search-web",
|
||||
test_input='{"query": "python tutorials"}',
|
||||
mock_llm_response='{"search_results": [...mock results...]}'
|
||||
)
|
||||
|
||||
print(f"✓ Validation: {validation['success']}")
|
||||
```
|
||||
|
||||
**User experience:**
|
||||
|
||||
- Immediately sees node in their editor (from step 1)
|
||||
- Gets validation feedback (from step 2)
|
||||
- Can edit the file directly if needed
|
||||
|
||||
This combines visibility (files) with validation (MCP tools).
|
||||
|
||||
## Pause/Resume Architecture
|
||||
|
||||
For agents needing multi-turn conversations with user interaction:
|
||||
|
||||
### Basic Pause/Resume Flow
|
||||
|
||||
```python
|
||||
# Define pause nodes - execution stops at these nodes
|
||||
pause_nodes = ["request-clarification", "await-approval"]
|
||||
|
||||
# Define entry points - where to resume from each pause
|
||||
entry_points = {
|
||||
"start": "analyze-request", # Initial entry
|
||||
"request-clarification_resume": "process-clarification", # Resume from clarification
|
||||
"await-approval_resume": "execute-action", # Resume from approval
|
||||
}
|
||||
```
|
||||
|
||||
### Example: Multi-Turn Research Agent
|
||||
|
||||
```python
|
||||
# Nodes
|
||||
nodes = [
|
||||
NodeSpec(id="analyze-request", ...),
|
||||
NodeSpec(id="request-clarification", ...), # PAUSE NODE
|
||||
NodeSpec(id="process-clarification", ...),
|
||||
NodeSpec(id="generate-results", ...),
|
||||
NodeSpec(id="await-approval", ...), # PAUSE NODE
|
||||
NodeSpec(id="execute-action", ...),
|
||||
]
|
||||
|
||||
# Edges with resume flows
|
||||
edges = [
|
||||
EdgeSpec(
|
||||
id="analyze-to-clarify",
|
||||
source="analyze-request",
|
||||
target="request-clarification",
|
||||
condition=EdgeCondition.CONDITIONAL,
|
||||
condition_expr="needs_clarification == true",
|
||||
),
|
||||
# When resumed, goes to process-clarification
|
||||
EdgeSpec(
|
||||
id="clarify-to-process",
|
||||
source="request-clarification",
|
||||
target="process-clarification",
|
||||
condition=EdgeCondition.ALWAYS,
|
||||
),
|
||||
EdgeSpec(
|
||||
id="results-to-approval",
|
||||
source="generate-results",
|
||||
target="await-approval",
|
||||
condition=EdgeCondition.ALWAYS,
|
||||
),
|
||||
# When resumed, goes to execute-action
|
||||
EdgeSpec(
|
||||
id="approval-to-execute",
|
||||
source="await-approval",
|
||||
target="execute-action",
|
||||
condition=EdgeCondition.ALWAYS,
|
||||
),
|
||||
]
|
||||
|
||||
# Configuration
|
||||
pause_nodes = ["request-clarification", "await-approval"]
|
||||
entry_points = {
|
||||
"start": "analyze-request",
|
||||
"request-clarification_resume": "process-clarification",
|
||||
"await-approval_resume": "execute-action",
|
||||
}
|
||||
```
|
||||
|
||||
### Running Pause/Resume Agents
|
||||
|
||||
```python
|
||||
# Initial run - will pause at first pause node
|
||||
result1 = await agent.run(
|
||||
context={"query": "research topic"},
|
||||
session_state=None
|
||||
)
|
||||
|
||||
# Check if paused
|
||||
if result1.paused_at:
|
||||
print(f"Paused at: {result1.paused_at}")
|
||||
|
||||
# Resume with user input
|
||||
result2 = await agent.run(
|
||||
context={"user_response": "clarification details"},
|
||||
session_state=result1.session_state # Pass previous state
|
||||
)
|
||||
```
|
||||
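
In a real multi-turn session the run/resume cycle repeats until the agent finishes. A minimal driver sketch, assuming the `agent.run` signature and the `paused_at`/`session_state` result fields shown above; `get_user_input` is a hypothetical stand-in for however user input arrives:

```python
# Drive the agent until completion, resuming after every pause.
async def drive(agent, initial_context: dict):
    result = await agent.run(context=initial_context, session_state=None)
    while result.paused_at:
        # get_user_input is hypothetical: collect whatever the pause node asked for
        user_text = get_user_input(f"Agent paused at {result.paused_at}: ")
        result = await agent.run(
            context={"user_response": user_text},
            session_state=result.session_state,  # carry state across turns
        )
    return result
```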
|
||||
## Anti-Patterns
|
||||
|
||||
### What NOT to Do
|
||||
|
||||
❌ **Don't rely on `export_graph`** - Write files immediately, not at the end
|
||||
```python
|
||||
# BAD: Building in session state, exporting at end
|
||||
mcp__agent-builder__add_node(...)
|
||||
mcp__agent-builder__add_node(...)
|
||||
mcp__agent-builder__export_graph() # Files appear only now
|
||||
|
||||
# GOOD: Writing files immediately
|
||||
Write(file_path="...", content=node_code) # File visible now
|
||||
Write(file_path="...", content=node_code) # File visible now
|
||||
```
|
||||
|
||||
❌ **Don't hide code in session** - Write to files as components are approved
|
||||
```python
|
||||
# BAD: Accumulating changes invisibly
|
||||
session.add_component(component1)
|
||||
session.add_component(component2)
|
||||
# User can't see anything yet
|
||||
|
||||
# GOOD: Incremental visibility
|
||||
Edit(file_path="...", ...) # User sees change 1
|
||||
Edit(file_path="...", ...) # User sees change 2
|
||||
```
|
||||
|
||||
❌ **Don't wait to write files** - Agent visible from first step
|
||||
```python
|
||||
# BAD: Building everything before writing
|
||||
design_all_nodes()
|
||||
design_all_edges()
|
||||
write_everything_at_once()
|
||||
|
||||
# GOOD: Write as you go
|
||||
write_package_structure() # Visible
|
||||
write_goal() # Visible
|
||||
write_node_1() # Visible
|
||||
write_node_2() # Visible
|
||||
```
|
||||
|
||||
❌ **Don't batch everything** - Write incrementally
|
||||
```python
|
||||
# BAD: Batching all nodes
|
||||
nodes = [design_node_1(), design_node_2(), ...]
|
||||
write_all_nodes(nodes)
|
||||
|
||||
# GOOD: One at a time with user feedback
|
||||
write_node_1() # User approves
|
||||
write_node_2() # User approves
|
||||
write_node_3() # User approves
|
||||
```
|
||||
|
||||
### MCP Tools - Correct Usage
|
||||
|
||||
**MCP tools OK for:**
|
||||
✅ `test_node` - Validate node configuration with mock inputs
|
||||
✅ `validate_graph` - Check graph structure
|
||||
✅ `create_session` - Track session state for bookkeeping
|
||||
✅ Other validation tools
|
||||
|
||||
**Just don't:** use MCP tools as the primary construction method, or rely on `export_graph` to materialize files at the end
|
||||
|
||||
## Best Practices
|
||||
|
||||
### 1. Show Progress After Each Write
|
||||
|
||||
```python
|
||||
# After writing a node
|
||||
print("✅ Added analyze_request_node to nodes/__init__.py")
|
||||
print("📊 Progress: 1/6 nodes added")
|
||||
print("📁 Open exports/my_agent/nodes/__init__.py to see it!")
|
||||
```
|
||||
|
||||
### 2. Let User Open Files During Build
|
||||
|
||||
```python
|
||||
# Encourage file inspection
|
||||
print("✅ Goal written to agent.py")
|
||||
print("")
|
||||
print("💡 Tip: Open exports/my_agent/agent.py in your editor to see the goal!")
|
||||
```
|
||||
|
||||
### 3. Write Incrementally - One Component at a Time
|
||||
|
||||
```python
|
||||
# Good flow
|
||||
write_package_structure()
|
||||
show_user("Package created")
|
||||
|
||||
write_goal()
|
||||
show_user("Goal written")
|
||||
|
||||
for node in nodes:
|
||||
get_approval(node)
|
||||
write_node(node)
|
||||
show_user(f"Node {node.id} written")
|
||||
```
|
||||
|
||||
### 4. Test As You Build
|
||||
|
||||
```python
|
||||
# After adding several nodes
|
||||
print("💡 You can test current state with:")
|
||||
print(" PYTHONPATH=core:exports python -m my_agent validate")
|
||||
print(" PYTHONPATH=core:exports python -m my_agent info")
|
||||
```
|
||||
|
||||
### 5. Keep User Informed
|
||||
|
||||
```python
|
||||
# Clear status updates
|
||||
print("🔨 Creating package structure...")
|
||||
print("✅ Package created: exports/my_agent/")
|
||||
print("")
|
||||
print("📝 Next: Define agent goal")
|
||||
```
|
||||
|
||||
## Continuous Monitoring Agents
|
||||
|
||||
For agents that run continuously without terminal nodes:
|
||||
|
||||
```python
|
||||
# No terminal nodes - loops forever
|
||||
terminal_nodes = []
|
||||
|
||||
# Workflow loops back to start
|
||||
edges = [
|
||||
EdgeSpec(id="monitor-to-check", source="monitor", target="check-condition"),
|
||||
EdgeSpec(id="check-to-wait", source="check-condition", target="wait"),
|
||||
EdgeSpec(id="wait-to-monitor", source="wait", target="monitor"), # Loop
|
||||
]
|
||||
|
||||
# Entry node only
|
||||
entry_node = "monitor"
|
||||
entry_points = {"start": "monitor"}
|
||||
pause_nodes = []
|
||||
```
|
||||
|
||||
**Example: File Monitor**
|
||||
|
||||
```python
|
||||
nodes = [
|
||||
NodeSpec(id="list-files", ...),
|
||||
NodeSpec(id="check-new-files", node_type="router", ...),
|
||||
NodeSpec(id="process-files", ...),
|
||||
NodeSpec(id="wait-interval", node_type="function", ...),
|
||||
]
|
||||
|
||||
edges = [
|
||||
EdgeSpec(id="list-to-check", source="list-files", target="check-new-files"),
|
||||
EdgeSpec(
|
||||
id="check-to-process",
|
||||
source="check-new-files",
|
||||
target="process-files",
|
||||
condition=EdgeCondition.CONDITIONAL,
|
||||
condition_expr="new_files_count > 0",
|
||||
),
|
||||
EdgeSpec(
|
||||
id="check-to-wait",
|
||||
source="check-new-files",
|
||||
target="wait-interval",
|
||||
condition=EdgeCondition.CONDITIONAL,
|
||||
condition_expr="new_files_count == 0",
|
||||
),
|
||||
EdgeSpec(id="process-to-wait", source="process-files", target="wait-interval"),
|
||||
EdgeSpec(id="wait-to-list", source="wait-interval", target="list-files"), # Loop back
|
||||
]
|
||||
|
||||
terminal_nodes = [] # No terminal - runs forever
|
||||
```
|
||||
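
The `wait-interval` function node can be as small as a sleep. A sketch under the assumption that function nodes wrap an async callable receiving the shared context (check your framework's actual function-node contract):

```python
import asyncio

# Hypothetical body for the wait-interval function node: sleep between
# polling cycles so the monitor loop doesn't spin at full speed.
async def wait_interval(ctx: dict) -> dict:
    await asyncio.sleep(ctx.get("poll_interval_seconds", 30))
    return {}
```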
|
||||
## Complex Routing Patterns
|
||||
|
||||
### Multi-Condition Router
|
||||
|
||||
```python
|
||||
router_node = NodeSpec(
|
||||
id="decision-router",
|
||||
node_type="router",
|
||||
input_keys=["analysis_result"],
|
||||
output_keys=["decision"],
|
||||
system_prompt="""
|
||||
Based on the analysis result, decide the next action:
|
||||
- If confidence > 0.9: route to "execute"
|
||||
- If 0.5 <= confidence <= 0.9: route to "review"
|
||||
- If confidence < 0.5: route to "clarify"
|
||||
|
||||
Return: {"decision": "execute|review|clarify"}
|
||||
""",
|
||||
)
|
||||
|
||||
# Edges for each route
|
||||
edges = [
|
||||
EdgeSpec(
|
||||
id="router-to-execute",
|
||||
source="decision-router",
|
||||
target="execute-action",
|
||||
condition=EdgeCondition.CONDITIONAL,
|
||||
condition_expr="decision == 'execute'",
|
||||
priority=1,
|
||||
),
|
||||
EdgeSpec(
|
||||
id="router-to-review",
|
||||
source="decision-router",
|
||||
target="human-review",
|
||||
condition=EdgeCondition.CONDITIONAL,
|
||||
condition_expr="decision == 'review'",
|
||||
priority=2,
|
||||
),
|
||||
EdgeSpec(
|
||||
id="router-to-clarify",
|
||||
source="decision-router",
|
||||
target="request-clarification",
|
||||
condition=EdgeCondition.CONDITIONAL,
|
||||
condition_expr="decision == 'clarify'",
|
||||
priority=3,
|
||||
),
|
||||
]
|
||||
```
|
||||
|
||||
## Error Handling Patterns
|
||||
|
||||
### Graceful Failure with Fallback
|
||||
|
||||
```python
|
||||
# Primary node with error handling
|
||||
nodes = [
|
||||
NodeSpec(id="api-call", max_retries=3, ...),
|
||||
NodeSpec(id="fallback-cache", ...),
|
||||
NodeSpec(id="report-error", ...),
|
||||
]
|
||||
|
||||
edges = [
|
||||
# Success path
|
||||
EdgeSpec(
|
||||
id="api-success",
|
||||
source="api-call",
|
||||
target="process-results",
|
||||
condition=EdgeCondition.ON_SUCCESS,
|
||||
),
|
||||
# Fallback on failure
|
||||
EdgeSpec(
|
||||
id="api-to-fallback",
|
||||
source="api-call",
|
||||
target="fallback-cache",
|
||||
condition=EdgeCondition.ON_FAILURE,
|
||||
priority=1,
|
||||
),
|
||||
# Report if fallback also fails
|
||||
EdgeSpec(
|
||||
id="fallback-to-error",
|
||||
source="fallback-cache",
|
||||
target="report-error",
|
||||
condition=EdgeCondition.ON_FAILURE,
|
||||
priority=1,
|
||||
),
|
||||
]
|
||||
```
|
||||
|
||||
## Performance Optimization
|
||||
|
||||
### Parallel Node Execution
|
||||
|
||||
```python
|
||||
# Use multiple edges from same source for parallel execution
|
||||
edges = [
|
||||
EdgeSpec(
|
||||
id="start-to-search1",
|
||||
source="start",
|
||||
target="search-source-1",
|
||||
condition=EdgeCondition.ALWAYS,
|
||||
),
|
||||
EdgeSpec(
|
||||
id="start-to-search2",
|
||||
source="start",
|
||||
target="search-source-2",
|
||||
condition=EdgeCondition.ALWAYS,
|
||||
),
|
||||
EdgeSpec(
|
||||
id="start-to-search3",
|
||||
source="start",
|
||||
target="search-source-3",
|
||||
condition=EdgeCondition.ALWAYS,
|
||||
),
|
||||
# Converge results
|
||||
EdgeSpec(
|
||||
id="search1-to-merge",
|
||||
source="search-source-1",
|
||||
target="merge-results",
|
||||
),
|
||||
EdgeSpec(
|
||||
id="search2-to-merge",
|
||||
source="search-source-2",
|
||||
target="merge-results",
|
||||
),
|
||||
EdgeSpec(
|
||||
id="search3-to-merge",
|
||||
source="search-source-3",
|
||||
target="merge-results",
|
||||
),
|
||||
]
|
||||
```
|
||||
|
||||
## Handoff to Testing
|
||||
|
||||
When agent is complete, transition to testing phase:
|
||||
|
||||
```python
|
||||
print("""
|
||||
✅ Agent complete: exports/my_agent/
|
||||
|
||||
Next steps:
|
||||
1. Switch to testing-agent skill
|
||||
2. Generate and approve tests
|
||||
3. Run evaluation
|
||||
4. Debug any failures
|
||||
|
||||
Command: "Test the agent at exports/my_agent/"
|
||||
""")
|
||||
```
|
||||
|
||||
### Pre-Testing Checklist
|
||||
|
||||
Before handing off to testing-agent, confirm the following (a validation sketch follows the list):
|
||||
|
||||
- [ ] Agent structure validates: `python -m agent_name validate`
|
||||
- [ ] All nodes defined in `nodes/__init__.py`
|
||||
- [ ] All edges connect valid nodes
|
||||
- [ ] Entry node specified
|
||||
- [ ] Agent can be imported: `from exports.agent_name import default_agent`
|
||||
- [ ] README.md with usage instructions
|
||||
- [ ] CLI commands work (info, validate)
|
||||
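
A minimal sketch automating the first few checks, assuming a hypothetical `my_agent` package under `exports/` and that you run it from the repo root:

```python
import os
import subprocess
import sys

agent = "my_agent"  # hypothetical package name under exports/
env = {**os.environ, "PYTHONPATH": "core:exports"}

# Structure validates and the CLI commands work
for cmd in ("validate", "info"):
    subprocess.run([sys.executable, "-m", agent, cmd], env=env, check=True)

# Agent can be imported (assumes the package exposes default_agent)
from exports.my_agent import default_agent  # noqa: E402
print("Import OK:", default_agent)
```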
|
||||
## Related Skills
|
||||
|
||||
- **building-agents-core** - Fundamental concepts
|
||||
- **building-agents-construction** - Step-by-step building
|
||||
- **testing-agent** - Test and validate agents
|
||||
- **agent-workflow** - Complete workflow orchestrator
|
||||
|
||||
---
|
||||
|
||||
**Remember: Agent is actively constructed, visible the whole time. No hidden state. No surprise exports. Just transparent, incremental file building.**
|
||||
@@ -0,0 +1,572 @@
|
||||
---
|
||||
name: setup-credentials
|
||||
description: Set up and install credentials for an agent. Detects missing credentials from agent config, collects them from the user, and stores them securely in the encrypted credential store at ~/.hive/credentials.
|
||||
license: Apache-2.0
|
||||
metadata:
|
||||
author: hive
|
||||
version: "2.1"
|
||||
type: utility
|
||||
---
|
||||
|
||||
# Setup Credentials
|
||||
|
||||
Interactive credential setup for agents with multiple authentication options. Detects what's missing, offers auth method choices, validates with health checks, and stores credentials securely.
|
||||
|
||||
## When to Use
|
||||
|
||||
- Before running or testing an agent for the first time
|
||||
- When `AgentRunner.run()` fails with "missing required credentials"
|
||||
- When a user asks to configure credentials for an agent
|
||||
- After building a new agent that uses tools requiring API keys
|
||||
|
||||
## Workflow
|
||||
|
||||
### Step 1: Identify the Agent
|
||||
|
||||
Determine which agent needs credentials. The user will typically do one of the following:
|
||||
|
||||
- Name the agent directly (e.g., "set up credentials for hubspot-agent")
|
||||
- Have an agent directory open (check `exports/` for agent dirs)
|
||||
- Be working on an agent in the current session
|
||||
|
||||
Locate the agent's directory under `exports/{agent_name}/`.
|
||||
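
When the agent isn't named explicitly, a quick directory scan narrows it down (a sketch; assumes each agent export lives directly under `exports/`):

```python
from pathlib import Path

# List candidate agent directories under exports/
candidates = sorted(p.name for p in Path("exports").iterdir() if p.is_dir())
print(f"Agent directories found: {candidates}")
```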
|
||||
### Step 2: Detect Required Credentials
|
||||
|
||||
Read the agent's configuration to determine which tools and node types it uses:
|
||||
|
||||
```python
|
||||
from core.framework.runner import AgentRunner
|
||||
|
||||
runner = AgentRunner.load("exports/{agent_name}")
|
||||
validation = runner.validate()
|
||||
|
||||
# validation.missing_credentials contains env var names
|
||||
# validation.warnings contains detailed messages with help URLs
|
||||
```
|
||||
|
||||
Alternatively, check the credential store directly:
|
||||
|
||||
```python
|
||||
from core.framework.credentials import CredentialStore
|
||||
|
||||
# Use encrypted storage (default: ~/.hive/credentials)
|
||||
store = CredentialStore.with_encrypted_storage()
|
||||
|
||||
# Check what's available
|
||||
available = store.list_credentials()
|
||||
print(f"Available credentials: {available}")
|
||||
|
||||
# Check if specific credential exists
|
||||
if store.is_available("hubspot"):
|
||||
print("HubSpot credential found")
|
||||
else:
|
||||
print("HubSpot credential missing")
|
||||
```
|
||||
|
||||
To see all known credential specs (for help URLs and setup instructions):
|
||||
|
||||
```python
|
||||
from aden_tools.credentials import CREDENTIAL_SPECS
|
||||
|
||||
for name, spec in CREDENTIAL_SPECS.items():
|
||||
print(f"{name}: env_var={spec.env_var}, aden={spec.aden_supported}")
|
||||
```
|
||||
|
||||
### Step 3: Present Auth Options for Each Missing Credential
|
||||
|
||||
For each missing credential, check what authentication methods are available:
|
||||
|
||||
```python
|
||||
from aden_tools.credentials import CREDENTIAL_SPECS
|
||||
|
||||
spec = CREDENTIAL_SPECS.get("hubspot")
|
||||
if spec:
|
||||
# Determine available auth options
|
||||
auth_options = []
|
||||
if spec.aden_supported:
|
||||
auth_options.append("aden")
|
||||
if spec.direct_api_key_supported:
|
||||
auth_options.append("direct")
|
||||
auth_options.append("custom") # Always available
|
||||
|
||||
# Get setup info
|
||||
setup_info = {
|
||||
"env_var": spec.env_var,
|
||||
"description": spec.description,
|
||||
"help_url": spec.help_url,
|
||||
"api_key_instructions": spec.api_key_instructions,
|
||||
}
|
||||
```
|
||||
|
||||
Present the available options using AskUserQuestion:
|
||||
|
||||
```
|
||||
Choose how to configure HUBSPOT_ACCESS_TOKEN:
|
||||
|
||||
1) Aden Authorization Server (Recommended)
|
||||
Secure OAuth2 flow via integration.adenhq.com
|
||||
- Quick setup with automatic token refresh
|
||||
- No need to manage API keys manually
|
||||
|
||||
2) Direct API Key
|
||||
Enter your own API key manually
|
||||
- Requires creating a HubSpot Private App
|
||||
- Full control over scopes and permissions
|
||||
|
||||
3) Custom Credential Store (Advanced)
|
||||
Programmatic configuration for CI/CD
|
||||
- For automated deployments
|
||||
- Requires manual API calls
|
||||
```
|
||||
|
||||
### Step 4: Execute Auth Flow Based on User Choice
|
||||
|
||||
#### Option 1: Aden Authorization Server
|
||||
|
||||
This is the recommended flow for supported integrations (HubSpot, etc.).
|
||||
|
||||
**How Aden OAuth Works:**
|
||||
|
||||
The ADEN_API_KEY represents a user who has already completed OAuth authorization on Aden's platform. When users sign up and connect integrations on Aden, those OAuth tokens are stored server-side. Having an ADEN_API_KEY means:
|
||||
|
||||
1. User has an Aden account
|
||||
2. User has already authorized integrations (HubSpot, etc.) via OAuth on Aden
|
||||
3. We just need to sync those credentials down to the local credential store
|
||||
|
||||
**4.1a. Check for ADEN_API_KEY**
|
||||
|
||||
```python
|
||||
import os
|
||||
aden_key = os.environ.get("ADEN_API_KEY")
|
||||
```
|
||||
|
||||
If not set, guide user to get one from Aden (this is where they do OAuth):
|
||||
|
||||
```python
|
||||
from aden_tools.credentials import open_browser, get_aden_setup_url
|
||||
|
||||
# Open browser to Aden - user will sign up and connect integrations there
|
||||
url = get_aden_setup_url() # https://integration.adenhq.com/setup
|
||||
success, msg = open_browser(url)
|
||||
|
||||
print("Please sign in to Aden and connect your integrations (HubSpot, etc.).")
|
||||
print("Once done, copy your API key and return here.")
|
||||
```
|
||||
|
||||
Ask user to provide the ADEN_API_KEY they received.
|
||||
|
||||
**4.1b. Save ADEN_API_KEY to Shell Config**
|
||||
|
||||
With user approval, persist ADEN_API_KEY to their shell config:
|
||||
|
||||
```python
|
||||
from aden_tools.credentials import (
|
||||
detect_shell,
|
||||
add_env_var_to_shell_config,
|
||||
get_shell_source_command,
|
||||
)
|
||||
|
||||
shell_type = detect_shell() # 'bash', 'zsh', or 'unknown'
|
||||
|
||||
# Ask user for approval before modifying shell config
|
||||
# If approved:
|
||||
success, config_path = add_env_var_to_shell_config(
|
||||
"ADEN_API_KEY",
|
||||
user_provided_key,
|
||||
comment="Aden authorization server API key"
|
||||
)
|
||||
|
||||
if success:
|
||||
source_cmd = get_shell_source_command()
|
||||
print(f"Saved to {config_path}")
|
||||
print(f"Run: {source_cmd}")
|
||||
```
|
||||
|
||||
Also save to `~/.hive/configuration.json` for the framework:
|
||||
|
||||
```python
|
||||
import json
|
||||
from pathlib import Path
|
||||
|
||||
config_path = Path.home() / ".hive" / "configuration.json"
|
||||
config = json.loads(config_path.read_text()) if config_path.exists() else {}
|
||||
|
||||
config["aden"] = {
|
||||
"api_key_configured": True,
|
||||
"api_url": "https://api.adenhq.com"
|
||||
}
|
||||
|
||||
config_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
config_path.write_text(json.dumps(config, indent=2))
|
||||
```
|
||||
|
||||
**4.1c. Sync Credentials from Aden Server**
|
||||
|
||||
Since the user has already authorized integrations on Aden, use the one-liner factory method:
|
||||
|
||||
```python
|
||||
from core.framework.credentials import CredentialStore
|
||||
|
||||
# This single call handles everything:
|
||||
# - Creates encrypted local storage at ~/.hive/credentials
|
||||
# - Configures Aden client from ADEN_API_KEY env var
|
||||
# - Syncs all credentials from Aden server automatically
|
||||
store = CredentialStore.with_aden_sync(
|
||||
base_url="https://api.adenhq.com",
|
||||
auto_sync=True, # Syncs on creation
|
||||
)
|
||||
|
||||
# Check what was synced
|
||||
synced = store.list_credentials()
|
||||
print(f"Synced credentials: {synced}")
|
||||
|
||||
# If the required credential wasn't synced, the user hasn't authorized it on Aden yet
|
||||
if "hubspot" not in synced:
|
||||
print("HubSpot not found in your Aden account.")
|
||||
print("Please visit https://integration.adenhq.com to connect HubSpot, then try again.")
|
||||
```
|
||||
|
||||
For more control over the sync process:
|
||||
|
||||
```python
|
||||
from core.framework.credentials import CredentialStore
|
||||
from core.framework.credentials.aden import (
|
||||
AdenCredentialClient,
|
||||
AdenClientConfig,
|
||||
AdenSyncProvider,
|
||||
)
|
||||
|
||||
# Create client (API key loaded from ADEN_API_KEY env var)
|
||||
client = AdenCredentialClient(AdenClientConfig(
|
||||
base_url="https://api.adenhq.com",
|
||||
))
|
||||
|
||||
# Create provider and store
|
||||
provider = AdenSyncProvider(client=client)
|
||||
store = CredentialStore.with_encrypted_storage()
|
||||
|
||||
# Manual sync
|
||||
synced_count = provider.sync_all(store)
|
||||
print(f"Synced {synced_count} credentials from Aden")
|
||||
```
|
||||
|
||||
**4.1d. Run Health Check**
|
||||
|
||||
```python
|
||||
from aden_tools.credentials import check_credential_health
|
||||
|
||||
# Get the token from the store
|
||||
cred = store.get_credential("hubspot")
|
||||
token = cred.keys["access_token"].value.get_secret_value()
|
||||
|
||||
result = check_credential_health("hubspot", token)
|
||||
if result.valid:
|
||||
print("HubSpot credentials validated successfully!")
|
||||
else:
|
||||
print(f"Validation failed: {result.message}")
|
||||
# Offer to retry the OAuth flow
|
||||
```
|
||||
|
||||
#### Option 2: Direct API Key
|
||||
|
||||
For users who prefer manual API key management.
|
||||
|
||||
**4.2a. Show Setup Instructions**
|
||||
|
||||
```python
|
||||
from aden_tools.credentials import CREDENTIAL_SPECS
|
||||
|
||||
spec = CREDENTIAL_SPECS.get("hubspot")
|
||||
if spec and spec.api_key_instructions:
|
||||
print(spec.api_key_instructions)
|
||||
# Output:
|
||||
# To get a HubSpot Private App token:
|
||||
# 1. Go to HubSpot Settings > Integrations > Private Apps
|
||||
# 2. Click "Create a private app"
|
||||
# 3. Name your app (e.g., "Hive Agent")
|
||||
# ...
|
||||
|
||||
if spec and spec.help_url:
|
||||
print(f"More info: {spec.help_url}")
|
||||
```
|
||||
|
||||
**4.2b. Collect API Key from User**
|
||||
|
||||
Use AskUserQuestion to securely collect the API key:
|
||||
|
||||
```
|
||||
Please provide your HubSpot access token:
|
||||
(This will be stored securely in ~/.hive/credentials)
|
||||
```
|
||||
|
||||
**4.2c. Run Health Check Before Storing**
|
||||
|
||||
```python
|
||||
from aden_tools.credentials import check_credential_health
|
||||
|
||||
result = check_credential_health("hubspot", user_provided_token)
|
||||
if not result.valid:
|
||||
print(f"Warning: {result.message}")
|
||||
# Ask user if they want to:
|
||||
# 1. Try a different token
|
||||
# 2. Continue anyway (not recommended)
|
||||
```
|
||||
|
||||
**4.2d. Store in Encrypted Credential Store**
|
||||
|
||||
```python
|
||||
from core.framework.credentials import CredentialStore, CredentialObject, CredentialKey
|
||||
from pydantic import SecretStr
|
||||
|
||||
store = CredentialStore.with_encrypted_storage()
|
||||
|
||||
cred = CredentialObject(
|
||||
id="hubspot",
|
||||
name="HubSpot Access Token",
|
||||
keys={
|
||||
"access_token": CredentialKey(
|
||||
name="access_token",
|
||||
value=SecretStr(user_provided_token),
|
||||
)
|
||||
},
|
||||
)
|
||||
store.save_credential(cred)
|
||||
```
|
||||
|
||||
**4.2e. Export to Current Session**
|
||||
|
||||
```bash
|
||||
export HUBSPOT_ACCESS_TOKEN="the-value"
|
||||
```
|
||||
|
||||
#### Option 3: Custom Credential Store (Advanced)
|
||||
|
||||
For programmatic/CI/CD setups.
|
||||
|
||||
**4.3a. Show Documentation**
|
||||
|
||||
```
|
||||
For advanced credential management, you can use the CredentialStore API directly:
|
||||
|
||||
from core.framework.credentials import CredentialStore, CredentialObject, CredentialKey
|
||||
from pydantic import SecretStr
|
||||
|
||||
store = CredentialStore.with_encrypted_storage()
|
||||
|
||||
cred = CredentialObject(
|
||||
id="hubspot",
|
||||
name="HubSpot Access Token",
|
||||
keys={"access_token": CredentialKey(name="access_token", value=SecretStr("..."))}
|
||||
)
|
||||
store.save_credential(cred)
|
||||
|
||||
For CI/CD environments:
|
||||
- Set HIVE_CREDENTIAL_KEY for encryption
|
||||
- Pre-populate ~/.hive/credentials programmatically
|
||||
- Or use environment variables directly (HUBSPOT_ACCESS_TOKEN)
|
||||
|
||||
Documentation: See core/framework/credentials/README.md
|
||||
```
|
||||
|
||||
### Step 5: Record Configuration Method
|
||||
|
||||
Track which auth method was used for each credential in `~/.hive/configuration.json`:
|
||||
|
||||
```python
|
||||
import json
|
||||
from pathlib import Path
|
||||
from datetime import datetime
|
||||
|
||||
config_path = Path.home() / ".hive" / "configuration.json"
|
||||
config = json.loads(config_path.read_text()) if config_path.exists() else {}
|
||||
|
||||
if "credential_methods" not in config:
|
||||
config["credential_methods"] = {}
|
||||
|
||||
config["credential_methods"]["hubspot"] = {
|
||||
"method": "aden", # or "direct" or "custom"
|
||||
"configured_at": datetime.now().isoformat(),
|
||||
}
|
||||
|
||||
config_path.write_text(json.dumps(config, indent=2))
|
||||
```
|
||||
|
||||
### Step 6: Verify All Credentials
|
||||
|
||||
Run validation again to confirm everything is set:
|
||||
|
||||
```python
|
||||
runner = AgentRunner.load("exports/{agent_name}")
|
||||
validation = runner.validate()
|
||||
assert not validation.missing_credentials, "Still missing credentials!"
|
||||
```
|
||||
|
||||
Report the result to the user.
|
||||
|
||||
## Health Check Reference
|
||||
|
||||
Health checks validate credentials by making lightweight API calls:
|
||||
|
||||
| Credential | Endpoint | What It Checks |
|
||||
| -------------- | --------------------------------------- | --------------------------------- |
|
||||
| `hubspot` | `GET /crm/v3/objects/contacts?limit=1` | Bearer token validity, CRM scopes |
|
||||
| `brave_search` | `GET /res/v1/web/search?q=test&count=1` | API key validity |
|
||||
|
||||
```python
|
||||
from aden_tools.credentials import check_credential_health, HealthCheckResult
|
||||
|
||||
result: HealthCheckResult = check_credential_health("hubspot", token_value)
|
||||
# result.valid: bool
|
||||
# result.message: str
|
||||
# result.details: dict (status_code, rate_limited, etc.)
|
||||
```
|
||||
|
||||
## Encryption Key (HIVE_CREDENTIAL_KEY)
|
||||
|
||||
The encrypted credential store requires `HIVE_CREDENTIAL_KEY` to encrypt/decrypt credentials.
|
||||
|
||||
- If the user doesn't have one, `EncryptedFileStorage` will auto-generate one and log it
|
||||
- The user MUST persist this key (e.g., in `~/.bashrc` or a secrets manager)
|
||||
- Without this key, stored credentials cannot be decrypted
|
||||
- This is the ONLY secret that should live in `~/.bashrc` or environment config
|
||||
|
||||
If `HIVE_CREDENTIAL_KEY` is not set:
|
||||
|
||||
1. Let the store generate one
|
||||
2. Tell the user to save it: `export HIVE_CREDENTIAL_KEY="{generated_key}"`
|
||||
3. Recommend adding it to `~/.bashrc` or their shell profile
|
||||
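
A sketch of that flow, relying on the auto-generation behavior described above (the generated key is surfaced in the store's log output, not returned by an API we can show here):

```python
import os

if not os.environ.get("HIVE_CREDENTIAL_KEY"):
    from core.framework.credentials import CredentialStore

    # EncryptedFileStorage auto-generates a key and logs it on first use
    store = CredentialStore.with_encrypted_storage()
    print("A new HIVE_CREDENTIAL_KEY was generated; copy it from the log output above.")
    print('Persist it, e.g.: export HIVE_CREDENTIAL_KEY="<generated_key>"')
```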
|
||||
## Security Rules
|
||||
|
||||
- **NEVER** log, print, or echo credential values in tool output
|
||||
- **NEVER** store credentials in plaintext files, git-tracked files, or agent configs
|
||||
- **NEVER** hardcode credentials in source code
|
||||
- **ALWAYS** use `SecretStr` from Pydantic when handling credential values in Python
|
||||
- **ALWAYS** use the encrypted credential store (`~/.hive/credentials`) for persistence
|
||||
- **ALWAYS** run health checks before storing credentials (when possible)
|
||||
- **ALWAYS** verify credentials were stored by re-running validation, not by reading them back
|
||||
- When modifying `~/.bashrc` or `~/.zshrc`, confirm with the user first
|
||||
|
||||
## Credential Sources Reference
|
||||
|
||||
All credential specs are defined in `tools/src/aden_tools/credentials/`:
|
||||
|
||||
| File | Category | Credentials | Aden Supported |
|
||||
| ----------------- | ------------- | --------------------------------------------- | -------------- |
|
||||
| `llm.py` | LLM Providers | `anthropic` | No |
|
||||
| `search.py` | Search Tools | `brave_search`, `google_search`, `google_cse` | No |
|
||||
| `integrations.py` | Integrations | `hubspot` | Yes |
|
||||
|
||||
**Note:** Additional LLM providers (Cerebras, Groq, OpenAI) are handled by LiteLLM via environment
|
||||
variables (`CEREBRAS_API_KEY`, `GROQ_API_KEY`, `OPENAI_API_KEY`) but are not yet in CREDENTIAL_SPECS.
|
||||
Add them to `llm.py` as needed.
|
||||
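
A hypothetical sketch of such an addition. The field names are inferred from how specs are used elsewhere in this skill, so verify them against the real `CredentialSpec` definition before copying:

```python
# Inside tools/src/aden_tools/credentials/llm.py, next to the existing specs.
# Field names are assumptions based on usage above; check CredentialSpec.
openai_spec = CredentialSpec(
    env_var="OPENAI_API_KEY",
    description="OpenAI API key for LLM calls",
    help_url="https://platform.openai.com/api-keys",
    api_key_instructions="Create a key at platform.openai.com/api-keys and paste it here.",
    aden_supported=False,
    direct_api_key_supported=True,
)
```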
|
||||
To check what's registered:
|
||||
|
||||
```python
|
||||
from aden_tools.credentials import CREDENTIAL_SPECS
|
||||
for name, spec in CREDENTIAL_SPECS.items():
|
||||
print(f"{name}: aden={spec.aden_supported}, direct={spec.direct_api_key_supported}")
|
||||
```
|
||||
|
||||
## Migration: CredentialManager → CredentialStore
|
||||
|
||||
**CredentialManager is deprecated.** Use CredentialStore instead.
|
||||
|
||||
| Old (Deprecated) | New (Recommended) |
|
||||
| ----------------------------------------- | -------------------------------------------------------------------- |
|
||||
| `CredentialManager()` | `CredentialStore.with_encrypted_storage()` |
|
||||
| `creds.get("hubspot")` | `store.get("hubspot")` or `store.get_key("hubspot", "access_token")` |
|
||||
| `creds.validate_for_tools(tools)` | Use `store.is_available(cred_id)` per credential |
|
||||
| `creds.get_auth_options("hubspot")` | Check `CREDENTIAL_SPECS["hubspot"].aden_supported` |
|
||||
| `creds.get_setup_instructions("hubspot")` | Access `CREDENTIAL_SPECS["hubspot"]` directly |
|
||||
|
||||
**Why migrate?**
|
||||
|
||||
- **CredentialStore** supports encrypted storage, multi-key credentials, template resolution, and automatic token refresh
|
||||
- **CredentialManager** only reads from environment variables and .env files (no encryption, no refresh)
|
||||
- **CredentialStoreAdapter** exists for backward compatibility during migration
|
||||
|
||||
```python
|
||||
# Old way (deprecated)
|
||||
from aden_tools.credentials import CredentialManager
|
||||
creds = CredentialManager()
|
||||
token = creds.get("hubspot")
|
||||
|
||||
# New way (recommended)
|
||||
from core.framework.credentials import CredentialStore
|
||||
store = CredentialStore.with_encrypted_storage()
|
||||
token = store.get("hubspot")
|
||||
|
||||
# With Aden sync (recommended for OAuth integrations)
|
||||
store = CredentialStore.with_aden_sync()
|
||||
token = store.get_key("hubspot", "access_token")
|
||||
```
|
||||
|
||||
## Example Session
|
||||
|
||||
```
|
||||
User: /setup-credentials for my hubspot-agent
|
||||
|
||||
Agent: Let me check what credentials your hubspot-agent needs.
|
||||
|
||||
[Runs validation, finds ANTHROPIC_API_KEY and HUBSPOT_ACCESS_TOKEN missing]
|
||||
|
||||
Agent: Your hubspot-agent requires 2 credentials:
|
||||
|
||||
1. ANTHROPIC_API_KEY - Anthropic API key for LLM calls
|
||||
2. HUBSPOT_ACCESS_TOKEN - HubSpot access token
|
||||
|
||||
Let's set these up one at a time.
|
||||
|
||||
--- Setting up ANTHROPIC_API_KEY ---
|
||||
|
||||
This credential only supports direct API key entry.
|
||||
|
||||
[Shows instructions for getting Anthropic API key]
|
||||
[AskUserQuestion: "Please provide your Anthropic API key:"]
|
||||
[User provides key]
|
||||
[Stores in credential store, exports to env]
|
||||
|
||||
--- Setting up HUBSPOT_ACCESS_TOKEN ---
|
||||
|
||||
[AskUserQuestion with 3 options]
|
||||
|
||||
Choose how to configure HUBSPOT_ACCESS_TOKEN:
|
||||
1) Aden Authorization Server (Recommended)
|
||||
2) Direct API Key
|
||||
3) Custom Credential Store (Advanced)
|
||||
|
||||
User: 1 (Aden)
|
||||
|
||||
Agent: Great! Let me check if you have an Aden API key configured...
|
||||
|
||||
[Checks for ADEN_API_KEY, not found]
|
||||
[Opens browser to integration.adenhq.com/setup]
|
||||
|
||||
Agent: I've opened Aden's setup page. Please:
|
||||
1. Sign in or create an account
|
||||
2. Connect your HubSpot account (OAuth happens on Aden's site)
|
||||
3. Copy the API key shown after setup
|
||||
|
||||
[AskUserQuestion: "Please provide your Aden API key:"]
|
||||
[User provides key]
|
||||
|
||||
Agent: Would you like me to save this to your shell config (~/.zshrc) for future sessions?
|
||||
|
||||
[User approves]
|
||||
[Saves to ~/.zshrc and ~/.hive/configuration.json]
|
||||
|
||||
Agent: Let me sync your credentials from Aden...
|
||||
|
||||
[Syncs credentials from Aden server - OAuth already done on Aden's side]
|
||||
[Runs health check]
|
||||
|
||||
Agent: HubSpot credentials validated successfully!
|
||||
|
||||
All credentials are now configured:
|
||||
- ANTHROPIC_API_KEY: Stored in encrypted credential store
|
||||
- HUBSPOT_ACCESS_TOKEN: Synced from Aden (OAuth completed on Aden)
|
||||
- Validation passed - your agent is ready to run!
|
||||
```
|
||||
@@ -1,225 +0,0 @@
|
||||
# Integration Test Reporting Skill
|
||||
|
||||
Run the Level 2 dummy agent integration test suite and produce a detailed HTML report with per-test input → outcome analysis.
|
||||
|
||||
## Trigger
|
||||
|
||||
User wants to run integration tests and see results:
|
||||
- `/test-reporting`
|
||||
- `/test-reporting test_component_queen_live.py`
|
||||
- `/test-reporting --all`
|
||||
|
||||
## SOP: Running Tests
|
||||
|
||||
### Step 1: Select Scope
|
||||
|
||||
If the user provides a specific test file or pattern, use it. Otherwise run the full suite.
|
||||
|
||||
```bash
|
||||
# Full suite
|
||||
cd core && echo "1" | uv run python tests/dummy_agents/run_all.py --interactive 2>&1
|
||||
|
||||
# Specific file (requires manual provider setup)
|
||||
cd core && uv run python -c "
|
||||
import sys
|
||||
sys.path.insert(0, '.')
|
||||
from tests.dummy_agents.run_all import detect_available
|
||||
from tests.dummy_agents.conftest import set_llm_selection
|
||||
|
||||
avail = detect_available()
|
||||
claude = [p for p in avail if 'Claude Code' in p['name']]
|
||||
if not claude:
|
||||
avail_names = [p['name'] for p in avail]
|
||||
raise RuntimeError(f'No Claude Code subscription. Available: {avail_names}')
|
||||
provider = claude[0]
|
||||
set_llm_selection(
|
||||
model=provider['model'],
|
||||
api_key=provider['api_key'],
|
||||
extra_headers=provider.get('extra_headers'),
|
||||
api_base=provider.get('api_base'),
|
||||
)
|
||||
|
||||
import pytest
|
||||
sys.exit(pytest.main([
|
||||
'tests/dummy_agents/TEST_FILE_HERE',
|
||||
'-v', '--override-ini=asyncio_mode=auto', '--no-header', '--tb=long',
|
||||
'--log-cli-level=WARNING', '--junitxml=/tmp/hive_test_results.xml',
|
||||
]))
|
||||
"
|
||||
```
|
||||
|
||||
### Step 2: Collect Results
|
||||
|
||||
After the test run completes, collect:
|
||||
1. **JUnit XML** from `--junitxml` output (if available); see the parsing sketch below
|
||||
2. **stdout/stderr** from the run
|
||||
3. **Summary table** from `run_all.py` output (the Unicode table)
|
||||
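
The JUnit XML is the most reliable source for per-test rows. A parsing sketch using only the stdlib:

```python
import xml.etree.ElementTree as ET

tree = ET.parse("/tmp/hive_test_results.xml")
rows = []
for case in tree.iter("testcase"):
    failure = case.find("failure")
    if failure is None:
        failure = case.find("error")
    skipped = case.find("skipped")
    status = "FAIL" if failure is not None else "SKIP" if skipped is not None else "PASS"
    rows.append({
        "component": (case.get("classname") or "").split(".")[-1],
        "test_name": case.get("name"),
        "status": status,
        "duration": float(case.get("time") or 0),
        "failure_detail": failure.text if failure is not None else "",
    })
print(f"Collected {len(rows)} test results")
```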
|
||||
### Step 3: Generate HTML Report
|
||||
|
||||
Write the report to `/tmp/hive_integration_test_report.html`.
|
||||
|
||||
The report MUST include these sections:
|
||||
|
||||
#### Header
|
||||
- Run timestamp (ISO 8601)
|
||||
- Provider used (model name, source)
|
||||
- Total tests / passed / failed / skipped
|
||||
- Total wall-clock time
|
||||
- Overall verdict: PASS (all green) or FAIL (with count)
|
||||
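
Given the rows collected in Step 2, the header summary falls out directly (a sketch continuing the parsing example above):

```python
from datetime import datetime, timezone

total = len(rows)
passed = sum(r["status"] == "PASS" for r in rows)
failed = sum(r["status"] == "FAIL" for r in rows)
verdict = "PASS" if failed == 0 else f"FAIL ({failed} failures)"
header = {
    "timestamp": datetime.now(timezone.utc).isoformat(),
    "total": total,
    "passed": passed,
    "failed": failed,
    "verdict": verdict,
}
```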
|
||||
#### Per-Test Table
|
||||
|
||||
For EVERY test (not just failures), include a row with:
|
||||
|
||||
| Column | Description |
|
||||
|--------|-------------|
|
||||
| Component | Test file grouping (e.g., `component_queen_live`) |
|
||||
| Test Name | Function name (e.g., `test_queen_starts_in_planning_without_worker`) |
|
||||
| Status | PASS / FAIL / SKIP / ERROR with color badge |
|
||||
| Duration | Wall-clock seconds |
|
||||
| What | One-line description of what the test verifies |
|
||||
| How | How it works (setup → action → assertion) |
|
||||
| Why | Why this test matters (what bug/behavior it catches) |
|
||||
| Input | The input data or configuration (graph spec, initial prompt, phase, etc.) |
|
||||
| Expected Outcome | What the test asserts |
|
||||
| Actual Outcome | What actually happened (PASS: matches expected / FAIL: actual vs expected) |
|
||||
| Failure Detail | For failures only: full traceback + diagnosis |
|
||||
|
||||
#### What / How / Why Descriptions
|
||||
|
||||
These MUST be derived from the test function's docstring and code. Read each test file to extract:
|
||||
- **What**: From the docstring first line
|
||||
- **How**: From the test body (what fixtures, what graph, what assertions)
|
||||
- **Why**: From the docstring body or "Why this matters" section in the test module
|
||||
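
A docstring-extraction sketch (the test directory path assumes the `cd core` layout from Step 1):

```python
import ast
from pathlib import Path

for path in sorted(Path("core/tests/dummy_agents").glob("test_component_*.py")):
    tree = ast.parse(path.read_text())
    for node in ast.walk(tree):
        if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)) and node.name.startswith("test_"):
            doc = ast.get_docstring(node) or ""
            first, _, rest = doc.partition("\n")
            # "What" comes from the first docstring line; "Why" from the remainder.
            # "How" still requires reading the test body.
            print(f"{path.stem}::{node.name}")
            print(f"  What: {first or '(no docstring)'}")
            print(f"  Why:  {rest.strip()[:120]}")
```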
|
||||
Use these mappings for the component test files:
|
||||
|
||||
```
|
||||
test_component_llm.py → "LLM Provider" — streaming, tool calling, tokens
|
||||
test_component_tools.py → "Tool Registry + MCP" — connection, execution
|
||||
test_component_event_loop.py → "EventLoopNode" — iteration, output, stall
|
||||
test_component_edges.py → "Edge Evaluation" — conditional, priority
|
||||
test_component_conversation.py → "Conversation Persistence" — storage, cursor
|
||||
test_component_escalation.py → "Escalation Flow" — worker→queen signaling
|
||||
test_component_continuous.py → "Continuous Mode" — conversation threading
|
||||
test_component_queen.py → "Queen Phase (Unit)" — phase state, tools, events
|
||||
test_component_queen_live.py → "Queen Phase (Live)" — real queen, real LLM
|
||||
test_component_queen_state_machine.py → "Queen State Machine" — edge cases, races
|
||||
test_component_worker_comms.py → "Worker Communication" — events, data flow
|
||||
test_component_strict_outcomes.py → "Strict Outcomes" — exact path, output, quality
|
||||
```
|
||||
|
||||
#### HTML Template
|
||||
|
||||
Use this structure:
|
||||
|
||||
```html
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<title>Hive Integration Test Report — {timestamp}</title>
|
||||
<style>
|
||||
:root { --pass: #22c55e; --fail: #ef4444; --skip: #f59e0b; --bg: #0f172a; --surface: #1e293b; --text: #e2e8f0; --muted: #94a3b8; --border: #334155; }
|
||||
* { box-sizing: border-box; margin: 0; padding: 0; }
|
||||
body { font-family: 'SF Mono', 'Fira Code', monospace; background: var(--bg); color: var(--text); padding: 2rem; line-height: 1.6; }
|
||||
h1, h2, h3 { font-weight: 600; }
|
||||
h1 { font-size: 1.5rem; margin-bottom: 1rem; }
|
||||
h2 { font-size: 1.2rem; margin: 2rem 0 1rem; border-bottom: 1px solid var(--border); padding-bottom: 0.5rem; }
|
||||
.summary { display: grid; grid-template-columns: repeat(auto-fit, minmax(150px, 1fr)); gap: 1rem; margin-bottom: 2rem; }
|
||||
.card { background: var(--surface); padding: 1rem; border-radius: 8px; border: 1px solid var(--border); }
|
||||
.card .label { color: var(--muted); font-size: 0.75rem; text-transform: uppercase; }
|
||||
.card .value { font-size: 1.5rem; font-weight: 700; margin-top: 0.25rem; }
|
||||
.card .value.pass { color: var(--pass); }
|
||||
.card .value.fail { color: var(--fail); }
|
||||
table { width: 100%; border-collapse: collapse; font-size: 0.8rem; }
|
||||
th { background: var(--surface); position: sticky; top: 0; text-align: left; padding: 0.5rem; border-bottom: 2px solid var(--border); color: var(--muted); text-transform: uppercase; font-size: 0.7rem; }
|
||||
td { padding: 0.5rem; border-bottom: 1px solid var(--border); vertical-align: top; }
|
||||
tr:hover { background: rgba(255,255,255,0.03); }
|
||||
.badge { display: inline-block; padding: 2px 8px; border-radius: 4px; font-size: 0.7rem; font-weight: 700; }
|
||||
.badge.pass { background: rgba(34,197,94,0.2); color: var(--pass); }
|
||||
.badge.fail { background: rgba(239,68,68,0.2); color: var(--fail); }
|
||||
.badge.skip { background: rgba(245,158,11,0.2); color: var(--skip); }
|
||||
.detail { background: #1a1a2e; padding: 0.75rem; border-radius: 4px; margin-top: 0.5rem; font-size: 0.75rem; white-space: pre-wrap; overflow-x: auto; max-height: 200px; overflow-y: auto; }
|
||||
.component-header { background: var(--surface); padding: 0.75rem 0.5rem; font-weight: 600; font-size: 0.85rem; }
|
||||
.meta { color: var(--muted); font-size: 0.75rem; }
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<h1>Hive Integration Test Report</h1>
|
||||
<p class="meta">Generated: {timestamp} | Provider: {provider} | Duration: {duration}s</p>
|
||||
|
||||
<div class="summary">
|
||||
<div class="card"><div class="label">Total</div><div class="value">{total}</div></div>
|
||||
<div class="card"><div class="label">Passed</div><div class="value pass">{passed}</div></div>
|
||||
<div class="card"><div class="label">Failed</div><div class="value fail">{failed}</div></div>
|
||||
<div class="card"><div class="label">Verdict</div><div class="value {verdict_class}">{verdict}</div></div>
|
||||
</div>
|
||||
|
||||
<h2>Test Results</h2>
|
||||
<table>
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Component</th>
|
||||
<th>Test</th>
|
||||
<th>Status</th>
|
||||
<th>Time</th>
|
||||
<th>What</th>
|
||||
<th>Input → Expected → Actual</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<!-- For each test: -->
|
||||
<tr>
|
||||
<td>{component}</td>
|
||||
<td>{test_name}</td>
|
||||
<td><span class="badge {status_class}">{status}</span></td>
|
||||
<td>{duration}s</td>
|
||||
<td>{what_description}</td>
|
||||
<td>
|
||||
<strong>Input:</strong> {input_description}<br>
|
||||
<strong>Expected:</strong> {expected_outcome}<br>
|
||||
<strong>Actual:</strong> {actual_outcome}
|
||||
<!-- If failed: -->
|
||||
<div class="detail">{failure_traceback}</div>
|
||||
</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
|
||||
<h2>Failure Analysis</h2>
|
||||
<!-- Only if there are failures -->
|
||||
<p>For each failure, provide:</p>
|
||||
<ul>
|
||||
<li><strong>Root cause:</strong> Why it failed</li>
|
||||
<li><strong>Impact:</strong> What this means for the system</li>
|
||||
<li><strong>Suggested fix:</strong> How to address it</li>
|
||||
</ul>
|
||||
|
||||
</body>
|
||||
</html>
|
||||
```
|
||||
|
||||
### Step 4: Output
|
||||
|
||||
1. Write the HTML file to `/tmp/hive_integration_test_report.html`
|
||||
2. Print the file path so the user can open it
|
||||
3. Print a concise summary to the terminal:
|
||||
```
|
||||
Test Report: /tmp/hive_integration_test_report.html
|
||||
Result: 74/76 PASSED (2 failures)
|
||||
Failures:
|
||||
- parallel_merge::test_parallel_disjoint_output_keys
|
||||
- worker::test_worker_timestamped_note_artifact
|
||||
```
|
||||
|
||||
## Key Rules
|
||||
|
||||
1. ALWAYS use `--junitxml` when running pytest to get structured results
|
||||
2. ALWAYS read the test source files to populate What/How/Why columns — do not guess
|
||||
3. For Input/Expected/Actual, extract from the test's graph spec, assertions, and result
|
||||
4. Color-code everything: green for pass, red for fail, amber for skip
|
||||
5. Include the full traceback for failures in a scrollable `<div class="detail">`
|
||||
6. Group tests by component (file name) with a visual separator
|
||||
7. The report must be self-contained HTML (no external CSS/JS dependencies)
|
||||
File diff suppressed because it is too large
@@ -0,0 +1,351 @@
|
||||
# Example: Testing a YouTube Research Agent
|
||||
|
||||
This example walks through testing a YouTube research agent that finds relevant videos based on a topic.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- Agent built with building-agents skill at `exports/youtube-research/`
|
||||
- Goal defined with success criteria and constraints
|
||||
|
||||
## Step 1: Load the Goal
|
||||
|
||||
First, load the goal that was defined during the Goal stage:
|
||||
|
||||
```json
|
||||
{
|
||||
"id": "youtube-research",
|
||||
"name": "YouTube Research Agent",
|
||||
"description": "Find relevant YouTube videos on a given topic",
|
||||
"success_criteria": [
|
||||
{
|
||||
"id": "find_videos",
|
||||
"description": "Find 3-5 relevant videos",
|
||||
"metric": "video_count",
|
||||
"target": "3-5",
|
||||
"weight": 1.0
|
||||
},
|
||||
{
|
||||
"id": "relevance",
|
||||
"description": "Videos must be relevant to the topic",
|
||||
"metric": "relevance_score",
|
||||
"target": ">0.8",
|
||||
"weight": 0.8
|
||||
}
|
||||
],
|
||||
"constraints": [
|
||||
{
|
||||
"id": "api_limits",
|
||||
"description": "Must not exceed YouTube API rate limits",
|
||||
"constraint_type": "hard",
|
||||
"category": "technical"
|
||||
},
|
||||
{
|
||||
"id": "content_safety",
|
||||
"description": "Must filter out inappropriate content",
|
||||
"constraint_type": "hard",
|
||||
"category": "safety"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
## Step 2: Get Constraint Test Guidelines
|
||||
|
||||
During the Goal stage (or early Eval), get test guidelines for constraints:
|
||||
|
||||
```python
|
||||
result = generate_constraint_tests(
|
||||
goal_id="youtube-research",
|
||||
goal_json='<goal JSON above>',
|
||||
agent_path="exports/youtube-research"
|
||||
)
|
||||
```
|
||||
|
||||
**The result contains guidelines (not generated tests):**
|
||||
- `output_file`: Where to write tests
|
||||
- `file_header`: Imports and fixtures to use
|
||||
- `test_template`: Format for test functions
|
||||
- `constraints_formatted`: The constraints to test
|
||||
- `test_guidelines`: Rules for writing tests
|
||||
|
||||
## Step 3: Write Constraint Tests
|
||||
|
||||
Using the guidelines, write tests directly with the Write tool:
|
||||
|
||||
```python
|
||||
# Write constraint tests using the provided file_header and guidelines
|
||||
Write(
|
||||
file_path="exports/youtube-research/tests/test_constraints.py",
|
||||
content='''
|
||||
"""Constraint tests for youtube-research agent."""
|
||||
|
||||
import os
|
||||
import pytest
|
||||
from exports.youtube_research import default_agent
|
||||
|
||||
|
||||
pytestmark = pytest.mark.skipif(
|
||||
not os.environ.get("ANTHROPIC_API_KEY") and not os.environ.get("MOCK_MODE"),
|
||||
reason="API key required for real testing."
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_constraint_api_limits_respected():
|
||||
"""Verify API rate limits are not exceeded."""
|
||||
import time
|
||||
mock_mode = bool(os.environ.get("MOCK_MODE"))
|
||||
|
||||
for i in range(10):
|
||||
result = await default_agent.run({"topic": f"test_{i}"}, mock_mode=mock_mode)
|
||||
time.sleep(0.1)
|
||||
|
||||
# Should complete without rate limit errors
|
||||
assert "rate limit" not in str(result).lower()
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_constraint_content_safety_filter():
|
||||
"""Verify inappropriate content is filtered."""
|
||||
mock_mode = bool(os.environ.get("MOCK_MODE"))
|
||||
result = await default_agent.run({"topic": "general topic"}, mock_mode=mock_mode)
|
||||
|
||||
for video in result.videos:
|
||||
assert video.safe_for_work is True
|
||||
assert video.age_restricted is False
|
||||
'''
|
||||
)
|
||||
```
|
||||
|
||||
## Step 4: Get Success Criteria Test Guidelines
|
||||
|
||||
After the agent is built, get success criteria test guidelines:
|
||||
|
||||
```python
|
||||
result = generate_success_tests(
|
||||
goal_id="youtube-research",
|
||||
goal_json='<goal JSON>',
|
||||
node_names="search_node,filter_node,rank_node,format_node",
|
||||
tool_names="youtube_search,video_details,channel_info",
|
||||
agent_path="exports/youtube-research"
|
||||
)
|
||||
```
|
||||
|
||||
## Step 5: Write Success Criteria Tests
|
||||
|
||||
Using the guidelines, write success criteria tests:
|
||||
|
||||
```python
|
||||
Write(
|
||||
file_path="exports/youtube-research/tests/test_success_criteria.py",
|
||||
content='''
|
||||
"""Success criteria tests for youtube-research agent."""
|
||||
|
||||
import os
|
||||
import pytest
|
||||
from exports.youtube_research import default_agent
|
||||
|
||||
|
||||
pytestmark = pytest.mark.skipif(
|
||||
not os.environ.get("ANTHROPIC_API_KEY") and not os.environ.get("MOCK_MODE"),
|
||||
reason="API key required for real testing."
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_find_videos_happy_path():
|
||||
"""Test finding videos for a common topic."""
|
||||
mock_mode = bool(os.environ.get("MOCK_MODE"))
|
||||
result = await default_agent.run({"topic": "machine learning"}, mock_mode=mock_mode)
|
||||
|
||||
assert result.success
|
||||
assert 3 <= len(result.videos) <= 5
|
||||
assert all(v.title for v in result.videos)
|
||||
assert all(v.video_id for v in result.videos)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_find_videos_minimum_boundary():
|
||||
"""Test at minimum threshold (3 videos)."""
|
||||
mock_mode = bool(os.environ.get("MOCK_MODE"))
|
||||
result = await default_agent.run({"topic": "niche topic xyz"}, mock_mode=mock_mode)
|
||||
|
||||
assert len(result.videos) >= 3
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_relevance_score_threshold():
|
||||
"""Test relevance scoring meets threshold."""
|
||||
mock_mode = bool(os.environ.get("MOCK_MODE"))
|
||||
result = await default_agent.run({"topic": "python programming"}, mock_mode=mock_mode)
|
||||
|
||||
for video in result.videos:
|
||||
assert video.relevance_score > 0.8
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_find_videos_no_results_graceful():
|
||||
"""Test graceful handling of no results."""
|
||||
mock_mode = bool(os.environ.get("MOCK_MODE"))
|
||||
result = await default_agent.run({"topic": "xyznonexistent123"}, mock_mode=mock_mode)
|
||||
|
||||
# Should not crash, return empty or message
|
||||
assert result.videos == [] or result.message
|
||||
'''
|
||||
)
|
||||
```
|
||||
|
||||
## Step 6: Run All Tests
|
||||
|
||||
Execute all tests:
|
||||
|
||||
```python
|
||||
result = run_tests(
|
||||
goal_id="youtube-research",
|
||||
agent_path="exports/youtube-research",
|
||||
test_types='["all"]',
|
||||
parallel=4
|
||||
)
|
||||
```
|
||||
|
||||
**Results:**
|
||||
|
||||
```json
|
||||
{
|
||||
"goal_id": "youtube-research",
|
||||
"overall_passed": false,
|
||||
"summary": {
|
||||
"total": 6,
|
||||
"passed": 5,
|
||||
"failed": 1,
|
||||
"pass_rate": "83.3%"
|
||||
},
|
||||
"duration_ms": 4521,
|
||||
"results": [
|
||||
{"test_id": "test_constraint_api_001", "passed": true, "duration_ms": 1234},
|
||||
{"test_id": "test_constraint_content_001", "passed": true, "duration_ms": 456},
|
||||
{"test_id": "test_success_001", "passed": true, "duration_ms": 789},
|
||||
{"test_id": "test_success_002", "passed": true, "duration_ms": 654},
|
||||
{"test_id": "test_success_003", "passed": true, "duration_ms": 543},
|
||||
{"test_id": "test_success_004", "passed": false, "duration_ms": 845,
|
||||
"error_category": "IMPLEMENTATION_ERROR",
|
||||
"error_message": "TypeError: 'NoneType' object has no attribute 'videos'"}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
## Step 7: Debug the Failed Test
|
||||
|
||||
```python
|
||||
result = debug_test(
|
||||
goal_id="youtube-research",
|
||||
test_name="test_find_videos_no_results_graceful",
|
||||
agent_path="exports/youtube-research"
|
||||
)
|
||||
```
|
||||
|
||||
**Debug Output:**
|
||||
|
||||
```json
|
||||
{
|
||||
"test_id": "test_success_004",
|
||||
"test_name": "test_find_videos_no_results_graceful",
|
||||
"input": {"topic": "xyznonexistent123"},
|
||||
"expected": "Empty list or message",
|
||||
"actual": {"error": "TypeError: 'NoneType' object has no attribute 'videos'"},
|
||||
"passed": false,
|
||||
"error_message": "TypeError: 'NoneType' object has no attribute 'videos'",
|
||||
"error_category": "IMPLEMENTATION_ERROR",
|
||||
"stack_trace": "Traceback (most recent call last):\n File \"filter_node.py\", line 42\n for video in result.videos:\nTypeError: 'NoneType' object has no attribute 'videos'",
|
||||
"logs": [
|
||||
{"timestamp": "2026-01-20T10:00:01", "node": "search_node", "level": "INFO", "msg": "Searching for: xyznonexistent123"},
|
||||
{"timestamp": "2026-01-20T10:00:02", "node": "search_node", "level": "WARNING", "msg": "No results found"},
|
||||
{"timestamp": "2026-01-20T10:00:02", "node": "filter_node", "level": "ERROR", "msg": "NoneType error"}
|
||||
],
|
||||
"runtime_data": {
|
||||
"execution_path": ["start", "search_node", "filter_node"],
|
||||
"node_outputs": {
|
||||
"search_node": null
|
||||
}
|
||||
},
|
||||
"suggested_fix": "Add null check in filter_node before accessing .videos attribute",
|
||||
"iteration_guidance": {
|
||||
"stage": "Agent",
|
||||
"action": "Fix the code in nodes/edges",
|
||||
"restart_required": false,
|
||||
"description": "The goal is correct, but filter_node doesn't handle null results from search_node."
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Step 8: Iterate Based on Category
|
||||
|
||||
Since this is an **IMPLEMENTATION_ERROR**, we:
|
||||
|
||||
1. **Don't restart** the Goal → Agent → Eval flow
|
||||
2. **Fix the agent** using building-agents skill:
|
||||
- Modify `filter_node` to handle null results
|
||||
3. **Re-run Eval** (tests only)
|
||||
|
||||
### Fix in building-agents:
|
||||
|
||||
```python
|
||||
# Update the filter_node to handle null
|
||||
add_node(
|
||||
node_id="filter_node",
|
||||
name="Filter Node",
|
||||
description="Filter and rank videos",
|
||||
node_type="function",
|
||||
input_keys=["search_results"],
|
||||
output_keys=["filtered_videos"],
|
||||
system_prompt="""
|
||||
Filter videos by relevance.
|
||||
IMPORTANT: Handle case where search_results is None or empty.
|
||||
Return empty list if no results.
|
||||
"""
|
||||
)
|
||||
```
|
||||
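
In code terms, the required guard is small. A hypothetical sketch of the null-safe logic (the real filter lives inside the agent's generated nodes package):

```python
def filter_videos(search_results, threshold: float = 0.8) -> list:
    """Filter videos by relevance, tolerating None/empty search results."""
    if search_results is None or not getattr(search_results, "videos", None):
        return []  # graceful: empty list instead of an AttributeError
    return [v for v in search_results.videos if v.relevance_score > threshold]
```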
|
||||
### Re-export and re-test:
|
||||
|
||||
```python
|
||||
# Re-export the fixed agent
|
||||
export_graph(path="exports/youtube-research")
|
||||
|
||||
# Re-run tests
|
||||
result = run_tests(
|
||||
goal_id="youtube-research",
|
||||
agent_path="exports/youtube-research",
|
||||
test_types='["all"]'
|
||||
)
|
||||
```
|
||||
|
||||
**Updated Results:**
|
||||
|
||||
```json
|
||||
{
|
||||
"goal_id": "youtube-research",
|
||||
"overall_passed": true,
|
||||
"summary": {
|
||||
"total": 6,
|
||||
"passed": 6,
|
||||
"failed": 0,
|
||||
"pass_rate": "100.0%"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Summary
|
||||
|
||||
1. **Got guidelines** for constraint tests during Goal stage
|
||||
2. **Wrote** constraint tests using Write tool
|
||||
3. **Got guidelines** for success criteria tests during Eval stage
|
||||
4. **Wrote** success criteria tests using Write tool
|
||||
5. **Ran** tests in parallel
|
||||
6. **Debugged** the one failure
|
||||
7. **Categorized** as IMPLEMENTATION_ERROR
|
||||
8. **Fixed** the agent (not the goal)
|
||||
9. **Re-ran** Eval only (didn't restart full flow)
|
||||
10. **Passed** all tests
|
||||
|
||||
The agent is now validated and ready for production use.
|
||||
@@ -0,0 +1,20 @@
|
||||
{
|
||||
"mcpServers": {
|
||||
"agent-builder": {
|
||||
"command": "python",
|
||||
"args": ["-m", "framework.mcp.agent_builder_server"],
|
||||
"cwd": "core",
|
||||
"env": {
|
||||
"PYTHONPATH": "../tools/src"
|
||||
}
|
||||
},
|
||||
"tools": {
|
||||
"command": "python",
|
||||
"args": ["mcp_server.py", "--stdio"],
|
||||
"cwd": "tools",
|
||||
"env": {
|
||||
"PYTHONPATH": "src"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Symlink
@@ -0,0 +1 @@
|
||||
../../.claude/skills/agent-workflow
|
||||
@@ -0,0 +1 @@
|
||||
../../.claude/skills/building-agents-construction
|
||||
@@ -0,0 +1 @@
|
||||
../../.claude/skills/building-agents-core
|
||||
@@ -0,0 +1 @@
|
||||
../../.claude/skills/building-agents-patterns
|
||||
Symlink
@@ -0,0 +1 @@
|
||||
../../.claude/skills/testing-agent
|
||||
@@ -1,10 +1,9 @@
|
||||
---
|
||||
name: Bug Report
|
||||
about: Report a bug to help us improve
|
||||
title: "[Bug]: "
|
||||
labels: bug, enhancement
|
||||
title: '[Bug]: '
|
||||
labels: bug
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
## Describe the Bug
|
||||
|
||||
@@ -1,10 +1,9 @@
|
||||
---
|
||||
name: Feature Request
|
||||
about: Suggest a new feature or enhancement
|
||||
title: "[Feature]: "
|
||||
title: '[Feature]: '
|
||||
labels: enhancement
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
## Problem Statement
|
||||
|
||||
@@ -1,89 +0,0 @@
name: Integration Bounty
description: A bounty task for the integration contribution program
title: "[Bounty]: "
labels: []
body:
  - type: markdown
    attributes:
      value: |
        ## Integration Bounty

        This issue is part of the [Integration Bounty Program](../../docs/bounty-program/README.md).
        **Claim this bounty** by commenting below — a maintainer will assign you within 24 hours.

  - type: dropdown
    id: bounty-type
    attributes:
      label: Bounty Type
      options:
        - "Test a Tool (20 pts)"
        - "Write Docs (20 pts)"
        - "Code Contribution (30 pts)"
        - "New Integration (75 pts)"
    validations:
      required: true

  - type: dropdown
    id: difficulty
    attributes:
      label: Difficulty
      options:
        - Easy
        - Medium
        - Hard
    validations:
      required: true

  - type: input
    id: tool-name
    attributes:
      label: Tool Name
      description: The integration this bounty targets (e.g., `airtable`, `salesforce`)
      placeholder: e.g., airtable
    validations:
      required: true

  - type: textarea
    id: description
    attributes:
      label: Description
      description: What needs to be done to complete this bounty.
      placeholder: |
        Describe the specific task, including:
        - What the contributor needs to do
        - Links to relevant files in the repo
        - Any setup requirements (API keys, accounts, etc.)
    validations:
      required: true

  - type: textarea
    id: acceptance-criteria
    attributes:
      label: Acceptance Criteria
      description: What "done" looks like. The PR or report must meet all criteria.
      placeholder: |
        - [ ] Criterion 1
        - [ ] Criterion 2
        - [ ] CI passes
    validations:
      required: true

  - type: textarea
    id: relevant-files
    attributes:
      label: Relevant Files
      description: Links to tool directory, credential spec, health check file, etc.
      placeholder: |
        - Tool: `tools/src/aden_tools/tools/{tool_name}/`
        - Credential spec: `tools/src/aden_tools/credentials/{category}.py`
        - Health checks: `tools/src/aden_tools/credentials/health_check.py`

  - type: textarea
    id: resources
    attributes:
      label: Resources
      description: Links to API docs, examples, or guides that will help the contributor.
      placeholder: |
        - [Building Tools Guide](../../tools/BUILDING_TOOLS.md)
        - [Tool README Template](../../docs/bounty-program/templates/tool-readme-template.md)
        - API docs: https://...
@@ -1,71 +0,0 @@
---
name: Integration Request
about: Suggest a new integration
title: "[Integration]:"
labels: ''
assignees: ''

---

## Service

Name and brief description of the service and what it enables agents to do.

**Description:** [e.g., "API key for Slack Bot" — short one-liner for the credential spec]

## Credential Identity

- **credential_id:** [e.g., `slack`]
- **env_var:** [e.g., `SLACK_BOT_TOKEN`]
- **credential_key:** [e.g., `access_token`, `api_key`, `bot_token`]

## Tools

Tool function names that require this credential:

- [e.g., `slack_send_message`]
- [e.g., `slack_list_channels`]

## Auth Methods

- **Direct API key supported:** Yes / No
- **Aden OAuth supported:** Yes / No

If Aden OAuth is supported, describe the OAuth scopes/permissions required.

## How to Get the Credential

Link where users obtain the key/token:

[e.g., https://api.slack.com/apps]

Step-by-step instructions:

1. Go to ...
2. Create a ...
3. Select scopes/permissions: ...
4. Copy the key/token

## Health Check

A lightweight API call to validate the credential (no writes, no charges).

- **Endpoint:** [e.g., `https://slack.com/api/auth.test`]
- **Method:** [e.g., `GET` or `POST`]
- **Auth header:** [e.g., `Authorization: Bearer {token}` or `X-Api-Key: {key}`]
- **Parameters (if any):** [e.g., `?limit=1`]
- **200 means:** [e.g., key is valid]
- **401 means:** [e.g., invalid or expired]
- **429 means:** [e.g., rate limited but key is valid]

## Credential Group

Does this require multiple credentials configured together? (e.g., Google Custom Search needs
both an API key and a CSE ID)

- [ ] No, single credential
- [ ] Yes — list the other credential IDs in the group:

## Additional Context

Links to API docs, rate limits, free tier availability, or anything else relevant.
@@ -1,78 +0,0 @@
name: Standard Bounty
description: A bounty task for general framework contributions (not integration-specific)
title: "[Bounty]: "
labels: []
body:
  - type: markdown
    attributes:
      value: |
        ## Standard Bounty

        This issue is part of the [Bounty Program](../../docs/bounty-program/README.md).
        **Claim this bounty** by commenting below — a maintainer will assign you within 24 hours.

  - type: dropdown
    id: bounty-size
    attributes:
      label: Bounty Size
      options:
        - "Small (10 pts)"
        - "Medium (30 pts)"
        - "Large (75 pts)"
        - "Extreme (150 pts)"
    validations:
      required: true

  - type: dropdown
    id: difficulty
    attributes:
      label: Difficulty
      options:
        - Easy
        - Medium
        - Hard
    validations:
      required: true

  - type: textarea
    id: description
    attributes:
      label: Description
      description: What needs to be done to complete this bounty.
      placeholder: |
        Describe the specific task, including:
        - What the contributor needs to do
        - Links to relevant files in the repo
        - Any context or motivation for the change
    validations:
      required: true

  - type: textarea
    id: acceptance-criteria
    attributes:
      label: Acceptance Criteria
      description: What "done" looks like. The PR must meet all criteria.
      placeholder: |
        - [ ] Criterion 1
        - [ ] Criterion 2
        - [ ] CI passes
    validations:
      required: true

  - type: textarea
    id: relevant-files
    attributes:
      label: Relevant Files
      description: Links to files or directories related to this bounty.
      placeholder: |
        - `path/to/file.py`
        - `path/to/directory/`

  - type: textarea
    id: resources
    attributes:
      label: Resources
      description: Links to docs, issues, or external references that will help.
      placeholder: |
        - Related issue: #XXXX
        - Docs: https://...
@@ -1,47 +0,0 @@
name: Bounty completed
description: Awards points and notifies Discord when a bounty PR is merged

on:
  pull_request_target:
    types: [closed]

  workflow_dispatch:
    inputs:
      pr_number:
        description: "PR number to process (for missed bounties)"
        required: true
        type: number

jobs:
  bounty-notify:
    if: >
      github.event_name == 'workflow_dispatch' ||
      (github.event.pull_request.merged == true &&
      contains(join(github.event.pull_request.labels.*.name, ','), 'bounty:'))
    runs-on: ubuntu-latest
    timeout-minutes: 5
    permissions:
      contents: read
      pull-requests: read

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Setup Bun
        uses: oven-sh/setup-bun@v2
        with:
          bun-version: latest

      - name: Award XP and notify Discord
        run: bun run scripts/bounty-tracker.ts notify
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          GITHUB_REPOSITORY_OWNER: ${{ github.repository_owner }}
          GITHUB_REPOSITORY_NAME: ${{ github.event.repository.name }}
          DISCORD_WEBHOOK_URL: ${{ secrets.DISCORD_BOUNTY_WEBHOOK_URL }}
          BOT_API_URL: ${{ secrets.BOT_API_URL }}
          BOT_API_KEY: ${{ secrets.BOT_API_KEY }}
          LURKR_API_KEY: ${{ secrets.LURKR_API_KEY }}
          LURKR_GUILD_ID: ${{ secrets.LURKR_GUILD_ID }}
          PR_NUMBER: ${{ inputs.pr_number || github.event.pull_request.number }}
+24
-52
@@ -5,7 +5,7 @@ on:
    branches: [main]
  pull_request:
    branches: [main]

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true
@@ -21,24 +21,23 @@ jobs:
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'

      - name: Install uv
        uses: astral-sh/setup-uv@v4
        with:
          enable-cache: true
          cache: 'pip'

      - name: Install dependencies
        run: uv sync --project core --group dev
        run: |
          cd core
          pip install -e .
          pip install -r requirements-dev.txt

      - name: Ruff lint
        run: |
          uv run --project core ruff check core/
          uv run --project core ruff check tools/
          ruff check core/
          ruff check tools/

      - name: Ruff format
        run: |
          uv run --project core ruff format --check core/
          uv run --project core ruff format --check tools/
          ruff format --check core/
          ruff format --check tools/

  test:
    name: Test Python Framework
@@ -53,47 +52,23 @@ jobs:
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'
          cache: 'pip'

      - name: Install uv
        uses: astral-sh/setup-uv@v4
        with:
          enable-cache: true

      - name: Install dependencies and run tests
        working-directory: core
      - name: Install dependencies
        run: |
          uv sync
          uv run pytest tests/ -v --ignore=tests/dummy_agents
          cd core
          pip install -e .
          pip install -r requirements-dev.txt

  test-tools:
    name: Test Tools (${{ matrix.os }})
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-latest, windows-latest]
    steps:
      - uses: actions/checkout@v4

      - name: Setup Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'

      - name: Install uv
        uses: astral-sh/setup-uv@v4
        with:
          enable-cache: true

      - name: Install dependencies and run tests
        working-directory: tools
      - name: Run tests
        run: |
          uv sync --extra dev
          uv run pytest tests/ -v
          cd core
          pytest tests/ -v

  validate:
    name: Validate Agent Exports
    runs-on: ubuntu-latest
    needs: [lint, test, test-tools]
    needs: [lint, test]
    steps:
      - uses: actions/checkout@v4

@@ -101,16 +76,13 @@ jobs:
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'
          cache: 'pip'

      - name: Install uv
        uses: astral-sh/setup-uv@v4
        with:
          enable-cache: true

      - name: Install dependencies
        working-directory: core
        run: |
          uv sync
          cd core
          pip install -e .
          pip install -r requirements-dev.txt

      - name: Validate exported agents
        run: |
@@ -133,7 +105,7 @@ jobs:
          for agent_dir in "${agent_dirs[@]}"; do
            if [ -f "$agent_dir/agent.json" ]; then
              echo "Validating $agent_dir"
              uv run python -c "import json; json.load(open('$agent_dir/agent.json'))"
              python -c "import json; json.load(open('$agent_dir/agent.json'))"
              validated=$((validated + 1))
            fi
          done

@@ -80,13 +80,7 @@ jobs:
- help wanted: Extra attention is needed (if issue needs community input)
- backlog: Tracked for the future, but not currently planned or prioritized

### 6. Estimate size (if NOT a duplicate, spam, or invalid)
Apply exactly ONE size label to help contributors match their capacity to the task:
- "size: small": Docs, typos, single-file fixes, config changes
- "size: medium": Bug fixes with tests, adding a single tool, changes within one package
- "size: large": Cross-package changes (core + tools), new modules, complex logic, architectural refactors

You may apply multiple labels if appropriate (e.g., "bug", "size: small", and "good first issue").
You may apply multiple labels if appropriate (e.g., "bug" and "help wanted").

## Tools Available:
- mcp__github__get_issue: Get issue details

@@ -1,54 +0,0 @@
# Closes PRs that still have the `pr-requirements-warning` label
# after contributors were warned in pr-requirements.yml.
name: PR Requirements Enforcement
on:
  schedule:
    - cron: "0 0 * * *" # runs every day once at midnight
jobs:
  enforce:
    name: Close PRs still failing contribution requirements
    runs-on: ubuntu-latest
    permissions:
      pull-requests: write
      issues: write
    steps:
      - name: Close PRs still failing requirements
        uses: actions/github-script@v7
        with:
          script: |
            const { owner, repo } = context.repo;
            const prs = await github.paginate(github.rest.pulls.list, {
              owner,
              repo,
              state: "open",
              per_page: 100
            });
            for (const pr of prs) {
              // Skip draft PRs — author may still be actively working toward compliance
              if (pr.draft) continue;
              const labels = pr.labels.map(l => l.name);
              if (!labels.includes("pr-requirements-warning")) continue;
              const gracePeriod = 24 * 60 * 60 * 1000;
              const lastUpdated = new Date(pr.created_at);
              const now = new Date();
              if (now - lastUpdated < gracePeriod) {
                console.log(`Skipping PR #${pr.number} — still within grace period`);
                continue;
              }
              const prNumber = pr.number;
              const prAuthor = pr.user.login;
              await github.rest.issues.createComment({
                owner,
                repo,
                issue_number: prNumber,
                body: `Closing PR because the contribution requirements were not resolved within the 24-hour grace period.
            If this was closed in error, feel free to reopen the PR after fixing the requirements.`
              });
              await github.rest.pulls.update({
                owner,
                repo,
                pull_number: prNumber,
                state: "closed"
              });
              console.log(`Closed PR #${prNumber} by ${prAuthor} (PR requirements were not met)`);
            }
@@ -43,10 +43,9 @@ jobs:
            console.log(`  Found issue references: ${issueNumbers.length > 0 ? issueNumbers.join(', ') : 'none'}`);

            if (issueNumbers.length === 0) {
              const message = `## PR Requirements Warning
              const message = `## PR Closed - Requirements Not Met

            This PR does not meet the contribution requirements.
            If the issue is not fixed within ~24 hours, it may be automatically closed.
            This PR has been automatically closed because it doesn't meet the requirements.

            **Missing:** No linked issue found.

@@ -68,15 +67,14 @@ jobs:

            **Why is this required?** See #472 for details.`;

            const comments = await github.paginate(github.rest.issues.listComments, {
            const comments = await github.rest.issues.listComments({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: prNumber,
              per_page: 100,
            });

            const botComment = comments.find(
              (c) => c.user.type === 'Bot' && c.body.includes('PR Requirements Warning')
            const botComment = comments.data.find(
              (c) => c.user.type === 'Bot' && c.body.includes('PR Closed - Requirements Not Met')
            );

            if (!botComment) {
@@ -88,11 +86,11 @@ jobs:
              });
            }

            await github.rest.issues.addLabels({
            await github.rest.pulls.update({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: prNumber,
              labels: ['pr-requirements-warning'],
              pull_number: prNumber,
              state: 'closed',
            });

            core.setFailed('PR must reference an issue');
@@ -134,10 +132,9 @@ jobs:
              `#${i.number} (assignees: ${i.assignees.length > 0 ? i.assignees.join(', ') : 'none'})`
            ).join(', ');

            const message = `## PR Requirements Warning
            const message = `## PR Closed - Requirements Not Met

            This PR does not meet the contribution requirements.
            If the issue is not fixed within ~24 hours, it may be automatically closed.
            This PR has been automatically closed because it doesn't meet the requirements.

            **PR Author:** @${prAuthor}
            **Found issues:** ${issueList}
@@ -160,15 +157,14 @@ jobs:

            **Why is this required?** See #472 for details.`;

            const comments = await github.paginate(github.rest.issues.listComments, {
            const comments = await github.rest.issues.listComments({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: prNumber,
              per_page: 100,
            });

            const botComment = comments.find(
              (c) => c.user.type === 'Bot' && c.body.includes('PR Requirements Warning')
            const botComment = comments.data.find(
              (c) => c.user.type === 'Bot' && c.body.includes('PR Closed - Requirements Not Met')
            );

            if (!botComment) {
@@ -180,24 +176,14 @@ jobs:
              });
            }

            await github.rest.issues.addLabels({
            await github.rest.pulls.update({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: prNumber,
              labels: ['pr-requirements-warning'],
              pull_number: prNumber,
              state: 'closed',
            });

            core.setFailed('PR author must be assigned to the linked issue');
          } else {
            console.log(`PR requirements met! Issue #${issueWithAuthorAssigned} has ${prAuthor} as assignee.`);
            try {
              await github.rest.issues.removeLabel({
                owner: context.repo.owner,
                repo: context.repo.repo,
                issue_number: prNumber,
                name: "pr-requirements-warning"
              });
            } catch (error) {
              // ignore if label doesn't exist
            }
          }
        }

@@ -21,19 +21,18 @@ jobs:
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'

      - name: Install uv
        uses: astral-sh/setup-uv@v4
          cache: 'pip'

      - name: Install dependencies
        run: |
          cd core
          uv sync
          pip install -e .
          pip install -r requirements-dev.txt

      - name: Run tests
        run: |
          cd core
          uv run pytest tests/ -v
          pytest tests/ -v

      - name: Generate changelog
        id: changelog

@@ -1,42 +0,0 @@
name: Weekly bounty leaderboard
description: Posts the integration bounty leaderboard to Discord every Monday

on:
  schedule:
    # Every Monday at 9:00 UTC
    - cron: "0 9 * * 1"
  workflow_dispatch:
    inputs:
      since_date:
        description: "Only count PRs merged after this date (YYYY-MM-DD). Leave empty for all-time."
        required: false

jobs:
  leaderboard:
    runs-on: ubuntu-latest
    timeout-minutes: 5
    permissions:
      contents: read
      pull-requests: read

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Setup Bun
        uses: oven-sh/setup-bun@v2
        with:
          bun-version: latest

      - name: Post leaderboard to Discord
        run: bun run scripts/bounty-tracker.ts leaderboard
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          GITHUB_REPOSITORY_OWNER: ${{ github.repository_owner }}
          GITHUB_REPOSITORY_NAME: ${{ github.event.repository.name }}
          DISCORD_WEBHOOK_URL: ${{ secrets.DISCORD_BOUNTY_WEBHOOK_URL }}
          BOT_API_URL: ${{ secrets.BOT_API_URL }}
          BOT_API_KEY: ${{ secrets.BOT_API_KEY }}
          LURKR_API_KEY: ${{ secrets.LURKR_API_KEY }}
          LURKR_GUILD_ID: ${{ secrets.LURKR_GUILD_ID }}
          SINCE_DATE: ${{ github.event.inputs.since_date || '' }}
+3
-15
@@ -13,10 +13,6 @@ out/
.env
.env.local
.env.*.local
.venv
/venv
tools/src/uv.lock


# User configuration (copied from .example)
config.yaml
@@ -50,7 +46,6 @@ coverage/

# TypeScript
*.tsbuildinfo
vite.config.d.ts

# Python
__pycache__/
@@ -59,6 +54,7 @@ __pycache__/
*.egg-info/
.eggs/
*.egg
uv.lock

# Generated runtime data
core/data/
@@ -70,15 +66,7 @@ tmp/
temp/

exports/*
exports.old*
artifacts/*

.claude/settings.local.json
.agent-builder-sessions/*

docs/github-issues/*
core/tests/*dumps/*

screenshots/*

.gemini/*
.coverage
.venv
@@ -1,9 +0,0 @@
{"type": "connection", "event": "connect", "ts": "2026-04-04T01:10:38.245667+00:00", "profile": "default"}
{"type": "connection", "event": "hello", "details": {"version": "1.0"}, "ts": "2026-04-04T01:10:38.247207+00:00", "profile": "default"}
{"type": "connection", "event": "disconnect", "ts": "2026-04-04T01:11:57.148273+00:00", "profile": "default"}
{"type": "connection", "event": "connect", "ts": "2026-04-04T01:12:09.162378+00:00", "profile": "default"}
{"type": "connection", "event": "hello", "details": {"version": "1.0"}, "ts": "2026-04-04T01:12:09.163899+00:00", "profile": "default"}
{"type": "connection", "event": "disconnect", "ts": "2026-04-04T01:15:12.826042+00:00", "profile": "default"}
{"type": "connection", "event": "connect", "ts": "2026-04-04T01:15:30.842533+00:00", "profile": "default"}
{"type": "connection", "event": "hello", "details": {"version": "1.0"}, "ts": "2026-04-04T01:15:30.845025+00:00", "profile": "default"}
{"type": "tool_call", "tool": "browser_stop", "params": {"profile": "gcu-browser-worker:3"}, "result": {"ok": true, "status": "not_running", "profile": "gcu-browser-worker:3"}, "ok": true, "duration_ms": 0.01, "ts": "2026-04-04T01:29:04.294954+00:00", "profile": "default"}
@@ -1,3 +1,20 @@
{
  "mcpServers": {}
  "mcpServers": {
    "agent-builder": {
      "command": ".venv/bin/python",
      "args": ["-m", "framework.mcp.agent_builder_server"],
      "cwd": "core",
      "env": {
        "PYTHONPATH": "../tools/src"
      }
    },
    "tools": {
      "command": ".venv/bin/python",
      "args": ["mcp_server.py", "--stdio"],
      "cwd": "tools",
      "env": {
        "PYTHONPATH": "src:../core"
      }
    }
  }
}

@@ -1,6 +1,6 @@
repos:
  - repo: https://github.com/astral-sh/ruff-pre-commit
    rev: v0.15.0
    rev: v0.8.6
    hooks:
      - id: ruff
        name: ruff lint (core)

Vendored
+7
@@ -0,0 +1,7 @@
{
  "recommendations": [
    "charliermarsh.ruff",
    "editorconfig.editorconfig",
    "ms-python.python"
  ]
}
@@ -1,30 +0,0 @@
# Repository Guidelines

Shared agent instructions for this workspace.

## Coding Agent Notes

- When working on a GitHub Issue or PR, print the full URL at the end of the task.
- When answering questions, respond with high-confidence answers only: verify in code; do not guess.
- Do not update dependencies casually. Version bumps, patched dependencies, overrides, or vendored dependency changes require explicit approval.
- Add brief comments for tricky logic. Keep files reasonably small when practical; split or refactor large files instead of growing them indefinitely.
- If shared guardrails are available locally, review them; otherwise follow this repo's guidance.
- Use `uv` for Python execution and package management. Do not use `python` or `python3` directly unless the user explicitly asks for it.
- Prefer `uv run` for scripts and tests, and `uv pip` for package operations.

## Multi-Agent Safety

- Do not create, apply, or drop `git stash` entries unless explicitly requested.
- Do not create, remove, or modify `git worktree` checkouts unless explicitly requested.
- Do not switch branches or check out a different branch unless explicitly requested.
- When the user says `push`, you may `git pull --rebase` to integrate latest changes, but never discard other in-progress work.
- When the user says `commit`, commit only your changes. When the user says `commit all`, commit everything in grouped chunks.
- When you see unrecognized files or unrelated changes, keep going and focus on your scoped changes.

## Change Hygiene

- If staged and unstaged diffs are formatting-only, resolve them without asking.
- If a commit or push was already requested, include formatting-only follow-up changes in that same commit when practical.
- Only stop to ask for confirmation when changes are semantic and may alter behavior.
+28
-317
@@ -1,330 +1,41 @@
# Release Notes
# Changelog

## v0.7.1
All notable changes to this project will be documented in this file.

**Release Date:** March 13, 2026
**Tag:** v0.7.1
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

### Chrome-Native Browser Control
## [Unreleased]

v0.7.1 replaces Playwright with direct Chrome DevTools Protocol (CDP) integration. The GCU now launches the user's system Chrome via `open -n` on macOS, connects over CDP, and manages browser lifecycle end-to-end -- no extra browser binary required.
### Added
- Initial project structure
- React frontend (honeycomb) with Vite and TypeScript
- Node.js backend (hive) with Express and TypeScript
- Docker Compose configuration for local development
- Configuration system via `config.yaml`
- GitHub Actions CI/CD workflows
- Comprehensive documentation

---
### Changed
- N/A

### Highlights
### Deprecated
- N/A

#### System Chrome via CDP
### Removed
- N/A

The entire GCU browser stack has been rewritten:

- **Chrome finder & launcher** -- New `chrome_finder.py` discovers installed Chrome and `chrome_launcher.py` manages process lifecycle with `--remote-debugging-port`
- **Coexist with user's browser** -- `open -n` on macOS launches a separate Chrome instance so the user's tabs stay untouched
- **Dynamic viewport sizing** -- Viewport auto-sizes to the available display area, suppressing Chrome warning bars
- **Orphan cleanup** -- Chrome processes are killed on GCU server shutdown to prevent leaks
- **`--no-startup-window`** -- Chrome launches headlessly by default until a page is needed
### Fixed
- tools: Fixed web_scrape tool attempting to parse non-HTML content (PDF, JSON) as HTML (#487)

#### Per-Subagent Browser Isolation
### Security
- N/A

Each GCU subagent gets its own Chrome user-data directory, preventing cookie/session cross-contamination:
## [0.1.0] - 2025-01-13

- Unique browser profiles injected per subagent
- Profiles cleaned up after top-level GCU node execution
- Tab origin and age metadata tracked per subagent
### Added
- Initial release

#### Dummy Agent Testing Framework

A comprehensive test suite for validating agent graph patterns without LLM calls:

- 8 test modules covering echo, pipeline, branch, parallel merge, retry, feedback loop, worker, and GCU subagent patterns
- Shared fixtures and a `run_all.py` runner for CI integration
- Subagent lifecycle tests

---

### What's New

#### GCU Browser

- **Switch from Playwright to system Chrome via CDP** -- Direct CDP connection replaces Playwright dependency. (@bryanadenhq)
- **Chrome finder and launcher modules** -- `chrome_finder.py` and `chrome_launcher.py` for cross-platform Chrome discovery and process management. (@bryanadenhq)
- **Dynamic viewport sizing** -- Auto-size viewport and suppress Chrome warning bar. (@bryanadenhq)
- **Per-subagent browser profile isolation** -- Unique user-data directories per subagent with cleanup. (@bryanadenhq)
- **Tab origin/age metadata** -- Track which subagent opened each tab and when. (@bryanadenhq)
- **`browser_close_all` tool** -- Bulk tab cleanup for agents managing many pages. (@bryanadenhq)
- **Auto-track popup pages** -- Popups are automatically captured and tracked. (@bryanadenhq)
- **Auto-snapshot from browser interactions** -- Browser interaction tools return screenshots automatically. (@bryanadenhq)
- **Kill orphaned Chrome processes** -- GCU server shutdown cleans up lingering Chrome instances. (@bryanadenhq)
- **`--no-startup-window` Chrome flag** -- Prevent empty window on launch. (@bryanadenhq)
- **Launch Chrome via `open -n` on macOS** -- Coexist with the user's running browser. (@bryanadenhq)

#### Framework & Runtime

- **Session resume fix for new agents** -- Correctly resume sessions when a new agent is loaded. (@bryanadenhq)
- **Queen upsert fix** -- Prevent duplicate queen entries on session restore. (@bryanadenhq)
- **Anchor worker monitoring to queen's session ID on cold-restore** -- Worker monitors reconnect to the correct queen after restart. (@bryanadenhq)
- **Update meta.json when loading workers** -- Worker metadata stays in sync with runtime state. (@RichardTang-Aden)
- **Generate worker MCP file correctly** -- Fix MCP config generation for spawned workers. (@RichardTang-Aden)
- **Share event bus so tool events are visible to parent** -- Tool execution events propagate up to parent graphs. (@bryanadenhq)
- **Subagent activity tracking in queen status** -- Queen instructions include live subagent status. (@bryanadenhq)
- **GCU system prompt updates** -- Auto-snapshots, batching, popup tracking, and close_all guidance. (@bryanadenhq)

#### Frontend

- **Loading spinner in draft panel** -- Shows spinner during planning phase instead of blank panel. (@bryanadenhq)
- **Fix credential modal errors** -- Modal no longer eats errors; banner stays visible. (@bryanadenhq)
- **Fix credentials_required loop** -- Stop clearing the flag on modal close to prevent infinite re-prompting. (@bryanadenhq)
- **Fix "Add tab" dropdown overflow** -- Dropdown no longer hidden when many agents are open. (@prasoonmhwr)

#### Testing

- **Dummy agent test framework** -- 8 test modules (echo, pipeline, branch, parallel merge, retry, feedback loop, worker, GCU subagent) with shared fixtures and CI runner. (@bryanadenhq)
- **Subagent lifecycle tests** -- Validate subagent spawn and completion flows. (@bryanadenhq)

#### Documentation & Infrastructure

- **MCP integration PRD** -- Product requirements for MCP server registry. (@TimothyZhang7)
- **Skills registry PRD** -- Product requirements for skill registry system. (@bryanadenhq)
- **Bounty program updates** -- Standard bounty issue template and updated contributor guide. (@bryanadenhq)
- **Windows quickstart** -- Add default context limit for PowerShell setup. (@bryanadenhq)
- **Remove deprecated files** -- Clean up `setup_mcp.py`, `verify_mcp.py`, `antigravity-setup.md`, and `setup-antigravity-mcp.sh`. (@bryanadenhq)

---

### Bug Fixes

- Fix credential modal eating errors and banner staying open
- Stop clearing `credentials_required` on modal close to prevent infinite loop
- Share event bus so tool events are visible to parent graph
- Use lazy %-formatting in subagent completion log to avoid f-string in logger
- Anchor worker monitoring to queen's session ID on cold-restore
- Update meta.json when loading workers
- Generate worker MCP file correctly
- Fix "Add tab" dropdown partially hidden when creating multiple agents

---

### Community Contributors

- **Prasoon Mahawar** (@prasoonmhwr) -- Fix UI overflow on agent tab dropdown
- **Richard Tang** (@RichardTang-Aden) -- Worker MCP generation and meta.json fixes

---

### Upgrading

```bash
git pull origin main
uv sync
```

The Playwright dependency is no longer required for GCU browser operations. Chrome must be installed on the host system.

---

## v0.7.0

**Release Date:** March 5, 2026
**Tag:** v0.7.0

Session management refactor release.

---

## v0.5.1

**Release Date:** February 18, 2026
**Tag:** v0.5.1

### The Hive Gets a Brain

v0.5.1 is our most ambitious release yet. Hive agents can now **build other agents** -- the new Hive Coder meta-agent writes, tests, and fixes agent packages from natural language. The runtime grows multi-graph support so one session can orchestrate multiple agents simultaneously. The TUI gets a complete overhaul with an in-app agent picker, live streaming, and seamless escalation to the Coder. And we're now provider-agnostic: Claude Code subscriptions, OpenAI-compatible endpoints, and any LiteLLM-supported model work out of the box.

---

### Highlights

#### Hive Coder -- The Agent That Builds Agents

A native meta-agent that lives inside the framework at `core/framework/agents/hive_coder/`. Give it a natural-language specification and it produces a complete agent package -- goal definition, node prompts, edge routing, MCP tool wiring, tests, and all boilerplate files.

```bash
# Launch the Coder directly
hive code

# Or escalate from any running agent (TUI)
Ctrl+E  # or /coder in chat
```

The Coder ships with:

- **Reference documentation** -- anti-patterns, construction guide, and design patterns baked into its system prompt
- **Guardian watchdog** -- an event-driven monitor that catches agent failures and triggers automatic remediation
- **Coder Tools MCP server** -- file I/O, fuzzy-match editing, git snapshots, and sandboxed shell execution (`tools/coder_tools_server.py`)
- **Test generation** -- structural tests for forever-alive agents that don't hang on `runner.run()`

#### Multi-Graph Agent Runtime

`AgentRuntime` now supports loading, managing, and switching between multiple agent graphs within a single session. Six new lifecycle tools give agents (and the TUI) full control:

```python
# Load a second agent into the runtime
await runtime.add_graph("exports/deep_research_agent")

# Tools available to agents:
# load_agent, unload_agent, start_agent, restart_agent, list_agents, get_user_presence
```

The Hive Coder uses multi-graph internally -- when you escalate from a worker agent, the Coder loads as a separate graph while the worker stays alive in the background.

#### TUI Revamp

The Terminal UI gets a ground-up rebuild with five major additions:

- **Agent Picker** (Ctrl+A) -- tabbed modal screen for browsing Your Agents, Framework agents, and Examples with metadata badges (node count, tool count, session count, tags)
- **Runtime-optional startup** -- TUI launches without a pre-loaded agent, showing the picker on first open
- **Live streaming pane** -- dedicated RichLog widget shows LLM tokens as they arrive, replacing the old one-token-per-line display
- **PDF attachments** -- `/attach` and `/detach` commands with native OS file dialog (macOS, Linux, Windows)
- **Multi-graph commands** -- `/graphs`, `/graph <id>`, `/load <path>`, `/unload <id>` for managing agent graphs in-session

#### Provider-Agnostic LLM Support

Hive is no longer Anthropic-only. v0.5.1 adds first-class support for:

- **Claude Code subscriptions** -- `use_claude_code_subscription: true` in `~/.hive/configuration.json` reads OAuth tokens from `~/.claude/.credentials.json` with automatic refresh
- **OpenAI-compatible endpoints** -- `api_base` config routes traffic through any compatible API (Azure OpenAI, vLLM, Ollama, etc.)
- **Any LiteLLM model** -- `RuntimeConfig` now passes `api_key`, `api_base`, and `extra_kwargs` through to LiteLLM

The quickstart script auto-detects Claude Code subscriptions and ZAI Code installations.

---

### What's New

#### Architecture & Runtime

- **Hive Coder meta-agent** -- Natural-language agent builder with reference docs, guardian watchdog, and `hive code` CLI command. (@TimothyZhang7)
- **Multi-graph agent sessions** -- `add_graph`/`remove_graph` on AgentRuntime with 6 lifecycle tools (`load_agent`, `unload_agent`, `start_agent`, `restart_agent`, `list_agents`, `get_user_presence`). (@TimothyZhang7)
- **Claude Code subscription support** -- OAuth token refresh via `use_claude_code_subscription` config, auto-detection in quickstart, LiteLLM header patching. (@TimothyZhang7)
- **OpenAI-compatible endpoint support** -- `api_base` and `extra_kwargs` in `RuntimeConfig` for any OpenAI-compatible API. (@TimothyZhang7)
- **Remove deprecated node types** -- Delete `FlexibleGraphExecutor`, `WorkerNode`, `HybridJudge`, `CodeSandbox`, `Plan`, `FunctionNode`, `LLMNode`, `RouterNode`. Deprecated types (`llm_tool_use`, `llm_generate`, `function`, `router`, `human_input`) now raise `RuntimeError` with migration guidance. (@TimothyZhang7)
- **Interactive credential setup** -- Guided `CredentialSetupSession` with health checks and encrypted storage, accessible via `hive setup-credentials` or automatic prompting on credential errors. (@RichardTang-Aden)
- **Pre-start confirmation prompt** -- Interactive prompt before agent execution allowing credential updates or abort. (@RichardTang-Aden)
- **Event bus multi-graph support** -- `graph_id` on events, `filter_graph` on subscriptions, `ESCALATION_REQUESTED` event type, `exclude_own_graph` filter. (@TimothyZhang7)

#### TUI Improvements

- **In-app agent picker** (Ctrl+A) -- Tabbed modal for browsing agents with metadata badges (nodes, tools, sessions, tags). (@TimothyZhang7)
- **Runtime-optional TUI startup** -- Launches without a pre-loaded agent, shows agent picker on startup. (@TimothyZhang7)
- **Hive Coder escalation** (Ctrl+E) -- Escalate to Hive Coder and return; also available via `/coder` and `/back` chat commands. (@TimothyZhang7)
- **PDF attachment support** -- `/attach` and `/detach` commands with native OS file dialog. (@TimothyZhang7)
- **Streaming output pane** -- Dedicated RichLog widget for live LLM token streaming. (@TimothyZhang7)
- **Multi-graph TUI commands** -- `/graphs`, `/graph <id>`, `/load <path>`, `/unload <id>`. (@TimothyZhang7)
- **Agent Guardian watchdog** -- Event-driven monitor that catches secondary agent failures and triggers automatic remediation, with `--no-guardian` CLI flag. (@TimothyZhang7)

#### New Tool Integrations

| Tool | Description | Contributor |
| --- | --- | --- |
| **Discord** | 4 MCP tools (`discord_list_guilds`, `discord_list_channels`, `discord_send_message`, `discord_get_messages`) with rate-limit retry and channel filtering | @mishrapravin114 |
| **Exa Search API** | 4 AI-powered search tools (`exa_search`, `exa_find_similar`, `exa_get_contents`, `exa_answer`) with neural/keyword search, domain filters, and citation-backed answers | @JeetKaria06 |
| **Razorpay** | 6 payment processing tools for payments, invoices, payment links, and refunds with HTTP Basic Auth | @shivamshahi07 |
| **Google Docs** | Document creation, reading, and editing with OAuth credential support | @haliaeetusvocifer |
| **Gmail enhancements** | Expanded mail operations for inbox management | @bryanadenhq |

#### Infrastructure

- **Default node type → `event_loop`** -- `NodeSpec.node_type` defaults to `"event_loop"` instead of `"llm_tool_use"`. (@TimothyZhang7)
- **Default `max_node_visits` → 0 (unlimited)** -- Nodes default to unlimited visits, reducing friction for feedback loops and forever-alive agents. (@TimothyZhang7)
- **Remove `function` field from NodeSpec** -- Follows deprecation of `FunctionNode`. (@TimothyZhang7)
- **LiteLLM OAuth patch** -- Correct header construction for OAuth tokens (remove `x-api-key` when Bearer token is present). (@TimothyZhang7)
- **Orchestrator config centralization** -- Reads `api_key`, `api_base`, `extra_kwargs` from centralized `~/.hive/configuration.json`. (@TimothyZhang7)
- **System prompt datetime injection** -- All system prompts now include current date/time for time-aware agent behavior. (@TimothyZhang7)
- **Utils module exports** -- Proper `__init__.py` exports for the utils module. (@Siddharth2624)
- **Increased default max_tokens** -- Opus 4.6 defaults to 32768, Sonnet 4.5 to 16384 (up from 8192). (@TimothyZhang7)

---

### Bug Fixes

- Flush WIP accumulator outputs on cancel/failure so edge conditions see correct values on resume
- Stall detection state preserved across resume (no more resets on checkpoint restore)
- Skip client-facing blocking for event-triggered executions (timer/webhook)
- Executor retry override scoped to actual EventLoopNode instances only
- Add `_awaiting_input` flag to EventLoopNode to prevent input injection race conditions
- Fix TUI streaming display (tokens no longer appear one-per-line)
- Fix `_return_from_escalation` crash when ChatRepl widgets not yet mounted
- Fix tools registration problems for Google Docs credentials (@RichardTang-Aden)
- Fix email agent version conflicts (@RichardTang-Aden)
- Fix coder tool timeouts (120s for tests, 300s cap for commands)

### Documentation

- Clarify installation and prevent root pip install misuse (@paarths-collab)

---

### Agent Updates

- **Email Inbox Management** -- Consolidate `gmail_inbox_guardian` and `inbox_management` into a single unified agent with updated prompts and config. (@RichardTang-Aden, @bryanadenhq)
- **Job Hunter** -- Updated node prompts, config, and agent metadata; added PDF resume selection. (@bryanadenhq)
- **Deep Research Agent** -- Revised node implementations with updated prompts and output handling.
- **Tech News Reporter** -- Revised node prompts for improved output quality.
- **Vulnerability Assessment** -- Expanded prompts with more detailed assessment instructions. (@bryanadenhq)

---

### Breaking Changes

- **Deprecated node types raise `RuntimeError`** -- `llm_tool_use`, `llm_generate`, `function`, `router`, `human_input` now fail instead of warning. Migrate to `event_loop`.
- **`NodeSpec.node_type` defaults to `"event_loop"`** (was `"llm_tool_use"`)
- **`NodeSpec.max_node_visits` defaults to `0` / unlimited** (was `1`)
- **`NodeSpec.function` field removed** -- `FunctionNode` is deleted; use event_loop nodes with tools instead.

---

### Community Contributors

A huge thank you to everyone who contributed to this release:

- **Richard Tang** (@RichardTang-Aden) -- Interactive credential setup, pre-start confirmation, email agent consolidation, tool registration fixes, lint and formatting
- **Pravin Mishra** (@mishrapravin114) -- Discord integration with 4 MCP tools
- **Jeet Karia** (@JeetKaria06) -- Exa Search API integration with 4 AI-powered search tools
- **Shivam Shahi** (@shivamshahi07) -- Razorpay payment processing integration
- **Siddharth Varshney** (@Siddharth2624) -- Utils module exports
- **@haliaeetusvocifer** -- Google Docs integration with OAuth support
- **Bryan** (@bryanadenhq) -- PDF selection, inbox agent fixes, Job Hunter and Vulnerability Assessment updates
- **@paarths-collab** -- Documentation improvements

---

### Upgrading

```bash
git pull origin main
uv sync
```

#### Migration Guide

If your agents use deprecated node types, update them:

```python
# Before (v0.5.0) -- these now raise RuntimeError
NodeSpec(node_type="llm_tool_use", ...)
NodeSpec(node_type="function", function=my_func, ...)

# After (v0.5.1) -- use event_loop for everything
NodeSpec(node_type="event_loop", ...)  # or just omit node_type (it's the default now)
```

If your agents set `max_node_visits=1` explicitly, they'll still work. The only change is the _default_ -- new agents without an explicit value now get unlimited visits.

To try the new Hive Coder:

```bash
# Launch Coder directly
hive code

# Or from TUI -- press Ctrl+E to escalate
hive tui
```
[Unreleased]: https://github.com/adenhq/hive/compare/v0.1.0...HEAD
[0.1.0]: https://github.com/adenhq/hive/releases/tag/v0.1.0

+27
-1063
File diff suppressed because it is too large
@@ -8,12 +8,11 @@ This guide covers everything you need to know to develop with the Aden Agent Fra
|
||||
2. [Initial Setup](#initial-setup)
|
||||
3. [Project Structure](#project-structure)
|
||||
4. [Building Agents](#building-agents)
|
||||
5. [Running Agents](#running-agents)
|
||||
6. [Testing Agents](#testing-agents)
|
||||
7. [Code Style & Conventions](#code-style--conventions)
|
||||
8. [Git Workflow](#git-workflow)
|
||||
9. [Common Tasks](#common-tasks)
|
||||
10. [Troubleshooting](#troubleshooting)
|
||||
5. [Testing Agents](#testing-agents)
|
||||
6. [Code Style & Conventions](#code-style--conventions)
|
||||
7. [Git Workflow](#git-workflow)
|
||||
8. [Common Tasks](#common-tasks)
|
||||
9. [Troubleshooting](#troubleshooting)
|
||||
|
||||
---
|
||||
|
||||
@@ -21,13 +20,12 @@ This guide covers everything you need to know to develop with the Aden Agent Fra
|
||||
|
||||
Aden Agent Framework is a Python-based system for building goal-driven, self-improving AI agents.
|
||||
|
||||
| Package | Directory | Description | Tech Stack |
|
||||
| ------------- | ---------- | ----------------------------------------- | ------------ |
|
||||
| **framework** | `/core` | Core runtime, graph executor, protocols | Python 3.11+ |
|
||||
| **tools** | `/tools` | MCP tools for agent capabilities | Python 3.11+ |
|
||||
| Package | Directory | Description | Tech Stack |
|
||||
| ------------- | ---------- | --------------------------------------- | ------------ |
|
||||
| **framework** | `/core` | Core runtime, graph executor, protocols | Python 3.11+ |
|
||||
| **tools** | `/tools` | MCP tools for agent capabilities | Python 3.11+ |
|
||||
| **exports** | `/exports` | Agent packages (user-created, gitignored) | Python 3.11+ |
|
||||
| **skills** | `.claude`, `.agents`, `.agent` | Shared skills for Claude/Codex/other coding agents | Markdown |
|
||||
| **codex** | `.codex` | Codex CLI project configuration (MCP servers) | TOML |
|
||||
| **skills** | `.claude` | Claude Code skills for building/testing | Markdown |
|
||||
|
||||
### Key Principles
|
||||
|
||||
@@ -41,22 +39,84 @@ Aden Agent Framework is a Python-based system for building goal-driven, self-imp
|
||||
|
||||
## Initial Setup
|
||||
|
||||
See [environment-setup.md](./environment-setup.md) for the full setup guide, including Windows, Alpine Linux, and troubleshooting.
|
||||
### Prerequisites
|
||||
|
||||
### Quick Start
|
||||
Ensure you have installed:
|
||||
|
||||
- **Python 3.11+** - [Download](https://www.python.org/downloads/) (3.12 or 3.13 recommended)
|
||||
- **pip** - Package installer for Python (comes with Python)
|
||||
- **git** - Version control
|
||||
- **Claude Code** - [Install](https://docs.anthropic.com/claude/docs/claude-code) (optional, for using building skills)
|
||||
|
||||
Verify installation:
|
||||
|
||||
```bash
|
||||
python --version # Should be 3.11+
|
||||
pip --version # Should be latest
|
||||
git --version # Any recent version
|
||||
```
|
||||
|
||||
### Step-by-Step Setup
|
||||
|
||||
```bash
|
||||
# 1. Clone the repository
|
||||
git clone https://github.com/adenhq/hive.git
|
||||
cd hive
|
||||
|
||||
# 2. Run automated setup
|
||||
./quickstart.sh
|
||||
```
|
||||
|
||||
The setup script performs these actions:
|
||||
|
||||
1. Checks Python version (3.11+)
|
||||
2. Installs `framework` package from `/core` (editable mode)
|
||||
3. Installs `aden_tools` package from `/tools` (editable mode)
|
||||
4. Fixes package compatibility (upgrades openai for litellm)
|
||||
5. Verifies all installations
|
||||
|
||||
### API Keys (Optional)
|
||||
|
||||
For running agents with real LLMs:
|
||||
|
||||
```bash
|
||||
# Add to your shell profile (~/.bashrc, ~/.zshrc, etc.)
|
||||
export ANTHROPIC_API_KEY="your-key-here"
|
||||
export OPENAI_API_KEY="your-key-here" # Optional
|
||||
export BRAVE_SEARCH_API_KEY="your-key-here" # Optional, for web search tool
|
||||
```
|
||||
|
||||
Get API keys:
|
||||
|
||||
- **Anthropic**: [console.anthropic.com](https://console.anthropic.com/)
|
||||
- **OpenAI**: [platform.openai.com](https://platform.openai.com/)
|
||||
- **Brave Search**: [brave.com/search/api](https://brave.com/search/api/)
|
||||
|
||||
### Install Claude Code Skills
|
||||
|
||||
```bash
|
||||
# Install building-agents and testing-agent skills
|
||||
./quickstart.sh
|
||||
```
|
||||
|
||||
This installs agent-related Claude Code skills:
|
||||
|
||||
- `/building-agents-core` - Fundamental agent concepts
|
||||
- `/building-agents-construction` - Step-by-step agent building
|
||||
- `/building-agents-patterns` - Best practices and design patterns
|
||||
- `/testing-agent` - Test and validate agents
|
||||
- `/agent-workflow` - End-to-end guided workflow
|
||||
|
||||
### Verify Setup
|
||||
|
||||
```bash
|
||||
uv run python -c "import framework; print('OK')"
|
||||
uv run python -c "import aden_tools; print('OK')"
|
||||
uv run python -c "import litellm; print('OK')"
|
||||
# Verify package imports
|
||||
python -c "import framework; print('✓ framework OK')"
|
||||
python -c "import aden_tools; print('✓ aden_tools OK')"
|
||||
python -c "import litellm; print('✓ litellm OK')"
|
||||
|
||||
# Run an agent (after building one via /building-agents-construction)
|
||||
PYTHONPATH=core:exports python -m your_agent_name validate
|
||||
```
|
||||
|
||||
---
|
||||
@@ -68,42 +128,48 @@ hive/ # Repository root
|
||||
│
|
||||
├── .github/ # GitHub configuration
|
||||
│ ├── workflows/
|
||||
│ │ ├── ci.yml # Lint, test, validate on every PR
|
||||
│ │ ├── release.yml # Runs on tags
|
||||
│ │ ├── pr-requirements.yml # PR requirement checks
|
||||
│ │ ├── pr-check-command.yml # PR check commands
|
||||
│ │ ├── claude-issue-triage.yml # Automated issue triage
|
||||
│ │ └── auto-close-duplicates.yml # Close duplicate issues
|
||||
│ │ ├── ci.yml # Runs on every PR
|
||||
│ │ └── release.yml # Runs on tags
|
||||
│ ├── ISSUE_TEMPLATE/ # Bug report & feature request templates
|
||||
│ ├── PULL_REQUEST_TEMPLATE.md # PR description template
|
||||
│ └── CODEOWNERS # Auto-assign reviewers
|
||||
│
|
||||
├── .codex/ # Codex CLI project config
|
||||
│ └── config.toml # Codex MCP server definitions
|
||||
├── .claude/ # Claude Code Skills
|
||||
│ └── skills/ # Skills for building
|
||||
│ ├── building-agents-core/
|
||||
| | ├── SKILL.md # Main skill definition
|
||||
│ | └── examples
|
||||
│ ├── building-agents-patterns/
|
||||
| | ├── SKILL.md
|
||||
│ | └── examples
|
||||
│ ├── building-agents-construction/
|
||||
| | ├── SKILL.md
|
||||
│ | └── examples
|
||||
│ ├── testing-agent/ # Skills for testing agents
|
||||
│ │ ├── SKILL.md
|
||||
│ | └── examples
|
||||
│ └── agent-workflow/ # Complete workflow
|
||||
| ├── SKILL.md
|
||||
│ └── examples
|
||||
│
|
||||
├── core/ # CORE FRAMEWORK PACKAGE
|
||||
│ ├── framework/ # Main package code
|
||||
│ │ ├── agents/ # Agent definitions and helpers
|
||||
│ │ ├── builder/ # Agent builder utilities
|
||||
│ │ ├── credentials/ # Credential management
|
||||
│ │ ├── debugger/ # Debugging tools
|
||||
│ │ ├── graph/ # GraphExecutor - executes node graphs
|
||||
│ │ ├── llm/ # LLM provider integrations (Anthropic, OpenAI, OpenRouter, Hive, etc.)
|
||||
│ │ ├── llm/ # LLM provider integrations (Anthropic, OpenAI, etc.)
|
||||
│ │ ├── mcp/ # MCP server integration
|
||||
│ │ ├── observability/ # Structured logging - human-readable and machine-parseable tracing
|
||||
│ │ ├── runner/ # AgentRunner - loads and runs agents
|
||||
│ │ ├── runtime/ # Runtime environment
|
||||
│ │ ├── schemas/ # Data schemas
|
||||
│ │ ├── server/ # HTTP API server
|
||||
│ │ ├── skills/ # Skill definitions
|
||||
│ │ ├── storage/ # File-based persistence
|
||||
│ │ ├── testing/ # Testing utilities
|
||||
│ │ ├── tools/ # Built-in tool implementations
|
||||
│ │ └── utils/ # Shared utilities
|
||||
│ ├── tests/ # Unit and E2E tests (including dummy agents)
|
||||
│ │ └── __init__.py
|
||||
│ ├── pyproject.toml # Package metadata and dependencies
|
||||
│ ├── requirements.txt # Python dependencies
|
||||
│ ├── README.md # Framework documentation
|
||||
│ └── MCP_INTEGRATION_GUIDE.md # MCP server integration guide
|
||||
│ ├── MCP_INTEGRATION_GUIDE.md # MCP server integration guide
|
||||
│ └── docs/ # Protocol documentation
|
||||
│
|
||||
├── tools/ # TOOLS PACKAGE (MCP tools)
|
||||
│ ├── src/
|
||||
@@ -116,31 +182,31 @@ hive/ # Repository root
│   │   ├── mcp_server.py            # HTTP MCP server
│   │   └── __init__.py
│   ├── pyproject.toml               # Package metadata
│   ├── requirements.txt             # Python dependencies
│   └── README.md                    # Tools documentation
│
├── exports/                         # AGENT PACKAGES (user-created, gitignored)
│   └── your_agent_name/             # Created via coder-tools workflow
│
├── examples/                        # Example agents
│   └── templates/                   # Pre-built template agents
│   └── your_agent_name/             # Created via /building-agents-construction
│
├── docs/                            # Documentation
│   ├── getting-started.md           # Quick start guide
│   ├── configuration.md             # Configuration reference
│   ├── architecture/                # System architecture
│   ├── articles/                    # Technical articles
│   ├── quizzes/                     # Developer quizzes
│   └── i18n/                        # Translations
│   ├── architecture.md              # System architecture
│   └── articles/                    # Technical articles
│
├── scripts/                         # Utility scripts
│   └── auto-close-duplicates.ts     # GitHub duplicate issue closer
├── scripts/                         # Build & utility scripts
│   ├── setup-python.sh              # Python environment setup
│   └── setup.sh                     # Legacy setup script
│
├── .agent/                          # Antigravity IDE: mcp_config.json + skills (symlinks)
├── quickstart.sh                    # Interactive setup wizard
├── quickstart.sh                    # Install Claude Code skills
├── ENVIRONMENT_SETUP.md             # Complete Python setup guide
├── README.md                        # Project overview
├── DEVELOPER.md                     # This file
├── CONTRIBUTING.md                  # Contribution guidelines
├── CHANGELOG.md                     # Version history
├── ROADMAP.md                       # Product roadmap
├── LICENSE                          # Apache 2.0 License
├── docs/CODE_OF_CONDUCT.md          # Community guidelines
├── CODE_OF_CONDUCT.md               # Community guidelines
└── SECURITY.md                      # Security policy
```

@@ -148,16 +214,19 @@ hive/ # Repository root

## Building Agents

### Using Coder Tools Workflow
### Using Claude Code Skills

The fastest way to build agents is with the configured MCP workflow:
The fastest way to build agents is to use the Claude Code skills:

```bash
# Install dependencies (one-time)
# Install skills (one-time)
./quickstart.sh

# Build a new agent
Use the coder-tools MCP tools from your IDE agent chat (e.g., initialize_and_build_agent)
claude> /building-agents-construction

# Test the agent
claude> /testing-agent
```

### Agent Development Workflow
@@ -165,220 +234,119 @@ Use the coder-tools MCP tools from your IDE agent chat (e.g., initialize_and_bui

1. **Define Your Goal**

   ```
   Use the coder-tools initialize_and_build_agent tool
   claude> /building-agents-construction
   Enter goal: "Build an agent that processes customer support tickets"
   ```

2. **Design the Workflow**

   - The workflow guides you through defining nodes
   - Each node is a unit of work (LLM call with event_loop)
   - The skill guides you through defining nodes
   - Each node is a unit of work (LLM call, function, router)
   - Edges define how execution flows

3. **Generate the Agent**

   - The workflow generates a complete Python package in `exports/`
   - The skill generates a complete Python package in `exports/`
   - Includes: `agent.json`, `tools.py`, `README.md`

4. **Validate the Agent**

   ```bash
   PYTHONPATH=exports uv run python -m your_agent_name validate
   PYTHONPATH=core:exports python -m your_agent_name validate
   ```

5. **Test the Agent**

   Run tests with:

   ```bash
   PYTHONPATH=exports uv run python -m your_agent_name test
   claude> /testing-agent
   ```

### Manual Agent Development

If you prefer to build agents manually:

```jsonc
// exports/my_agent/agent.json
{
  "agent": {
    "id": "my_agent",
    "name": "Support Ticket Handler",
    "version": "1.0.0",
    "description": "Process customer support tickets"
  },
  "graph": {
    "id": "my_agent-graph",
    "goal_id": "support_ticket",
    "entry_node": "analyze",
    "terminal_nodes": ["analyze"],
    "nodes": [
      {
        "id": "analyze",
        "name": "Analyze Ticket",
        "description": "Categorize and prioritize the support ticket",
        "node_type": "event_loop",
        "system_prompt": "Analyze this support ticket...",
        "input_keys": ["ticket_content"],
        "output_keys": ["category", "priority"]
      }
    ],
    "edges": []
  },
  "goal": {
    "id": "support_ticket",
    "goal_id": "support_ticket",
    "name": "Support Ticket Handler",
    "description": "Process customer support tickets",
    "success_criteria": [
      {
        "id": "sc-categorized",
        "description": "Ticket is categorized and prioritized correctly"
      }
    ]
  }
    "success_criteria": "Ticket is categorized, prioritized, and routed correctly"
  },
  "nodes": [
    {
      "node_id": "analyze",
      "name": "Analyze Ticket",
      "node_type": "llm_generate",
      "system_prompt": "Analyze this support ticket...",
      "input_keys": ["ticket_content"],
      "output_keys": ["category", "priority"]
    }
  ],
  "edges": [
    {
      "edge_id": "start_to_analyze",
      "source": "START",
      "target": "analyze",
      "condition": "on_success"
    }
  ]
}
```
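
To sanity-check a hand-written `agent.json` before running the framework's own `validate` command, a quick stdlib script can catch missing fields. This is a minimal sketch based only on the fields shown above, not the framework's validator; `check_agent_json` is a hypothetical helper name.

```python
# Minimal structural check for a hand-written agent.json (illustrative only;
# the framework's `validate` command remains the authoritative check).
import json
from pathlib import Path

def check_agent_json(path: str) -> list[str]:
    """Return a list of problems found; an empty list means the shape looks OK."""
    spec = json.loads(Path(path).read_text())
    problems = [f"missing top-level key: {k}"
                for k in ("agent", "goal", "nodes", "edges") if k not in spec]
    for node in spec.get("nodes", []):
        for required in ("node_id", "node_type"):
            if required not in node:
                problems.append(f"node missing {required!r}: {node.get('name', '<unnamed>')}")
    return problems

print(check_agent_json("exports/my_agent/agent.json") or "structure looks OK")
```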

---

## Running Agents

### Using the `hive` CLI
### Running Agents

```bash
# Open the browser dashboard (Recommended for interactive use)
hive open
# Validate agent structure
PYTHONPATH=core:exports python -m agent_name validate

# Run a specific agent
hive run exports/my_agent --input '{"ticket_content": "My login is broken", "customer_id": "CUST-123"}'
# Show agent information
PYTHONPATH=core:exports python -m agent_name info

# Run with input from a file
hive run exports/my_agent --input-file input.json
# Run agent with input
PYTHONPATH=core:exports python -m agent_name run --input '{
  "ticket_content": "My login is broken",
  "customer_id": "CUST-123"
}'

# Run and write output to file
hive run exports/my_agent -i '{...}' -o result.json

# Resume a previous session
hive run exports/my_agent --resume-session <session_id>

# Resume from a specific checkpoint
hive run exports/my_agent --resume-session <session_id> --checkpoint <checkpoint>

# Use a specific LLM model
hive run exports/my_agent --model claude-sonnet-4-20250514
```

### CLI Command Reference

| Command                | Description                              |
| ---------------------- | ---------------------------------------- |
| `hive run <path>`      | Execute an agent (see flags below)       |
| `hive shell [path]`    | Interactive REPL (`--no-approve`)        |
| `hive serve`           | Start HTTP API server                    |
| `hive open`            | Start server + open dashboard in browser |
| `hive info <path>`     | Show agent details                       |
| `hive validate <path>` | Validate agent structure                 |
| `hive list [dir]`      | List available agents                    |

### `hive run` flags

| Flag               | Description                                                   |
| ------------------ | ------------------------------------------------------------- |
| `-i, --input`      | Input context as JSON string                                  |
| `-f, --input-file` | Input context from JSON file                                  |
| `-o, --output`     | Write results to file instead of stdout                       |
| `-m, --model`      | LLM model to use (any LiteLLM-compatible name)                |
| `-q, --quiet`      | Only output the final result JSON (log level: ERROR)          |
| `-v, --verbose`    | Show execution logs (log level: INFO)                         |
| `--debug`          | Show all debug-level logs (log level: DEBUG)                  |
| `--resume-session` | Resume from a specific session ID                             |
| `--checkpoint`     | Resume from a specific checkpoint (requires --resume-session) |

### `hive serve` / `hive open` flags

| Flag            | Description                                                |
| --------------- | ---------------------------------------------------------- |
| `--host`        | Host to bind (default: 127.0.0.1)                          |
| `-p, --port`    | Port to listen on (default: 8787)                          |
| `-a, --agent`   | Agent path to preload (repeatable)                         |
| `-m, --model`   | LLM model for preloaded agents                             |
| `--open`        | Open dashboard in browser after server starts (serve only) |
| `-v, --verbose` | Enable INFO log level                                      |
| `--debug`       | Enable DEBUG log level                                     |

### Log levels

All commands support three verbosity tiers:

```bash
# Quiet — errors only
hive run exports/my_agent -q -i '{...}'

# Verbose — execution steps, LLM calls
hive run -v exports/my_agent -i '{...}'

# Debug — everything including internal subsystems (memory reflection, recall)
hive run --debug exports/my_agent -i '{...}'
```

The same flags work for `hive serve` and `hive open`:

```bash
hive open --debug           # Start with full debug logging
hive serve --debug -p 9090  # Custom port with debug logs
```

### Using Python Directly

```bash
PYTHONPATH=exports uv run python -m agent_name run --input '{...}'
# Run in mock mode (no LLM calls)
PYTHONPATH=core:exports python -m agent_name run --mock --input '{...}'
```

---

## Testing Agents

### Agent Tests
### Using the Testing Agent Skill

```bash
# Run tests for an agent
PYTHONPATH=exports uv run python -m agent_name test
claude> /testing-agent
```

This generates and runs:

- **Constraint tests** - Verify agent respects constraints
- **Success tests** - Verify agent achieves success criteria
- **Integration tests** - End-to-end workflows (see the sketch below)
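
For instance, a success-criteria test might look like the sketch below. `categorize_ticket` is a hypothetical stand-in for however your agent package exposes a programmatic entry point; swap it for the real one.

```python
# Hypothetical success-criteria test; `categorize_ticket` is a stub standing
# in for a real call into the agent so the example runs on its own.
def categorize_ticket(ticket_content: str) -> dict:
    return {"category": "authentication", "priority": "high"}  # stubbed result

def test_ticket_categorization():
    result = categorize_ticket("My login is broken")
    # Success criterion from the goal: ticket is categorized and prioritized
    assert result["category"], "expected a non-empty category"
    assert result["priority"] in {"low", "medium", "high"}
```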

### Manual Testing

```bash
# Run all tests for an agent
PYTHONPATH=core:exports python -m agent_name test

# Run specific test type
PYTHONPATH=exports uv run python -m agent_name test --type constraint
PYTHONPATH=exports uv run python -m agent_name test --type success
PYTHONPATH=core:exports python -m agent_name test --type constraint
PYTHONPATH=core:exports python -m agent_name test --type success

# Run with parallel execution
PYTHONPATH=exports uv run python -m agent_name test --parallel 4
PYTHONPATH=core:exports python -m agent_name test --parallel 4

# Fail fast (stop on first failure)
PYTHONPATH=exports uv run python -m agent_name test --fail-fast
PYTHONPATH=core:exports python -m agent_name test --fail-fast
```

### Framework Tests

```bash
# Run all unit tests (core + tools)
make test

# Run linting and format checks
make check
```

### Dummy Agent Tests (E2E)

The repository includes end-to-end dummy agent tests under `core/tests/dummy_agents/` that run real LLM calls against deterministic graph structures. These are **not** part of CI — run them manually to verify the executor works with real providers.

```bash
cd core && uv run python tests/dummy_agents/run_all.py
```

The script detects available LLM credentials and prompts you to pick a provider. For verbose output:

```bash
cd core && uv run python tests/dummy_agents/run_all.py --verbose
```

See [environment-setup.md](./environment-setup.md#testing-with-dummy-agents) for the full list of covered agents and details.

### Writing Custom Tests

```python
@@ -407,7 +375,7 @@ def test_ticket_categorization():

- **PEP 8** - Follow Python style guide
- **Type hints** - Use for function signatures and class attributes
- **Docstrings** - Document classes and public functions
- **Ruff** - Linter and formatter (run with `make check`)
- **Black** - Code formatter (run with `black .`)

```python
# Good
@@ -541,8 +509,8 @@ chore(deps): update React to 18.2.0

1. Create a feature branch from `main`
2. Make your changes with clear commits
3. Run tests locally: `make test`
4. Run linting: `make check`
3. Run tests locally: `PYTHONPATH=core:exports python -m pytest`
4. Run linting: `black --check .`
5. Push and create a PR
6. Fill out the PR template
7. Request review from CODEOWNERS
@@ -551,6 +519,8 @@ chore(deps): update React to 18.2.0

---

---

## Common Tasks

### Adding Python Dependencies
@@ -558,18 +528,23 @@ chore(deps): update React to 18.2.0
```bash
# Add to core framework
cd core
uv add <package>
pip install <package>
# Then add to requirements.txt or pyproject.toml

# Add to tools package
cd tools
uv add <package>
pip install <package>
# Then add to requirements.txt or pyproject.toml

# Reinstall in editable mode
pip install -e .
```

### Creating a New Agent

```bash
# Option 1: Use Claude Code skill (recommended)
Use the coder-tools initialize_and_build_agent tool
claude> /building-agents-construction

# Option 2: Create manually
# Note: exports/ is initially empty (gitignored). Create your agent directory:
@@ -577,7 +552,7 @@ mkdir -p exports/my_new_agent
cd exports/my_new_agent
# Create agent.json, tools.py, README.md (see Agent Package Structure below)

# Option 3: Use the coder-tools MCP tools (advanced)
# Option 3: Use the agent builder MCP tools (advanced)
# See core/MCP_BUILDER_TOOLS_GUIDE.md
```

@@ -601,17 +576,16 @@ def my_custom_tool(param1: str, param2: int) -> Dict[str, Any]:
    # Implementation
    return {"result": "success", "data": ...}

# Register tool in agent.json (inside "graph" → "nodes")
# Register tool in agent.json
{
  "graph": {
    "nodes": [
      {
        "id": "use_tool",
        "node_type": "event_loop",
        "tools": ["my_custom_tool"]
      }
    ]
  }
  "nodes": [
    {
      "node_id": "use_tool",
      "node_type": "function",
      "tools": ["my_custom_tool"],
      ...
    }
  ]
}
```

@@ -630,16 +604,15 @@ def my_custom_tool(param1: str, param2: int) -> Dict[str, Any]:
  }
}

# 2. Reference tools in agent.json (inside "graph" → "nodes")
# 2. Reference tools in agent.json
{
  "graph": {
    "nodes": [
      {
        "id": "search",
        "tools": ["web_search", "web_scrape"]
      }
    ]
  }
  "nodes": [
    {
      "node_id": "search",
      "tools": ["web_search", "web_scrape"],
      ...
    }
  ]
}
```

@@ -649,8 +622,6 @@ def my_custom_tool(param1: str, param2: int) -> Dict[str, Any]:
# Add to your shell profile (~/.bashrc, ~/.zshrc, etc.)
export ANTHROPIC_API_KEY="your-key-here"
export OPENAI_API_KEY="your-key-here"
export OPENROUTER_API_KEY="your-key-here"
export HIVE_API_KEY="your-key-here"
export BRAVE_SEARCH_API_KEY="your-key-here"

# Or create .env file (not committed to git)
@@ -659,17 +630,52 @@ echo 'ANTHROPIC_API_KEY=your-key-here' >> .env

### Debugging Agent Execution

```bash
# Run with verbose output
hive run exports/my_agent --verbose --input '{"task": "..."}'
```python
# Add debug logging to your agent
import logging
logging.basicConfig(level=logging.DEBUG)

# Run with verbose output
PYTHONPATH=core:exports python -m agent_name run --input '{...}' --verbose

# Use mock mode to test without LLM calls
PYTHONPATH=core:exports python -m agent_name run --mock --input '{...}'
```

---

## Troubleshooting

See [environment-setup.md](./environment-setup.md#troubleshooting) for common setup issues (module not found errors, broken installations, PEP 668, etc.).
### Port Already in Use

```bash
# Find process using port
lsof -i :3000
lsof -i :4000

# Kill process
kill -9 <PID>

# Or change ports in config.yaml and regenerate
```

### Environment Variables Not Loading

```bash
# Verify .env file exists at project root
cat .env

# Or check shell environment
echo $ANTHROPIC_API_KEY

# Copy from .env.example if needed
cp .env.example .env
# Then edit .env with your API keys
```

---

@@ -679,3 +685,7 @@ See [environment-setup.md](./environment-setup.md#troubleshooting) for common se

- **Issues**: Search [existing issues](https://github.com/adenhq/hive/issues)
- **Discord**: Join our [community](https://discord.com/invite/MXE49hrKDk)
- **Code Review**: Tag a maintainer on your PR

---

_Happy coding!_ 🐝

@@ -0,0 +1,495 @@

# Agent Development Environment Setup

Complete setup guide for building and running goal-driven agents with the Aden Agent Framework.

## Quick Setup

```bash
# Run the automated setup script
./quickstart.sh
```

> **Note for Windows Users:**
> Running the setup script on native Windows shells (PowerShell / Git Bash) may sometimes fail due to Python App Execution Aliases.
> It is **strongly recommended to use WSL (Windows Subsystem for Linux)** for a smoother setup experience.

This will:

- Check the Python version (requires 3.11+)
- Install the core framework package (`framework`)
- Install the tools package (`aden_tools`)
- Fix package compatibility issues (openai + litellm)
- Verify all installations

## Alpine Linux Setup

If you are using Alpine Linux (e.g., inside a Docker container), you must install system dependencies and use a virtual environment before running the setup script:

1. Install System Dependencies:
   ```bash
   apk update
   apk add bash git python3 py3-pip nodejs npm curl build-base python3-dev linux-headers libffi-dev
   ```
2. Set up Virtual Environment (Required for Python 3.12+):
   ```bash
   python3 -m venv venv
   source venv/bin/activate
   pip install --upgrade pip setuptools wheel
   ```
3. Run the Quickstart Script:
   ```bash
   ./quickstart.sh
   ```

## Manual Setup (Alternative)

If you prefer to set up manually or the script fails:

### 1. Install Core Framework

```bash
cd core
pip install -e .
```

### 2. Install Tools Package

```bash
cd tools
pip install -e .
```

### 3. Upgrade OpenAI Package

```bash
# litellm requires openai >= 1.0.0
pip install --upgrade "openai>=1.0.0"
```

### 4. Verify Installation

```bash
python -c "import framework; print('✓ framework OK')"
python -c "import aden_tools; print('✓ aden_tools OK')"
python -c "import litellm; print('✓ litellm OK')"
```

> **Windows Tip:**
> On Windows, if the verification commands fail, ensure you are running them in **WSL** or after **disabling Python App Execution Aliases** in Windows Settings → Apps → App Execution Aliases.

## Requirements

### Python Version

- **Minimum:** Python 3.11
- **Recommended:** Python 3.11 or 3.12
- **Tested on:** Python 3.11, 3.12, 3.13

### System Requirements

- pip (latest version)
- 2GB+ RAM
- Internet connection (for LLM API calls)
- For Windows users: WSL 2 is recommended for full compatibility.

### API Keys (Optional)

For running agents with real LLMs:

```bash
export ANTHROPIC_API_KEY="your-key-here"
```

## Running Agents

All agent commands must be run from the project root with `PYTHONPATH` set:

```bash
# From the hive/ directory
PYTHONPATH=core:exports python -m agent_name COMMAND
```

### Example Commands

After building an agent via `/building-agents-construction`, use these commands:

```bash
# Validate agent structure
PYTHONPATH=core:exports python -m your_agent_name validate

# Show agent information
PYTHONPATH=core:exports python -m your_agent_name info

# Run agent with input
PYTHONPATH=core:exports python -m your_agent_name run --input '{
  "task": "Your input here"
}'

# Run in mock mode (no LLM calls)
PYTHONPATH=core:exports python -m your_agent_name run --mock --input '{...}'
```

## Building New Agents and the Run Flow

Build and run an agent using the Claude Code CLI with the agent-building skills:

### 1. Install Claude Skills (One-time)

```bash
./quickstart.sh
```

This verifies that the agent-related Claude Code skills are available:

- `/building-agents-construction` - Step-by-step build guide
- `/building-agents-core` - Fundamental concepts
- `/building-agents-patterns` - Best practices
- `/testing-agent` - Test and validate agents
- `/agent-workflow` - Complete workflow

### 2. Build an Agent

```
claude> /building-agents-construction
```

Follow the prompts to:

1. Define your agent's goal
2. Design the workflow nodes
3. Connect nodes with edges
4. Generate the agent package under `exports/`

This step creates the initial agent structure required for further development.

### 3. Define Agent Logic

```
claude> /building-agents-core
```

Follow the prompts to:

1. Understand the agent architecture and file structure
2. Define the agent's goal, success criteria, and constraints
3. Learn node types (LLM, tool-use, router, function)
4. Discover and validate available tools before use

This step establishes the core concepts and rules needed before building an agent.

### 4. Apply Agent Patterns

```
claude> /building-agents-patterns
```

Follow the prompts to:

1. Apply best-practice agent design patterns
2. Add pause/resume flows for multi-turn interactions
3. Improve robustness with routing, fallbacks, and retries
4. Avoid common anti-patterns during agent construction

This step helps optimize agent design before final testing.

### 5. Test Your Agent

```
claude> /testing-agent
```

Follow the prompts to:

1. Generate test guidelines for constraints and success criteria
2. Write agent tests directly under `exports/{agent}/tests/`
3. Run goal-based evaluation tests
4. Debug failing tests and iterate on agent improvements

This step verifies that the agent meets its goals before production use.

### 6. Agent Development Workflow (End-to-End)

```
claude> /agent-workflow
```

Follow the guided flow to:

1. Understand core agent concepts (optional)
2. Build the agent structure step by step
3. Apply best-practice design patterns (optional)
4. Test and validate the agent against its goals

This workflow orchestrates all agent-building skills to take you from idea → production-ready agent.

## Troubleshooting

### "externally-managed-environment" error (PEP 668)

**Cause:** Python 3.12+ on macOS/Homebrew, WSL, or some Linux distros prevents system-wide pip installs.

**Solution:** Create and use a virtual environment:

```bash
# Create virtual environment
python3 -m venv .venv

# Activate it
source .venv/bin/activate  # macOS/Linux
# .venv\Scripts\activate   # Windows

# Then run setup
./quickstart.sh
```

Always activate the venv before running agents:

```bash
source .venv/bin/activate
PYTHONPATH=core:exports python -m your_agent_name demo
```

### "ModuleNotFoundError: No module named 'framework'"

**Solution:** Install the core package:

```bash
cd core && pip install -e .
```

### "ModuleNotFoundError: No module named 'aden_tools'"

**Solution:** Install the tools package:

```bash
cd tools && pip install -e .
```

Or run the setup script:

```bash
./quickstart.sh
```

### "ModuleNotFoundError: No module named 'openai._models'"

**Cause:** An outdated `openai` package (0.27.x) is incompatible with `litellm`.

**Solution:** Upgrade openai:

```bash
pip install --upgrade "openai>=1.0.0"
```

### "No module named 'your_agent_name'"

**Cause:** Not running from the project root, a missing PYTHONPATH, or the agent has not been created yet.

**Solution:** Ensure you're in the project root directory, have built an agent, and use:

```bash
PYTHONPATH=core:exports python -m your_agent_name validate
```

### Agent imports fail with "broken installation"

**Symptom:** `pip list` shows packages pointing to non-existent directories.

**Solution:** Reinstall packages properly:

```bash
# Remove broken installations
pip uninstall -y framework tools

# Reinstall correctly
./quickstart.sh
```

## Package Structure

The Hive framework consists of three Python packages:

```
hive/
├── core/                    # Core framework (runtime, graph executor, LLM providers)
│   ├── framework/
│   ├── .venv/               # Created by quickstart.sh
│   └── pyproject.toml
│
├── tools/                   # Tools and MCP servers
│   ├── src/
│   │   └── aden_tools/      # Actual package location
│   ├── .venv/               # Created by quickstart.sh
│   └── pyproject.toml
│
└── exports/                 # Agent packages (user-created, gitignored)
    └── your_agent_name/     # Created via /building-agents-construction
```

## Separate Virtual Environments

The project uses **separate virtual environments** for the `core` and `tools` packages to:

- Isolate dependencies and avoid conflicts
- Allow independent development and testing of each package
- Enable MCP servers to run with their specific dependencies

### How It Works

When you run `./quickstart.sh` or `uv sync` in each directory:

1. **core/.venv/** - Contains the `framework` package and its dependencies (anthropic, litellm, mcp, etc.)
2. **tools/.venv/** - Contains the `aden_tools` package and its dependencies (beautifulsoup4, pandas, etc.)

### Cross-Package Imports

The `core` and `tools` packages are **intentionally independent**:

- **No cross-imports**: `framework` does not import `aden_tools` directly, and vice versa
- **Communication via MCP**: Tools are exposed to agents through MCP servers, not direct Python imports
- **Runtime integration**: The agent runner loads tools via the MCP protocol at runtime

If you need to use both packages in a single script (e.g., for testing), you have two options:

```bash
# Option 1: Install both in a shared environment
python -m venv .venv
source .venv/bin/activate
pip install -e core/ -e tools/

# Option 2: Use PYTHONPATH (for quick testing)
PYTHONPATH=core:tools/src python your_script.py
```

### MCP Server Configuration

The `.mcp.json` at the project root configures MCP servers to use their respective virtual environments:

```json
{
  "mcpServers": {
    "agent-builder": {
      "command": "core/.venv/bin/python",
      "args": ["-m", "framework.mcp.agent_builder_server"]
    },
    "tools": {
      "command": "tools/.venv/bin/python",
      "args": ["-m", "aden_tools.mcp_server", "--stdio"]
    }
  }
}
```

This ensures each MCP server runs with its correct dependencies.

### Why PYTHONPATH is Required

The packages are installed in **editable mode** (`pip install -e`), which means:

- `framework` and `aden_tools` are globally importable (no PYTHONPATH needed)
- `exports` is NOT installed as a package (PYTHONPATH required)

This design allows agents in `exports/` to be:

- Developed independently
- Version controlled separately
- Deployed as standalone packages
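
A quick way to see the difference, assuming `./quickstart.sh` has run and an agent already exists under `exports/`:

```bash
# framework is installed in editable mode, so it imports from anywhere:
python -c "import framework; print(framework.__file__)"

# exports/ is not installed, so this fails with ModuleNotFoundError...
python -c "import your_agent_name"

# ...until PYTHONPATH points at it:
PYTHONPATH=core:exports python -c "import your_agent_name; print('OK')"
```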

## Development Workflow

### 1. Setup (Once)

```bash
./quickstart.sh
```

### 2. Build Agent (Claude Code)

```
claude> /building-agents-construction
Enter goal: "Build an agent that processes customer support tickets"
```

### 3. Validate Agent

```bash
PYTHONPATH=core:exports python -m your_agent_name validate
```

### 4. Test Agent

```
claude> /testing-agent
```

### 5. Run Agent

```bash
PYTHONPATH=core:exports python -m your_agent_name run --input '{...}'
```

## IDE Setup

### VSCode

Add to `.vscode/settings.json`:

```json
{
  "python.analysis.extraPaths": [
    "${workspaceFolder}/core",
    "${workspaceFolder}/exports"
  ],
  "python.autoComplete.extraPaths": [
    "${workspaceFolder}/core",
    "${workspaceFolder}/exports"
  ]
}
```

### PyCharm

1. Open Project Settings → Project Structure
2. Mark `core` as Sources Root
3. Mark `exports` as Sources Root

## Environment Variables

### Required for LLM Operations

```bash
export ANTHROPIC_API_KEY="sk-ant-..."
```

### Optional Configuration

```bash
# Credentials storage location (default: ~/.aden/credentials)
export ADEN_CREDENTIALS_PATH="/custom/path"

# Agent storage location (default: /tmp)
export AGENT_STORAGE_PATH="/custom/storage"
```

## Additional Resources

- **Framework Documentation:** [core/README.md](core/README.md)
- **Tools Documentation:** [tools/README.md](tools/README.md)
- **Example Agents:** [exports/](exports/)
- **Agent Building Guide:** [.claude/skills/building-agents-construction/SKILL.md](.claude/skills/building-agents-construction/SKILL.md)
- **Testing Guide:** [.claude/skills/testing-agent/SKILL.md](.claude/skills/testing-agent/SKILL.md)

## Contributing

When contributing agent packages:

1. Place agents in `exports/agent_name/`
2. Follow the standard agent structure (see existing agents)
3. Include a README.md with usage instructions
4. Add tests if using `/testing-agent`
5. Document required environment variables

## Support

- **Issues:** https://github.com/adenhq/hive/issues
- **Discord:** https://discord.com/invite/MXE49hrKDk
- **Documentation:** https://docs.adenhq.com/

@@ -1,56 +1,26 @@

.PHONY: lint format check test test-tools test-live test-all install-hooks help frontend-install frontend-dev frontend-build

# ── Ensure uv is findable in Git Bash on Windows ──────────────────────────────
# uv installs to ~/.local/bin on Windows/Linux/macOS. Git Bash may not include
# this in PATH by default, so we prepend it here.
export PATH := $(HOME)/.local/bin:$(PATH)

# ── Targets ───────────────────────────────────────────────────────────────────
.PHONY: lint format check test install-hooks help

help: ## Show this help
	@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | \
		awk 'BEGIN {FS = ":.*?## "}; {printf "  \033[36m%-15s\033[0m %s\n", $$1, $$2}'

lint: ## Run ruff linter and formatter (with auto-fix)
	cd core && uv run ruff check --fix .
	cd tools && uv run ruff check --fix .
	cd core && uv run ruff format .
	cd tools && uv run ruff format .
lint: ## Run ruff linter (with auto-fix)
	cd core && ruff check --fix .
	cd tools && ruff check --fix .

format: ## Run ruff formatter
	cd core && uv run ruff format .
	cd tools && uv run ruff format .
	cd core && ruff format .
	cd tools && ruff format .

check: ## Run all checks without modifying files (CI-safe)
	cd core && uv run ruff check .
	cd tools && uv run ruff check .
	cd core && uv run ruff format --check .
	cd tools && uv run ruff format --check .
	cd core && ruff check .
	cd tools && ruff check .
	cd core && ruff format --check .
	cd tools && ruff format --check .

test: ## Run all tests (core + tools, excludes live)
	cd core && uv run python -m pytest tests/ -v --ignore=tests/dummy_agents
	cd tools && uv run python -m pytest -v

test-tools: ## Run tool tests only (mocked, no credentials needed)
	cd tools && uv run python -m pytest -v

test-live: ## Run live integration tests (requires real API credentials)
	cd tools && uv run python -m pytest -m live -s -o "addopts=" --log-cli-level=INFO

test-all: ## Run everything including live tests
	cd core && uv run python -m pytest tests/ -v --ignore=tests/dummy_agents
	cd tools && uv run python -m pytest -v
	cd tools && uv run python -m pytest -m live -s -o "addopts=" --log-cli-level=INFO
test: ## Run all tests
	cd core && python -m pytest tests/ -v

install-hooks: ## Install pre-commit hooks
	uv pip install pre-commit
	pip install pre-commit
	pre-commit install

frontend-install: ## Install frontend npm packages
	cd core/frontend && npm install

frontend-dev: ## Start frontend dev server
	cd core/frontend && npm run dev

frontend-build: ## Build frontend for production
	cd core/frontend && npm run build
@@ -0,0 +1,51 @@

## Summary
- **Added HubSpot integration** — new HubSpot MCP tool with search, get, create, and update operations for contacts, companies, and deals. Includes an OAuth2 provider for HubSpot credentials and a credential store adapter for the tools layer.
- **Replaced the web_scrape tool with Playwright + stealth** — swapped httpx/BeautifulSoup for a headless Chromium browser using `playwright` (async API) and `playwright-stealth`, enabling JS-rendered page scraping and bot detection evasion
- **Added empty response retry logic** — the LLM provider now detects empty responses (e.g. Gemini returning 200 with no content on rate limit) and retries with exponential backoff, preventing hallucinated output from the cleanup LLM (see the sketch after this list)
- **Added context-aware input compaction** — LLM nodes now estimate the input token count before calling the model and progressively truncate the largest values if they exceed the context window budget (see the sketch under "LLM Reliability" below)
- **Increased rate limit retries to 10** with verbose `[retry]` and `[compaction]` logging that includes the model name, finish reason, and attempt count
- **Updated setup scripts** — `scripts/setup-python.sh` now installs the Playwright Chromium browser automatically for web scraping support
- **Interactive quickstart onboarding** — `quickstart.sh` rewritten as a bee-themed interactive wizard that detects existing API keys (including a Claude Code subscription), lets the user pick ONE default LLM provider, and saves the configuration to `~/.hive/configuration.json`
- **Fixed lint errors** across `hubspot_tool.py` (line length) and `agent_builder_server.py` (unused variable)
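
A standalone sketch of the retry shape described above. This is not the actual `core/framework/llm/litellm.py` code; `call_llm` and the delay constants are placeholders.

```python
# Sketch of empty-response retry with exponential backoff (placeholder code,
# not the actual litellm.py implementation).
import time

MAX_RETRIES = 10  # matches the new rate-limit retry ceiling

def call_with_retry(call_llm, base_delay: float = 1.0) -> str:
    for attempt in range(1, MAX_RETRIES + 1):
        response = call_llm()
        if response:  # non-empty content: success
            return response
        delay = base_delay * 2 ** (attempt - 1)
        print(f"[retry] empty response, attempt {attempt}/{MAX_RETRIES}, sleeping {delay:.1f}s")
        time.sleep(delay)
    raise RuntimeError("LLM returned an empty response on every retry")
```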

## Changed files

### HubSpot Integration
- `tools/src/aden_tools/tools/hubspot_tool/` — New MCP tool: contacts, companies, and deals CRUD
- `tools/src/aden_tools/tools/__init__.py` — Registered HubSpot tools
- `tools/src/aden_tools/credentials/integrations.py` — HubSpot credential integration
- `tools/src/aden_tools/credentials/__init__.py` — Updated credential exports
- `core/framework/credentials/oauth2/hubspot_provider.py` — HubSpot OAuth2 provider
- `core/framework/credentials/oauth2/__init__.py` — Registered HubSpot OAuth2 provider
- `core/framework/runner/runner.py` — Updated runner for credential support

### Web Scrape Rewrite
- `tools/src/aden_tools/tools/web_scrape_tool/web_scrape_tool.py` — Playwright async rewrite
- `tools/src/aden_tools/tools/web_scrape_tool/README.md` — Updated docs
- `tools/pyproject.toml` — Added `playwright`, `playwright-stealth` deps
- `tools/Dockerfile` — Added `playwright install chromium --with-deps`
- `scripts/setup-python.sh` — Added Playwright Chromium browser install step

### LLM Reliability
- `core/framework/llm/litellm.py` — Empty response retry + max retries 10 + verbose logging
- `core/framework/graph/node.py` — Input compaction via `_compact_inputs()`, `_estimate_tokens()`, `_get_context_limit()`
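
A rough sketch of the compaction idea, for reviewers. The real logic lives in the `_compact_inputs()` / `_estimate_tokens()` helpers above; the 4-characters-per-token heuristic and the halving step here are assumptions, not the shipped behavior.

```python
# Assumption-level sketch of context-aware input compaction; the shipped
# logic lives in _compact_inputs()/_estimate_tokens() in graph/node.py.
def estimate_tokens(text: str) -> int:
    return len(text) // 4  # rough chars-per-token heuristic (assumed)

def compact_inputs(inputs: dict[str, str], budget_tokens: int) -> dict[str, str]:
    """Progressively truncate the largest values until the total fits the budget."""
    inputs = dict(inputs)
    while sum(estimate_tokens(v) for v in inputs.values()) > budget_tokens:
        key = max(inputs, key=lambda k: len(inputs[k]))
        if len(inputs[key]) < 100:  # nothing meaningful left to trim
            break
        inputs[key] = inputs[key][: len(inputs[key]) // 2] + "...[truncated]"
        print(f"[compaction] truncated '{key}'")
    return inputs
```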

### Quickstart & Setup
- `quickstart.sh` — Interactive bee-themed onboarding wizard with single provider selection
- `~/.hive/configuration.json` — New user config file for the default LLM provider/model

### Fixes
- `core/framework/mcp/agent_builder_server.py` — Removed an unused variable
- `tools/src/aden_tools/tools/hubspot_tool/hubspot_tool.py` — Fixed E501 line length violations

## Test plan
- [ ] Run `make lint` — passes clean
- [ ] Run `./quickstart.sh` and verify the interactive flow works and the config is saved to `~/.hive/configuration.json`
- [ ] Run `./scripts/setup-python.sh` and verify Playwright Chromium installs
- [ ] Run `pytest tests/tools/test_web_scrape_tool.py -v`
- [ ] Run an agent against a JS-heavy site and verify `web_scrape` returns rendered content
- [ ] Set `HUBSPOT_ACCESS_TOKEN` and verify HubSpot tool CRUD operations work
- [ ] Trigger a rate limit and verify `[retry]` logs appear with correct attempt counts
- [ ] Run an agent with large inputs and verify `[compaction]` logs show truncation

🤖 Generated with [Claude Code](https://claude.com/claude-code)

@@ -1,5 +1,5 @@

<p align="center">
<img width="100%" alt="Hive Banner" src="https://asset.acho.io/github/img/banner.gif" />
<img width="100%" alt="Hive Banner" src="https://storage.googleapis.com/aden-prod-assets/website/aden-title-card.png" />
</p>

<p align="center">
@@ -13,152 +13,116 @@
  <a href="docs/i18n/ko.md">한국어</a>
</p>

<p align="center">
  <a href="https://github.com/aden-hive/hive/blob/main/LICENSE"><img src="https://img.shields.io/badge/License-Apache%202.0-blue.svg" alt="Apache 2.0 License" /></a>
  <a href="https://www.ycombinator.com/companies/aden"><img src="https://img.shields.io/badge/Y%20Combinator-Aden-orange" alt="Y Combinator" /></a>
  <a href="https://discord.com/invite/MXE49hrKDk"><img src="https://img.shields.io/discord/1172610340073242735?logo=discord&labelColor=%235462eb&logoColor=%23f5f5f5&color=%235462eb" alt="Discord" /></a>
  <a href="https://x.com/aden_hq"><img src="https://img.shields.io/twitter/follow/teamaden?logo=X&color=%23f5f5f5" alt="Twitter Follow" /></a>
  <a href="https://www.linkedin.com/company/teamaden/"><img src="https://custom-icon-badges.demolab.com/badge/LinkedIn-0A66C2?logo=linkedin-white&logoColor=fff" alt="LinkedIn" /></a>
  <img src="https://img.shields.io/badge/MCP-102_Tools-00ADD8?style=flat-square" alt="MCP" />
</p>
[](https://github.com/adenhq/hive/blob/main/LICENSE)
[](https://www.ycombinator.com/companies/aden)
[](https://hub.docker.com/u/adenhq)
[](https://discord.com/invite/MXE49hrKDk)
[](https://x.com/aden_hq)
[](https://www.linkedin.com/company/teamaden/)

<p align="center">
  <img src="https://img.shields.io/badge/Agent_Harness-Runtime_Layer-ff6600?style=flat-square" alt="Agent Harness" />
  <img src="https://img.shields.io/badge/AI_Agents-Self--Improving-brightgreen?style=flat-square" alt="AI Agents" />
  <img src="https://img.shields.io/badge/Multi--Agent-Systems-blue?style=flat-square" alt="Multi-Agent" />
  <img src="https://img.shields.io/badge/Headless-Development-purple?style=flat-square" alt="Headless" />
  <img src="https://img.shields.io/badge/Goal--Driven-Development-purple?style=flat-square" alt="Goal-Driven" />
  <img src="https://img.shields.io/badge/Human--in--the--Loop-orange?style=flat-square" alt="HITL" />
  <img src="https://img.shields.io/badge/Browser-Use-red?style=flat-square" alt="Browser Use" />
  <img src="https://img.shields.io/badge/Production--Ready-red?style=flat-square" alt="Production" />
</p>
<p align="center">
  <img src="https://img.shields.io/badge/OpenAI-supported-412991?style=flat-square&logo=openai" alt="OpenAI" />
  <img src="https://img.shields.io/badge/Anthropic-supported-d4a574?style=flat-square" alt="Anthropic" />
  <img src="https://img.shields.io/badge/Google_Gemini-supported-4285F4?style=flat-square&logo=google" alt="Gemini" />
  <img src="https://img.shields.io/badge/MCP-19_Tools-00ADD8?style=flat-square" alt="MCP" />
</p>

<p align="center"><em>The agent harness for production workloads — state management, failure recovery, observability, and human oversight so your agents actually run.</em></p>

## Overview

OpenHive is a zero-setup, model-agnostic execution harness that dynamically generates multi-agent topologies to tackle complex, long-running business workflows without requiring any orchestration boilerplate. Define your objective, and the runtime compiles a strict, graph-based execution DAG that safely coordinates specialized agents to execute concurrent tasks in parallel. Backed by persistent, role-based memory that evolves with your project's context, OpenHive ensures deterministic fault tolerance, deep state observability, and seamless asynchronous execution across whichever underlying LLMs you plug in.

## Features

- ✅ Multi-Agent Coordination for parallel task execution
- ✅ Graph-based execution for recurring and complex processes
- ✅ Role-based memory that evolves with your projects
- ✅ Zero Setup - No technical configuration required
- ✅ General Compute Use and Browser Use with Native Extension
- ✅ Custom Model Support
Build reliable, self-improving AI agents without hardcoding workflows. Define your goal through conversation with a coding agent, and the framework generates a node graph with dynamically created connection code. When things break, the framework captures failure data, evolves the agent through the coding agent, and redeploys. Built-in human-in-the-loop nodes, credential management, and real-time monitoring give you control without sacrificing adaptability.

Visit [adenhq.com](https://adenhq.com) for complete documentation, examples, and guides.

Visit [HoneyComb](http://honeycomb.open-hive.com/) to see which jobs are being automated by AI. It's a stock market for jobs, driven by our community's AI agent progress. You can long and short jobs (with no real money, just compute tokens) based on how much you think a job will be replaced by AI.
## What is Aden

https://github.com/user-attachments/assets/bf10edc3-06ba-48b6-98ba-d069b15fb69d
<p align="center">
<img width="100%" alt="Aden Architecture" src="docs/assets/aden-architecture-diagram.jpg" />
</p>

Aden is a platform for building, deploying, operating, and adapting AI agents:

## Who Is Hive For?

Hive is the multi-agent harness layer for teams moving AI agents from prototype to production. Single agents like Openclaw and Cowork can finish personal jobs pretty well but lack the rigor to fulfill business processes.

Hive is a good fit if you:

- Want AI agents that **execute real business processes**, not demos
- Need a **runtime that handles state, recovery, and parallel execution** at scale
- Need **self-healing and adaptive agents** that improve over time
- Require **human-in-the-loop control**, observability, and cost limits
- Plan to run agents in **production** where uptime, cost, and auditability matter

Hive may not be the best fit if you're only experimenting with simple agent chains or one-off scripts.

## When Should You Use Hive?

Use Hive when the bottleneck is no longer the model but the harness around it:

- Long-running agents that need **state persistence and crash recovery**
- Production workloads requiring **cost enforcement, observability, and audit trails**
- Agents that **self-heal** through failure capture and graph evolution
- Multi-agent coordination with **session isolation and shared buffers**
- A framework that **scales with model improvements** rather than fighting them
- **Build** - A Coding Agent generates specialized Worker Agents (Sales, Marketing, Ops) from natural language goals
- **Deploy** - Headless deployment with CI/CD integration and full API lifecycle management
- **Operate** - Real-time monitoring, observability, and runtime guardrails keep agents reliable
- **Adapt** - Continuous evaluation, supervision, and adaptation ensure agents improve over time
- **Infra** - Shared memory, LLM integrations, tools, and skills power every agent

## Quick Links

- **[Documentation](https://docs.adenhq.com/)** - Complete guides and API reference
- **[Self-Hosting Guide](https://docs.adenhq.com/getting-started/quickstart)** - Deploy Hive on your infrastructure
- **[Changelog](https://github.com/aden-hive/hive/releases)** - Latest updates and releases
- **[Roadmap](docs/roadmap.md)** - Upcoming features and plans
- **[Report Issues](https://github.com/aden-hive/hive/issues)** - Bug reports and feature requests
- **[Contributing](CONTRIBUTING.md)** - How to contribute and submit PRs
- **[Changelog](https://github.com/adenhq/hive/releases)** - Latest updates and releases
<!-- - **[Roadmap](https://adenhq.com/roadmap)** - Upcoming features and plans -->
- **[Report Issues](https://github.com/adenhq/hive/issues)** - Bug reports and feature requests

## Quick Start

### Prerequisites

- Python 3.11+ for agent development
- An LLM provider that powers the agents
- **ripgrep (optional, recommended on Windows):** The `search_files` tool uses ripgrep for faster file search. If not installed, a Python fallback is used. On Windows: `winget install BurntSushi.ripgrep` or `scoop install ripgrep`

> **Windows Users:** Native Windows is supported via `quickstart.ps1` and `hive.ps1`. Run these in PowerShell 5.1+. WSL is also an option but not required.
- [Python 3.11+](https://www.python.org/downloads/) for agent development
- Claude Code or Cursor for utilizing agent skills

### Installation

> **Note**
> Hive uses a `uv` workspace layout and is not installed with `pip install`.
> Running `pip install -e .` from the repository root will create a placeholder package and Hive will not function correctly.
> Please use the quickstart script below to set up the environment.

```bash
# Clone the repository
git clone https://github.com/aden-hive/hive.git
git clone https://github.com/adenhq/hive.git
cd hive

# Run quickstart setup (macOS/Linux)
# Run quickstart setup
./quickstart.sh

# Windows (PowerShell)
.\quickstart.ps1
```

This sets up:

- **framework** - Core agent runtime and graph executor (in `core/.venv`)
- **aden_tools** - MCP tools for agent capabilities (in `tools/.venv`)
- **credential store** - Encrypted API key storage (`~/.hive/credentials`)
- **LLM provider** - Interactive default model configuration, including Hive LLM and OpenRouter
- All required Python dependencies with `uv`

- Finally, it will open the Hive interface in your browser

> **Tip:** To reopen the dashboard later, run `hive open` from the project directory.
- All required Python dependencies

### Build Your First Agent

Type the agent you want to build into the home input box. The queen will ask you questions and work out a solution with you.
```bash
# Build an agent using Claude Code
claude> /building-agents-construction

<img width="2500" height="1214" alt="Image" src="https://github.com/user-attachments/assets/1ce19141-a78b-46f5-8d64-dbf987e048f4" />
# Test your agent
claude> /testing-agent

### Use Template Agents
# Run your agent
PYTHONPATH=core:exports python -m your_agent_name run --input '{...}'
```

Click "Try a sample agent" and check out the templates. You can run a template directly or build your own version on top of an existing template.
**[📖 Complete Setup Guide](ENVIRONMENT_SETUP.md)** - Detailed instructions for agent development

### Run Agents
### Cursor IDE Support

Now you can run an agent by selecting one (either an existing agent or an example agent). Click the Run button at the top left, or ask the queen agent to run it for you.
Skills are also available in Cursor. To enable:

<img width="2549" height="1174" alt="Screenshot 2026-03-12 at 9 27 36 PM" src="https://github.com/user-attachments/assets/7c7d30fa-9ceb-4c23-95af-b1caa405547d" />
1. Open Command Palette (`Cmd+Shift+P` / `Ctrl+Shift+P`)
2. Run `MCP: Enable` to enable MCP servers
3. Restart Cursor to load the MCP servers from `.cursor/mcp.json`
4. Type `/` in Agent chat and search for skills (e.g., `/building-agents-construction`)

## Integration
## Features

<a href="https://github.com/aden-hive/hive/tree/main/tools/src/aden_tools/tools"><img width="100%" alt="Integration" src="https://github.com/user-attachments/assets/a1573f93-cf02-4bb8-b3d5-b305b05b1e51" /></a>
Hive is built to be model-agnostic and system-agnostic.
- **Goal-Driven Development** - Define objectives in natural language; the coding agent generates the agent graph and connection code to achieve them
- **Adaptiveness** - Framework captures failures, calibrates according to the objectives, and evolves the agent graph
- **Dynamic Node Connections** - No predefined edges; connection code is generated by any capable LLM based on your goals
- **SDK-Wrapped Nodes** - Every node gets shared memory, local RLM memory, monitoring, tools, and LLM access out of the box
- **Human-in-the-Loop** - Intervention nodes that pause execution for human input with configurable timeouts and escalation
- **Real-time Observability** - WebSocket streaming for live monitoring of agent execution, decisions, and node-to-node communication
- **Cost & Budget Control** - Set spending limits, throttles, and automatic model degradation policies
- **Production-Ready** - Self-hostable, built for scale and reliability

- **LLM flexibility** - Hive Framework supports Anthropic, OpenAI, OpenRouter, Hive LLM, and other hosted or local models through LiteLLM-compatible providers.
- **Business system connectivity** - Hive Framework is designed to connect to all kinds of business systems as tools, such as CRM, support, messaging, data, file, and internal APIs via MCP.
## Why Aden

## Why Hive

As models improve, the upper bound of what agents can do rises — but their reliability and production value are determined by the harness. Hive focuses on generating agents that run real business processes rather than generic agents. Instead of requiring you to manually design workflows, define agent interactions, and handle failures reactively, Hive flips the paradigm: **you describe outcomes, and the system builds itself** — delivering an outcome-driven, adaptive experience with an easy-to-use set of tools and integrations.
Hive focuses on generating agents that run real business processes rather than generic agents. Instead of requiring you to manually design workflows, define agent interactions, and handle failures reactively, Hive flips the paradigm: **you describe outcomes, and the system builds itself** — delivering an outcome-driven, adaptive experience with an easy-to-use set of tools and integrations.

```mermaid
flowchart LR
@@ -192,25 +156,161 @@ flowchart LR
|
||||
style V6 fill:#fff,stroke:#ed8c00,stroke-width:1px,color:#cc5d00
|
||||
```
|
||||
|
||||
### The Aden Advantage
|
||||
|
||||
| Traditional Frameworks | Aden |
|
||||
| -------------------------- | -------------------------------------- |
|
||||
| Hardcode agent workflows | Describe goals in natural language |
|
||||
| Manual graph definition | Auto-generated agent graphs |
|
||||
| Reactive error handling | Outcome-evaluation and adaptiveness |
|
||||
| Static tool configurations | Dynamic SDK-wrapped nodes |
|
||||
| Separate monitoring setup | Built-in real-time observability |
|
||||
| DIY budget management | Integrated cost controls & degradation |
|
||||
|
||||
### How It Works
|
||||
|
||||
1. **[Define Your Goal](docs/key_concepts/goals_outcome.md)** → Describe what you want to achieve in plain English
|
||||
2. **Coding Agent Generates** → Creates the [agent graph](docs/key_concepts/graph.md), connection code, and test cases
|
||||
3. **[Workers Execute](docs/key_concepts/worker_agent.md)** → SDK-wrapped nodes run with full observability and tool access
|
||||
1. **Define Your Goal** → Describe what you want to achieve in plain English
|
||||
2. **Coding Agent Generates** → Creates the agent graph, connection code, and test cases
|
||||
3. **Workers Execute** → SDK-wrapped nodes run with full observability and tool access
|
||||
4. **Control Plane Monitors** → Real-time metrics, budget enforcement, policy management
|
||||
5. **[Adaptiveness](docs/key_concepts/evolution.md)** → On failure, the system evolves the graph and redeploys automatically
|
||||
5. **Adaptiveness** → On failure, the system evolves the graph and redeploys automatically
|
||||
|
||||
## Run pre-built Agents (Coming Soon)
|
||||
|
||||
### Run a sample agent
|
||||
Aden Hive provides a list of featured agents that you can use and build on top of.
|
||||
|
||||
### Run an agent shared by others
|
||||
Put the agent in `exports/` and run `PYTHONPATH=core:exports python -m your_agent_name run --input '{...}'`
|
||||
|
||||
|
||||
For building and running goal-driven agents with the framework:
|
||||
|
||||
```bash
|
||||
# One-time setup
|
||||
./quickstart.sh
|
||||
|
||||
# This sets up:
|
||||
# - framework package (core runtime)
|
||||
# - aden_tools package (MCP tools)
|
||||
# - All Python dependencies
|
||||
|
||||
# Build new agents using Claude Code skills
|
||||
claude> /building-agents-construction
|
||||
|
||||
# Test agents
|
||||
claude> /testing-agent
|
||||
|
||||
# Run agents
|
||||
PYTHONPATH=core:exports python -m agent_name run --input '{...}'
|
||||
```
|
||||
|
||||
See [ENVIRONMENT_SETUP.md](ENVIRONMENT_SETUP.md) for complete setup instructions.
|
||||
|
||||
## Documentation
|
||||
|
||||
- **[Developer Guide](docs/developer-guide.md)** - Comprehensive guide for developers
|
||||
- [Getting Started](docs/getting-started.md) - Quick setup instructions
|
||||
- [Configuration Guide](docs/configuration.md) - All configuration options
|
||||
- [Architecture Overview](docs/architecture/README.md) - System design and structure
|
||||
|
||||
## Roadmap
|
||||
|
||||
Aden Hive Agent Framework aims to help developers build outcome-oriented, self-adaptive agents. See [ROADMAP.md](ROADMAP.md) for details.
|
||||
|
||||
```mermaid
|
||||
flowchart TD
|
||||
subgraph Foundation
|
||||
direction LR
|
||||
subgraph arch["Architecture"]
|
||||
a1["Node-Based Architecture"]:::done
|
||||
a2["Python SDK"]:::done
|
||||
a3["LLM Integration"]:::done
|
||||
a4["Communication Protocol"]:::done
|
||||
end
|
||||
subgraph ca["Coding Agent"]
|
||||
b1["Goal Creation Session"]:::done
|
||||
b2["Worker Agent Creation"]
|
||||
b3["MCP Tools"]:::done
|
||||
end
|
||||
subgraph wa["Worker Agent"]
|
||||
c1["Human-in-the-Loop"]:::done
|
||||
c2["Callback Handlers"]:::done
|
||||
c3["Intervention Points"]:::done
|
||||
c4["Streaming Interface"]
|
||||
end
|
||||
subgraph cred["Credentials"]
|
||||
d1["Setup Process"]:::done
|
||||
d2["Pluggable Sources"]:::done
|
||||
d3["Enterprise Secrets"]
|
||||
d4["Integration Tools"]:::done
|
||||
end
|
||||
subgraph tools["Tools"]
|
||||
e1["File Use"]:::done
|
||||
e2["Memory STM/LTM"]:::done
|
||||
e3["Web Search/Scraper"]:::done
|
||||
e4["CSV/PDF"]:::done
|
||||
e5["Excel/Email"]
|
||||
end
|
||||
subgraph core["Core"]
|
||||
f1["Eval System"]
|
||||
f2["Pydantic Validation"]:::done
|
||||
f3["Documentation"]:::done
|
||||
f4["Adaptiveness"]
|
||||
f5["Sample Agents"]
|
||||
end
|
||||
end
|
||||
|
||||
subgraph Expansion
|
||||
direction LR
|
||||
subgraph intel["Intelligence"]
|
||||
g1["Guardrails"]
|
||||
g2["Streaming Mode"]
|
||||
g3["Image Generation"]
|
||||
g4["Semantic Search"]
|
||||
end
|
||||
subgraph mem["Memory Iteration"]
|
||||
h1["Message Model & Sessions"]
|
||||
h2["Storage Migration"]
|
||||
h3["Context Building"]
|
||||
h4["Proactive Compaction"]
|
||||
h5["Token Tracking"]
|
||||
end
|
||||
subgraph evt["Event System"]
|
||||
i1["Event Bus for Nodes"]
|
||||
end
|
||||
subgraph cas["Coding Agent Support"]
|
||||
j1["Claude Code"]
|
||||
j2["Cursor"]
|
||||
j3["Opencode"]
|
||||
j4["Antigravity"]
|
||||
end
|
||||
subgraph plat["Platform"]
|
||||
k1["JavaScript/TypeScript SDK"]
|
||||
k2["Custom Tool Integrator"]
|
||||
k3["Windows Support"]
|
||||
end
|
||||
subgraph dep["Deployment"]
|
||||
l1["Self-Hosted"]
|
||||
l2["Cloud Services"]
|
||||
l3["CI/CD Pipeline"]
|
||||
end
|
||||
subgraph tmpl["Templates"]
|
||||
m1["Sales Agent"]
|
||||
m2["Marketing Agent"]
|
||||
m3["Analytics Agent"]
|
||||
m4["Training Agent"]
|
||||
m5["Smart Form Agent"]
|
||||
end
|
||||
end
|
||||
|
||||
classDef done fill:#9e9e9e,color:#fff,stroke:#757575
|
||||
```
|
||||
## Contributing
|
||||
|
||||
We welcome contributions from the community! We’re especially looking for help building tools, integrations, and example agents for the framework ([check #2805](https://github.com/adenhq/hive/issues/2805)). If you’re interested in extending its functionality, this is the perfect place to start. Please see [CONTRIBUTING.md](CONTRIBUTING.md) for guidelines.
|
||||
|
||||
**Important:** Please get assigned to an issue before submitting a PR. Comment on an issue to claim it, and a maintainer will assign you. Issues with reproducible steps and proposals are prioritized. This helps prevent duplicate work.
|
||||
|
||||
1. Find or create an issue and get assigned
|
||||
2. Fork the repository
|
||||
@@ -243,9 +343,13 @@ This project is licensed under the Apache License 2.0 - see the [LICENSE](LICENS
|
||||
|
||||
## Frequently Asked Questions (FAQ)
|
||||
|
||||
**Q: Does Hive depend on LangChain or other agent frameworks?**
|
||||
|
||||
No. Hive is built from the ground up with no dependencies on LangChain, CrewAI, or other agent frameworks. The framework is designed to be lean and flexible, generating agent graphs dynamically rather than relying on predefined components.
|
||||
|
||||
**Q: What LLM providers does Hive support?**
|
||||
|
||||
Hive supports 100+ LLM providers through LiteLLM integration, including OpenAI (GPT-4, GPT-4o), Anthropic (Claude models), Google Gemini, DeepSeek, Mistral, Groq, OpenRouter, and Hive LLM. Simply set the appropriate API key environment variable and specify the model name. See [docs/configuration.md](docs/configuration.md) for provider-specific configuration examples.
|
||||
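As an illustration of the provider mechanism, here is a sketch using LiteLLM directly rather than a framework API; the key and model name are placeholders:

```python
# LiteLLM infers the provider from the model string and reads the matching
# API key from the environment (e.g. ANTHROPIC_API_KEY for Claude models).
import os
from litellm import completion

os.environ["ANTHROPIC_API_KEY"] = "sk-..."  # placeholder key

response = completion(
    model="claude-3-5-sonnet-20240620",
    messages=[{"role": "user", "content": "Hello"}],
)
print(response.choices[0].message.content)
```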
|
||||
**Q: Can I use Hive with local AI models like Ollama?**
|
||||
|
||||
@@ -253,21 +357,37 @@ Yes! Hive supports local models through LiteLLM. Simply use the model name forma
|
||||
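A minimal sketch of the local-model path, assuming LiteLLM's `ollama/<model>` naming convention and a local Ollama server on its default port:

```python
from litellm import completion

response = completion(
    model="ollama/llama3",              # route to a local Ollama model
    messages=[{"role": "user", "content": "What is 2 + 2?"}],
    api_base="http://localhost:11434",  # default Ollama endpoint
)
print(response.choices[0].message.content)
```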
|
||||
**Q: What makes Hive different from other agent frameworks?**
|
||||
|
||||
Hive is an agent harness, not just an orchestration framework. It provides the production runtime layer — session isolation, checkpoint-based crash recovery, cost enforcement, real-time observability, and human-in-the-loop controls — that makes agents reliable enough to run real workloads. On top of that, Hive generates your entire agent system from natural language goals and automatically [evolves the graph](docs/key_concepts/evolution.md) when agents fail. The combination of a robust harness with self-improving generation is what sets Hive apart.
|
||||
|
||||
**Q: Is Hive open-source?**
|
||||
|
||||
Yes, Hive is fully open-source under the Apache License 2.0. We actively encourage community contributions and collaboration.
|
||||
|
||||
**Q: Does Hive collect data from users?**
|
||||
|
||||
Hive collects telemetry data for monitoring and observability purposes, including token usage, latency metrics, and cost tracking. Content capture (prompts and responses) is configurable and stored with team-scoped data isolation. All data stays within your infrastructure when self-hosted.
|
||||
|
||||
**Q: What deployment options does Hive support?**
|
||||
|
||||
Hive supports self-hosted deployments via Python packages. See the [Environment Setup Guide](ENVIRONMENT_SETUP.md) for installation instructions. Cloud deployment options and Kubernetes-ready configurations are on the roadmap.
|
||||
|
||||
**Q: Can Hive handle complex, production-scale use cases?**
|
||||
|
||||
Yes. Hive is explicitly designed for production environments with features like automatic failure recovery, real-time observability, cost controls, and horizontal scaling support. The framework handles both simple automations and complex multi-agent workflows.
|
||||
|
||||
**Q: Does Hive support human-in-the-loop workflows?**
|
||||
|
||||
Yes, Hive fully supports [human-in-the-loop](docs/key_concepts/graph.md#human-in-the-loop) workflows through intervention nodes that pause execution for human input. These include configurable timeouts and escalation policies, allowing seamless collaboration between human experts and AI agents.
|
||||
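A minimal sketch of such an intervention point, reusing the `WorkflowBuilder`/`add_node` API shown in this repo's MCP docs; the import path and the exact keyword names are assumptions:

```python
from framework.graph import WorkflowBuilder  # import path is an assumption

builder = WorkflowBuilder()
builder.add_node(
    node_id="approver",
    name="Human Approval",
    node_type="event_loop",
    system_prompt="Summarize the draft and ask the reviewer to approve or reject.",
    client_facing=True,  # pause execution here and wait for human input
    input_keys=["draft"],
    output_keys=["approved"],
)
```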
|
||||
**Q: What monitoring and debugging tools does Hive provide?**
|
||||
|
||||
Hive includes comprehensive observability features: real-time WebSocket streaming for live agent execution monitoring, TimescaleDB-powered analytics for cost and performance metrics, health check endpoints for Kubernetes integration, and MCP tools for agent execution, including file operations, web search, data processing, and more.
|
||||
|
||||
**Q: What programming languages does Hive support?**
|
||||
|
||||
The Hive framework is built in Python. A JavaScript/TypeScript SDK is on the roadmap.
|
||||
|
||||
**Q: Can Hive agents interact with external tools and APIs?**
|
||||
|
||||
Yes. Aden's SDK-wrapped nodes provide built-in tool access, and the framework supports flexible tool ecosystems. Agents can integrate with external APIs, databases, and services through the node architecture.
|
||||
|
||||
@@ -277,21 +397,23 @@ Hive provides granular budget controls including spending limits, throttles, and
|
||||
|
||||
**Q: Where can I find examples and documentation?**
|
||||
|
||||
Visit [docs.adenhq.com](https://docs.adenhq.com/) for complete guides, API reference, and getting started tutorials. The repository also includes documentation in the `docs/` folder and a comprehensive [developer guide](docs/developer-guide.md).
|
||||
|
||||
**Q: How can I contribute to Aden?**
|
||||
|
||||
Contributions are welcome! Fork the repository, create your feature branch, implement your changes, and submit a pull request. See [CONTRIBUTING.md](CONTRIBUTING.md) for detailed guidelines.
|
||||
|
||||
**Q: When will my team start seeing results from Aden's adaptive agents?**
|
||||
Aden's adaptation loop begins working from the first execution. When an agent fails, the framework captures the failure data, helping developers evolve the agent graph through the coding agent. How quickly this translates to measurable results depends on the complexity of your use case, the quality of your goal definitions, and the volume of executions generating feedback.
|
||||
|
||||
**Q: How does Hive compare to other agent frameworks?**
|
||||
|
||||
Hive focuses on generating agents that run real business processes, rather than generic agents. This vision emphasizes outcome-driven design, adaptability, and an easy-to-use set of tools and integrations.
|
||||
|
||||
**Q: Does Aden offer enterprise support?**
|
||||
|
||||
For enterprise inquiries, contact the Aden team through [adenhq.com](https://adenhq.com) or join our [Discord community](https://discord.com/invite/MXE49hrKDk) for support and discussions.
|
||||
|
||||
## Star History

<a href="https://star-history.com/#aden-hive/hive&Date">
  <picture>
    <source media="(prefers-color-scheme: dark)" srcset="https://api.star-history.com/svg?repos=aden-hive/hive&type=Date&theme=dark" />
    <source media="(prefers-color-scheme: light)" srcset="https://api.star-history.com/svg?repos=aden-hive/hive&type=Date" />
    <img alt="Star History Chart" src="https://api.star-history.com/svg?repos=aden-hive/hive&type=Date" />
  </picture>
</a>

---
|
||||
|
||||
|
||||
+299
@@ -0,0 +1,299 @@
|
||||
# Product Roadmap
|
||||
|
||||
Aden Agent Framework aims to help developers build outcome-oriented, self-adaptive agents. You can find our roadmap below.
|
||||
|
||||
```mermaid
|
||||
flowchart TD
|
||||
subgraph Foundation
|
||||
direction LR
|
||||
subgraph arch["Architecture"]
|
||||
a1["Node-Based Architecture"]:::done
|
||||
a2["Python SDK"]:::done
|
||||
a3["LLM Integration"]:::done
|
||||
a4["Communication Protocol"]:::done
|
||||
end
|
||||
subgraph ca["Coding Agent"]
|
||||
b1["Goal Creation Session"]:::done
|
||||
b2["Worker Agent Creation"]
|
||||
b3["MCP Tools"]:::done
|
||||
end
|
||||
subgraph wa["Worker Agent"]
|
||||
c1["Human-in-the-Loop"]:::done
|
||||
c2["Callback Handlers"]:::done
|
||||
c3["Intervention Points"]:::done
|
||||
c4["Streaming Interface"]
|
||||
end
|
||||
subgraph cred["Credentials"]
|
||||
d1["Setup Process"]:::done
|
||||
d2["Pluggable Sources"]:::done
|
||||
d3["Enterprise Secrets"]
|
||||
d4["Integration Tools"]:::done
|
||||
end
|
||||
subgraph tools["Tools"]
|
||||
e1["File Use"]:::done
|
||||
e2["Memory STM/LTM"]:::done
|
||||
e3["Web Search/Scraper"]:::done
|
||||
e4["CSV/PDF"]:::done
|
||||
e5["Excel/Email"]
|
||||
end
|
||||
subgraph core["Core"]
|
||||
f1["Eval System"]
|
||||
f2["Pydantic Validation"]:::done
|
||||
f3["Documentation"]:::done
|
||||
f4["Adaptiveness"]
|
||||
f5["Sample Agents"]
|
||||
end
|
||||
end
|
||||
|
||||
subgraph Expansion
|
||||
direction LR
|
||||
subgraph intel["Intelligence"]
|
||||
g1["Guardrails"]
|
||||
g2["Streaming Mode"]
|
||||
g3["Image Generation"]
|
||||
g4["Semantic Search"]
|
||||
end
|
||||
subgraph mem["Memory Iteration"]
|
||||
h1["Message Model & Sessions"]
|
||||
h2["Storage Migration"]
|
||||
h3["Context Building"]
|
||||
h4["Proactive Compaction"]
|
||||
h5["Token Tracking"]
|
||||
end
|
||||
subgraph evt["Event System"]
|
||||
i1["Event Bus for Nodes"]
|
||||
end
|
||||
subgraph cas["Coding Agent Support"]
|
||||
j1["Claude Code"]
|
||||
j2["Cursor"]
|
||||
j3["Opencode"]
|
||||
j4["Antigravity"]
|
||||
end
|
||||
subgraph plat["Platform"]
|
||||
k1["JavaScript/TypeScript SDK"]
|
||||
k2["Custom Tool Integrator"]
|
||||
k3["Windows Support"]
|
||||
end
|
||||
subgraph dep["Deployment"]
|
||||
l1["Self-Hosted"]
|
||||
l2["Cloud Services"]
|
||||
l3["CI/CD Pipeline"]
|
||||
end
|
||||
subgraph tmpl["Templates"]
|
||||
m1["Sales Agent"]
|
||||
m2["Marketing Agent"]
|
||||
m3["Analytics Agent"]
|
||||
m4["Training Agent"]
|
||||
m5["Smart Form Agent"]
|
||||
end
|
||||
end
|
||||
|
||||
classDef done fill:#9e9e9e,color:#fff,stroke:#757575
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Phase 1: Foundation
|
||||
|
||||
### Backbone Architecture
|
||||
- [ ] **Node-Based Architecture (Agent as a node)**
|
||||
- [x] Object schema definition
|
||||
- [x] Node wrapper SDK
|
||||
- [x] Shared memory access
|
||||
- [ ] Default monitoring hooks
|
||||
- [x] Tool access layer
|
||||
- [x] LLM integration layer (Natively supports all mainstream LLMs through LiteLLM)
|
||||
- [x] Anthropic
|
||||
- [x] OpenAI
|
||||
- [x] Google
|
||||
- [x] **Communication protocol between nodes**
|
||||
- [x] **[Coding Agent] Goal Creation Session** (separate from coding session)
|
||||
- [x] Instruction back and forth
|
||||
- [x] Goal Object schema definition
|
||||
- [x] Ability to generate test cases
|
||||
- [x] Test case validation for worker agent (Outcome driven)
|
||||
- [ ] **[Coding Agent] Worker Agent Creation**
|
||||
- [x] Coding Agent tools
|
||||
- [ ] Use Template Agent as a start
|
||||
- [x] Use our MCP tools
|
||||
- [ ] **[Worker Agent] Human-in-the-Loop**
|
||||
- [x] Worker Agents request with questions and options
|
||||
- [x] Callback Handler System to receive events throughout execution
|
||||
- [x] Tool-Based Intervention Points (tool to pause execution and request human input)
|
||||
- [x] Multiple entrypoints for different event sources (e.g., human input, webhooks)
|
||||
- [ ] Streaming Interface for Real-time Monitoring
|
||||
- [x] Request State Management
|
||||
|
||||
### Credential Management
|
||||
- [x] **Credentials Setup Process**
|
||||
- [x] Install Credential MCP
|
||||
- [x] **Pluggable Credential Sources**
|
||||
- [x] **Abstraction & Local Sources**
|
||||
- [x] Introduce `CredentialSource` base class
|
||||
- [x] Refactor existing logic into `EnvVarSource`
|
||||
- [x] Implementation of Source Priority Chain mechanism
|
||||
- [ ] Foundation unit tests
|
||||
- [ ] **Enterprise Secret Managers**
|
||||
- [x] `VaultSource` (HashiCorp Vault)
|
||||
- [ ] `AWSSecretsSource` (AWS Secrets Manager)
|
||||
- [ ] `AzureKeyVaultSource` (Azure Key Vault)
|
||||
- [ ] Management of optional provider dependencies
|
||||
- [ ] **Advanced Features**
|
||||
- [x] Credential expiration and auto-refresh
|
||||
- [ ] Audit logging for compliance/tracking
|
||||
- [ ] Per-environment configuration support
|
||||
- [ ] **Documentation & DX**
|
||||
- [ ] Comprehensive source documentation
|
||||
- [ ] Example configurations for all providers
|
||||
- [x] **Integration as tools coverage**
|
||||
- [x] Gsuite Tools
|
||||
- [x] Social Media
|
||||
- [ ] Twitter(X)
|
||||
- [x] Github
|
||||
- [ ] Instagram
|
||||
- [ ] SaaS
|
||||
- [ ] Hubspot
|
||||
- [ ] Slack
|
||||
- [ ] Teams
|
||||
- [ ] Zoom
|
||||
- [ ] Stripe
|
||||
- [ ] Salesforce
|
||||
|
||||
> [!IMPORTANT]
|
||||
> **Community Contribution Wanted**: We appreciate help from the community to expand the "Integration as tools" capability. Open an issue for the integration you'd like Hive to support!
|
||||
|
||||
### Essential Tools
|
||||
- [x] **File Use Tool Kit**
|
||||
- [x] **Memory Tools**
|
||||
- [x] STM Layer Tool (state-based short-term memory)
|
||||
- [x] LTM Layer Tool (RLM - long-term memory)
|
||||
- [ ] **Infrastructure Tools**
|
||||
- [x] Runtime Log Tool (logs for coding agent)
|
||||
- [x] Web Search
|
||||
- [x] Web Scraper
|
||||
- [x] CSV tools
|
||||
- [x] PDF tools
|
||||
- [ ] Excel tools
|
||||
- [ ] Email Tools
|
||||
- [ ] Recipe for "Add your own tools"
|
||||
|
||||
### Memory & File System
|
||||
- [x] DB for long-term persistent memory (Filesystem as durable scratchpad pattern)
|
||||
- [x] Session Local memory isolation
|
||||
|
||||
### Eval System (Basic)
|
||||
- [x] Test driven - run test cases for every agent iteration
|
||||
- [ ] Failure recording mechanism
|
||||
- [ ] SDK for defining failure conditions
|
||||
- [ ] Basic observability hooks
|
||||
- [ ] User-driven log analysis (OSS approach)
|
||||
|
||||
### Data Validation
|
||||
- [x] Natively Support data validation of LLMs output with Pydantic
|
||||
|
||||
### Developer Experience
|
||||
- [ ] **MVP Features**
|
||||
- [ ] Debugging mode
|
||||
- [ ] CLI tools for memory management
|
||||
- [ ] CLI tools for credential management
|
||||
- [ ] **MVP Resources & Documentation**
|
||||
- [x] Quick start guide
|
||||
- [x] Goal creation guide
|
||||
- [x] Agent creation guide
|
||||
- [x] GitHub Page setup
|
||||
- [x] README with examples
|
||||
- [x] Contributing guidelines
|
||||
- [ ] Introduction Video
|
||||
|
||||
### Adaptiveness
|
||||
- [ ] Runtime data feedback loop
|
||||
- [ ] Instant Developer Feedback for improvement
|
||||
|
||||
### Sample Agents
|
||||
- [ ] Knowledge Agent
|
||||
- [ ] Blog Writer Agent
|
||||
- [ ] SDR Agent
|
||||
|
||||
---
|
||||
|
||||
## Phase 2: Expansion
|
||||
|
||||
### Basic Guardrails
|
||||
- [ ] Support Basic Monitoring from Agent node SDK
|
||||
- [ ] SDK guardrail implementation (in node)
|
||||
- [ ] Guardrail type support (Determined Condition as Guardrails)
|
||||
|
||||
### Agent Capability
|
||||
- [ ] Streaming mode support
|
||||
- [ ] Image Generation support
|
||||
- [ ] Ability to understand end-user input images and flat files
|
||||
|
||||
### Event-loop For Nodes (Opencode-style)
|
||||
- [ ] **Event bus**
|
||||
|
||||
### Memory System Iteration
|
||||
- [ ] **Message Model & Session Management**
|
||||
- [ ] Introduce `Message` class with structured content types
|
||||
- [ ] Implement `Session` classes for conversation state
|
||||
- [ ] **Storage Migration**
|
||||
- [ ] Implement granular per-message file persistence (`/message/[agentID]/...`)
|
||||
- [ ] Migrate from monolithic run storage
|
||||
- [ ] **Context Building & Conversation Loop**
|
||||
- [ ] Implement `Message.stream(sessionID)`
|
||||
- [ ] Update `LLMNode.execute()` for full context building
|
||||
- [ ] Implement `Message.toModelMessages()` conversion
|
||||
- [ ] **Proactive Compaction**
|
||||
- [ ] Implement proactive overflow detection
|
||||
- [ ] Develop backward-scanning pruning strategy (e.g., clearing old tool outputs)
|
||||
- [ ] **Enhanced Token Tracking**
|
||||
- [ ] Extend `LLMResponse` to track reasoning and cache tokens
|
||||
- [ ] Integrate granular token metrics into compaction logic
|
||||
|
||||
### Coding Agent Support
|
||||
- [ ] Claude Code
|
||||
- [ ] Cursor
|
||||
- [ ] Opencode
|
||||
- [ ] Antigravity
|
||||
|
||||
### File System Enhancement
|
||||
- [ ] Semantic Search integration
|
||||
- [ ] Interactive File System in product (frontend integration)
|
||||
|
||||
### More Worker Tools
|
||||
- [ ] Custom Tool Integrator
|
||||
- [ ] Integration as a tool (Credential Store & Support)
|
||||
- [ ] **Core Agent Tools**
|
||||
- [ ] Node Discovery Tool (find other agents in the graph)
|
||||
- [ ] HITL Tool (pause execution for human approval)
|
||||
- [ ] Wake-up Tool (resume agent tasks)
|
||||
|
||||
### Deployment (Self-Hosted)
|
||||
- [ ] Docker container standardization
|
||||
- [ ] Headless backend execution
|
||||
- [ ] Exposed API for frontend attachment
|
||||
- [ ] Local monitoring & observability
|
||||
- [ ] Basic lifecycle APIs (Start, Stop, Pause, Resume)
|
||||
|
||||
### Deployment (Cloud)
|
||||
- [ ] Cloud Service Options
|
||||
- [ ] Support deployment to 3rd-party platforms
|
||||
- [ ] Self-deploy + orchestrator connection
|
||||
- [ ] **CI/CD Pipeline**
|
||||
- [ ] Automated test execution
|
||||
- [ ] Agent version control
|
||||
- [ ] All tests must pass for deployment
|
||||
|
||||
### Developer Experience Enhancement
|
||||
- [ ] Tool usage documentation
|
||||
- [ ] Discord Support Channel
|
||||
|
||||
### More Agent Templates
|
||||
- [ ] GTM Sales Agent (workflow)
|
||||
- [ ] GTM Marketing Agent (workflow)
|
||||
- [ ] Analytics Agent
|
||||
- [ ] Training Agent
|
||||
- [ ] Smart Entry / Form Agent (self-evolution emphasis)
|
||||
|
||||
### Cross-Platform
|
||||
- [ ] JavaScript / TypeScript Version SDK
|
||||
- [ ] Better Windows support
|
||||
+2
-2
@@ -39,8 +39,8 @@ We consider security research conducted in accordance with this policy to be:
|
||||
## Security Best Practices for Users
|
||||
|
||||
1. **Keep Updated**: Always run the latest version
|
||||
2. **Secure Configuration**: Review your `~/.hive/configuration.json`, `.mcp.json`, and environment variable settings, especially in production
|
||||
3. **Environment Variables**: Never commit `.env` files or any configuration files that contain secrets
|
||||
4. **Network Security**: Use HTTPS in production, configure firewalls appropriately
|
||||
5. **Database Security**: Use strong passwords, limit network access
|
||||
|
||||
|
||||
@@ -1,5 +1,10 @@
|
||||
{
|
||||
"mcpServers": {
|
||||
"agent-builder": {
|
||||
"command": "python",
|
||||
"args": ["-m", "framework.mcp.agent_builder_server"],
|
||||
"cwd": "core"
|
||||
},
|
||||
"tools": {
|
||||
"command": "python",
|
||||
"args": ["-m", "aden_tools.mcp_server", "--stdio"],
|
||||
|
||||
@@ -82,7 +82,7 @@ Register an MCP server as a tool source for your agent.
|
||||
"example_tool"
|
||||
],
|
||||
"total_mcp_servers": 1,
|
||||
"note": "MCP server 'tools' registered with 6 tools. These tools can now be used in event_loop nodes."
|
||||
"note": "MCP server 'tools' registered with 6 tools. These tools can now be used in llm_tool_use nodes."
|
||||
}
|
||||
```
|
||||
|
||||
@@ -149,7 +149,7 @@ List tools available from registered MCP servers.
|
||||
]
|
||||
},
|
||||
"total_tools": 6,
|
||||
"note": "Use these tool names in the 'tools' parameter when adding event_loop nodes"
|
||||
"note": "Use these tool names in the 'tools' parameter when adding llm_tool_use nodes"
|
||||
}
|
||||
```
|
||||
|
||||
@@ -246,7 +246,7 @@ Here's a complete workflow for building an agent with MCP tools:
|
||||
"node_id": "web-searcher",
|
||||
"name": "Web Search",
|
||||
"description": "Search the web for information",
|
||||
"node_type": "event_loop",
|
||||
"node_type": "llm_tool_use",
|
||||
"input_keys": "[\"query\"]",
|
||||
"output_keys": "[\"search_results\"]",
|
||||
"system_prompt": "Search for {query} using the web_search tool",
|
||||
|
||||
@@ -6,7 +6,7 @@ This guide explains how to integrate Model Context Protocol (MCP) servers with t
|
||||
|
||||
The framework provides built-in support for MCP servers, allowing you to:
|
||||
|
||||
- **Register MCP servers** via STDIO, HTTP, Unix socket, or SSE transport
|
||||
- **Register MCP servers** via STDIO or HTTP transport
|
||||
- **Auto-discover tools** from registered servers
|
||||
- **Use MCP tools** seamlessly in your agents
|
||||
- **Manage multiple MCP servers** simultaneously
|
||||
@@ -104,48 +104,6 @@ runner.register_mcp_server(
|
||||
- `url`: Base URL of the MCP server
|
||||
- `headers`: HTTP headers to include (optional)
|
||||
|
||||
### Unix Socket Transport
|
||||
|
||||
Best for same-host inter-process communication with lower overhead than TCP:
|
||||
|
||||
```python
|
||||
runner.register_mcp_server(
|
||||
name="local-ipc-tools",
|
||||
transport="unix",
|
||||
url="http://localhost",
|
||||
socket_path="/tmp/mcp_server.sock",
|
||||
headers={
|
||||
"Authorization": "Bearer token"
|
||||
}
|
||||
)
|
||||
```
|
||||
|
||||
**Configuration:**
|
||||
|
||||
- `url`: Base URL for HTTP requests over the socket (required, e.g., `"http://localhost"`)
|
||||
- `socket_path`: Absolute path to the Unix socket file (required, e.g., `"/tmp/mcp_server.sock"`)
|
||||
- `headers`: HTTP headers to include (optional)
|
||||
|
||||
### SSE Transport
|
||||
|
||||
Best for real-time, event-driven connections using the MCP SDK's SSE client:
|
||||
|
||||
```python
|
||||
runner.register_mcp_server(
|
||||
name="streaming-tools",
|
||||
transport="sse",
|
||||
url="http://localhost:8000/sse",
|
||||
headers={
|
||||
"Authorization": "Bearer token"
|
||||
}
|
||||
)
|
||||
```
|
||||
|
||||
**Configuration:**
|
||||
|
||||
- `url`: SSE endpoint URL (required, e.g., `"http://localhost:8000/sse"`)
|
||||
- `headers`: HTTP headers for the SSE connection (optional)
|
||||
|
||||
## Using MCP Tools in Agents
|
||||
|
||||
Once registered, MCP tools are available just like any other tool:
|
||||
@@ -161,7 +119,7 @@ builder = WorkflowBuilder()
|
||||
builder.add_node(
|
||||
node_id="researcher",
|
||||
name="Web Researcher",
|
||||
node_type="event_loop",
|
||||
node_type="llm_tool_use",
|
||||
system_prompt="Research the topic using web_search",
|
||||
tools=["web_search"], # Tool from tools MCP server
|
||||
input_keys=["topic"],
|
||||
@@ -179,7 +137,7 @@ Tools from MCP servers can be referenced in your agent.json just like built-in t
|
||||
{
|
||||
"id": "searcher",
|
||||
"name": "Web Searcher",
|
||||
"node_type": "event_loop",
|
||||
"node_type": "llm_tool_use",
|
||||
"system_prompt": "Search for information about {topic}",
|
||||
"tools": ["web_search", "web_scrape"],
|
||||
"input_keys": ["topic"],
|
||||
@@ -300,32 +258,7 @@ runner.register_mcp_server(
|
||||
)
|
||||
```
|
||||
|
||||
### 3. Use Unix Socket for Same-Host IPC
|
||||
|
||||
When both the agent and MCP server run on the same machine, Unix sockets avoid TCP overhead:
|
||||
|
||||
```python
|
||||
runner.register_mcp_server(
|
||||
name="fast-local-tools",
|
||||
transport="unix",
|
||||
url="http://localhost",
|
||||
socket_path="/tmp/mcp_server.sock"
|
||||
)
|
||||
```
|
||||
|
||||
### 4. Use SSE for Streaming and Real-Time Tools
|
||||
|
||||
SSE transport maintains a persistent connection, ideal for event-driven servers:
|
||||
|
||||
```python
|
||||
runner.register_mcp_server(
|
||||
name="realtime-tools",
|
||||
transport="sse",
|
||||
url="http://realtime-server:8000/sse"
|
||||
)
|
||||
```
|
||||
|
||||
### 5. Handle Cleanup
|
||||
### 3. Handle Cleanup
|
||||
|
||||
Always clean up MCP connections when done:
|
||||
|
||||
@@ -347,7 +280,7 @@ async with AgentRunner.load("exports/my-agent") as runner:
|
||||
# Automatic cleanup
|
||||
```
|
||||
|
||||
### 6. Tool Name Conflicts
|
||||
### 4. Tool Name Conflicts
|
||||
|
||||
If multiple MCP servers provide tools with the same name, the last registered server wins. To avoid conflicts:
|
||||
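A minimal sketch, assuming the `register_mcp_server` API shown above (`runner` is the same `AgentRunner` used in the earlier examples):

```python
# Each server gets a distinct registration name, but tool names themselves share
# a flat namespace: if both servers expose `web_search`, the last registration
# wins. Prefer renaming tools server-side, or register only the server whose
# version you want.
runner.register_mcp_server(name="search-tools", transport="http", url="http://search-server:8080")
runner.register_mcp_server(name="scrape-tools", transport="http", url="http://scrape-server:8080")
```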
|
||||
@@ -382,24 +315,6 @@ If HTTP transport fails:
|
||||
2. Check firewall settings
|
||||
3. Verify the URL and port are correct
|
||||
|
||||
### Unix Socket Not Connecting
|
||||
|
||||
If Unix socket transport fails:
|
||||
|
||||
1. Verify the socket file exists: `ls -la /tmp/mcp_server.sock`
|
||||
2. Check file permissions on the socket
|
||||
3. Ensure no other process has locked the socket
|
||||
4. Verify the `url` field is set (e.g., `"http://localhost"`)
|
||||
|
||||
### SSE Connection Issues
|
||||
|
||||
If SSE transport fails:
|
||||
|
||||
1. Verify the server supports SSE at the given URL
|
||||
2. Check that the `mcp` Python package is installed (`pip install mcp`)
|
||||
3. Ensure the SSE endpoint is accessible: `curl http://localhost:8000/sse`
|
||||
4. Check for firewall or proxy issues blocking long-lived connections
|
||||
|
||||
## Example: Full Agent with MCP Tools
|
||||
|
||||
Here's a complete example of an agent that uses MCP tools:
|
||||
|
||||
+81
-27
@@ -1,16 +1,17 @@
|
||||
# MCP Server Guide - Agent Building Tools
|
||||
# MCP Server Guide - Agent Builder
|
||||
|
||||
> **Note:** The standalone `agent-builder` MCP server (`framework.mcp.agent_builder_server`) has been replaced. Agent building is now done via the `coder-tools` server's `initialize_and_build_agent` tool, with underlying logic in `tools/coder_tools_server.py`.
|
||||
|
||||
This guide covers the MCP tools available for building goal-driven agents.
|
||||
This guide covers the MCP (Model Context Protocol) server for building goal-driven agents.
|
||||
|
||||
## Setup
|
||||
|
||||
### Quick Setup
|
||||
|
||||
```bash
|
||||
# Run the quickstart script (recommended)
|
||||
./quickstart.sh
|
||||
# Using the setup script (recommended)
|
||||
python setup_mcp.py
|
||||
|
||||
# Or using bash
|
||||
./setup_mcp.sh
|
||||
```
|
||||
|
||||
### Manual Configuration
|
||||
@@ -20,10 +21,10 @@ Add to your MCP client configuration (e.g., Claude Desktop):
|
||||
```json
|
||||
{
|
||||
"mcpServers": {
|
||||
"coder-tools": {
|
||||
"command": "uv",
|
||||
"args": ["run", "coder_tools_server.py", "--stdio"],
|
||||
"cwd": "/path/to/hive/tools"
|
||||
"agent-builder": {
|
||||
"command": "python",
|
||||
"args": ["-m", "framework.mcp.agent_builder_server"],
|
||||
"cwd": "/path/to/goal-agent"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -102,20 +103,31 @@ Add a processing node to the agent graph.
|
||||
- `node_id` (string, required): Unique node identifier
|
||||
- `name` (string, required): Human-readable name
|
||||
- `description` (string, required): What this node does
|
||||
- `node_type` (string, required): Must be `event_loop` (the only valid type)
|
||||
- `node_type` (string, required): One of: `llm_generate`, `llm_tool_use`, `router`, `function`
|
||||
- `input_keys` (string, required): JSON array of input variable names
|
||||
- `output_keys` (string, required): JSON array of output variable names
|
||||
- `system_prompt` (string, optional): System prompt for the LLM
|
||||
- `tools` (string, optional): JSON array of tool names
|
||||
- `client_facing` (boolean, optional): Set to true for human-in-the-loop interaction
|
||||
- `system_prompt` (string, optional): System prompt for LLM nodes
|
||||
- `tools` (string, optional): JSON array of tool names for tool_use nodes
|
||||
- `routes` (string, optional): JSON object of route mappings for router nodes
|
||||
|
||||
**Node Type:**
|
||||
**Node Types:**
|
||||
|
||||
**event_loop**: LLM-powered node with self-correction loop
|
||||
- Requires: `system_prompt`
|
||||
- Optional: `tools` (array of tool names, e.g., `["web_search", "web_fetch"]`)
|
||||
- Optional: `client_facing` (set to true for HITL / user interaction)
|
||||
- Supports: iterative refinement, judge-based evaluation, tool use, streaming
|
||||
1. **llm_generate**: Uses LLM to generate output from inputs
|
||||
- Requires: `system_prompt`
|
||||
- Tools: Not used
|
||||
|
||||
2. **llm_tool_use**: Uses LLM with tools to accomplish tasks
|
||||
- Requires: `system_prompt`, `tools`
|
||||
- Tools: Array of tool names (e.g., `["web_search", "web_fetch"]`)
|
||||
|
||||
3. **router**: LLM-powered routing to different paths
|
||||
- Requires: `system_prompt`, `routes`
|
||||
- Routes: Object mapping route names to target node IDs
|
||||
- Example: `{"pass": "success_node", "fail": "retry_node"}`
|
||||
|
||||
4. **function**: Executes a pre-defined function
|
||||
- System prompt describes the function behavior
|
||||
- No LLM calls, pure computation
|
||||
|
||||
**Example:**
|
||||
```json
|
||||
@@ -123,7 +135,7 @@ Add a processing node to the agent graph.
|
||||
"node_id": "search_sources",
|
||||
"name": "Search Sources",
|
||||
"description": "Searches for relevant sources on the topic",
|
||||
"node_type": "event_loop",
|
||||
"node_type": "llm_tool_use",
|
||||
"input_keys": "[\"topic\", \"search_queries\"]",
|
||||
"output_keys": "[\"sources\", \"source_count\"]",
|
||||
"system_prompt": "Search for sources using the provided queries...",
|
||||
@@ -186,7 +198,7 @@ Export the validated graph as an agent specification.
|
||||
|
||||
**What it does:**
|
||||
1. Validates the graph
|
||||
2. Validates edge connectivity
|
||||
2. Auto-generates missing edges from router routes
|
||||
3. Writes files to disk:
|
||||
- `exports/{agent-name}/agent.json` - Full agent specification
|
||||
- `exports/{agent-name}/README.md` - Auto-generated documentation
|
||||
@@ -240,6 +252,47 @@ Test the complete agent graph with sample inputs.
|
||||
|
||||
---
|
||||
|
||||
### Evaluation Rules
|
||||
|
||||
#### `add_evaluation_rule`
|
||||
Add a rule for the HybridJudge to evaluate node outputs.
|
||||
|
||||
**Parameters:**
|
||||
- `rule_id` (string, required): Unique rule identifier
|
||||
- `description` (string, required): What this rule checks
|
||||
- `condition` (string, required): Python expression to evaluate
|
||||
- `action` (string, required): Action to take: `accept`, `retry`, `escalate`
|
||||
- `priority` (integer, optional): Rule priority (default: 0)
|
||||
- `feedback_template` (string, optional): Feedback message template
|
||||
|
||||
**Condition Examples:**
|
||||
- `'result.get("success") == True'` - Check for success flag
|
||||
- `'result.get("error_type") == "timeout"'` - Check error type
|
||||
- `'len(result.get("data", [])) > 0'` - Check for non-empty data
|
||||
|
||||
**Example:**
|
||||
```json
|
||||
{
|
||||
"rule_id": "timeout_retry",
|
||||
"description": "Retry on timeout errors",
|
||||
"condition": "result.get('error_type') == 'timeout'",
|
||||
"action": "retry",
|
||||
"priority": 10,
|
||||
"feedback_template": "Timeout occurred, retrying..."
|
||||
}
|
||||
```
|
||||
|
||||
#### `list_evaluation_rules`
|
||||
List all configured evaluation rules.
|
||||
|
||||
#### `remove_evaluation_rule`
|
||||
Remove an evaluation rule.
|
||||
|
||||
**Parameters:**
|
||||
- `rule_id` (string, required): Rule to remove
|
||||
|
||||
---
|
||||
|
||||
## Example Workflow
|
||||
|
||||
Here's a complete workflow for building a research agent:
|
||||
@@ -267,7 +320,7 @@ add_node(
|
||||
node_id="planner",
|
||||
name="Research Planner",
|
||||
description="Creates research strategy",
|
||||
node_type="event_loop",
|
||||
node_type="llm_generate",
|
||||
input_keys='["topic"]',
|
||||
output_keys='["strategy", "queries"]',
|
||||
system_prompt="Analyze topic and create research plan..."
|
||||
@@ -277,7 +330,7 @@ add_node(
|
||||
node_id="searcher",
|
||||
name="Search Sources",
|
||||
description="Find relevant sources",
|
||||
node_type="event_loop",
|
||||
node_type="llm_tool_use",
|
||||
input_keys='["queries"]',
|
||||
output_keys='["sources"]',
|
||||
system_prompt="Search for sources...",
|
||||
@@ -306,9 +359,10 @@ The exported agent will be saved to `exports/research-agent/`.
|
||||
|
||||
1. **Start with the goal**: Define clear success criteria before building nodes
|
||||
2. **Test nodes individually**: Use `test_node` to verify each node works
|
||||
3. **Use conditional edges for branching**: Define condition_expr on edges for decision points
|
||||
4. **Validate early, validate often**: Run `validate_graph` after adding nodes/edges
|
||||
5. **Check exports**: Review the generated README.md to verify your agent structure
|
||||
3. **Use router nodes for branching**: Don't create edges manually for routers - define routes and they'll be auto-generated
|
||||
4. **Add evaluation rules**: Help the judge evaluate outputs deterministically
|
||||
5. **Validate early, validate often**: Run `validate_graph` after adding nodes/edges
|
||||
6. **Check exports**: Review the generated README.md to verify your agent structure
|
||||
|
||||
---
|
||||
|
||||
|
||||
+70
-15
@@ -14,14 +14,69 @@ Framework provides a runtime framework that captures **decisions**, not just act
|
||||
## Installation
|
||||
|
||||
```bash
|
||||
uv pip install -e .
|
||||
pip install -e .
|
||||
```
|
||||
|
||||
## Agent Building
|
||||
## MCP Server Setup
|
||||
|
||||
Agent scaffolding is handled by the `coder-tools` MCP server, which provides the `initialize_and_build_agent` tool and related utilities; both the server and its package-generation logic live in `tools/coder_tools_server.py`.
|
||||
The framework includes an MCP (Model Context Protocol) server for building agents. To set up the MCP server:
|
||||
|
||||
See the [Getting Started Guide](../docs/getting-started.md) for building agents.
|
||||
### Automated Setup
|
||||
|
||||
**Using bash (Linux/macOS):**
|
||||
```bash
|
||||
./setup_mcp.sh
|
||||
```
|
||||
|
||||
**Using Python (cross-platform):**
|
||||
```bash
|
||||
python setup_mcp.py
|
||||
```
|
||||
|
||||
The setup script will:
|
||||
1. Install the framework package
|
||||
2. Install MCP dependencies (mcp, fastmcp)
|
||||
3. Create/verify `.mcp.json` configuration
|
||||
4. Test the MCP server module
|
||||
|
||||
### Manual Setup
|
||||
|
||||
If you prefer manual setup:
|
||||
|
||||
```bash
|
||||
# Install framework
|
||||
pip install -e .
|
||||
|
||||
# Install MCP dependencies
|
||||
pip install mcp fastmcp
|
||||
|
||||
# Test the server
|
||||
python -m framework.mcp.agent_builder_server
|
||||
```
|
||||
|
||||
### Using with MCP Clients
|
||||
|
||||
To use the agent builder with Claude Desktop or other MCP clients, add this to your MCP client configuration:
|
||||
|
||||
```json
|
||||
{
|
||||
"mcpServers": {
|
||||
"agent-builder": {
|
||||
"command": "python",
|
||||
"args": ["-m", "framework.mcp.agent_builder_server"],
|
||||
"cwd": "/path/to/goal-agent"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
The MCP server provides tools for:
|
||||
- Creating agent building sessions
|
||||
- Defining goals with success criteria
|
||||
- Adding nodes (llm_generate, llm_tool_use, router, function)
|
||||
- Connecting nodes with edges
|
||||
- Validating and exporting agent graphs
|
||||
- Testing nodes and full agent graphs
|
||||
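A sketch of that flow as raw MCP tool calls, assuming a generic `mcp` client session; the argument shapes follow the MCP Server Guide and are illustrative, not exhaustive:

```python
from mcp import ClientSession  # any MCP client session with call_tool() works

async def build_agent(session: ClientSession) -> None:
    # Add a node, then validate early and often (see the MCP Server Guide).
    await session.call_tool("add_node", {
        "node_id": "planner",
        "name": "Research Planner",
        "description": "Creates a research strategy",
        "node_type": "llm_generate",
        "input_keys": '["topic"]',
        "output_keys": '["strategy", "queries"]',
        "system_prompt": "Analyze the topic and create a research plan...",
    })
    await session.call_tool("validate_graph", {})
    await session.call_tool("test_node", {"node_id": "planner"})
```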
|
||||
## Quick Start
|
||||
|
||||
@@ -30,14 +85,14 @@ See the [Getting Started Guide](../docs/getting-started.md) for building agents.
|
||||
Run an LLM-powered calculator:
|
||||
|
||||
```bash
|
||||
# Run an exported agent
|
||||
uv run python -m framework run exports/calculator --input '{"expression": "2 + 3 * 4"}'
|
||||
# Single calculation
|
||||
python -m framework calculate "2 + 3 * 4"
|
||||
|
||||
# Interactive shell session
|
||||
uv run python -m framework shell exports/calculator
|
||||
# Interactive mode
|
||||
python -m framework interactive
|
||||
|
||||
# Show agent info
|
||||
uv run python -m framework info exports/calculator
|
||||
# Analyze runs with Builder
|
||||
python -m framework analyze calculator
|
||||
```
|
||||
|
||||
### Using the Runtime
|
||||
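A minimal sketch of programmatic use, assuming the `AgentRunner` context-manager API shown in the MCP integration docs; the `run` method and its input shape are assumptions:

```python
import asyncio
from framework.runner import AgentRunner  # import path is an assumption

async def main() -> None:
    # The async context manager loads the agent and cleans up MCP connections.
    async with AgentRunner.load("exports/calculator") as runner:
        result = await runner.run({"expression": "2 + 3 * 4"})  # hypothetical method
        print(result)

asyncio.run(main())
```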
@@ -81,16 +136,16 @@ Tests are generated using MCP tools (`generate_constraint_tests`, `generate_succ
|
||||
|
||||
```bash
|
||||
# Run tests against an agent
|
||||
uv run python -m framework test-run <agent_path> --goal <goal_id> --parallel 4
|
||||
python -m framework test-run <agent_path> --goal <goal_id> --parallel 4
|
||||
|
||||
# Debug failed tests
|
||||
uv run python -m framework test-debug <agent_path> <test_name>
|
||||
python -m framework test-debug <agent_path> <test_name>
|
||||
|
||||
# List tests for an agent
|
||||
uv run python -m framework test-list <agent_path>
|
||||
# List tests for a goal
|
||||
python -m framework test-list <goal_id>
|
||||
```
|
||||
|
||||
For detailed testing workflows, see [developer-guide.md](../docs/developer-guide.md).
|
||||
|
||||
### Analyzing Agent Behavior with Builder
|
||||
|
||||
|
||||
@@ -1,583 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Antigravity authentication CLI.
|
||||
|
||||
Implements OAuth2 flow for Google's Antigravity Code Assist gateway.
|
||||
Credentials are stored in ~/.hive/antigravity-accounts.json.
|
||||
|
||||
Usage:
|
||||
python -m antigravity_auth auth account add
|
||||
python -m antigravity_auth auth account list
|
||||
python -m antigravity_auth auth account remove <email>
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import secrets
|
||||
import socket
|
||||
import sys
|
||||
import time
|
||||
import urllib.parse
|
||||
import urllib.request
|
||||
import webbrowser
|
||||
from http.server import BaseHTTPRequestHandler, HTTPServer
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
logging.basicConfig(level=logging.INFO, format="%(message)s")
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# OAuth endpoints
|
||||
_OAUTH_AUTH_URL = "https://accounts.google.com/o/oauth2/v2/auth"
|
||||
_OAUTH_TOKEN_URL = "https://oauth2.googleapis.com/token"
|
||||
|
||||
# Scopes for Antigravity/Cloud Code Assist
|
||||
_OAUTH_SCOPES = [
|
||||
"https://www.googleapis.com/auth/cloud-platform",
|
||||
"https://www.googleapis.com/auth/userinfo.email",
|
||||
"https://www.googleapis.com/auth/userinfo.profile",
|
||||
]
|
||||
|
||||
# Credentials file path in ~/.hive/
|
||||
_ACCOUNTS_FILE = Path.home() / ".hive" / "antigravity-accounts.json"
|
||||
|
||||
# Default project ID
|
||||
_DEFAULT_PROJECT_ID = "rising-fact-p41fc"
|
||||
_DEFAULT_REDIRECT_PORT = 51121
|
||||
|
||||
# OAuth credentials fetched from the opencode-antigravity-auth project.
|
||||
# This project reverse-engineered and published the public OAuth credentials
|
||||
# for Google's Antigravity/Cloud Code Assist API.
|
||||
# Source: https://github.com/NoeFabris/opencode-antigravity-auth
|
||||
_CREDENTIALS_URL = (
|
||||
"https://raw.githubusercontent.com/NoeFabris/opencode-antigravity-auth/dev/src/constants.ts"
|
||||
)
|
||||
|
||||
# Cached credentials fetched from public source
|
||||
_cached_client_id: str | None = None
|
||||
_cached_client_secret: str | None = None
|
||||
|
||||
|
||||
def _fetch_credentials_from_public_source() -> tuple[str | None, str | None]:
|
||||
"""Fetch OAuth client ID and secret from the public npm package source on GitHub."""
|
||||
global _cached_client_id, _cached_client_secret
|
||||
if _cached_client_id and _cached_client_secret:
|
||||
return _cached_client_id, _cached_client_secret
|
||||
|
||||
try:
|
||||
req = urllib.request.Request(
|
||||
_CREDENTIALS_URL, headers={"User-Agent": "Hive-Antigravity-Auth/1.0"}
|
||||
)
|
||||
with urllib.request.urlopen(req, timeout=10) as resp:
|
||||
content = resp.read().decode("utf-8")
|
||||
import re
|
||||
|
||||
id_match = re.search(r'ANTIGRAVITY_CLIENT_ID\s*=\s*"([^"]+)"', content)
|
||||
secret_match = re.search(r'ANTIGRAVITY_CLIENT_SECRET\s*=\s*"([^"]+)"', content)
|
||||
if id_match:
|
||||
_cached_client_id = id_match.group(1)
|
||||
if secret_match:
|
||||
_cached_client_secret = secret_match.group(1)
|
||||
return _cached_client_id, _cached_client_secret
|
||||
except Exception as e:
|
||||
logger.debug(f"Failed to fetch credentials from public source: {e}")
|
||||
return None, None
|
||||
|
||||
|
||||
def get_client_id() -> str:
|
||||
"""Get OAuth client ID from env, config, or public source."""
|
||||
env_id = os.environ.get("ANTIGRAVITY_CLIENT_ID")
|
||||
if env_id:
|
||||
return env_id
|
||||
|
||||
# Try hive config
|
||||
hive_cfg = Path.home() / ".hive" / "configuration.json"
|
||||
if hive_cfg.exists():
|
||||
try:
|
||||
with open(hive_cfg) as f:
|
||||
cfg = json.load(f)
|
||||
cfg_id = cfg.get("llm", {}).get("antigravity_client_id")
|
||||
if cfg_id:
|
||||
return cfg_id
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Fetch from public source
|
||||
client_id, _ = _fetch_credentials_from_public_source()
|
||||
if client_id:
|
||||
return client_id
|
||||
|
||||
raise RuntimeError("Could not obtain Antigravity OAuth client ID")
|
||||
|
||||
|
||||
def get_client_secret() -> str | None:
|
||||
"""Get OAuth client secret from env, config, or public source."""
|
||||
secret = os.environ.get("ANTIGRAVITY_CLIENT_SECRET")
|
||||
if secret:
|
||||
return secret
|
||||
|
||||
# Try to read from hive config
|
||||
hive_cfg = Path.home() / ".hive" / "configuration.json"
|
||||
if hive_cfg.exists():
|
||||
try:
|
||||
with open(hive_cfg) as f:
|
||||
cfg = json.load(f)
|
||||
secret = cfg.get("llm", {}).get("antigravity_client_secret")
|
||||
if secret:
|
||||
return secret
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Fetch from public source (npm package on GitHub)
|
||||
_, secret = _fetch_credentials_from_public_source()
|
||||
return secret
|
||||
|
||||
|
||||
def find_free_port() -> int:
|
||||
"""Find an available local port."""
|
||||
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
|
||||
s.bind(("", 0))
|
||||
s.listen(1)
|
||||
return s.getsockname()[1]
|
||||
|
||||
|
||||
class OAuthCallbackHandler(BaseHTTPRequestHandler):
|
||||
"""Handle OAuth callback from browser."""
|
||||
|
||||
auth_code: str | None = None
|
||||
state: str | None = None
|
||||
error: str | None = None
|
||||
|
||||
def log_message(self, format: str, *args: Any) -> None:
|
||||
pass # Suppress default logging
|
||||
|
||||
def do_GET(self) -> None:
|
||||
parsed = urllib.parse.urlparse(self.path)
|
||||
|
||||
if parsed.path == "/oauth-callback":
|
||||
query = urllib.parse.parse_qs(parsed.query)
|
||||
|
||||
if "error" in query:
|
||||
                OAuthCallbackHandler.error = query["error"][0]  # set on the class so wait_for_callback can see it
|
||||
self._send_response("Authentication failed. You can close this window.")
|
||||
return
|
||||
|
||||
if "code" in query and "state" in query:
|
||||
OAuthCallbackHandler.auth_code = query["code"][0]
|
||||
OAuthCallbackHandler.state = query["state"][0]
|
||||
self._send_response(
|
||||
"Authentication successful! You can close this window "
|
||||
"and return to the terminal."
|
||||
)
|
||||
return
|
||||
|
||||
self._send_response("Waiting for authentication...")
|
||||
|
||||
def _send_response(self, message: str) -> None:
|
||||
self.send_response(200)
|
||||
self.send_header("Content-Type", "text/html")
|
||||
self.end_headers()
|
||||
html = f"""<!DOCTYPE html>
|
||||
<html>
|
||||
<head><title>Antigravity Auth</title></head>
|
||||
<body style="font-family: system-ui; display: flex; align-items: center;
|
||||
justify-content: center; height: 100vh; margin: 0; background: #1a1a2e;
|
||||
color: #eee;">
|
||||
<div style="text-align: center;">
|
||||
<h2>{message}</h2>
|
||||
</div>
|
||||
</body>
|
||||
</html>"""
|
||||
self.wfile.write(html.encode())
|
||||
|
||||
|
||||
def wait_for_callback(port: int, timeout: int = 300) -> tuple[str | None, str | None, str | None]:
|
||||
"""Start local server and wait for OAuth callback."""
|
||||
server = HTTPServer(("localhost", port), OAuthCallbackHandler)
|
||||
server.timeout = 1
|
||||
|
||||
start = time.time()
|
||||
while time.time() - start < timeout:
|
||||
        if OAuthCallbackHandler.auth_code or OAuthCallbackHandler.error:  # stop on success or on an OAuth error
|
||||
return (
|
||||
OAuthCallbackHandler.auth_code,
|
||||
OAuthCallbackHandler.state,
|
||||
OAuthCallbackHandler.error,
|
||||
)
|
||||
server.handle_request()
|
||||
|
||||
return None, None, "timeout"
|
||||
|
||||
|
||||
def exchange_code_for_tokens(
|
||||
code: str, redirect_uri: str, client_id: str, client_secret: str | None
|
||||
) -> dict[str, Any] | None:
|
||||
"""Exchange authorization code for tokens."""
|
||||
data = {
|
||||
"code": code,
|
||||
"client_id": client_id,
|
||||
"redirect_uri": redirect_uri,
|
||||
"grant_type": "authorization_code",
|
||||
}
|
||||
if client_secret:
|
||||
data["client_secret"] = client_secret
|
||||
|
||||
body = urllib.parse.urlencode(data).encode()
|
||||
|
||||
req = urllib.request.Request(
|
||||
_OAUTH_TOKEN_URL,
|
||||
data=body,
|
||||
headers={"Content-Type": "application/x-www-form-urlencoded"},
|
||||
method="POST",
|
||||
)
|
||||
|
||||
try:
|
||||
with urllib.request.urlopen(req, timeout=30) as resp:
|
||||
return json.loads(resp.read())
|
||||
except Exception as e:
|
||||
logger.error(f"Token exchange failed: {e}")
|
||||
return None
|
||||
|
||||
|
||||
def get_user_email(access_token: str) -> str | None:
|
||||
"""Get user email from Google API."""
|
||||
req = urllib.request.Request(
|
||||
"https://www.googleapis.com/oauth2/v2/userinfo",
|
||||
headers={"Authorization": f"Bearer {access_token}"},
|
||||
)
|
||||
try:
|
||||
with urllib.request.urlopen(req, timeout=10) as resp:
|
||||
data = json.loads(resp.read())
|
||||
return data.get("email")
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
|
||||
def load_accounts() -> dict[str, Any]:
|
||||
"""Load existing accounts from file."""
|
||||
if not _ACCOUNTS_FILE.exists():
|
||||
return {"schemaVersion": 4, "accounts": []}
|
||||
try:
|
||||
with open(_ACCOUNTS_FILE) as f:
|
||||
return json.load(f)
|
||||
except Exception:
|
||||
return {"schemaVersion": 4, "accounts": []}
|
||||
|
||||
|
||||
def save_accounts(data: dict[str, Any]) -> None:
|
||||
"""Save accounts to file."""
|
||||
_ACCOUNTS_FILE.parent.mkdir(parents=True, exist_ok=True)
|
||||
with open(_ACCOUNTS_FILE, "w") as f:
|
||||
json.dump(data, f, indent=2)
|
||||
logger.info(f"Saved credentials to {_ACCOUNTS_FILE}")
|
||||
|
||||
|
||||
def validate_credentials(access_token: str, project_id: str = _DEFAULT_PROJECT_ID) -> bool:
|
||||
"""Test if credentials work by making a simple API call to Antigravity.
|
||||
|
||||
Returns True if credentials are valid, False otherwise.
|
||||
"""
|
||||
endpoint = "https://daily-cloudcode-pa.sandbox.googleapis.com"
|
||||
body = {
|
||||
"project": project_id,
|
||||
"model": "gemini-3-flash",
|
||||
"request": {
|
||||
"contents": [{"role": "user", "parts": [{"text": "hi"}]}],
|
||||
"generationConfig": {"maxOutputTokens": 10},
|
||||
},
|
||||
"requestType": "agent",
|
||||
"userAgent": "antigravity",
|
||||
"requestId": "validation-test",
|
||||
}
|
||||
headers = {
|
||||
"Authorization": f"Bearer {access_token}",
|
||||
"Content-Type": "application/json",
|
||||
"User-Agent": (
|
||||
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) "
|
||||
"AppleWebKit/537.36 (KHTML, like Gecko) Antigravity/1.18.3"
|
||||
),
|
||||
"X-Goog-Api-Client": "google-cloud-sdk vscode_cloudshelleditor/0.1",
|
||||
}
|
||||
|
||||
try:
|
||||
req = urllib.request.Request(
|
||||
f"{endpoint}/v1internal:generateContent",
|
||||
data=json.dumps(body).encode("utf-8"),
|
||||
headers=headers,
|
||||
method="POST",
|
||||
)
|
||||
with urllib.request.urlopen(req, timeout=30) as resp:
|
||||
json.loads(resp.read())
|
||||
return True
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
|
||||
def refresh_access_token(
|
||||
refresh_token: str, client_id: str, client_secret: str | None
|
||||
) -> dict | None:
|
||||
"""Refresh the access token using the refresh token."""
|
||||
data = {
|
||||
"grant_type": "refresh_token",
|
||||
"refresh_token": refresh_token,
|
||||
"client_id": client_id,
|
||||
}
|
||||
if client_secret:
|
||||
data["client_secret"] = client_secret
|
||||
|
||||
body = urllib.parse.urlencode(data).encode()
|
||||
req = urllib.request.Request(
|
||||
_OAUTH_TOKEN_URL,
|
||||
data=body,
|
||||
headers={"Content-Type": "application/x-www-form-urlencoded"},
|
||||
method="POST",
|
||||
)
|
||||
try:
|
        with urllib.request.urlopen(req, timeout=30) as resp:
            return json.loads(resp.read())
    except Exception as e:
        logger.debug(f"Token refresh failed: {e}")
        return None


def cmd_account_add(args: argparse.Namespace) -> int:
    """Add a new Antigravity account via OAuth2.

    First checks if valid credentials already exist. If so, validates them
    and skips OAuth if they work. Otherwise, proceeds with OAuth flow.
    """
    client_id = get_client_id()
    client_secret = get_client_secret()

    # Check if credentials already exist
    accounts_data = load_accounts()
    accounts = accounts_data.get("accounts", [])

    if accounts:
        account = next((a for a in accounts if a.get("enabled", True) is not False), accounts[0])
        access_token = account.get("access")
        refresh_token_str = account.get("refresh", "")
        refresh_token = refresh_token_str.split("|")[0] if refresh_token_str else None
        project_id = (
            refresh_token_str.split("|")[1] if "|" in refresh_token_str else _DEFAULT_PROJECT_ID
        )
        email = account.get("email", "unknown")
        expires_ms = account.get("expires", 0)
        expires_at = expires_ms / 1000.0 if expires_ms else 0.0

        # Check if token is expired or near expiry
        if access_token and expires_at and time.time() < expires_at - 60:
            # Token still valid, test it
            logger.info(f"Found existing credentials for: {email}")
            logger.info("Validating existing credentials...")
            if validate_credentials(access_token, project_id):
                logger.info("✓ Credentials valid! Skipping OAuth.")
                return 0
            else:
                logger.info("Credentials failed validation, refreshing...")
        elif refresh_token:
            logger.info(f"Found expired credentials for: {email}")
            logger.info("Attempting token refresh...")

            tokens = refresh_access_token(refresh_token, client_id, client_secret)
            if tokens:
                new_access = tokens.get("access_token")
                expires_in = tokens.get("expires_in", 3600)
                if new_access:
                    # Update the account
                    account["access"] = new_access
                    account["expires"] = int((time.time() + expires_in) * 1000)
                    accounts_data["last_refresh"] = time.strftime(
                        "%Y-%m-%dT%H:%M:%SZ", time.gmtime()
                    )
                    save_accounts(accounts_data)

                    # Validate the refreshed token
                    logger.info("Validating refreshed credentials...")
                    if validate_credentials(new_access, project_id):
                        logger.info("✓ Credentials refreshed and validated!")
                        return 0
                    else:
                        logger.info("Refreshed token failed validation, proceeding with OAuth...")
            else:
                logger.info("Token refresh failed, proceeding with OAuth...")

    # No valid credentials, proceed with OAuth
    if not client_secret:
        logger.warning(
            "No client secret configured. Token refresh may fail.\n"
            "Set ANTIGRAVITY_CLIENT_SECRET env var or add "
            "'antigravity_client_secret' to ~/.hive/configuration.json"
        )

    # Use fixed port and path matching Google's expected OAuth redirect URI
    port = _DEFAULT_REDIRECT_PORT
    redirect_uri = f"http://localhost:{port}/oauth-callback"

    # Generate state for CSRF protection
    state = secrets.token_urlsafe(16)

    # Build authorization URL
    params = {
        "client_id": client_id,
        "redirect_uri": redirect_uri,
        "response_type": "code",
        "scope": " ".join(_OAUTH_SCOPES),
        "state": state,
        "access_type": "offline",
        "prompt": "consent",
    }
    auth_url = f"{_OAUTH_AUTH_URL}?{urllib.parse.urlencode(params)}"

    logger.info("Opening browser for authentication...")
    logger.info(f"If the browser doesn't open, visit: {auth_url}\n")

    # Open browser
    webbrowser.open(auth_url)

    # Wait for callback
    logger.info(f"Listening for callback on port {port}...")
    code, received_state, error = wait_for_callback(port)

    if error:
        logger.error(f"Authentication failed: {error}")
        return 1

    if not code:
        logger.error("No authorization code received")
        return 1

    if received_state != state:
        logger.error("State mismatch - possible CSRF attack")
        return 1

    # Exchange code for tokens
    logger.info("Exchanging authorization code for tokens...")
    tokens = exchange_code_for_tokens(code, redirect_uri, client_id, client_secret)

    if not tokens:
        return 1

    access_token = tokens.get("access_token")
    refresh_token = tokens.get("refresh_token")
    expires_in = tokens.get("expires_in", 3600)

    if not access_token:
        logger.error("No access token in response")
        return 1

    # Get user email
    email = get_user_email(access_token)
    if email:
        logger.info(f"Authenticated as: {email}")

    # Load existing accounts and add/update
    accounts_data = load_accounts()
    accounts = accounts_data.get("accounts", [])

    # Build new account entry (V4 schema)
    expires_ms = int((time.time() + expires_in) * 1000)
    refresh_entry = f"{refresh_token}|{_DEFAULT_PROJECT_ID}"

    new_account = {
        "access": access_token,
        "refresh": refresh_entry,
        "expires": expires_ms,
        "email": email,
        "enabled": True,
    }

    # Update existing account or add new one
    existing_idx = next((i for i, a in enumerate(accounts) if a.get("email") == email), None)
    if existing_idx is not None:
        accounts[existing_idx] = new_account
        logger.info(f"Updated existing account: {email}")
    else:
        accounts.append(new_account)
        logger.info(f"Added new account: {email}")

    accounts_data["accounts"] = accounts
    accounts_data["schemaVersion"] = 4
    accounts_data["last_refresh"] = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())

    save_accounts(accounts_data)
    logger.info("\n✓ Authentication complete!")
    return 0


def cmd_account_list(args: argparse.Namespace) -> int:
    """List all stored accounts."""
    data = load_accounts()
    accounts = data.get("accounts", [])

    if not accounts:
        logger.info("No accounts configured.")
        logger.info("Run 'antigravity auth account add' to add one.")
        return 0

    logger.info("Configured accounts:\n")
    for i, account in enumerate(accounts, 1):
        email = account.get("email", "unknown")
        enabled = "enabled" if account.get("enabled", True) else "disabled"
        logger.info(f" {i}. {email} ({enabled})")

    return 0


def cmd_account_remove(args: argparse.Namespace) -> int:
    """Remove an account by email."""
    email = args.email
    data = load_accounts()
    accounts = data.get("accounts", [])

    original_len = len(accounts)
    accounts = [a for a in accounts if a.get("email") != email]

    if len(accounts) == original_len:
        logger.error(f"No account found with email: {email}")
        return 1

    data["accounts"] = accounts
    save_accounts(data)
    logger.info(f"Removed account: {email}")
    return 0


def main() -> int:
    parser = argparse.ArgumentParser(
        description="Antigravity authentication CLI",
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    subparsers = parser.add_subparsers(dest="command", help="Commands")

    # auth account add
    auth_parser = subparsers.add_parser("auth", help="Authentication commands")
    auth_subparsers = auth_parser.add_subparsers(dest="auth_command")

    account_parser = auth_subparsers.add_parser("account", help="Account management")
    account_subparsers = account_parser.add_subparsers(dest="account_command")

    add_parser = account_subparsers.add_parser("add", help="Add a new account via OAuth2")
    add_parser.set_defaults(func=cmd_account_add)

    list_parser = account_subparsers.add_parser("list", help="List configured accounts")
    list_parser.set_defaults(func=cmd_account_list)

    remove_parser = account_subparsers.add_parser("remove", help="Remove an account")
    remove_parser.add_argument("email", help="Email of account to remove")
    remove_parser.set_defaults(func=cmd_account_remove)

    args = parser.parse_args()

    if hasattr(args, "func"):
        return args.func(args)

    parser.print_help()
    return 0


if __name__ == "__main__":
    sys.exit(main())
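A minimal sketch of the V4 account schema's "refresh" convention used above: the OAuth refresh token and the project id travel together in one pipe-separated string. The helper below is illustrative only (it is not part of the CLI) and mirrors the split("|") logic in cmd_account_add:

def _unpack_refresh(refresh_entry: str, default_project: str) -> tuple[str | None, str]:
    """Split a 'token|project' refresh entry into its two parts."""
    token = refresh_entry.split("|")[0] if refresh_entry else None
    project = refresh_entry.split("|")[1] if "|" in refresh_entry else default_project
    return token, project

assert _unpack_refresh("tok-123|proj-9", "default-proj") == ("tok-123", "proj-9")
assert _unpack_refresh("tok-123", "default-proj") == ("tok-123", "default-proj")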
@@ -1,441 +0,0 @@
"""OpenAI Codex OAuth PKCE login flow.

Runs the full browser-based OAuth flow so users can authenticate with their
ChatGPT Plus/Pro subscription without needing the Codex CLI installed.

Usage (from quickstart.sh):
    uv run python codex_oauth.py

Exit codes:
    0 - success (credentials saved to ~/.codex/auth.json)
    1 - failure (user cancelled, timeout, or token exchange error)
"""

import base64
import hashlib
import http.server
import json
import os
import platform
import queue
import secrets
import subprocess
import sys
import threading
import time
import urllib.error
import urllib.parse
import urllib.request
from datetime import UTC, datetime
from pathlib import Path
from typing import TextIO

# OAuth constants (from the Codex CLI binary)
CLIENT_ID = "app_EMoamEEZ73f0CkXaXp7hrann"
AUTHORIZE_URL = "https://auth.openai.com/oauth/authorize"
TOKEN_URL = "https://auth.openai.com/oauth/token"
REDIRECT_URI = "http://localhost:1455/auth/callback"
SCOPE = "openid profile email offline_access"
CALLBACK_PORT = 1455

# Where to save credentials (same location the Codex CLI uses)
CODEX_AUTH_FILE = Path.home() / ".codex" / "auth.json"

# JWT claim path for account_id
JWT_CLAIM_PATH = "https://api.openai.com/auth"


def _base64url(data: bytes) -> str:
    return base64.urlsafe_b64encode(data).rstrip(b"=").decode("ascii")


def generate_pkce() -> tuple[str, str]:
    """Generate PKCE code_verifier and code_challenge (S256)."""
    verifier_bytes = secrets.token_bytes(32)
    verifier = _base64url(verifier_bytes)
    challenge = _base64url(hashlib.sha256(verifier.encode("ascii")).digest())
    return verifier, challenge


def build_authorize_url(state: str, challenge: str) -> str:
    """Build the OpenAI OAuth authorize URL with PKCE."""
    params = urllib.parse.urlencode(
        {
            "response_type": "code",
            "client_id": CLIENT_ID,
            "redirect_uri": REDIRECT_URI,
            "scope": SCOPE,
            "code_challenge": challenge,
            "code_challenge_method": "S256",
            "state": state,
            "id_token_add_organizations": "true",
            "codex_cli_simplified_flow": "true",
            "originator": "hive",
        }
    )
    return f"{AUTHORIZE_URL}?{params}"


def exchange_code_for_tokens(code: str, verifier: str) -> dict | None:
    """Exchange the authorization code for tokens."""
    data = urllib.parse.urlencode(
        {
            "grant_type": "authorization_code",
            "client_id": CLIENT_ID,
            "code": code,
            "code_verifier": verifier,
            "redirect_uri": REDIRECT_URI,
        }
    ).encode("utf-8")

    req = urllib.request.Request(
        TOKEN_URL,
        data=data,
        headers={"Content-Type": "application/x-www-form-urlencoded"},
        method="POST",
    )

    try:
        with urllib.request.urlopen(req, timeout=15) as resp:
            token_data = json.loads(resp.read())
    except (urllib.error.URLError, json.JSONDecodeError, TimeoutError, OSError) as exc:
        print(f"\033[0;31mToken exchange failed: {exc}\033[0m", file=sys.stderr)
        return None

    if not token_data.get("access_token") or not token_data.get("refresh_token"):
        print("\033[0;31mToken response missing required fields\033[0m", file=sys.stderr)
        return None

    return token_data
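A quick sketch of the S256 relation the two functions above rely on: the code_challenge sent in the authorize URL is the base64url-encoded SHA-256 digest of the code_verifier that is later posted to the token endpoint, which lets the server bind the two requests together:

import base64
import hashlib
import secrets

verifier = base64.urlsafe_b64encode(secrets.token_bytes(32)).rstrip(b"=").decode("ascii")
digest = hashlib.sha256(verifier.encode("ascii")).digest()
challenge = base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")
# `challenge` goes in the authorize URL; `verifier` is revealed only at token exchange.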
def decode_jwt_payload(token: str) -> dict | None:
    """Decode the payload of a JWT (no signature verification)."""
    try:
        parts = token.split(".")
        if len(parts) != 3:
            return None
        payload = parts[1]
        # Add padding
        padding = 4 - len(payload) % 4
        if padding != 4:
            payload += "=" * padding
        decoded = base64.urlsafe_b64decode(payload)
        return json.loads(decoded)
    except Exception:
        return None


def get_account_id(access_token: str) -> str | None:
    """Extract the ChatGPT account_id from the access token JWT."""
    payload = decode_jwt_payload(access_token)
    if not payload:
        return None
    auth = payload.get(JWT_CLAIM_PATH)
    if isinstance(auth, dict):
        account_id = auth.get("chatgpt_account_id")
        if isinstance(account_id, str) and account_id:
            return account_id
    return None
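The padding arithmetic in decode_jwt_payload follows from base64 requiring input lengths that are multiples of 4: JWT segments drop the trailing '=' padding, so it must be restored before decoding. A self-contained sketch (the segment value is illustrative):

import base64

segment = "eyJhbGciOiJIUzI1NiJ9"  # a typical JWT header segment
padding = 4 - len(segment) % 4
if padding != 4:
    segment += "=" * padding
print(base64.urlsafe_b64decode(segment))  # b'{"alg":"HS256"}'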
def save_credentials(token_data: dict, account_id: str) -> None:
    """Save credentials to ~/.codex/auth.json in the same format the Codex CLI uses."""
    auth_data = {
        "tokens": {
            "access_token": token_data["access_token"],
            "refresh_token": token_data["refresh_token"],
            "account_id": account_id,
        },
        "auth_mode": "chatgpt",
        "last_refresh": datetime.now(UTC).isoformat(),
    }
    if "id_token" in token_data:
        auth_data["tokens"]["id_token"] = token_data["id_token"]

    CODEX_AUTH_FILE.parent.mkdir(parents=True, exist_ok=True, mode=0o700)
    fd = os.open(CODEX_AUTH_FILE, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
    with os.fdopen(fd, "w") as f:
        json.dump(auth_data, f, indent=2)


def open_browser(url: str) -> bool:
    """Open the URL in the user's default browser."""
    system = platform.system()
    try:
        devnull = subprocess.DEVNULL
        if system == "Darwin":
            subprocess.Popen(["open", url], stdout=devnull, stderr=devnull)
        elif system == "Windows":
            os.startfile(url)  # type: ignore[attr-defined]
        else:
            subprocess.Popen(["xdg-open", url], stdout=devnull, stderr=devnull)
        return True
    except (AttributeError, OSError):
        return False


class OAuthCallbackHandler(http.server.BaseHTTPRequestHandler):
    """HTTP handler that captures the OAuth callback."""

    auth_code: str | None = None
    received_state: str | None = None

    def do_GET(self) -> None:
        parsed = urllib.parse.urlparse(self.path)
        if parsed.path != "/auth/callback":
            self.send_response(404)
            self.end_headers()
            self.wfile.write(b"Not found")
            return

        params = urllib.parse.parse_qs(parsed.query)
        code = params.get("code", [None])[0]
        state = params.get("state", [None])[0]

        if not code:
            self.send_response(400)
            self.end_headers()
            self.wfile.write(b"Missing authorization code")
            return

        OAuthCallbackHandler.auth_code = code
        OAuthCallbackHandler.received_state = state

        self.send_response(200)
        self.send_header("Content-Type", "text/html; charset=utf-8")
        self.end_headers()
        self.wfile.write(
            b"<!doctype html><html><head><meta charset='utf-8'/></head>"
            b"<body><h2>Authentication successful</h2>"
            b"<p>Return to your terminal to continue.</p></body></html>"
        )

    def log_message(self, format: str, *args: object) -> None:
        # Suppress request logging
        pass


def wait_for_callback(state: str, timeout_secs: int = 120) -> str | None:
    """Start a local HTTP server and wait for the OAuth callback.

    Returns the authorization code on success, None on timeout.
    """
    OAuthCallbackHandler.auth_code = None
    OAuthCallbackHandler.received_state = None

    server = http.server.HTTPServer(("127.0.0.1", CALLBACK_PORT), OAuthCallbackHandler)
    server.timeout = 1

    deadline = time.time() + timeout_secs
    server_thread = threading.Thread(target=_serve_until_done, args=(server, deadline, state))
    server_thread.daemon = True
    server_thread.start()
    server_thread.join(timeout=timeout_secs + 2)

    server.server_close()

    if OAuthCallbackHandler.auth_code and OAuthCallbackHandler.received_state == state:
        return OAuthCallbackHandler.auth_code
    return None


def _serve_until_done(server: http.server.HTTPServer, deadline: float, state: str) -> None:
    while time.time() < deadline:
        server.handle_request()
        if OAuthCallbackHandler.auth_code and OAuthCallbackHandler.received_state == state:
            return
def parse_manual_input(value: str, expected_state: str) -> str | None:
    """Parse user-pasted redirect URL or auth code."""
    value = value.strip()
    if not value:
        return None
    try:
        parsed = urllib.parse.urlparse(value)
        params = urllib.parse.parse_qs(parsed.query)
        code = params.get("code", [None])[0]
        state = params.get("state", [None])[0]
        if state and state != expected_state:
            return None
        if code:
            return code
    except Exception:
        pass
    # Maybe it's just the raw code
    if len(value) > 10 and " " not in value:
        return value
    return None
def _read_manual_input_lines(
    manual_inputs: queue.Queue[str],
    stop_event: threading.Event,
    stdin: TextIO | None = None,
) -> None:
    stream = sys.stdin if stdin is None else stdin

    while not stop_event.is_set():
        try:
            manual = stream.readline()
        except (EOFError, OSError):
            return

        if not manual:
            return

        if manual.strip():
            manual_inputs.put(manual)


def wait_for_code_from_callback_or_stdin(
    expected_state: str,
    callback_result: list[str | None],
    callback_done: threading.Event,
    timeout_secs: float = 120,
    poll_interval: float = 0.1,
    stdin: TextIO | None = None,
) -> str | None:
    manual_inputs: queue.Queue[str] = queue.Queue()
    stop_event = threading.Event()

    # Read stdin on a daemon thread so manual paste works on platforms where
    # select() cannot poll console handles, including Windows terminals.
    threading.Thread(
        target=_read_manual_input_lines,
        args=(manual_inputs, stop_event, stdin),
        daemon=True,
    ).start()

    deadline = time.time() + timeout_secs
    try:
        while time.time() < deadline:
            if callback_result[0]:
                return callback_result[0]

            while True:
                try:
                    manual = manual_inputs.get_nowait()
                except queue.Empty:
                    break

                code = parse_manual_input(manual, expected_state)
                if code:
                    return code

            if callback_done.is_set():
                return callback_result[0]

            time.sleep(poll_interval)

        return callback_result[0]
    finally:
        stop_event.set()


def main() -> int:
    # Generate PKCE and state
    verifier, challenge = generate_pkce()
    state = secrets.token_hex(16)

    # Build URL
    auth_url = build_authorize_url(state, challenge)

    print()
    print("\033[1mOpenAI Codex OAuth Login\033[0m")
    print()

    # Try to start the local callback server first
    try:
        server_available = True
        # Quick test that port is free
        import socket

        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.settimeout(1)
        result = sock.connect_ex(("127.0.0.1", CALLBACK_PORT))
        sock.close()
        if result == 0:
            print(f"\033[1;33mPort {CALLBACK_PORT} is in use. Using manual paste mode.\033[0m")
            server_available = False
    except Exception:
        server_available = True

    # Open browser
    browser_opened = open_browser(auth_url)
    if browser_opened:
        print(" Browser opened for OpenAI sign-in...")
    else:
        print(" Could not open browser automatically.")

    print()
    print(" If the browser didn't open, visit this URL:")
    print(f" \033[0;36m{auth_url}\033[0m")
    print()

    code = None

    if server_available:
        print(" Waiting for authentication (up to 2 minutes)...")
        print(" \033[2mOr paste the redirect URL below if the callback didn't work:\033[0m")
        print()

        # Start callback server in background
        callback_result: list[str | None] = [None]
        callback_done = threading.Event()

        def run_server() -> None:
            try:
                callback_result[0] = wait_for_callback(state, timeout_secs=120)
            finally:
                callback_done.set()

        server_thread = threading.Thread(target=run_server)
        server_thread.daemon = True
        server_thread.start()

        try:
            code = wait_for_code_from_callback_or_stdin(
                state,
                callback_result,
                callback_done,
                timeout_secs=120,
            )
        except KeyboardInterrupt:
            print("\n\033[0;31mCancelled.\033[0m")
            return 1
    else:
        # Manual paste mode
        try:
            manual = input(" Paste the redirect URL: ").strip()
            code = parse_manual_input(manual, state)
        except (KeyboardInterrupt, EOFError):
            print("\n\033[0;31mCancelled.\033[0m")
            return 1

    if not code:
        print("\n\033[0;31mAuthentication timed out or failed.\033[0m")
        return 1

    # Exchange code for tokens
    print()
    print(" Exchanging authorization code for tokens...")
    token_data = exchange_code_for_tokens(code, verifier)
    if not token_data:
        return 1

    # Extract account_id from JWT
    account_id = get_account_id(token_data["access_token"])
    if not account_id:
        print("\033[0;31mFailed to extract account ID from token.\033[0m", file=sys.stderr)
        return 1

    # Save credentials
    save_credentials(token_data, account_id)
    print(" \033[0;32mAuthentication successful!\033[0m")
    print(f" Credentials saved to {CODEX_AUTH_FILE}")
    return 0


if __name__ == "__main__":
    sys.exit(main())
@@ -0,0 +1,123 @@
"""
Minimal Manual Agent Example
----------------------------
This example demonstrates how to build and run an agent programmatically
without using the Claude Code CLI or external LLM APIs.

It uses 'function' nodes to define logic in pure Python, making it perfect
for understanding the core runtime loop:
    Setup -> Graph definition -> Execution -> Result

Run with:
    PYTHONPATH=core python core/examples/manual_agent.py
"""

import asyncio

from framework.graph import EdgeCondition, EdgeSpec, Goal, GraphSpec, NodeSpec
from framework.graph.executor import GraphExecutor
from framework.runtime.core import Runtime


# 1. Define Node Logic (Pure Python Functions)
def greet(name: str) -> str:
    """Generate a simple greeting."""
    return f"Hello, {name}!"


def uppercase(greeting: str) -> str:
    """Convert text to uppercase."""
    return greeting.upper()


async def main():
    print("🚀 Setting up Manual Agent...")

    # 2. Define the Goal
    # Every agent needs a goal with success criteria
    goal = Goal(
        id="greet-user",
        name="Greet User",
        description="Generate a friendly uppercase greeting",
        success_criteria=[
            {
                "id": "greeting_generated",
                "description": "Greeting produced",
                "metric": "custom",
                "target": "any",
            }
        ],
    )

    # 3. Define Nodes
    # Nodes describe steps in the process
    node1 = NodeSpec(
        id="greeter",
        name="Greeter",
        description="Generates a simple greeting",
        node_type="function",
        function="greet",  # Implementation is registered on the executor below
        input_keys=["name"],
        output_keys=["greeting"],
    )

    node2 = NodeSpec(
        id="uppercaser",
        name="Uppercaser",
        description="Converts greeting to uppercase",
        node_type="function",
        function="uppercase",
        input_keys=["greeting"],
        output_keys=["final_greeting"],
    )

    # 4. Define Edges
    # Edges define the flow between nodes
    edge1 = EdgeSpec(
        id="greet-to-upper",
        source="greeter",
        target="uppercaser",
        condition=EdgeCondition.ON_SUCCESS,
    )

    # 5. Create Graph
    # The graph works like a blueprint connecting nodes and edges
    graph = GraphSpec(
        id="greeting-agent",
        goal_id="greet-user",
        entry_node="greeter",
        terminal_nodes=["uppercaser"],
        nodes=[node1, node2],
        edges=[edge1],
    )

    # 6. Initialize Runtime & Executor
    # Runtime handles state/memory; Executor runs the graph
    from pathlib import Path

    runtime = Runtime(storage_path=Path("./agent_logs"))
    executor = GraphExecutor(runtime=runtime)

    # 7. Register Function Implementations
    # Connect string names in NodeSpecs to actual Python functions
    executor.register_function("greeter", greet)
    executor.register_function("uppercaser", uppercase)

    # 8. Execute Agent
    print("▶ Executing agent with input: name='Alice'...")

    result = await executor.execute(graph=graph, goal=goal, input_data={"name": "Alice"})

    # 9. Verify Results
    if result.success:
        print("\n✅ Success!")
        print(f"Path taken: {' -> '.join(result.path)}")
        print(f"Final output: {result.output.get('final_greeting')}")
    else:
        print(f"\n❌ Failed: {result.error}")


if __name__ == "__main__":
    # Optional: Enable logging to see internal decision flow
    # logging.basicConfig(level=logging.INFO)
    asyncio.run(main())
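For orientation, a successful run of this example should print something close to the following; the exact wording of the framework's own messages may differ, but the path and final output follow from the two nodes defined above:

🚀 Setting up Manual Agent...
▶ Executing agent with input: name='Alice'...

✅ Success!
Path taken: greeter -> uppercaser
Final output: HELLO, ALICE!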
@@ -0,0 +1,194 @@
#!/usr/bin/env python3
"""
Example: Integrating MCP Servers with the Core Framework

This example demonstrates how to:
1. Register MCP servers programmatically
2. Use MCP tools in agents
3. Load MCP servers from configuration files
"""

import asyncio
from pathlib import Path

from framework.runner.runner import AgentRunner


async def example_1_programmatic_registration():
    """Example 1: Register MCP server programmatically"""
    print("\n=== Example 1: Programmatic MCP Server Registration ===\n")

    # Load an existing agent
    runner = AgentRunner.load("exports/task-planner")

    # Register tools MCP server via STDIO
    num_tools = runner.register_mcp_server(
        name="tools",
        transport="stdio",
        command="python",
        args=["-m", "aden_tools.mcp_server", "--stdio"],
        cwd="../tools",
    )

    print(f"Registered {num_tools} tools from tools MCP server")

    # List all available tools
    tools = runner._tool_registry.get_tools()
    print(f"\nAvailable tools: {list(tools.keys())}")

    # Run the agent with MCP tools available
    result = await runner.run(
        {"objective": "Search for 'Claude AI' and summarize the top 3 results"}
    )

    print(f"\nAgent result: {result}")

    # Cleanup
    runner.cleanup()


async def example_2_http_transport():
    """Example 2: Connect to MCP server via HTTP"""
    print("\n=== Example 2: HTTP MCP Server Connection ===\n")

    # First, start the tools MCP server in HTTP mode:
    #   cd tools && python mcp_server.py --port 4001

    runner = AgentRunner.load("exports/task-planner")

    # Register tools via HTTP
    num_tools = runner.register_mcp_server(
        name="tools-http",
        transport="http",
        url="http://localhost:4001",
    )

    print(f"Registered {num_tools} tools from HTTP MCP server")

    # Cleanup
    runner.cleanup()


async def example_3_config_file():
    """Example 3: Load MCP servers from configuration file"""
    print("\n=== Example 3: Load from Configuration File ===\n")

    # Create a test agent folder with mcp_servers.json
    test_agent_path = Path("exports/task-planner")

    # Copy example config (in practice, you'd place this in your agent folder)
    import shutil

    shutil.copy("examples/mcp_servers.json", test_agent_path / "mcp_servers.json")

    # Load agent - MCP servers will be auto-discovered
    runner = AgentRunner.load(test_agent_path)

    # Tools are automatically available
    tools = runner._tool_registry.get_tools()
    print(f"Available tools: {list(tools.keys())}")

    # Cleanup
    runner.cleanup()

    # Clean up the test config
    (test_agent_path / "mcp_servers.json").unlink()


async def example_4_custom_agent_with_mcp_tools():
    """Example 4: Build custom agent that uses MCP tools"""
    print("\n=== Example 4: Custom Agent with MCP Tools ===\n")

    from framework.builder.workflow import GraphBuilder

    # Create a workflow builder
    builder = GraphBuilder()

    # Define goal
    builder.set_goal(
        goal_id="web-researcher",
        name="Web Research Agent",
        description="Search the web and summarize findings",
    )

    # Add success criteria
    builder.add_success_criterion(
        "search-results", "Successfully retrieve at least 3 web search results"
    )
    builder.add_success_criterion("summary", "Provide a clear, concise summary of the findings")

    # Add nodes that will use MCP tools
    builder.add_node(
        node_id="web-searcher",
        name="Web Search",
        description="Search the web for information",
        node_type="llm_tool_use",
        system_prompt="Search for {query} and return the top results. Use the web_search tool.",
        tools=["web_search"],  # This tool comes from the tools MCP server
        input_keys=["query"],
        output_keys=["search_results"],
    )

    builder.add_node(
        node_id="summarizer",
        name="Summarize Results",
        description="Summarize the search results",
        node_type="llm_generate",
        system_prompt="Summarize the following search results in 2-3 sentences: {search_results}",
        input_keys=["search_results"],
        output_keys=["summary"],
    )

    # Connect nodes
    builder.add_edge("web-searcher", "summarizer")

    # Set entry point
    builder.set_entry("web-searcher")
    builder.set_terminal("summarizer")

    # Export the agent
    export_path = Path("exports/web-research-agent")
    export_path.mkdir(parents=True, exist_ok=True)
    builder.export(export_path)

    # Load and register MCP server
    runner = AgentRunner.load(export_path)
    runner.register_mcp_server(
        name="tools",
        transport="stdio",
        command="python",
        args=["-m", "aden_tools.mcp_server", "--stdio"],
        cwd="../tools",
    )

    # Run the agent
    result = await runner.run({"query": "latest AI breakthroughs 2026"})

    print(f"\nAgent completed with result:\n{result}")

    # Cleanup
    runner.cleanup()


async def main():
    """Run all examples"""
    print("=" * 60)
    print("MCP Integration Examples")
    print("=" * 60)

    try:
        # Run examples
        await example_1_programmatic_registration()
        # await example_2_http_transport()  # Requires HTTP server running
        # await example_3_config_file()
        # await example_4_custom_agent_with_mcp_tools()

    except Exception as e:
        print(f"\nError running example: {e}")
        import traceback

        traceback.print_exc()


if __name__ == "__main__":
    asyncio.run(main())
@@ -4,8 +4,8 @@
     "name": "tools",
     "description": "Aden tools including web search, file operations, and PDF reading",
     "transport": "stdio",
-    "command": "uv",
-    "args": ["run", "python", "mcp_server.py", "--stdio"],
+    "command": "python",
+    "args": ["mcp_server.py", "--stdio"],
     "cwd": "../tools",
     "env": {
       "BRAVE_SEARCH_API_KEY": "${BRAVE_SEARCH_API_KEY}"
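For reference, the resulting server entry reads as follows; this sketch assembles it in Python purely so the snippet stays runnable (the key names and values come from the hunk above):

import json

tools_server = {
    "name": "tools",
    "description": "Aden tools including web search, file operations, and PDF reading",
    "transport": "stdio",
    "command": "python",
    "args": ["mcp_server.py", "--stdio"],
    "cwd": "../tools",
    "env": {"BRAVE_SEARCH_API_KEY": "${BRAVE_SEARCH_API_KEY}"},
}
print(json.dumps(tools_server, indent=2))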
+64
-14
@@ -1,20 +1,70 @@
"""Hive Agent Framework.
"""
Aden Hive Framework: A goal-driven agent runtime optimized for Builder observability.

Core classes:
    ColonyRuntime -- orchestrates parallel worker clones in a colony
    AgentLoop -- the LLM + tool execution loop (one per worker)
    AgentLoader -- loads agent config from disk, builds pipeline
    DecisionTracker -- records decisions for post-hoc analysis

The runtime is designed around DECISIONS, not just actions. Every significant
choice the agent makes is captured with:
- What it was trying to do (intent)
- What options it considered
- What it chose and why
- What happened as a result
- Whether that was good or bad (evaluated post-hoc)

This gives the Builder LLM the information it needs to improve agent behavior.

## Testing Framework

The framework includes a Goal-Based Testing system (Goal → Agent → Eval):
- Generate tests from Goal success_criteria and constraints
- Mandatory user approval before tests are stored
- Parallel test execution with error categorization
- Debug tools with fix suggestions

See `framework.testing` for details.
"""

from framework.agent_loop import AgentLoop
from framework.host import ColonyRuntime
from framework.loader import AgentLoader
from framework.tracker import DecisionTracker
from framework.builder.query import BuilderQuery
from framework.llm import AnthropicProvider, LLMProvider
from framework.runner import AgentOrchestrator, AgentRunner
from framework.runtime.core import Runtime
from framework.schemas.decision import Decision, DecisionEvaluation, Option, Outcome
from framework.schemas.run import Problem, Run, RunSummary

# Testing framework
from framework.testing import (
    ApprovalStatus,
    DebugTool,
    ErrorCategory,
    Test,
    TestResult,
    TestStorage,
    TestSuiteResult,
)

__all__ = [
    "ColonyRuntime",
    "AgentLoader",
    "AgentLoop",
    "DecisionTracker",
    # Schemas
    "Decision",
    "Option",
    "Outcome",
    "DecisionEvaluation",
    "Run",
    "RunSummary",
    "Problem",
    # Runtime
    "Runtime",
    # Builder
    "BuilderQuery",
    # LLM
    "LLMProvider",
    "AnthropicProvider",
    # Runner
    "AgentRunner",
    "AgentOrchestrator",
    # Testing
    "Test",
    "TestResult",
    "TestSuiteResult",
    "TestStorage",
    "ApprovalStatus",
    "ErrorCategory",
    "DebugTool",
]
@@ -1,34 +0,0 @@
"""Agent loop -- the core agent execution primitive."""

from framework.agent_loop.conversation import (  # noqa: F401
    ConversationStore,
    Message,
    NodeConversation,
)
from framework.agent_loop.types import (  # noqa: F401
    AgentContext,
    AgentProtocol,
    AgentResult,
    AgentSpec,
)


def __getattr__(name: str):
    if name in ("AgentLoop", "JudgeProtocol", "JudgeVerdict", "LoopConfig", "OutputAccumulator"):
        from framework.agent_loop.agent_loop import (
            AgentLoop,
            JudgeProtocol,
            JudgeVerdict,
            LoopConfig,
            OutputAccumulator,
        )

        _exports = {
            "AgentLoop": AgentLoop,
            "JudgeProtocol": JudgeProtocol,
            "JudgeVerdict": JudgeVerdict,
            "LoopConfig": LoopConfig,
            "OutputAccumulator": OutputAccumulator,
        }
        return _exports[name]
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
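The module-level __getattr__ above is the lazy-import pattern from PEP 562: the heavy agent_loop module is imported only when one of the five listed names is first accessed, and any other name falls through to the usual AttributeError. A minimal usage sketch, assuming the package is importable:

from framework import agent_loop

loop_cls = agent_loop.AgentLoop  # first access triggers the deferred import
# agent_loop.NoSuchName  # would raise AttributeError via the final raise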
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -1,7 +0,0 @@
"""Agent loop internals -- compaction, judge, tools, subagent execution.

Re-exports from legacy locations for the new import path.
"""

from framework.agent_loop.internals.compaction import *  # noqa: F401, F403
from framework.agent_loop.internals.synthetic_tools import *  # noqa: F401, F403
@@ -1,871 +0,0 @@
"""Conversation compaction pipeline.

Implements the multi-level compaction strategy:
0. Microcompaction (count-based tool result clearing — cheapest)
1. Prune old tool results (token-budget based)
2. Structure-preserving compaction (spillover)
3. LLM summary compaction (with recursive splitting)
4. Emergency deterministic summary (no LLM)
"""

from __future__ import annotations

import json
import logging
import os
import re
import time
from datetime import UTC, datetime
from pathlib import Path
from typing import Any

from framework.agent_loop.conversation import Message, NodeConversation
from framework.agent_loop.internals.event_publishing import publish_context_usage
from framework.agent_loop.internals.types import LoopConfig, OutputAccumulator
from framework.host.event_bus import EventBus
from framework.orchestrator.node import NodeContext

logger = logging.getLogger(__name__)

# Limits for LLM compaction
LLM_COMPACT_CHAR_LIMIT: int = 240_000
LLM_COMPACT_MAX_DEPTH: int = 10

# Microcompaction: tools whose results can be safely cleared
COMPACTABLE_TOOLS: frozenset[str] = frozenset(
    {
        "read_file",
        "run_command",
        "web_search",
        "web_fetch",
        "grep_search",
        "glob_search",
        "write_file",
        "edit_file",
        "browser_screenshot",
        "list_directory",
    }
)

# Keep at most this many compactable tool results; clear older ones
MICROCOMPACT_KEEP_RECENT: int = 8

# Circuit-breaker: stop auto-compacting after this many consecutive failures
MAX_CONSECUTIVE_FAILURES: int = 3

# Track consecutive compaction failures per conversation (module-level)
_failure_counts: dict[int, int] = {}

# Track last compaction time per conversation for recompaction detection
_last_compact_times: dict[int, float] = {}
def microcompact(
    conversation: NodeConversation,
    *,
    keep_recent: int = MICROCOMPACT_KEEP_RECENT,
) -> int:
    """Clear old compactable tool results by count, keeping only the most recent.

    This is the cheapest possible compaction — no LLM call, no structural
    changes, just replaces old tool result content with a short placeholder.
    Inspired by Claude Code's cached-microcompact strategy.

    Returns the number of tool results cleared.
    """
    # Collect indices of compactable tool results (newest first)
    compactable_indices: list[int] = []
    messages = conversation.messages
    for i in range(len(messages) - 1, -1, -1):
        msg = messages[i]
        if msg.role != "tool" or msg.is_error or msg.is_skill_content:
            continue
        if msg.content.startswith(("[Pruned tool result", "[Old tool result")):
            continue
        if len(msg.content) < 100:
            continue

        # Check if the tool that produced this result is compactable
        tool_name = _find_tool_name_for_result(messages, msg)
        if tool_name and tool_name in COMPACTABLE_TOOLS:
            compactable_indices.append(i)

    # Keep the most recent N, clear the rest
    to_clear = compactable_indices[keep_recent:]
    if not to_clear:
        return 0

    cleared = 0
    for i in to_clear:
        msg = messages[i]
        spillover = _extract_spillover_filename_inline(msg.content)
        orig_len = len(msg.content)
        if spillover:
            placeholder = (
                f"[Old tool result cleared: {orig_len} chars. "
                f"Full data in '{spillover}'. "
                f"Use read_file('{spillover}') to retrieve.]"
            )
        else:
            placeholder = f"[Old tool result cleared: {orig_len} chars.]"

        # Mutate in-place (microcompact is synchronous, no store writes)
        conversation._messages[i] = Message(
            seq=msg.seq,
            role=msg.role,
            content=placeholder,
            tool_use_id=msg.tool_use_id,
            tool_calls=msg.tool_calls,
            is_error=msg.is_error,
            phase_id=msg.phase_id,
            is_transition_marker=msg.is_transition_marker,
        )
        cleared += 1

    if cleared > 0:
        # Invalidate cached token count
        conversation._last_api_input_tokens = None

    return cleared
def _find_tool_name_for_result(messages: list[Message], tool_msg: Message) -> str | None:
    """Find the tool name from the assistant message that triggered this tool result."""
    if not tool_msg.tool_use_id:
        return None
    for msg in messages:
        if msg.tool_calls:
            for tc in msg.tool_calls:
                if tc.get("id") == tool_msg.tool_use_id:
                    return tc.get("function", {}).get("name")
    return None


def _extract_spillover_filename_inline(content: str) -> str | None:
    """Quick inline check for spillover filename in tool result content."""
    match = re.search(r"saved to '([^']+)'", content, re.IGNORECASE)
    return match.group(1) if match else None
async def compact(
    ctx: NodeContext,
    conversation: NodeConversation,
    accumulator: OutputAccumulator | None,
    *,
    config: LoopConfig,
    event_bus: EventBus | None,
    char_limit: int = LLM_COMPACT_CHAR_LIMIT,
    max_depth: int = LLM_COMPACT_MAX_DEPTH,
) -> None:
    """Run the full compaction pipeline if conversation needs compaction.

    Pipeline stages (in order, short-circuits when budget is restored):
    0. Microcompaction (count-based tool result clearing — cheapest)
    1. Prune old tool results (token-budget based)
    2. Structure-preserving compaction (free, no LLM)
    3. LLM summary compaction (recursive split if too large)
    4. Emergency deterministic summary (fallback)
    """
    conv_id = id(conversation)

    # Circuit breaker: stop LLM-based compaction after repeated failures,
    # but still fall through to the emergency deterministic summary so
    # the conversation doesn't silently grow past the context window.
    # Without this, a persistent LLM outage during compaction would
    # leave the agent stuck sending oversized prompts until the API 400s.
    _llm_compaction_skipped = _failure_counts.get(conv_id, 0) >= MAX_CONSECUTIVE_FAILURES
    if _llm_compaction_skipped:
        logger.warning(
            "Circuit breaker: LLM compaction disabled after %d failures — "
            "skipping straight to emergency summary",
            _failure_counts[conv_id],
        )

    # Recompaction detection
    now = time.monotonic()
    last_time = _last_compact_times.get(conv_id)
    if last_time is not None and (now - last_time) < 30:
        logger.warning(
            "Recompaction chain detected: only %.1fs since last compaction",
            now - last_time,
        )

    ratio_before = conversation.usage_ratio()
    phase_grad = getattr(ctx, "continuous_mode", False)
    pre_inventory: list[dict[str, Any]] | None = None

    if ratio_before >= 1.0:
        pre_inventory = build_message_inventory(conversation)

    # --- Step 0: Microcompaction (count-based, cheapest) ---
    mc_cleared = microcompact(conversation)
    if mc_cleared > 0:
        logger.info(
            "Microcompact cleared %d old tool results: %.0f%% -> %.0f%%",
            mc_cleared,
            ratio_before * 100,
            conversation.usage_ratio() * 100,
        )
    if not conversation.needs_compaction():
        _record_success(conv_id, now)
        await log_compaction(
            ctx,
            conversation,
            ratio_before,
            event_bus,
            pre_inventory=pre_inventory,
        )
        return

    # --- Step 1: Prune old tool results (free, fast) ---
    protect = max(2000, config.max_context_tokens // 12)
    pruned = await conversation.prune_old_tool_results(
        protect_tokens=protect,
        min_prune_tokens=max(1000, protect // 3),
    )
    if pruned > 0:
        logger.info(
            "Pruned %d old tool results: %.0f%% -> %.0f%%",
            pruned,
            ratio_before * 100,
            conversation.usage_ratio() * 100,
        )
    if not conversation.needs_compaction():
        _record_success(conv_id, now)
        await log_compaction(
            ctx,
            conversation,
            ratio_before,
            event_bus,
            pre_inventory=pre_inventory,
        )
        return

    # --- Step 2: Standard structure-preserving compaction (free, no LLM) ---
    spill_dir = config.spillover_dir
    if spill_dir:
        await conversation.compact_preserving_structure(
            spillover_dir=spill_dir,
            keep_recent=4,
            phase_graduated=phase_grad,
        )
        if not conversation.needs_compaction():
            _record_success(conv_id, now)
            await log_compaction(
                ctx,
                conversation,
                ratio_before,
                event_bus,
                pre_inventory=pre_inventory,
            )
            return

    # --- Step 3: LLM summary compaction ---
    if ctx.llm is not None and not _llm_compaction_skipped:
        logger.info(
            "LLM summary compaction triggered (%.0f%% usage)",
            conversation.usage_ratio() * 100,
        )
        try:
            summary = await llm_compact(
                ctx,
                list(conversation.messages),
                accumulator,
                char_limit=char_limit,
                max_depth=max_depth,
                max_context_tokens=config.max_context_tokens,
            )
            await conversation.compact(
                summary,
                keep_recent=2,
                phase_graduated=phase_grad,
            )
        except Exception as e:
            logger.warning("LLM compaction failed: %s", e)
            _failure_counts[conv_id] = _failure_counts.get(conv_id, 0) + 1

        if not conversation.needs_compaction():
            _record_success(conv_id, now)
            await log_compaction(
                ctx,
                conversation,
                ratio_before,
                event_bus,
                pre_inventory=pre_inventory,
            )
            return

    # --- Step 4: Emergency deterministic summary (LLM failed/unavailable) ---
    logger.warning(
        "Emergency compaction (%.0f%% usage)",
        conversation.usage_ratio() * 100,
    )
    summary = build_emergency_summary(ctx, accumulator, conversation, config)
    await conversation.compact(
        summary,
        keep_recent=1,
        phase_graduated=phase_grad,
    )
    _record_success(conv_id, now)
    await log_compaction(
        ctx,
        conversation,
        ratio_before,
        event_bus,
        pre_inventory=pre_inventory,
    )


def _record_success(conv_id: int, timestamp: float) -> None:
    """Reset failure counter and record compaction time on success."""
    _failure_counts.pop(conv_id, None)
    _last_compact_times[conv_id] = timestamp
# --- LLM compaction with binary-search splitting ----------------------


def strip_images_from_messages(messages: list[Message]) -> list[Message]:
    """Strip image_content from messages before LLM summarisation.

    Images/documents are replaced with ``[image]`` markers so the summary
    notes they existed without wasting tokens sending binary data to the
    compaction LLM. Returns a new list (original messages are not mutated).
    """
    stripped: list[Message] = []
    for msg in messages:
        if msg.image_content:
            n_images = len(msg.image_content)
            marker = " ".join("[image]" for _ in range(n_images))
            content = f"{msg.content}\n{marker}" if msg.content else marker
            stripped.append(
                Message(
                    seq=msg.seq,
                    role=msg.role,
                    content=content,
                    tool_use_id=msg.tool_use_id,
                    tool_calls=msg.tool_calls,
                    is_error=msg.is_error,
                    phase_id=msg.phase_id,
                    is_transition_marker=msg.is_transition_marker,
                    image_content=None,  # stripped
                )
            )
        else:
            stripped.append(msg)
    return stripped


async def llm_compact(
    ctx: NodeContext,
    messages: list,
    accumulator: OutputAccumulator | None = None,
    _depth: int = 0,
    *,
    char_limit: int = LLM_COMPACT_CHAR_LIMIT,
    max_depth: int = LLM_COMPACT_MAX_DEPTH,
    max_context_tokens: int = 128_000,
) -> str:
    """Summarise *messages* with LLM, splitting recursively if too large.

    If the formatted text exceeds ``LLM_COMPACT_CHAR_LIMIT`` or the LLM
    rejects the call with a context-length error, the messages are split
    in half and each half is summarised independently. Tool history is
    appended once at the top-level call (``_depth == 0``).
    """
    from framework.agent_loop.conversation import extract_tool_call_history
    from framework.agent_loop.internals.tool_result_handler import is_context_too_large_error

    if _depth > max_depth:
        raise RuntimeError(f"LLM compaction recursion limit ({max_depth})")

    # Strip images before summarisation to avoid wasting tokens
    if _depth == 0:
        messages = strip_images_from_messages(messages)

    formatted = format_messages_for_summary(messages)

    # Proactive split: avoid wasting an API call on oversized input
    if len(formatted) > char_limit and len(messages) > 1:
        summary = await _llm_compact_split(
            ctx,
            messages,
            accumulator,
            _depth,
            char_limit=char_limit,
            max_depth=max_depth,
            max_context_tokens=max_context_tokens,
        )
    else:
        prompt = build_llm_compaction_prompt(
            ctx,
            accumulator,
            formatted,
            max_context_tokens=max_context_tokens,
        )
        summary_budget = max(1024, max_context_tokens // 2)
        try:
            response = await ctx.llm.acomplete(
                messages=[{"role": "user", "content": prompt}],
                system=(
                    "You are a conversation compactor for an AI agent. "
                    "Write a detailed summary that allows the agent to "
                    "continue its work. Preserve user-stated rules, "
                    "constraints, and account/identity preferences verbatim."
                ),
                max_tokens=summary_budget,
            )
            summary = response.content
        except Exception as e:
            if is_context_too_large_error(e) and len(messages) > 1:
                logger.info(
                    "LLM context too large (depth=%d, msgs=%d) — splitting",
                    _depth,
                    len(messages),
                )
                summary = await _llm_compact_split(
                    ctx,
                    messages,
                    accumulator,
                    _depth,
                    char_limit=char_limit,
                    max_depth=max_depth,
                    max_context_tokens=max_context_tokens,
                )
            else:
                raise

    # Append tool history at top level only
    if _depth == 0:
        tool_history = extract_tool_call_history(messages)
        if tool_history and "TOOLS ALREADY CALLED" not in summary:
            summary += "\n\n" + tool_history

    return summary


async def _llm_compact_split(
    ctx: NodeContext,
    messages: list,
    accumulator: OutputAccumulator | None,
    _depth: int,
    *,
    char_limit: int = LLM_COMPACT_CHAR_LIMIT,
    max_depth: int = LLM_COMPACT_MAX_DEPTH,
    max_context_tokens: int = 128_000,
) -> str:
    """Split messages in half and summarise each half independently."""
    mid = max(1, len(messages) // 2)
    s1 = await llm_compact(
        ctx,
        messages[:mid],
        None,
        _depth + 1,
        char_limit=char_limit,
        max_depth=max_depth,
        max_context_tokens=max_context_tokens,
    )
    s2 = await llm_compact(
        ctx,
        messages[mid:],
        accumulator,
        _depth + 1,
        char_limit=char_limit,
        max_depth=max_depth,
        max_context_tokens=max_context_tokens,
    )
    return s1 + "\n\n" + s2
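The halving strategy above bounds the work cleanly: with LLM_COMPACT_MAX_DEPTH = 10 the recursion can split the history into at most 2**10 = 1024 chunks before giving up. A standalone sketch of the same control flow (constants mirror the module; the final slice stands in for the real LLM call):

def summarise(chunks: list[str], depth: int = 0, *, char_limit: int = 240_000, max_depth: int = 10) -> str:
    if depth > max_depth:
        raise RuntimeError(f"summarisation recursion limit ({max_depth})")
    text = "\n\n".join(chunks)
    if len(text) > char_limit and len(chunks) > 1:
        mid = max(1, len(chunks) // 2)
        left = summarise(chunks[:mid], depth + 1, char_limit=char_limit, max_depth=max_depth)
        right = summarise(chunks[mid:], depth + 1, char_limit=char_limit, max_depth=max_depth)
        return left + "\n\n" + right
    return text[:1000]  # stand-in for the LLM summary of this chunk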
# --- Compaction helpers ------------------------------------------------


def format_messages_for_summary(messages: list) -> str:
    """Format messages as text for LLM summarisation."""
    lines: list[str] = []
    for m in messages:
        if m.role == "tool":
            content = m.content[:500]
            if len(m.content) > 500:
                content += "..."
            lines.append(f"[tool result]: {content}")
        elif m.role == "assistant" and m.tool_calls:
            names = [tc.get("function", {}).get("name", "?") for tc in m.tool_calls]
            text = m.content[:200] if m.content else ""
            lines.append(f"[assistant (calls: {', '.join(names)})]: {text}")
        else:
            lines.append(f"[{m.role}]: {m.content}")
    return "\n\n".join(lines)


def build_llm_compaction_prompt(
    ctx: NodeContext,
    accumulator: OutputAccumulator | None,
    formatted_messages: str,
    *,
    max_context_tokens: int = 128_000,
) -> str:
    """Build prompt for LLM compaction targeting 50% of token budget.

    Uses a structured section format inspired by Claude Code's compact
    service. Each section focuses on a different aspect of the conversation
    so the summariser produces consistently useful, well-organised output.
    """
    spec = ctx.agent_spec
    ctx_lines = [f"NODE: {spec.name} (id={spec.id})"]
    if spec.description:
        ctx_lines.append(f"PURPOSE: {spec.description}")
    if spec.success_criteria:
        ctx_lines.append(f"SUCCESS CRITERIA: {spec.success_criteria}")

    if accumulator:
        acc = accumulator.to_dict()
        done = {k: v for k, v in acc.items() if v is not None}
        todo = [k for k, v in acc.items() if v is None]
        if done:
            ctx_lines.append(
                "OUTPUTS ALREADY SET:\n"
                + "\n".join(f" {k}: {str(v)[:150]}" for k, v in done.items())
            )
        if todo:
            ctx_lines.append(f"OUTPUTS STILL NEEDED: {', '.join(todo)}")
    elif spec.output_keys:
        ctx_lines.append(f"OUTPUTS STILL NEEDED: {', '.join(spec.output_keys)}")

    target_tokens = max_context_tokens // 2
    target_chars = target_tokens * 4
    node_ctx = "\n".join(ctx_lines)

    return (
        "You are compacting an AI agent's conversation history. "
        "The agent is still working and needs to continue.\n\n"
        f"AGENT CONTEXT:\n{node_ctx}\n\n"
        f"CONVERSATION MESSAGES:\n{formatted_messages}\n\n"
        "INSTRUCTIONS:\n"
        f"Write a summary of approximately {target_chars} characters "
        f"(~{target_tokens} tokens).\n\n"
        "Organise the summary into these sections (omit empty ones):\n\n"
        "1. **Primary Request and Intent** — What the user originally asked "
        "for and the high-level goal the agent is working toward.\n"
        "2. **Key Technical Concepts** — Important domain-specific terms, "
        "patterns, or architectural decisions established in the conversation.\n"
        "3. **Files and Code Sections** — Specific files read/written/edited "
        "with brief descriptions of changes. Include short code snippets only "
        "when they capture critical logic.\n"
        "4. **Errors and Fixes** — Problems encountered and how they were "
        "resolved. Include root causes so the agent doesn't repeat them.\n"
        "5. **Problem Solving Efforts** — Approaches tried, dead ends hit, "
        "and reasoning behind the current strategy.\n"
        "6. **User Messages** — Preserve ALL user-stated rules, constraints, "
        "identity preferences, and account details verbatim.\n"
        "7. **Pending Tasks** — Work remaining, outputs still needed, and "
        "any blockers.\n"
        "8. **Current Work** — The most recent action taken and the immediate "
        "next step the agent should perform. This section is the most important "
        "for seamless resumption.\n\n"
        "Additional rules:\n"
        "- Be detailed enough that the agent can resume without re-doing work.\n"
        "- Preserve key decisions made and results obtained.\n"
        "- When in doubt, keep information rather than discard it.\n"
    )
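The prompt above asks the summariser for half the context window, converting tokens to characters with the common rough heuristic of 4 characters per token. For the default max_context_tokens of 128_000 that works out to:

target_tokens = 128_000 // 2      # 64_000 tokens
target_chars = target_tokens * 4  # 256_000 characters requested in the prompt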
def build_message_inventory(conversation: NodeConversation) -> list[dict[str, Any]]:
|
||||
"""Build a per-message size inventory for debug logging."""
|
||||
inventory: list[dict[str, Any]] = []
|
||||
for message in conversation.messages:
|
||||
content_chars = len(message.content)
|
||||
tool_call_args_chars = 0
|
||||
tool_name = None
|
||||
if message.tool_calls:
|
||||
for tool_call in message.tool_calls:
|
||||
args = tool_call.get("function", {}).get("arguments", "")
|
||||
tool_call_args_chars += (
|
||||
len(args) if isinstance(args, str) else len(json.dumps(args))
|
||||
)
|
||||
names = [
|
||||
tool_call.get("function", {}).get("name", "?") for tool_call in message.tool_calls
|
||||
]
|
||||
tool_name = ", ".join(names)
|
||||
elif message.role == "tool" and message.tool_use_id:
|
||||
for previous in conversation.messages:
|
||||
if previous.tool_calls:
|
||||
for tool_call in previous.tool_calls:
|
||||
if tool_call.get("id") == message.tool_use_id:
|
||||
tool_name = tool_call.get("function", {}).get("name", "?")
|
||||
break
|
||||
if tool_name:
|
||||
break
|
||||
entry: dict[str, Any] = {
|
||||
"seq": message.seq,
|
||||
"role": message.role,
|
||||
"content_chars": content_chars,
|
||||
}
|
||||
if tool_call_args_chars:
|
||||
entry["tool_call_args_chars"] = tool_call_args_chars
|
||||
if tool_name:
|
||||
entry["tool"] = tool_name
|
||||
if message.is_error:
|
||||
entry["is_error"] = True
|
||||
if message.phase_id:
|
||||
entry["phase"] = message.phase_id
|
||||
if content_chars > 2000:
|
||||
entry["preview"] = message.content[:200] + "…"
|
||||
inventory.append(entry)
|
||||
return inventory


def write_compaction_debug_log(
    ctx: NodeContext,
    before_pct: int,
    after_pct: int,
    level: str,
    inventory: list[dict[str, Any]] | None,
) -> None:
    """Write detailed compaction analysis to ~/.hive/compaction_log/."""
    log_dir = Path.home() / ".hive" / "compaction_log"
    log_dir.mkdir(parents=True, exist_ok=True)

    ts = datetime.now(UTC).strftime("%Y%m%dT%H%M%S_%f")
    node_label = ctx.agent_id.replace("/", "_")
    log_path = log_dir / f"{ts}_{node_label}.md"

    lines: list[str] = [
        f"# Compaction Debug — {ctx.agent_id}",
        f"**Time:** {datetime.now(UTC).isoformat()}",
        f"**Node:** {ctx.agent_spec.name} (`{ctx.agent_id}`)",
    ]
    if ctx.stream_id:
        lines.append(f"**Stream:** {ctx.stream_id}")
    lines.append(f"**Level:** {level}")
    lines.append(f"**Usage:** {before_pct}% → {after_pct}%")
    lines.append("")

    if inventory:
        total_chars = sum(
            entry.get("content_chars", 0) + entry.get("tool_call_args_chars", 0)
            for entry in inventory
        )
        lines.append(
            "## Pre-Compaction Message Inventory "
            f"({len(inventory)} messages, {total_chars:,} total chars)"
        )
        lines.append("")
        ranked = sorted(
            inventory,
            key=lambda entry: entry.get("content_chars", 0) + entry.get("tool_call_args_chars", 0),
            reverse=True,
        )
        lines.append("| # | seq | role | tool | chars | % of total | flags |")
        lines.append("|---|-----|------|------|------:|------------|-------|")
        for i, entry in enumerate(ranked, 1):
            chars = entry.get("content_chars", 0) + entry.get("tool_call_args_chars", 0)
            pct = (chars / total_chars * 100) if total_chars else 0
            tool = entry.get("tool", "")
            flags: list[str] = []
            if entry.get("is_error"):
                flags.append("error")
            if entry.get("phase"):
                flags.append(f"phase={entry['phase']}")
            lines.append(
                f"| {i} | {entry['seq']} | {entry['role']} | {tool} "
                f"| {chars:,} | {pct:.1f}% | {', '.join(flags)} |"
            )

        large = [entry for entry in ranked if entry.get("preview")]
        if large:
            lines.append("")
            lines.append("### Large message previews")
            for entry in large:
                lines.append(
                    f"\n**seq={entry['seq']}** ({entry['role']}, {entry.get('tool', '')}):"
                )
                lines.append(f"```\n{entry['preview']}\n```")
        lines.append("")

    try:
        log_path.write_text("\n".join(lines), encoding="utf-8")
        logger.debug("Compaction debug log written to %s", log_path)
    except OSError:
        logger.debug("Failed to write compaction debug log to %s", log_path)


async def log_compaction(
    ctx: NodeContext,
    conversation: NodeConversation,
    ratio_before: float,
    event_bus: EventBus | None,
    *,
    pre_inventory: list[dict[str, Any]] | None = None,
) -> None:
    """Log compaction result to runtime logger and event bus."""
    ratio_after = conversation.usage_ratio()
    before_pct = round(ratio_before * 100)
    after_pct = round(ratio_after * 100)

    # Determine label from what happened
    if after_pct >= before_pct - 1:
        level = "prune_only"
    elif ratio_after <= 0.6:
        level = "llm"
    else:
        level = "structural"

    logger.info(
        "Compaction complete (%s): %d%% -> %d%%",
        level,
        before_pct,
        after_pct,
    )

    if ctx.runtime_logger:
        ctx.runtime_logger.log_step(
            node_id=ctx.agent_id,
            node_type="event_loop",
            step_index=-1,
            llm_text=f"Context compacted ({level}): {before_pct}% \u2192 {after_pct}%",
            verdict="COMPACTION",
            verdict_feedback=f"level={level} before={before_pct}% after={after_pct}%",
        )

    if event_bus:
        from framework.host.event_bus import AgentEvent, EventType

        event_data: dict[str, Any] = {
            "level": level,
            "usage_before": before_pct,
            "usage_after": after_pct,
        }
        if pre_inventory is not None:
            event_data["message_inventory"] = pre_inventory
        await event_bus.publish(
            AgentEvent(
                type=EventType.CONTEXT_COMPACTED,
                stream_id=ctx.stream_id or ctx.agent_id,
                node_id=ctx.agent_id,
                data=event_data,
            )
        )

    await publish_context_usage(event_bus, ctx, conversation, "post_compaction")

    if os.environ.get("HIVE_COMPACTION_DEBUG"):
        write_compaction_debug_log(ctx, before_pct, after_pct, level, pre_inventory)
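
# The level label is inferred purely from the usage numbers, e.g. with
# hypothetical readings:
#   before=82%, after=81% -> "prune_only"  (usage barely moved)
#   before=82%, after=35% -> "llm"         (ratio_after <= 0.6)
#   before=82%, after=70% -> "structural"  (dropped, but still above 60%)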


def build_emergency_summary(
    ctx: NodeContext,
    accumulator: OutputAccumulator | None = None,
    conversation: NodeConversation | None = None,
    config: LoopConfig | None = None,
) -> str:
    """Build a structured emergency compaction summary.

    Unlike normal/aggressive compaction which uses an LLM summary,
    emergency compaction cannot afford an LLM call (context is already
    way over budget). Instead, build a deterministic summary from the
    node's known state so the LLM can continue working after
    compaction without losing track of its task and inputs.
    """
    parts = [
        "EMERGENCY COMPACTION — previous conversation was too large "
        "and has been replaced with this summary.\n"
    ]

    # 1. Node identity
    spec = ctx.agent_spec
    parts.append(f"NODE: {spec.name} (id={spec.id})")
    if spec.description:
        parts.append(f"PURPOSE: {spec.description}")

    # 2. Inputs the node received
    input_lines = []
    for key in spec.input_keys:
        value = ctx.input_data.get(key)
        if value is not None:
            # Truncate long values but keep them recognisable
            v_str = str(value)
            if len(v_str) > 200:
                v_str = v_str[:200] + "…"
            input_lines.append(f" {key}: {v_str}")
    if input_lines:
        parts.append("INPUTS:\n" + "\n".join(input_lines))

    # 3. Output accumulator state (what's been set so far)
    if accumulator:
        acc_state = accumulator.to_dict()
        set_keys = {k: v for k, v in acc_state.items() if v is not None}
        missing = [k for k, v in acc_state.items() if v is None]
        if set_keys:
            lines = [f" {k}: {str(v)[:150]}" for k, v in set_keys.items()]
            parts.append("OUTPUTS ALREADY SET:\n" + "\n".join(lines))
        if missing:
            parts.append(f"OUTPUTS STILL NEEDED: {', '.join(missing)}")
    elif spec.output_keys:
        parts.append(f"OUTPUTS STILL NEEDED: {', '.join(spec.output_keys)}")

    # 4. Available tools reminder
    if spec.tools:
        parts.append(f"AVAILABLE TOOLS: {', '.join(spec.tools)}")

    # 5. Spillover files — list actual files so the LLM can load
    # them immediately instead of having to call list_data_files first.
    spillover_dir = config.spillover_dir if config else None
    if spillover_dir:
        try:
            from pathlib import Path

            data_dir = Path(spillover_dir)
            if data_dir.is_dir():
                all_files = sorted(f.name for f in data_dir.iterdir() if f.is_file())
                # Separate conversation history files from regular data files
                conv_files = [f for f in all_files if re.match(r"conversation_\d+\.md$", f)]
                data_files = [f for f in all_files if f not in conv_files]

                if conv_files:
                    conv_list = "\n".join(
                        f" - {f} (full path: {data_dir / f})" for f in conv_files
                    )
                    parts.append(
                        "CONVERSATION HISTORY (freeform messages saved during compaction — "
                        "use read_file('<filename>') to review earlier dialogue):\n" + conv_list
                    )
                if data_files:
                    file_list = "\n".join(
                        f" - {f} (full path: {data_dir / f})" for f in data_files[:30]
                    )
                    parts.append("DATA FILES (use read_file('<filename>') to read):\n" + file_list)
                if not all_files:
                    parts.append(
                        "NOTE: Large tool results may have been saved to files. "
                        "Use list_directory to check the data directory."
                    )
        except Exception:
            parts.append(
                "NOTE: Large tool results were saved to files. "
                "Use read_file(path='<path>') to read them."
            )

    # 6. Tool call history (prevent re-calling tools)
    if conversation is not None:
        tool_history = _extract_tool_call_history(conversation)
        if tool_history:
            parts.append(tool_history)

    parts.append(
        "\nContinue working towards setting the remaining outputs. "
        "Use your tools and the inputs above."
    )
    return "\n\n".join(parts)


def _extract_tool_call_history(conversation: NodeConversation) -> str:
    """Extract tool call history from conversation messages.

    This is the instance-level variant that operates on a NodeConversation
    directly (vs. the module-level extract_tool_call_history in conversation.py
    which works on raw message lists).
    """
    from framework.agent_loop.conversation import extract_tool_call_history

    return extract_tool_call_history(list(conversation.messages))
@@ -1,269 +0,0 @@
"""Cursor persistence, queue draining, and pause detection.

Handles the checkpoint/resume cycle: restoring state from a previous
conversation store, writing cursor data, and managing injection/trigger
queues between iterations.
"""

from __future__ import annotations

import asyncio
import json
import logging
from collections.abc import Awaitable, Callable
from dataclasses import dataclass
from typing import Any

from framework.agent_loop.conversation import ConversationStore, NodeConversation
from framework.agent_loop.internals.types import LoopConfig, OutputAccumulator, TriggerEvent
from framework.llm.capabilities import supports_image_tool_results
from framework.orchestrator.node import NodeContext

logger = logging.getLogger(__name__)


@dataclass
class RestoredState:
    """State recovered from a previous checkpoint."""

    conversation: NodeConversation
    accumulator: OutputAccumulator
    start_iteration: int
    recent_responses: list[str]
    recent_tool_fingerprints: list[list[tuple[str, str]]]
    pending_input: dict[str, Any] | None


async def restore(
    conversation_store: ConversationStore | None,
    ctx: NodeContext,
    config: LoopConfig,
) -> RestoredState | None:
    """Attempt to restore from a previous checkpoint.

    Returns a ``RestoredState`` with conversation, accumulator, iteration
    counter, and stall/doom-loop detection state — everything needed to
    resume exactly where execution stopped.
    """
    if conversation_store is None:
        return None

    # In isolated mode, filter parts by phase_id so the node only sees
    # its own messages in the shared flat conversation store. In
    # continuous mode (or when _restore is called for timer-resume)
    # load all parts — the full conversation threads across nodes.
    _is_continuous = getattr(ctx, "continuous_mode", False)
    # The queen has agent_id="queen" but messages are stored with phase_id=None.
    # Only apply phase filtering for non-queen workers in a multi-agent setup.
    phase_filter = None if (_is_continuous or ctx.agent_id == "queen") else ctx.agent_id
    conversation = await NodeConversation.restore(
        conversation_store,
        phase_id=phase_filter,
        run_id=ctx.effective_run_id,
    )
    if conversation is None:
        logger.info(
            "[restore] No conversation found for agent_id=%s phase_filter=%s run_id=%s",
            ctx.agent_id,
            phase_filter,
            ctx.effective_run_id,
        )
        return None

    logger.info(
        "[restore] Restored %d messages for agent_id=%s phase_filter=%s run_id=%s",
        conversation.message_count,
        ctx.agent_id,
        phase_filter,
        ctx.effective_run_id,
    )

    # If run_id filtering removed all messages, this is an intentional
    # restart (new run), not a crash recovery. Return None so the caller
    # falls through to the fresh-conversation path.
    if conversation.message_count == 0:
        return None

    accumulator = await OutputAccumulator.restore(conversation_store, run_id=ctx.effective_run_id)
    accumulator.spillover_dir = config.spillover_dir
    accumulator.max_value_chars = config.max_output_value_chars

    cursor = await conversation_store.read_cursor() or {}
    start_iteration = cursor.get("iteration", 0) + 1

    # Restore stall/doom-loop detection state
    recent_responses: list[str] = cursor.get("recent_responses", [])
    raw_fps = cursor.get("recent_tool_fingerprints", [])
    recent_tool_fingerprints: list[list[tuple[str, str]]] = [
        [tuple(pair) for pair in fps]  # type: ignore[misc]
        for fps in raw_fps
    ]
    pending_input = cursor.get("pending_input")
    if not isinstance(pending_input, dict):
        pending_input = None

    logger.info(
        f"Restored event loop: iteration={start_iteration}, "
        f"messages={conversation.message_count}, "
        f"outputs={list(accumulator.values.keys())}, "
        f"stall_window={len(recent_responses)}, "
        f"doom_window={len(recent_tool_fingerprints)}"
    )
    return RestoredState(
        conversation=conversation,
        accumulator=accumulator,
        start_iteration=start_iteration,
        recent_responses=recent_responses,
        recent_tool_fingerprints=recent_tool_fingerprints,
        pending_input=pending_input,
    )
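
# Sketch of an assumed call site (the real orchestration lives elsewhere
# in the framework):
#
#   restored = await restore(store, ctx, config)
#   if restored is None:
#       ...  # fall through to the fresh-conversation path
#   else:
#       conversation = restored.conversation
#       iteration = restored.start_iteration  # resume where execution stopped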


async def write_cursor(
    conversation_store: ConversationStore | None,
    ctx: NodeContext,
    conversation: NodeConversation,
    accumulator: OutputAccumulator,
    iteration: int,
    *,
    recent_responses: list[str] | None = None,
    recent_tool_fingerprints: list[list[tuple[str, str]]] | None = None,
    pending_input: dict[str, Any] | None = None,
) -> None:
    """Write checkpoint cursor for crash recovery.

    Persists iteration counter, accumulator outputs, and stall/doom-loop
    detection state so that resume picks up exactly where execution stopped.
    """
    if conversation_store:
        cursor = await conversation_store.read_cursor() or {}
        cursor.update(
            {
                "iteration": iteration,
                "node_id": ctx.agent_id,
                "outputs": accumulator.to_dict(),
            }
        )
        # Persist stall/doom-loop detection state for reliable resume
        if recent_responses is not None:
            cursor["recent_responses"] = recent_responses
        if recent_tool_fingerprints is not None:
            # Convert list[list[tuple]] → list[list[list]] for JSON
            cursor["recent_tool_fingerprints"] = [
                [list(pair) for pair in fps] for fps in recent_tool_fingerprints
            ]
        # Persist blocked-input state so restored runs re-block instead of
        # manufacturing a synthetic continuation turn.
        cursor["pending_input"] = pending_input
        await conversation_store.write_cursor(cursor)
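
# The persisted cursor is plain JSON-serialisable data, e.g. with
# hypothetical values:
#
#   {"iteration": 7, "node_id": "worker_1",
#    "outputs": {"summary": "Done", "row_count": null},
#    "recent_responses": ["..."],
#    "recent_tool_fingerprints": [[["read_file", "{\"path\": \"a.txt\"}"]]],
#    "pending_input": null}
#
# Tuples become lists on write; restore() converts them back to tuples.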


async def drain_injection_queue(
    queue: asyncio.Queue,
    conversation: NodeConversation,
    *,
    ctx: NodeContext,
    describe_images_as_text_fn: (
        Callable[[list[dict[str, Any]]], Awaitable[str | None]] | None
    ) = None,
) -> int:
    """Drain all pending injected events as user messages. Returns count."""
    count = 0
    logger.debug(
        "[drain_injection_queue] Starting to drain queue, initial queue size: %s",
        queue.qsize() if hasattr(queue, "qsize") else "unknown",
    )
    while not queue.empty():
        try:
            content, is_client_input, image_content = queue.get_nowait()
            logger.info(
                "[drain] injected message (client_input=%s, images=%d): %s",
                is_client_input,
                len(image_content) if image_content else 0,
                content[:200] if content else "(empty)",
            )
            if image_content and ctx.llm and not supports_image_tool_results(ctx.llm.model):
                logger.info(
                    "Model '%s' does not support images; attempting vision fallback",
                    ctx.llm.model,
                )
                if describe_images_as_text_fn is not None:
                    description = await describe_images_as_text_fn(image_content)
                    if description:
                        content = f"{content}\n\n{description}" if content else description
                        logger.info("[drain] image described as text via vision fallback")
                else:
                    logger.info("[drain] no vision fallback available; images dropped")
                image_content = None
            # Real user input is stored as-is; external events get a prefix
            if is_client_input:
                await conversation.add_user_message(
                    content,
                    is_client_input=True,
                    image_content=image_content,
                )
            else:
                await conversation.add_user_message(f"[External event]: {content}")
            count += 1
        except asyncio.QueueEmpty:
            break
    return count


async def drain_trigger_queue(
    queue: asyncio.Queue,
    conversation: NodeConversation,
) -> int:
    """Drain all pending trigger events as a single batched user message.

    Multiple triggers are merged so the LLM sees them atomically and can
    reason about all pending triggers before acting.
    """
    triggers: list[TriggerEvent] = []
    while not queue.empty():
        try:
            triggers.append(queue.get_nowait())
        except asyncio.QueueEmpty:
            break

    if not triggers:
        return 0

    parts: list[str] = []
    for t in triggers:
        task = t.payload.get("task", "")
        task_line = f"\nTask: {task}" if task else ""
        payload_str = json.dumps(t.payload, default=str)
        parts.append(f"[TRIGGER: {t.trigger_type}/{t.source_id}]{task_line}\n{payload_str}")

    combined = "\n\n".join(parts)
    logger.info("[drain] %d trigger(s): %s", len(triggers), combined[:200])
    await conversation.add_user_message(combined)
    return len(triggers)
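
# Illustrative result: two queued triggers merge into one user message of
# roughly this shape (hypothetical payloads):
#
#   [TRIGGER: timer/daily_report]
#   Task: Send the daily report
#   {"task": "Send the daily report"}
#
#   [TRIGGER: webhook/github_push]
#   {"ref": "refs/heads/main"}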


async def check_pause(
    ctx: NodeContext,
    conversation: NodeConversation,
    iteration: int,
) -> bool:
    """
    Check if pause has been requested. Returns True if paused.

    Note: This check happens BEFORE starting iteration N, after completing N-1.
    If paused, the node exits having completed {iteration} iterations (0 to iteration-1).
    """
    # Check executor-level pause event (for /pause command, Ctrl+Z)
    if ctx.pause_event and ctx.pause_event.is_set():
        completed = iteration  # 0-indexed: iteration=3 means 3 iterations completed (0,1,2)
        logger.info(f"⏸ Pausing after {completed} iteration(s) completed (executor-level)")
        return True

    # Check context-level pause flags (legacy/alternative methods)
    pause_requested = ctx.input_data.get("pause_requested", False)
    if pause_requested:
        completed = iteration
        logger.info(f"⏸ Pausing after {completed} iteration(s) completed (context-level)")
        return True

    return False
@@ -1,358 +0,0 @@
"""EventBus publishing helpers for the event loop.

Thin wrappers around EventBus.emit_*() calls that check for bus existence
before publishing. Extracted to reduce noise in the main orchestrator.
"""

from __future__ import annotations

import logging
import time

from framework.agent_loop.conversation import NodeConversation
from framework.agent_loop.internals.types import HookContext
from framework.host.event_bus import EventBus
from framework.orchestrator.node import NodeContext

logger = logging.getLogger(__name__)


async def publish_loop_started(
    event_bus: EventBus | None,
    stream_id: str,
    node_id: str,
    max_iterations: int,
    execution_id: str = "",
) -> None:
    if event_bus:
        await event_bus.emit_node_loop_started(
            stream_id=stream_id,
            node_id=node_id,
            max_iterations=max_iterations,
            execution_id=execution_id,
        )


async def generate_action_plan(
    event_bus: EventBus | None,
    ctx: NodeContext,
    stream_id: str,
    node_id: str,
    execution_id: str,
) -> None:
    """Generate a brief action plan via LLM and emit it as an SSE event.

    Runs as a fire-and-forget task so it never blocks the main loop.
    """
    try:
        system_prompt = ctx.agent_spec.system_prompt or ""
        # Trim to keep the prompt small
        prompt_summary = system_prompt[:500]
        if len(system_prompt) > 500:
            prompt_summary += "..."

        tool_names = [t.name for t in ctx.available_tools]
        output_keys = ctx.agent_spec.output_keys or []

        prompt = (
            f'You are about to work on a task as node "{node_id}".\n\n'
            f"System prompt:\n{prompt_summary}\n\n"
            f"Tools available: {tool_names}\n"
            f"Required outputs: {output_keys}\n\n"
            f"Write a brief action plan (2-5 bullet points) describing "
            f"what you will do to complete this task. Be specific and concise.\n"
            f"Return ONLY the plan text, no preamble."
        )

        response = await ctx.llm.acomplete(
            messages=[{"role": "user", "content": prompt}],
            max_tokens=1024,
        )

        plan = response.content.strip()
        if plan and event_bus:
            await event_bus.emit_node_action_plan(
                stream_id=stream_id,
                node_id=node_id,
                plan=plan,
                execution_id=execution_id,
            )
    except Exception as e:
        logger.warning("Action plan generation failed for node '%s': %s", node_id, e)


async def publish_iteration(
    event_bus: EventBus | None,
    stream_id: str,
    node_id: str,
    iteration: int,
    execution_id: str = "",
    extra_data: dict | None = None,
) -> None:
    if event_bus:
        await event_bus.emit_node_loop_iteration(
            stream_id=stream_id,
            node_id=node_id,
            iteration=iteration,
            execution_id=execution_id,
            extra_data=extra_data,
        )


async def publish_llm_turn_complete(
    event_bus: EventBus | None,
    stream_id: str,
    node_id: str,
    stop_reason: str,
    model: str,
    input_tokens: int,
    output_tokens: int,
    cached_tokens: int = 0,
    execution_id: str = "",
    iteration: int | None = None,
) -> None:
    if event_bus:
        await event_bus.emit_llm_turn_complete(
            stream_id=stream_id,
            node_id=node_id,
            stop_reason=stop_reason,
            model=model,
            input_tokens=input_tokens,
            output_tokens=output_tokens,
            cached_tokens=cached_tokens,
            execution_id=execution_id,
            iteration=iteration,
        )


def log_skip_judge(
    ctx: NodeContext,
    node_id: str,
    iteration: int,
    feedback: str,
    tool_calls: list[dict],
    llm_text: str,
    turn_tokens: dict[str, int],
    iter_start: float,
) -> None:
    """Log a CONTINUE step that skips judge evaluation (e.g., waiting for input)."""
    if ctx.runtime_logger:
        ctx.runtime_logger.log_step(
            node_id=node_id,
            node_type="event_loop",
            step_index=iteration,
            verdict="CONTINUE",
            verdict_feedback=feedback,
            tool_calls=tool_calls,
            llm_text=llm_text,
            input_tokens=turn_tokens.get("input", 0),
            output_tokens=turn_tokens.get("output", 0),
            latency_ms=int((time.time() - iter_start) * 1000),
        )


async def publish_loop_completed(
    event_bus: EventBus | None,
    stream_id: str,
    node_id: str,
    iterations: int,
    execution_id: str = "",
) -> None:
    if event_bus:
        await event_bus.emit_node_loop_completed(
            stream_id=stream_id,
            node_id=node_id,
            iterations=iterations,
            execution_id=execution_id,
        )


async def publish_context_usage(
    event_bus: EventBus | None,
    ctx: NodeContext,
    conversation: NodeConversation,
    trigger: str,
) -> None:
    """Emit a CONTEXT_USAGE_UPDATED event with current context window state."""
    if not event_bus:
        return

    from framework.host.event_bus import AgentEvent, EventType

    estimated = conversation.estimate_tokens()
    max_tokens = conversation._max_context_tokens
    ratio = estimated / max_tokens if max_tokens > 0 else 0.0
    await event_bus.publish(
        AgentEvent(
            type=EventType.CONTEXT_USAGE_UPDATED,
            stream_id=ctx.stream_id or ctx.agent_id,
            node_id=ctx.agent_id,
            data={
                "usage_ratio": round(ratio, 4),
                "usage_pct": round(ratio * 100),
                "message_count": conversation.message_count,
                "estimated_tokens": estimated,
                "max_context_tokens": max_tokens,
                "trigger": trigger,
            },
        )
    )


async def publish_stalled(
    event_bus: EventBus | None,
    stream_id: str,
    node_id: str,
    execution_id: str = "",
) -> None:
    if event_bus:
        await event_bus.emit_node_stalled(
            stream_id=stream_id,
            node_id=node_id,
            reason="Consecutive similar responses detected",
            execution_id=execution_id,
        )


async def publish_text_delta(
    event_bus: EventBus | None,
    stream_id: str,
    node_id: str,
    content: str,
    snapshot: str,
    ctx: NodeContext,
    execution_id: str = "",
    iteration: int | None = None,
    inner_turn: int = 0,
) -> None:
    if event_bus:
        if ctx.emits_client_io:
            await event_bus.emit_client_output_delta(
                stream_id=stream_id,
                node_id=node_id,
                content=content,
                snapshot=snapshot,
                execution_id=execution_id,
                iteration=iteration,
                inner_turn=inner_turn,
            )
        else:
            await event_bus.emit_llm_text_delta(
                stream_id=stream_id,
                node_id=node_id,
                content=content,
                snapshot=snapshot,
                execution_id=execution_id,
                inner_turn=inner_turn,
            )


async def publish_tool_started(
    event_bus: EventBus | None,
    stream_id: str,
    node_id: str,
    tool_use_id: str,
    tool_name: str,
    tool_input: dict,
    execution_id: str = "",
) -> None:
    if event_bus:
        await event_bus.emit_tool_call_started(
            stream_id=stream_id,
            node_id=node_id,
            tool_use_id=tool_use_id,
            tool_name=tool_name,
            tool_input=tool_input,
            execution_id=execution_id,
        )


async def publish_tool_completed(
    event_bus: EventBus | None,
    stream_id: str,
    node_id: str,
    tool_use_id: str,
    tool_name: str,
    result: str,
    is_error: bool,
    execution_id: str = "",
) -> None:
    if event_bus:
        await event_bus.emit_tool_call_completed(
            stream_id=stream_id,
            node_id=node_id,
            tool_use_id=tool_use_id,
            tool_name=tool_name,
            result=result,
            is_error=is_error,
            execution_id=execution_id,
        )


async def publish_judge_verdict(
    event_bus: EventBus | None,
    stream_id: str,
    node_id: str,
    action: str,
    feedback: str = "",
    judge_type: str = "implicit",
    iteration: int = 0,
    execution_id: str = "",
) -> None:
    if event_bus:
        await event_bus.emit_judge_verdict(
            stream_id=stream_id,
            node_id=node_id,
            action=action,
            feedback=feedback,
            judge_type=judge_type,
            iteration=iteration,
            execution_id=execution_id,
        )


async def publish_output_key_set(
    event_bus: EventBus | None,
    stream_id: str,
    node_id: str,
    key: str,
    execution_id: str = "",
) -> None:
    if event_bus:
        pass


async def run_hooks(
    hooks_config: dict[str, list],
    event: str,
    conversation: NodeConversation,
    trigger: str | None = None,
) -> None:
    """Run all registered hooks for *event*, applying their results.

    Each hook receives a HookContext and may return a HookResult that:
    - replaces the system prompt (result.system_prompt)
    - injects an extra user message (result.inject)
    Hooks run in registration order; each sees the prompt as left by the
    previous hook.
    """
    hook_list = hooks_config.get(event, [])
    if not hook_list:
        return
    for hook in hook_list:
        ctx = HookContext(
            event=event,
            trigger=trigger,
            system_prompt=conversation.system_prompt,
        )
        try:
            result = await hook(ctx)
        except Exception:
            logger.warning("Hook '%s' raised an exception", event, exc_info=True)
            continue
        if result is None:
            continue
        if result.system_prompt:
            conversation.update_system_prompt(result.system_prompt)
        if result.inject:
            await conversation.add_user_message(result.inject)
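
# Sketch of a conforming hook (assumes a HookResult type exposing the
# system_prompt/inject fields read above):
#
#   async def remind_budget(ctx: HookContext) -> "HookResult | None":
#       if ctx.event == "pre_iteration":
#           return HookResult(inject="Reminder: stay within the token budget.")
#       return None
#
#   await run_hooks({"pre_iteration": [remind_budget]}, "pre_iteration", conversation)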
@@ -1,161 +0,0 @@
"""Judge evaluation pipeline for the event loop."""

from __future__ import annotations

import logging
from collections.abc import Callable

from framework.agent_loop.conversation import NodeConversation
from framework.agent_loop.internals.types import JudgeProtocol, JudgeVerdict, OutputAccumulator
from framework.orchestrator.node import NodeContext

logger = logging.getLogger(__name__)


class SubagentJudge:
    """Judge for subagent execution."""

    def __init__(self, task: str, max_iterations: int = 10):
        self._task = task
        self._max_iterations = max_iterations

    async def evaluate(self, context: dict[str, object]) -> JudgeVerdict:
        missing = context.get("missing_keys", [])
        if not isinstance(missing, list) or not missing:
            return JudgeVerdict(action="ACCEPT", feedback="")

        iteration = context.get("iteration", 0)
        if not isinstance(iteration, int):
            iteration = 0
        remaining = self._max_iterations - iteration - 1

        if remaining <= 3:
            urgency = (
                f"URGENT: Only {remaining} iterations left. "
                f"Stop all other work and call set_output NOW for: {missing}"
            )
        elif remaining <= self._max_iterations // 2:
            urgency = (
                f"WARNING: {remaining} iterations remaining. "
                f"You must call set_output for: {missing}"
            )
        else:
            urgency = f"Missing output keys: {missing}. Use set_output to provide them."

        return JudgeVerdict(action="RETRY", feedback=f"Your task: {self._task}\n{urgency}")


async def judge_turn(
    *,
    mark_complete_flag: bool,
    judge: JudgeProtocol | None,
    ctx: NodeContext,
    conversation: NodeConversation,
    accumulator: OutputAccumulator,
    assistant_text: str,
    tool_results: list[dict[str, object]],
    iteration: int,
    get_missing_output_keys_fn: Callable[
        [OutputAccumulator, list[str] | None, list[str] | None],
        list[str],
    ],
    max_context_tokens: int,
) -> JudgeVerdict:
    """Evaluate the current state using judge or implicit logic.

    Evaluation levels (in order):
    0. Short-circuits: mark_complete, skip_judge, tool-continue.
    1. Custom judge (JudgeProtocol) — full authority when set.
    2. Implicit judge — output-key check + optional conversation-aware
       quality gate (when ``success_criteria`` is defined).

    Returns a JudgeVerdict. ``feedback=None`` means no real evaluation
    happened (skip_judge, tool-continue); the caller must not inject a
    feedback message. Any non-None feedback (including ``""``) means a
    real evaluation occurred and will be logged into the conversation.
    """
    # --- Level 0: short-circuits (no evaluation) -----------------------

    if mark_complete_flag:
        return JudgeVerdict(action="ACCEPT")

    if ctx.agent_spec.skip_judge:
        return JudgeVerdict(action="RETRY")  # feedback=None → not logged

    # --- Level 1: custom judge -----------------------------------------

    if judge is not None:
        context = {
            "assistant_text": assistant_text,
            "tool_calls": tool_results,
            "output_accumulator": accumulator.to_dict(),
            "accumulator": accumulator,
            "iteration": iteration,
            "conversation_summary": conversation.export_summary(),
            "output_keys": ctx.agent_spec.output_keys,
            "missing_keys": get_missing_output_keys_fn(
                accumulator, ctx.agent_spec.output_keys, ctx.agent_spec.nullable_output_keys
            ),
        }
        verdict = await judge.evaluate(context)
        # Ensure evaluated RETRY always carries feedback for logging.
        if verdict.action == "RETRY" and not verdict.feedback:
            return JudgeVerdict(action="RETRY", feedback="Custom judge returned RETRY.")
        return verdict

    # --- Level 2: implicit judge ---------------------------------------

    # Real tool calls were made — let the agent keep working.
    if tool_results:
        return JudgeVerdict(action="RETRY")  # feedback=None → not logged

    missing = get_missing_output_keys_fn(
        accumulator, ctx.agent_spec.output_keys, ctx.agent_spec.nullable_output_keys
    )

    if missing:
        return JudgeVerdict(
            action="RETRY",
            feedback=(
                f"Task incomplete. Required outputs not yet produced: {missing}. "
                f"Follow your system prompt instructions to complete the work."
            ),
        )

    # All output keys present — run safety checks before accepting.

    output_keys = ctx.agent_spec.output_keys or []
    nullable_keys = set(ctx.agent_spec.nullable_output_keys or [])

    # All-nullable with nothing set → node produced nothing useful.
    all_nullable = output_keys and nullable_keys >= set(output_keys)
    none_set = not any(accumulator.get(k) is not None for k in output_keys)
    if all_nullable and none_set:
        return JudgeVerdict(
            action="RETRY",
            feedback=(
                f"No output keys have been set yet. "
                f"Use set_output to set at least one of: {output_keys}"
            ),
        )

    # Level 2b: conversation-aware quality check (if success_criteria set)
    if ctx.agent_spec.success_criteria and ctx.llm:
        from framework.orchestrator.conversation_judge import evaluate_phase_completion

        verdict = await evaluate_phase_completion(
            llm=ctx.llm,
            conversation=conversation,
            phase_name=ctx.agent_spec.name,
            phase_description=ctx.agent_spec.description,
            success_criteria=ctx.agent_spec.success_criteria,
            accumulator_state=accumulator.to_dict(),
            max_context_tokens=max_context_tokens,
        )
        if verdict.action != "ACCEPT":
            return JudgeVerdict(
                action=verdict.action,
                feedback=verdict.feedback or "Phase criteria not met.",
            )

    return JudgeVerdict(action="ACCEPT", feedback="")
@@ -1,106 +0,0 @@
"""Stall and doom-loop detection for the event loop.

Pure functions with no class dependencies — safe to call from any context.
"""

from __future__ import annotations

import json


def ngram_similarity(s1: str, s2: str, n: int = 2) -> float:
    """Jaccard similarity of n-gram sets.

    Returns 0.0-1.0, where 1.0 is exact match.
    Fast: O(len(s1) + len(s2)) using set operations.
    """

    def _ngrams(s: str) -> set[str]:
        return {s[i : i + n] for i in range(len(s) - n + 1) if s.strip()}

    if not s1 or not s2:
        return 0.0

    ngrams1, ngrams2 = _ngrams(s1.lower()), _ngrams(s2.lower())
    if not ngrams1 or not ngrams2:
        return 0.0

    intersection = len(ngrams1 & ngrams2)
    union = len(ngrams1 | ngrams2)
    return intersection / union if union else 0.0
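
# Worked example: with n=2, "I'm still stuck" vs "I'm stuck" (lowercased)
# share all 8 bigrams of the shorter string while the longer one has 12
# unique bigrams, so Jaccard = 8/12 ≈ 0.67. Near-identical retries score
# high; genuinely different responses score far lower.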


def is_stalled(
    recent_responses: list[str],
    threshold: int,
    similarity_threshold: float,
) -> bool:
    """Detect stall using n-gram similarity.

    Detects when ALL N consecutive responses are mutually similar
    (>= threshold). A single dissimilar response resets the signal.
    This catches phrases like "I'm still stuck" vs "I'm stuck"
    without false positives on "attempt 1" vs "attempt 2".
    """
    if len(recent_responses) < threshold:
        return False
    if not recent_responses[0]:
        return False

    # Every consecutive pair must be similar
    for i in range(1, len(recent_responses)):
        if ngram_similarity(recent_responses[i], recent_responses[i - 1]) < similarity_threshold:
            return False
    return True
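
# Illustrative calls (hypothetical thresholds):
#   is_stalled(["I'm stuck", "I'm still stuck", "Still stuck"],
#              threshold=3, similarity_threshold=0.5)
#   # -> True (pairwise bigram similarity ~0.67 and ~0.75)
#   is_stalled(["Reading file A", "Parsing rows", "Writing output"],
#              threshold=3, similarity_threshold=0.5)
#   # -> False (the first pair already falls below the threshold)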


def fingerprint_tool_calls(
    tool_results: list[dict],
) -> list[tuple[str, str]]:
    """Create deterministic fingerprints for a turn's tool calls.

    Each fingerprint is (tool_name, canonical_args_json). Order-sensitive
    so [search("a"), fetch("b")] != [fetch("b"), search("a")].
    """
    fingerprints = []
    for tr in tool_results:
        name = tr.get("tool_name", "")
        args = tr.get("tool_input", {})
        try:
            canonical = json.dumps(args, sort_keys=True, default=str)
        except (TypeError, ValueError):
            canonical = str(args)
        fingerprints.append((name, canonical))
    return fingerprints
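
# Illustrative call: argument dicts are canonicalised with sorted keys, so
# a turn that called read_file twice fingerprints as
#   fingerprint_tool_calls([
#       {"tool_name": "read_file", "tool_input": {"path": "a.txt"}},
#       {"tool_name": "read_file", "tool_input": {"path": "b.txt"}},
#   ])
#   # -> [("read_file", '{"path": "a.txt"}'), ("read_file", '{"path": "b.txt"}')]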


def is_tool_doom_loop(
    recent_tool_fingerprints: list[list[tuple[str, str]]],
    threshold: int,
    enabled: bool = True,
) -> tuple[bool, str]:
    """Detect doom loop via exact fingerprint match.

    Detects when N consecutive turns invoke the same tools with
    identical (canonicalized) arguments. Different arguments mean
    different work, so only exact matches count.

    Returns (is_doom_loop, description).
    """
    if not enabled:
        return False, ""
    if len(recent_tool_fingerprints) < threshold:
        return False, ""
    first = recent_tool_fingerprints[0]
    if not first:
        return False, ""

    # All turns in the window must match the first exactly
    if all(fp == first for fp in recent_tool_fingerprints[1:]):
        tool_names = [name for name, _ in first]
        desc = (
            f"Doom loop detected: {len(recent_tool_fingerprints)} "
            f"identical consecutive tool calls ({', '.join(tool_names)})"
        )
        return True, desc
    return False, ""
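
# Illustrative call (hypothetical threshold): three identical turns trip
# the detector; any change in arguments resets it.
#   fp = [("web_search", '{"query": "weather"}')]
#   is_tool_doom_loop([fp, fp, fp], threshold=3)
#   # -> (True, "Doom loop detected: 3 identical consecutive tool calls (web_search)")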
@@ -1,437 +0,0 @@
"""Synthetic tool builders for the event loop.

Factory functions that create ``Tool`` definitions for framework-level
synthetic tools (set_output, ask_user, escalate, delegate, report_to_parent).
Also includes the ``handle_set_output`` validation logic.

All functions are pure — they receive explicit parameters and return
``Tool`` or ``ToolResult`` objects with no side effects.
"""

from __future__ import annotations

from typing import Any

from framework.llm.provider import Tool, ToolResult


def sanitize_ask_user_inputs(
    raw_question: Any,
    raw_options: Any,
) -> tuple[str, list[str] | None]:
    """Self-heal a malformed ``ask_user`` tool call.

    Some model families (notably when the system prompt teaches them
    XML-ish scratchpad tags like ``<relationship>...</relationship>``)
    carry that style into tool arguments and produce calls like::

        ask_user({
            "question": "What now?</question>\\n_OPTIONS: [\\"A\\", \\"B\\"]"
        })

    Symptoms:
    - The chat UI renders ``</question>`` and ``_OPTIONS: [...]`` as
      literal text in the question bubble.
    - No buttons appear because the real ``options`` parameter is
      empty.

    This function:
    - Strips leading/trailing whitespace.
    - Removes a trailing ``</question>`` (with optional preceding
      whitespace) from the question text.
    - Detects an inline ``_OPTIONS:``, ``OPTIONS:``, or ``options:``
      line followed by a JSON array, parses it, and returns the
      recovered list as the second element.
    - Removes the parsed line from the returned question text.

    Returns ``(cleaned_question, recovered_options_or_None)``. The
    caller should treat the recovered list as a fallback only when
    the model did not also supply a real ``options`` array.
    """
    import json as _json
    import re as _re

    if raw_question is None:
        return "", None
    q = str(raw_question)

    # Strip a stray </question> tag (case-insensitive, with optional
    # preceding whitespace) anywhere in the string. This is the most
    # common failure mode and never represents valid content.
    q = _re.sub(r"\s*</\s*question\s*>\s*", "\n", q, flags=_re.IGNORECASE)

    # Look for an inline options line. Match _OPTIONS, OPTIONS, options
    # (with or without leading underscore), followed by ':' or '=', then
    # a JSON array on the same line OR on the next line.
    inline_options_re = _re.compile(
        r"(?im)^\s*_?options\s*[:=]\s*(\[.*?\])\s*$",
        _re.DOTALL,
    )

    recovered: list[str] | None = None
    match = inline_options_re.search(q)
    if match is not None:
        try:
            parsed = _json.loads(match.group(1))
            if isinstance(parsed, list):
                cleaned = [str(o).strip() for o in parsed if str(o).strip()]
                if 1 <= len(cleaned) <= 8:
                    recovered = cleaned
        except (ValueError, TypeError):
            pass
    if recovered is not None:
        # Remove the parsed line so it doesn't leak into the
        # rendered question text.
        q = inline_options_re.sub("", q, count=1)

    # Strip any final whitespace / leftover blank lines from the
    # question after removals.
    q = _re.sub(r"\n{3,}", "\n\n", q).strip()

    return q, recovered
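
# Illustrative call, using the failure mode from the docstring:
#   q, opts = sanitize_ask_user_inputs(
#       'What now?</question>\n_OPTIONS: ["A", "B"]', None
#   )
#   # q    -> "What now?"
#   # opts -> ["A", "B"]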


def build_ask_user_tool() -> Tool:
    """Build the synthetic ask_user tool for explicit user-input requests.

    The queen calls ask_user() when it needs to pause and wait
    for user input. Text-only turns WITHOUT ask_user flow through without
    blocking, allowing progress updates and summaries to stream freely.
    """
    return Tool(
        name="ask_user",
        description=(
            "You MUST call this tool whenever you need the user's response. "
            "Always call it after greeting the user, asking a question, or "
            "requesting approval. Do NOT call it for status updates or "
            "summaries that don't require a response.\n\n"
            "STRUCTURE RULES (CRITICAL):\n"
            "- The 'question' field is PLAIN TEXT shown to the user. Do NOT "
            "include XML tags, pseudo-tags like </question>, or option lists "
            "in the question string. The UI does not parse them — they "
            "render as raw text and look broken.\n"
            "- The 'options' parameter is the ONLY way to render buttons. "
            "If you want buttons, put them in the 'options' array, not in "
            "the question string. Do NOT write 'OPTIONS: [...]', "
            "'_options: [...]', or any inline list inside 'question'.\n"
            "- The question text must read as a single clean prompt with "
            "no markup. Example: 'What would you like to do?' — not "
            "'What would you like to do?</question>'.\n\n"
            "USAGE:\n"
            "Always include 2-3 predefined options. The UI automatically "
            "appends an 'Other' free-text input after your options, so NEVER "
            "include catch-all options like 'Custom idea', 'Something else', "
            "'Other', or 'None of the above' — the UI handles that. "
            "When the question primarily needs a typed answer but you must "
            "include options, make one option signal that typing is expected "
            "(e.g. 'I\\'ll type my response'). This helps users discover the "
            "free-text input. "
            "The ONLY exception: omit options when the question demands a "
            "free-form answer the user must type out (e.g. 'Describe your "
            "agent idea', 'Paste the error message').\n\n"
            "CORRECT EXAMPLE:\n"
            '{"question": "What would you like to do?", "options": '
            '["Build a new agent", "Modify existing agent", "Run tests"]}\n\n'
            "FREE-FORM EXAMPLE:\n"
            '{"question": "Describe the agent you want to build."}\n\n'
            "WRONG (do NOT do this — buttons will not render):\n"
            '{"question": "What now?</question>\\n_OPTIONS: [\\"A\\", \\"B\\"]"}'
        ),
        parameters={
            "type": "object",
            "properties": {
                "question": {
                    "type": "string",
                    "description": "The question or prompt shown to the user.",
                },
                "options": {
                    "type": "array",
                    "items": {"type": "string"},
                    "description": (
                        "2-3 specific predefined choices. Include in most cases. "
                        'Example: ["Option A", "Option B", "Option C"]. '
                        "The UI always appends an 'Other' free-text input, so "
                        "do NOT include catch-alls like 'Custom idea' or 'Other'. "
                        "Omit ONLY when the user must type a free-form answer."
                    ),
                    "minItems": 2,
                    "maxItems": 3,
                },
            },
            "required": ["question"],
        },
    )


def build_ask_user_multiple_tool() -> Tool:
    """Build the synthetic ask_user_multiple tool for batched questions.

    Queen-only tool that presents multiple questions at once so the user
    can answer them all in a single interaction rather than one at a time.
    """
    return Tool(
        name="ask_user_multiple",
        description=(
            "Ask the user multiple questions at once. Use this instead of "
            "ask_user when you have 2 or more questions to ask in the same "
            "turn — it lets the user answer everything in one go rather than "
            "going back and forth. Each question can have its own predefined "
            "options (2-3 choices) or be free-form. The UI renders all "
            "questions together with a single Submit button. "
            "ALWAYS prefer this over ask_user when you have multiple things "
            "to clarify. "
            "IMPORTANT: Do NOT repeat the questions in your text response — "
            "the widget renders them. Keep your text to a brief intro only. "
            '{"questions": ['
            ' {"id": "scope", "prompt": "What scope?", "options": ["Full", "Partial"]},'
            ' {"id": "format", "prompt": "Output format?", "options": ["PDF", "CSV", "JSON"]},'
            ' {"id": "details", "prompt": "Any special requirements?"}'
            "]}"
        ),
        parameters={
            "type": "object",
            "properties": {
                "questions": {
                    "type": "array",
                    "items": {
                        "type": "object",
                        "properties": {
                            "id": {
                                "type": "string",
                                "description": (
                                    "Short identifier for this question (used in the response)."
                                ),
                            },
                            "prompt": {
                                "type": "string",
                                "description": "The question text shown to the user.",
                            },
                            "options": {
                                "type": "array",
                                "items": {"type": "string"},
                                "description": (
                                    "2-3 predefined choices. The UI appends an "
                                    "'Other' free-text input automatically. "
                                    "Omit only when the user must type a free-form answer."
                                ),
                                "minItems": 2,
                                "maxItems": 3,
                            },
                        },
                        "required": ["id", "prompt"],
                    },
                    "minItems": 2,
                    "maxItems": 8,
                    "description": "List of questions to present to the user.",
                },
            },
            "required": ["questions"],
        },
    )


def build_set_output_tool(output_keys: list[str] | None) -> Tool | None:
    """Build the synthetic set_output tool for explicit output declaration."""
    if not output_keys:
        return None
    return Tool(
        name="set_output",
        description=(
            "Set an output value for this node. Call once per output key. "
            "Use this for brief notes, counts, status, and file references — "
            "NOT for large data payloads. When a tool result was saved to a "
            "data file, pass the filename as the value "
            "(e.g. 'google_sheets_get_values_1.txt') so the next phase can "
            "load the full data. Values exceeding ~2000 characters are "
            "auto-saved to data files. "
            f"Valid keys: {output_keys}"
        ),
        parameters={
            "type": "object",
            "properties": {
                "key": {
                    "type": "string",
                    "description": f"Output key. Must be one of: {output_keys}",
                    "enum": output_keys,
                },
                "value": {
                    "type": "string",
                    "description": (
                        "The output value — a brief note, count, status, "
                        "or data filename reference."
                    ),
                },
            },
            "required": ["key", "value"],
        },
    )


def build_escalate_tool() -> Tool:
    """Build the synthetic escalate tool for worker -> queen handoff."""
    return Tool(
        name="escalate",
        description=(
            "Escalate to the queen when requesting user input, "
            "blocked by errors, missing "
            "credentials, or ambiguous constraints that require supervisor "
            "guidance. Include a concise reason and optional context. "
            "The node will pause until the queen injects guidance."
        ),
        parameters={
            "type": "object",
            "properties": {
                "reason": {
                    "type": "string",
                    "description": (
                        "Short reason for escalation (e.g. 'Tool repeatedly failing')."
                    ),
                },
                "context": {
                    "type": "string",
                    "description": "Optional diagnostic details for the queen.",
                },
            },
            "required": ["reason"],
        },
    )


def build_report_to_parent_tool() -> Tool:
    """Build the synthetic ``report_to_parent`` tool.

    Parallel workers (those spawned by the overseer via
    ``run_parallel_workers``) call this to send a structured report back
    to the overseer queen when they have finished their task. Calling
    ``report_to_parent`` terminates the worker's loop cleanly -- do not
    call other tools after it.

    The overseer receives these as ``SUBAGENT_REPORT`` events and
    aggregates them into a single summary for the user.
    """
    return Tool(
        name="report_to_parent",
        description=(
            "Send a structured report back to the parent overseer and "
            "terminate. Call this when you have finished your task "
            "(success, partial, or failed) or cannot make further "
            "progress. Your loop ends after this call -- do not call any "
            "other tool afterwards. The overseer reads the summary + "
            "data fields and aggregates them into a user-facing response."
        ),
        parameters={
            "type": "object",
            "properties": {
                "status": {
                    "type": "string",
                    "enum": ["success", "partial", "failed"],
                    "description": (
                        "Overall outcome. 'success' = task complete. "
                        "'partial' = some progress but incomplete. "
                        "'failed' = could not make progress."
                    ),
                },
                "summary": {
                    "type": "string",
                    "description": (
                        "One-paragraph narrative for the overseer. What "
                        "you did, what you found, and any notable issues."
                    ),
                },
                "data": {
                    "type": "object",
                    "description": (
                        "Optional structured payload (rows fetched, IDs "
                        "processed, files written, etc.) that the "
                        "overseer can merge into its final summary."
                    ),
                },
            },
            "required": ["status", "summary"],
        },
    )


def handle_report_to_parent(tool_input: dict[str, Any]) -> ToolResult:
    """Normalise + validate a ``report_to_parent`` tool call.

    Returns a ``ToolResult`` with the acknowledgement text the LLM sees;
    the side effects (record on Worker, emit SUBAGENT_REPORT, terminate
    loop) are performed by ``AgentLoop`` after this helper returns.
    """
    status = str(tool_input.get("status", "success")).strip().lower()
    if status not in ("success", "partial", "failed"):
        status = "success"
    summary = str(tool_input.get("summary", "")).strip()
    if not summary:
        summary = f"(worker returned {status} with no summary)"
    data = tool_input.get("data") or {}
    if not isinstance(data, dict):
        data = {"value": data}
    # Store the normalised payload back on the input dict so the caller
    # can pick it up without re-parsing.
    tool_input["_normalised"] = {
        "status": status,
        "summary": summary,
        "data": data,
    }
    return ToolResult(
        tool_use_id=tool_input.get("tool_use_id", ""),
        content=(
            f"Report delivered to overseer (status={status}). "
            f"This worker will terminate now."
        ),
    )


def handle_set_output(
    tool_input: dict[str, Any],
    output_keys: list[str] | None,
) -> ToolResult:
    """Handle set_output tool call. Returns ToolResult (sync)."""
    import logging
    import re

    logger = logging.getLogger(__name__)

    key = tool_input.get("key", "")
    value = tool_input.get("value", "")
    valid_keys = output_keys or []

    # Recover from truncated JSON (max_tokens hit mid-argument).
    # The _raw key is set by litellm when json.loads fails.
    if not key and "_raw" in tool_input:
        raw = tool_input["_raw"]
        key_match = re.search(r'"key"\s*:\s*"(\w+)"', raw)
        if key_match:
            key = key_match.group(1)
        val_match = re.search(r'"value"\s*:\s*"', raw)
        if val_match:
            start = val_match.end()
            value = raw[start:].rstrip()
            for suffix in ('"}\n', '"}', '"'):
                if value.endswith(suffix):
                    value = value[: -len(suffix)]
                    break
        if key:
            logger.warning(
                "Recovered set_output args from truncated JSON: key=%s, value_len=%d",
                key,
                len(value),
            )
            # Re-inject so the caller sees proper key/value
            tool_input["key"] = key
            tool_input["value"] = value

    if key not in valid_keys:
        return ToolResult(
            tool_use_id="",
            content=f"Invalid output key '{key}'. Valid keys: {valid_keys}",
            is_error=True,
        )

    return ToolResult(
        tool_use_id="",
        content=f"Output '{key}' set successfully.",
        is_error=False,
    )
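
# Illustrative call for the truncated-JSON recovery path (assumes the
# unparsed argument string was placed under "_raw", as described above):
#   result = handle_set_output(
#       {"_raw": '{"key": "summary", "value": "Report sent'}, ["summary"]
#   )
#   # key is recovered as "summary" and value as 'Report sent', so the
#   # call succeeds instead of hitting the invalid-key error path.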
@@ -1,515 +0,0 @@
"""Tool result handling: truncation, spillover, JSON preview, and execution.

Manages tool result size limits, file spillover for large results, and
smart JSON previews. Also includes transient error classification and
the context-window-exceeded error detector.
"""

from __future__ import annotations

import asyncio
import contextvars
import json
import logging
import re
from pathlib import Path
from typing import Any

from framework.llm.provider import ToolResult, ToolUse
from framework.llm.stream_events import ToolCallEvent

logger = logging.getLogger(__name__)

# Pattern for detecting context-window-exceeded errors across LLM providers.
_CONTEXT_TOO_LARGE_RE = re.compile(
    r"context.{0,20}(length|window|limit|size)|"
    r"too.{0,10}(long|large|many.{0,10}tokens)|"
    r"(exceed|exceeds|exceeded).{0,30}(limit|window|context|tokens)|"
    r"maximum.{0,20}token|prompt.{0,20}too.{0,10}long",
    re.IGNORECASE,
)


def is_context_too_large_error(exc: BaseException) -> bool:
    """Detect whether an exception indicates the LLM input was too large."""
    cls = type(exc).__name__
    if "ContextWindow" in cls:
        return True
    return bool(_CONTEXT_TOO_LARGE_RE.search(str(exc)))
|
||||
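

# Examples (added for illustration — not part of the original module). The
# messages below are representative provider error strings, not verbatim ones:
#
#     >>> is_context_too_large_error(ValueError("prompt is too long: 210512 tokens"))
#     True
#     >>> is_context_too_large_error(ValueError("request exceeded the context window"))
#     True
#     >>> is_context_too_large_error(ValueError("invalid api key"))
#     False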


def is_transient_error(exc: BaseException) -> bool:
    """Classify whether an exception is transient (retryable) vs permanent.

    Transient: network errors, rate limits, server errors, timeouts.
    Permanent: auth errors, bad requests, context window exceeded.
    """
    try:
        from litellm.exceptions import (
            APIConnectionError,
            BadGatewayError,
            InternalServerError,
            RateLimitError,
            ServiceUnavailableError,
        )

        transient_types: tuple[type[BaseException], ...] = (
            RateLimitError,
            APIConnectionError,
            InternalServerError,
            BadGatewayError,
            ServiceUnavailableError,
            TimeoutError,
            ConnectionError,
            OSError,
        )
    except ImportError:
        transient_types = (TimeoutError, ConnectionError, OSError)

    if isinstance(exc, transient_types):
        return True

    # A RuntimeError from StreamErrorEvent carries a "Stream error:" prefix;
    # classify it by keyword.
    if isinstance(exc, RuntimeError):
        error_str = str(exc).lower()
        transient_keywords = [
            "rate limit",
            "429",
            "timeout",
            "connection",
            "internal server",
            "502",
            "503",
            "504",
            "service unavailable",
            "bad gateway",
            "overloaded",
            "failed to parse tool call",
        ]
        return any(kw in error_str for kw in transient_keywords)

    return False
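

# Retry sketch (added for illustration — not part of the original module): a
# caller might gate its backoff on this classifier. The backoff parameters
# below are arbitrary examples.
#
#     >>> is_transient_error(RuntimeError("Stream error: 429 rate limit"))
#     True
#     >>> is_transient_error(RuntimeError("Stream error: invalid request body"))
#     False
#
#     async def call_with_retry(fn, attempts=3):
#         for attempt in range(attempts):
#             try:
#                 return await fn()
#             except Exception as exc:
#                 if not is_transient_error(exc) or attempt == attempts - 1:
#                     raise
#                 await asyncio.sleep(2.0 ** attempt)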


def extract_json_metadata(parsed: Any, *, _depth: int = 0, _max_depth: int = 3) -> str:
    """Return a concise structural summary of parsed JSON.

    Reports key names, value types, and — crucially — array lengths so
    the LLM knows how much data exists beyond the preview.

    Returns an empty string for simple scalars.
    """
    if _depth >= _max_depth:
        if isinstance(parsed, dict):
            return f"dict with {len(parsed)} keys"
        if isinstance(parsed, list):
            return f"list of {len(parsed)} items"
        return type(parsed).__name__

    if isinstance(parsed, dict):
        if not parsed:
            return "empty dict"
        lines: list[str] = []
        indent = " " * (_depth + 1)
        for key, value in list(parsed.items())[:20]:
            if isinstance(value, list):
                line = f'{indent}"{key}": list of {len(value)} items'
                if value:
                    first = value[0]
                    if isinstance(first, dict):
                        sample_keys = list(first.keys())[:10]
                        line += f" (each item: dict with keys {sample_keys})"
                    elif isinstance(first, list):
                        line += f" (each item: list of {len(first)} elements)"
                lines.append(line)
            elif isinstance(value, dict):
                child = extract_json_metadata(value, _depth=_depth + 1, _max_depth=_max_depth)
                lines.append(f'{indent}"{key}": {child}')
            else:
                lines.append(f'{indent}"{key}": {type(value).__name__}')
        if len(parsed) > 20:
            lines.append(f"{indent}... and {len(parsed) - 20} more keys")
        return "\n".join(lines)

    if isinstance(parsed, list):
        if not parsed:
            return "empty list"
        desc = f"list of {len(parsed)} items"
        first = parsed[0]
        if isinstance(first, dict):
            sample_keys = list(first.keys())[:10]
            desc += f" (each item: dict with keys {sample_keys})"
        elif isinstance(first, list):
            desc += f" (each item: list of {len(first)} elements)"
        return desc

    return ""
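

# Example (added for illustration — not part of the original module):
#
#     >>> payload = {"items": [{"id": 1, "name": "a"}] * 50, "total": 50}
#     >>> print(extract_json_metadata(payload))
#      "items": list of 50 items (each item: dict with keys ['id', 'name'])
#      "total": int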


def build_json_preview(parsed: Any, *, max_chars: int = 5000) -> str | None:
    """Build a smart preview of parsed JSON, truncating large arrays.

    Shows the first 3 + last 1 items of large arrays with explicit count
    markers so the LLM cannot mistake the preview for the full dataset.

    Returns ``None`` if no truncation was needed (no large arrays).
    """
    _LARGE_ARRAY_THRESHOLD = 10

    def _truncate_arrays(obj: Any) -> tuple[Any, bool]:
        """Return (truncated_copy, was_truncated)."""
        if isinstance(obj, list) and len(obj) > _LARGE_ARRAY_THRESHOLD:
            n = len(obj)
            head = obj[:3]
            tail = obj[-1:]
            marker = f"... ({n - 4} more items omitted, {n} total) ..."
            return head + [marker] + tail, True
        if isinstance(obj, dict):
            changed = False
            out: dict[str, Any] = {}
            for k, v in obj.items():
                new_v, did = _truncate_arrays(v)
                out[k] = new_v
                changed = changed or did
            return (out, True) if changed else (obj, False)
        return obj, False

    preview_obj, was_truncated = _truncate_arrays(parsed)
    if not was_truncated:
        return None  # No large arrays — the caller should use raw slicing

    try:
        result = json.dumps(preview_obj, indent=2, ensure_ascii=False)
    except (TypeError, ValueError):
        return None

    if len(result) > max_chars:
        # Even 3 + 1 items is too big — fall back to a single item.
        def _minimal_arrays(obj: Any) -> Any:
            if isinstance(obj, list) and len(obj) > _LARGE_ARRAY_THRESHOLD:
                n = len(obj)
                return obj[:1] + [f"... ({n - 1} more items omitted, {n} total) ..."]
            if isinstance(obj, dict):
                return {k: _minimal_arrays(v) for k, v in obj.items()}
            return obj

        preview_obj = _minimal_arrays(parsed)
        try:
            result = json.dumps(preview_obj, indent=2, ensure_ascii=False)
        except (TypeError, ValueError):
            return None
        if len(result) > max_chars:
            result = result[:max_chars] + "…"

    return result
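

# Example (added for illustration — not part of the original module): a large
# array collapses to its first three and last items with an explicit count
# marker; payloads with no large arrays yield None so the caller falls back
# to raw slicing.
#
#     >>> print(build_json_preview({"rows": list(range(100))}))
#     {
#       "rows": [
#         0,
#         1,
#         2,
#         "... (96 more items omitted, 100 total) ...",
#         99
#       ]
#     }
#     >>> build_json_preview({"rows": [1, 2, 3]}) is None
#     True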


def truncate_tool_result(
    result: ToolResult,
    tool_name: str,
    *,
    max_tool_result_chars: int,
    spillover_dir: str | None,
    next_spill_filename_fn: Any,  # Callable[[str], str]
) -> ToolResult:
    """Persist a tool result to file and optionally truncate it for context.

    When *spillover_dir* is configured, EVERY non-error tool result is
    saved to a file (short filename like ``web_search_1.txt``). A
    ``[Saved to '...']`` annotation is appended so the reference
    survives pruning and compaction.

    - Small results (≤ limit): full content kept + file annotation
    - Large results (> limit): preview + file reference
    - Errors: pass through unchanged
    - read_file results: truncate with a pagination hint (no re-spill)
    """
    limit = max_tool_result_chars

    # Errors always pass through unchanged.
    if result.is_error:
        return result

    # read_file reads FROM spilled files — never re-spill (circular).
    # Just truncate with a pagination hint if the result is too large.
    if tool_name == "read_file":
        if limit <= 0 or len(result.content) <= limit:
            return result  # Small result — pass through as-is
        # Large result — truncate with a smart preview.
        PREVIEW_CAP = min(5000, max(limit - 500, limit // 2))

        metadata_str = ""
        smart_preview: str | None = None
        try:
            parsed_ld = json.loads(result.content)
            metadata_str = extract_json_metadata(parsed_ld)
            smart_preview = build_json_preview(parsed_ld, max_chars=PREVIEW_CAP)
        except (json.JSONDecodeError, TypeError, ValueError):
            pass

        if smart_preview is not None:
            preview_block = smart_preview
        else:
            preview_block = result.content[:PREVIEW_CAP] + "…"

        header = (
            f"[{tool_name} result: {len(result.content):,} chars — "
            f"too large for context. Use offset_bytes/limit_bytes "
            f"parameters to read smaller chunks.]"
        )
        if metadata_str:
            header += f"\n\nData structure:\n{metadata_str}"
        header += (
            "\n\nWARNING: This is an INCOMPLETE preview. Do NOT draw conclusions or counts from it."
        )

        truncated = f"{header}\n\nPreview (small sample only):\n{preview_block}"
        logger.info(
            "%s result truncated: %d → %d chars (use offset/limit to paginate)",
            tool_name,
            len(result.content),
            len(truncated),
        )
        return ToolResult(
            tool_use_id=result.tool_use_id,
            content=truncated,
            is_error=False,
            image_content=result.image_content,
            is_skill_content=result.is_skill_content,
        )

    spill_dir = spillover_dir
    if spill_dir:
        spill_path = Path(spill_dir)
        spill_path.mkdir(parents=True, exist_ok=True)
        filename = next_spill_filename_fn(tool_name)

        # Pretty-print JSON content so read_file's line-based
        # pagination works correctly.
        write_content = result.content
        parsed_json: Any = None  # tracked for metadata extraction
        try:
            parsed_json = json.loads(result.content)
            write_content = json.dumps(parsed_json, indent=2, ensure_ascii=False)
        except (json.JSONDecodeError, TypeError, ValueError):
            pass  # Not JSON — write as-is

        file_path = spill_path / filename
        file_path.write_text(write_content, encoding="utf-8")
        # Use an absolute path so parent agents can find files from subagents.
        abs_path = str(file_path.resolve())

        if limit > 0 and len(result.content) > limit:
            # Large result: build a small, metadata-rich preview so the
            # LLM cannot mistake it for the complete dataset.
            PREVIEW_CAP = 5000

            # Extract structural metadata (array lengths, key names).
            metadata_str = ""
            smart_preview = None
            if parsed_json is not None:
                metadata_str = extract_json_metadata(parsed_json)
                smart_preview = build_json_preview(parsed_json, max_chars=PREVIEW_CAP)

            if smart_preview is not None:
                preview_block = smart_preview
            else:
                preview_block = result.content[:PREVIEW_CAP] + "…"

            # Assemble the header with structural info + warning.
            header = (
                f"[Result from {tool_name}: {len(result.content):,} chars — "
                f"too large for context, saved to '{abs_path}'.]\n"
            )
            if metadata_str:
                header += f"\nData structure:\n{metadata_str}"
            header += (
                f"\n\nWARNING: The preview below is INCOMPLETE. "
                f"Do NOT draw conclusions or counts from it. "
                f"Use read_file(path='{abs_path}') to read the "
                f"full data before analysis."
            )

            content = f"{header}\n\nPreview (small sample only):\n{preview_block}"
            logger.info(
                "Tool result spilled to file: %s (%d chars → %s)",
                tool_name,
                len(result.content),
                abs_path,
            )
        else:
            # Small result: keep full content + annotation with the absolute path.
            content = f"{result.content}\n\n[Saved to '{abs_path}']"
            logger.info(
                "Tool result saved to file: %s (%d chars → %s)",
                tool_name,
                len(result.content),
                filename,
            )

        return ToolResult(
            tool_use_id=result.tool_use_id,
            content=content,
            is_error=False,
            image_content=result.image_content,
            is_skill_content=result.is_skill_content,
        )

    # No spillover_dir — truncate in place if needed.
    if limit > 0 and len(result.content) > limit:
        PREVIEW_CAP = min(5000, max(limit - 500, limit // 2))

        metadata_str = ""
        smart_preview = None
        try:
            parsed_inline = json.loads(result.content)
            metadata_str = extract_json_metadata(parsed_inline)
            smart_preview = build_json_preview(parsed_inline, max_chars=PREVIEW_CAP)
        except (json.JSONDecodeError, TypeError, ValueError):
            pass

        if smart_preview is not None:
            preview_block = smart_preview
        else:
            preview_block = result.content[:PREVIEW_CAP] + "…"

        header = (
            f"[Result from {tool_name}: {len(result.content):,} chars — "
            f"truncated to fit the context budget.]"
        )
        if metadata_str:
            header += f"\n\nData structure:\n{metadata_str}"
        header += (
            "\n\nWARNING: This is an INCOMPLETE preview. "
            "Do NOT draw conclusions or counts from the preview alone."
        )

        truncated = f"{header}\n\n{preview_block}"
        logger.info(
            "Tool result truncated in place: %s (%d → %d chars)",
            tool_name,
            len(result.content),
            len(truncated),
        )
        return ToolResult(
            tool_use_id=result.tool_use_id,
            content=truncated,
            is_error=False,
            image_content=result.image_content,
            is_skill_content=result.is_skill_content,
        )

    return result
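

# Usage sketch (added for illustration — not part of the original module). The
# counter-based filename helper below is hypothetical; in the real loop it is
# supplied from the loop's spill-counter state.
#
#     _counter = 0
#
#     def _next_name(tool_name: str) -> str:
#         global _counter
#         _counter += 1
#         return f"{tool_name}_{_counter}.txt"
#
#     trimmed = truncate_tool_result(
#         ToolResult(tool_use_id="tu_1", content="x" * 100_000),
#         "web_search",
#         max_tool_result_chars=30_000,
#         spillover_dir="/tmp/spill",
#         next_spill_filename_fn=_next_name,
#     )
#     # trimmed.content begins "[Result from web_search: 100,000 chars — too large
#     # for context, saved to '/tmp/spill/web_search_1.txt'.]" and the full
#     # payload is on disk at that path.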


async def execute_tool(
    tool_executor: Any,  # Callable[[ToolUse], ToolResult | Awaitable[ToolResult]] | None
    tc: ToolCallEvent,
    timeout: float,
    skill_dirs: list[str] | None = None,
) -> ToolResult:
    """Execute a tool call, handling both sync and async executors.

    Applies ``tool_call_timeout_seconds`` to prevent hung MCP servers
    from blocking the event loop indefinitely. The initial executor
    call is offloaded to a thread pool so that sync executors don't
    freeze the event loop.
    """
    if tool_executor is None:
        return ToolResult(
            tool_use_id=tc.tool_use_id,
            content=f"No tool executor configured for '{tc.tool_name}'",
            is_error=True,
        )

    skill_dirs = skill_dirs or []
    skill_read_tools = {"view_file", "read_file"}
    if tc.tool_name in skill_read_tools and skill_dirs:
        raw_path = tc.tool_input.get("path", "")
        if raw_path:
            resolved = Path(raw_path).resolve(strict=False)
            resolved_roots = [Path(skill_dir).resolve(strict=False) for skill_dir in skill_dirs]
            if any(resolved.is_relative_to(root) for root in resolved_roots):
                try:
                    content = resolved.read_text(encoding="utf-8")
                except Exception as exc:
                    return ToolResult(
                        tool_use_id=tc.tool_use_id,
                        content=f"Could not read skill resource '{raw_path}': {exc}",
                        is_error=True,
                    )
                return ToolResult(
                    tool_use_id=tc.tool_use_id,
                    content=content,
                    is_skill_content=resolved.name == "SKILL.md",
                )

    tool_use = ToolUse(id=tc.tool_use_id, name=tc.tool_name, input=tc.tool_input)

    async def _run() -> ToolResult:
        # Offload the executor call to a thread. Sync MCP executors
        # block on future.result() — running in a thread keeps the
        # event loop free so asyncio.wait_for can fire the timeout.
        # Copy the current context so contextvars (e.g. data_dir from
        # the execution context) propagate into the worker thread.
        loop = asyncio.get_running_loop()
        ctx = contextvars.copy_context()
        result = await loop.run_in_executor(None, ctx.run, tool_executor, tool_use)
        # Async executors return a coroutine — await it on the loop.
        if asyncio.iscoroutine(result) or asyncio.isfuture(result):
            result = await result
        return result

    try:
        if timeout > 0:
            result = await asyncio.wait_for(_run(), timeout=timeout)
        else:
            result = await _run()
    except TimeoutError:
        logger.warning("Tool '%s' timed out after %.0fs", tc.tool_name, timeout)
        # asyncio.wait_for cancels the awaiting coroutine, but the sync
        # executor running inside run_in_executor keeps going — and so
        # does any MCP subprocess it is blocked on. Reach through to the
        # owning MCPClient and force-disconnect it so the subprocess is
        # torn down. The next call_tool triggers a reconnect. Without this
        # the executor thread and MCP child leak on every timeout.
        kill_for_tool = getattr(tool_executor, "kill_for_tool", None)
        if callable(kill_for_tool):
            try:
                await asyncio.to_thread(kill_for_tool, tc.tool_name)
            except Exception as exc:  # defensive — never let cleanup crash the loop
                logger.warning(
                    "kill_for_tool('%s') raised during timeout handling: %s",
                    tc.tool_name,
                    exc,
                )
        return ToolResult(
            tool_use_id=tc.tool_use_id,
            content=(
                f"Tool '{tc.tool_name}' timed out after {timeout:.0f}s. "
                "The operation took too long and was cancelled. "
                "Try a simpler request or a different approach."
            ),
            is_error=True,
        )
    return result
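

# Usage sketch (added for illustration — not part of the original module),
# assuming ToolCallEvent can be constructed directly from these three fields.
# The echo executor is hypothetical.
#
#     def echo_executor(tool_use: ToolUse) -> ToolResult:
#         return ToolResult(tool_use_id=tool_use.id, content=str(tool_use.input))
#
#     result = await execute_tool(
#         echo_executor,
#         ToolCallEvent(tool_use_id="tu_1", tool_name="echo", tool_input={"msg": "hi"}),
#         timeout=10.0,
#     )
#     # result.content == "{'msg': 'hi'}"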


def restore_spill_counter(spillover_dir: str | None) -> int:
    """Scan spillover_dir for existing spill files and return the max counter.

    Returns the highest spill number found (or 0 if none).
    """
    if not spillover_dir:
        return 0
    spill_path = Path(spillover_dir)
    if not spill_path.is_dir():
        return 0
    max_n = 0
    for f in spill_path.iterdir():
        if not f.is_file():
            continue
        m = re.search(r"_(\d+)\.txt$", f.name)
        if m:
            max_n = max(max_n, int(m.group(1)))
    return max_n
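

# Example (added for illustration — not part of the original module): with a
# directory containing web_search_1.txt and web_search_3.txt, the counter
# resumes at 3, so the next spill file becomes *_4.txt.
#
#     >>> restore_spill_counter(None)
#     0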
@@ -1,234 +0,0 @@
"""Shared types and state containers for the event loop package."""

from __future__ import annotations

import json
import logging
import time
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Literal, Protocol, runtime_checkable

from framework.agent_loop.conversation import ConversationStore

logger = logging.getLogger(__name__)


@dataclass
class TriggerEvent:
    """A framework-level trigger signal (timer tick or webhook hit)."""

    trigger_type: str
    source_id: str
    payload: dict[str, Any] = field(default_factory=dict)
    timestamp: float = field(default_factory=time.time)


@dataclass
class JudgeVerdict:
    """Result of judge evaluation for the event loop."""

    action: Literal["ACCEPT", "RETRY", "ESCALATE"]
    # None = no evaluation happened (skip_judge, tool-continue); not logged.
    # "" = evaluated but no feedback; logged with default text.
    # "..." = evaluated with feedback; logged as-is.
    feedback: str | None = None


@runtime_checkable
class JudgeProtocol(Protocol):
    """Protocol for event-loop judges."""

    async def evaluate(self, context: dict[str, Any]) -> JudgeVerdict: ...


@dataclass
class LoopConfig:
    """Configuration for the event loop."""

    max_iterations: int = 50
    max_tool_calls_per_turn: int = 30
    judge_every_n_turns: int = 1
    stall_detection_threshold: int = 3
    stall_similarity_threshold: float = 0.85
    max_context_tokens: int = 32_000
    # Headroom reserved for the NEXT turn's input + output so that
    # proactive compaction always finishes before the hard context limit
    # is hit mid-stream. Scaled to match Claude Code's 13k-buffer-on-
    # 200k-window ratio (~6.5%) applied to hive's default 32k window,
    # with extra margin because hive's token estimator is char-based
    # and less tight than Anthropic's own counting. Override via
    # LoopConfig for larger windows.
    compaction_buffer_tokens: int = 8_000
    # The warning is emitted one buffer earlier so the user/telemetry gets
    # a "we're close" signal without triggering a compaction pass.
    compaction_warning_buffer_tokens: int = 12_000
    store_prefix: str = ""

    # Overflow margin for max_tool_calls_per_turn. Tool calls are only
    # discarded when the count exceeds max_tool_calls_per_turn * (1 + margin).
    tool_call_overflow_margin: float = 0.5

    # Tool result context management.
    max_tool_result_chars: int = 30_000
    spillover_dir: str | None = None

    # set_output value spilling.
    max_output_value_chars: int = 2_000

    # Stream retry.
    max_stream_retries: int = 5
    stream_retry_backoff_base: float = 2.0
    stream_retry_max_delay: float = 60.0
    # Persistent retry for capacity-class errors (429, 529, overloaded).
    # Unlike the bounded retry above, these keep trying until the wall-clock
    # budget below is exhausted — modelled after claude-code's withRetry.
    # The loop still publishes a retry event each attempt so the UI can
    # see progress. Set to 0 to disable and fall back to bounded retry.
    capacity_retry_max_seconds: float = 600.0
    capacity_retry_max_delay: float = 60.0

    # Tool doom loop detection.
    tool_doom_loop_threshold: int = 3

    # Client-facing auto-block grace period.
    cf_grace_turns: int = 1
    # Worker auto-escalation: text-only turns before escalating to the queen.
    worker_escalation_grace_turns: int = 1
    tool_doom_loop_enabled: bool = True
    # Silent worker: consecutive tool-only turns (no user-facing text)
    # before injecting a nudge to communicate progress.
    silent_tool_streak_threshold: int = 5

    # Per-tool-call timeout.
    tool_call_timeout_seconds: float = 60.0

    # LLM stream inactivity watchdog. If no stream event (delta, tool call,
    # finish) arrives within this many seconds, the stream task is cancelled
    # and a transient error is raised so the retry loop can back off and
    # reconnect. Prevents agents from hanging forever on a silently dead
    # HTTP connection (no provider heartbeat, no exception, just silence).
    # Set to 0 to disable.
    llm_stream_inactivity_timeout_seconds: float = 120.0

    # Subagent delegation timeout (wall-clock max).
    subagent_timeout_seconds: float = 3600.0

    # Subagent inactivity timeout — only time out if there has been no
    # activity for this duration. Resets whenever the subagent makes progress
    # (tool calls, LLM responses). Set to 0 to use only the wall-clock timeout.
    subagent_inactivity_timeout_seconds: float = 300.0

    # Lifecycle hooks.
    hooks: dict[str, list] | None = None

    def __post_init__(self) -> None:
        if self.hooks is None:
            object.__setattr__(self, "hooks", {})
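

# Configuration sketch (added for illustration — not part of the original
# module): widening the budget for a 200k-token window while keeping roughly
# the proportional compaction headroom described above. The numbers are
# illustrative, not recommended defaults.
#
#     config = LoopConfig(
#         max_context_tokens=200_000,
#         compaction_buffer_tokens=16_000,
#         compaction_warning_buffer_tokens=24_000,
#         spillover_dir="/tmp/agent_spill",
#     )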


@dataclass
class HookContext:
    """Context passed to every lifecycle hook."""

    event: str
    trigger: str | None
    system_prompt: str


@dataclass
class HookResult:
    """What a hook may return to modify node state."""

    system_prompt: str | None = None
    inject: str | None = None


@dataclass
class OutputAccumulator:
    """Accumulates output key-value pairs with optional write-through persistence."""

    values: dict[str, Any] = field(default_factory=dict)
    store: ConversationStore | None = None
    spillover_dir: str | None = None
    max_value_chars: int = 0
    run_id: str | None = None

    async def set(self, key: str, value: Any) -> None:
        """Set a key-value pair, auto-spilling large values to files."""
        value = self._auto_spill(key, value)
        self.values[key] = value
        if self.store:
            cursor = await self.store.read_cursor() or {}
            outputs = cursor.get("outputs", {})
            outputs[key] = value
            cursor["outputs"] = outputs
            await self.store.write_cursor(cursor)

    def _auto_spill(self, key: str, value: Any) -> Any:
        """Save large values to a file and return a reference string."""
        if self.max_value_chars <= 0 or not self.spillover_dir:
            return value

        val_str = json.dumps(value, ensure_ascii=False) if not isinstance(value, str) else value
        if len(val_str) <= self.max_value_chars:
            return value

        spill_path = Path(self.spillover_dir)
        spill_path.mkdir(parents=True, exist_ok=True)
        ext = ".json" if isinstance(value, (dict, list)) else ".txt"
        filename = f"output_{key}{ext}"
        write_content = (
            json.dumps(value, indent=2, ensure_ascii=False)
            if isinstance(value, (dict, list))
            else str(value)
        )
        file_path = spill_path / filename
        file_path.write_text(write_content, encoding="utf-8")
        file_size = file_path.stat().st_size
        logger.info(
            "set_output value auto-spilled: key=%s, %d chars -> %s (%d bytes)",
            key,
            len(val_str),
            filename,
            file_size,
        )
        # Use an absolute path so parent agents can find files from subagents.
        abs_path = str(file_path.resolve())
        return (
            f"[Saved to '{abs_path}' ({file_size:,} bytes). "
            f"Use read_file(path='{abs_path}') "
            f"to access full data.]"
        )

    def get(self, key: str) -> Any | None:
        return self.values.get(key)

    def to_dict(self) -> dict[str, Any]:
        return dict(self.values)

    def has_all_keys(self, required: list[str]) -> bool:
        return all(key in self.values and self.values[key] is not None for key in required)

    @classmethod
    async def restore(
        cls,
        store: ConversationStore,
        run_id: str | None = None,
    ) -> OutputAccumulator:
        cursor = await store.read_cursor()
        values = cursor.get("outputs", {}) if cursor else {}
        return cls(values=values, store=store, run_id=run_id)


__all__ = [
    "HookContext",
    "HookResult",
    "JudgeProtocol",
    "JudgeVerdict",
    "LoopConfig",
    "OutputAccumulator",
    "TriggerEvent",
]
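
# Usage sketch (added for illustration — not part of the original module):
# values longer than max_value_chars are replaced by a file reference, so
# consumers must read the spilled file rather than the inline value. The
# paths below are examples.
#
#     acc = OutputAccumulator(spillover_dir="/tmp/agent_spill", max_value_chars=2_000)
#     await acc.set("report", "x" * 10_000)
#     acc.get("report")
#     # -> "[Saved to '/tmp/agent_spill/output_report.txt' (10,000 bytes). ...]"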
@@ -1,93 +0,0 @@
"""Prompt composition for agent loops.

Builds canonical system prompts from AgentContext fields.
Extracted from the former orchestrator/prompting module.
"""

from __future__ import annotations

from dataclasses import dataclass
from datetime import datetime
from typing import Any


@dataclass(frozen=True)
class PromptSpec:
    identity_prompt: str = ""
    focus_prompt: str = ""
    narrative: str = ""
    accounts_prompt: str = ""
    skills_catalog_prompt: str = ""
    protocols_prompt: str = ""
    memory_prompt: str = ""
    agent_type: str = "event_loop"
    output_keys: tuple[str, ...] = ()


def stamp_prompt_datetime(prompt: str) -> str:
    local = datetime.now().astimezone()
    stamp = f"Current date and time: {local.strftime('%Y-%m-%d %H:%M %Z (UTC%z)')}"
    return f"{prompt}\n\n{stamp}" if prompt else stamp


def build_prompt_spec(
    ctx: Any,
    *,
    focus_prompt: str | None = None,
    narrative: str | None = None,
    memory_prompt: str | None = None,
) -> PromptSpec:
    resolved_memory = memory_prompt
    if resolved_memory is None:
        resolved_memory = getattr(ctx, "memory_prompt", "") or ""
        dynamic = getattr(ctx, "dynamic_memory_provider", None)
        if dynamic is not None:
            try:
                resolved_memory = dynamic() or ""
            except Exception:
                resolved_memory = getattr(ctx, "memory_prompt", "") or ""
    return PromptSpec(
        identity_prompt=ctx.identity_prompt or "",
        focus_prompt=focus_prompt
        if focus_prompt is not None
        else (ctx.agent_spec.system_prompt or ""),
        narrative=narrative if narrative is not None else (ctx.narrative or ""),
        accounts_prompt=ctx.accounts_prompt or "",
        skills_catalog_prompt=ctx.skills_catalog_prompt or "",
        protocols_prompt=ctx.protocols_prompt or "",
        memory_prompt=resolved_memory,
        agent_type=ctx.agent_spec.agent_type,
        output_keys=tuple(ctx.agent_spec.output_keys or ()),
    )


def build_system_prompt(spec: PromptSpec) -> str:
    parts: list[str] = []
    if spec.identity_prompt:
        parts.append(spec.identity_prompt)
    if spec.accounts_prompt:
        parts.append(f"\n{spec.accounts_prompt}")
    if spec.skills_catalog_prompt:
        parts.append(f"\n{spec.skills_catalog_prompt}")
    if spec.protocols_prompt:
        parts.append(f"\n{spec.protocols_prompt}")
    if spec.memory_prompt:
        parts.append(f"\n{spec.memory_prompt}")
    if spec.focus_prompt:
        parts.append(f"\n{spec.focus_prompt}")
    if spec.narrative:
        parts.append(f"\n{spec.narrative}")
    return "\n".join(parts)


def build_system_prompt_for_context(
    ctx: Any,
    *,
    focus_prompt: str | None = None,
    narrative: str | None = None,
    memory_prompt: str | None = None,
) -> str:
    spec = build_prompt_spec(
        ctx, focus_prompt=focus_prompt, narrative=narrative, memory_prompt=memory_prompt
    )
    return build_system_prompt(spec)
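

# Composition sketch (added for illustration — not part of the original
# module): sections are joined in a fixed order — identity, accounts, skills,
# protocols, memory, focus, narrative — skipping any that are empty.
#
#     >>> spec = PromptSpec(identity_prompt="You are a worker.", focus_prompt="Summarise the inbox.")
#     >>> print(build_system_prompt(spec))
#     You are a worker.
#     <BLANKLINE>
#     Summarise the inbox.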
@@ -1,267 +0,0 @@
"""Core types for the agent loop — the execution primitive of the colony.

AgentSpec: Declarative definition of what an agent does.
AgentContext: Everything an agent loop needs to execute.
AgentResult: What comes out of an agent loop execution.
AgentProtocol: Interface that all agent implementations must satisfy.
"""

from __future__ import annotations

from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from typing import Any

from pydantic import BaseModel, Field

from framework.llm.provider import LLMProvider, Tool
from framework.tracker.decision_tracker import DecisionTracker


class AgentSpec(BaseModel):
    """Declarative definition of an agent's capabilities and configuration.

    This is the blueprint from which AgentLoop instances are created.
    Workers in a colony are exact copies of the queen's AgentSpec.
    """

    id: str
    name: str
    description: str

    agent_type: str = Field(
        default="event_loop",
        description="Type: 'event_loop' (recommended), 'gcu' (browser automation).",
    )

    input_keys: list[str] = Field(
        default_factory=list,
        description="Keys this agent reads from input data",
    )
    output_keys: list[str] = Field(
        default_factory=list,
        description="Keys this agent produces as output",
    )
    nullable_output_keys: list[str] = Field(
        default_factory=list,
        description="Output keys that can be None without triggering validation errors",
    )

    input_schema: dict[str, dict] = Field(
        default_factory=dict,
        description="Optional schema for input validation.",
    )
    output_schema: dict[str, dict] = Field(
        default_factory=dict,
        description="Optional schema for output validation.",
    )

    system_prompt: str | None = Field(default=None, description="System prompt for the LLM")
    tools: list[str] = Field(default_factory=list, description="Tool names this agent can use")
    tool_access_policy: str = Field(
        default="explicit",
        description=(
            "'all' = all tools from registry, "
            "'explicit' = only tools listed in `tools` (default), "
            "'none' = no tools at all."
        ),
    )
    model: str | None = Field(default=None, description="Specific model override")

    function: str | None = Field(default=None, description="Function name or path")
    routes: dict[str, str] = Field(default_factory=dict, description="Condition -> target mapping")

    max_retries: int = Field(default=3)
    retry_on: list[str] = Field(default_factory=list, description="Error types to retry on")

    max_visits: int = Field(
        default=0,
        description=(
            "Max times this agent executes in one colony run. "
            "0 = unlimited. Set to 1 for one-shot agents."
        ),
    )

    output_model: type[BaseModel] | None = Field(
        default=None,
        description="Optional Pydantic model for validating LLM output.",
    )
    max_validation_retries: int = Field(
        default=2,
        description="Maximum retries when Pydantic validation fails",
    )

    client_facing: bool = Field(
        default=False,
        description="Deprecated — the queen is intrinsically interactive.",
    )

    success_criteria: str | None = Field(
        default=None,
        description="Natural-language criteria for phase completion.",
    )

    skip_judge: bool = Field(
        default=False,
        description="When True, the implicit judge is bypassed entirely.",
    )

    model_config = {"extra": "allow", "arbitrary_types_allowed": True}

    def is_queen(self) -> bool:
        return self.id == "queen"

    def supports_direct_user_io(self) -> bool:
        return self.is_queen()


def deprecated_client_facing_warning(spec: AgentSpec) -> str | None:
    if spec.client_facing and not spec.is_queen():
        return (
            f"Agent '{spec.id}' sets deprecated client_facing=True. "
            "Non-queen direct human I/O is no longer supported; route worker "
            "questions and approvals through queen escalation instead."
        )
    return None


def warn_if_deprecated_client_facing(spec: AgentSpec) -> None:
    import logging

    warning = deprecated_client_facing_warning(spec)
    if warning:
        logging.getLogger(__name__).warning(warning)
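

# Construction sketch (added for illustration — not part of the original
# module): a minimal one-shot worker spec with explicit tool access. The
# field values are examples.
#
#     spec = AgentSpec(
#         id="summariser",
#         name="Summariser",
#         description="Summarise fetched pages.",
#         tools=["read_file", "set_output"],
#         output_keys=["summary"],
#         max_visits=1,  # one-shot
#     )
#     assert not spec.is_queen()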


@dataclass
class AgentContext:
    """Everything an agent loop needs to execute.

    Passed to every agent implementation and provides:
    - Runtime (for decision logging)
    - LLM access
    - Tools
    - Goal context
    - Execution metadata
    """

    runtime: DecisionTracker

    agent_id: str
    agent_spec: AgentSpec

    input_data: dict[str, Any] = field(default_factory=dict)

    llm: LLMProvider | None = None
    available_tools: list[Tool] = field(default_factory=list)

    goal_context: str = ""
    goal: Any = None

    max_tokens: int = 4096

    attempt: int = 1
    max_attempts: int = 3

    runtime_logger: Any = None
    pause_event: Any = None

    accounts_prompt: str = ""

    identity_prompt: str = ""
    narrative: str = ""
    memory_prompt: str = ""

    event_triggered: bool = False

    execution_id: str = ""
    run_id: str = ""

    @property
    def effective_run_id(self) -> str | None:
        return self.run_id or None

    stream_id: str = ""

    dynamic_tools_provider: Any = None
    dynamic_prompt_provider: Any = None
    dynamic_memory_provider: Any = None

    skills_catalog_prompt: str = ""
    protocols_prompt: str = ""
    skill_dirs: list[str] = field(default_factory=list)
    default_skill_batch_nudge: str | None = None
    default_skill_warn_ratio: float | None = None

    iteration_metadata_provider: Any = None

    @property
    def is_queen_stream(self) -> bool:
        return self.stream_id == "queen" or self.agent_spec.is_queen()

    @property
    def emits_client_io(self) -> bool:
        return self.is_queen_stream

    @property
    def supports_direct_user_io(self) -> bool:
        return self.is_queen_stream and not self.event_triggered


@dataclass
class AgentResult:
    """Output of an agent loop execution."""

    success: bool
    output: dict[str, Any] = field(default_factory=dict)
    error: str | None = None

    next_agent: str | None = None
    route_reason: str | None = None

    tokens_used: int = 0
    latency_ms: int = 0

    validation_errors: list[str] = field(default_factory=list)

    conversation: Any = None

    # Machine-readable reason the loop stopped (see LoopExitReason in
    # agent_loop/internals/types.py). "?" means the loop didn't set one,
    # which should itself be treated as a diagnostic.
    exit_reason: str = "?"
    # Counters for reliability events surfaced during this execution.
    # Populated from the loop's TaskRegistry-style counters at return
    # time so callers can spot recurring failure modes without tailing
    # logs. Keys are stable strings; missing keys mean "zero".
    reliability_stats: dict[str, int] = field(default_factory=dict)

    def to_summary(self, spec: Any = None) -> str:
        if not self.success:
            return f"Failed: {self.error}"

        if not self.output:
            return "Completed (no output)"

        parts = [f"Completed with {len(self.output)} outputs:"]
        for key, value in list(self.output.items())[:5]:
            value_str = str(value)[:100]
            if len(str(value)) > 100:
                value_str += "..."
            parts.append(f"  - {key}: {value_str}")
        return "\n".join(parts)


class AgentProtocol(ABC):
    """Interface all agent implementations must satisfy."""

    @abstractmethod
    async def execute(self, ctx: AgentContext) -> AgentResult:
        pass

    def validate_input(self, ctx: AgentContext) -> list[str]:
        errors: list[str] = []
        for key in ctx.agent_spec.input_keys:
            if key not in ctx.input_data:
                errors.append(f"Missing required input: {key}")
        return errors
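

# Summary sketch (added for illustration — not part of the original module):
#
#     >>> result = AgentResult(success=True, output={"summary": "3 new messages"})
#     >>> print(result.to_summary())
#     Completed with 1 outputs:
#       - summary: 3 new messages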
@@ -1,17 +0,0 @@
"""Framework-provided agents."""

from pathlib import Path

FRAMEWORK_AGENTS_DIR = Path(__file__).parent


def list_framework_agents() -> list[Path]:
    """List all framework agent directories."""
    return sorted(
        [
            p
            for p in FRAMEWORK_AGENTS_DIR.iterdir()
            if p.is_dir() and ((p / "agent.json").exists() or (p / "agent.py").exists())
        ],
        key=lambda p: p.name,
    )
@@ -1,55 +0,0 @@
"""
Credential Tester — verify credentials (Aden OAuth + local API keys) via live API calls.

Interactive agent that lists all testable accounts, lets the user pick one,
loads the provider's tools, and runs a chat session to test the credential.
"""

from .agent import (
    CredentialTesterAgent,
    _list_aden_accounts,
    _list_env_fallback_accounts,
    _list_local_accounts,
    configure_for_account,
    conversation_mode,
    edges,
    entry_node,
    entry_points,
    get_tools_for_provider,
    goal,
    identity_prompt,
    list_connected_accounts,
    loop_config,
    nodes,
    pause_nodes,
    requires_account_selection,
    skip_credential_validation,
    terminal_nodes,
)
from .config import default_config

__version__ = "1.0.0"

__all__ = [
    "CredentialTesterAgent",
    "configure_for_account",
    "conversation_mode",
    "default_config",
    "edges",
    "entry_node",
    "entry_points",
    "get_tools_for_provider",
    "goal",
    "identity_prompt",
    "list_connected_accounts",
    "loop_config",
    "nodes",
    "pause_nodes",
    "requires_account_selection",
    "skip_credential_validation",
    "terminal_nodes",
    # Internal list helpers (exposed for testing)
    "_list_aden_accounts",
    "_list_local_accounts",
    "_list_env_fallback_accounts",
]
@@ -1,111 +0,0 @@
"""CLI entry point for the Credential Tester agent."""

import asyncio

import click

from .agent import CredentialTesterAgent


def setup_logging(verbose=False, debug=False):
    from framework.observability import configure_logging

    if debug:
        configure_logging(level="DEBUG")
    elif verbose:
        configure_logging(level="INFO")
    else:
        configure_logging(level="WARNING")


def pick_account(agent: CredentialTesterAgent) -> dict | None:
    """Interactive account picker. Returns the selected account dict or None."""
    accounts = agent.list_accounts()
    if not accounts:
        click.echo("No connected accounts found.")
        click.echo("Set ADEN_API_KEY and connect accounts at https://app.adenhq.com")
        return None

    click.echo("\nConnected accounts:\n")
    for i, acct in enumerate(accounts, 1):
        provider = acct.get("provider", "?")
        alias = acct.get("alias", "?")
        identity = acct.get("identity", {})
        detail_parts = [f"{k}: {v}" for k, v in identity.items() if v]
        detail = f" ({', '.join(detail_parts)})" if detail_parts else ""
        click.echo(f"  {i}. {provider}/{alias}{detail}")

    click.echo()
    while True:
        choice = click.prompt("Pick an account to test", type=int, default=1)
        if 1 <= choice <= len(accounts):
            return accounts[choice - 1]
        click.echo(f"Invalid choice. Enter 1-{len(accounts)}.")


@click.group()
@click.version_option(version="1.0.0")
def cli():
    """Credential Tester — verify synced credentials via live API calls."""
    pass


@cli.command()
@click.option("--verbose", "-v", is_flag=True)
@click.option("--debug", is_flag=True)
def shell(verbose, debug):
    """Interactive CLI session to test a credential."""
    setup_logging(verbose=verbose, debug=debug)
    asyncio.run(_interactive_shell(verbose))


async def _interactive_shell(verbose=False):
    agent = CredentialTesterAgent()
    account = pick_account(agent)
    if account is None:
        return

    agent.select_account(account)
    provider = account.get("provider", "?")
    alias = account.get("alias", "?")

    click.echo(f"\nTesting {provider}/{alias}")
    click.echo("Type your requests or 'quit' to exit.\n")

    await agent.start()

    try:
        result = await agent._agent_runtime.trigger_and_wait(
            entry_point_id="start",
            input_data={},
        )
        if result:
            click.echo(f"\nSession ended: {'success' if result.success else result.error}")
    except KeyboardInterrupt:
        click.echo("\nGoodbye!")
    finally:
        await agent.stop()


@cli.command(name="list")
def list_accounts():
    """List all connected accounts."""
    agent = CredentialTesterAgent()
    accounts = agent.list_accounts()

    if not accounts:
        click.echo("No connected accounts found.")
        return

    click.echo("\nConnected accounts:\n")
    for acct in accounts:
        provider = acct.get("provider", "?")
        alias = acct.get("alias", "?")
        identity = acct.get("identity", {})
        detail_parts = [f"{k}: {v}" for k, v in identity.items() if v]
        detail = f" ({', '.join(detail_parts)})" if detail_parts else ""
        click.echo(f"  {provider}/{alias}{detail}")


if __name__ == "__main__":
    cli()
@@ -1,659 +0,0 @@
"""Credential Tester agent — verify credentials via live API calls.

Supports both Aden OAuth2-synced accounts AND locally-stored API key accounts.
Aden accounts use account="alias" routing; local accounts inject the key into
the session environment so tools read it without an account= parameter.

When loaded via AgentRunner.load() (TUI picker, ``hive run``), the module-level
``nodes`` / ``edges`` variables provide a static graph. The TUI detects
``requires_account_selection`` and shows an account picker *before* starting
the agent. ``configure_for_account()`` then scopes the node's tools to the
selected provider.

When used directly (``CredentialTesterAgent``), the graph is built dynamically
after the user picks an account programmatically.
"""

from __future__ import annotations

import logging
from pathlib import Path
from typing import TYPE_CHECKING

from framework.config import get_max_context_tokens
from framework.host.agent_host import AgentHost
from framework.host.execution_manager import EntryPointSpec
from framework.llm import LiteLLMProvider
from framework.loader.mcp_registry import MCPRegistry
from framework.loader.tool_registry import ToolRegistry
from framework.orchestrator import Goal, NodeSpec, SuccessCriterion
from framework.orchestrator.checkpoint_config import CheckpointConfig
from framework.orchestrator.edge import GraphSpec
from framework.orchestrator.orchestrator import ExecutionResult

from .config import default_config
from .nodes import build_tester_node

if TYPE_CHECKING:
    from framework.loader import AgentLoader

logger = logging.getLogger(__name__)

# ---------------------------------------------------------------------------
# Goal
# ---------------------------------------------------------------------------

goal = Goal(
    id="credential-tester",
    name="Credential Tester",
    description="Verify that a credential can make real API calls.",
    success_criteria=[
        SuccessCriterion(
            id="api-call-success",
            description="At least one API call succeeds using the credential",
            metric="api_call_success",
            target="true",
            weight=1.0,
        ),
    ],
    constraints=[],
)

# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------


def get_tools_for_provider(provider_name: str) -> list[str]:
    """Collect tool names for a credential by credential_id OR credential_group.

    Matches on both ``credential_id`` (e.g. "google" → Gmail tools) and
    ``credential_group`` (e.g. "google_custom_search" → all google search tools).
    """
    from aden_tools.credentials import CREDENTIAL_SPECS

    tools: list[str] = []
    for spec in CREDENTIAL_SPECS.values():
        if spec.credential_id == provider_name or spec.credential_group == provider_name:
            tools.extend(spec.tools)
    return sorted(set(tools))
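

# Lookup sketch (added for illustration — not part of the original module).
# The provider names are examples; actual CREDENTIAL_SPECS entries vary by
# installation:
#
#     get_tools_for_provider("google")
#     # -> sorted union of every tool whose spec has credential_id == "google"
#     #    or credential_group == "google"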


def _list_aden_accounts() -> list[dict]:
    """List active accounts from the Aden platform (requires ADEN_API_KEY)."""
    import os

    api_key = os.environ.get("ADEN_API_KEY")
    if not api_key:
        return []

    try:
        from framework.credentials.aden.client import AdenClientConfig, AdenCredentialClient

        client = AdenCredentialClient(
            AdenClientConfig(
                base_url=os.environ.get("ADEN_API_URL", "https://api.adenhq.com"),
            )
        )
        try:
            integrations = client.list_integrations()
        finally:
            client.close()

        return [
            {
                "provider": c.provider,
                "alias": c.alias,
                "identity": {"email": c.email} if c.email else {},
                "integration_id": c.integration_id,
                "source": "aden",
            }
            for c in integrations
            if c.status == "active"
        ]
    except (ImportError, OSError) as exc:
        logger.debug("Could not list Aden accounts: %s", exc)
        return []
    except Exception:
        logger.warning("Unexpected error listing Aden accounts", exc_info=True)
        return []


def _list_local_accounts() -> list[dict]:
    """List named local API key accounts from LocalCredentialRegistry."""
    try:
        from framework.credentials.local.registry import LocalCredentialRegistry

        return [
            info.to_account_dict() for info in LocalCredentialRegistry.default().list_accounts()
        ]
    except ImportError as exc:
        logger.debug("Local credential registry unavailable: %s", exc)
        return []
    except Exception:
        logger.warning("Unexpected error listing local accounts", exc_info=True)
        return []


def _list_env_fallback_accounts() -> list[dict]:
    """Surface configured-but-unregistered credentials as testable entries.

    Detects credentials available via env vars OR stored in the encrypted
    store in the old flat format (e.g. ``brave_search`` with no alias).
    These are users who haven't yet run ``save_account()`` but have a working key.
    Shown with alias="default" and status="unknown".
    """
    import os

    from aden_tools.credentials import CREDENTIAL_SPECS

    # Collect IDs in the encrypted store (includes old flat entries like "brave_search").
    try:
        from framework.credentials.storage import EncryptedFileStorage

        encrypted_ids: set[str] = set(EncryptedFileStorage().list_all())
    except (ImportError, OSError) as exc:
        logger.debug("Could not read encrypted store: %s", exc)
        encrypted_ids = set()
    except Exception:
        logger.warning("Unexpected error reading encrypted store", exc_info=True)
        encrypted_ids = set()

    def _is_configured(cred_name: str, spec) -> bool:
        # 1. Env var present
        if os.environ.get(spec.env_var):
            return True
        # 2. Old flat encrypted entry (no slash — new entries use {credential_id}/{alias})
        if cred_name in encrypted_ids:
            return True
        return False

    seen_groups: set[str] = set()
    accounts: list[dict] = []

    for cred_name, spec in CREDENTIAL_SPECS.items():
        if not spec.direct_api_key_supported or not spec.tools:
            continue

        if spec.credential_group:
            if spec.credential_group in seen_groups:
                continue
            group_available = all(
                _is_configured(n, s)
                for n, s in CREDENTIAL_SPECS.items()
                if s.credential_group == spec.credential_group
            )
            if not group_available:
                continue
            seen_groups.add(spec.credential_group)
            provider = spec.credential_group
        else:
            if not _is_configured(cred_name, spec):
                continue
            provider = cred_name

        accounts.append(
            {
                "provider": provider,
                "alias": "default",
                "identity": {},
                "integration_id": None,
                "source": "local",
                "status": "unknown",
            }
        )

    return accounts


def list_connected_accounts() -> list[dict]:
    """List all testable accounts: Aden-synced + named local + env-var fallbacks."""
    aden = _list_aden_accounts()
    local = _list_local_accounts()

    # Show env-var fallbacks only for credentials not already in the named registry.
    local_providers = {a["provider"] for a in local}
    env_fallbacks = [
        a for a in _list_env_fallback_accounts() if a["provider"] not in local_providers
    ]

    return aden + local + env_fallbacks
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Module-level hooks (read by AgentRunner.load / TUI)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
skip_credential_validation = True
|
||||
"""Don't validate credentials at load time — we don't know which provider yet."""
|
||||
|
||||
requires_account_selection = True
|
||||
"""Signal TUI to show account picker before starting the agent."""
|
||||
|
||||
|
||||
def configure_for_account(runner: AgentLoader, account: dict) -> None:
|
||||
"""Scope the tester node's tools to the selected provider.
|
||||
|
||||
Handles both Aden accounts (account= routing) and local accounts
|
||||
(session-level env var injection, no account= parameter in prompt).
|
||||
"""
|
||||
provider = account["provider"]
|
||||
source = account.get("source", "aden")
|
||||
alias = account.get("alias", "unknown")
|
||||
identity = account.get("identity", {})
|
||||
tools = get_tools_for_provider(provider)
|
||||
|
||||
if source == "aden":
|
||||
tools.append("get_account_info")
|
||||
email = identity.get("email", "")
|
||||
detail = f" (email: {email})" if email else ""
|
||||
_configure_aden_node(runner, provider, alias, detail, tools)
|
||||
else:
|
||||
status = account.get("status", "unknown")
|
||||
_activate_local_account(provider, alias)
|
||||
_configure_local_node(runner, provider, alias, identity, tools, status)
|
||||
|
||||
|
||||
def _activate_local_account(credential_id: str, alias: str) -> None:
|
||||
"""Inject a named local account's key into the session environment.
|
||||
|
||||
Handles three cases:
|
||||
1. Named account in LocalCredentialRegistry (new format: {credential_id}/{alias})
|
||||
2. Old flat credential in EncryptedFileStorage (id == credential_id, no alias)
|
||||
3. Env var already set — skip injection (nothing to do)
|
||||
"""
|
||||
import os
|
||||
|
||||
from aden_tools.credentials import CREDENTIAL_SPECS
|
||||
|
||||
# Collect specs for this credential (handles grouped credentials too)
|
||||
group_specs = [
|
||||
(cred_name, spec)
|
||||
for cred_name, spec in CREDENTIAL_SPECS.items()
|
||||
if spec.credential_group == credential_id
|
||||
or spec.credential_id == credential_id
|
||||
or cred_name == credential_id
|
||||
]
|
||||
# Deduplicate — credential_id and credential_group may both match the same spec
|
||||
seen_env_vars: set[str] = set()
|
||||
|
||||
try:
|
||||
from framework.credentials.local.registry import LocalCredentialRegistry
|
||||
from framework.credentials.storage import EncryptedFileStorage
|
||||
|
||||
registry = LocalCredentialRegistry.default()
|
||||
flat_storage = EncryptedFileStorage()
|
||||
|
||||
for _cred_name, spec in group_specs:
|
||||
if spec.env_var in seen_env_vars:
|
||||
continue
|
||||
# If env var is already set, nothing to do for this one
|
||||
if os.environ.get(spec.env_var):
|
||||
seen_env_vars.add(spec.env_var)
|
||||
continue
|
||||
|
||||
seen_env_vars.add(spec.env_var)
|
||||
|
||||
# Determine key name based on spec
|
||||
key_name = "api_key"
|
||||
if spec.credential_group and "cse" in spec.env_var.lower():
|
||||
key_name = "cse_id"
|
||||
|
||||
key: str | None = None
|
||||
|
||||
# 1. Try named account in registry (new format)
|
||||
if alias != "default":
|
||||
key = registry.get_key(credential_id, alias, key_name)
|
||||
else:
|
||||
# For "default" alias, check registry first, then fall back to flat store
|
||||
key = registry.get_key(credential_id, "default", key_name)
|
||||
|
||||
# 2. Fall back to old flat encrypted entry (id == credential_id, no alias)
|
||||
if key is None:
|
||||
flat_cred = flat_storage.load(credential_id)
|
||||
if flat_cred is not None:
|
||||
key = flat_cred.get_key(key_name) or flat_cred.get_default_key()
|
||||
|
||||
if key:
|
||||
os.environ[spec.env_var] = key
|
||||
except (ImportError, KeyError, OSError) as exc:
|
||||
logger.debug("Could not inject credentials: %s", exc)
|
||||
except Exception:
|
||||
logger.warning("Unexpected error injecting credentials", exc_info=True)
|
||||
|
||||
|
||||
def _configure_aden_node(
    runner: AgentLoader,
    provider: str,
    alias: str,
    detail: str,
    tools: list[str],
) -> None:
    for node in runner.graph.nodes:
        if node.id == "tester":
            node.tools = sorted(set(tools))
            node.system_prompt = f"""\
You are a credential tester for the account: {provider}/{alias}{detail}

# Instructions

1. Suggest a simple read-only API call to verify the credential works \
(e.g. list messages, list channels, list contacts).
2. Execute the call when the user agrees.
3. Report the result: success (with sample data) or failure (with error).
4. Let the user request additional API calls to further test the credential.

# Account routing

IMPORTANT: Always pass `account="{alias}"` when calling any tool. \
This routes the API call to the correct credential. Never use the email \
or any other identifier — always use the alias exactly as shown.

# Rules

- Start with read-only operations (list, get) before write operations.
- Always confirm with the user before performing write operations.
- If a call fails, report the exact error — this helps diagnose credential issues.
- Be concise. No emojis.
"""
            break

    runner.intro_message = (
        f"Testing {provider}/{alias}{detail} — "
        f"{len(tools)} tools loaded. "
        "I'll suggest a read-only API call to verify the credential works."
    )


def _configure_local_node(
    runner: AgentLoader,
    provider: str,
    alias: str,
    identity: dict,
    tools: list[str],
    status: str,
) -> None:
    identity_parts = [f"{k}: {v}" for k, v in identity.items() if v]
    detail = f" ({', '.join(identity_parts)})" if identity_parts else ""
    status_note = " [key not yet validated]" if status == "unknown" else ""

    for node in runner.graph.nodes:
        if node.id == "tester":
            node.tools = sorted(set(tools))
            node.system_prompt = f"""\
You are a credential tester for the local API key: {provider}/{alias}{detail}{status_note}

# Instructions

1. Suggest a simple test call to verify the credential works \
(e.g. search for "test", list items, get profile info).
2. Execute the call when the user agrees.
3. Report the result: success (with sample data) or failure (with error).
4. Let the user request additional API calls to further test the credential.

# Rules

- Do NOT pass an `account` parameter — this credential is injected \
directly into the session environment and tools read it automatically.
- Start with read-only operations before write operations.
- Always confirm with the user before performing write operations.
- If a call fails, report the exact error — this helps diagnose credential issues.
- Be concise. No emojis.
"""
            break

    runner.intro_message = (
        f"Testing {provider}/{alias}{detail} — "
        f"{len(tools)} tools loaded. "
        "I'll suggest a test API call to verify the credential works."
    )


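# The two prompts above encode different routing conventions. A hypothetical
# tool call under each (the `send_message` tool and its signature are invented
# for illustration):
#
#   Aden-synced account (route by alias):
#       send_message(channel="#general", text="hi", account="work-slack")
#
#   Local API key (no account parameter; the key was injected into the
#   session environment by _activate_local_account):
#       send_message(channel="#general", text="hi")

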
# ---------------------------------------------------------------------------
# Module-level graph variables (read by AgentRunner.load)
# ---------------------------------------------------------------------------

nodes = [
    NodeSpec(
        id="tester",
        name="Credential Tester",
        description=(
            "Interactive credential testing — lets the user pick an account "
            "and verify it via API calls."
        ),
        node_type="event_loop",
        client_facing=True,
        max_node_visits=0,
        input_keys=[],
        output_keys=["test_result"],
        nullable_output_keys=["test_result"],
        tools=["get_account_info"],
        system_prompt="""\
You are a credential tester. Your job is to help the user verify that their \
connected accounts and API keys can make real API calls.

# Startup

1. Call ``get_account_info`` to list the user's connected accounts.
2. Present the list and ask the user which account to test.
3. Once they pick one, note the account's **alias** (e.g. "Timothy", "work-slack").
4. Suggest a simple read-only API call to verify the credential works \
(e.g. list messages, list channels, list contacts).
5. Execute the call when the user agrees.
6. Report the result: success (with sample data) or failure (with error).
7. Let the user request additional API calls to further test the credential.

# Account routing (Aden accounts only)

IMPORTANT: For Aden-synced accounts, always pass the account's **alias** as the \
``account`` parameter when calling any tool. For local API key accounts, do NOT \
pass an account parameter — they are pre-injected into the session.

# Rules

- Start with read-only operations (list, get) before write operations.
- Always confirm with the user before performing write operations.
- If a call fails, report the exact error — this helps diagnose credential issues.
- Be concise. No emojis.
""",
    ),
]

edges = []

entry_node = "tester"
entry_points = {"start": "tester"}
pause_nodes = []
terminal_nodes = ["tester"]  # Tester node can terminate

conversation_mode = "continuous"
identity_prompt = (
    "You are a credential tester that verifies connected accounts and API keys "
    "can make real API calls."
)
loop_config = {
    "max_iterations": 50,
    "max_tool_calls_per_turn": 30,
}


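# Sketch of how a loader could pick these module-level variables up via
# importlib (illustrative only; AgentRunner.load's real mechanics are not
# shown in this diff):
def _demo_load_graph_module(path: str) -> tuple:
    import importlib.util

    spec = importlib.util.spec_from_file_location("agent_module", path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    # The graph structure is read straight off the module namespace.
    return module.nodes, module.edges, module.entry_node

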
# ---------------------------------------------------------------------------
# Programmatic agent class (used by __main__.py CLI)
# ---------------------------------------------------------------------------


class CredentialTesterAgent:
    """Interactive agent that tests a specific credential via API calls.

    Usage:
        agent = CredentialTesterAgent()
        accounts = agent.list_accounts()
        agent.select_account(accounts[0])
        await agent.start()
        await agent.stop()
    """

    def __init__(self, config=None):
        self.config = config or default_config
        self._selected_account: dict | None = None
        self._agent_runtime: AgentHost | None = None
        self._tool_registry: ToolRegistry | None = None
        self._storage_path: Path | None = None

    def list_accounts(self) -> list[dict]:
        """List all testable accounts (Aden + local named + env-var fallbacks)."""
        return list_connected_accounts()

    def select_account(self, account: dict) -> None:
        """Select an account to test.

        Args:
            account: Account dict from list_accounts() with
                provider, alias, identity, source keys.
        """
        self._selected_account = account

    @property
    def selected_provider(self) -> str:
        if self._selected_account is None:
            raise RuntimeError("No account selected. Call select_account() first.")
        return self._selected_account["provider"]

    @property
    def selected_alias(self) -> str:
        if self._selected_account is None:
            raise RuntimeError("No account selected. Call select_account() first.")
        return self._selected_account.get("alias", "unknown")

    def _build_graph(self) -> GraphSpec:
        provider = self.selected_provider
        alias = self.selected_alias
        source = self._selected_account.get("source", "aden")
        identity = self._selected_account.get("identity", {})
        tools = get_tools_for_provider(provider)

        if source == "local":
            _activate_local_account(provider, alias)
        elif source == "aden":
            tools.append("get_account_info")

        tester_node = build_tester_node(
            provider=provider,
            alias=alias,
            tools=tools,
            identity=identity,
            source=source,
        )

        return GraphSpec(
            id="credential-tester-graph",
            goal_id=goal.id,
            version="1.0.0",
            entry_node="tester",
            entry_points={"start": "tester"},
            terminal_nodes=["tester"],  # Tester node can terminate
            pause_nodes=[],
            nodes=[tester_node],
            edges=[],
            default_model=self.config.model,
            max_tokens=self.config.max_tokens,
            loop_config={
                "max_iterations": 50,
                "max_tool_calls_per_turn": 30,
                "max_context_tokens": get_max_context_tokens(),
            },
            conversation_mode="continuous",
            identity_prompt=(
                f"You are testing the {provider}/{alias} credential. "
                "Help the user verify it works by making real API calls."
            ),
        )

    def _setup(self) -> None:
        if self._selected_account is None:
            raise RuntimeError("No account selected. Call select_account() first.")

        self._storage_path = Path.home() / ".hive" / "agents" / "credential_tester"
        self._storage_path.mkdir(parents=True, exist_ok=True)

        self._tool_registry = ToolRegistry()

        mcp_config_path = Path(__file__).parent / "mcp_servers.json"
        if mcp_config_path.exists():
            self._tool_registry.load_mcp_config(mcp_config_path)

        try:
            agent_dir = Path(__file__).parent
            registry = MCPRegistry()
            registry.initialize()
            if (agent_dir / "mcp_registry.json").is_file():
                self._tool_registry.set_mcp_registry_agent_path(agent_dir)
            registry_configs, selection_max_tools = registry.load_agent_selection(agent_dir)
            if registry_configs:
                self._tool_registry.load_registry_servers(
                    registry_configs,
                    preserve_existing_tools=True,
                    log_collisions=True,
                    max_tools=selection_max_tools,
                )
        except Exception:
            logger.warning("MCP registry config failed to load", exc_info=True)

        extra_kwargs = getattr(self.config, "extra_kwargs", {}) or {}
        llm = LiteLLMProvider(
            model=self.config.model,
            api_key=self.config.api_key,
            api_base=self.config.api_base,
            **extra_kwargs,
        )

        tool_executor = self._tool_registry.get_executor()
        tools = list(self._tool_registry.get_tools().values())

        graph = self._build_graph()

        self._agent_runtime = AgentHost(
            graph=graph,
            goal=goal,
            storage_path=self._storage_path,
            entry_points=[
                EntryPointSpec(
                    id="start",
                    name="Test Credential",
                    entry_node="tester",
                    trigger_type="manual",
                    isolation_level="isolated",
                ),
            ],
            llm=llm,
            tools=tools,
            tool_executor=tool_executor,
            checkpoint_config=CheckpointConfig(enabled=False),
            graph_id="credential_tester",
        )

    async def start(self) -> None:
        """Set up and start the agent runtime."""
        if self._agent_runtime is None:
            self._setup()
        if not self._agent_runtime.is_running:
            await self._agent_runtime.start()

    async def stop(self) -> None:
        """Stop the agent runtime."""
        if self._agent_runtime and self._agent_runtime.is_running:
            await self._agent_runtime.stop()
        self._agent_runtime = None

    async def run(self) -> ExecutionResult:
        """Run the agent (convenience for single execution)."""
        await self.start()
        try:
            result = await self._agent_runtime.trigger_and_wait(
                entry_point_id="start",
                input_data={},
            )
            return result or ExecutionResult(success=False, error="Execution timeout")
        finally:
            await self.stop()
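
# End-to-end usage sketch, mirroring the class docstring (selecting the first
# account is purely illustrative):
async def _demo_run_tester() -> None:
    agent = CredentialTesterAgent()
    accounts = agent.list_accounts()
    if not accounts:
        raise RuntimeError("No connected accounts to test.")
    agent.select_account(accounts[0])
    result = await agent.run()  # start -> trigger_and_wait -> stop
    print("success:", result.success)
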
@@ -1,19 +0,0 @@
"""Runtime configuration for Credential Tester agent."""

from dataclasses import dataclass

from framework.config import RuntimeConfig


@dataclass
class AgentMetadata:
    name: str = "Credential Tester"
    version: str = "1.0.0"
    description: str = (
        "Test connected accounts by making real API calls. "
        "Pick an account, verify credentials work, and explore available tools."
    )


metadata = AgentMetadata()
default_config = RuntimeConfig(temperature=0.3)
@@ -1,9 +0,0 @@
{
  "hive_tools": {
    "transport": "stdio",
    "command": "uv",
    "args": ["run", "python", "mcp_server.py", "--stdio"],
    "cwd": "../../../../tools",
    "description": "hive_tools MCP server with provider-specific tools"
  }
}
@@ -1,85 +0,0 @@
"""Node definitions for Credential Tester agent."""

from framework.orchestrator import NodeSpec


def build_tester_node(
    provider: str,
    alias: str,
    tools: list[str],
    identity: dict[str, str],
    source: str = "aden",
) -> NodeSpec:
    """Build the tester node dynamically for the selected account.

    Args:
        provider: Provider / credential name (e.g. "google", "brave_search").
        alias: User-set alias (e.g. "Timothy", "work").
        tools: Tool names available for this provider.
        identity: Identity dict (email, workspace, etc.) for context.
        source: "aden" or "local" — controls routing instructions in the prompt.
    """
    detail_parts = [f"{k}: {v}" for k, v in identity.items() if v]
    detail = f" ({', '.join(detail_parts)})" if detail_parts else ""

    if source == "aden":
        routing_section = f"""\
# Account routing

IMPORTANT: Always pass `account="{alias}"` when calling any tool. \
This routes the API call to the correct credential. Never use the email \
or any other identifier — always use the alias exactly as shown.
"""
    else:
        routing_section = """\
# Credential routing

This is a local API key credential — do NOT pass an `account` parameter. \
The key is pre-injected into the session environment and tools read it automatically.
"""

    account_label = "account" if source == "aden" else "local API key"

    return NodeSpec(
        id="tester",
        name="Credential Tester",
        description=(
            f"Interactive testing node for {provider}/{alias}. "
            f"Has access to all {provider} tools to verify the credential works."
        ),
        node_type="event_loop",
        client_facing=True,
        max_node_visits=0,
        input_keys=[],
        output_keys=["test_result"],
        nullable_output_keys=["test_result"],
        tools=tools,
        system_prompt=f"""\
You are a credential tester for the {account_label}: {provider}/{alias}{detail}

Your job is to help the user verify that this credential works by making \
real API calls using the available tools.

{routing_section}
# Instructions

1. Start by greeting the user and confirming which account you're testing.
2. Suggest a simple, safe, read-only API call to verify the credential works \
(e.g. list messages, list channels, list contacts, search for "test").
3. Execute the call when the user agrees.
4. Report the result clearly: success (with sample data) or failure (with error).
5. Let the user request additional API calls to further test the credential.

# Available tools

You have access to {len(tools)} tools for {provider}:
{chr(10).join(f"- {t}" for t in tools)}

# Rules

- Start with read-only operations (list, get) before write operations (create, update, delete).
- Always confirm with the user before performing write operations.
- If a call fails, report the exact error — this helps diagnose credential issues.
- Be concise. No emojis.
""",
    )
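
# Example invocation (alias, identity, and tool names are all invented for
# illustration):
#
#     node = build_tester_node(
#         provider="google",
#         alias="work",
#         tools=["gmail_list_messages", "gmail_get_message"],
#         identity={"email": "user@example.com"},
#         source="aden",
#     )
#
# The resulting NodeSpec's system_prompt embeds `account="work"` routing
# instructions because source is "aden".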
@@ -1,279 +0,0 @@
"""Agent discovery — scan known directories and return categorised AgentEntry lists."""

from __future__ import annotations

import json
from dataclasses import dataclass, field
from pathlib import Path


@dataclass
class WorkerEntry:
    """A single worker within a colony."""

    name: str
    config_path: Path
    description: str = ""
    tool_count: int = 0
    task: str = ""
    spawned_at: str = ""
    queen_name: str = ""
    colony_name: str = ""

    def to_dict(self) -> dict:
        return {
            "name": self.name,
            "config_path": str(self.config_path),
            "description": self.description,
            "tool_count": self.tool_count,
            "task": self.task,
            "spawned_at": self.spawned_at,
            "queen_name": self.queen_name,
            "colony_name": self.colony_name,
        }


@dataclass
class AgentEntry:
    """Lightweight agent metadata for the picker / API discover endpoint."""

    path: Path
    name: str
    description: str
    category: str
    session_count: int = 0
    run_count: int = 0
    node_count: int = 0
    tool_count: int = 0
    tags: list[str] = field(default_factory=list)
    last_active: str | None = None
    workers: list[WorkerEntry] = field(default_factory=list)


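# What the picker/API sees for one worker, via to_dict() (field values
# invented for illustration):
#
#     WorkerEntry(
#         name="researcher",
#         config_path=Path("~/.hive/colonies/demo/researcher.json"),
#         tool_count=4,
#         colony_name="demo",
#     ).to_dict()
#     # -> {"name": "researcher",
#     #     "config_path": "~/.hive/colonies/demo/researcher.json",
#     #     "description": "", "tool_count": 4, "task": "", "spawned_at": "",
#     #     "queen_name": "", "colony_name": "demo"}

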
def _get_last_active(agent_path: Path) -> str | None:
    """Return the most recent updated_at timestamp across all sessions.

    Checks both worker sessions (``~/.hive/agents/{name}/sessions/``) and
    queen sessions (``~/.hive/agents/queens/default/sessions/``) whose
    ``meta.json`` references the same *agent_path*.
    """
    from datetime import datetime

    agent_name = agent_path.name
    latest: str | None = None

    # 1. Worker sessions
    sessions_dir = Path.home() / ".hive" / "agents" / agent_name / "sessions"
    if sessions_dir.exists():
        for session_dir in sessions_dir.iterdir():
            if not session_dir.is_dir() or not session_dir.name.startswith("session_"):
                continue
            state_file = session_dir / "state.json"
            if not state_file.exists():
                continue
            try:
                data = json.loads(state_file.read_text(encoding="utf-8"))
                ts = data.get("timestamps", {}).get("updated_at")
                if ts and (latest is None or ts > latest):
                    latest = ts
            except Exception:
                continue

    # 2. Queen sessions (scan all queen identity directories)
    from framework.config import QUEENS_DIR

    if QUEENS_DIR.exists():
        resolved = agent_path.resolve()
        for queen_dir in QUEENS_DIR.iterdir():
            if not queen_dir.is_dir():
                continue
            sessions_dir = queen_dir / "sessions"
            if not sessions_dir.exists():
                continue
            for d in sessions_dir.iterdir():
                if not d.is_dir():
                    continue
                meta_file = d / "meta.json"
                if not meta_file.exists():
                    continue
                try:
                    meta = json.loads(meta_file.read_text(encoding="utf-8"))
                    stored = meta.get("agent_path")
                    if not stored or Path(stored).resolve() != resolved:
                        continue
                    ts = datetime.fromtimestamp(d.stat().st_mtime).isoformat()
                    if latest is None or ts > latest:
                        latest = ts
                except Exception:
                    continue

    return latest


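# Note: the `ts > latest` checks above compare plain strings; that is sound
# only because ISO-8601 timestamps sort lexicographically, e.g.
#     "2024-01-02T00:00:00" > "2024-01-01T23:59:59"  # True

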
def _count_sessions(agent_name: str) -> int:
    """Count session directories under ~/.hive/agents/{agent_name}/sessions/."""
    sessions_dir = Path.home() / ".hive" / "agents" / agent_name / "sessions"
    if not sessions_dir.exists():
        return 0
    return sum(1 for d in sessions_dir.iterdir() if d.is_dir() and d.name.startswith("session_"))


def _count_runs(agent_name: str) -> int:
    """Count unique run_ids across all sessions for an agent."""
    sessions_dir = Path.home() / ".hive" / "agents" / agent_name / "sessions"
    if not sessions_dir.exists():
        return 0
    run_ids: set[str] = set()
    for session_dir in sessions_dir.iterdir():
        if not session_dir.is_dir() or not session_dir.name.startswith("session_"):
            continue
        # runs.jsonl lives inside workspace subdirectories
        for runs_file in session_dir.rglob("runs.jsonl"):
            try:
                for line in runs_file.read_text(encoding="utf-8").splitlines():
                    line = line.strip()
                    if not line:
                        continue
                    record = json.loads(line)
                    rid = record.get("run_id")
                    if rid:
                        run_ids.add(rid)
            except Exception:
                continue
    return len(run_ids)


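# A runs.jsonl record only needs a "run_id" key for the count above; a
# plausible (invented) line looks like:
#     {"run_id": "run_20240501_ab12", "status": "completed"}

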
_EXCLUDED_JSON_STEMS = {"agent", "flowchart", "triggers", "configuration", "metadata"}


def _is_colony_dir(path: Path) -> bool:
    """Check if a directory is a colony with worker config files."""
    if not path.is_dir():
        return False
    return any(
        f.suffix == ".json" and f.stem not in _EXCLUDED_JSON_STEMS
        for f in path.iterdir()
        if f.is_file()
    )


def _find_worker_configs(colony_dir: Path) -> list[Path]:
    """Find all worker config JSON files in a colony directory."""
    return sorted(
        p
        for p in colony_dir.iterdir()
        if p.is_file() and p.suffix == ".json" and p.stem not in _EXCLUDED_JSON_STEMS
    )


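# Illustrative colony layout (directory and worker names invented):
#
#     ~/.hive/colonies/research_colony/
#         metadata.json      # excluded stem: colony metadata, not a worker
#         researcher.json    # worker config, found by _find_worker_configs
#         summarizer.json    # worker config

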
def _extract_agent_stats(agent_path: Path) -> tuple[int, int, list[str]]:
    """Extract worker count, tool count, and tags from a colony directory."""
    tags: list[str] = []

    worker_configs = _find_worker_configs(agent_path)
    if worker_configs:
        all_tools: set[str] = set()
        for wc_path in worker_configs:
            try:
                data = json.loads(wc_path.read_text(encoding="utf-8"))
                if isinstance(data, dict):
                    tools = data.get("tools", [])
                    if isinstance(tools, list):
                        all_tools.update(tools)
            except Exception:
                pass
        return len(worker_configs), len(all_tools), tags

    return 0, 0, tags


def discover_agents() -> dict[str, list[AgentEntry]]:
    """Discover agents from all known sources grouped by category."""
    from framework.config import COLONIES_DIR

    groups: dict[str, list[AgentEntry]] = {}
    sources = [
        ("Your Agents", COLONIES_DIR),
    ]

    # Track seen agent directory names to avoid duplicates when the same
    # agent exists in both colonies/ and exports/ (colonies takes priority).
    _seen_agent_names: set[str] = set()

    for category, base_dir in sources:
        if not base_dir.exists():
            continue
        entries: list[AgentEntry] = []
        for path in sorted(base_dir.iterdir(), key=lambda p: p.name):
            if not _is_colony_dir(path):
                continue
            if path.name in _seen_agent_names:
                continue
            _seen_agent_names.add(path.name)

            config_fallback_name = path.name.replace("_", " ").title()
            name = config_fallback_name
            desc = ""

            # Read colony metadata for queen provenance
            colony_queen_name = ""
            metadata_path = path / "metadata.json"
            if metadata_path.exists():
                try:
                    mdata = json.loads(metadata_path.read_text(encoding="utf-8"))
                    colony_queen_name = mdata.get("queen_name", "")
                except Exception:
                    pass

            worker_entries: list[WorkerEntry] = []
            worker_configs = _find_worker_configs(path)
            for wc_path in worker_configs:
                try:
                    data = json.loads(wc_path.read_text(encoding="utf-8"))
                    if isinstance(data, dict):
                        w = WorkerEntry(
                            name=data.get("name", wc_path.stem),
                            config_path=wc_path,
                            description=data.get("description", ""),
                            tool_count=len(data.get("tools", [])),
                            task=data.get("goal", {}).get("description", ""),
                            spawned_at=data.get("spawned_at", ""),
                            queen_name=colony_queen_name,
                            colony_name=path.name,
                        )
                        worker_entries.append(w)
                        if not desc:
                            desc = data.get("description", "")
                except Exception:
                    pass

            node_count = len(worker_entries)
            # tool_count is already tracked per worker; report the largest.
            tool_count = max((w.tool_count for w in worker_entries), default=0)

            entries.append(
                AgentEntry(
                    path=path,
                    name=name,
                    description=desc,
                    category=category,
                    session_count=_count_sessions(path.name),
                    run_count=_count_runs(path.name),
                    node_count=node_count,
                    tool_count=tool_count,
                    tags=[],
                    last_active=_get_last_active(path),
                    workers=worker_entries,
                )
            )
        if entries:
            existing = groups.get(category, [])
            existing.extend(entries)
            groups[category] = existing

    return groups
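
# Usage sketch for the discover endpoint (output shape only; counts invented):
#
#     for category, agent_entries in discover_agents().items():
#         for e in agent_entries:
#             print(category, e.name, e.node_count, "workers", e.tool_count, "tools")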
@@ -1,15 +0,0 @@
"""Queen -- the agent builder for the Hive framework."""

from .agent import queen_goal, queen_loop_config
from .config import AgentMetadata, RuntimeConfig, default_config, metadata

__version__ = "1.0.0"

__all__ = [
    "queen_goal",
    "queen_loop_config",
    "RuntimeConfig",
    "AgentMetadata",
    "default_config",
    "metadata",
]
Some files were not shown because too many files have changed in this diff.